Gentoo Archives: gentoo-commits

From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 3.3.5/, 2.6.32/, 3.3.6/, 3.2.16/, 3.2.17/
Date: Mon, 14 May 2012 01:08:20
Message-Id: 1336957640.c17d5a9f4cc40f82beeedf119798bf4ee78b2b1a.blueness@gentoo
commit: c17d5a9f4cc40f82beeedf119798bf4ee78b2b1a
Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Mon May 14 01:07:20 2012 +0000
Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Mon May 14 01:07:20 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=c17d5a9f

Grsec/PaX: 2.9-{2.6.32.59,3.2.17,3.3.6}-201205131658

---
2.6.32/0000_README | 2 +-
...20_grsecurity-2.9-2.6.32.59-201205131656.patch} | 674 ++-
{3.2.16 => 3.2.17}/0000_README | 6 +-
3.2.17/1016_linux-3.2.17.patch | 5695 ++++++++++++++++++++
.../4420_grsecurity-2.9-3.2.17-201205131657.patch | 1031 +++--
.../4430_grsec-remove-localversion-grsec.patch | 0
{3.2.16 => 3.2.17}/4435_grsec-mute-warnings.patch | 0
.../4440_grsec-remove-protected-paths.patch | 0
.../4445_grsec-pax-without-grsec.patch | 0
.../4450_grsec-kconfig-default-gids.patch | 0
{3.2.16 => 3.2.17}/4455_grsec-kconfig-gentoo.patch | 0
.../4460-grsec-kconfig-proc-user.patch | 0
.../4465_selinux-avc_audit-log-curr_ip.patch | 0
{3.2.16 => 3.2.17}/4470_disable-compat_vdso.patch | 0
3.3.5/1004_linux-3.3.5.patch | 3285 -----------
{3.3.5 => 3.3.6}/0000_README | 6 +-
3.3.6/1005_linux-3.3.6.patch | 1832 +++++++
.../4420_grsecurity-2.9-3.3.6-201205131658.patch | 773 +++-
.../4430_grsec-remove-localversion-grsec.patch | 0
{3.3.5 => 3.3.6}/4435_grsec-mute-warnings.patch | 0
.../4440_grsec-remove-protected-paths.patch | 0
.../4445_grsec-pax-without-grsec.patch | 0
.../4450_grsec-kconfig-default-gids.patch | 0
{3.3.5 => 3.3.6}/4455_grsec-kconfig-gentoo.patch | 0
.../4460-grsec-kconfig-proc-user.patch | 0
.../4465_selinux-avc_audit-log-curr_ip.patch | 0
{3.3.5 => 3.3.6}/4470_disable-compat_vdso.patch | 0
27 files changed, 9438 insertions(+), 3866 deletions(-)

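Most of the grsecurity hunks in the diff below thread a new TIF_GRSEC_SETXID flag through each architecture's syscall entry/exit path (ARM, MIPS, PowerPC, SPARC64, x86): when a multithreaded process drops root, commit_creds() queues the new credentials on every sibling thread and sets the flag, and gr_delayed_cred_worker() applies them the next time that thread crosses the syscall boundary. The following is a minimal userspace sketch of that pattern, not kernel code; apart from commit_creds(), TIF_GRSEC_SETXID and the reference to gr_delayed_cred_worker(), every name and type here (struct thread, syscall_entry(), the threads array) is a simplified stand-in for the thread_info flags and ptrace hooks changed in the diff.

/*
 * Illustrative userspace model of the mechanism wired up below -- NOT part
 * of the patch.  Types and helpers are simplified stand-ins for the kernel's
 * thread_info flags, struct cred and gr_delayed_cred_worker().
 */
#include <stddef.h>
#include <stdio.h>

#define TIF_GRSEC_SETXID (1u << 0)   /* "new credentials pending" thread flag */

struct cred   { unsigned int uid; };
struct thread {
    unsigned int tif_flags;          /* stands in for thread_info->flags      */
    struct cred *delayed_cred;       /* credentials queued by commit_creds()  */
    struct cred  cred;               /* currently effective credentials       */
};

/* commit_creds(): the caller switches at once; every sibling thread is flagged
 * so it adopts the new uid at its next syscall (see the kernel/cred.c hunk). */
static void commit_creds(struct thread *t, size_t n, size_t self, struct cred *new)
{
    t[self].cred = *new;
    for (size_t i = 0; i < n; i++) {
        if (i == self || t[i].delayed_cred)
            continue;
        t[i].delayed_cred = new;
        t[i].tif_flags   |= TIF_GRSEC_SETXID;   /* set_tsk_thread_flag() */
    }
}

/* Syscall-entry hook: the per-arch ptrace.c hunks below do the equivalent of
 * test_and_clear_thread_flag() followed by gr_delayed_cred_worker(). */
static void syscall_entry(struct thread *t)
{
    if (t->tif_flags & TIF_GRSEC_SETXID) {
        t->tif_flags &= ~TIF_GRSEC_SETXID;
        t->cred = *t->delayed_cred;
        t->delayed_cred = NULL;
    }
    /* ... normal syscall dispatch would follow ... */
}

int main(void)
{
    struct thread threads[3] = { {0}, {0}, {0} };   /* all start as uid 0   */
    struct cred dropped = { 1000 };                 /* root -> ordinary uid */

    commit_creds(threads, 3, 0, &dropped);          /* thread 0 drops root  */
    printf("after commit_creds: uids = %u %u %u\n",
           threads[0].cred.uid, threads[1].cred.uid, threads[2].cred.uid);

    syscall_entry(&threads[1]);                     /* siblings catch up on */
    syscall_entry(&threads[2]);                     /* their next syscall   */
    printf("after next syscall: uids = %u %u %u\n",
           threads[0].cred.uid, threads[1].cred.uid, threads[2].cred.uid);
    return 0;
}

Built with any C99 compiler (e.g. gcc -std=c99 -Wall), the sketch prints "1000 0 0" right after commit_creds() and "1000 1000 1000" once the siblings have passed their simulated syscall entry, which is the consistency the GRKERNSEC_SETXID option described in the Kconfig hunk is meant to enforce.
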
40 diff --git a/2.6.32/0000_README b/2.6.32/0000_README
41 index cfcffd4..3655217 100644
42 --- a/2.6.32/0000_README
43 +++ b/2.6.32/0000_README
44 @@ -30,7 +30,7 @@ Patch: 1058_linux-2.6.32.59.patch
45 From: http://www.kernel.org
46 Desc: Linux 2.6.32.59
47
48 -Patch: 4420_grsecurity-2.9-2.6.32.59-201205071838.patch
49 +Patch: 4420_grsecurity-2.9-2.6.32.59-201205131656.patch
50 From: http://www.grsecurity.net
51 Desc: hardened-sources base patch from upstream grsecurity
52
53
54 diff --git a/2.6.32/4420_grsecurity-2.9-2.6.32.59-201205071838.patch b/2.6.32/4420_grsecurity-2.9-2.6.32.59-201205131656.patch
55 similarity index 99%
56 rename from 2.6.32/4420_grsecurity-2.9-2.6.32.59-201205071838.patch
57 rename to 2.6.32/4420_grsecurity-2.9-2.6.32.59-201205131656.patch
58 index 185e1d4..d324f88 100644
59 --- a/2.6.32/4420_grsecurity-2.9-2.6.32.59-201205071838.patch
60 +++ b/2.6.32/4420_grsecurity-2.9-2.6.32.59-201205131656.patch
61 @@ -1171,6 +1171,34 @@ index d65b2f5..9d87555 100644
62 #endif /* __ASSEMBLY__ */
63
64 #define arch_align_stack(x) (x)
65 +diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
66 +index 2dfb7d7..8fadd73 100644
67 +--- a/arch/arm/include/asm/thread_info.h
68 ++++ b/arch/arm/include/asm/thread_info.h
69 +@@ -138,6 +138,12 @@ extern void vfp_sync_state(struct thread_info *thread);
70 + #define TIF_NEED_RESCHED 1
71 + #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
72 + #define TIF_SYSCALL_TRACE 8
73 ++
74 ++/* within 8 bits of TIF_SYSCALL_TRACE
75 ++ to meet flexible second operand requirements
76 ++*/
77 ++#define TIF_GRSEC_SETXID 9
78 ++
79 + #define TIF_POLLING_NRFLAG 16
80 + #define TIF_USING_IWMMXT 17
81 + #define TIF_MEMDIE 18
82 +@@ -152,6 +158,10 @@ extern void vfp_sync_state(struct thread_info *thread);
83 + #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
84 + #define _TIF_FREEZE (1 << TIF_FREEZE)
85 + #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
86 ++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
87 ++
88 ++/* Checks for any syscall work in entry-common.S */
89 ++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_GRSEC_SETXID)
90 +
91 + /*
92 + * Change these and you break ASM code in entry-common.S
93 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
94 index 1d6bd40..fba0cb9 100644
95 --- a/arch/arm/include/asm/uaccess.h
96 @@ -1245,6 +1273,28 @@ index 0e62770..e2c2cd6 100644
97 EXPORT_SYMBOL(__clear_user);
98
99 EXPORT_SYMBOL(__get_user_1);
100 +diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
101 +index a6c66f5..bfdad39 100644
102 +--- a/arch/arm/kernel/entry-common.S
103 ++++ b/arch/arm/kernel/entry-common.S
104 +@@ -77,7 +77,7 @@ ENTRY(ret_from_fork)
105 + get_thread_info tsk
106 + ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
107 + mov why, #1
108 +- tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
109 ++ tst r1, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
110 + beq ret_slow_syscall
111 + mov r1, sp
112 + mov r0, #1 @ trace exit [IP = 1]
113 +@@ -275,7 +275,7 @@ ENTRY(vector_swi)
114 + #endif
115 +
116 + stmdb sp!, {r4, r5} @ push fifth and sixth args
117 +- tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
118 ++ tst ip, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
119 + bne __sys_trace
120 +
121 + cmp scno, #NR_syscalls @ check upper syscall limit
122 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
123 index ba8ccfe..2dc34dc 100644
124 --- a/arch/arm/kernel/kgdb.c
125 @@ -1296,6 +1346,30 @@ index 61f90d3..771ab27 100644
126 }
127
128 void machine_restart(char *cmd)
129 +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
130 +index a2ea385..4783488 100644
131 +--- a/arch/arm/kernel/ptrace.c
132 ++++ b/arch/arm/kernel/ptrace.c
133 +@@ -847,10 +847,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
134 + return ret;
135 + }
136 +
137 ++#ifdef CONFIG_GRKERNSEC_SETXID
138 ++extern void gr_delayed_cred_worker(void);
139 ++#endif
140 ++
141 + asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
142 + {
143 + unsigned long ip;
144 +
145 ++#ifdef CONFIG_GRKERNSEC_SETXID
146 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
147 ++ gr_delayed_cred_worker();
148 ++#endif
149 ++
150 + if (!test_thread_flag(TIF_SYSCALL_TRACE))
151 + return scno;
152 + if (!(current->ptrace & PT_PTRACED))
153 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
154 index c6c57b6..0c3b29e 100644
155 --- a/arch/arm/kernel/setup.c
156 @@ -2917,6 +2991,35 @@ index 83b5509..9fa24a23 100644
157 +#define arch_align_stack(x) ((x) & ~0xfUL)
158
159 #endif /* _ASM_SYSTEM_H */
160 +diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
161 +index 845da21..f2a91b9 100644
162 +--- a/arch/mips/include/asm/thread_info.h
163 ++++ b/arch/mips/include/asm/thread_info.h
164 +@@ -120,6 +120,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
165 + #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
166 + #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
167 + #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
168 ++/* li takes a 32bit immediate */
169 ++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
170 + #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
171 +
172 + #ifdef CONFIG_MIPS32_O32
173 +@@ -144,11 +146,14 @@ register struct thread_info *__current_thread_info __asm__("$28");
174 + #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
175 + #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
176 + #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
177 ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
178 ++
179 ++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
180 +
181 + /* work to do on interrupt/exception return */
182 + #define _TIF_WORK_MASK (0x0000ffef & ~_TIF_SECCOMP)
183 + /* work to do on any return to u-space */
184 +-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
185 ++#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
186 +
187 + #endif /* __KERNEL__ */
188 +
189 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
190 index 9fdd8bc..fcf9d68 100644
191 --- a/arch/mips/kernel/binfmt_elfn32.c
192 @@ -2953,6 +3056,19 @@ index ff44823..cf0b48a 100644
193 #include <asm/processor.h>
194
195 /*
196 +diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
197 +index ffa3310..f8b1e06 100644
198 +--- a/arch/mips/kernel/entry.S
199 ++++ b/arch/mips/kernel/entry.S
200 +@@ -167,7 +167,7 @@ work_notifysig: # deal with pending signals and
201 + FEXPORT(syscall_exit_work_partial)
202 + SAVE_STATIC
203 + syscall_exit_work:
204 +- li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
205 ++ li t0, _TIF_SYSCALL_WORK
206 + and t0, a2 # a2 is preloaded with TI_FLAGS
207 + beqz t0, work_pending # trace bit set?
208 + local_irq_enable # could let do_syscall_trace()
209 diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
210 index 50c9bb8..efdd5f8 100644
211 --- a/arch/mips/kernel/kgdb.c
212 @@ -2985,6 +3101,33 @@ index f3d73e1..bb3f57a 100644
213 -
214 - return sp & ALMASK;
215 -}
216 +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
217 +index 054861c..ddbbc7d 100644
218 +--- a/arch/mips/kernel/ptrace.c
219 ++++ b/arch/mips/kernel/ptrace.c
220 +@@ -558,6 +558,10 @@ static inline int audit_arch(void)
221 + return arch;
222 + }
223 +
224 ++#ifdef CONFIG_GRKERNSEC_SETXID
225 ++extern void gr_delayed_cred_worker(void);
226 ++#endif
227 ++
228 + /*
229 + * Notification of system call entry/exit
230 + * - triggered by current->work.syscall_trace
231 +@@ -568,6 +572,11 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
232 + if (!entryexit)
233 + secure_computing(regs->regs[0]);
234 +
235 ++#ifdef CONFIG_GRKERNSEC_SETXID
236 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
237 ++ gr_delayed_cred_worker();
238 ++#endif
239 ++
240 + if (unlikely(current->audit_context) && entryexit)
241 + audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
242 + regs->regs[2]);
243 diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
244 index 060563a..7fbf310 100644
245 --- a/arch/mips/kernel/reset.c
246 @@ -3020,6 +3163,58 @@ index 060563a..7fbf310 100644
247 pm_power_off();
248 + BUG();
249 }
250 +diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
251 +index fd2a9bb..73ecc89 100644
252 +--- a/arch/mips/kernel/scall32-o32.S
253 ++++ b/arch/mips/kernel/scall32-o32.S
254 +@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
255 +
256 + stack_done:
257 + lw t0, TI_FLAGS($28) # syscall tracing enabled?
258 +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
259 ++ li t1, _TIF_SYSCALL_WORK
260 + and t0, t1
261 + bnez t0, syscall_trace_entry # -> yes
262 +
263 +diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
264 +index 18bf7f3..6659dde 100644
265 +--- a/arch/mips/kernel/scall64-64.S
266 ++++ b/arch/mips/kernel/scall64-64.S
267 +@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
268 +
269 + sd a3, PT_R26(sp) # save a3 for syscall restarting
270 +
271 +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
272 ++ li t1, _TIF_SYSCALL_WORK
273 + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
274 + and t0, t1, t0
275 + bnez t0, syscall_trace_entry
276 +diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
277 +index 6ebc079..a16f976 100644
278 +--- a/arch/mips/kernel/scall64-n32.S
279 ++++ b/arch/mips/kernel/scall64-n32.S
280 +@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
281 +
282 + sd a3, PT_R26(sp) # save a3 for syscall restarting
283 +
284 +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
285 ++ li t1, _TIF_SYSCALL_WORK
286 + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
287 + and t0, t1, t0
288 + bnez t0, n32_syscall_trace_entry
289 +diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
290 +index 14dde4c..dc68acf 100644
291 +--- a/arch/mips/kernel/scall64-o32.S
292 ++++ b/arch/mips/kernel/scall64-o32.S
293 +@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
294 + PTR 4b, bad_stack
295 + .previous
296 +
297 +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
298 ++ li t1, _TIF_SYSCALL_WORK
299 + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
300 + and t0, t1, t0
301 + bnez t0, trace_a_syscall
302 diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
303 index 3f7f466..3abe0b5 100644
304 --- a/arch/mips/kernel/syscall.c
305 @@ -3893,6 +4088,33 @@ index 094a12a..877a60a 100644
306
307 /* Used in very early kernel initialization. */
308 extern unsigned long reloc_offset(void);
309 +diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
310 +index aa9d383..0380a05 100644
311 +--- a/arch/powerpc/include/asm/thread_info.h
312 ++++ b/arch/powerpc/include/asm/thread_info.h
313 +@@ -110,7 +110,9 @@ static inline struct thread_info *current_thread_info(void)
314 + #define TIF_NOERROR 12 /* Force successful syscall return */
315 + #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
316 + #define TIF_FREEZE 14 /* Freezing for suspend */
317 +-#define TIF_RUNLATCH 15 /* Is the runlatch enabled? */
318 ++/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
319 ++#define TIF_GRSEC_SETXID 15 /* update credentials on syscall entry/exit */
320 ++#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
321 +
322 + /* as above, but as bit values */
323 + #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
324 +@@ -128,7 +130,10 @@ static inline struct thread_info *current_thread_info(void)
325 + #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
326 + #define _TIF_FREEZE (1<<TIF_FREEZE)
327 + #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
328 +-#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
329 ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
330 ++
331 ++#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT| \
332 ++ _TIF_SECCOMP|_TIF_GRSEC_SETXID)
333 +
334 + #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
335 + _TIF_NOTIFY_RESUME)
336 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
337 index bd0fb84..a42a14b 100644
338 --- a/arch/powerpc/include/asm/uaccess.h
339 @@ -4422,7 +4644,7 @@ index 7b816da..8d5c277 100644
340 - return ret;
341 -}
342 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
343 -index ef14988..856c4bc 100644
344 +index ef14988..8a37ddb 100644
345 --- a/arch/powerpc/kernel/ptrace.c
346 +++ b/arch/powerpc/kernel/ptrace.c
347 @@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
348 @@ -4443,6 +4665,41 @@ index ef14988..856c4bc 100644
349 } else {
350 flush_fp_to_thread(child);
351 tmp = ((unsigned long *)child->thread.fpr)
352 +@@ -1033,6 +1033,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
353 + return ret;
354 + }
355 +
356 ++#ifdef CONFIG_GRKERNSEC_SETXID
357 ++extern void gr_delayed_cred_worker(void);
358 ++#endif
359 ++
360 + /*
361 + * We must return the syscall number to actually look up in the table.
362 + * This can be -1L to skip running any syscall at all.
363 +@@ -1043,6 +1047,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
364 +
365 + secure_computing(regs->gpr[0]);
366 +
367 ++#ifdef CONFIG_GRKERNSEC_SETXID
368 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
369 ++ gr_delayed_cred_worker();
370 ++#endif
371 ++
372 + if (test_thread_flag(TIF_SYSCALL_TRACE) &&
373 + tracehook_report_syscall_entry(regs))
374 + /*
375 +@@ -1076,6 +1085,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
376 + {
377 + int step;
378 +
379 ++#ifdef CONFIG_GRKERNSEC_SETXID
380 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
381 ++ gr_delayed_cred_worker();
382 ++#endif
383 ++
384 + if (unlikely(current->audit_context))
385 + audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
386 + regs->result);
387 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
388 index d670429..2bc59b2 100644
389 --- a/arch/powerpc/kernel/signal_32.c
390 @@ -5951,7 +6208,7 @@ index 844d73a..f787fb9 100644
391
392 /*
393 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
394 -index f78ad9a..9f55fc7 100644
395 +index f78ad9a..a3213ed 100644
396 --- a/arch/sparc/include/asm/thread_info_64.h
397 +++ b/arch/sparc/include/asm/thread_info_64.h
398 @@ -68,6 +68,8 @@ struct thread_info {
399 @@ -5963,6 +6220,34 @@ index f78ad9a..9f55fc7 100644
400 unsigned long fpregs[0] __attribute__ ((aligned(64)));
401 };
402
403 +@@ -227,6 +229,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
404 + /* flag bit 8 is available */
405 + #define TIF_SECCOMP 9 /* secure computing */
406 + #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
407 ++#define TIF_GRSEC_SETXID 11 /* update credentials on syscall entry/exit */
408 ++
409 + /* NOTE: Thread flags >= 12 should be ones we have no interest
410 + * in using in assembly, else we can't use the mask as
411 + * an immediate value in instructions such as andcc.
412 +@@ -247,12 +251,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
413 + #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
414 + #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
415 + #define _TIF_FREEZE (1<<TIF_FREEZE)
416 ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
417 +
418 + #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
419 + _TIF_DO_NOTIFY_RESUME_MASK | \
420 + _TIF_NEED_RESCHED | _TIF_PERFCTR)
421 + #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
422 +
423 ++#define _TIF_WORK_SYSCALL \
424 ++ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
425 ++ _TIF_GRSEC_SETXID)
426 ++
427 ++
428 + /*
429 + * Thread-synchronous status.
430 + *
431 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
432 index e88fbe5..96b0ce5 100644
433 --- a/arch/sparc/include/asm/uaccess.h
434 @@ -6275,6 +6560,45 @@ index cb70476..3d0c191 100644
435 (void *) gp->tpc,
436 (void *) gp->o7,
437 (void *) gp->i7,
438 +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
439 +index 4ae91dc..c2e705e 100644
440 +--- a/arch/sparc/kernel/ptrace_64.c
441 ++++ b/arch/sparc/kernel/ptrace_64.c
442 +@@ -1049,6 +1049,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
443 + return ret;
444 + }
445 +
446 ++#ifdef CONFIG_GRKERNSEC_SETXID
447 ++extern void gr_delayed_cred_worker(void);
448 ++#endif
449 ++
450 + asmlinkage int syscall_trace_enter(struct pt_regs *regs)
451 + {
452 + int ret = 0;
453 +@@ -1056,6 +1060,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
454 + /* do the secure computing check first */
455 + secure_computing(regs->u_regs[UREG_G1]);
456 +
457 ++#ifdef CONFIG_GRKERNSEC_SETXID
458 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
459 ++ gr_delayed_cred_worker();
460 ++#endif
461 ++
462 + if (test_thread_flag(TIF_SYSCALL_TRACE))
463 + ret = tracehook_report_syscall_entry(regs);
464 +
465 +@@ -1074,6 +1083,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
466 +
467 + asmlinkage void syscall_trace_leave(struct pt_regs *regs)
468 + {
469 ++#ifdef CONFIG_GRKERNSEC_SETXID
470 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
471 ++ gr_delayed_cred_worker();
472 ++#endif
473 ++
474 + if (unlikely(current->audit_context)) {
475 + unsigned long tstate = regs->tstate;
476 + int result = AUDITSC_SUCCESS;
477 diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
478 index fd3cee4..cc4b1ff 100644
479 --- a/arch/sparc/kernel/rtrap_64.S
480 @@ -6486,6 +6810,55 @@ index cfa0e19..98972ac 100644
481 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
482 mm->unmap_area = arch_unmap_area_topdown;
483 }
484 +diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
485 +index d150c2a..bffda9d 100644
486 +--- a/arch/sparc/kernel/syscalls.S
487 ++++ b/arch/sparc/kernel/syscalls.S
488 +@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
489 + #endif
490 + .align 32
491 + 1: ldx [%g6 + TI_FLAGS], %l5
492 +- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
493 ++ andcc %l5, _TIF_WORK_SYSCALL, %g0
494 + be,pt %icc, rtrap
495 + nop
496 + call syscall_trace_leave
497 +@@ -198,7 +198,7 @@ linux_sparc_syscall32:
498 +
499 + srl %i5, 0, %o5 ! IEU1
500 + srl %i2, 0, %o2 ! IEU0 Group
501 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
502 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0
503 + bne,pn %icc, linux_syscall_trace32 ! CTI
504 + mov %i0, %l5 ! IEU1
505 + call %l7 ! CTI Group brk forced
506 +@@ -221,7 +221,7 @@ linux_sparc_syscall:
507 +
508 + mov %i3, %o3 ! IEU1
509 + mov %i4, %o4 ! IEU0 Group
510 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
511 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0
512 + bne,pn %icc, linux_syscall_trace ! CTI Group
513 + mov %i0, %l5 ! IEU0
514 + 2: call %l7 ! CTI Group brk forced
515 +@@ -245,7 +245,7 @@ ret_sys_call:
516 +
517 + cmp %o0, -ERESTART_RESTARTBLOCK
518 + bgeu,pn %xcc, 1f
519 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
520 ++ andcc %l0, _TIF_WORK_SYSCALL, %l6
521 + 80:
522 + /* System call success, clear Carry condition code. */
523 + andn %g3, %g2, %g3
524 +@@ -260,7 +260,7 @@ ret_sys_call:
525 + /* System call failure, set Carry condition code.
526 + * Also, get abs(errno) to return to the process.
527 + */
528 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
529 ++ andcc %l0, _TIF_WORK_SYSCALL, %l6
530 + sub %g0, %o0, %o0
531 + or %g3, %g2, %g3
532 + stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
533 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
534 index c0490c7..84959d1 100644
535 --- a/arch/sparc/kernel/traps_32.c
536 @@ -13413,7 +13786,7 @@ index e0fbf29..858ef4a 100644
537 /*
538 * Force strict CPU ordering.
539 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
540 -index 19c3ce4..8962535 100644
541 +index 19c3ce4..4ad5ba4 100644
542 --- a/arch/x86/include/asm/thread_info.h
543 +++ b/arch/x86/include/asm/thread_info.h
544 @@ -10,6 +10,7 @@
545 @@ -13462,7 +13835,45 @@ index 19c3ce4..8962535 100644
546 #define init_stack (init_thread_union.stack)
547
548 #else /* !__ASSEMBLY__ */
549 -@@ -163,45 +157,40 @@ struct thread_info {
550 +@@ -95,6 +89,7 @@ struct thread_info {
551 + #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
552 + #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
553 + #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
554 ++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
555 +
556 + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
557 + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
558 +@@ -117,16 +112,17 @@ struct thread_info {
559 + #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
560 + #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
561 + #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
562 ++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
563 +
564 + /* work to do in syscall_trace_enter() */
565 + #define _TIF_WORK_SYSCALL_ENTRY \
566 + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
567 +- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
568 ++ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
569 +
570 + /* work to do in syscall_trace_leave() */
571 + #define _TIF_WORK_SYSCALL_EXIT \
572 + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
573 +- _TIF_SYSCALL_TRACEPOINT)
574 ++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
575 +
576 + /* work to do on interrupt/exception return */
577 + #define _TIF_WORK_MASK \
578 +@@ -136,7 +132,8 @@ struct thread_info {
579 +
580 + /* work to do on any return to user space */
581 + #define _TIF_ALLWORK_MASK \
582 +- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
583 ++ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
584 ++ _TIF_GRSEC_SETXID)
585 +
586 + /* Only used for 64 bit */
587 + #define _TIF_DO_NOTIFY_MASK \
588 +@@ -163,45 +160,40 @@ struct thread_info {
589 #define alloc_thread_info(tsk) \
590 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
591
592 @@ -13533,7 +13944,7 @@ index 19c3ce4..8962535 100644
593 /*
594 * macros/functions for gaining access to the thread information structure
595 * preempt_count needs to be 1 initially, until the scheduler is functional.
596 -@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
597 +@@ -209,21 +201,8 @@ static inline struct thread_info *current_thread_info(void)
598 #ifndef __ASSEMBLY__
599 DECLARE_PER_CPU(unsigned long, kernel_stack);
600
601 @@ -13557,7 +13968,7 @@ index 19c3ce4..8962535 100644
602 #endif
603
604 #endif /* !X86_32 */
605 -@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
606 +@@ -260,5 +239,16 @@ extern void arch_task_cache_init(void);
607 extern void free_thread_info(struct thread_info *ti);
608 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
609 #define arch_task_cache_init arch_task_cache_init
610 @@ -16397,7 +16808,7 @@ index 4c07cca..2c8427d 100644
611 ret
612 ENDPROC(efi_call6)
613 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
614 -index c097e7d..91be126 100644
615 +index c097e7d..853746c 100644
616 --- a/arch/x86/kernel/entry_32.S
617 +++ b/arch/x86/kernel/entry_32.S
618 @@ -95,12 +95,6 @@
619 @@ -16618,7 +17029,7 @@ index c097e7d..91be126 100644
620 +#ifdef CONFIG_PAX_KERNEXEC
621 + jae resume_userspace
622 +
623 -+ PAX_EXIT_KERNEL
624 ++ pax_exit_kernel
625 + jmp resume_kernel
626 +#else
627 jb resume_kernel # not returning to v8086 or userspace
628 @@ -20524,7 +20935,7 @@ index 39493bc..196816d 100644
629 ip = *(u64 *)(fp+8);
630 if (!in_sched_functions(ip))
631 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
632 -index c06acdd..09de221 100644
633 +index c06acdd..e7dffe1 100644
634 --- a/arch/x86/kernel/ptrace.c
635 +++ b/arch/x86/kernel/ptrace.c
636 @@ -559,6 +559,10 @@ static int ioperm_active(struct task_struct *target,
637 @@ -20606,7 +21017,15 @@ index c06acdd..09de221 100644
638
639 /* Send us the fake SIGTRAP */
640 force_sig_info(SIGTRAP, &info, tsk);
641 -@@ -1469,7 +1473,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
642 +@@ -1465,14 +1469,23 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
643 + # define IS_IA32 0
644 + #endif
645 +
646 ++#ifdef CONFIG_GRKERNSEC_SETXID
647 ++extern void gr_delayed_cred_worker(void);
648 ++#endif
649 ++
650 + /*
651 * We must return the syscall number to actually look up in the table.
652 * This can be -1L to skip running any syscall at all.
653 */
654 @@ -20615,15 +21034,29 @@ index c06acdd..09de221 100644
655 {
656 long ret = 0;
657
658 -@@ -1514,7 +1518,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
659 ++#ifdef CONFIG_GRKERNSEC_SETXID
660 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
661 ++ gr_delayed_cred_worker();
662 ++#endif
663 ++
664 + /*
665 + * If we stepped into a sysenter/syscall insn, it trapped in
666 + * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
667 +@@ -1514,8 +1527,13 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
668 return ret ?: regs->orig_ax;
669 }
670
671 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
672 +void syscall_trace_leave(struct pt_regs *regs)
673 {
674 ++#ifdef CONFIG_GRKERNSEC_SETXID
675 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
676 ++ gr_delayed_cred_worker();
677 ++#endif
678 ++
679 if (unlikely(current->audit_context))
680 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
681 +
682 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
683 index cf98100..e76e03d 100644
684 --- a/arch/x86/kernel/reboot.c
685 @@ -26424,7 +26857,7 @@ index 63a6ba6..79abd7a 100644
686 return (void *)vaddr;
687 }
688 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
689 -index f46c3407..6ff9a26 100644
690 +index f46c3407..f7e72b0 100644
691 --- a/arch/x86/mm/hugetlbpage.c
692 +++ b/arch/x86/mm/hugetlbpage.c
693 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
694 @@ -26500,7 +26933,7 @@ index f46c3407..6ff9a26 100644
695
696 /* don't allow allocations above current base */
697 if (mm->free_area_cache > base)
698 -@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
699 +@@ -322,64 +329,68 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
700 largest_hole = 0;
701 mm->free_area_cache = base;
702 }
703 @@ -26515,15 +26948,16 @@ index f46c3407..6ff9a26 100644
704 + addr = (mm->free_area_cache - len);
705 do {
706 + addr &= huge_page_mask(h);
707 -+ vma = find_vma(mm, addr);
708 /*
709 * Lookup failure means no vma is above this address,
710 * i.e. return with success:
711 -- */
712 + */
713 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
714 -- return addr;
715 --
716 -- /*
717 ++ vma = find_vma(mm, addr);
718 ++ if (!vma)
719 + return addr;
720 +
721 + /*
722 * new region fits between prev_vma->vm_end and
723 * vma->vm_start, use it:
724 */
725 @@ -26595,7 +27029,7 @@ index f46c3407..6ff9a26 100644
726 mm->cached_hole_size = ~0UL;
727 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
728 len, pgoff, flags);
729 -@@ -387,6 +393,7 @@ fail:
730 +@@ -387,6 +398,7 @@ fail:
731 /*
732 * Restore the topdown base:
733 */
734 @@ -26603,7 +27037,7 @@ index f46c3407..6ff9a26 100644
735 mm->free_area_cache = base;
736 mm->cached_hole_size = ~0UL;
737
738 -@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
739 +@@ -400,10 +412,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
740 struct hstate *h = hstate_file(file);
741 struct mm_struct *mm = current->mm;
742 struct vm_area_struct *vma;
743 @@ -26624,7 +27058,7 @@ index f46c3407..6ff9a26 100644
744 return -ENOMEM;
745
746 if (flags & MAP_FIXED) {
747 -@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
748 +@@ -415,8 +436,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
749 if (addr) {
750 addr = ALIGN(addr, huge_page_size(h));
751 vma = find_vma(mm, addr);
752 @@ -27083,7 +27517,7 @@ index 30938c1..bda3d5d 100644
753 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
754 size >> 10);
755 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
756 -index 7d095ad..acf1be9 100644
757 +index 7d095ad..f833fa2 100644
758 --- a/arch/x86/mm/init_64.c
759 +++ b/arch/x86/mm/init_64.c
760 @@ -123,7 +123,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
761 @@ -27131,6 +27565,15 @@ index 7d095ad..acf1be9 100644
762 }
763 pmd = pmd_offset(pud, phys);
764 BUG_ON(!pmd_none(*pmd));
765 +@@ -507,7 +507,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
766 + unmap_low_page(pmd);
767 +
768 + spin_lock(&init_mm.page_table_lock);
769 +- pud_populate(&init_mm, pud, __va(pmd_phys));
770 ++ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
771 + spin_unlock(&init_mm.page_table_lock);
772 + }
773 + __flush_tlb_all();
774 @@ -560,7 +560,7 @@ kernel_physical_mapping_init(unsigned long start,
775 unmap_low_page(pud);
776
777 @@ -74487,10 +74930,10 @@ index 8f32f50..b6a41e8 100644
778 link[pathlen] = '\0';
779 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
780 new file mode 100644
781 -index 0000000..50819f8
782 +index 0000000..5be91c0
783 --- /dev/null
784 +++ b/grsecurity/Kconfig
785 -@@ -0,0 +1,1077 @@
786 +@@ -0,0 +1,1078 @@
787 +#
788 +# grecurity configuration
789 +#
790 @@ -74625,7 +75068,7 @@ index 0000000..50819f8
791 + select GRKERNSEC_PROC_ADD
792 + select GRKERNSEC_CHROOT_CHMOD
793 + select GRKERNSEC_CHROOT_NICE
794 -+ select GRKERNSEC_SETXID
795 ++ select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS)
796 + select GRKERNSEC_AUDIT_MOUNT
797 + select GRKERNSEC_MODHARDEN if (MODULES)
798 + select GRKERNSEC_HARDEN_PTRACE
799 @@ -75319,6 +75762,7 @@ index 0000000..50819f8
800 +
801 +config GRKERNSEC_SETXID
802 + bool "Enforce consistent multithreaded privileges"
803 ++ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
804 + help
805 + If you say Y here, a change from a root uid to a non-root uid
806 + in a multithreaded application will cause the resulting uids,
807 @@ -75614,10 +76058,10 @@ index 0000000..1b9afa9
808 +endif
809 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
810 new file mode 100644
811 -index 0000000..67b34b9
812 +index 0000000..c475143
813 --- /dev/null
814 +++ b/grsecurity/gracl.c
815 -@@ -0,0 +1,4169 @@
816 +@@ -0,0 +1,4171 @@
817 +#include <linux/kernel.h>
818 +#include <linux/module.h>
819 +#include <linux/sched.h>
820 @@ -79454,20 +79898,22 @@ index 0000000..67b34b9
821 + return 0;
822 +#endif
823 +
824 -+ read_lock(&tasklist_lock);
825 -+ while (tmp->pid > 0) {
826 -+ if (tmp == curtemp)
827 -+ break;
828 -+ tmp = tmp->real_parent;
829 -+ }
830 ++ if (request == PTRACE_ATTACH) {
831 ++ read_lock(&tasklist_lock);
832 ++ while (tmp->pid > 0) {
833 ++ if (tmp == curtemp)
834 ++ break;
835 ++ tmp = tmp->real_parent;
836 ++ }
837 +
838 -+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
839 -+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
840 ++ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
841 ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
842 ++ read_unlock(&tasklist_lock);
843 ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
844 ++ return 1;
845 ++ }
846 + read_unlock(&tasklist_lock);
847 -+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
848 -+ return 1;
849 + }
850 -+ read_unlock(&tasklist_lock);
851 +
852 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
853 + if (!(gr_status & GR_READY))
854 @@ -91553,7 +91999,7 @@ index 3f2f04f..4e53ded 100644
855 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
856 * Should always be manipulated under cpu_add_remove_lock
857 diff --git a/kernel/cred.c b/kernel/cred.c
858 -index 0b5b5fc..3fe945c 100644
859 +index 0b5b5fc..f20c6b9 100644
860 --- a/kernel/cred.c
861 +++ b/kernel/cred.c
862 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
863 @@ -91676,7 +92122,7 @@ index 0b5b5fc..3fe945c 100644
864 */
865 alter_cred_subscribers(new, 2);
866 if (new->user != old->user)
867 -@@ -595,8 +622,96 @@ int commit_creds(struct cred *new)
868 +@@ -595,8 +622,105 @@ int commit_creds(struct cred *new)
869 put_cred(old);
870 return 0;
871 }
872 @@ -91743,6 +92189,8 @@ index 0b5b5fc..3fe945c 100644
873 +int commit_creds(struct cred *new)
874 +{
875 +#ifdef CONFIG_GRKERNSEC_SETXID
876 ++ int ret;
877 ++ int schedule_it = 0;
878 + struct task_struct *t;
879 +
880 + /* we won't get called with tasklist_lock held for writing
881 @@ -91751,20 +92199,27 @@ index 0b5b5fc..3fe945c 100644
882 + */
883 + if (grsec_enable_setxid && !current_is_single_threaded() &&
884 + !current_uid() && new->uid) {
885 ++ schedule_it = 1;
886 ++ }
887 ++ ret = __commit_creds(new);
888 ++ if (schedule_it) {
889 + rcu_read_lock();
890 + read_lock(&tasklist_lock);
891 + for (t = next_thread(current); t != current;
892 + t = next_thread(t)) {
893 + if (t->delayed_cred == NULL) {
894 + t->delayed_cred = get_cred(new);
895 ++ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
896 + set_tsk_need_resched(t);
897 + }
898 + }
899 + read_unlock(&tasklist_lock);
900 + rcu_read_unlock();
901 + }
902 -+#endif
903 ++ return ret;
904 ++#else
905 + return __commit_creds(new);
906 ++#endif
907 +}
908 +
909 EXPORT_SYMBOL(commit_creds);
910 @@ -91773,7 +92228,7 @@ index 0b5b5fc..3fe945c 100644
911 /**
912 * abort_creds - Discard a set of credentials and unlock the current task
913 * @new: The credentials that were going to be applied
914 -@@ -606,6 +721,8 @@ EXPORT_SYMBOL(commit_creds);
915 +@@ -606,6 +730,8 @@ EXPORT_SYMBOL(commit_creds);
916 */
917 void abort_creds(struct cred *new)
918 {
919 @@ -91782,7 +92237,7 @@ index 0b5b5fc..3fe945c 100644
920 kdebug("abort_creds(%p{%d,%d})", new,
921 atomic_read(&new->usage),
922 read_cred_subscribers(new));
923 -@@ -629,6 +746,8 @@ const struct cred *override_creds(const struct cred *new)
924 +@@ -629,6 +755,8 @@ const struct cred *override_creds(const struct cred *new)
925 {
926 const struct cred *old = current->cred;
927
928 @@ -91791,7 +92246,7 @@ index 0b5b5fc..3fe945c 100644
929 kdebug("override_creds(%p{%d,%d})", new,
930 atomic_read(&new->usage),
931 read_cred_subscribers(new));
932 -@@ -658,6 +777,8 @@ void revert_creds(const struct cred *old)
933 +@@ -658,6 +786,8 @@ void revert_creds(const struct cred *old)
934 {
935 const struct cred *override = current->cred;
936
937 @@ -91800,7 +92255,7 @@ index 0b5b5fc..3fe945c 100644
938 kdebug("revert_creds(%p{%d,%d})", old,
939 atomic_read(&old->usage),
940 read_cred_subscribers(old));
941 -@@ -704,6 +825,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
942 +@@ -704,6 +834,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
943 const struct cred *old;
944 struct cred *new;
945
946 @@ -91809,7 +92264,7 @@ index 0b5b5fc..3fe945c 100644
947 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
948 if (!new)
949 return NULL;
950 -@@ -758,6 +881,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
951 +@@ -758,6 +890,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
952 */
953 int set_security_override(struct cred *new, u32 secid)
954 {
955 @@ -91818,7 +92273,7 @@ index 0b5b5fc..3fe945c 100644
956 return security_kernel_act_as(new, secid);
957 }
958 EXPORT_SYMBOL(set_security_override);
959 -@@ -777,6 +902,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
960 +@@ -777,6 +911,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
961 u32 secid;
962 int ret;
963
964 @@ -94871,7 +95326,7 @@ index 29bd4ba..8c5de90 100644
965 WARN_ON(pendowner->pi_blocked_on->lock != lock);
966
967 diff --git a/kernel/sched.c b/kernel/sched.c
968 -index 0591df8..e3af3a4 100644
969 +index 0591df8..db35e3d 100644
970 --- a/kernel/sched.c
971 +++ b/kernel/sched.c
972 @@ -5043,7 +5043,7 @@ out:
973 @@ -94883,27 +95338,7 @@ index 0591df8..e3af3a4 100644
974 {
975 int this_cpu = smp_processor_id();
976 struct rq *this_rq = cpu_rq(this_cpu);
977 -@@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
978 - }
979 - }
980 -
981 -+#ifdef CONFIG_GRKERNSEC_SETXID
982 -+extern void gr_delayed_cred_worker(void);
983 -+static inline void gr_cred_schedule(void)
984 -+{
985 -+ if (unlikely(current->delayed_cred))
986 -+ gr_delayed_cred_worker();
987 -+}
988 -+#else
989 -+static inline void gr_cred_schedule(void)
990 -+{
991 -+}
992 -+#endif
993 -+
994 - /*
995 - * schedule() is the main scheduler function.
996 - */
997 -@@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
998 +@@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
999 struct rq *rq;
1000 int cpu;
1001
1002 @@ -94912,16 +95347,7 @@ index 0591df8..e3af3a4 100644
1003 need_resched:
1004 preempt_disable();
1005 cpu = smp_processor_id();
1006 -@@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
1007 -
1008 - schedule_debug(prev);
1009 -
1010 -+ gr_cred_schedule();
1011 -+
1012 - if (sched_feat(HRTICK))
1013 - hrtick_clear(rq);
1014 -
1015 -@@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
1016 +@@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
1017 * Look out! "owner" is an entirely speculative pointer
1018 * access and not reliable.
1019 */
1020 @@ -94930,7 +95356,7 @@ index 0591df8..e3af3a4 100644
1021 {
1022 unsigned int cpu;
1023 struct rq *rq;
1024 -@@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
1025 +@@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
1026 * DEBUG_PAGEALLOC could have unmapped it if
1027 * the mutex owner just released it and exited.
1028 */
1029 @@ -94943,7 +95369,7 @@ index 0591df8..e3af3a4 100644
1030 #endif
1031
1032 /*
1033 -@@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
1034 +@@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
1035 /*
1036 * Is that owner really running on that cpu?
1037 */
1038 @@ -94952,7 +95378,7 @@ index 0591df8..e3af3a4 100644
1039 return 0;
1040
1041 cpu_relax();
1042 -@@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
1043 +@@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p, const int nice)
1044 /* convert nice value [19,-20] to rlimit style value [1,40] */
1045 int nice_rlim = 20 - nice;
1046
1047 @@ -94961,7 +95387,7 @@ index 0591df8..e3af3a4 100644
1048 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
1049 capable(CAP_SYS_NICE));
1050 }
1051 -@@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
1052 +@@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
1053 if (nice > 19)
1054 nice = 19;
1055
1056 @@ -94971,7 +95397,7 @@ index 0591df8..e3af3a4 100644
1057 return -EPERM;
1058
1059 retval = security_task_setnice(current, nice);
1060 -@@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
1061 +@@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
1062 long power;
1063 int weight;
1064
1065 @@ -96268,6 +96694,28 @@ index d102559..4215f31 100644
1066 #define free(a) kfree(a)
1067 #endif
1068
1069 +diff --git a/lib/ioremap.c b/lib/ioremap.c
1070 +index 14c6078..65526a1 100644
1071 +--- a/lib/ioremap.c
1072 ++++ b/lib/ioremap.c
1073 +@@ -37,7 +37,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
1074 + unsigned long next;
1075 +
1076 + phys_addr -= addr;
1077 +- pmd = pmd_alloc(&init_mm, pud, addr);
1078 ++ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
1079 + if (!pmd)
1080 + return -ENOMEM;
1081 + do {
1082 +@@ -55,7 +55,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
1083 + unsigned long next;
1084 +
1085 + phys_addr -= addr;
1086 +- pud = pud_alloc(&init_mm, pgd, addr);
1087 ++ pud = pud_alloc_kernel(&init_mm, pgd, addr);
1088 + if (!pud)
1089 + return -ENOMEM;
1090 + do {
1091 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
1092 index bd2bea9..6b3c95e 100644
1093 --- a/lib/is_single_threaded.c
1094 @@ -96853,7 +97301,7 @@ index 8aeba53..b4a4198 100644
1095 /*
1096 * We need/can do nothing about count=0 pages.
1097 diff --git a/mm/memory.c b/mm/memory.c
1098 -index 6c836d3..693224d 100644
1099 +index 6c836d3..b2296e1 100644
1100 --- a/mm/memory.c
1101 +++ b/mm/memory.c
1102 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
1103 @@ -96955,7 +97403,29 @@ index 6c836d3..693224d 100644
1104
1105 if (addr < vma->vm_start || addr >= vma->vm_end)
1106 return -EFAULT;
1107 -@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
1108 +@@ -1855,7 +1879,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
1109 +
1110 + BUG_ON(pud_huge(*pud));
1111 +
1112 +- pmd = pmd_alloc(mm, pud, addr);
1113 ++ pmd = (mm == &init_mm) ?
1114 ++ pmd_alloc_kernel(mm, pud, addr) :
1115 ++ pmd_alloc(mm, pud, addr);
1116 + if (!pmd)
1117 + return -ENOMEM;
1118 + do {
1119 +@@ -1875,7 +1901,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
1120 + unsigned long next;
1121 + int err;
1122 +
1123 +- pud = pud_alloc(mm, pgd, addr);
1124 ++ pud = (mm == &init_mm) ?
1125 ++ pud_alloc_kernel(mm, pgd, addr) :
1126 ++ pud_alloc(mm, pgd, addr);
1127 + if (!pud)
1128 + return -ENOMEM;
1129 + do {
1130 +@@ -1977,6 +2005,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
1131 copy_user_highpage(dst, src, va, vma);
1132 }
1133
1134 @@ -97142,7 +97612,7 @@ index 6c836d3..693224d 100644
1135 /*
1136 * This routine handles present pages, when users try to write
1137 * to a shared page. It is done by copying the page to a new address
1138 -@@ -2156,6 +2360,12 @@ gotten:
1139 +@@ -2156,6 +2364,12 @@ gotten:
1140 */
1141 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
1142 if (likely(pte_same(*page_table, orig_pte))) {
1143 @@ -97155,7 +97625,7 @@ index 6c836d3..693224d 100644
1144 if (old_page) {
1145 if (!PageAnon(old_page)) {
1146 dec_mm_counter(mm, file_rss);
1147 -@@ -2207,6 +2417,10 @@ gotten:
1148 +@@ -2207,6 +2421,10 @@ gotten:
1149 page_remove_rmap(old_page);
1150 }
1151
1152 @@ -97166,7 +97636,7 @@ index 6c836d3..693224d 100644
1153 /* Free the old page.. */
1154 new_page = old_page;
1155 ret |= VM_FAULT_WRITE;
1156 -@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
1157 +@@ -2606,6 +2824,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
1158 swap_free(entry);
1159 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
1160 try_to_free_swap(page);
1161 @@ -97178,7 +97648,7 @@ index 6c836d3..693224d 100644
1162 unlock_page(page);
1163
1164 if (flags & FAULT_FLAG_WRITE) {
1165 -@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
1166 +@@ -2617,6 +2840,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
1167
1168 /* No need to invalidate - it was non-present before */
1169 update_mmu_cache(vma, address, pte);
1170 @@ -97190,7 +97660,7 @@ index 6c836d3..693224d 100644
1171 unlock:
1172 pte_unmap_unlock(page_table, ptl);
1173 out:
1174 -@@ -2632,40 +2856,6 @@ out_release:
1175 +@@ -2632,40 +2860,6 @@ out_release:
1176 }
1177
1178 /*
1179 @@ -97231,7 +97701,7 @@ index 6c836d3..693224d 100644
1180 * We enter with non-exclusive mmap_sem (to exclude vma changes,
1181 * but allow concurrent faults), and pte mapped but not yet locked.
1182 * We return with mmap_sem still held, but pte unmapped and unlocked.
1183 -@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1184 +@@ -2674,27 +2868,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1185 unsigned long address, pte_t *page_table, pmd_t *pmd,
1186 unsigned int flags)
1187 {
1188 @@ -97264,7 +97734,7 @@ index 6c836d3..693224d 100644
1189 if (unlikely(anon_vma_prepare(vma)))
1190 goto oom;
1191 page = alloc_zeroed_user_highpage_movable(vma, address);
1192 -@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1193 +@@ -2713,6 +2903,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1194 if (!pte_none(*page_table))
1195 goto release;
1196
1197 @@ -97276,7 +97746,7 @@ index 6c836d3..693224d 100644
1198 inc_mm_counter(mm, anon_rss);
1199 page_add_new_anon_rmap(page, vma, address);
1200 setpte:
1201 -@@ -2720,6 +2911,12 @@ setpte:
1202 +@@ -2720,6 +2915,12 @@ setpte:
1203
1204 /* No need to invalidate - it was non-present before */
1205 update_mmu_cache(vma, address, entry);
1206 @@ -97289,7 +97759,7 @@ index 6c836d3..693224d 100644
1207 unlock:
1208 pte_unmap_unlock(page_table, ptl);
1209 return 0;
1210 -@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1211 +@@ -2862,6 +3063,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1212 */
1213 /* Only go through if we didn't race with anybody else... */
1214 if (likely(pte_same(*page_table, orig_pte))) {
1215 @@ -97302,7 +97772,7 @@ index 6c836d3..693224d 100644
1216 flush_icache_page(vma, page);
1217 entry = mk_pte(page, vma->vm_page_prot);
1218 if (flags & FAULT_FLAG_WRITE)
1219 -@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1220 +@@ -2881,6 +3088,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1221
1222 /* no need to invalidate: a not-present page won't be cached */
1223 update_mmu_cache(vma, address, entry);
1224 @@ -97317,7 +97787,7 @@ index 6c836d3..693224d 100644
1225 } else {
1226 if (charged)
1227 mem_cgroup_uncharge_page(page);
1228 -@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
1229 +@@ -3028,6 +3243,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
1230 if (flags & FAULT_FLAG_WRITE)
1231 flush_tlb_page(vma, address);
1232 }
1233 @@ -97330,7 +97800,7 @@ index 6c836d3..693224d 100644
1234 unlock:
1235 pte_unmap_unlock(pte, ptl);
1236 return 0;
1237 -@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1238 +@@ -3044,6 +3265,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1239 pmd_t *pmd;
1240 pte_t *pte;
1241
1242 @@ -97341,7 +97811,7 @@ index 6c836d3..693224d 100644
1243 __set_current_state(TASK_RUNNING);
1244
1245 count_vm_event(PGFAULT);
1246 -@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1247 +@@ -3051,6 +3276,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1248 if (unlikely(is_vm_hugetlb_page(vma)))
1249 return hugetlb_fault(mm, vma, address, flags);
1250
1251 @@ -97376,7 +97846,7 @@ index 6c836d3..693224d 100644
1252 pgd = pgd_offset(mm, address);
1253 pud = pud_alloc(mm, pgd, address);
1254 if (!pud)
1255 -@@ -3086,6 +3335,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1256 +@@ -3086,6 +3339,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1257 spin_unlock(&mm->page_table_lock);
1258 return 0;
1259 }
1260 @@ -97400,7 +97870,7 @@ index 6c836d3..693224d 100644
1261 #endif /* __PAGETABLE_PUD_FOLDED */
1262
1263 #ifndef __PAGETABLE_PMD_FOLDED
1264 -@@ -3116,6 +3382,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1265 +@@ -3116,6 +3386,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1266 spin_unlock(&mm->page_table_lock);
1267 return 0;
1268 }
1269 @@ -97431,7 +97901,7 @@ index 6c836d3..693224d 100644
1270 #endif /* __PAGETABLE_PMD_FOLDED */
1271
1272 int make_pages_present(unsigned long addr, unsigned long end)
1273 -@@ -3148,7 +3438,7 @@ static int __init gate_vma_init(void)
1274 +@@ -3148,7 +3442,7 @@ static int __init gate_vma_init(void)
1275 gate_vma.vm_start = FIXADDR_USER_START;
1276 gate_vma.vm_end = FIXADDR_USER_END;
1277 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
1278
1279 diff --git a/3.2.16/0000_README b/3.2.17/0000_README
1280 similarity index 93%
1281 rename from 3.2.16/0000_README
1282 rename to 3.2.17/0000_README
1283 index b39a326..d74a42e 100644
1284 --- a/3.2.16/0000_README
1285 +++ b/3.2.17/0000_README
1286 @@ -2,7 +2,11 @@ README
1287 -----------------------------------------------------------------------------
1288 Individual Patch Descriptions:
1289 -----------------------------------------------------------------------------
1290 -Patch: 4420_grsecurity-2.9-3.2.16-201205071838.patch
1291 +Patch: 1016_linux-3.2.17.patch
1292 +From: http://www.kernel.org
1293 +Desc: Linux 3.2.17
1294 +
1295 +Patch: 4420_grsecurity-2.9-3.2.17-201205131657.patch
1296 From: http://www.grsecurity.net
1297 Desc: hardened-sources base patch from upstream grsecurity
1298
1299
1300 diff --git a/3.2.17/1016_linux-3.2.17.patch b/3.2.17/1016_linux-3.2.17.patch
1301 new file mode 100644
1302 index 0000000..5aeed10
1303 --- /dev/null
1304 +++ b/3.2.17/1016_linux-3.2.17.patch
1305 @@ -0,0 +1,5695 @@
1306 +diff --git a/Makefile b/Makefile
1307 +index 3da29cb..4c4efa3 100644
1308 +--- a/Makefile
1309 ++++ b/Makefile
1310 +@@ -1,6 +1,6 @@
1311 + VERSION = 3
1312 + PATCHLEVEL = 2
1313 +-SUBLEVEL = 16
1314 ++SUBLEVEL = 17
1315 + EXTRAVERSION =
1316 + NAME = Saber-toothed Squirrel
1317 +
1318 +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
1319 +index ab3740e..ef642a0 100644
1320 +--- a/arch/arm/Kconfig
1321 ++++ b/arch/arm/Kconfig
1322 +@@ -1155,6 +1155,15 @@ if !MMU
1323 + source "arch/arm/Kconfig-nommu"
1324 + endif
1325 +
1326 ++config ARM_ERRATA_326103
1327 ++ bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
1328 ++ depends on CPU_V6
1329 ++ help
1330 ++ Executing a SWP instruction to read-only memory does not set bit 11
1331 ++ of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to
1332 ++ treat the access as a read, preventing a COW from occurring and
1333 ++ causing the faulting task to livelock.
1334 ++
1335 + config ARM_ERRATA_411920
1336 + bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
1337 + depends on CPU_V6 || CPU_V6K
1338 +diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
1339 +index 60843eb..73409e6 100644
1340 +--- a/arch/arm/include/asm/tls.h
1341 ++++ b/arch/arm/include/asm/tls.h
1342 +@@ -7,6 +7,8 @@
1343 +
1344 + .macro set_tls_v6k, tp, tmp1, tmp2
1345 + mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
1346 ++ mov \tmp1, #0
1347 ++ mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
1348 + .endm
1349 +
1350 + .macro set_tls_v6, tp, tmp1, tmp2
1351 +@@ -15,6 +17,8 @@
1352 + mov \tmp2, #0xffff0fff
1353 + tst \tmp1, #HWCAP_TLS @ hardware TLS available?
1354 + mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
1355 ++ movne \tmp1, #0
1356 ++ mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
1357 + streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
1358 + .endm
1359 +
1360 +diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
1361 +index 3efd82c..87c8be5 100644
1362 +--- a/arch/arm/kernel/irq.c
1363 ++++ b/arch/arm/kernel/irq.c
1364 +@@ -156,10 +156,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
1365 + }
1366 +
1367 + c = irq_data_get_irq_chip(d);
1368 +- if (c->irq_set_affinity)
1369 +- c->irq_set_affinity(d, affinity, true);
1370 +- else
1371 ++ if (!c->irq_set_affinity)
1372 + pr_debug("IRQ%u: unable to set affinity\n", d->irq);
1373 ++ else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
1374 ++ cpumask_copy(d->affinity, affinity);
1375 +
1376 + return ret;
1377 + }
1378 +diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
1379 +index ef5640b..e10e59a 100644
1380 +--- a/arch/arm/kernel/smp.c
1381 ++++ b/arch/arm/kernel/smp.c
1382 +@@ -297,8 +297,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
1383 + struct mm_struct *mm = &init_mm;
1384 + unsigned int cpu = smp_processor_id();
1385 +
1386 +- printk("CPU%u: Booted secondary processor\n", cpu);
1387 +-
1388 + /*
1389 + * All kernel threads share the same mm context; grab a
1390 + * reference and switch to it.
1391 +@@ -310,6 +308,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
1392 + enter_lazy_tlb(mm, current);
1393 + local_flush_tlb_all();
1394 +
1395 ++ printk("CPU%u: Booted secondary processor\n", cpu);
1396 ++
1397 + cpu_init();
1398 + preempt_disable();
1399 + trace_hardirqs_off();
1400 +diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
1401 +index d2b1779..76cbb05 100644
1402 +--- a/arch/arm/kernel/sys_arm.c
1403 ++++ b/arch/arm/kernel/sys_arm.c
1404 +@@ -115,7 +115,7 @@ int kernel_execve(const char *filename,
1405 + "Ir" (THREAD_START_SP - sizeof(regs)),
1406 + "r" (&regs),
1407 + "Ir" (sizeof(regs))
1408 +- : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
1409 ++ : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
1410 +
1411 + out:
1412 + return ret;
1413 +diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c
1414 +index 6e90665..fb202af 100644
1415 +--- a/arch/arm/mach-omap1/timer.c
1416 ++++ b/arch/arm/mach-omap1/timer.c
1417 +@@ -47,9 +47,9 @@ static int omap1_dm_timer_set_src(struct platform_device *pdev,
1418 + int n = (pdev->id - 1) << 1;
1419 + u32 l;
1420 +
1421 +- l = __raw_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
1422 ++ l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
1423 + l |= source << n;
1424 +- __raw_writel(l, MOD_CONF_CTRL_1);
1425 ++ omap_writel(l, MOD_CONF_CTRL_1);
1426 +
1427 + return 0;
1428 + }
1429 +diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
1430 +index ff1f7cc..8074199 100644
1431 +--- a/arch/arm/mm/abort-ev6.S
1432 ++++ b/arch/arm/mm/abort-ev6.S
1433 +@@ -26,18 +26,23 @@ ENTRY(v6_early_abort)
1434 + mrc p15, 0, r1, c5, c0, 0 @ get FSR
1435 + mrc p15, 0, r0, c6, c0, 0 @ get FAR
1436 + /*
1437 +- * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103).
1438 +- * The test below covers all the write situations, including Java bytecodes
1439 ++ * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
1440 + */
1441 +- bic r1, r1, #1 << 11 @ clear bit 11 of FSR
1442 ++#ifdef CONFIG_ARM_ERRATA_326103
1443 ++ ldr ip, =0x4107b36
1444 ++ mrc p15, 0, r3, c0, c0, 0 @ get processor id
1445 ++ teq ip, r3, lsr #4 @ r0 ARM1136?
1446 ++ bne do_DataAbort
1447 + tst r5, #PSR_J_BIT @ Java?
1448 ++ tsteq r5, #PSR_T_BIT @ Thumb?
1449 + bne do_DataAbort
1450 +- do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
1451 +- ldreq r3, [r4] @ read aborted ARM instruction
1452 ++ bic r1, r1, #1 << 11 @ clear bit 11 of FSR
1453 ++ ldr r3, [r4] @ read aborted ARM instruction
1454 + #ifdef CONFIG_CPU_ENDIAN_BE8
1455 +- reveq r3, r3
1456 ++ rev r3, r3
1457 + #endif
1458 + do_ldrd_abort tmp=ip, insn=r3
1459 + tst r3, #1 << 20 @ L = 0 -> write
1460 + orreq r1, r1, #1 << 11 @ yes.
1461 ++#endif
1462 + b do_DataAbort
1463 +diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
1464 +index b1e192b..db7bcc0 100644
1465 +--- a/arch/arm/mm/cache-l2x0.c
1466 ++++ b/arch/arm/mm/cache-l2x0.c
1467 +@@ -32,6 +32,7 @@ static void __iomem *l2x0_base;
1468 + static DEFINE_RAW_SPINLOCK(l2x0_lock);
1469 + static uint32_t l2x0_way_mask; /* Bitmask of active ways */
1470 + static uint32_t l2x0_size;
1471 ++static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
1472 +
1473 + struct l2x0_regs l2x0_saved_regs;
1474 +
1475 +@@ -61,12 +62,7 @@ static inline void cache_sync(void)
1476 + {
1477 + void __iomem *base = l2x0_base;
1478 +
1479 +-#ifdef CONFIG_PL310_ERRATA_753970
1480 +- /* write to an unmmapped register */
1481 +- writel_relaxed(0, base + L2X0_DUMMY_REG);
1482 +-#else
1483 +- writel_relaxed(0, base + L2X0_CACHE_SYNC);
1484 +-#endif
1485 ++ writel_relaxed(0, base + sync_reg_offset);
1486 + cache_wait(base + L2X0_CACHE_SYNC, 1);
1487 + }
1488 +
1489 +@@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr)
1490 + }
1491 +
1492 + #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
1493 ++static inline void debug_writel(unsigned long val)
1494 ++{
1495 ++ if (outer_cache.set_debug)
1496 ++ outer_cache.set_debug(val);
1497 ++}
1498 +
1499 +-#define debug_writel(val) outer_cache.set_debug(val)
1500 +-
1501 +-static void l2x0_set_debug(unsigned long val)
1502 ++static void pl310_set_debug(unsigned long val)
1503 + {
1504 + writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
1505 + }
1506 +@@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val)
1507 + {
1508 + }
1509 +
1510 +-#define l2x0_set_debug NULL
1511 ++#define pl310_set_debug NULL
1512 + #endif
1513 +
1514 + #ifdef CONFIG_PL310_ERRATA_588369
1515 +@@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
1516 + else
1517 + ways = 8;
1518 + type = "L310";
1519 ++#ifdef CONFIG_PL310_ERRATA_753970
1520 ++ /* Unmapped register. */
1521 ++ sync_reg_offset = L2X0_DUMMY_REG;
1522 ++#endif
1523 ++ outer_cache.set_debug = pl310_set_debug;
1524 + break;
1525 + case L2X0_CACHE_ID_PART_L210:
1526 + ways = (aux >> 13) & 0xf;
1527 +@@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
1528 + outer_cache.flush_all = l2x0_flush_all;
1529 + outer_cache.inv_all = l2x0_inv_all;
1530 + outer_cache.disable = l2x0_disable;
1531 +- outer_cache.set_debug = l2x0_set_debug;
1532 +
1533 + printk(KERN_INFO "%s cache controller enabled\n", type);
1534 + printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
1535 +diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
1536 +index 89bbf4e..e77f4e4 100644
1537 +--- a/arch/x86/boot/compressed/relocs.c
1538 ++++ b/arch/x86/boot/compressed/relocs.c
1539 +@@ -402,13 +402,11 @@ static void print_absolute_symbols(void)
1540 + for (i = 0; i < ehdr.e_shnum; i++) {
1541 + struct section *sec = &secs[i];
1542 + char *sym_strtab;
1543 +- Elf32_Sym *sh_symtab;
1544 + int j;
1545 +
1546 + if (sec->shdr.sh_type != SHT_SYMTAB) {
1547 + continue;
1548 + }
1549 +- sh_symtab = sec->symtab;
1550 + sym_strtab = sec->link->strtab;
1551 + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
1552 + Elf32_Sym *sym;
1553 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
1554 +index f98d84c..c4e3581 100644
1555 +--- a/arch/x86/kernel/apic/apic.c
1556 ++++ b/arch/x86/kernel/apic/apic.c
1557 +@@ -1577,9 +1577,11 @@ static int __init apic_verify(void)
1558 + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1559 +
1560 + /* The BIOS may have set up the APIC at some other address */
1561 +- rdmsr(MSR_IA32_APICBASE, l, h);
1562 +- if (l & MSR_IA32_APICBASE_ENABLE)
1563 +- mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
1564 ++ if (boot_cpu_data.x86 >= 6) {
1565 ++ rdmsr(MSR_IA32_APICBASE, l, h);
1566 ++ if (l & MSR_IA32_APICBASE_ENABLE)
1567 ++ mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
1568 ++ }
1569 +
1570 + pr_info("Found and enabled local APIC!\n");
1571 + return 0;
1572 +@@ -1597,13 +1599,15 @@ int __init apic_force_enable(unsigned long addr)
1573 + * MSR. This can only be done in software for Intel P6 or later
1574 + * and AMD K7 (Model > 1) or later.
1575 + */
1576 +- rdmsr(MSR_IA32_APICBASE, l, h);
1577 +- if (!(l & MSR_IA32_APICBASE_ENABLE)) {
1578 +- pr_info("Local APIC disabled by BIOS -- reenabling.\n");
1579 +- l &= ~MSR_IA32_APICBASE_BASE;
1580 +- l |= MSR_IA32_APICBASE_ENABLE | addr;
1581 +- wrmsr(MSR_IA32_APICBASE, l, h);
1582 +- enabled_via_apicbase = 1;
1583 ++ if (boot_cpu_data.x86 >= 6) {
1584 ++ rdmsr(MSR_IA32_APICBASE, l, h);
1585 ++ if (!(l & MSR_IA32_APICBASE_ENABLE)) {
1586 ++ pr_info("Local APIC disabled by BIOS -- reenabling.\n");
1587 ++ l &= ~MSR_IA32_APICBASE_BASE;
1588 ++ l |= MSR_IA32_APICBASE_ENABLE | addr;
1589 ++ wrmsr(MSR_IA32_APICBASE, l, h);
1590 ++ enabled_via_apicbase = 1;
1591 ++ }
1592 + }
1593 + return apic_verify();
1594 + }
1595 +@@ -2149,10 +2153,12 @@ static void lapic_resume(void)
1596 + * FIXME! This will be wrong if we ever support suspend on
1597 + * SMP! We'll need to do this as part of the CPU restore!
1598 + */
1599 +- rdmsr(MSR_IA32_APICBASE, l, h);
1600 +- l &= ~MSR_IA32_APICBASE_BASE;
1601 +- l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
1602 +- wrmsr(MSR_IA32_APICBASE, l, h);
1603 ++ if (boot_cpu_data.x86 >= 6) {
1604 ++ rdmsr(MSR_IA32_APICBASE, l, h);
1605 ++ l &= ~MSR_IA32_APICBASE_BASE;
1606 ++ l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
1607 ++ wrmsr(MSR_IA32_APICBASE, l, h);
1608 ++ }
1609 + }
1610 +
1611 + maxlvt = lapic_get_maxlvt();
1612 +diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
1613 +index 9d46f5e..563a09d 100644
1614 +--- a/arch/x86/kernel/microcode_core.c
1615 ++++ b/arch/x86/kernel/microcode_core.c
1616 +@@ -418,10 +418,8 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
1617 + if (err)
1618 + return err;
1619 +
1620 +- if (microcode_init_cpu(cpu) == UCODE_ERROR) {
1621 +- sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
1622 ++ if (microcode_init_cpu(cpu) == UCODE_ERROR)
1623 + return -EINVAL;
1624 +- }
1625 +
1626 + return err;
1627 + }
1628 +diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
1629 +index 71f4727..5a98aa2 100644
1630 +--- a/arch/x86/kernel/setup_percpu.c
1631 ++++ b/arch/x86/kernel/setup_percpu.c
1632 +@@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void)
1633 + #endif
1634 + rc = -EINVAL;
1635 + if (pcpu_chosen_fc != PCPU_FC_PAGE) {
1636 +- const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
1637 + const size_t dyn_size = PERCPU_MODULE_RESERVE +
1638 + PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
1639 ++ size_t atom_size;
1640 +
1641 ++ /*
1642 ++ * On 64bit, use PMD_SIZE for atom_size so that embedded
1643 ++ * percpu areas are aligned to PMD. This, in the future,
1644 ++ * can also allow using PMD mappings in vmalloc area. Use
1645 ++ * PAGE_SIZE on 32bit as vmalloc space is highly contended
1646 ++ * and large vmalloc area allocs can easily fail.
1647 ++ */
1648 ++#ifdef CONFIG_X86_64
1649 ++ atom_size = PMD_SIZE;
1650 ++#else
1651 ++ atom_size = PAGE_SIZE;
1652 ++#endif
1653 + rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
1654 + dyn_size, atom_size,
1655 + pcpu_cpu_distance,
1656 +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
1657 +index 1f92865..e7c920b 100644
1658 +--- a/arch/x86/xen/enlighten.c
1659 ++++ b/arch/x86/xen/enlighten.c
1660 +@@ -62,6 +62,7 @@
1661 + #include <asm/reboot.h>
1662 + #include <asm/stackprotector.h>
1663 + #include <asm/hypervisor.h>
1664 ++#include <asm/pci_x86.h>
1665 +
1666 + #include "xen-ops.h"
1667 + #include "mmu.h"
1668 +@@ -1278,8 +1279,10 @@ asmlinkage void __init xen_start_kernel(void)
1669 + /* Make sure ACS will be enabled */
1670 + pci_request_acs();
1671 + }
1672 +-
1673 +-
1674 ++#ifdef CONFIG_PCI
1675 ++ /* PCI BIOS service won't work from a PV guest. */
1676 ++ pci_probe &= ~PCI_PROBE_BIOS;
1677 ++#endif
1678 + xen_raw_console_write("about to get started...\n");
1679 +
1680 + xen_setup_runstate_info(0);
1681 +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
1682 +index 87f6673..ec3d603 100644
1683 +--- a/arch/x86/xen/mmu.c
1684 ++++ b/arch/x86/xen/mmu.c
1685 +@@ -353,8 +353,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
1686 + {
1687 + if (val & _PAGE_PRESENT) {
1688 + unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
1689 ++ unsigned long pfn = mfn_to_pfn(mfn);
1690 ++
1691 + pteval_t flags = val & PTE_FLAGS_MASK;
1692 +- val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
1693 ++ if (unlikely(pfn == ~0))
1694 ++ val = flags & ~_PAGE_PRESENT;
1695 ++ else
1696 ++ val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
1697 + }
1698 +
1699 + return val;
1700 +diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
1701 +index 041d4fe..9a23fff 100644
1702 +--- a/arch/x86/xen/smp.c
1703 ++++ b/arch/x86/xen/smp.c
1704 +@@ -172,6 +172,7 @@ static void __init xen_fill_possible_map(void)
1705 + static void __init xen_filter_cpu_maps(void)
1706 + {
1707 + int i, rc;
1708 ++ unsigned int subtract = 0;
1709 +
1710 + if (!xen_initial_domain())
1711 + return;
1712 +@@ -186,8 +187,22 @@ static void __init xen_filter_cpu_maps(void)
1713 + } else {
1714 + set_cpu_possible(i, false);
1715 + set_cpu_present(i, false);
1716 ++ subtract++;
1717 + }
1718 + }
1719 ++#ifdef CONFIG_HOTPLUG_CPU
1720 ++ /* This is akin to using 'nr_cpus' on the Linux command line.
1721 ++ * Which is OK as when we use 'dom0_max_vcpus=X' we can only
1722 ++ * have up to X, while nr_cpu_ids is greater than X. This
1723 ++ * normally is not a problem, except when CPU hotplugging
1724 ++ * is involved and then there might be more than X CPUs
1725 ++ * in the guest - which will not work as there is no
1726 ++ * hypercall to expand the max number of VCPUs an already
1727 ++ * running guest has. So cap it up to X. */
1728 ++ if (subtract)
1729 ++ nr_cpu_ids = nr_cpu_ids - subtract;
1730 ++#endif
1731 ++
1732 + }
1733 +
1734 + static void __init xen_smp_prepare_boot_cpu(void)
1735 +diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
1736 +index 79d7362..3e45aa0 100644
1737 +--- a/arch/x86/xen/xen-asm.S
1738 ++++ b/arch/x86/xen/xen-asm.S
1739 +@@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct)
1740 +
1741 + /* check for unmasked and pending */
1742 + cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
1743 +- jz 1f
1744 ++ jnz 1f
1745 + 2: call check_events
1746 + 1:
1747 + ENDPATCH(xen_restore_fl_direct)
1748 +diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
1749 +index 107f6f7..dd30f40 100644
1750 +--- a/crypto/sha512_generic.c
1751 ++++ b/crypto/sha512_generic.c
1752 +@@ -174,7 +174,7 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
1753 + index = sctx->count[0] & 0x7f;
1754 +
1755 + /* Update number of bytes */
1756 +- if (!(sctx->count[0] += len))
1757 ++ if ((sctx->count[0] += len) < len)
1758 + sctx->count[1]++;
1759 +
1760 + part_len = 128 - index;
1761 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
1762 +index a9b2820..58db834 100644
1763 +--- a/drivers/ata/libata-eh.c
1764 ++++ b/drivers/ata/libata-eh.c
1765 +@@ -3500,7 +3500,8 @@ static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg
1766 + u64 now = get_jiffies_64();
1767 + int *trials = void_arg;
1768 +
1769 +- if (ent->timestamp < now - min(now, interval))
1770 ++ if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
1771 ++ (ent->timestamp < now - min(now, interval)))
1772 + return -1;
1773 +
1774 + (*trials)++;
1775 +diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
1776 +index 003cd8d..99fefbd 100644
1777 +--- a/drivers/bluetooth/ath3k.c
1778 ++++ b/drivers/bluetooth/ath3k.c
1779 +@@ -73,6 +73,7 @@ static struct usb_device_id ath3k_table[] = {
1780 + { USB_DEVICE(0x0CF3, 0x3004) },
1781 + { USB_DEVICE(0x0CF3, 0x311D) },
1782 + { USB_DEVICE(0x13d3, 0x3375) },
1783 ++ { USB_DEVICE(0x04CA, 0x3005) },
1784 +
1785 + /* Atheros AR5BBU12 with sflash firmware */
1786 + { USB_DEVICE(0x0489, 0xE02C) },
1787 +@@ -91,6 +92,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
1788 + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
1789 + { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
1790 + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
1791 ++ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
1792 +
1793 + { } /* Terminating entry */
1794 + };
1795 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1796 +index db44ad5..e56da6a 100644
1797 +--- a/drivers/bluetooth/btusb.c
1798 ++++ b/drivers/bluetooth/btusb.c
1799 +@@ -129,6 +129,7 @@ static struct usb_device_id blacklist_table[] = {
1800 + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
1801 + { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
1802 + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
1803 ++ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
1804 +
1805 + /* Atheros AR5BBU12 with sflash firmware */
1806 + { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
1807 +diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
1808 +index a60adbf..79dcf6e 100644
1809 +--- a/drivers/dma/at_hdmac.c
1810 ++++ b/drivers/dma/at_hdmac.c
1811 +@@ -239,10 +239,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
1812 +
1813 + vdbg_dump_regs(atchan);
1814 +
1815 +- /* clear any pending interrupt */
1816 +- while (dma_readl(atdma, EBCISR))
1817 +- cpu_relax();
1818 +-
1819 + channel_writel(atchan, SADDR, 0);
1820 + channel_writel(atchan, DADDR, 0);
1821 + channel_writel(atchan, CTRLA, 0);
1822 +diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
1823 +index b0a8117..0535c21 100644
1824 +--- a/drivers/firmware/efivars.c
1825 ++++ b/drivers/firmware/efivars.c
1826 +@@ -191,6 +191,190 @@ utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
1827 + }
1828 + }
1829 +
1830 ++static bool
1831 ++validate_device_path(struct efi_variable *var, int match, u8 *buffer,
1832 ++ unsigned long len)
1833 ++{
1834 ++ struct efi_generic_dev_path *node;
1835 ++ int offset = 0;
1836 ++
1837 ++ node = (struct efi_generic_dev_path *)buffer;
1838 ++
1839 ++ if (len < sizeof(*node))
1840 ++ return false;
1841 ++
1842 ++ while (offset <= len - sizeof(*node) &&
1843 ++ node->length >= sizeof(*node) &&
1844 ++ node->length <= len - offset) {
1845 ++ offset += node->length;
1846 ++
1847 ++ if ((node->type == EFI_DEV_END_PATH ||
1848 ++ node->type == EFI_DEV_END_PATH2) &&
1849 ++ node->sub_type == EFI_DEV_END_ENTIRE)
1850 ++ return true;
1851 ++
1852 ++ node = (struct efi_generic_dev_path *)(buffer + offset);
1853 ++ }
1854 ++
1855 ++ /*
1856 ++ * If we're here then either node->length pointed past the end
1857 ++ * of the buffer or we reached the end of the buffer without
1858 ++ * finding a device path end node.
1859 ++ */
1860 ++ return false;
1861 ++}
1862 ++
1863 ++static bool
1864 ++validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
1865 ++ unsigned long len)
1866 ++{
1867 ++ /* An array of 16-bit integers */
1868 ++ if ((len % 2) != 0)
1869 ++ return false;
1870 ++
1871 ++ return true;
1872 ++}
1873 ++
1874 ++static bool
1875 ++validate_load_option(struct efi_variable *var, int match, u8 *buffer,
1876 ++ unsigned long len)
1877 ++{
1878 ++ u16 filepathlength;
1879 ++ int i, desclength = 0, namelen;
1880 ++
1881 ++ namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
1882 ++
1883 ++ /* Either "Boot" or "Driver" followed by four digits of hex */
1884 ++ for (i = match; i < match+4; i++) {
1885 ++ if (var->VariableName[i] > 127 ||
1886 ++ hex_to_bin(var->VariableName[i] & 0xff) < 0)
1887 ++ return true;
1888 ++ }
1889 ++
1890 ++ /* Reject it if there's 4 digits of hex and then further content */
1891 ++ if (namelen > match + 4)
1892 ++ return false;
1893 ++
1894 ++ /* A valid entry must be at least 8 bytes */
1895 ++ if (len < 8)
1896 ++ return false;
1897 ++
1898 ++ filepathlength = buffer[4] | buffer[5] << 8;
1899 ++
1900 ++ /*
1901 ++ * There's no stored length for the description, so it has to be
1902 ++ * found by hand
1903 ++ */
1904 ++ desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
1905 ++
1906 ++ /* Each boot entry must have a descriptor */
1907 ++ if (!desclength)
1908 ++ return false;
1909 ++
1910 ++ /*
1911 ++ * If the sum of the length of the description, the claimed filepath
1912 ++ * length and the original header are greater than the length of the
1913 ++ * variable, it's malformed
1914 ++ */
1915 ++ if ((desclength + filepathlength + 6) > len)
1916 ++ return false;
1917 ++
1918 ++ /*
1919 ++ * And, finally, check the filepath
1920 ++ */
1921 ++ return validate_device_path(var, match, buffer + desclength + 6,
1922 ++ filepathlength);
1923 ++}
1924 ++
1925 ++static bool
1926 ++validate_uint16(struct efi_variable *var, int match, u8 *buffer,
1927 ++ unsigned long len)
1928 ++{
1929 ++ /* A single 16-bit integer */
1930 ++ if (len != 2)
1931 ++ return false;
1932 ++
1933 ++ return true;
1934 ++}
1935 ++
1936 ++static bool
1937 ++validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
1938 ++ unsigned long len)
1939 ++{
1940 ++ int i;
1941 ++
1942 ++ for (i = 0; i < len; i++) {
1943 ++ if (buffer[i] > 127)
1944 ++ return false;
1945 ++
1946 ++ if (buffer[i] == 0)
1947 ++ return true;
1948 ++ }
1949 ++
1950 ++ return false;
1951 ++}
1952 ++
1953 ++struct variable_validate {
1954 ++ char *name;
1955 ++ bool (*validate)(struct efi_variable *var, int match, u8 *data,
1956 ++ unsigned long len);
1957 ++};
1958 ++
1959 ++static const struct variable_validate variable_validate[] = {
1960 ++ { "BootNext", validate_uint16 },
1961 ++ { "BootOrder", validate_boot_order },
1962 ++ { "DriverOrder", validate_boot_order },
1963 ++ { "Boot*", validate_load_option },
1964 ++ { "Driver*", validate_load_option },
1965 ++ { "ConIn", validate_device_path },
1966 ++ { "ConInDev", validate_device_path },
1967 ++ { "ConOut", validate_device_path },
1968 ++ { "ConOutDev", validate_device_path },
1969 ++ { "ErrOut", validate_device_path },
1970 ++ { "ErrOutDev", validate_device_path },
1971 ++ { "Timeout", validate_uint16 },
1972 ++ { "Lang", validate_ascii_string },
1973 ++ { "PlatformLang", validate_ascii_string },
1974 ++ { "", NULL },
1975 ++};
1976 ++
1977 ++static bool
1978 ++validate_var(struct efi_variable *var, u8 *data, unsigned long len)
1979 ++{
1980 ++ int i;
1981 ++ u16 *unicode_name = var->VariableName;
1982 ++
1983 ++ for (i = 0; variable_validate[i].validate != NULL; i++) {
1984 ++ const char *name = variable_validate[i].name;
1985 ++ int match;
1986 ++
1987 ++ for (match = 0; ; match++) {
1988 ++ char c = name[match];
1989 ++ u16 u = unicode_name[match];
1990 ++
1991 ++ /* All special variables are plain ascii */
1992 ++ if (u > 127)
1993 ++ return true;
1994 ++
1995 ++ /* Wildcard in the matching name means we've matched */
1996 ++ if (c == '*')
1997 ++ return variable_validate[i].validate(var,
1998 ++ match, data, len);
1999 ++
2000 ++ /* Case sensitive match */
2001 ++ if (c != u)
2002 ++ break;
2003 ++
2004 ++ /* Reached the end of the string while matching */
2005 ++ if (!c)
2006 ++ return variable_validate[i].validate(var,
2007 ++ match, data, len);
2008 ++ }
2009 ++ }
2010 ++
2011 ++ return true;
2012 ++}
2013 ++
2014 + static efi_status_t
2015 + get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
2016 + {
2017 +@@ -324,6 +508,12 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
2018 + return -EINVAL;
2019 + }
2020 +
2021 ++ if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
2022 ++ validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
2023 ++ printk(KERN_ERR "efivars: Malformed variable content\n");
2024 ++ return -EINVAL;
2025 ++ }
2026 ++
2027 + spin_lock(&efivars->lock);
2028 + status = efivars->ops->set_variable(new_var->VariableName,
2029 + &new_var->VendorGuid,
2030 +@@ -624,6 +814,12 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
2031 + if (!capable(CAP_SYS_ADMIN))
2032 + return -EACCES;
2033 +
2034 ++ if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
2035 ++ validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
2036 ++ printk(KERN_ERR "efivars: Malformed variable content\n");
2037 ++ return -EINVAL;
2038 ++ }
2039 ++
2040 + spin_lock(&efivars->lock);
2041 +
2042 + /*
2043 +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
2044 +index b9da890..a6c2f7a 100644
2045 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
2046 ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
2047 +@@ -984,6 +984,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
2048 + struct intel_ring_buffer *ring;
2049 + u32 exec_start, exec_len;
2050 + u32 seqno;
2051 ++ u32 mask;
2052 + int ret, mode, i;
2053 +
2054 + if (!i915_gem_check_execbuffer(args)) {
2055 +@@ -1021,6 +1022,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
2056 + }
2057 +
2058 + mode = args->flags & I915_EXEC_CONSTANTS_MASK;
2059 ++ mask = I915_EXEC_CONSTANTS_MASK;
2060 + switch (mode) {
2061 + case I915_EXEC_CONSTANTS_REL_GENERAL:
2062 + case I915_EXEC_CONSTANTS_ABSOLUTE:
2063 +@@ -1034,18 +1036,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
2064 + mode == I915_EXEC_CONSTANTS_REL_SURFACE)
2065 + return -EINVAL;
2066 +
2067 +- ret = intel_ring_begin(ring, 4);
2068 +- if (ret)
2069 +- return ret;
2070 +-
2071 +- intel_ring_emit(ring, MI_NOOP);
2072 +- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
2073 +- intel_ring_emit(ring, INSTPM);
2074 +- intel_ring_emit(ring,
2075 +- I915_EXEC_CONSTANTS_MASK << 16 | mode);
2076 +- intel_ring_advance(ring);
2077 +-
2078 +- dev_priv->relative_constants_mode = mode;
2079 ++ /* The HW changed the meaning on this bit on gen6 */
2080 ++ if (INTEL_INFO(dev)->gen >= 6)
2081 ++ mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
2082 + }
2083 + break;
2084 + default:
2085 +@@ -1064,6 +1057,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
2086 + return -EINVAL;
2087 + }
2088 +
2089 ++ if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
2090 ++ DRM_DEBUG("execbuf with %u cliprects\n",
2091 ++ args->num_cliprects);
2092 ++ return -EINVAL;
2093 ++ }
2094 + cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
2095 + GFP_KERNEL);
2096 + if (cliprects == NULL) {
2097 +@@ -1176,6 +1174,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
2098 + }
2099 + }
2100 +
2101 ++ if (ring == &dev_priv->ring[RCS] &&
2102 ++ mode != dev_priv->relative_constants_mode) {
2103 ++ ret = intel_ring_begin(ring, 4);
2104 ++ if (ret)
2105 ++ goto err;
2106 ++
2107 ++ intel_ring_emit(ring, MI_NOOP);
2108 ++ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
2109 ++ intel_ring_emit(ring, INSTPM);
2110 ++ intel_ring_emit(ring, mask << 16 | mode);
2111 ++ intel_ring_advance(ring);
2112 ++
2113 ++ dev_priv->relative_constants_mode = mode;
2114 ++ }
2115 ++
2116 + trace_i915_gem_ring_dispatch(ring, seqno);
2117 +
2118 + exec_start = batch_obj->gtt_offset + args->batch_start_offset;
2119 +@@ -1314,7 +1327,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
2120 + struct drm_i915_gem_exec_object2 *exec2_list = NULL;
2121 + int ret;
2122 +
2123 +- if (args->buffer_count < 1) {
2124 ++ if (args->buffer_count < 1 ||
2125 ++ args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
2126 + DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
2127 + return -EINVAL;
2128 + }
2129 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
2130 +index 2f99fd4..cbe5a88 100644
2131 +--- a/drivers/gpu/drm/i915/i915_reg.h
2132 ++++ b/drivers/gpu/drm/i915/i915_reg.h
2133 +@@ -442,6 +442,7 @@
2134 + #define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
2135 + will not assert AGPBUSY# and will only
2136 + be delivered when out of C3. */
2137 ++#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
2138 + #define ACTHD 0x020c8
2139 + #define FW_BLC 0x020d8
2140 + #define FW_BLC2 0x020dc
2141 +@@ -522,6 +523,7 @@
2142 + #define CM0_MASK_SHIFT 16
2143 + #define CM0_IZ_OPT_DISABLE (1<<6)
2144 + #define CM0_ZR_OPT_DISABLE (1<<5)
2145 ++#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
2146 + #define CM0_DEPTH_EVICT_DISABLE (1<<4)
2147 + #define CM0_COLOR_EVICT_DISABLE (1<<3)
2148 + #define CM0_DEPTH_WRITE_DISABLE (1<<1)
2149 +diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
2150 +index 64541f7..9cd81ba 100644
2151 +--- a/drivers/gpu/drm/i915/intel_hdmi.c
2152 ++++ b/drivers/gpu/drm/i915/intel_hdmi.c
2153 +@@ -136,7 +136,7 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
2154 +
2155 + val &= ~VIDEO_DIP_SELECT_MASK;
2156 +
2157 +- I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
2158 ++ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
2159 +
2160 + for (i = 0; i < len; i += 4) {
2161 + I915_WRITE(VIDEO_DIP_DATA, *data);
2162 +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
2163 +index 8673581..62f9ac5 100644
2164 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
2165 ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
2166 +@@ -414,6 +414,22 @@ static int init_render_ring(struct intel_ring_buffer *ring)
2167 + return ret;
2168 + }
2169 +
2170 ++
2171 ++ if (IS_GEN6(dev)) {
2172 ++ /* From the Sandybridge PRM, volume 1 part 3, page 24:
2173 ++ * "If this bit is set, STCunit will have LRA as replacement
2174 ++ * policy. [...] This bit must be reset. LRA replacement
2175 ++ * policy is not supported."
2176 ++ */
2177 ++ I915_WRITE(CACHE_MODE_0,
2178 ++ CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
2179 ++ }
2180 ++
2181 ++ if (INTEL_INFO(dev)->gen >= 6) {
2182 ++ I915_WRITE(INSTPM,
2183 ++ INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
2184 ++ }
2185 ++
2186 + return ret;
2187 + }
2188 +
2189 +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
2190 +index e334ec3..8eddcca 100644
2191 +--- a/drivers/gpu/drm/i915/intel_sdvo.c
2192 ++++ b/drivers/gpu/drm/i915/intel_sdvo.c
2193 +@@ -731,6 +731,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
2194 + uint16_t width, height;
2195 + uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
2196 + uint16_t h_sync_offset, v_sync_offset;
2197 ++ int mode_clock;
2198 +
2199 + width = mode->crtc_hdisplay;
2200 + height = mode->crtc_vdisplay;
2201 +@@ -745,7 +746,11 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
2202 + h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
2203 + v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
2204 +
2205 +- dtd->part1.clock = mode->clock / 10;
2206 ++ mode_clock = mode->clock;
2207 ++ mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
2208 ++ mode_clock /= 10;
2209 ++ dtd->part1.clock = mode_clock;
2210 ++
2211 + dtd->part1.h_active = width & 0xff;
2212 + dtd->part1.h_blank = h_blank_len & 0xff;
2213 + dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
2214 +@@ -997,7 +1002,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
2215 + struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
2216 + u32 sdvox;
2217 + struct intel_sdvo_in_out_map in_out;
2218 +- struct intel_sdvo_dtd input_dtd;
2219 ++ struct intel_sdvo_dtd input_dtd, output_dtd;
2220 + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
2221 + int rate;
2222 +
2223 +@@ -1022,20 +1027,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
2224 + intel_sdvo->attached_output))
2225 + return;
2226 +
2227 +- /* We have tried to get input timing in mode_fixup, and filled into
2228 +- * adjusted_mode.
2229 +- */
2230 +- if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
2231 +- input_dtd = intel_sdvo->input_dtd;
2232 +- } else {
2233 +- /* Set the output timing to the screen */
2234 +- if (!intel_sdvo_set_target_output(intel_sdvo,
2235 +- intel_sdvo->attached_output))
2236 +- return;
2237 +-
2238 +- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
2239 +- (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
2240 +- }
2241 ++ /* lvds has a special fixed output timing. */
2242 ++ if (intel_sdvo->is_lvds)
2243 ++ intel_sdvo_get_dtd_from_mode(&output_dtd,
2244 ++ intel_sdvo->sdvo_lvds_fixed_mode);
2245 ++ else
2246 ++ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
2247 ++ (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
2248 +
2249 + /* Set the input timing to the screen. Assume always input 0. */
2250 + if (!intel_sdvo_set_target_input(intel_sdvo))
2251 +@@ -1053,6 +1051,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
2252 + !intel_sdvo_set_tv_format(intel_sdvo))
2253 + return;
2254 +
2255 ++ /* We have tried to get input timing in mode_fixup, and filled into
2256 ++ * adjusted_mode.
2257 ++ */
2258 ++ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
2259 + (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
2260 +
2261 + switch (pixel_multiplier) {
2262 +@@ -1219,8 +1221,14 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
2263 +
2264 + static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
2265 + {
2266 ++ struct drm_device *dev = intel_sdvo->base.base.dev;
2267 + u8 response[2];
2268 +
2269 ++ /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
2270 ++ * on the line. */
2271 ++ if (IS_I945G(dev) || IS_I945GM(dev))
2272 ++ return false;
2273 ++
2274 + return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
2275 + &response, 2) && response[0];
2276 + }
2277 +diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
2278 +index 525744d..3df56c7 100644
2279 +--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
2280 ++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
2281 +@@ -245,7 +245,7 @@ static bool nouveau_dsm_detect(void)
2282 + struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
2283 + struct pci_dev *pdev = NULL;
2284 + int has_dsm = 0;
2285 +- int has_optimus;
2286 ++ int has_optimus = 0;
2287 + int vga_count = 0;
2288 + bool guid_valid;
2289 + int retval;
2290 +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
2291 +index b30081f..757c549 100644
2292 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c
2293 ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
2294 +@@ -917,8 +917,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
2295 + break;
2296 + }
2297 +
2298 +- if (radeon_encoder->active_device &
2299 +- (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
2300 ++ if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
2301 ++ (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
2302 + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
2303 + struct drm_connector *connector =
2304 + radeon_get_connector_for_encoder(encoder);
2305 +diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
2306 +index 104b376..427468f 100644
2307 +--- a/drivers/hwmon/coretemp.c
2308 ++++ b/drivers/hwmon/coretemp.c
2309 +@@ -51,7 +51,7 @@ module_param_named(tjmax, force_tjmax, int, 0444);
2310 + MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
2311 +
2312 + #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
2313 +-#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
2314 ++#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
2315 + #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
2316 + #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
2317 + #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
2318 +@@ -705,6 +705,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
2319 +
2320 + indx = TO_ATTR_NO(cpu);
2321 +
2322 ++ /* The core id is too big, just return */
2323 ++ if (indx > MAX_CORE_DATA - 1)
2324 ++ return;
2325 ++
2326 + if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
2327 + coretemp_remove_core(pdata, &pdev->dev, indx);
2328 +
2329 +diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
2330 +index 930370d..9a4c3ab 100644
2331 +--- a/drivers/hwmon/fam15h_power.c
2332 ++++ b/drivers/hwmon/fam15h_power.c
2333 +@@ -122,6 +122,41 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
2334 + return true;
2335 + }
2336 +
2337 ++/*
2338 ++ * Newer BKDG versions have an updated recommendation on how to properly
2339 ++ * initialize the running average range (was: 0xE, now: 0x9). This avoids
2340 ++ * counter saturations resulting in bogus power readings.
2341 ++ * We correct this value ourselves to cope with older BIOSes.
2342 ++ */
2343 ++static DEFINE_PCI_DEVICE_TABLE(affected_device) = {
2344 ++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
2345 ++ { 0 }
2346 ++};
2347 ++
2348 ++static void __devinit tweak_runavg_range(struct pci_dev *pdev)
2349 ++{
2350 ++ u32 val;
2351 ++
2352 ++ /*
2353 ++ * let this quirk apply only to the current version of the
2354 ++ * northbridge, since future versions may change the behavior
2355 ++ */
2356 ++ if (!pci_match_id(affected_device, pdev))
2357 ++ return;
2358 ++
2359 ++ pci_bus_read_config_dword(pdev->bus,
2360 ++ PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
2361 ++ REG_TDP_RUNNING_AVERAGE, &val);
2362 ++ if ((val & 0xf) != 0xe)
2363 ++ return;
2364 ++
2365 ++ val &= ~0xf;
2366 ++ val |= 0x9;
2367 ++ pci_bus_write_config_dword(pdev->bus,
2368 ++ PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
2369 ++ REG_TDP_RUNNING_AVERAGE, val);
2370 ++}
2371 ++
2372 + static void __devinit fam15h_power_init_data(struct pci_dev *f4,
2373 + struct fam15h_power_data *data)
2374 + {
2375 +@@ -155,6 +190,13 @@ static int __devinit fam15h_power_probe(struct pci_dev *pdev,
2376 + struct device *dev;
2377 + int err;
2378 +
2379 ++ /*
2380 ++ * though we ignore every other northbridge, we still have to
2381 ++ * do the tweaking on _each_ node in MCM processors as the counters
2382 ++ * are working hand-in-hand
2383 ++ */
2384 ++ tweak_runavg_range(pdev);
2385 ++
2386 + if (!fam15h_power_is_internal_node0(pdev)) {
2387 + err = -ENODEV;
2388 + goto exit;
2389 +diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
2390 +index 04be9f8..eb8ad53 100644
2391 +--- a/drivers/i2c/busses/i2c-pnx.c
2392 ++++ b/drivers/i2c/busses/i2c-pnx.c
2393 +@@ -546,8 +546,7 @@ static int i2c_pnx_controller_suspend(struct platform_device *pdev,
2394 + {
2395 + struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
2396 +
2397 +- /* FIXME: shouldn't this be clk_disable? */
2398 +- clk_enable(alg_data->clk);
2399 ++ clk_disable(alg_data->clk);
2400 +
2401 + return 0;
2402 + }
2403 +diff --git a/drivers/md/md.c b/drivers/md/md.c
2404 +index 6f37aa4..065ab4f 100644
2405 +--- a/drivers/md/md.c
2406 ++++ b/drivers/md/md.c
2407 +@@ -8100,7 +8100,8 @@ static int md_notify_reboot(struct notifier_block *this,
2408 +
2409 + for_each_mddev(mddev, tmp) {
2410 + if (mddev_trylock(mddev)) {
2411 +- __md_stop_writes(mddev);
2412 ++ if (mddev->pers)
2413 ++ __md_stop_writes(mddev);
2414 + mddev->safemode = 2;
2415 + mddev_unlock(mddev);
2416 + }
2417 +diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
2418 +index f6431ef..a1f5e3d 100644
2419 +--- a/drivers/media/dvb/frontends/drxk_hard.c
2420 ++++ b/drivers/media/dvb/frontends/drxk_hard.c
2421 +@@ -1523,8 +1523,10 @@ static int scu_command(struct drxk_state *state,
2422 + dprintk(1, "\n");
2423 +
2424 + if ((cmd == 0) || ((parameterLen > 0) && (parameter == NULL)) ||
2425 +- ((resultLen > 0) && (result == NULL)))
2426 +- goto error;
2427 ++ ((resultLen > 0) && (result == NULL))) {
2428 ++ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
2429 ++ return status;
2430 ++ }
2431 +
2432 + mutex_lock(&state->mutex);
2433 +
2434 +diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
2435 +index 13f54b5..a7e7d6f 100644
2436 +--- a/drivers/media/rc/winbond-cir.c
2437 ++++ b/drivers/media/rc/winbond-cir.c
2438 +@@ -1046,6 +1046,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
2439 + goto exit_unregister_led;
2440 + }
2441 +
2442 ++ data->dev->driver_type = RC_DRIVER_IR_RAW;
2443 + data->dev->driver_name = WBCIR_NAME;
2444 + data->dev->input_name = WBCIR_NAME;
2445 + data->dev->input_phys = "wbcir/cir0";
2446 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
2447 +index e15e47d..34416d4 100644
2448 +--- a/drivers/mmc/card/block.c
2449 ++++ b/drivers/mmc/card/block.c
2450 +@@ -799,7 +799,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
2451 + {
2452 + struct mmc_blk_data *md = mq->data;
2453 + struct mmc_card *card = md->queue.card;
2454 +- unsigned int from, nr, arg;
2455 ++ unsigned int from, nr, arg, trim_arg, erase_arg;
2456 + int err = 0, type = MMC_BLK_SECDISCARD;
2457 +
2458 + if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
2459 +@@ -807,20 +807,26 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
2460 + goto out;
2461 + }
2462 +
2463 ++ from = blk_rq_pos(req);
2464 ++ nr = blk_rq_sectors(req);
2465 ++
2466 + /* The sanitize operation is supported at v4.5 only */
2467 + if (mmc_can_sanitize(card)) {
2468 +- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2469 +- EXT_CSD_SANITIZE_START, 1, 0);
2470 +- goto out;
2471 ++ erase_arg = MMC_ERASE_ARG;
2472 ++ trim_arg = MMC_TRIM_ARG;
2473 ++ } else {
2474 ++ erase_arg = MMC_SECURE_ERASE_ARG;
2475 ++ trim_arg = MMC_SECURE_TRIM1_ARG;
2476 + }
2477 +
2478 +- from = blk_rq_pos(req);
2479 +- nr = blk_rq_sectors(req);
2480 +-
2481 +- if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
2482 +- arg = MMC_SECURE_TRIM1_ARG;
2483 +- else
2484 +- arg = MMC_SECURE_ERASE_ARG;
2485 ++ if (mmc_erase_group_aligned(card, from, nr))
2486 ++ arg = erase_arg;
2487 ++ else if (mmc_can_trim(card))
2488 ++ arg = trim_arg;
2489 ++ else {
2490 ++ err = -EINVAL;
2491 ++ goto out;
2492 ++ }
2493 + retry:
2494 + if (card->quirks & MMC_QUIRK_INAND_CMD38) {
2495 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2496 +@@ -830,25 +836,41 @@ retry:
2497 + INAND_CMD38_ARG_SECERASE,
2498 + 0);
2499 + if (err)
2500 +- goto out;
2501 ++ goto out_retry;
2502 + }
2503 ++
2504 + err = mmc_erase(card, from, nr, arg);
2505 +- if (!err && arg == MMC_SECURE_TRIM1_ARG) {
2506 ++ if (err == -EIO)
2507 ++ goto out_retry;
2508 ++ if (err)
2509 ++ goto out;
2510 ++
2511 ++ if (arg == MMC_SECURE_TRIM1_ARG) {
2512 + if (card->quirks & MMC_QUIRK_INAND_CMD38) {
2513 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2514 + INAND_CMD38_ARG_EXT_CSD,
2515 + INAND_CMD38_ARG_SECTRIM2,
2516 + 0);
2517 + if (err)
2518 +- goto out;
2519 ++ goto out_retry;
2520 + }
2521 ++
2522 + err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
2523 ++ if (err == -EIO)
2524 ++ goto out_retry;
2525 ++ if (err)
2526 ++ goto out;
2527 + }
2528 +-out:
2529 +- if (err == -EIO && !mmc_blk_reset(md, card->host, type))
2530 ++
2531 ++ if (mmc_can_sanitize(card))
2532 ++ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2533 ++ EXT_CSD_SANITIZE_START, 1, 0);
2534 ++out_retry:
2535 ++ if (err && !mmc_blk_reset(md, card->host, type))
2536 + goto retry;
2537 + if (!err)
2538 + mmc_blk_reset_success(md, type);
2539 ++out:
2540 + spin_lock_irq(&md->lock);
2541 + __blk_end_request(req, err, blk_rq_bytes(req));
2542 + spin_unlock_irq(&md->lock);
2543 +diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
2544 +index dcad59c..78690f2 100644
2545 +--- a/drivers/mmc/card/queue.c
2546 ++++ b/drivers/mmc/card/queue.c
2547 +@@ -134,7 +134,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
2548 +
2549 + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
2550 + q->limits.max_discard_sectors = max_discard;
2551 +- if (card->erased_byte == 0)
2552 ++ if (card->erased_byte == 0 && !mmc_can_discard(card))
2553 + q->limits.discard_zeroes_data = 1;
2554 + q->limits.discard_granularity = card->pref_erase << 9;
2555 + /* granularity must not be greater than max. discard */
2556 +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
2557 +index 950b97d..411a994 100644
2558 +--- a/drivers/mmc/core/core.c
2559 ++++ b/drivers/mmc/core/core.c
2560 +@@ -1516,7 +1516,10 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
2561 + {
2562 + unsigned int erase_timeout;
2563 +
2564 +- if (card->ext_csd.erase_group_def & 1) {
2565 ++ if (arg == MMC_DISCARD_ARG ||
2566 ++ (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
2567 ++ erase_timeout = card->ext_csd.trim_timeout;
2568 ++ } else if (card->ext_csd.erase_group_def & 1) {
2569 + /* High Capacity Erase Group Size uses HC timeouts */
2570 + if (arg == MMC_TRIM_ARG)
2571 + erase_timeout = card->ext_csd.trim_timeout;
2572 +@@ -1788,8 +1791,6 @@ int mmc_can_trim(struct mmc_card *card)
2573 + {
2574 + if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
2575 + return 1;
2576 +- if (mmc_can_discard(card))
2577 +- return 1;
2578 + return 0;
2579 + }
2580 + EXPORT_SYMBOL(mmc_can_trim);
2581 +@@ -1808,6 +1809,8 @@ EXPORT_SYMBOL(mmc_can_discard);
2582 +
2583 + int mmc_can_sanitize(struct mmc_card *card)
2584 + {
2585 ++ if (!mmc_can_trim(card) && !mmc_can_erase(card))
2586 ++ return 0;
2587 + if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2588 + return 1;
2589 + return 0;
2590 +diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
2591 +index 4540e37..1b47937 100644
2592 +--- a/drivers/mmc/host/sdhci-esdhc-imx.c
2593 ++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
2594 +@@ -467,8 +467,7 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
2595 + clk_enable(clk);
2596 + pltfm_host->clk = clk;
2597 +
2598 +- if (!is_imx25_esdhc(imx_data))
2599 +- host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
2600 ++ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
2601 +
2602 + if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data))
2603 + /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
2604 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2605 +index e58aa2b..f65e0b9 100644
2606 +--- a/drivers/net/bonding/bond_main.c
2607 ++++ b/drivers/net/bonding/bond_main.c
2608 +@@ -2982,7 +2982,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
2609 + trans_start + delta_in_ticks)) ||
2610 + bond->curr_active_slave != slave) {
2611 + slave->link = BOND_LINK_UP;
2612 +- bond->current_arp_slave = NULL;
2613 ++ if (bond->current_arp_slave) {
2614 ++ bond_set_slave_inactive_flags(
2615 ++ bond->current_arp_slave);
2616 ++ bond->current_arp_slave = NULL;
2617 ++ }
2618 +
2619 + pr_info("%s: link status definitely up for interface %s.\n",
2620 + bond->dev->name, slave->dev->name);
2621 +diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
2622 +index a7c5e88..eeac9ca 100644
2623 +--- a/drivers/net/dummy.c
2624 ++++ b/drivers/net/dummy.c
2625 +@@ -106,14 +106,14 @@ static int dummy_dev_init(struct net_device *dev)
2626 + return 0;
2627 + }
2628 +
2629 +-static void dummy_dev_free(struct net_device *dev)
2630 ++static void dummy_dev_uninit(struct net_device *dev)
2631 + {
2632 + free_percpu(dev->dstats);
2633 +- free_netdev(dev);
2634 + }
2635 +
2636 + static const struct net_device_ops dummy_netdev_ops = {
2637 + .ndo_init = dummy_dev_init,
2638 ++ .ndo_uninit = dummy_dev_uninit,
2639 + .ndo_start_xmit = dummy_xmit,
2640 + .ndo_validate_addr = eth_validate_addr,
2641 + .ndo_set_rx_mode = set_multicast_list,
2642 +@@ -127,7 +127,7 @@ static void dummy_setup(struct net_device *dev)
2643 +
2644 + /* Initialize the device structure. */
2645 + dev->netdev_ops = &dummy_netdev_ops;
2646 +- dev->destructor = dummy_dev_free;
2647 ++ dev->destructor = free_netdev;
2648 +
2649 + /* Fill in device structure with ethernet-generic values. */
2650 + dev->tx_queue_len = 0;
2651 +diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
2652 +index 33a4e35..ee532e1 100644
2653 +--- a/drivers/net/ethernet/atheros/atlx/atl1.c
2654 ++++ b/drivers/net/ethernet/atheros/atlx/atl1.c
2655 +@@ -2473,7 +2473,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
2656 + "pcie phy link down %x\n", status);
2657 + if (netif_running(adapter->netdev)) { /* reset MAC */
2658 + iowrite32(0, adapter->hw.hw_addr + REG_IMR);
2659 +- schedule_work(&adapter->pcie_dma_to_rst_task);
2660 ++ schedule_work(&adapter->reset_dev_task);
2661 + return IRQ_HANDLED;
2662 + }
2663 + }
2664 +@@ -2485,7 +2485,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
2665 + "pcie DMA r/w error (status = 0x%x)\n",
2666 + status);
2667 + iowrite32(0, adapter->hw.hw_addr + REG_IMR);
2668 +- schedule_work(&adapter->pcie_dma_to_rst_task);
2669 ++ schedule_work(&adapter->reset_dev_task);
2670 + return IRQ_HANDLED;
2671 + }
2672 +
2673 +@@ -2630,10 +2630,10 @@ static void atl1_down(struct atl1_adapter *adapter)
2674 + atl1_clean_rx_ring(adapter);
2675 + }
2676 +
2677 +-static void atl1_tx_timeout_task(struct work_struct *work)
2678 ++static void atl1_reset_dev_task(struct work_struct *work)
2679 + {
2680 + struct atl1_adapter *adapter =
2681 +- container_of(work, struct atl1_adapter, tx_timeout_task);
2682 ++ container_of(work, struct atl1_adapter, reset_dev_task);
2683 + struct net_device *netdev = adapter->netdev;
2684 +
2685 + netif_device_detach(netdev);
2686 +@@ -3032,12 +3032,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2687 + (unsigned long)adapter);
2688 + adapter->phy_timer_pending = false;
2689 +
2690 +- INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
2691 ++ INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
2692 +
2693 + INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
2694 +
2695 +- INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
2696 +-
2697 + err = register_netdev(netdev);
2698 + if (err)
2699 + goto err_common;
2700 +diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
2701 +index 109d6da..e04bf4d 100644
2702 +--- a/drivers/net/ethernet/atheros/atlx/atl1.h
2703 ++++ b/drivers/net/ethernet/atheros/atlx/atl1.h
2704 +@@ -758,9 +758,8 @@ struct atl1_adapter {
2705 + u16 link_speed;
2706 + u16 link_duplex;
2707 + spinlock_t lock;
2708 +- struct work_struct tx_timeout_task;
2709 ++ struct work_struct reset_dev_task;
2710 + struct work_struct link_chg_task;
2711 +- struct work_struct pcie_dma_to_rst_task;
2712 +
2713 + struct timer_list phy_config_timer;
2714 + bool phy_timer_pending;
2715 +diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
2716 +index aabcf4b..41c6d83 100644
2717 +--- a/drivers/net/ethernet/atheros/atlx/atlx.c
2718 ++++ b/drivers/net/ethernet/atheros/atlx/atlx.c
2719 +@@ -193,7 +193,7 @@ static void atlx_tx_timeout(struct net_device *netdev)
2720 + {
2721 + struct atlx_adapter *adapter = netdev_priv(netdev);
2722 + /* Do the reset outside of interrupt context */
2723 +- schedule_work(&adapter->tx_timeout_task);
2724 ++ schedule_work(&adapter->reset_dev_task);
2725 + }
2726 +
2727 + /*
2728 +diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
2729 +index d19c849..77241b6 100644
2730 +--- a/drivers/net/ethernet/micrel/ks8851_mll.c
2731 ++++ b/drivers/net/ethernet/micrel/ks8851_mll.c
2732 +@@ -40,7 +40,7 @@
2733 + #define DRV_NAME "ks8851_mll"
2734 +
2735 + static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
2736 +-#define MAX_RECV_FRAMES 32
2737 ++#define MAX_RECV_FRAMES 255
2738 + #define MAX_BUF_SIZE 2048
2739 + #define TX_BUF_SIZE 2000
2740 + #define RX_BUF_SIZE 2000
2741 +diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
2742 +index 7ece990..4b9f4bd 100644
2743 +--- a/drivers/net/ethernet/micrel/ksz884x.c
2744 ++++ b/drivers/net/ethernet/micrel/ksz884x.c
2745 +@@ -5679,7 +5679,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
2746 + memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
2747 + }
2748 +
2749 +- memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
2750 ++ memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
2751 +
2752 + interrupt = hw_block_intr(hw);
2753 +
2754 +diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
2755 +index aba4f67..8f47907 100644
2756 +--- a/drivers/net/ethernet/realtek/8139cp.c
2757 ++++ b/drivers/net/ethernet/realtek/8139cp.c
2758 +@@ -961,6 +961,11 @@ static inline void cp_start_hw (struct cp_private *cp)
2759 + cpw8(Cmd, RxOn | TxOn);
2760 + }
2761 +
2762 ++static void cp_enable_irq(struct cp_private *cp)
2763 ++{
2764 ++ cpw16_f(IntrMask, cp_intr_mask);
2765 ++}
2766 ++
2767 + static void cp_init_hw (struct cp_private *cp)
2768 + {
2769 + struct net_device *dev = cp->dev;
2770 +@@ -1000,8 +1005,6 @@ static void cp_init_hw (struct cp_private *cp)
2771 +
2772 + cpw16(MultiIntr, 0);
2773 +
2774 +- cpw16_f(IntrMask, cp_intr_mask);
2775 +-
2776 + cpw8_f(Cfg9346, Cfg9346_Lock);
2777 + }
2778 +
2779 +@@ -1133,6 +1136,8 @@ static int cp_open (struct net_device *dev)
2780 + if (rc)
2781 + goto err_out_hw;
2782 +
2783 ++ cp_enable_irq(cp);
2784 ++
2785 + netif_carrier_off(dev);
2786 + mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
2787 + netif_start_queue(dev);
2788 +@@ -2034,6 +2039,7 @@ static int cp_resume (struct pci_dev *pdev)
2789 + /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
2790 + cp_init_rings_index (cp);
2791 + cp_init_hw (cp);
2792 ++ cp_enable_irq(cp);
2793 + netif_start_queue (dev);
2794 +
2795 + spin_lock_irqsave (&cp->lock, flags);
2796 +diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
2797 +index 8843071..8c7dd21 100644
2798 +--- a/drivers/net/ethernet/smsc/smsc911x.c
2799 ++++ b/drivers/net/ethernet/smsc/smsc911x.c
2800 +@@ -1089,10 +1089,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat)
2801 +
2802 + /* Quickly dumps bad packets */
2803 + static void
2804 +-smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
2805 ++smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
2806 + {
2807 +- unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
2808 +-
2809 + if (likely(pktwords >= 4)) {
2810 + unsigned int timeout = 500;
2811 + unsigned int val;
2812 +@@ -1156,7 +1154,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
2813 + continue;
2814 + }
2815 +
2816 +- skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN);
2817 ++ skb = netdev_alloc_skb(dev, pktwords << 2);
2818 + if (unlikely(!skb)) {
2819 + SMSC_WARN(pdata, rx_err,
2820 + "Unable to allocate skb for rx packet");
2821 +@@ -1166,14 +1164,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
2822 + break;
2823 + }
2824 +
2825 +- skb->data = skb->head;
2826 +- skb_reset_tail_pointer(skb);
2827 ++ pdata->ops->rx_readfifo(pdata,
2828 ++ (unsigned int *)skb->data, pktwords);
2829 +
2830 + /* Align IP on 16B boundary */
2831 + skb_reserve(skb, NET_IP_ALIGN);
2832 + skb_put(skb, pktlength - 4);
2833 +- pdata->ops->rx_readfifo(pdata,
2834 +- (unsigned int *)skb->head, pktwords);
2835 + skb->protocol = eth_type_trans(skb, dev);
2836 + skb_checksum_none_assert(skb);
2837 + netif_receive_skb(skb);
2838 +@@ -1396,7 +1392,7 @@ static int smsc911x_open(struct net_device *dev)
2839 + smsc911x_reg_write(pdata, FIFO_INT, temp);
2840 +
2841 + /* set RX Data offset to 2 bytes for alignment */
2842 +- smsc911x_reg_write(pdata, RX_CFG, (2 << 8));
2843 ++ smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8));
2844 +
2845 + /* enable NAPI polling before enabling RX interrupts */
2846 + napi_enable(&pdata->napi);
2847 +diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
2848 +index 7615040..f470ab6 100644
2849 +--- a/drivers/net/ethernet/ti/davinci_mdio.c
2850 ++++ b/drivers/net/ethernet/ti/davinci_mdio.c
2851 +@@ -181,6 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
2852 + __davinci_mdio_reset(data);
2853 + return -EAGAIN;
2854 + }
2855 ++
2856 ++ reg = __raw_readl(&regs->user[0].access);
2857 ++ if ((reg & USERACCESS_GO) == 0)
2858 ++ return 0;
2859 ++
2860 + dev_err(data->dev, "timed out waiting for user access\n");
2861 + return -ETIMEDOUT;
2862 + }
2863 +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
2864 +index 486b404..3ed983c 100644
2865 +--- a/drivers/net/ppp/ppp_generic.c
2866 ++++ b/drivers/net/ppp/ppp_generic.c
2867 +@@ -968,7 +968,6 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
2868 + proto = npindex_to_proto[npi];
2869 + put_unaligned_be16(proto, pp);
2870 +
2871 +- netif_stop_queue(dev);
2872 + skb_queue_tail(&ppp->file.xq, skb);
2873 + ppp_xmit_process(ppp);
2874 + return NETDEV_TX_OK;
2875 +@@ -1063,6 +1062,8 @@ ppp_xmit_process(struct ppp *ppp)
2876 + code that we can accept some more. */
2877 + if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
2878 + netif_wake_queue(ppp->dev);
2879 ++ else
2880 ++ netif_stop_queue(ppp->dev);
2881 + }
2882 + ppp_xmit_unlock(ppp);
2883 + }
2884 +diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
2885 +index a5b9b12..7bd219b 100644
2886 +--- a/drivers/net/usb/smsc75xx.c
2887 ++++ b/drivers/net/usb/smsc75xx.c
2888 +@@ -1050,6 +1050,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
2889 + dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
2890 + dev->net->flags |= IFF_MULTICAST;
2891 + dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
2892 ++ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2893 + return 0;
2894 + }
2895 +
2896 +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
2897 +index eff6767..55b3218 100644
2898 +--- a/drivers/net/usb/smsc95xx.c
2899 ++++ b/drivers/net/usb/smsc95xx.c
2900 +@@ -1190,7 +1190,7 @@ static const struct driver_info smsc95xx_info = {
2901 + .rx_fixup = smsc95xx_rx_fixup,
2902 + .tx_fixup = smsc95xx_tx_fixup,
2903 + .status = smsc95xx_status,
2904 +- .flags = FLAG_ETHER | FLAG_SEND_ZLP,
2905 ++ .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
2906 + };
2907 +
2908 + static const struct usb_device_id products[] = {
2909 +diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
2910 +index 64a1106..4697cf3 100644
2911 +--- a/drivers/net/wimax/i2400m/netdev.c
2912 ++++ b/drivers/net/wimax/i2400m/netdev.c
2913 +@@ -607,7 +607,8 @@ static void i2400m_get_drvinfo(struct net_device *net_dev,
2914 + struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
2915 +
2916 + strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
2917 +- strncpy(info->fw_version, i2400m->fw_name, sizeof(info->fw_version) - 1);
2918 ++ strncpy(info->fw_version,
2919 ++ i2400m->fw_name ? : "", sizeof(info->fw_version) - 1);
2920 + if (net_dev->dev.parent)
2921 + strncpy(info->bus_info, dev_name(net_dev->dev.parent),
2922 + sizeof(info->bus_info) - 1);
2923 +diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
2924 +index 5634d9a..680709c 100644
2925 +--- a/drivers/net/wireless/b43/main.c
2926 ++++ b/drivers/net/wireless/b43/main.c
2927 +@@ -4820,8 +4820,14 @@ static int b43_op_start(struct ieee80211_hw *hw)
2928 + out_mutex_unlock:
2929 + mutex_unlock(&wl->mutex);
2930 +
2931 +- /* reload configuration */
2932 +- b43_op_config(hw, ~0);
2933 ++ /*
2934 ++ * Configuration may have been overwritten during initialization.
2935 ++ * Reload the configuration, but only if initialization was
2936 ++ * successful. Reloading the configuration after a failed init
2937 ++ * may hang the system.
2938 ++ */
2939 ++ if (!err)
2940 ++ b43_op_config(hw, ~0);
2941 +
2942 + return err;
2943 + }
2944 +diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
2945 +index 453f58e..f98becc 100644
2946 +--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
2947 ++++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
2948 +@@ -7865,6 +7865,7 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
2949 + {
2950 + int len_mpdu;
2951 + struct ieee80211_rx_status rx_status;
2952 ++ struct ieee80211_hdr *hdr;
2953 +
2954 + memset(&rx_status, 0, sizeof(rx_status));
2955 + prep_mac80211_status(wlc, rxh, p, &rx_status);
2956 +@@ -7874,6 +7875,13 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
2957 + skb_pull(p, D11_PHY_HDR_LEN);
2958 + __skb_trim(p, len_mpdu);
2959 +
2960 ++ /* unmute transmit */
2961 ++ if (wlc->hw->suspended_fifos) {
2962 ++ hdr = (struct ieee80211_hdr *)p->data;
2963 ++ if (ieee80211_is_beacon(hdr->frame_control))
2964 ++ brcms_b_mute(wlc->hw, false);
2965 ++ }
2966 ++
2967 + memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
2968 + ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
2969 + }
2970 +diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
2971 +index 99a710d..827889b 100644
2972 +--- a/drivers/net/wireless/ipw2x00/ipw2200.c
2973 ++++ b/drivers/net/wireless/ipw2x00/ipw2200.c
2974 +@@ -2183,6 +2183,7 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2975 + {
2976 + int rc = 0;
2977 + unsigned long flags;
2978 ++ unsigned long now, end;
2979 +
2980 + spin_lock_irqsave(&priv->lock, flags);
2981 + if (priv->status & STATUS_HCMD_ACTIVE) {
2982 +@@ -2224,10 +2225,20 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2983 + }
2984 + spin_unlock_irqrestore(&priv->lock, flags);
2985 +
2986 ++ now = jiffies;
2987 ++ end = now + HOST_COMPLETE_TIMEOUT;
2988 ++again:
2989 + rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2990 + !(priv->
2991 + status & STATUS_HCMD_ACTIVE),
2992 +- HOST_COMPLETE_TIMEOUT);
2993 ++ end - now);
2994 ++ if (rc < 0) {
2995 ++ now = jiffies;
2996 ++ if (time_before(now, end))
2997 ++ goto again;
2998 ++ rc = 0;
2999 ++ }
3000 ++
3001 + if (rc == 0) {
3002 + spin_lock_irqsave(&priv->lock, flags);
3003 + if (priv->status & STATUS_HCMD_ACTIVE) {
3004 +diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
3005 +index dd008b0..1e6c8cc 100644
3006 +--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
3007 ++++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
3008 +@@ -45,8 +45,8 @@
3009 + #include "iwl-cfg.h"
3010 +
3011 + /* Highest firmware API version supported */
3012 +-#define IWL1000_UCODE_API_MAX 6
3013 +-#define IWL100_UCODE_API_MAX 6
3014 ++#define IWL1000_UCODE_API_MAX 5
3015 ++#define IWL100_UCODE_API_MAX 5
3016 +
3017 + /* Oldest version we won't warn about */
3018 + #define IWL1000_UCODE_API_OK 5
3019 +@@ -244,5 +244,5 @@ struct iwl_cfg iwl100_bg_cfg = {
3020 + IWL_DEVICE_100,
3021 + };
3022 +
3023 +-MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
3024 +-MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
3025 ++MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
3026 ++MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
3027 +diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
3028 +index 7943197..9823e41 100644
3029 +--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
3030 ++++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
3031 +@@ -51,10 +51,10 @@
3032 + #define IWL135_UCODE_API_MAX 6
3033 +
3034 + /* Oldest version we won't warn about */
3035 +-#define IWL2030_UCODE_API_OK 5
3036 +-#define IWL2000_UCODE_API_OK 5
3037 +-#define IWL105_UCODE_API_OK 5
3038 +-#define IWL135_UCODE_API_OK 5
3039 ++#define IWL2030_UCODE_API_OK 6
3040 ++#define IWL2000_UCODE_API_OK 6
3041 ++#define IWL105_UCODE_API_OK 6
3042 ++#define IWL135_UCODE_API_OK 6
3043 +
3044 + /* Lowest firmware API version supported */
3045 + #define IWL2030_UCODE_API_MIN 5
3046 +@@ -372,7 +372,7 @@ struct iwl_cfg iwl135_bgn_cfg = {
3047 + .ht_params = &iwl2000_ht_params,
3048 + };
3049 +
3050 +-MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
3051 +-MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
3052 +-MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
3053 +-MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
3054 ++MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
3055 ++MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
3056 ++MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
3057 ++MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
3058 +diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
3059 +index f55fb2d..606213f 100644
3060 +--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
3061 ++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
3062 +@@ -50,6 +50,10 @@
3063 + #define IWL5000_UCODE_API_MAX 5
3064 + #define IWL5150_UCODE_API_MAX 2
3065 +
3066 ++/* Oldest version we won't warn about */
3067 ++#define IWL5000_UCODE_API_OK 5
3068 ++#define IWL5150_UCODE_API_OK 2
3069 ++
3070 + /* Lowest firmware API version supported */
3071 + #define IWL5000_UCODE_API_MIN 1
3072 + #define IWL5150_UCODE_API_MIN 1
3073 +@@ -373,6 +377,7 @@ static struct iwl_ht_params iwl5000_ht_params = {
3074 + #define IWL_DEVICE_5000 \
3075 + .fw_name_pre = IWL5000_FW_PRE, \
3076 + .ucode_api_max = IWL5000_UCODE_API_MAX, \
3077 ++ .ucode_api_ok = IWL5000_UCODE_API_OK, \
3078 + .ucode_api_min = IWL5000_UCODE_API_MIN, \
3079 + .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
3080 + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
3081 +@@ -416,6 +421,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
3082 + .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
3083 + .fw_name_pre = IWL5000_FW_PRE,
3084 + .ucode_api_max = IWL5000_UCODE_API_MAX,
3085 ++ .ucode_api_ok = IWL5000_UCODE_API_OK,
3086 + .ucode_api_min = IWL5000_UCODE_API_MIN,
3087 + .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
3088 + .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
3089 +@@ -429,6 +435,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
3090 + #define IWL_DEVICE_5150 \
3091 + .fw_name_pre = IWL5150_FW_PRE, \
3092 + .ucode_api_max = IWL5150_UCODE_API_MAX, \
3093 ++ .ucode_api_ok = IWL5150_UCODE_API_OK, \
3094 + .ucode_api_min = IWL5150_UCODE_API_MIN, \
3095 + .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
3096 + .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
3097 +@@ -450,5 +457,5 @@ struct iwl_cfg iwl5150_abg_cfg = {
3098 + IWL_DEVICE_5150,
3099 + };
3100 +
3101 +-MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
3102 +-MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
3103 ++MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
3104 ++MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
3105 +diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
3106 +index c840c78..b4f809c 100644
3107 +--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
3108 ++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
3109 +@@ -46,12 +46,15 @@
3110 + #include "iwl-cfg.h"
3111 +
3112 + /* Highest firmware API version supported */
3113 +-#define IWL6000_UCODE_API_MAX 4
3114 ++#define IWL6000_UCODE_API_MAX 6
3115 + #define IWL6050_UCODE_API_MAX 5
3116 + #define IWL6000G2_UCODE_API_MAX 6
3117 +
3118 + /* Oldest version we won't warn about */
3119 ++#define IWL6000_UCODE_API_OK 4
3120 + #define IWL6000G2_UCODE_API_OK 5
3121 ++#define IWL6050_UCODE_API_OK 5
3122 ++#define IWL6000G2B_UCODE_API_OK 6
3123 +
3124 + /* Lowest firmware API version supported */
3125 + #define IWL6000_UCODE_API_MIN 4
3126 +@@ -399,7 +402,7 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
3127 + #define IWL_DEVICE_6030 \
3128 + .fw_name_pre = IWL6030_FW_PRE, \
3129 + .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
3130 +- .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
3131 ++ .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
3132 + .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
3133 + .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
3134 + .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
3135 +@@ -479,6 +482,7 @@ struct iwl_cfg iwl130_bg_cfg = {
3136 + #define IWL_DEVICE_6000i \
3137 + .fw_name_pre = IWL6000_FW_PRE, \
3138 + .ucode_api_max = IWL6000_UCODE_API_MAX, \
3139 ++ .ucode_api_ok = IWL6000_UCODE_API_OK, \
3140 + .ucode_api_min = IWL6000_UCODE_API_MIN, \
3141 + .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
3142 + .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
3143 +@@ -559,6 +563,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
3144 + .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
3145 + .fw_name_pre = IWL6000_FW_PRE,
3146 + .ucode_api_max = IWL6000_UCODE_API_MAX,
3147 ++ .ucode_api_ok = IWL6000_UCODE_API_OK,
3148 + .ucode_api_min = IWL6000_UCODE_API_MIN,
3149 + .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
3150 + .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
3151 +@@ -569,7 +574,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
3152 + .led_mode = IWL_LED_BLINK,
3153 + };
3154 +
3155 +-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
3156 +-MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
3157 +-MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
3158 +-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
3159 ++MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
3160 ++MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
3161 ++MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
3162 ++MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
3163 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
3164 +index e0e9a3d..d7d2512 100644
3165 +--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
3166 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
3167 +@@ -1504,7 +1504,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
3168 +
3169 + static void iwlagn_prepare_restart(struct iwl_priv *priv)
3170 + {
3171 +- struct iwl_rxon_context *ctx;
3172 + bool bt_full_concurrent;
3173 + u8 bt_ci_compliance;
3174 + u8 bt_load;
3175 +@@ -1513,8 +1512,6 @@ static void iwlagn_prepare_restart(struct iwl_priv *priv)
3176 +
3177 + lockdep_assert_held(&priv->shrd->mutex);
3178 +
3179 +- for_each_context(priv, ctx)
3180 +- ctx->vif = NULL;
3181 + priv->is_open = 0;
3182 +
3183 + /*
3184 +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
3185 +index 3d75d4c..832ec4d 100644
3186 +--- a/drivers/net/wireless/iwlwifi/iwl-core.c
3187 ++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
3188 +@@ -1228,6 +1228,7 @@ int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
3189 + struct iwl_rxon_context *tmp, *ctx = NULL;
3190 + int err;
3191 + enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
3192 ++ bool reset = false;
3193 +
3194 + IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
3195 + viftype, vif->addr);
3196 +@@ -1249,6 +1250,13 @@ int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
3197 + tmp->interface_modes | tmp->exclusive_interface_modes;
3198 +
3199 + if (tmp->vif) {
3200 ++ /* On reset we need to add the same interface again */
3201 ++ if (tmp->vif == vif) {
3202 ++ reset = true;
3203 ++ ctx = tmp;
3204 ++ break;
3205 ++ }
3206 ++
3207 + /* check if this busy context is exclusive */
3208 + if (tmp->exclusive_interface_modes &
3209 + BIT(tmp->vif->type)) {
3210 +@@ -1275,7 +1283,7 @@ int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
3211 + ctx->vif = vif;
3212 +
3213 + err = iwl_setup_interface(priv, ctx);
3214 +- if (!err)
3215 ++ if (!err || reset)
3216 + goto out;
3217 +
3218 + ctx->vif = NULL;
3219 +diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
3220 +index 5bede9d..aae992a 100644
3221 +--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
3222 ++++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
3223 +@@ -104,15 +104,29 @@
3224 + * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
3225 + * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
3226 + * aligned (address bits 0-7 must be 0).
3227 ++ * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
3228 ++ * for them are in different places.
3229 + *
3230 + * Bit fields in each pointer register:
3231 + * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
3232 + */
3233 +-#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
3234 +-#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
3235 +-
3236 +-/* Find TFD CB base pointer for given queue (range 0-15). */
3237 +-#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
3238 ++#define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
3239 ++#define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
3240 ++#define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0)
3241 ++#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
3242 ++#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
3243 ++#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
3244 ++
3245 ++/* Find TFD CB base pointer for given queue */
3246 ++static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
3247 ++{
3248 ++ if (chnl < 16)
3249 ++ return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
3250 ++ if (chnl < 20)
3251 ++ return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
3252 ++ WARN_ON_ONCE(chnl >= 32);
3253 ++ return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
3254 ++}
3255 +
3256 +
3257 + /**
3258 +diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
3259 +index bebdd82..d9b089e 100644
3260 +--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
3261 ++++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
3262 +@@ -227,12 +227,33 @@
3263 + #define SCD_AIT (SCD_BASE + 0x0c)
3264 + #define SCD_TXFACT (SCD_BASE + 0x10)
3265 + #define SCD_ACTIVE (SCD_BASE + 0x14)
3266 +-#define SCD_QUEUE_WRPTR(x) (SCD_BASE + 0x18 + (x) * 4)
3267 +-#define SCD_QUEUE_RDPTR(x) (SCD_BASE + 0x68 + (x) * 4)
3268 + #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
3269 + #define SCD_AGGR_SEL (SCD_BASE + 0x248)
3270 + #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
3271 +-#define SCD_QUEUE_STATUS_BITS(x) (SCD_BASE + 0x10c + (x) * 4)
3272 ++
3273 ++static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
3274 ++{
3275 ++ if (chnl < 20)
3276 ++ return SCD_BASE + 0x18 + chnl * 4;
3277 ++ WARN_ON_ONCE(chnl >= 32);
3278 ++ return SCD_BASE + 0x284 + (chnl - 20) * 4;
3279 ++}
3280 ++
3281 ++static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
3282 ++{
3283 ++ if (chnl < 20)
3284 ++ return SCD_BASE + 0x68 + chnl * 4;
3285 ++ WARN_ON_ONCE(chnl >= 32);
3286 ++ return SCD_BASE + 0x2B4 + (chnl - 20) * 4;
3287 ++}
3288 ++
3289 ++static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
3290 ++{
3291 ++ if (chnl < 20)
3292 ++ return SCD_BASE + 0x10c + chnl * 4;
3293 ++ WARN_ON_ONCE(chnl >= 32);
3294 ++ return SCD_BASE + 0x384 + (chnl - 20) * 4;
3295 ++}
3296 +
3297 + /*********************** END TX SCHEDULER *************************************/
3298 +
3299 +diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
3300 +index 445ff21..2f218f9 100644
3301 +--- a/drivers/net/wireless/mwifiex/pcie.h
3302 ++++ b/drivers/net/wireless/mwifiex/pcie.h
3303 +@@ -48,15 +48,15 @@
3304 + #define PCIE_HOST_INT_STATUS_MASK 0xC3C
3305 + #define PCIE_SCRATCH_2_REG 0xC40
3306 + #define PCIE_SCRATCH_3_REG 0xC44
3307 +-#define PCIE_SCRATCH_4_REG 0xCC0
3308 +-#define PCIE_SCRATCH_5_REG 0xCC4
3309 +-#define PCIE_SCRATCH_6_REG 0xCC8
3310 +-#define PCIE_SCRATCH_7_REG 0xCCC
3311 +-#define PCIE_SCRATCH_8_REG 0xCD0
3312 +-#define PCIE_SCRATCH_9_REG 0xCD4
3313 +-#define PCIE_SCRATCH_10_REG 0xCD8
3314 +-#define PCIE_SCRATCH_11_REG 0xCDC
3315 +-#define PCIE_SCRATCH_12_REG 0xCE0
3316 ++#define PCIE_SCRATCH_4_REG 0xCD0
3317 ++#define PCIE_SCRATCH_5_REG 0xCD4
3318 ++#define PCIE_SCRATCH_6_REG 0xCD8
3319 ++#define PCIE_SCRATCH_7_REG 0xCDC
3320 ++#define PCIE_SCRATCH_8_REG 0xCE0
3321 ++#define PCIE_SCRATCH_9_REG 0xCE4
3322 ++#define PCIE_SCRATCH_10_REG 0xCE8
3323 ++#define PCIE_SCRATCH_11_REG 0xCEC
3324 ++#define PCIE_SCRATCH_12_REG 0xCF0
3325 +
3326 + #define CPU_INTR_DNLD_RDY BIT(0)
3327 + #define CPU_INTR_DOOR_BELL BIT(1)
3328 +diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
3329 +index cb71e88..0ffa111 100644
3330 +--- a/drivers/net/wireless/rt2x00/rt2800usb.c
3331 ++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
3332 +@@ -914,12 +914,14 @@ static struct usb_device_id rt2800usb_device_table[] = {
3333 + { USB_DEVICE(0x050d, 0x8053) },
3334 + { USB_DEVICE(0x050d, 0x805c) },
3335 + { USB_DEVICE(0x050d, 0x815c) },
3336 ++ { USB_DEVICE(0x050d, 0x825a) },
3337 + { USB_DEVICE(0x050d, 0x825b) },
3338 + { USB_DEVICE(0x050d, 0x935a) },
3339 + { USB_DEVICE(0x050d, 0x935b) },
3340 + /* Buffalo */
3341 + { USB_DEVICE(0x0411, 0x00e8) },
3342 + { USB_DEVICE(0x0411, 0x0158) },
3343 ++ { USB_DEVICE(0x0411, 0x015d) },
3344 + { USB_DEVICE(0x0411, 0x016f) },
3345 + { USB_DEVICE(0x0411, 0x01a2) },
3346 + /* Corega */
3347 +@@ -934,6 +936,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
3348 + { USB_DEVICE(0x07d1, 0x3c0e) },
3349 + { USB_DEVICE(0x07d1, 0x3c0f) },
3350 + { USB_DEVICE(0x07d1, 0x3c11) },
3351 ++ { USB_DEVICE(0x07d1, 0x3c13) },
3352 ++ { USB_DEVICE(0x07d1, 0x3c15) },
3353 + { USB_DEVICE(0x07d1, 0x3c16) },
3354 + { USB_DEVICE(0x2001, 0x3c1b) },
3355 + /* Draytek */
3356 +@@ -944,6 +948,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
3357 + { USB_DEVICE(0x7392, 0x7711) },
3358 + { USB_DEVICE(0x7392, 0x7717) },
3359 + { USB_DEVICE(0x7392, 0x7718) },
3360 ++ { USB_DEVICE(0x7392, 0x7722) },
3361 + /* Encore */
3362 + { USB_DEVICE(0x203d, 0x1480) },
3363 + { USB_DEVICE(0x203d, 0x14a9) },
3364 +@@ -978,6 +983,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
3365 + { USB_DEVICE(0x1737, 0x0070) },
3366 + { USB_DEVICE(0x1737, 0x0071) },
3367 + { USB_DEVICE(0x1737, 0x0077) },
3368 ++ { USB_DEVICE(0x1737, 0x0078) },
3369 + /* Logitec */
3370 + { USB_DEVICE(0x0789, 0x0162) },
3371 + { USB_DEVICE(0x0789, 0x0163) },
3372 +@@ -1001,9 +1007,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
3373 + { USB_DEVICE(0x0db0, 0x871b) },
3374 + { USB_DEVICE(0x0db0, 0x871c) },
3375 + { USB_DEVICE(0x0db0, 0x899a) },
3376 ++ /* Ovislink */
3377 ++ { USB_DEVICE(0x1b75, 0x3071) },
3378 ++ { USB_DEVICE(0x1b75, 0x3072) },
3379 + /* Para */
3380 + { USB_DEVICE(0x20b8, 0x8888) },
3381 + /* Pegatron */
3382 ++ { USB_DEVICE(0x1d4d, 0x0002) },
3383 + { USB_DEVICE(0x1d4d, 0x000c) },
3384 + { USB_DEVICE(0x1d4d, 0x000e) },
3385 + { USB_DEVICE(0x1d4d, 0x0011) },
3386 +@@ -1056,7 +1066,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
3387 + /* Sparklan */
3388 + { USB_DEVICE(0x15a9, 0x0006) },
3389 + /* Sweex */
3390 ++ { USB_DEVICE(0x177f, 0x0153) },
3391 + { USB_DEVICE(0x177f, 0x0302) },
3392 ++ { USB_DEVICE(0x177f, 0x0313) },
3393 + /* U-Media */
3394 + { USB_DEVICE(0x157e, 0x300e) },
3395 + { USB_DEVICE(0x157e, 0x3013) },
3396 +@@ -1140,27 +1152,24 @@ static struct usb_device_id rt2800usb_device_table[] = {
3397 + { USB_DEVICE(0x13d3, 0x3322) },
3398 + /* Belkin */
3399 + { USB_DEVICE(0x050d, 0x1003) },
3400 +- { USB_DEVICE(0x050d, 0x825a) },
3401 + /* Buffalo */
3402 + { USB_DEVICE(0x0411, 0x012e) },
3403 + { USB_DEVICE(0x0411, 0x0148) },
3404 + { USB_DEVICE(0x0411, 0x0150) },
3405 +- { USB_DEVICE(0x0411, 0x015d) },
3406 + /* Corega */
3407 + { USB_DEVICE(0x07aa, 0x0041) },
3408 + { USB_DEVICE(0x07aa, 0x0042) },
3409 + { USB_DEVICE(0x18c5, 0x0008) },
3410 + /* D-Link */
3411 + { USB_DEVICE(0x07d1, 0x3c0b) },
3412 +- { USB_DEVICE(0x07d1, 0x3c13) },
3413 +- { USB_DEVICE(0x07d1, 0x3c15) },
3414 + { USB_DEVICE(0x07d1, 0x3c17) },
3415 + { USB_DEVICE(0x2001, 0x3c17) },
3416 + /* Edimax */
3417 + { USB_DEVICE(0x7392, 0x4085) },
3418 +- { USB_DEVICE(0x7392, 0x7722) },
3419 + /* Encore */
3420 + { USB_DEVICE(0x203d, 0x14a1) },
3421 ++ /* Fujitsu Stylistic 550 */
3422 ++ { USB_DEVICE(0x1690, 0x0761) },
3423 + /* Gemtek */
3424 + { USB_DEVICE(0x15a9, 0x0010) },
3425 + /* Gigabyte */
3426 +@@ -1172,19 +1181,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
3427 + /* LevelOne */
3428 + { USB_DEVICE(0x1740, 0x0605) },
3429 + { USB_DEVICE(0x1740, 0x0615) },
3430 +- /* Linksys */
3431 +- { USB_DEVICE(0x1737, 0x0078) },
3432 + /* Logitec */
3433 + { USB_DEVICE(0x0789, 0x0168) },
3434 + { USB_DEVICE(0x0789, 0x0169) },
3435 + /* Motorola */
3436 + { USB_DEVICE(0x100d, 0x9032) },
3437 +- /* Ovislink */
3438 +- { USB_DEVICE(0x1b75, 0x3071) },
3439 +- { USB_DEVICE(0x1b75, 0x3072) },
3440 + /* Pegatron */
3441 + { USB_DEVICE(0x05a6, 0x0101) },
3442 +- { USB_DEVICE(0x1d4d, 0x0002) },
3443 + { USB_DEVICE(0x1d4d, 0x0010) },
3444 + /* Planex */
3445 + { USB_DEVICE(0x2019, 0x5201) },
3446 +@@ -1203,9 +1206,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
3447 + { USB_DEVICE(0x083a, 0xc522) },
3448 + { USB_DEVICE(0x083a, 0xd522) },
3449 + { USB_DEVICE(0x083a, 0xf511) },
3450 +- /* Sweex */
3451 +- { USB_DEVICE(0x177f, 0x0153) },
3452 +- { USB_DEVICE(0x177f, 0x0313) },
3453 + /* Zyxel */
3454 + { USB_DEVICE(0x0586, 0x341a) },
3455 + #endif
3456 +diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
3457 +index d44d398..47ba0f7 100644
3458 +--- a/drivers/net/wireless/rtlwifi/pci.c
3459 ++++ b/drivers/net/wireless/rtlwifi/pci.c
3460 +@@ -1961,6 +1961,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
3461 + rtl_deinit_deferred_work(hw);
3462 + rtlpriv->intf_ops->adapter_stop(hw);
3463 + }
3464 ++ rtlpriv->cfg->ops->disable_interrupt(hw);
3465 +
3466 + /*deinit rfkill */
3467 + rtl_deinit_rfkill(hw);
3468 +diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
3469 +index ba3268e..40c1574 100644
3470 +--- a/drivers/net/wireless/wl1251/main.c
3471 ++++ b/drivers/net/wireless/wl1251/main.c
3472 +@@ -479,6 +479,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
3473 + cancel_work_sync(&wl->irq_work);
3474 + cancel_work_sync(&wl->tx_work);
3475 + cancel_work_sync(&wl->filter_work);
3476 ++ cancel_delayed_work_sync(&wl->elp_work);
3477 +
3478 + mutex_lock(&wl->mutex);
3479 +
3480 +diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
3481 +index f786942..1b851f6 100644
3482 +--- a/drivers/net/wireless/wl1251/sdio.c
3483 ++++ b/drivers/net/wireless/wl1251/sdio.c
3484 +@@ -315,8 +315,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
3485 +
3486 + if (wl->irq)
3487 + free_irq(wl->irq, wl);
3488 +- kfree(wl_sdio);
3489 + wl1251_free_hw(wl);
3490 ++ kfree(wl_sdio);
3491 +
3492 + sdio_claim_host(func);
3493 + sdio_release_irq(func);
3494 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
3495 +index 6476547..78fda9c 100644
3496 +--- a/drivers/pci/quirks.c
3497 ++++ b/drivers/pci/quirks.c
3498 +@@ -2906,6 +2906,40 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
3499 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
3500 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
3501 +
3502 ++/*
3503 ++ * Some BIOS implementations leave the Intel GPU interrupts enabled,
3504 ++ * even though no one is handling them (f.e. i915 driver is never loaded).
3505 ++ * Additionally the interrupt destination is not set up properly
3506 ++ * and the interrupt ends up -somewhere-.
3507 ++ *
3508 ++ * These spurious interrupts are "sticky" and the kernel disables
3509 ++ * the (shared) interrupt line after 100.000+ generated interrupts.
3510 ++ *
3511 ++ * Fix it by disabling the still enabled interrupts.
3512 ++ * This resolves crashes often seen on monitor unplug.
3513 ++ */
3514 ++#define I915_DEIER_REG 0x4400c
3515 ++static void __devinit disable_igfx_irq(struct pci_dev *dev)
3516 ++{
3517 ++ void __iomem *regs = pci_iomap(dev, 0, 0);
3518 ++ if (regs == NULL) {
3519 ++ dev_warn(&dev->dev, "igfx quirk: Can't iomap PCI device\n");
3520 ++ return;
3521 ++ }
3522 ++
3523 ++ /* Check if any interrupt line is still enabled */
3524 ++ if (readl(regs + I915_DEIER_REG) != 0) {
3525 ++ dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; "
3526 ++ "disabling\n");
3527 ++
3528 ++ writel(0, regs + I915_DEIER_REG);
3529 ++ }
3530 ++
3531 ++ pci_iounmap(dev, regs);
3532 ++}
3533 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
3534 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
3535 ++
3536 + static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
3537 + struct pci_fixup *end)
3538 + {
3539 +diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
3540 +index d93e962..1d3bcce 100644
3541 +--- a/drivers/platform/x86/dell-laptop.c
3542 ++++ b/drivers/platform/x86/dell-laptop.c
3543 +@@ -184,6 +184,34 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
3544 + },
3545 + .driver_data = &quirk_dell_vostro_v130,
3546 + },
3547 ++ {
3548 ++ .callback = dmi_matched,
3549 ++ .ident = "Dell Vostro 3555",
3550 ++ .matches = {
3551 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
3552 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
3553 ++ },
3554 ++ .driver_data = &quirk_dell_vostro_v130,
3555 ++ },
3556 ++ {
3557 ++ .callback = dmi_matched,
3558 ++ .ident = "Dell Inspiron N311z",
3559 ++ .matches = {
3560 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
3561 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
3562 ++ },
3563 ++ .driver_data = &quirk_dell_vostro_v130,
3564 ++ },
3565 ++ {
3566 ++ .callback = dmi_matched,
3567 ++ .ident = "Dell Inspiron M5110",
3568 ++ .matches = {
3569 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
3570 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
3571 ++ },
3572 ++ .driver_data = &quirk_dell_vostro_v130,
3573 ++ },
3574 ++ { }
3575 + };
3576 +
3577 + static struct calling_interface_buffer *buffer;
3578 +@@ -615,6 +643,7 @@ static void touchpad_led_set(struct led_classdev *led_cdev,
3579 + static struct led_classdev touchpad_led = {
3580 + .name = "dell-laptop::touchpad",
3581 + .brightness_set = touchpad_led_set,
3582 ++ .flags = LED_CORE_SUSPENDRESUME,
3583 + };
3584 +
3585 + static int __devinit touchpad_led_init(struct device *dev)
3586 +diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
3587 +index 1b831c5..e48ba4b 100644
3588 +--- a/drivers/scsi/libsas/sas_expander.c
3589 ++++ b/drivers/scsi/libsas/sas_expander.c
3590 +@@ -192,7 +192,14 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
3591 + phy->attached_sata_ps = dr->attached_sata_ps;
3592 + phy->attached_iproto = dr->iproto << 1;
3593 + phy->attached_tproto = dr->tproto << 1;
3594 +- memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
3595 ++ /* help some expanders that fail to zero sas_address in the 'no
3596 ++ * device' case
3597 ++ */
3598 ++ if (phy->attached_dev_type == NO_DEVICE ||
3599 ++ phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
3600 ++ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
3601 ++ else
3602 ++ memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
3603 + phy->attached_phy_id = dr->attached_phy_id;
3604 + phy->phy_change_count = dr->change_count;
3605 + phy->routing_attr = dr->routing_attr;
3606 +@@ -1643,9 +1650,17 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
3607 + int phy_change_count = 0;
3608 +
3609 + res = sas_get_phy_change_count(dev, i, &phy_change_count);
3610 +- if (res)
3611 +- goto out;
3612 +- else if (phy_change_count != ex->ex_phy[i].phy_change_count) {
3613 ++ switch (res) {
3614 ++ case SMP_RESP_PHY_VACANT:
3615 ++ case SMP_RESP_NO_PHY:
3616 ++ continue;
3617 ++ case SMP_RESP_FUNC_ACC:
3618 ++ break;
3619 ++ default:
3620 ++ return res;
3621 ++ }
3622 ++
3623 ++ if (phy_change_count != ex->ex_phy[i].phy_change_count) {
3624 + if (update)
3625 + ex->ex_phy[i].phy_change_count =
3626 + phy_change_count;
3627 +@@ -1653,8 +1668,7 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
3628 + return 0;
3629 + }
3630 + }
3631 +-out:
3632 +- return res;
3633 ++ return 0;
3634 + }
3635 +
3636 + static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
3637 +diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
3638 +index 24cacff..5f748c0 100644
3639 +--- a/drivers/spi/spi-fsl-spi.c
3640 ++++ b/drivers/spi/spi-fsl-spi.c
3641 +@@ -139,10 +139,12 @@ static void fsl_spi_change_mode(struct spi_device *spi)
3642 + static void fsl_spi_chipselect(struct spi_device *spi, int value)
3643 + {
3644 + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
3645 +- struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
3646 ++ struct fsl_spi_platform_data *pdata;
3647 + bool pol = spi->mode & SPI_CS_HIGH;
3648 + struct spi_mpc8xxx_cs *cs = spi->controller_state;
3649 +
3650 ++ pdata = spi->dev.parent->parent->platform_data;
3651 ++
3652 + if (value == BITBANG_CS_INACTIVE) {
3653 + if (pdata->cs_control)
3654 + pdata->cs_control(spi, !pol);
3655 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
3656 +index 77eae99..b2ccdea 100644
3657 +--- a/drivers/spi/spi.c
3658 ++++ b/drivers/spi/spi.c
3659 +@@ -319,7 +319,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
3660 + }
3661 +
3662 + spi->master = master;
3663 +- spi->dev.parent = dev;
3664 ++ spi->dev.parent = &master->dev;
3665 + spi->dev.bus = &spi_bus_type;
3666 + spi->dev.release = spidev_release;
3667 + device_initialize(&spi->dev);
3668 +diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
3669 +index fb11743..4bb2797 100644
3670 +--- a/drivers/staging/rtl8712/os_intfs.c
3671 ++++ b/drivers/staging/rtl8712/os_intfs.c
3672 +@@ -476,9 +476,6 @@ static int netdev_close(struct net_device *pnetdev)
3673 + r8712_free_assoc_resources(padapter);
3674 + /*s2-4.*/
3675 + r8712_free_network_queue(padapter);
3676 +- release_firmware(padapter->fw);
3677 +- /* never exit with a firmware callback pending */
3678 +- wait_for_completion(&padapter->rtl8712_fw_ready);
3679 + return 0;
3680 + }
3681 +
3682 +diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
3683 +index 9bade18..ec41d38 100644
3684 +--- a/drivers/staging/rtl8712/usb_intf.c
3685 ++++ b/drivers/staging/rtl8712/usb_intf.c
3686 +@@ -30,6 +30,7 @@
3687 +
3688 + #include <linux/usb.h>
3689 + #include <linux/module.h>
3690 ++#include <linux/firmware.h>
3691 +
3692 + #include "osdep_service.h"
3693 + #include "drv_types.h"
3694 +@@ -621,6 +622,10 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
3695 + struct _adapter *padapter = netdev_priv(pnetdev);
3696 + struct usb_device *udev = interface_to_usbdev(pusb_intf);
3697 +
3698 ++ if (padapter->fw_found)
3699 ++ release_firmware(padapter->fw);
3700 ++ /* never exit with a firmware callback pending */
3701 ++ wait_for_completion(&padapter->rtl8712_fw_ready);
3702 + usb_set_intfdata(pusb_intf, NULL);
3703 + if (padapter) {
3704 + if (drvpriv.drv_registered == true)
3705 +diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
3706 +index b84c834..8daf073 100644
3707 +--- a/drivers/tty/amiserial.c
3708 ++++ b/drivers/tty/amiserial.c
3709 +@@ -1113,8 +1113,10 @@ static int set_serial_info(struct async_struct * info,
3710 + (new_serial.close_delay != state->close_delay) ||
3711 + (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
3712 + ((new_serial.flags & ~ASYNC_USR_MASK) !=
3713 +- (state->flags & ~ASYNC_USR_MASK)))
3714 ++ (state->flags & ~ASYNC_USR_MASK))) {
3715 ++ tty_unlock();
3716 + return -EPERM;
3717 ++ }
3718 + state->flags = ((state->flags & ~ASYNC_USR_MASK) |
3719 + (new_serial.flags & ASYNC_USR_MASK));
3720 + info->flags = ((info->flags & ~ASYNC_USR_MASK) |
3721 +diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
3722 +index e6c3dbd..836fe273 100644
3723 +--- a/drivers/tty/serial/clps711x.c
3724 ++++ b/drivers/tty/serial/clps711x.c
3725 +@@ -154,10 +154,9 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
3726 + port->x_char = 0;
3727 + return IRQ_HANDLED;
3728 + }
3729 +- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
3730 +- clps711xuart_stop_tx(port);
3731 +- return IRQ_HANDLED;
3732 +- }
3733 ++
3734 ++ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
3735 ++ goto disable_tx_irq;
3736 +
3737 + count = port->fifosize >> 1;
3738 + do {
3739 +@@ -171,8 +170,11 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
3740 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
3741 + uart_write_wakeup(port);
3742 +
3743 +- if (uart_circ_empty(xmit))
3744 +- clps711xuart_stop_tx(port);
3745 ++ if (uart_circ_empty(xmit)) {
3746 ++ disable_tx_irq:
3747 ++ disable_irq_nosync(TX_IRQ(port));
3748 ++ tx_enabled(port) = 0;
3749 ++ }
3750 +
3751 + return IRQ_HANDLED;
3752 + }
3753 +diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
3754 +index da776a0..a4b192d 100644
3755 +--- a/drivers/tty/serial/pch_uart.c
3756 ++++ b/drivers/tty/serial/pch_uart.c
3757 +@@ -1356,9 +1356,11 @@ static int pch_uart_verify_port(struct uart_port *port,
3758 + __func__);
3759 + return -EOPNOTSUPP;
3760 + #endif
3761 +- priv->use_dma = 1;
3762 + priv->use_dma_flag = 1;
3763 + dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n");
3764 ++ if (!priv->use_dma)
3765 ++ pch_request_dma(port);
3766 ++ priv->use_dma = 1;
3767 + }
3768 +
3769 + return 0;
3770 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
3771 +index 9eb71d8..2db0327 100644
3772 +--- a/drivers/usb/class/cdc-wdm.c
3773 ++++ b/drivers/usb/class/cdc-wdm.c
3774 +@@ -108,8 +108,9 @@ static void wdm_out_callback(struct urb *urb)
3775 + spin_lock(&desc->iuspin);
3776 + desc->werr = urb->status;
3777 + spin_unlock(&desc->iuspin);
3778 +- clear_bit(WDM_IN_USE, &desc->flags);
3779 + kfree(desc->outbuf);
3780 ++ desc->outbuf = NULL;
3781 ++ clear_bit(WDM_IN_USE, &desc->flags);
3782 + wake_up(&desc->wait);
3783 + }
3784 +
3785 +@@ -312,7 +313,7 @@ static ssize_t wdm_write
3786 + if (we < 0)
3787 + return -EIO;
3788 +
3789 +- desc->outbuf = buf = kmalloc(count, GFP_KERNEL);
3790 ++ buf = kmalloc(count, GFP_KERNEL);
3791 + if (!buf) {
3792 + rv = -ENOMEM;
3793 + goto outnl;
3794 +@@ -376,10 +377,12 @@ static ssize_t wdm_write
3795 + req->wIndex = desc->inum;
3796 + req->wLength = cpu_to_le16(count);
3797 + set_bit(WDM_IN_USE, &desc->flags);
3798 ++ desc->outbuf = buf;
3799 +
3800 + rv = usb_submit_urb(desc->command, GFP_KERNEL);
3801 + if (rv < 0) {
3802 + kfree(buf);
3803 ++ desc->outbuf = NULL;
3804 + clear_bit(WDM_IN_USE, &desc->flags);
3805 + dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
3806 + } else {
3807 +diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
3808 +index 61d08dd..5f1404a 100644
3809 +--- a/drivers/usb/core/hcd-pci.c
3810 ++++ b/drivers/usb/core/hcd-pci.c
3811 +@@ -495,6 +495,15 @@ static int hcd_pci_suspend_noirq(struct device *dev)
3812 +
3813 + pci_save_state(pci_dev);
3814 +
3815 ++ /*
3816 ++ * Some systems crash if an EHCI controller is in D3 during
3817 ++ * a sleep transition. We have to leave such controllers in D0.
3818 ++ */
3819 ++ if (hcd->broken_pci_sleep) {
3820 ++ dev_dbg(dev, "Staying in PCI D0\n");
3821 ++ return retval;
3822 ++ }
3823 ++
3824 + /* If the root hub is dead rather than suspended, disallow remote
3825 + * wakeup. usb_hc_died() should ensure that both hosts are marked as
3826 + * dying, so we only need to check the primary roothub.
3827 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3828 +index e238b3b..2b0a341 100644
3829 +--- a/drivers/usb/core/hub.c
3830 ++++ b/drivers/usb/core/hub.c
3831 +@@ -1644,7 +1644,6 @@ void usb_disconnect(struct usb_device **pdev)
3832 + {
3833 + struct usb_device *udev = *pdev;
3834 + int i;
3835 +- struct usb_hcd *hcd = bus_to_hcd(udev->bus);
3836 +
3837 + /* mark the device as inactive, so any further urb submissions for
3838 + * this device (and any of its children) will fail immediately.
3839 +@@ -1667,9 +1666,7 @@ void usb_disconnect(struct usb_device **pdev)
3840 + * so that the hardware is now fully quiesced.
3841 + */
3842 + dev_dbg (&udev->dev, "unregistering device\n");
3843 +- mutex_lock(hcd->bandwidth_mutex);
3844 + usb_disable_device(udev, 0);
3845 +- mutex_unlock(hcd->bandwidth_mutex);
3846 + usb_hcd_synchronize_unlinks(udev);
3847 +
3848 + usb_remove_ep_devs(&udev->ep0);
3849 +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
3850 +index aed3e07..ca717da 100644
3851 +--- a/drivers/usb/core/message.c
3852 ++++ b/drivers/usb/core/message.c
3853 +@@ -1136,8 +1136,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
3854 + * Deallocates hcd/hardware state for the endpoints (nuking all or most
3855 + * pending urbs) and usbcore state for the interfaces, so that usbcore
3856 + * must usb_set_configuration() before any interfaces could be used.
3857 +- *
3858 +- * Must be called with hcd->bandwidth_mutex held.
3859 + */
3860 + void usb_disable_device(struct usb_device *dev, int skip_ep0)
3861 + {
3862 +@@ -1190,7 +1188,9 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
3863 + usb_disable_endpoint(dev, i + USB_DIR_IN, false);
3864 + }
3865 + /* Remove endpoints from the host controller internal state */
3866 ++ mutex_lock(hcd->bandwidth_mutex);
3867 + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
3868 ++ mutex_unlock(hcd->bandwidth_mutex);
3869 + /* Second pass: remove endpoint pointers */
3870 + }
3871 + for (i = skip_ep0; i < 16; ++i) {
3872 +@@ -1750,7 +1750,6 @@ free_interfaces:
3873 + /* if it's already configured, clear out old state first.
3874 + * getting rid of old interfaces means unbinding their drivers.
3875 + */
3876 +- mutex_lock(hcd->bandwidth_mutex);
3877 + if (dev->state != USB_STATE_ADDRESS)
3878 + usb_disable_device(dev, 1); /* Skip ep0 */
3879 +
3880 +@@ -1763,6 +1762,7 @@ free_interfaces:
3881 + * host controller will not allow submissions to dropped endpoints. If
3882 + * this call fails, the device state is unchanged.
3883 + */
3884 ++ mutex_lock(hcd->bandwidth_mutex);
3885 + ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
3886 + if (ret < 0) {
3887 + mutex_unlock(hcd->bandwidth_mutex);
3888 +diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
3889 +index 27bd50a..c0dcf69 100644
3890 +--- a/drivers/usb/dwc3/ep0.c
3891 ++++ b/drivers/usb/dwc3/ep0.c
3892 +@@ -572,9 +572,10 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
3893 + dwc->ep0_bounced = false;
3894 + } else {
3895 + transferred = ur->length - trb.length;
3896 +- ur->actual += transferred;
3897 + }
3898 +
3899 ++ ur->actual += transferred;
3900 ++
3901 + if ((epnum & 1) && ur->actual < ur->length) {
3902 + /* for some reason we did not get everything out */
3903 +
3904 +diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
3905 +index ab8f1b4..527736e 100644
3906 +--- a/drivers/usb/gadget/dummy_hcd.c
3907 ++++ b/drivers/usb/gadget/dummy_hcd.c
3908 +@@ -925,7 +925,6 @@ static int dummy_udc_stop(struct usb_gadget *g,
3909 +
3910 + dum->driver = NULL;
3911 +
3912 +- dummy_pullup(&dum->gadget, 0);
3913 + return 0;
3914 + }
3915 +
3916 +diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
3917 +index acb3800..0e641a1 100644
3918 +--- a/drivers/usb/gadget/f_fs.c
3919 ++++ b/drivers/usb/gadget/f_fs.c
3920 +@@ -712,7 +712,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
3921 + if (code == FUNCTIONFS_INTERFACE_REVMAP) {
3922 + struct ffs_function *func = ffs->func;
3923 + ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
3924 +- } else if (gadget->ops->ioctl) {
3925 ++ } else if (gadget && gadget->ops->ioctl) {
3926 + ret = gadget->ops->ioctl(gadget, code, value);
3927 + } else {
3928 + ret = -ENOTTY;
3929 +diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
3930 +index 1a6f415..a5570b6 100644
3931 +--- a/drivers/usb/gadget/f_mass_storage.c
3932 ++++ b/drivers/usb/gadget/f_mass_storage.c
3933 +@@ -2182,7 +2182,7 @@ unknown_cmnd:
3934 + common->data_size_from_cmnd = 0;
3935 + sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
3936 + reply = check_command(common, common->cmnd_size,
3937 +- DATA_DIR_UNKNOWN, 0xff, 0, unknown);
3938 ++ DATA_DIR_UNKNOWN, ~0, 0, unknown);
3939 + if (reply == 0) {
3940 + common->curlun->sense_data = SS_INVALID_COMMAND;
3941 + reply = -EINVAL;
3942 +diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
3943 +index 11b5196..db2d607 100644
3944 +--- a/drivers/usb/gadget/file_storage.c
3945 ++++ b/drivers/usb/gadget/file_storage.c
3946 +@@ -2569,7 +2569,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
3947 + fsg->data_size_from_cmnd = 0;
3948 + sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
3949 + if ((reply = check_command(fsg, fsg->cmnd_size,
3950 +- DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
3951 ++ DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) {
3952 + fsg->curlun->sense_data = SS_INVALID_COMMAND;
3953 + reply = -EINVAL;
3954 + }
3955 +diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
3956 +index 6939e17..901924a 100644
3957 +--- a/drivers/usb/gadget/udc-core.c
3958 ++++ b/drivers/usb/gadget/udc-core.c
3959 +@@ -211,9 +211,9 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
3960 +
3961 + if (udc_is_newstyle(udc)) {
3962 + udc->driver->disconnect(udc->gadget);
3963 ++ usb_gadget_disconnect(udc->gadget);
3964 + udc->driver->unbind(udc->gadget);
3965 + usb_gadget_udc_stop(udc->gadget, udc->driver);
3966 +- usb_gadget_disconnect(udc->gadget);
3967 + } else {
3968 + usb_gadget_stop(udc->gadget, udc->driver);
3969 + }
3970 +@@ -359,9 +359,13 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
3971 + struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
3972 +
3973 + if (sysfs_streq(buf, "connect")) {
3974 ++ if (udc_is_newstyle(udc))
3975 ++ usb_gadget_udc_start(udc->gadget, udc->driver);
3976 + usb_gadget_connect(udc->gadget);
3977 + } else if (sysfs_streq(buf, "disconnect")) {
3978 + usb_gadget_disconnect(udc->gadget);
3979 ++ if (udc_is_newstyle(udc))
3980 ++ usb_gadget_udc_stop(udc->gadget, udc->driver);
3981 + } else {
3982 + dev_err(dev, "unsupported command '%s'\n", buf);
3983 + return -EINVAL;
3984 +diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h
3985 +index bc78c60..ca4e03a 100644
3986 +--- a/drivers/usb/gadget/uvc.h
3987 ++++ b/drivers/usb/gadget/uvc.h
3988 +@@ -28,7 +28,7 @@
3989 +
3990 + struct uvc_request_data
3991 + {
3992 +- unsigned int length;
3993 ++ __s32 length;
3994 + __u8 data[60];
3995 + };
3996 +
3997 +diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
3998 +index f6e083b..54d7ca5 100644
3999 +--- a/drivers/usb/gadget/uvc_v4l2.c
4000 ++++ b/drivers/usb/gadget/uvc_v4l2.c
4001 +@@ -39,7 +39,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
4002 + if (data->length < 0)
4003 + return usb_ep_set_halt(cdev->gadget->ep0);
4004 +
4005 +- req->length = min(uvc->event_length, data->length);
4006 ++ req->length = min_t(unsigned int, uvc->event_length, data->length);
4007 + req->zero = data->length < uvc->event_length;
4008 + req->dma = DMA_ADDR_INVALID;
4009 +
4010 +diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
4011 +index 3ff9f82..da2f711 100644
4012 +--- a/drivers/usb/host/ehci-hcd.c
4013 ++++ b/drivers/usb/host/ehci-hcd.c
4014 +@@ -815,8 +815,13 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
4015 + goto dead;
4016 + }
4017 +
4018 ++ /*
4019 ++ * We don't use STS_FLR, but some controllers don't like it to
4020 ++ * remain on, so mask it out along with the other status bits.
4021 ++ */
4022 ++ masked_status = status & (INTR_MASK | STS_FLR);
4023 ++
4024 + /* Shared IRQ? */
4025 +- masked_status = status & INTR_MASK;
4026 + if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
4027 + spin_unlock(&ehci->lock);
4028 + return IRQ_NONE;
4029 +@@ -867,7 +872,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
4030 + pcd_status = status;
4031 +
4032 + /* resume root hub? */
4033 +- if (!(cmd & CMD_RUN))
4034 ++ if (ehci->rh_state == EHCI_RH_SUSPENDED)
4035 + usb_hcd_resume_root_hub(hcd);
4036 +
4037 + /* get per-port change detect bits */
4038 +diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
4039 +index f4b627d..971d312 100644
4040 +--- a/drivers/usb/host/ehci-pci.c
4041 ++++ b/drivers/usb/host/ehci-pci.c
4042 +@@ -144,6 +144,14 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
4043 + hcd->has_tt = 1;
4044 + tdi_reset(ehci);
4045 + }
4046 ++ if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
4047 ++ /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
4048 ++ if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
4049 ++ ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
4050 ++ hcd->broken_pci_sleep = 1;
4051 ++ device_set_wakeup_capable(&pdev->dev, false);
4052 ++ }
4053 ++ }
4054 + break;
4055 + case PCI_VENDOR_ID_TDI:
4056 + if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
4057 +diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
4058 +index ac5bfd6..2504694 100644
4059 +--- a/drivers/usb/misc/yurex.c
4060 ++++ b/drivers/usb/misc/yurex.c
4061 +@@ -99,9 +99,7 @@ static void yurex_delete(struct kref *kref)
4062 + usb_put_dev(dev->udev);
4063 + if (dev->cntl_urb) {
4064 + usb_kill_urb(dev->cntl_urb);
4065 +- if (dev->cntl_req)
4066 +- usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
4067 +- dev->cntl_req, dev->cntl_urb->setup_dma);
4068 ++ kfree(dev->cntl_req);
4069 + if (dev->cntl_buffer)
4070 + usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
4071 + dev->cntl_buffer, dev->cntl_urb->transfer_dma);
4072 +@@ -234,9 +232,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
4073 + }
4074 +
4075 + /* allocate buffer for control req */
4076 +- dev->cntl_req = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
4077 +- GFP_KERNEL,
4078 +- &dev->cntl_urb->setup_dma);
4079 ++ dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL);
4080 + if (!dev->cntl_req) {
4081 + err("Could not allocate cntl_req");
4082 + goto error;
4083 +@@ -286,7 +282,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
4084 + usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr),
4085 + dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt,
4086 + dev, 1);
4087 +- dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
4088 ++ dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
4089 + if (usb_submit_urb(dev->urb, GFP_KERNEL)) {
4090 + retval = -EIO;
4091 + err("Could not submitting URB");
4092 +diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
4093 +index ba85f27..a8f0c09 100644
4094 +--- a/drivers/usb/musb/omap2430.c
4095 ++++ b/drivers/usb/musb/omap2430.c
4096 +@@ -282,7 +282,8 @@ static int musb_otg_notifications(struct notifier_block *nb,
4097 +
4098 + static int omap2430_musb_init(struct musb *musb)
4099 + {
4100 +- u32 l, status = 0;
4101 ++ u32 l;
4102 ++ int status = 0;
4103 + struct device *dev = musb->controller;
4104 + struct musb_hdrc_platform_data *plat = dev->platform_data;
4105 + struct omap_musb_board_data *data = plat->board_data;
4106 +@@ -299,7 +300,7 @@ static int omap2430_musb_init(struct musb *musb)
4107 +
4108 + status = pm_runtime_get_sync(dev);
4109 + if (status < 0) {
4110 +- dev_err(dev, "pm_runtime_get_sync FAILED");
4111 ++ dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
4112 + goto err1;
4113 + }
4114 +
4115 +@@ -451,14 +452,14 @@ static int __init omap2430_probe(struct platform_device *pdev)
4116 + goto err2;
4117 + }
4118 +
4119 ++ pm_runtime_enable(&pdev->dev);
4120 ++
4121 + ret = platform_device_add(musb);
4122 + if (ret) {
4123 + dev_err(&pdev->dev, "failed to register musb device\n");
4124 + goto err2;
4125 + }
4126 +
4127 +- pm_runtime_enable(&pdev->dev);
4128 +-
4129 + return 0;
4130 +
4131 + err2:
4132 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
4133 +index 4c12404..f2c57e0 100644
4134 +--- a/drivers/usb/serial/cp210x.c
4135 ++++ b/drivers/usb/serial/cp210x.c
4136 +@@ -285,7 +285,8 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
4137 + /* Issue the request, attempting to read 'size' bytes */
4138 + result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
4139 + request, REQTYPE_DEVICE_TO_HOST, 0x0000,
4140 +- port_priv->bInterfaceNumber, buf, size, 300);
4141 ++ port_priv->bInterfaceNumber, buf, size,
4142 ++ USB_CTRL_GET_TIMEOUT);
4143 +
4144 + /* Convert data into an array of integers */
4145 + for (i = 0; i < length; i++)
4146 +@@ -335,12 +336,14 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
4147 + result = usb_control_msg(serial->dev,
4148 + usb_sndctrlpipe(serial->dev, 0),
4149 + request, REQTYPE_HOST_TO_DEVICE, 0x0000,
4150 +- port_priv->bInterfaceNumber, buf, size, 300);
4151 ++ port_priv->bInterfaceNumber, buf, size,
4152 ++ USB_CTRL_SET_TIMEOUT);
4153 + } else {
4154 + result = usb_control_msg(serial->dev,
4155 + usb_sndctrlpipe(serial->dev, 0),
4156 + request, REQTYPE_HOST_TO_DEVICE, data[0],
4157 +- port_priv->bInterfaceNumber, NULL, 0, 300);
4158 ++ port_priv->bInterfaceNumber, NULL, 0,
4159 ++ USB_CTRL_SET_TIMEOUT);
4160 + }
4161 +
4162 + kfree(buf);
4163 +diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
4164 +index 7c3ec9e..e093585 100644
4165 +--- a/drivers/usb/serial/sierra.c
4166 ++++ b/drivers/usb/serial/sierra.c
4167 +@@ -221,7 +221,7 @@ static const struct sierra_iface_info typeB_interface_list = {
4168 + };
4169 +
4170 + /* 'blacklist' of interfaces not served by this driver */
4171 +-static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 };
4172 ++static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11, 19, 20 };
4173 + static const struct sierra_iface_info direct_ip_interface_blacklist = {
4174 + .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces),
4175 + .ifaceinfo = direct_ip_non_serial_ifaces,
4176 +@@ -289,7 +289,6 @@ static const struct usb_device_id id_table[] = {
4177 + { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */
4178 + { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */
4179 + { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
4180 +- { USB_DEVICE(0x1199, 0x68A2) }, /* Sierra Wireless MC7710 */
4181 + /* Sierra Wireless C885 */
4182 + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
4183 + /* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */
4184 +@@ -299,6 +298,9 @@ static const struct usb_device_id id_table[] = {
4185 + /* Sierra Wireless HSPA Non-Composite Device */
4186 + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
4187 + { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
4188 ++ { USB_DEVICE(0x1199, 0x68A2), /* Sierra Wireless MC77xx in QMI mode */
4189 ++ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
4190 ++ },
4191 + { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
4192 + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
4193 + },
4194 +diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
4195 +index 2babcd4..86685e9 100644
4196 +--- a/drivers/uwb/hwa-rc.c
4197 ++++ b/drivers/uwb/hwa-rc.c
4198 +@@ -645,7 +645,8 @@ void hwarc_neep_cb(struct urb *urb)
4199 + dev_err(dev, "NEEP: URB error %d\n", urb->status);
4200 + }
4201 + result = usb_submit_urb(urb, GFP_ATOMIC);
4202 +- if (result < 0) {
4203 ++ if (result < 0 && result != -ENODEV && result != -EPERM) {
4204 ++ /* ignoring unrecoverable errors */
4205 + dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n",
4206 + result);
4207 + goto error;
4208 +diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
4209 +index a269937..8cb71bb 100644
4210 +--- a/drivers/uwb/neh.c
4211 ++++ b/drivers/uwb/neh.c
4212 +@@ -107,6 +107,7 @@ struct uwb_rc_neh {
4213 + u8 evt_type;
4214 + __le16 evt;
4215 + u8 context;
4216 ++ u8 completed;
4217 + uwb_rc_cmd_cb_f cb;
4218 + void *arg;
4219 +
4220 +@@ -409,6 +410,7 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size
4221 + struct device *dev = &rc->uwb_dev.dev;
4222 + struct uwb_rc_neh *neh;
4223 + struct uwb_rceb *notif;
4224 ++ unsigned long flags;
4225 +
4226 + if (rceb->bEventContext == 0) {
4227 + notif = kmalloc(size, GFP_ATOMIC);
4228 +@@ -422,7 +424,11 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size
4229 + } else {
4230 + neh = uwb_rc_neh_lookup(rc, rceb);
4231 + if (neh) {
4232 +- del_timer_sync(&neh->timer);
4233 ++ spin_lock_irqsave(&rc->neh_lock, flags);
4234 ++ /* to guard against a timeout */
4235 ++ neh->completed = 1;
4236 ++ del_timer(&neh->timer);
4237 ++ spin_unlock_irqrestore(&rc->neh_lock, flags);
4238 + uwb_rc_neh_cb(neh, rceb, size);
4239 + } else
4240 + dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
4241 +@@ -568,6 +574,10 @@ static void uwb_rc_neh_timer(unsigned long arg)
4242 + unsigned long flags;
4243 +
4244 + spin_lock_irqsave(&rc->neh_lock, flags);
4245 ++ if (neh->completed) {
4246 ++ spin_unlock_irqrestore(&rc->neh_lock, flags);
4247 ++ return;
4248 ++ }
4249 + if (neh->context)
4250 + __uwb_rc_neh_rm(rc, neh);
4251 + else
4252 +diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
4253 +index afca14d..625890c 100644
4254 +--- a/drivers/xen/gntdev.c
4255 ++++ b/drivers/xen/gntdev.c
4256 +@@ -692,7 +692,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
4257 + vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND;
4258 +
4259 + if (use_ptemod)
4260 +- vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP;
4261 ++ vma->vm_flags |= VM_DONTCOPY;
4262 +
4263 + vma->vm_private_data = map;
4264 +
4265 +diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
4266 +index 2f73195..2ce95c0 100644
4267 +--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
4268 ++++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
4269 +@@ -129,7 +129,7 @@ static int read_backend_details(struct xenbus_device *xendev)
4270 + return xenbus_read_otherend_details(xendev, "backend-id", "backend");
4271 + }
4272 +
4273 +-static int is_device_connecting(struct device *dev, void *data)
4274 ++static int is_device_connecting(struct device *dev, void *data, bool ignore_nonessential)
4275 + {
4276 + struct xenbus_device *xendev = to_xenbus_device(dev);
4277 + struct device_driver *drv = data;
4278 +@@ -146,16 +146,41 @@ static int is_device_connecting(struct device *dev, void *data)
4279 + if (drv && (dev->driver != drv))
4280 + return 0;
4281 +
4282 ++ if (ignore_nonessential) {
4283 ++ /* With older QEMU, for PVonHVM guests the guest config files
4284 ++ * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0']
4285 ++ * which is nonsensical as there is no PV FB (there can be
4286 ++ * a PVKB) running as HVM guest. */
4287 ++
4288 ++ if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0))
4289 ++ return 0;
4290 ++
4291 ++ if ((strncmp(xendev->nodename, "device/vfb", 10) == 0))
4292 ++ return 0;
4293 ++ }
4294 + xendrv = to_xenbus_driver(dev->driver);
4295 + return (xendev->state < XenbusStateConnected ||
4296 + (xendev->state == XenbusStateConnected &&
4297 + xendrv->is_ready && !xendrv->is_ready(xendev)));
4298 + }
4299 ++static int essential_device_connecting(struct device *dev, void *data)
4300 ++{
4301 ++ return is_device_connecting(dev, data, true /* ignore PV[KBB+FB] */);
4302 ++}
4303 ++static int non_essential_device_connecting(struct device *dev, void *data)
4304 ++{
4305 ++ return is_device_connecting(dev, data, false);
4306 ++}
4307 +
4308 +-static int exists_connecting_device(struct device_driver *drv)
4309 ++static int exists_essential_connecting_device(struct device_driver *drv)
4310 + {
4311 + return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
4312 +- is_device_connecting);
4313 ++ essential_device_connecting);
4314 ++}
4315 ++static int exists_non_essential_connecting_device(struct device_driver *drv)
4316 ++{
4317 ++ return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
4318 ++ non_essential_device_connecting);
4319 + }
4320 +
4321 + static int print_device_status(struct device *dev, void *data)
4322 +@@ -186,6 +211,23 @@ static int print_device_status(struct device *dev, void *data)
4323 + /* We only wait for device setup after most initcalls have run. */
4324 + static int ready_to_wait_for_devices;
4325 +
4326 ++static bool wait_loop(unsigned long start, unsigned int max_delay,
4327 ++ unsigned int *seconds_waited)
4328 ++{
4329 ++ if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) {
4330 ++ if (!*seconds_waited)
4331 ++ printk(KERN_WARNING "XENBUS: Waiting for "
4332 ++ "devices to initialise: ");
4333 ++ *seconds_waited += 5;
4334 ++ printk("%us...", max_delay - *seconds_waited);
4335 ++ if (*seconds_waited == max_delay)
4336 ++ return true;
4337 ++ }
4338 ++
4339 ++ schedule_timeout_interruptible(HZ/10);
4340 ++
4341 ++ return false;
4342 ++}
4343 + /*
4344 + * On a 5-minute timeout, wait for all devices currently configured. We need
4345 + * to do this to guarantee that the filesystems and / or network devices
4346 +@@ -209,19 +251,14 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
4347 + if (!ready_to_wait_for_devices || !xen_domain())
4348 + return;
4349 +
4350 +- while (exists_connecting_device(drv)) {
4351 +- if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
4352 +- if (!seconds_waited)
4353 +- printk(KERN_WARNING "XENBUS: Waiting for "
4354 +- "devices to initialise: ");
4355 +- seconds_waited += 5;
4356 +- printk("%us...", 300 - seconds_waited);
4357 +- if (seconds_waited == 300)
4358 +- break;
4359 +- }
4360 +-
4361 +- schedule_timeout_interruptible(HZ/10);
4362 +- }
4363 ++ while (exists_non_essential_connecting_device(drv))
4364 ++ if (wait_loop(start, 30, &seconds_waited))
4365 ++ break;
4366 ++
4367 ++ /* Skips PVKB and PVFB check.*/
4368 ++ while (exists_essential_connecting_device(drv))
4369 ++ if (wait_loop(start, 270, &seconds_waited))
4370 ++ break;
4371 +
4372 + if (seconds_waited)
4373 + printk("\n");
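For readers skimming the hardened patchset: the xenbus hunk above replaces the single 300-second probe wait with two passes over the frontend devices that share one elapsed-seconds counter. Devices an HVM guest will never actually connect (PV vkbd/vfb left behind by a stale vfb= line in the guest config) only get a 30-second budget; everything else gets the remaining time. Below is a rough userspace sketch of that two-phase wait pattern, with stand-in *_pending() helpers -- nothing in it is taken from the kernel sources.

/* Not part of the patch: userspace sketch of the two-phase wait above.
 * Optional work gets a 30 second budget, essential work gets the rest;
 * both phases share one elapsed-seconds counter from the same start. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int fake_remaining = 3;                  /* pretend work queue */
static bool optional_pending(void)  { return false; }
static bool essential_pending(void) { return fake_remaining-- > 0; }

static bool wait_loop(time_t start, unsigned max_delay, unsigned *waited)
{
        if (time(NULL) > (time_t)(start + *waited + 5)) {
                *waited += 5;
                printf("%us...\n", max_delay - *waited);
                if (*waited == max_delay)
                        return true;            /* out of time */
        }
        usleep(100 * 1000);                     /* poll at roughly 10 Hz */
        return false;
}

int main(void)
{
        time_t start = time(NULL);
        unsigned waited = 0;

        while (optional_pending())              /* e.g. PV vkbd/vfb */
                if (wait_loop(start, 30, &waited))
                        break;
        while (essential_pending())             /* e.g. disk, network */
                if (wait_loop(start, 270, &waited))
                        break;
        printf("waited for %u seconds\n", waited);
        return 0;
}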
4374 +diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
4375 +index 308a98b..650d520 100644
4376 +--- a/fs/autofs4/autofs_i.h
4377 ++++ b/fs/autofs4/autofs_i.h
4378 +@@ -110,7 +110,6 @@ struct autofs_sb_info {
4379 + int sub_version;
4380 + int min_proto;
4381 + int max_proto;
4382 +- int compat_daemon;
4383 + unsigned long exp_timeout;
4384 + unsigned int type;
4385 + int reghost_enabled;
4386 +@@ -269,6 +268,17 @@ int autofs4_fill_super(struct super_block *, void *, int);
4387 + struct autofs_info *autofs4_new_ino(struct autofs_sb_info *);
4388 + void autofs4_clean_ino(struct autofs_info *);
4389 +
4390 ++static inline int autofs_prepare_pipe(struct file *pipe)
4391 ++{
4392 ++ if (!pipe->f_op || !pipe->f_op->write)
4393 ++ return -EINVAL;
4394 ++ if (!S_ISFIFO(pipe->f_dentry->d_inode->i_mode))
4395 ++ return -EINVAL;
4396 ++ /* We want a packet pipe */
4397 ++ pipe->f_flags |= O_DIRECT;
4398 ++ return 0;
4399 ++}
4400 ++
4401 + /* Queue management functions */
4402 +
4403 + int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify);
4404 +diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
4405 +index 56bac70..de54271 100644
4406 +--- a/fs/autofs4/dev-ioctl.c
4407 ++++ b/fs/autofs4/dev-ioctl.c
4408 +@@ -376,7 +376,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
4409 + err = -EBADF;
4410 + goto out;
4411 + }
4412 +- if (!pipe->f_op || !pipe->f_op->write) {
4413 ++ if (autofs_prepare_pipe(pipe) < 0) {
4414 + err = -EPIPE;
4415 + fput(pipe);
4416 + goto out;
4417 +@@ -385,7 +385,6 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
4418 + sbi->pipefd = pipefd;
4419 + sbi->pipe = pipe;
4420 + sbi->catatonic = 0;
4421 +- sbi->compat_daemon = is_compat_task();
4422 + }
4423 + out:
4424 + mutex_unlock(&sbi->wq_mutex);
4425 +diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
4426 +index 98a5695..7b5293e 100644
4427 +--- a/fs/autofs4/inode.c
4428 ++++ b/fs/autofs4/inode.c
4429 +@@ -19,7 +19,6 @@
4430 + #include <linux/parser.h>
4431 + #include <linux/bitops.h>
4432 + #include <linux/magic.h>
4433 +-#include <linux/compat.h>
4434 + #include "autofs_i.h"
4435 + #include <linux/module.h>
4436 +
4437 +@@ -225,7 +224,6 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
4438 + set_autofs_type_indirect(&sbi->type);
4439 + sbi->min_proto = 0;
4440 + sbi->max_proto = 0;
4441 +- sbi->compat_daemon = is_compat_task();
4442 + mutex_init(&sbi->wq_mutex);
4443 + spin_lock_init(&sbi->fs_lock);
4444 + sbi->queues = NULL;
4445 +@@ -294,7 +292,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
4446 + printk("autofs: could not open pipe file descriptor\n");
4447 + goto fail_dput;
4448 + }
4449 +- if (!pipe->f_op || !pipe->f_op->write)
4450 ++ if (autofs_prepare_pipe(pipe) < 0)
4451 + goto fail_fput;
4452 + sbi->pipe = pipe;
4453 + sbi->pipefd = pipefd;
4454 +diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
4455 +index 6861f61..e1fbdee 100644
4456 +--- a/fs/autofs4/waitq.c
4457 ++++ b/fs/autofs4/waitq.c
4458 +@@ -90,24 +90,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
4459 +
4460 + return (bytes > 0);
4461 + }
4462 +-
4463 +-/*
4464 +- * The autofs_v5 packet was misdesigned.
4465 +- *
4466 +- * The packets are identical on x86-32 and x86-64, but have different
4467 +- * alignment. Which means that 'sizeof()' will give different results.
4468 +- * Fix it up for the case of running 32-bit user mode on a 64-bit kernel.
4469 +- */
4470 +-static noinline size_t autofs_v5_packet_size(struct autofs_sb_info *sbi)
4471 +-{
4472 +- size_t pktsz = sizeof(struct autofs_v5_packet);
4473 +-#if defined(CONFIG_X86_64) && defined(CONFIG_COMPAT)
4474 +- if (sbi->compat_daemon > 0)
4475 +- pktsz -= 4;
4476 +-#endif
4477 +- return pktsz;
4478 +-}
4479 +-
4480 ++
4481 + static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
4482 + struct autofs_wait_queue *wq,
4483 + int type)
4484 +@@ -164,7 +147,8 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
4485 + {
4486 + struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
4487 +
4488 +- pktsz = autofs_v5_packet_size(sbi);
4489 ++ pktsz = sizeof(*packet);
4490 ++
4491 + packet->wait_queue_token = wq->wait_queue_token;
4492 + packet->len = wq->name.len;
4493 + memcpy(packet->name, wq->name.name, wq->name.len);
4494 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
4495 +index 6738503..83a871f 100644
4496 +--- a/fs/btrfs/ctree.h
4497 ++++ b/fs/btrfs/ctree.h
4498 +@@ -2025,7 +2025,7 @@ BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item,
4499 +
4500 + static inline bool btrfs_root_readonly(struct btrfs_root *root)
4501 + {
4502 +- return root->root_item.flags & BTRFS_ROOT_SUBVOL_RDONLY;
4503 ++ return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
4504 + }
4505 +
4506 + /* struct btrfs_root_backup */
4507 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
4508 +index 0e6adac..e89803b 100644
4509 +--- a/fs/cifs/cifssmb.c
4510 ++++ b/fs/cifs/cifssmb.c
4511 +@@ -4826,8 +4826,12 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
4512 + max_len = data_end - temp;
4513 + node->node_name = cifs_strndup_from_ucs(temp, max_len,
4514 + is_unicode, nls_codepage);
4515 +- if (!node->node_name)
4516 ++ if (!node->node_name) {
4517 + rc = -ENOMEM;
4518 ++ goto parse_DFS_referrals_exit;
4519 ++ }
4520 ++
4521 ++ ref++;
4522 + }
4523 +
4524 + parse_DFS_referrals_exit:
4525 +diff --git a/fs/eventpoll.c b/fs/eventpoll.c
4526 +index ea54cde..4d9d3a4 100644
4527 +--- a/fs/eventpoll.c
4528 ++++ b/fs/eventpoll.c
4529 +@@ -988,6 +988,10 @@ static int path_count[PATH_ARR_SIZE];
4530 +
4531 + static int path_count_inc(int nests)
4532 + {
4533 ++ /* Allow an arbitrary number of depth 1 paths */
4534 ++ if (nests == 0)
4535 ++ return 0;
4536 ++
4537 + if (++path_count[nests] > path_limits[nests])
4538 + return -1;
4539 + return 0;
4540 +diff --git a/fs/exec.c b/fs/exec.c
4541 +index 3625464..160cd2f 100644
4542 +--- a/fs/exec.c
4543 ++++ b/fs/exec.c
4544 +@@ -973,6 +973,9 @@ static int de_thread(struct task_struct *tsk)
4545 + sig->notify_count = 0;
4546 +
4547 + no_thread_group:
4548 ++ /* we have changed execution domain */
4549 ++ tsk->exit_signal = SIGCHLD;
4550 ++
4551 + if (current->mm)
4552 + setmax_mm_hiwater_rss(&sig->maxrss, current->mm);
4553 +
4554 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
4555 +index c2a2012..54f2bdc 100644
4556 +--- a/fs/ext4/extents.c
4557 ++++ b/fs/ext4/extents.c
4558 +@@ -2812,7 +2812,7 @@ static int ext4_split_extent_at(handle_t *handle,
4559 + if (err)
4560 + goto fix_extent_len;
4561 + /* update the extent length and mark as initialized */
4562 +- ex->ee_len = cpu_to_le32(ee_len);
4563 ++ ex->ee_len = cpu_to_le16(ee_len);
4564 + ext4_ext_try_to_merge(inode, path, ex);
4565 + err = ext4_ext_dirty(handle, inode, path + depth);
4566 + goto out;
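The one-character ee_len change above, like the later ocfs2 hunks that switch rl_used and l_next_free_rec over to le16_to_cpu(), is a fixed-width endianness fix: the on-disk fields are 16 bits wide, and using the 32- or 64-bit helpers happens to work on little-endian machines (where the conversions are no-ops and the assignment truncates to the right bytes) but stores or reads garbage on big-endian ones. A standalone sketch, simulating a big-endian CPU so the effect shows up whatever host you run it on:

/* Not part of the patch: why the 16- vs 32-bit endian helpers matter.
 * cpu_to_le32()/cpu_to_le16() are modelled for a hypothetical big-endian
 * CPU by swapping bytes explicitly. */
#include <stdint.h>
#include <stdio.h>

static uint32_t be_cpu_to_le32(uint32_t v)      /* cpu_to_le32() on BE */
{
        return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
               ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

static uint16_t be_cpu_to_le16(uint16_t v)      /* cpu_to_le16() on BE */
{
        return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
        uint16_t ee_len = 0x0005;               /* extent length: 5 blocks */
        uint16_t stored_wrong, stored_right;

        /* ee_len is a 16-bit on-disk field; assigning a 32-bit conversion
         * result truncates it to the low 16 bits, which on a big-endian
         * CPU no longer contain the value. */
        stored_wrong = (uint16_t)be_cpu_to_le32(ee_len);  /* 0x0000 */
        stored_right = be_cpu_to_le16(ee_len);            /* 0x0500, i.e. LE 5 */

        printf("wrong helper stores 0x%04x, correct helper stores 0x%04x\n",
               stored_wrong, stored_right);
        return 0;
}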
4567 +diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
4568 +index 4dfbfec..ec2a9c2 100644
4569 +--- a/fs/hfsplus/catalog.c
4570 ++++ b/fs/hfsplus/catalog.c
4571 +@@ -366,6 +366,10 @@ int hfsplus_rename_cat(u32 cnid,
4572 + err = hfs_brec_find(&src_fd);
4573 + if (err)
4574 + goto out;
4575 ++ if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
4576 ++ err = -EIO;
4577 ++ goto out;
4578 ++ }
4579 +
4580 + hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
4581 + src_fd.entrylength);
4582 +diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
4583 +index 4536cd3..5adb740 100644
4584 +--- a/fs/hfsplus/dir.c
4585 ++++ b/fs/hfsplus/dir.c
4586 +@@ -150,6 +150,11 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
4587 + filp->f_pos++;
4588 + /* fall through */
4589 + case 1:
4590 ++ if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
4591 ++ err = -EIO;
4592 ++ goto out;
4593 ++ }
4594 ++
4595 + hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
4596 + fd.entrylength);
4597 + if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
4598 +@@ -181,6 +186,12 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
4599 + err = -EIO;
4600 + goto out;
4601 + }
4602 ++
4603 ++ if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
4604 ++ err = -EIO;
4605 ++ goto out;
4606 ++ }
4607 ++
4608 + hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
4609 + fd.entrylength);
4610 + type = be16_to_cpu(entry.type);
4611 +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
4612 +index 68d704d..d751f04 100644
4613 +--- a/fs/jbd2/commit.c
4614 ++++ b/fs/jbd2/commit.c
4615 +@@ -683,7 +683,7 @@ start_journal_io:
4616 + if (commit_transaction->t_need_data_flush &&
4617 + (journal->j_fs_dev != journal->j_dev) &&
4618 + (journal->j_flags & JBD2_BARRIER))
4619 +- blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
4620 ++ blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
4621 +
4622 + /* Done it all: now write the commit record asynchronously. */
4623 + if (JBD2_HAS_INCOMPAT_FEATURE(journal,
4624 +@@ -819,7 +819,7 @@ wait_for_iobuf:
4625 + if (JBD2_HAS_INCOMPAT_FEATURE(journal,
4626 + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
4627 + journal->j_flags & JBD2_BARRIER) {
4628 +- blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
4629 ++ blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
4630 + }
4631 +
4632 + if (err)
4633 +diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
4634 +index f848b52..046bb77 100644
4635 +--- a/fs/lockd/clnt4xdr.c
4636 ++++ b/fs/lockd/clnt4xdr.c
4637 +@@ -241,7 +241,7 @@ static int decode_nlm4_stat(struct xdr_stream *xdr, __be32 *stat)
4638 + p = xdr_inline_decode(xdr, 4);
4639 + if (unlikely(p == NULL))
4640 + goto out_overflow;
4641 +- if (unlikely(*p > nlm4_failed))
4642 ++ if (unlikely(ntohl(*p) > ntohl(nlm4_failed)))
4643 + goto out_bad_xdr;
4644 + *stat = *p;
4645 + return 0;
4646 +diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
4647 +index 180ac34..36057ce 100644
4648 +--- a/fs/lockd/clntxdr.c
4649 ++++ b/fs/lockd/clntxdr.c
4650 +@@ -236,7 +236,7 @@ static int decode_nlm_stat(struct xdr_stream *xdr,
4651 + p = xdr_inline_decode(xdr, 4);
4652 + if (unlikely(p == NULL))
4653 + goto out_overflow;
4654 +- if (unlikely(*p > nlm_lck_denied_grace_period))
4655 ++ if (unlikely(ntohl(*p) > ntohl(nlm_lck_denied_grace_period)))
4656 + goto out_enum;
4657 + *stat = *p;
4658 + return 0;
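Both lockd hunks fix the same pattern: a range check done directly on wire-order (__be32) values. Comparing big-endian words with ">" ranks them by the most significant wire byte, which is not numeric order on a little-endian host, so a valid status can be rejected and an out-of-range one let through. Converting both sides with ntohl() first restores the intended comparison. A minimal illustration with made-up values (the real NLM status codes are small constants):

/* Not part of the patch: big-endian wire values must be converted before
 * an ordering comparison on a little-endian host. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t wire_stat  = htonl(1);         /* small, valid status code */
        uint32_t wire_limit = htonl(256);       /* hypothetical upper bound */

        /* Raw comparison of wire-order words: on a little-endian host this
         * is 0x01000000 > 0x00010000, i.e. "1 > 256", wrongly rejecting 1. */
        printf("raw  : %s\n", wire_stat > wire_limit ? "rejected" : "accepted");

        /* Converting both sides first compares the real numeric values. */
        printf("ntohl: %s\n",
               ntohl(wire_stat) > ntohl(wire_limit) ? "rejected" : "accepted");
        return 0;
}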
4659 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4660 +index 757293b..51f6a40 100644
4661 +--- a/fs/nfs/nfs4proc.c
4662 ++++ b/fs/nfs/nfs4proc.c
4663 +@@ -4453,7 +4453,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
4664 + static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4665 + {
4666 + struct nfs_server *server = NFS_SERVER(state->inode);
4667 +- struct nfs4_exception exception = { };
4668 ++ struct nfs4_exception exception = {
4669 ++ .inode = state->inode,
4670 ++ };
4671 + int err;
4672 +
4673 + do {
4674 +@@ -4471,7 +4473,9 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
4675 + static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4676 + {
4677 + struct nfs_server *server = NFS_SERVER(state->inode);
4678 +- struct nfs4_exception exception = { };
4679 ++ struct nfs4_exception exception = {
4680 ++ .inode = state->inode,
4681 ++ };
4682 + int err;
4683 +
4684 + err = nfs4_set_lock_state(state, request);
4685 +@@ -4551,6 +4555,7 @@ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *
4686 + {
4687 + struct nfs4_exception exception = {
4688 + .state = state,
4689 ++ .inode = state->inode,
4690 + };
4691 + int err;
4692 +
4693 +@@ -4596,6 +4601,20 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4694 +
4695 + if (state == NULL)
4696 + return -ENOLCK;
4697 ++ /*
4698 ++ * Don't rely on the VFS having checked the file open mode,
4699 ++ * since it won't do this for flock() locks.
4700 ++ */
4701 ++ switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
4702 ++ case F_RDLCK:
4703 ++ if (!(filp->f_mode & FMODE_READ))
4704 ++ return -EBADF;
4705 ++ break;
4706 ++ case F_WRLCK:
4707 ++ if (!(filp->f_mode & FMODE_WRITE))
4708 ++ return -EBADF;
4709 ++ }
4710 ++
4711 + do {
4712 + status = nfs4_proc_setlk(state, cmd, request);
4713 + if ((status != -EAGAIN) || IS_SETLK(cmd))
4714 +diff --git a/fs/nfs/read.c b/fs/nfs/read.c
4715 +index cfa175c..41bae32 100644
4716 +--- a/fs/nfs/read.c
4717 ++++ b/fs/nfs/read.c
4718 +@@ -324,7 +324,7 @@ out_bad:
4719 + while (!list_empty(res)) {
4720 + data = list_entry(res->next, struct nfs_read_data, list);
4721 + list_del(&data->list);
4722 +- nfs_readdata_free(data);
4723 ++ nfs_readdata_release(data);
4724 + }
4725 + nfs_readpage_release(req);
4726 + return -ENOMEM;
4727 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
4728 +index 3ada13c..376cd65 100644
4729 +--- a/fs/nfs/super.c
4730 ++++ b/fs/nfs/super.c
4731 +@@ -2708,11 +2708,15 @@ static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
4732 + char *root_devname;
4733 + size_t len;
4734 +
4735 +- len = strlen(hostname) + 3;
4736 ++ len = strlen(hostname) + 5;
4737 + root_devname = kmalloc(len, GFP_KERNEL);
4738 + if (root_devname == NULL)
4739 + return ERR_PTR(-ENOMEM);
4740 +- snprintf(root_devname, len, "%s:/", hostname);
4741 ++ /* Does hostname needs to be enclosed in brackets? */
4742 ++ if (strchr(hostname, ':'))
4743 ++ snprintf(root_devname, len, "[%s]:/", hostname);
4744 ++ else
4745 ++ snprintf(root_devname, len, "%s:/", hostname);
4746 + root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
4747 + kfree(root_devname);
4748 + return root_mnt;
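The super.c hunk above is about NFSv4 referral/submount device names when the server is given as a raw IPv6 address: a bare colon in the hostname would collide with the host:path separator, so the address is wrapped in brackets and the allocation grows from strlen()+3 to strlen()+5 to cover the extra "[" and "]". The same formatting decision in a few lines of ordinary userspace C (hypothetical helper, not taken from the kernel):

/* Not part of the patch: formats an NFS root device name, bracketing
 * IPv6 literals the way nfs_do_root_mount() now does. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *root_devname(const char *hostname)
{
        size_t len = strlen(hostname) + 5;      /* worst case: [host]:/ + NUL */
        char *name = malloc(len);

        if (!name)
                return NULL;
        if (strchr(hostname, ':'))              /* IPv6 literal: bracket it */
                snprintf(name, len, "[%s]:/", hostname);
        else
                snprintf(name, len, "%s:/", hostname);
        return name;
}

int main(void)
{
        char *a = root_devname("server.example.net");
        char *b = root_devname("fd00::1");

        if (!a || !b)
                return 1;
        printf("%s\n%s\n", a, b);               /* server.example.net:/ and [fd00::1]:/ */
        free(a);
        free(b);
        return 0;
}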
4749 +diff --git a/fs/nfs/write.c b/fs/nfs/write.c
4750 +index 1dda78d..4efd421 100644
4751 +--- a/fs/nfs/write.c
4752 ++++ b/fs/nfs/write.c
4753 +@@ -974,7 +974,7 @@ out_bad:
4754 + while (!list_empty(res)) {
4755 + data = list_entry(res->next, struct nfs_write_data, list);
4756 + list_del(&data->list);
4757 +- nfs_writedata_free(data);
4758 ++ nfs_writedata_release(data);
4759 + }
4760 + nfs_redirty_request(req);
4761 + return -ENOMEM;
4762 +diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
4763 +index 08c6e36..43f46cd 100644
4764 +--- a/fs/nfsd/nfs3xdr.c
4765 ++++ b/fs/nfsd/nfs3xdr.c
4766 +@@ -803,13 +803,13 @@ encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name,
4767 + return p;
4768 + }
4769 +
4770 +-static int
4771 ++static __be32
4772 + compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
4773 + const char *name, int namlen)
4774 + {
4775 + struct svc_export *exp;
4776 + struct dentry *dparent, *dchild;
4777 +- int rv = 0;
4778 ++ __be32 rv = nfserr_noent;
4779 +
4780 + dparent = cd->fh.fh_dentry;
4781 + exp = cd->fh.fh_export;
4782 +@@ -817,26 +817,20 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
4783 + if (isdotent(name, namlen)) {
4784 + if (namlen == 2) {
4785 + dchild = dget_parent(dparent);
4786 +- if (dchild == dparent) {
4787 +- /* filesystem root - cannot return filehandle for ".." */
4788 +- dput(dchild);
4789 +- return -ENOENT;
4790 +- }
4791 ++ /* filesystem root - cannot return filehandle for ".." */
4792 ++ if (dchild == dparent)
4793 ++ goto out;
4794 + } else
4795 + dchild = dget(dparent);
4796 + } else
4797 + dchild = lookup_one_len(name, dparent, namlen);
4798 + if (IS_ERR(dchild))
4799 +- return -ENOENT;
4800 +- rv = -ENOENT;
4801 ++ return rv;
4802 + if (d_mountpoint(dchild))
4803 + goto out;
4804 +- rv = fh_compose(fhp, exp, dchild, &cd->fh);
4805 +- if (rv)
4806 +- goto out;
4807 + if (!dchild->d_inode)
4808 + goto out;
4809 +- rv = 0;
4810 ++ rv = fh_compose(fhp, exp, dchild, &cd->fh);
4811 + out:
4812 + dput(dchild);
4813 + return rv;
4814 +@@ -845,7 +839,7 @@ out:
4815 + static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen)
4816 + {
4817 + struct svc_fh fh;
4818 +- int err;
4819 ++ __be32 err;
4820 +
4821 + fh_init(&fh, NFS3_FHSIZE);
4822 + err = compose_entry_fh(cd, &fh, name, namlen);
4823 +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
4824 +index fa38336..b8c5538 100644
4825 +--- a/fs/nfsd/nfs4proc.c
4826 ++++ b/fs/nfsd/nfs4proc.c
4827 +@@ -231,17 +231,17 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
4828 + */
4829 + if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0)
4830 + open->op_bmval[1] = (FATTR4_WORD1_TIME_ACCESS |
4831 +- FATTR4_WORD1_TIME_MODIFY);
4832 ++ FATTR4_WORD1_TIME_MODIFY);
4833 + } else {
4834 + status = nfsd_lookup(rqstp, current_fh,
4835 + open->op_fname.data, open->op_fname.len, &resfh);
4836 + fh_unlock(current_fh);
4837 +- if (status)
4838 +- goto out;
4839 +- status = nfsd_check_obj_isreg(&resfh);
4840 + }
4841 + if (status)
4842 + goto out;
4843 ++ status = nfsd_check_obj_isreg(&resfh);
4844 ++ if (status)
4845 ++ goto out;
4846 +
4847 + if (is_create_with_attrs(open) && open->op_acl != NULL)
4848 + do_set_nfs4_acl(rqstp, &resfh, open->op_acl, open->op_bmval);
4849 +@@ -827,6 +827,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4850 + struct nfsd4_setattr *setattr)
4851 + {
4852 + __be32 status = nfs_ok;
4853 ++ int err;
4854 +
4855 + if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
4856 + nfs4_lock_state();
4857 +@@ -838,9 +839,9 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4858 + return status;
4859 + }
4860 + }
4861 +- status = mnt_want_write(cstate->current_fh.fh_export->ex_path.mnt);
4862 +- if (status)
4863 +- return status;
4864 ++ err = mnt_want_write(cstate->current_fh.fh_export->ex_path.mnt);
4865 ++ if (err)
4866 ++ return nfserrno(err);
4867 + status = nfs_ok;
4868 +
4869 + status = check_attr_support(rqstp, cstate, setattr->sa_bmval,
4870 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
4871 +index 5abced7..4cfe260 100644
4872 +--- a/fs/nfsd/nfs4state.c
4873 ++++ b/fs/nfsd/nfs4state.c
4874 +@@ -4080,16 +4080,14 @@ out:
4875 + * vfs_test_lock. (Arguably perhaps test_lock should be done with an
4876 + * inode operation.)
4877 + */
4878 +-static int nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4879 ++static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4880 + {
4881 + struct file *file;
4882 +- int err;
4883 +-
4884 +- err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4885 +- if (err)
4886 +- return err;
4887 +- err = vfs_test_lock(file, lock);
4888 +- nfsd_close(file);
4889 ++ __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4890 ++ if (!err) {
4891 ++ err = nfserrno(vfs_test_lock(file, lock));
4892 ++ nfsd_close(file);
4893 ++ }
4894 + return err;
4895 + }
4896 +
4897 +@@ -4103,7 +4101,6 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4898 + struct inode *inode;
4899 + struct file_lock file_lock;
4900 + struct nfs4_lockowner *lo;
4901 +- int error;
4902 + __be32 status;
4903 +
4904 + if (locks_in_grace())
4905 +@@ -4149,12 +4146,10 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4906 +
4907 + nfs4_transform_lock_offset(&file_lock);
4908 +
4909 +- status = nfs_ok;
4910 +- error = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
4911 +- if (error) {
4912 +- status = nfserrno(error);
4913 ++ status = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
4914 ++ if (status)
4915 + goto out;
4916 +- }
4917 ++
4918 + if (file_lock.fl_type != F_UNLCK) {
4919 + status = nfserr_denied;
4920 + nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
4921 +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
4922 +index b6fa792..9cfa60a 100644
4923 +--- a/fs/nfsd/nfs4xdr.c
4924 ++++ b/fs/nfsd/nfs4xdr.c
4925 +@@ -3411,7 +3411,7 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
4926 + nfsd4_decode_stateid(argp, &si);
4927 + valid = nfs4_validate_stateid(cl, &si);
4928 + RESERVE_SPACE(4);
4929 +- *p++ = htonl(valid);
4930 ++ *p++ = valid;
4931 + resp->p = p;
4932 + }
4933 + nfs4_unlock_state();
4934 +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
4935 +index 7a2e442..5c3cd82 100644
4936 +--- a/fs/nfsd/vfs.c
4937 ++++ b/fs/nfsd/vfs.c
4938 +@@ -1439,7 +1439,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
4939 + switch (createmode) {
4940 + case NFS3_CREATE_UNCHECKED:
4941 + if (! S_ISREG(dchild->d_inode->i_mode))
4942 +- err = nfserr_exist;
4943 ++ goto out;
4944 + else if (truncp) {
4945 + /* in nfsv4, we need to treat this case a little
4946 + * differently. we don't want to truncate the
4947 +diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
4948 +index 3165aeb..31b9463 100644
4949 +--- a/fs/ocfs2/alloc.c
4950 ++++ b/fs/ocfs2/alloc.c
4951 +@@ -1134,7 +1134,7 @@ static int ocfs2_adjust_rightmost_branch(handle_t *handle,
4952 + }
4953 +
4954 + el = path_leaf_el(path);
4955 +- rec = &el->l_recs[le32_to_cpu(el->l_next_free_rec) - 1];
4956 ++ rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec) - 1];
4957 +
4958 + ocfs2_adjust_rightmost_records(handle, et, path, rec);
4959 +
4960 +diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
4961 +index cf78233..9f32d7c 100644
4962 +--- a/fs/ocfs2/refcounttree.c
4963 ++++ b/fs/ocfs2/refcounttree.c
4964 +@@ -1036,14 +1036,14 @@ static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
4965 +
4966 + tmp_el = left_path->p_node[subtree_root].el;
4967 + blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
4968 +- for (i = 0; i < le32_to_cpu(tmp_el->l_next_free_rec); i++) {
4969 ++ for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
4970 + if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
4971 + *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
4972 + break;
4973 + }
4974 + }
4975 +
4976 +- BUG_ON(i == le32_to_cpu(tmp_el->l_next_free_rec));
4977 ++ BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));
4978 +
4979 + out:
4980 + ocfs2_free_path(left_path);
4981 +@@ -1468,7 +1468,7 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
4982 +
4983 + trace_ocfs2_divide_leaf_refcount_block(
4984 + (unsigned long long)ref_leaf_bh->b_blocknr,
4985 +- le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used));
4986 ++ le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
4987 +
4988 + /*
4989 + * XXX: Improvement later.
4990 +@@ -2411,7 +2411,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
4991 + rb = (struct ocfs2_refcount_block *)
4992 + prev_bh->b_data;
4993 +
4994 +- if (le64_to_cpu(rb->rf_records.rl_used) +
4995 ++ if (le16_to_cpu(rb->rf_records.rl_used) +
4996 + recs_add >
4997 + le16_to_cpu(rb->rf_records.rl_count))
4998 + ref_blocks++;
4999 +@@ -2476,7 +2476,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
5000 + if (prev_bh) {
5001 + rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
5002 +
5003 +- if (le64_to_cpu(rb->rf_records.rl_used) + recs_add >
5004 ++ if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
5005 + le16_to_cpu(rb->rf_records.rl_count))
5006 + ref_blocks++;
5007 +
5008 +@@ -3629,7 +3629,7 @@ int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
5009 + * one will split a refcount rec, so totally we need
5010 + * clusters * 2 new refcount rec.
5011 + */
5012 +- if (le64_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
5013 ++ if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
5014 + le16_to_cpu(rb->rf_records.rl_count))
5015 + ref_blocks++;
5016 +
5017 +diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
5018 +index ba5d97e..f169da4 100644
5019 +--- a/fs/ocfs2/suballoc.c
5020 ++++ b/fs/ocfs2/suballoc.c
5021 +@@ -600,7 +600,7 @@ static void ocfs2_bg_alloc_cleanup(handle_t *handle,
5022 + ret = ocfs2_free_clusters(handle, cluster_ac->ac_inode,
5023 + cluster_ac->ac_bh,
5024 + le64_to_cpu(rec->e_blkno),
5025 +- le32_to_cpu(rec->e_leaf_clusters));
5026 ++ le16_to_cpu(rec->e_leaf_clusters));
5027 + if (ret)
5028 + mlog_errno(ret);
5029 + /* Try all the clusters to free */
5030 +@@ -1628,7 +1628,7 @@ static int ocfs2_bg_discontig_fix_by_rec(struct ocfs2_suballoc_result *res,
5031 + {
5032 + unsigned int bpc = le16_to_cpu(cl->cl_bpc);
5033 + unsigned int bitoff = le32_to_cpu(rec->e_cpos) * bpc;
5034 +- unsigned int bitcount = le32_to_cpu(rec->e_leaf_clusters) * bpc;
5035 ++ unsigned int bitcount = le16_to_cpu(rec->e_leaf_clusters) * bpc;
5036 +
5037 + if (res->sr_bit_offset < bitoff)
5038 + return 0;
5039 +diff --git a/fs/pipe.c b/fs/pipe.c
5040 +index 4065f07..05ed5ca 100644
5041 +--- a/fs/pipe.c
5042 ++++ b/fs/pipe.c
5043 +@@ -345,6 +345,16 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
5044 + .get = generic_pipe_buf_get,
5045 + };
5046 +
5047 ++static const struct pipe_buf_operations packet_pipe_buf_ops = {
5048 ++ .can_merge = 0,
5049 ++ .map = generic_pipe_buf_map,
5050 ++ .unmap = generic_pipe_buf_unmap,
5051 ++ .confirm = generic_pipe_buf_confirm,
5052 ++ .release = anon_pipe_buf_release,
5053 ++ .steal = generic_pipe_buf_steal,
5054 ++ .get = generic_pipe_buf_get,
5055 ++};
5056 ++
5057 + static ssize_t
5058 + pipe_read(struct kiocb *iocb, const struct iovec *_iov,
5059 + unsigned long nr_segs, loff_t pos)
5060 +@@ -406,6 +416,13 @@ redo:
5061 + ret += chars;
5062 + buf->offset += chars;
5063 + buf->len -= chars;
5064 ++
5065 ++ /* Was it a packet buffer? Clean up and exit */
5066 ++ if (buf->flags & PIPE_BUF_FLAG_PACKET) {
5067 ++ total_len = chars;
5068 ++ buf->len = 0;
5069 ++ }
5070 ++
5071 + if (!buf->len) {
5072 + buf->ops = NULL;
5073 + ops->release(pipe, buf);
5074 +@@ -458,6 +475,11 @@ redo:
5075 + return ret;
5076 + }
5077 +
5078 ++static inline int is_packetized(struct file *file)
5079 ++{
5080 ++ return (file->f_flags & O_DIRECT) != 0;
5081 ++}
5082 ++
5083 + static ssize_t
5084 + pipe_write(struct kiocb *iocb, const struct iovec *_iov,
5085 + unsigned long nr_segs, loff_t ppos)
5086 +@@ -592,6 +614,11 @@ redo2:
5087 + buf->ops = &anon_pipe_buf_ops;
5088 + buf->offset = 0;
5089 + buf->len = chars;
5090 ++ buf->flags = 0;
5091 ++ if (is_packetized(filp)) {
5092 ++ buf->ops = &packet_pipe_buf_ops;
5093 ++ buf->flags = PIPE_BUF_FLAG_PACKET;
5094 ++ }
5095 + pipe->nrbufs = ++bufs;
5096 + pipe->tmp_page = NULL;
5097 +
5098 +@@ -1012,7 +1039,7 @@ struct file *create_write_pipe(int flags)
5099 + goto err_dentry;
5100 + f->f_mapping = inode->i_mapping;
5101 +
5102 +- f->f_flags = O_WRONLY | (flags & O_NONBLOCK);
5103 ++ f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
5104 + f->f_version = 0;
5105 +
5106 + return f;
5107 +@@ -1056,7 +1083,7 @@ int do_pipe_flags(int *fd, int flags)
5108 + int error;
5109 + int fdw, fdr;
5110 +
5111 +- if (flags & ~(O_CLOEXEC | O_NONBLOCK))
5112 ++ if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
5113 + return -EINVAL;
5114 +
5115 + fw = create_write_pipe(flags);
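Taken together with the autofs changes earlier in this patch, the pipe.c hunks backport "packetized" pipes: autofs_prepare_pipe() sets O_DIRECT on the daemon's pipe, pipe_write() then tags every buffer PIPE_BUF_FLAG_PACKET, and pipe_read() stops after a single tagged buffer, so each read() delivers exactly one autofs packet no matter how large a buffer the (possibly 32-bit) daemon passes in. That is what lets the compat_daemon / autofs_v5_packet_size workaround be deleted. On a kernel carrying this change (or any 3.4+ kernel) the behaviour is visible from plain userspace via pipe2(); a small demo:

/* Not part of the patch: packet-mode pipes from userspace.  Needs a
 * kernel with O_DIRECT pipe support (3.4+, or 3.2/3.3 with this patch). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int fd[2];
        char buf[256];
        ssize_t n;

        if (pipe2(fd, O_DIRECT) < 0) {          /* packetized pipe */
                perror("pipe2(O_DIRECT)");
                return 1;
        }

        write(fd[1], "first", 5);               /* two separate packets */
        write(fd[1], "second", 6);

        /* Each read() returns one packet, even though buf could hold both. */
        n = read(fd[0], buf, sizeof(buf));
        printf("read 1: %zd bytes (\"%.*s\")\n", n, (int)n, buf);
        n = read(fd[0], buf, sizeof(buf));
        printf("read 2: %zd bytes (\"%.*s\")\n", n, (int)n, buf);
        return 0;
}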
5116 +diff --git a/fs/splice.c b/fs/splice.c
5117 +index fa2defa..6d0dfb8 100644
5118 +--- a/fs/splice.c
5119 ++++ b/fs/splice.c
5120 +@@ -31,6 +31,7 @@
5121 + #include <linux/uio.h>
5122 + #include <linux/security.h>
5123 + #include <linux/gfp.h>
5124 ++#include <linux/socket.h>
5125 +
5126 + /*
5127 + * Attempt to steal a page from a pipe buffer. This should perhaps go into
5128 +@@ -691,7 +692,9 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
5129 + if (!likely(file->f_op && file->f_op->sendpage))
5130 + return -EINVAL;
5131 +
5132 +- more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
5133 ++ more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
5134 ++ if (sd->len < sd->total_len)
5135 ++ more |= MSG_SENDPAGE_NOTLAST;
5136 + return file->f_op->sendpage(file, buf->page, buf->offset,
5137 + sd->len, &pos, more);
5138 + }
5139 +diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
5140 +index 0fd28e0..c749af9 100644
5141 +--- a/include/asm-generic/statfs.h
5142 ++++ b/include/asm-generic/statfs.h
5143 +@@ -15,7 +15,7 @@ typedef __kernel_fsid_t fsid_t;
5144 + * with a 10' pole.
5145 + */
5146 + #ifndef __statfs_word
5147 +-#if BITS_PER_LONG == 64
5148 ++#if __BITS_PER_LONG == 64
5149 + #define __statfs_word long
5150 + #else
5151 + #define __statfs_word __u32
5152 +diff --git a/include/linux/efi.h b/include/linux/efi.h
5153 +index 2362a0b..1328d8c 100644
5154 +--- a/include/linux/efi.h
5155 ++++ b/include/linux/efi.h
5156 +@@ -383,7 +383,18 @@ extern int __init efi_setup_pcdp_console(char *);
5157 + #define EFI_VARIABLE_NON_VOLATILE 0x0000000000000001
5158 + #define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002
5159 + #define EFI_VARIABLE_RUNTIME_ACCESS 0x0000000000000004
5160 +-
5161 ++#define EFI_VARIABLE_HARDWARE_ERROR_RECORD 0x0000000000000008
5162 ++#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS 0x0000000000000010
5163 ++#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020
5164 ++#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040
5165 ++
5166 ++#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
5167 ++ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
5168 ++ EFI_VARIABLE_RUNTIME_ACCESS | \
5169 ++ EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
5170 ++ EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | \
5171 ++ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \
5172 ++ EFI_VARIABLE_APPEND_WRITE)
5173 + /*
5174 + * EFI Device Path information
5175 + */
5176 +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
5177 +index d526231..35410ef 100644
5178 +--- a/include/linux/kvm_host.h
5179 ++++ b/include/linux/kvm_host.h
5180 +@@ -562,6 +562,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
5181 +
5182 + #ifdef CONFIG_IOMMU_API
5183 + int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
5184 ++void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
5185 + int kvm_iommu_map_guest(struct kvm *kvm);
5186 + int kvm_iommu_unmap_guest(struct kvm *kvm);
5187 + int kvm_assign_device(struct kvm *kvm,
5188 +@@ -575,6 +576,11 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm,
5189 + return 0;
5190 + }
5191 +
5192 ++static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
5193 ++ struct kvm_memory_slot *slot)
5194 ++{
5195 ++}
5196 ++
5197 + static inline int kvm_iommu_map_guest(struct kvm *kvm)
5198 + {
5199 + return -ENODEV;
5200 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
5201 +index a82ad4d..cbeb586 100644
5202 +--- a/include/linux/netdevice.h
5203 ++++ b/include/linux/netdevice.h
5204 +@@ -2536,8 +2536,6 @@ extern void net_disable_timestamp(void);
5205 + extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
5206 + extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
5207 + extern void dev_seq_stop(struct seq_file *seq, void *v);
5208 +-extern int dev_seq_open_ops(struct inode *inode, struct file *file,
5209 +- const struct seq_operations *ops);
5210 + #endif
5211 +
5212 + extern int netdev_class_create_file(struct class_attribute *class_attr);
5213 +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
5214 +index 77257c9..0072a53 100644
5215 +--- a/include/linux/pipe_fs_i.h
5216 ++++ b/include/linux/pipe_fs_i.h
5217 +@@ -8,6 +8,7 @@
5218 + #define PIPE_BUF_FLAG_LRU 0x01 /* page is on the LRU */
5219 + #define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */
5220 + #define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */
5221 ++#define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */
5222 +
5223 + /**
5224 + * struct pipe_buffer - a linux kernel pipe buffer
5225 +diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
5226 +index c6db9fb..bb1fac5 100644
5227 +--- a/include/linux/seqlock.h
5228 ++++ b/include/linux/seqlock.h
5229 +@@ -141,7 +141,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
5230 + unsigned ret;
5231 +
5232 + repeat:
5233 +- ret = s->sequence;
5234 ++ ret = ACCESS_ONCE(s->sequence);
5235 + if (unlikely(ret & 1)) {
5236 + cpu_relax();
5237 + goto repeat;
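The seqlock hunk wraps the sequence read in ACCESS_ONCE() so the compiler must reload s->sequence on every pass of the retry loop; without it the load could legally be hoisted out of the loop, and a reader that caught an odd (writer in progress) value would spin on the stale copy forever. The idiom spelled out as a plain C sketch, with ACCESS_ONCE written as the kernel's volatile-cast macro:

/* Not part of the patch: the ACCESS_ONCE idiom the hunk relies on.
 * The volatile cast forces a fresh load on every iteration. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct seqcount { unsigned sequence; };

unsigned read_seqcount_begin(const struct seqcount *s)
{
        unsigned ret;

repeat:
        ret = ACCESS_ONCE(s->sequence); /* must not be cached in a register */
        if (ret & 1)                    /* odd: a writer is in progress */
                goto repeat;            /* (the kernel also cpu_relax()es here) */
        return ret;
}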
5238 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
5239 +index 6cf8b53..e689b47 100644
5240 +--- a/include/linux/skbuff.h
5241 ++++ b/include/linux/skbuff.h
5242 +@@ -458,6 +458,7 @@ struct sk_buff {
5243 + union {
5244 + __u32 mark;
5245 + __u32 dropcount;
5246 ++ __u32 avail_size;
5247 + };
5248 +
5249 + __u16 vlan_tci;
5250 +@@ -1326,6 +1327,18 @@ static inline int skb_tailroom(const struct sk_buff *skb)
5251 + }
5252 +
5253 + /**
5254 ++ * skb_availroom - bytes at buffer end
5255 ++ * @skb: buffer to check
5256 ++ *
5257 ++ * Return the number of bytes of free space at the tail of an sk_buff
5258 ++ * allocated by sk_stream_alloc()
5259 ++ */
5260 ++static inline int skb_availroom(const struct sk_buff *skb)
5261 ++{
5262 ++ return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
5263 ++}
5264 ++
5265 ++/**
5266 + * skb_reserve - adjust headroom
5267 + * @skb: buffer to alter
5268 + * @len: bytes to move
5269 +diff --git a/include/linux/socket.h b/include/linux/socket.h
5270 +index d0e77f6..ad919e0 100644
5271 +--- a/include/linux/socket.h
5272 ++++ b/include/linux/socket.h
5273 +@@ -265,7 +265,7 @@ struct ucred {
5274 + #define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */
5275 + #define MSG_MORE 0x8000 /* Sender will send more */
5276 + #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
5277 +-
5278 ++#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
5279 + #define MSG_EOF MSG_FIN
5280 +
5281 + #define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file
5282 +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
5283 +index 03354d5..64cec8d 100644
5284 +--- a/include/linux/usb/hcd.h
5285 ++++ b/include/linux/usb/hcd.h
5286 +@@ -128,6 +128,8 @@ struct usb_hcd {
5287 + unsigned wireless:1; /* Wireless USB HCD */
5288 + unsigned authorized_default:1;
5289 + unsigned has_tt:1; /* Integrated TT in root hub */
5290 ++ unsigned broken_pci_sleep:1; /* Don't put the
5291 ++ controller in PCI-D3 for system sleep */
5292 +
5293 + int irq; /* irq allocated */
5294 + void __iomem *regs; /* device memory/io */
5295 +diff --git a/kernel/exit.c b/kernel/exit.c
5296 +index e6e01b9..5a8a66e 100644
5297 +--- a/kernel/exit.c
5298 ++++ b/kernel/exit.c
5299 +@@ -819,25 +819,6 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
5300 + if (group_dead)
5301 + kill_orphaned_pgrp(tsk->group_leader, NULL);
5302 +
5303 +- /* Let father know we died
5304 +- *
5305 +- * Thread signals are configurable, but you aren't going to use
5306 +- * that to send signals to arbitrary processes.
5307 +- * That stops right now.
5308 +- *
5309 +- * If the parent exec id doesn't match the exec id we saved
5310 +- * when we started then we know the parent has changed security
5311 +- * domain.
5312 +- *
5313 +- * If our self_exec id doesn't match our parent_exec_id then
5314 +- * we have changed execution domain as these two values started
5315 +- * the same after a fork.
5316 +- */
5317 +- if (thread_group_leader(tsk) && tsk->exit_signal != SIGCHLD &&
5318 +- (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
5319 +- tsk->self_exec_id != tsk->parent_exec_id))
5320 +- tsk->exit_signal = SIGCHLD;
5321 +-
5322 + if (unlikely(tsk->ptrace)) {
5323 + int sig = thread_group_leader(tsk) &&
5324 + thread_group_empty(tsk) &&
5325 +diff --git a/kernel/power/swap.c b/kernel/power/swap.c
5326 +index 11a594c..b313086 100644
5327 +--- a/kernel/power/swap.c
5328 ++++ b/kernel/power/swap.c
5329 +@@ -52,6 +52,23 @@
5330 +
5331 + #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
5332 +
5333 ++/*
5334 ++ * Number of free pages that are not high.
5335 ++ */
5336 ++static inline unsigned long low_free_pages(void)
5337 ++{
5338 ++ return nr_free_pages() - nr_free_highpages();
5339 ++}
5340 ++
5341 ++/*
5342 ++ * Number of pages required to be kept free while writing the image. Always
5343 ++ * half of all available low pages before the writing starts.
5344 ++ */
5345 ++static inline unsigned long reqd_free_pages(void)
5346 ++{
5347 ++ return low_free_pages() / 2;
5348 ++}
5349 ++
5350 + struct swap_map_page {
5351 + sector_t entries[MAP_PAGE_ENTRIES];
5352 + sector_t next_swap;
5353 +@@ -73,7 +90,7 @@ struct swap_map_handle {
5354 + sector_t cur_swap;
5355 + sector_t first_sector;
5356 + unsigned int k;
5357 +- unsigned long nr_free_pages, written;
5358 ++ unsigned long reqd_free_pages;
5359 + u32 crc32;
5360 + };
5361 +
5362 +@@ -317,8 +334,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
5363 + goto err_rel;
5364 + }
5365 + handle->k = 0;
5366 +- handle->nr_free_pages = nr_free_pages() >> 1;
5367 +- handle->written = 0;
5368 ++ handle->reqd_free_pages = reqd_free_pages();
5369 + handle->first_sector = handle->cur_swap;
5370 + return 0;
5371 + err_rel:
5372 +@@ -353,11 +369,11 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
5373 + handle->cur_swap = offset;
5374 + handle->k = 0;
5375 + }
5376 +- if (bio_chain && ++handle->written > handle->nr_free_pages) {
5377 ++ if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
5378 + error = hib_wait_on_bio_chain(bio_chain);
5379 + if (error)
5380 + goto out;
5381 +- handle->written = 0;
5382 ++ handle->reqd_free_pages = reqd_free_pages();
5383 + }
5384 + out:
5385 + return error;
5386 +@@ -619,7 +635,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
5387 + * Adjust number of free pages after all allocations have been done.
5388 + * We don't want to run out of pages when writing.
5389 + */
5390 +- handle->nr_free_pages = nr_free_pages() >> 1;
5391 ++ handle->reqd_free_pages = reqd_free_pages();
5392 +
5393 + /*
5394 + * Start the CRC32 thread.
5395 +diff --git a/kernel/sched.c b/kernel/sched.c
5396 +index d6b149c..299f55c 100644
5397 +--- a/kernel/sched.c
5398 ++++ b/kernel/sched.c
5399 +@@ -3538,13 +3538,10 @@ calc_load_n(unsigned long load, unsigned long exp,
5400 + * Once we've updated the global active value, we need to apply the exponential
5401 + * weights adjusted to the number of cycles missed.
5402 + */
5403 +-static void calc_global_nohz(unsigned long ticks)
5404 ++static void calc_global_nohz(void)
5405 + {
5406 + long delta, active, n;
5407 +
5408 +- if (time_before(jiffies, calc_load_update))
5409 +- return;
5410 +-
5411 + /*
5412 + * If we crossed a calc_load_update boundary, make sure to fold
5413 + * any pending idle changes, the respective CPUs might have
5414 +@@ -3556,31 +3553,25 @@ static void calc_global_nohz(unsigned long ticks)
5415 + atomic_long_add(delta, &calc_load_tasks);
5416 +
5417 + /*
5418 +- * If we were idle for multiple load cycles, apply them.
5419 ++ * It could be the one fold was all it took, we done!
5420 + */
5421 +- if (ticks >= LOAD_FREQ) {
5422 +- n = ticks / LOAD_FREQ;
5423 ++ if (time_before(jiffies, calc_load_update + 10))
5424 ++ return;
5425 +
5426 +- active = atomic_long_read(&calc_load_tasks);
5427 +- active = active > 0 ? active * FIXED_1 : 0;
5428 ++ /*
5429 ++ * Catch-up, fold however many we are behind still
5430 ++ */
5431 ++ delta = jiffies - calc_load_update - 10;
5432 ++ n = 1 + (delta / LOAD_FREQ);
5433 +
5434 +- avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
5435 +- avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
5436 +- avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
5437 ++ active = atomic_long_read(&calc_load_tasks);
5438 ++ active = active > 0 ? active * FIXED_1 : 0;
5439 +
5440 +- calc_load_update += n * LOAD_FREQ;
5441 +- }
5442 ++ avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
5443 ++ avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
5444 ++ avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
5445 +
5446 +- /*
5447 +- * Its possible the remainder of the above division also crosses
5448 +- * a LOAD_FREQ period, the regular check in calc_global_load()
5449 +- * which comes after this will take care of that.
5450 +- *
5451 +- * Consider us being 11 ticks before a cycle completion, and us
5452 +- * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
5453 +- * age us 4 cycles, and the test in calc_global_load() will
5454 +- * pick up the final one.
5455 +- */
5456 ++ calc_load_update += n * LOAD_FREQ;
5457 + }
5458 + #else
5459 + static void calc_load_account_idle(struct rq *this_rq)
5460 +@@ -3592,7 +3583,7 @@ static inline long calc_load_fold_idle(void)
5461 + return 0;
5462 + }
5463 +
5464 +-static void calc_global_nohz(unsigned long ticks)
5465 ++static void calc_global_nohz(void)
5466 + {
5467 + }
5468 + #endif
5469 +@@ -3620,8 +3611,6 @@ void calc_global_load(unsigned long ticks)
5470 + {
5471 + long active;
5472 +
5473 +- calc_global_nohz(ticks);
5474 +-
5475 + if (time_before(jiffies, calc_load_update + 10))
5476 + return;
5477 +
5478 +@@ -3633,6 +3622,16 @@ void calc_global_load(unsigned long ticks)
5479 + avenrun[2] = calc_load(avenrun[2], EXP_15, active);
5480 +
5481 + calc_load_update += LOAD_FREQ;
5482 ++
5483 ++ /*
5484 ++ * Account one period with whatever state we found before
5485 ++ * folding in the nohz state and ageing the entire idle period.
5486 ++ *
5487 ++ * This avoids loosing a sample when we go idle between
5488 ++ * calc_load_account_active() (10 ticks ago) and now and thus
5489 ++ * under-accounting.
5490 ++ */
5491 ++ calc_global_nohz();
5492 + }
5493 +
5494 + /*
5495 +@@ -7605,16 +7604,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
5496 + struct sd_data *sdd = &tl->data;
5497 +
5498 + for_each_cpu(j, cpu_map) {
5499 +- struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
5500 +- if (sd && (sd->flags & SD_OVERLAP))
5501 +- free_sched_groups(sd->groups, 0);
5502 +- kfree(*per_cpu_ptr(sdd->sd, j));
5503 +- kfree(*per_cpu_ptr(sdd->sg, j));
5504 +- kfree(*per_cpu_ptr(sdd->sgp, j));
5505 ++ struct sched_domain *sd;
5506 ++
5507 ++ if (sdd->sd) {
5508 ++ sd = *per_cpu_ptr(sdd->sd, j);
5509 ++ if (sd && (sd->flags & SD_OVERLAP))
5510 ++ free_sched_groups(sd->groups, 0);
5511 ++ kfree(*per_cpu_ptr(sdd->sd, j));
5512 ++ }
5513 ++
5514 ++ if (sdd->sg)
5515 ++ kfree(*per_cpu_ptr(sdd->sg, j));
5516 ++ if (sdd->sgp)
5517 ++ kfree(*per_cpu_ptr(sdd->sgp, j));
5518 + }
5519 + free_percpu(sdd->sd);
5520 ++ sdd->sd = NULL;
5521 + free_percpu(sdd->sg);
5522 ++ sdd->sg = NULL;
5523 + free_percpu(sdd->sgp);
5524 ++ sdd->sgp = NULL;
5525 + }
5526 + }
5527 +
5528 +diff --git a/kernel/signal.c b/kernel/signal.c
5529 +index 2065515..08e0b97 100644
5530 +--- a/kernel/signal.c
5531 ++++ b/kernel/signal.c
5532 +@@ -1610,6 +1610,15 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
5533 + BUG_ON(!tsk->ptrace &&
5534 + (tsk->group_leader != tsk || !thread_group_empty(tsk)));
5535 +
5536 ++ if (sig != SIGCHLD) {
5537 ++ /*
5538 ++ * This is only possible if parent == real_parent.
5539 ++ * Check if it has changed security domain.
5540 ++ */
5541 ++ if (tsk->parent_exec_id != tsk->parent->self_exec_id)
5542 ++ sig = SIGCHLD;
5543 ++ }
5544 ++
5545 + info.si_signo = sig;
5546 + info.si_errno = 0;
5547 + /*
5548 +diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
5549 +index 5199930..1dcf253 100644
5550 +--- a/kernel/trace/trace_output.c
5551 ++++ b/kernel/trace/trace_output.c
5552 +@@ -638,6 +638,8 @@ int trace_print_lat_context(struct trace_iterator *iter)
5553 + {
5554 + u64 next_ts;
5555 + int ret;
5556 ++ /* trace_find_next_entry will reset ent_size */
5557 ++ int ent_size = iter->ent_size;
5558 + struct trace_seq *s = &iter->seq;
5559 + struct trace_entry *entry = iter->ent,
5560 + *next_entry = trace_find_next_entry(iter, NULL,
5561 +@@ -646,6 +648,9 @@ int trace_print_lat_context(struct trace_iterator *iter)
5562 + unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
5563 + unsigned long rel_usecs;
5564 +
5565 ++ /* Restore the original ent_size */
5566 ++ iter->ent_size = ent_size;
5567 ++
5568 + if (!next_entry)
5569 + next_ts = iter->ts;
5570 + rel_usecs = ns2usecs(next_ts - iter->ts);
5571 +diff --git a/mm/swap_state.c b/mm/swap_state.c
5572 +index 78cc4d1..7704d9c 100644
5573 +--- a/mm/swap_state.c
5574 ++++ b/mm/swap_state.c
5575 +@@ -27,7 +27,7 @@
5576 + */
5577 + static const struct address_space_operations swap_aops = {
5578 + .writepage = swap_writepage,
5579 +- .set_page_dirty = __set_page_dirty_nobuffers,
5580 ++ .set_page_dirty = __set_page_dirty_no_writeback,
5581 + .migratepage = migrate_page,
5582 + };
5583 +
5584 +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
5585 +index e7c69f4..b04a6ef 100644
5586 +--- a/net/ax25/af_ax25.c
5587 ++++ b/net/ax25/af_ax25.c
5588 +@@ -2006,16 +2006,17 @@ static void __exit ax25_exit(void)
5589 + proc_net_remove(&init_net, "ax25_route");
5590 + proc_net_remove(&init_net, "ax25");
5591 + proc_net_remove(&init_net, "ax25_calls");
5592 +- ax25_rt_free();
5593 +- ax25_uid_free();
5594 +- ax25_dev_free();
5595 +
5596 +- ax25_unregister_sysctl();
5597 + unregister_netdevice_notifier(&ax25_dev_notifier);
5598 ++ ax25_unregister_sysctl();
5599 +
5600 + dev_remove_pack(&ax25_packet_type);
5601 +
5602 + sock_unregister(PF_AX25);
5603 + proto_unregister(&ax25_proto);
5604 ++
5605 ++ ax25_rt_free();
5606 ++ ax25_uid_free();
5607 ++ ax25_dev_free();
5608 + }
5609 + module_exit(ax25_exit);
5610 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
5611 +index 8eb6b15..5ac1811 100644
5612 +--- a/net/bridge/br_multicast.c
5613 ++++ b/net/bridge/br_multicast.c
5614 +@@ -241,7 +241,6 @@ static void br_multicast_group_expired(unsigned long data)
5615 + hlist_del_rcu(&mp->hlist[mdb->ver]);
5616 + mdb->size--;
5617 +
5618 +- del_timer(&mp->query_timer);
5619 + call_rcu_bh(&mp->rcu, br_multicast_free_group);
5620 +
5621 + out:
5622 +@@ -271,7 +270,6 @@ static void br_multicast_del_pg(struct net_bridge *br,
5623 + rcu_assign_pointer(*pp, p->next);
5624 + hlist_del_init(&p->mglist);
5625 + del_timer(&p->timer);
5626 +- del_timer(&p->query_timer);
5627 + call_rcu_bh(&p->rcu, br_multicast_free_pg);
5628 +
5629 + if (!mp->ports && !mp->mglist &&
5630 +@@ -507,74 +505,6 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
5631 + return NULL;
5632 + }
5633 +
5634 +-static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
5635 +-{
5636 +- struct net_bridge *br = mp->br;
5637 +- struct sk_buff *skb;
5638 +-
5639 +- skb = br_multicast_alloc_query(br, &mp->addr);
5640 +- if (!skb)
5641 +- goto timer;
5642 +-
5643 +- netif_rx(skb);
5644 +-
5645 +-timer:
5646 +- if (++mp->queries_sent < br->multicast_last_member_count)
5647 +- mod_timer(&mp->query_timer,
5648 +- jiffies + br->multicast_last_member_interval);
5649 +-}
5650 +-
5651 +-static void br_multicast_group_query_expired(unsigned long data)
5652 +-{
5653 +- struct net_bridge_mdb_entry *mp = (void *)data;
5654 +- struct net_bridge *br = mp->br;
5655 +-
5656 +- spin_lock(&br->multicast_lock);
5657 +- if (!netif_running(br->dev) || !mp->mglist ||
5658 +- mp->queries_sent >= br->multicast_last_member_count)
5659 +- goto out;
5660 +-
5661 +- br_multicast_send_group_query(mp);
5662 +-
5663 +-out:
5664 +- spin_unlock(&br->multicast_lock);
5665 +-}
5666 +-
5667 +-static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
5668 +-{
5669 +- struct net_bridge_port *port = pg->port;
5670 +- struct net_bridge *br = port->br;
5671 +- struct sk_buff *skb;
5672 +-
5673 +- skb = br_multicast_alloc_query(br, &pg->addr);
5674 +- if (!skb)
5675 +- goto timer;
5676 +-
5677 +- br_deliver(port, skb);
5678 +-
5679 +-timer:
5680 +- if (++pg->queries_sent < br->multicast_last_member_count)
5681 +- mod_timer(&pg->query_timer,
5682 +- jiffies + br->multicast_last_member_interval);
5683 +-}
5684 +-
5685 +-static void br_multicast_port_group_query_expired(unsigned long data)
5686 +-{
5687 +- struct net_bridge_port_group *pg = (void *)data;
5688 +- struct net_bridge_port *port = pg->port;
5689 +- struct net_bridge *br = port->br;
5690 +-
5691 +- spin_lock(&br->multicast_lock);
5692 +- if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
5693 +- pg->queries_sent >= br->multicast_last_member_count)
5694 +- goto out;
5695 +-
5696 +- br_multicast_send_port_group_query(pg);
5697 +-
5698 +-out:
5699 +- spin_unlock(&br->multicast_lock);
5700 +-}
5701 +-
5702 + static struct net_bridge_mdb_entry *br_multicast_get_group(
5703 + struct net_bridge *br, struct net_bridge_port *port,
5704 + struct br_ip *group, int hash)
5705 +@@ -690,8 +620,6 @@ rehash:
5706 + mp->addr = *group;
5707 + setup_timer(&mp->timer, br_multicast_group_expired,
5708 + (unsigned long)mp);
5709 +- setup_timer(&mp->query_timer, br_multicast_group_query_expired,
5710 +- (unsigned long)mp);
5711 +
5712 + hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
5713 + mdb->size++;
5714 +@@ -746,8 +674,6 @@ static int br_multicast_add_group(struct net_bridge *br,
5715 + hlist_add_head(&p->mglist, &port->mglist);
5716 + setup_timer(&p->timer, br_multicast_port_group_expired,
5717 + (unsigned long)p);
5718 +- setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
5719 +- (unsigned long)p);
5720 +
5721 + rcu_assign_pointer(*pp, p);
5722 +
5723 +@@ -1291,9 +1217,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
5724 + time_after(mp->timer.expires, time) :
5725 + try_to_del_timer_sync(&mp->timer) >= 0)) {
5726 + mod_timer(&mp->timer, time);
5727 +-
5728 +- mp->queries_sent = 0;
5729 +- mod_timer(&mp->query_timer, now);
5730 + }
5731 +
5732 + goto out;
5733 +@@ -1310,9 +1233,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
5734 + time_after(p->timer.expires, time) :
5735 + try_to_del_timer_sync(&p->timer) >= 0)) {
5736 + mod_timer(&p->timer, time);
5737 +-
5738 +- p->queries_sent = 0;
5739 +- mod_timer(&p->query_timer, now);
5740 + }
5741 +
5742 + break;
5743 +@@ -1680,7 +1600,6 @@ void br_multicast_stop(struct net_bridge *br)
5744 + hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
5745 + hlist[ver]) {
5746 + del_timer(&mp->timer);
5747 +- del_timer(&mp->query_timer);
5748 + call_rcu_bh(&mp->rcu, br_multicast_free_group);
5749 + }
5750 + }
5751 +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
5752 +index d7d6fb0..93264df 100644
5753 +--- a/net/bridge/br_private.h
5754 ++++ b/net/bridge/br_private.h
5755 +@@ -82,9 +82,7 @@ struct net_bridge_port_group {
5756 + struct hlist_node mglist;
5757 + struct rcu_head rcu;
5758 + struct timer_list timer;
5759 +- struct timer_list query_timer;
5760 + struct br_ip addr;
5761 +- u32 queries_sent;
5762 + };
5763 +
5764 + struct net_bridge_mdb_entry
5765 +@@ -94,10 +92,8 @@ struct net_bridge_mdb_entry
5766 + struct net_bridge_port_group __rcu *ports;
5767 + struct rcu_head rcu;
5768 + struct timer_list timer;
5769 +- struct timer_list query_timer;
5770 + struct br_ip addr;
5771 + bool mglist;
5772 +- u32 queries_sent;
5773 + };
5774 +
5775 + struct net_bridge_mdb_htable
5776 +diff --git a/net/core/dev.c b/net/core/dev.c
5777 +index 55cd370..cd5050e 100644
5778 +--- a/net/core/dev.c
5779 ++++ b/net/core/dev.c
5780 +@@ -4102,54 +4102,41 @@ static int dev_ifconf(struct net *net, char __user *arg)
5781 +
5782 + #ifdef CONFIG_PROC_FS
5783 +
5784 +-#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
5785 +-
5786 +-struct dev_iter_state {
5787 +- struct seq_net_private p;
5788 +- unsigned int pos; /* bucket << BUCKET_SPACE + offset */
5789 +-};
5790 ++#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
5791 +
5792 + #define get_bucket(x) ((x) >> BUCKET_SPACE)
5793 + #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
5794 + #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
5795 +
5796 +-static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
5797 ++static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
5798 + {
5799 +- struct dev_iter_state *state = seq->private;
5800 + struct net *net = seq_file_net(seq);
5801 + struct net_device *dev;
5802 + struct hlist_node *p;
5803 + struct hlist_head *h;
5804 +- unsigned int count, bucket, offset;
5805 ++ unsigned int count = 0, offset = get_offset(*pos);
5806 +
5807 +- bucket = get_bucket(state->pos);
5808 +- offset = get_offset(state->pos);
5809 +- h = &net->dev_name_head[bucket];
5810 +- count = 0;
5811 ++ h = &net->dev_name_head[get_bucket(*pos)];
5812 + hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
5813 +- if (count++ == offset) {
5814 +- state->pos = set_bucket_offset(bucket, count);
5815 ++ if (++count == offset)
5816 + return dev;
5817 +- }
5818 + }
5819 +
5820 + return NULL;
5821 + }
5822 +
5823 +-static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
5824 ++static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
5825 + {
5826 +- struct dev_iter_state *state = seq->private;
5827 + struct net_device *dev;
5828 + unsigned int bucket;
5829 +
5830 +- bucket = get_bucket(state->pos);
5831 + do {
5832 +- dev = dev_from_same_bucket(seq);
5833 ++ dev = dev_from_same_bucket(seq, pos);
5834 + if (dev)
5835 + return dev;
5836 +
5837 +- bucket++;
5838 +- state->pos = set_bucket_offset(bucket, 0);
5839 ++ bucket = get_bucket(*pos) + 1;
5840 ++ *pos = set_bucket_offset(bucket, 1);
5841 + } while (bucket < NETDEV_HASHENTRIES);
5842 +
5843 + return NULL;
5844 +@@ -4162,33 +4149,20 @@ static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
5845 + void *dev_seq_start(struct seq_file *seq, loff_t *pos)
5846 + __acquires(RCU)
5847 + {
5848 +- struct dev_iter_state *state = seq->private;
5849 +-
5850 + rcu_read_lock();
5851 + if (!*pos)
5852 + return SEQ_START_TOKEN;
5853 +
5854 +- /* check for end of the hash */
5855 +- if (state->pos == 0 && *pos > 1)
5856 ++ if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
5857 + return NULL;
5858 +
5859 +- return dev_from_new_bucket(seq);
5860 ++ return dev_from_bucket(seq, pos);
5861 + }
5862 +
5863 + void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5864 + {
5865 +- struct net_device *dev;
5866 +-
5867 + ++*pos;
5868 +-
5869 +- if (v == SEQ_START_TOKEN)
5870 +- return dev_from_new_bucket(seq);
5871 +-
5872 +- dev = dev_from_same_bucket(seq);
5873 +- if (dev)
5874 +- return dev;
5875 +-
5876 +- return dev_from_new_bucket(seq);
5877 ++ return dev_from_bucket(seq, pos);
5878 + }
5879 +
5880 + void dev_seq_stop(struct seq_file *seq, void *v)
5881 +@@ -4287,13 +4261,7 @@ static const struct seq_operations dev_seq_ops = {
5882 + static int dev_seq_open(struct inode *inode, struct file *file)
5883 + {
5884 + return seq_open_net(inode, file, &dev_seq_ops,
5885 +- sizeof(struct dev_iter_state));
5886 +-}
5887 +-
5888 +-int dev_seq_open_ops(struct inode *inode, struct file *file,
5889 +- const struct seq_operations *ops)
5890 +-{
5891 +- return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
5892 ++ sizeof(struct seq_net_private));
5893 + }
5894 +
5895 + static const struct file_operations dev_seq_fops = {
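The /proc/net/dev iterator rewrite above drops the private dev_iter_state and instead encodes (hash bucket, offset within bucket) directly in the seq_file position. BUCKET_SPACE shrinks by one bit and offsets become 1-based so that position 0 stays free for SEQ_START_TOKEN. A toy version of the packing, with NETDEV_HASHBITS taken as 8 (the value netdevice.h uses); the concrete numbers are illustrative only:

/* Not part of the patch: the bucket/offset packing used by the iterator
 * after this change. */
#include <stdio.h>

#define NETDEV_HASHBITS 8
#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

int main(void)
{
        /* bucket 7, third entry (offsets start at 1, so 0 is never used) */
        unsigned long long pos = set_bucket_offset(7, 3);

        printf("pos=%llu bucket=%llu offset=%llu\n",
               pos,
               (unsigned long long)get_bucket(pos),
               (unsigned long long)get_offset(pos));
        return 0;
}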
5896 +diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
5897 +index febba51..277faef 100644
5898 +--- a/net/core/dev_addr_lists.c
5899 ++++ b/net/core/dev_addr_lists.c
5900 +@@ -696,7 +696,8 @@ static const struct seq_operations dev_mc_seq_ops = {
5901 +
5902 + static int dev_mc_seq_open(struct inode *inode, struct file *file)
5903 + {
5904 +- return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
5905 ++ return seq_open_net(inode, file, &dev_mc_seq_ops,
5906 ++ sizeof(struct seq_net_private));
5907 + }
5908 +
5909 + static const struct file_operations dev_mc_seq_fops = {
5910 +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
5911 +index 0e950fd..31a5ae5 100644
5912 +--- a/net/core/net_namespace.c
5913 ++++ b/net/core/net_namespace.c
5914 +@@ -83,21 +83,29 @@ assign:
5915 +
5916 + static int ops_init(const struct pernet_operations *ops, struct net *net)
5917 + {
5918 +- int err;
5919 ++ int err = -ENOMEM;
5920 ++ void *data = NULL;
5921 ++
5922 + if (ops->id && ops->size) {
5923 +- void *data = kzalloc(ops->size, GFP_KERNEL);
5924 ++ data = kzalloc(ops->size, GFP_KERNEL);
5925 + if (!data)
5926 +- return -ENOMEM;
5927 ++ goto out;
5928 +
5929 + err = net_assign_generic(net, *ops->id, data);
5930 +- if (err) {
5931 +- kfree(data);
5932 +- return err;
5933 +- }
5934 ++ if (err)
5935 ++ goto cleanup;
5936 + }
5937 ++ err = 0;
5938 + if (ops->init)
5939 +- return ops->init(net);
5940 +- return 0;
5941 ++ err = ops->init(net);
5942 ++ if (!err)
5943 ++ return 0;
5944 ++
5945 ++cleanup:
5946 ++ kfree(data);
5947 ++
5948 ++out:
5949 ++ return err;
5950 + }
5951 +
5952 + static void ops_free(const struct pernet_operations *ops, struct net *net)
5953 +@@ -448,12 +456,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
5954 + static int __register_pernet_operations(struct list_head *list,
5955 + struct pernet_operations *ops)
5956 + {
5957 +- int err = 0;
5958 +- err = ops_init(ops, &init_net);
5959 +- if (err)
5960 +- ops_free(ops, &init_net);
5961 +- return err;
5962 +-
5963 ++ return ops_init(ops, &init_net);
5964 + }
5965 +
5966 + static void __unregister_pernet_operations(struct pernet_operations *ops)
5967 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
5968 +index 3c30ee4..2ec200de 100644
5969 +--- a/net/core/skbuff.c
5970 ++++ b/net/core/skbuff.c
5971 +@@ -903,9 +903,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
5972 + goto adjust_others;
5973 + }
5974 +
5975 +- data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
5976 ++ data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5977 ++ gfp_mask);
5978 + if (!data)
5979 + goto nodata;
5980 ++ size = SKB_WITH_OVERHEAD(ksize(data));
5981 +
5982 + /* Copy only real data... and, alas, header. This should be
5983 + * optimized for the cases when header is void.
5984 +@@ -3111,6 +3113,8 @@ static void sock_rmem_free(struct sk_buff *skb)
5985 + */
5986 + int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
5987 + {
5988 ++ int len = skb->len;
5989 ++
5990 + if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
5991 + (unsigned)sk->sk_rcvbuf)
5992 + return -ENOMEM;
5993 +@@ -3125,7 +3129,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
5994 +
5995 + skb_queue_tail(&sk->sk_error_queue, skb);
5996 + if (!sock_flag(sk, SOCK_DEAD))
5997 +- sk->sk_data_ready(sk, skb->len);
5998 ++ sk->sk_data_ready(sk, len);
5999 + return 0;
6000 + }
6001 + EXPORT_SYMBOL(sock_queue_err_skb);
6002 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
6003 +index 34f5db1..7904db4 100644
6004 +--- a/net/ipv4/tcp.c
6005 ++++ b/net/ipv4/tcp.c
6006 +@@ -701,11 +701,12 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
6007 + skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
6008 + if (skb) {
6009 + if (sk_wmem_schedule(sk, skb->truesize)) {
6010 ++ skb_reserve(skb, sk->sk_prot->max_header);
6011 + /*
6012 + * Make sure that we have exactly size bytes
6013 + * available to the caller, no more, no less.
6014 + */
6015 +- skb_reserve(skb, skb_tailroom(skb) - size);
6016 ++ skb->avail_size = size;
6017 + return skb;
6018 + }
6019 + __kfree_skb(skb);
6020 +@@ -860,7 +861,7 @@ wait_for_memory:
6021 + }
6022 +
6023 + out:
6024 +- if (copied)
6025 ++ if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
6026 + tcp_push(sk, flags, mss_now, tp->nonagle);
6027 + return copied;
6028 +
6029 +@@ -995,10 +996,9 @@ new_segment:
6030 + copy = seglen;
6031 +
6032 + /* Where to copy to? */
6033 +- if (skb_tailroom(skb) > 0) {
6034 ++ if (skb_availroom(skb) > 0) {
6035 + /* We have some space in skb head. Superb! */
6036 +- if (copy > skb_tailroom(skb))
6037 +- copy = skb_tailroom(skb);
6038 ++ copy = min_t(int, copy, skb_availroom(skb));
6039 + err = skb_add_data_nocache(sk, skb, from, copy);
6040 + if (err)
6041 + goto do_fault;
6042 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
6043 +index e4d1e4a..daedc07 100644
6044 +--- a/net/ipv4/tcp_input.c
6045 ++++ b/net/ipv4/tcp_input.c
6046 +@@ -334,6 +334,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
6047 + incr = __tcp_grow_window(sk, skb);
6048 +
6049 + if (incr) {
6050 ++ incr = max_t(int, incr, 2 * skb->len);
6051 + tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
6052 + tp->window_clamp);
6053 + inet_csk(sk)->icsk_ack.quick |= 1;
6054 +@@ -473,8 +474,11 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
6055 + if (!win_dep) {
6056 + m -= (new_sample >> 3);
6057 + new_sample += m;
6058 +- } else if (m < new_sample)
6059 +- new_sample = m << 3;
6060 ++ } else {
6061 ++ m <<= 3;
6062 ++ if (m < new_sample)
6063 ++ new_sample = m;
6064 ++ }
6065 + } else {
6066 + /* No previous measure. */
6067 + new_sample = m << 3;
6068 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
6069 +index 097e0c7..c51dd5b 100644
6070 +--- a/net/ipv4/tcp_output.c
6071 ++++ b/net/ipv4/tcp_output.c
6072 +@@ -1093,6 +1093,14 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
6073 + {
6074 + int i, k, eat;
6075 +
6076 ++ eat = min_t(int, len, skb_headlen(skb));
6077 ++ if (eat) {
6078 ++ __skb_pull(skb, eat);
6079 ++ skb->avail_size -= eat;
6080 ++ len -= eat;
6081 ++ if (!len)
6082 ++ return;
6083 ++ }
6084 + eat = len;
6085 + k = 0;
6086 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6087 +@@ -1124,11 +1132,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
6088 + if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6089 + return -ENOMEM;
6090 +
6091 +- /* If len == headlen, we avoid __skb_pull to preserve alignment. */
6092 +- if (unlikely(len < skb_headlen(skb)))
6093 +- __skb_pull(skb, len);
6094 +- else
6095 +- __pskb_trim_head(skb, len - skb_headlen(skb));
6096 ++ __pskb_trim_head(skb, len);
6097 +
6098 + TCP_SKB_CB(skb)->seq += len;
6099 + skb->ip_summed = CHECKSUM_PARTIAL;
6100 +@@ -2057,7 +2061,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
6101 + /* Punt if not enough space exists in the first SKB for
6102 + * the data in the second
6103 + */
6104 +- if (skb->len > skb_tailroom(to))
6105 ++ if (skb->len > skb_availroom(to))
6106 + break;
6107 +
6108 + if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
6109 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
6110 +index 2257366..f2d74ea 100644
6111 +--- a/net/ipv6/mcast.c
6112 ++++ b/net/ipv6/mcast.c
6113 +@@ -2054,7 +2054,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
6114 + if (!delta)
6115 + pmc->mca_sfcount[sfmode]--;
6116 + for (j=0; j<i; j++)
6117 +- (void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
6118 ++ ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
6119 + } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
6120 + struct ip6_sf_list *psf;
6121 +
6122 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
6123 +index b859e4a..4a56574 100644
6124 +--- a/net/ipv6/tcp_ipv6.c
6125 ++++ b/net/ipv6/tcp_ipv6.c
6126 +@@ -1494,6 +1494,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
6127 + tcp_mtup_init(newsk);
6128 + tcp_sync_mss(newsk, dst_mtu(dst));
6129 + newtp->advmss = dst_metric_advmss(dst);
6130 ++ if (tcp_sk(sk)->rx_opt.user_mss &&
6131 ++ tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
6132 ++ newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
6133 ++
6134 + tcp_initialize_rcv_mss(newsk);
6135 + if (tcp_rsk(req)->snt_synack)
6136 + tcp_valid_rtt_meas(newsk,
6137 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
6138 +index eff1f4e..4ff35bf 100644
6139 +--- a/net/mac80211/tx.c
6140 ++++ b/net/mac80211/tx.c
6141 +@@ -1121,7 +1121,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
6142 + tx->sta = rcu_dereference(sdata->u.vlan.sta);
6143 + if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
6144 + return TX_DROP;
6145 +- } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
6146 ++ } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
6147 ++ tx->sdata->control_port_protocol == tx->skb->protocol) {
6148 + tx->sta = sta_info_get_bss(sdata, hdr->addr1);
6149 + }
6150 + if (!tx->sta)
6151 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
6152 +index 1201b6d..a99fb41 100644
6153 +--- a/net/netlink/af_netlink.c
6154 ++++ b/net/netlink/af_netlink.c
6155 +@@ -830,12 +830,19 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
6156 + return 0;
6157 + }
6158 +
6159 +-int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
6160 ++static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
6161 + {
6162 + int len = skb->len;
6163 +
6164 + skb_queue_tail(&sk->sk_receive_queue, skb);
6165 + sk->sk_data_ready(sk, len);
6166 ++ return len;
6167 ++}
6168 ++
6169 ++int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
6170 ++{
6171 ++ int len = __netlink_sendskb(sk, skb);
6172 ++
6173 + sock_put(sk);
6174 + return len;
6175 + }
6176 +@@ -960,8 +967,7 @@ static inline int netlink_broadcast_deliver(struct sock *sk,
6177 + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
6178 + !test_bit(0, &nlk->state)) {
6179 + skb_set_owner_r(skb, sk);
6180 +- skb_queue_tail(&sk->sk_receive_queue, skb);
6181 +- sk->sk_data_ready(sk, skb->len);
6182 ++ __netlink_sendskb(sk, skb);
6183 + return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
6184 + }
6185 + return -1;
6186 +@@ -1684,10 +1690,8 @@ static int netlink_dump(struct sock *sk)
6187 +
6188 + if (sk_filter(sk, skb))
6189 + kfree_skb(skb);
6190 +- else {
6191 +- skb_queue_tail(&sk->sk_receive_queue, skb);
6192 +- sk->sk_data_ready(sk, skb->len);
6193 +- }
6194 ++ else
6195 ++ __netlink_sendskb(sk, skb);
6196 + return 0;
6197 + }
6198 +
6199 +@@ -1701,10 +1705,8 @@ static int netlink_dump(struct sock *sk)
6200 +
6201 + if (sk_filter(sk, skb))
6202 + kfree_skb(skb);
6203 +- else {
6204 +- skb_queue_tail(&sk->sk_receive_queue, skb);
6205 +- sk->sk_data_ready(sk, skb->len);
6206 +- }
6207 ++ else
6208 ++ __netlink_sendskb(sk, skb);
6209 +
6210 + if (cb->done)
6211 + cb->done(cb);
6212 +diff --git a/net/phonet/pep.c b/net/phonet/pep.c
6213 +index 2ba6e9f..007546d 100644
6214 +--- a/net/phonet/pep.c
6215 ++++ b/net/phonet/pep.c
6216 +@@ -1046,6 +1046,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
6217 + int flags = msg->msg_flags;
6218 + int err, done;
6219 +
6220 ++ if (len > USHRT_MAX)
6221 ++ return -EMSGSIZE;
6222 ++
6223 + if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
6224 + MSG_CMSG_COMPAT)) ||
6225 + !(msg->msg_flags & MSG_EOR))
6226 +diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
6227 +index 6cd8ddf..e1afe0c 100644
6228 +--- a/net/sched/sch_gred.c
6229 ++++ b/net/sched/sch_gred.c
6230 +@@ -544,11 +544,8 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
6231 + opt.packets = q->packetsin;
6232 + opt.bytesin = q->bytesin;
6233 +
6234 +- if (gred_wred_mode(table)) {
6235 +- q->parms.qidlestart =
6236 +- table->tab[table->def]->parms.qidlestart;
6237 +- q->parms.qavg = table->tab[table->def]->parms.qavg;
6238 +- }
6239 ++ if (gred_wred_mode(table))
6240 ++ gred_load_wred_set(table, q);
6241 +
6242 + opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
6243 +
6244 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
6245 +index 54a7cd2..0075554 100644
6246 +--- a/net/sctp/socket.c
6247 ++++ b/net/sctp/socket.c
6248 +@@ -4133,9 +4133,10 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
6249 + static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
6250 + int __user *optlen)
6251 + {
6252 +- if (len < sizeof(struct sctp_event_subscribe))
6253 ++ if (len <= 0)
6254 + return -EINVAL;
6255 +- len = sizeof(struct sctp_event_subscribe);
6256 ++ if (len > sizeof(struct sctp_event_subscribe))
6257 ++ len = sizeof(struct sctp_event_subscribe);
6258 + if (put_user(len, optlen))
6259 + return -EFAULT;
6260 + if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
6261 +diff --git a/net/socket.c b/net/socket.c
6262 +index 2dce67a..273cbce 100644
6263 +--- a/net/socket.c
6264 ++++ b/net/socket.c
6265 +@@ -791,9 +791,9 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
6266 +
6267 + sock = file->private_data;
6268 +
6269 +- flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
6270 +- if (more)
6271 +- flags |= MSG_MORE;
6272 ++ flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
6273 ++ /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
6274 ++ flags |= more;
6275 +
6276 + return kernel_sendpage(sock, page, offset, size, flags);
6277 + }
6278 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
6279 +index ffafda5..c06c365 100644
6280 +--- a/net/wireless/nl80211.c
6281 ++++ b/net/wireless/nl80211.c
6282 +@@ -1258,6 +1258,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
6283 + goto bad_res;
6284 + }
6285 +
6286 ++ if (!netif_running(netdev)) {
6287 ++ result = -ENETDOWN;
6288 ++ goto bad_res;
6289 ++ }
6290 ++
6291 + nla_for_each_nested(nl_txq_params,
6292 + info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
6293 + rem_txq_params) {
6294 +@@ -5944,7 +5949,7 @@ static struct genl_ops nl80211_ops[] = {
6295 + .doit = nl80211_get_key,
6296 + .policy = nl80211_policy,
6297 + .flags = GENL_ADMIN_PERM,
6298 +- .internal_flags = NL80211_FLAG_NEED_NETDEV |
6299 ++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6300 + NL80211_FLAG_NEED_RTNL,
6301 + },
6302 + {
6303 +@@ -5976,7 +5981,7 @@ static struct genl_ops nl80211_ops[] = {
6304 + .policy = nl80211_policy,
6305 + .flags = GENL_ADMIN_PERM,
6306 + .doit = nl80211_addset_beacon,
6307 +- .internal_flags = NL80211_FLAG_NEED_NETDEV |
6308 ++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6309 + NL80211_FLAG_NEED_RTNL,
6310 + },
6311 + {
6312 +@@ -5984,7 +5989,7 @@ static struct genl_ops nl80211_ops[] = {
6313 + .policy = nl80211_policy,
6314 + .flags = GENL_ADMIN_PERM,
6315 + .doit = nl80211_addset_beacon,
6316 +- .internal_flags = NL80211_FLAG_NEED_NETDEV |
6317 ++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6318 + NL80211_FLAG_NEED_RTNL,
6319 + },
6320 + {
6321 +@@ -6008,7 +6013,7 @@ static struct genl_ops nl80211_ops[] = {
6322 + .doit = nl80211_set_station,
6323 + .policy = nl80211_policy,
6324 + .flags = GENL_ADMIN_PERM,
6325 +- .internal_flags = NL80211_FLAG_NEED_NETDEV |
6326 ++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6327 + NL80211_FLAG_NEED_RTNL,
6328 + },
6329 + {
6330 +@@ -6024,7 +6029,7 @@ static struct genl_ops nl80211_ops[] = {
6331 + .doit = nl80211_del_station,
6332 + .policy = nl80211_policy,
6333 + .flags = GENL_ADMIN_PERM,
6334 +- .internal_flags = NL80211_FLAG_NEED_NETDEV |
6335 ++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6336 + NL80211_FLAG_NEED_RTNL,
6337 + },
6338 + {
6339 +@@ -6057,7 +6062,7 @@ static struct genl_ops nl80211_ops[] = {
6340 + .doit = nl80211_del_mpath,
6341 + .policy = nl80211_policy,
6342 + .flags = GENL_ADMIN_PERM,
6343 +- .internal_flags = NL80211_FLAG_NEED_NETDEV |
6344 ++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6345 + NL80211_FLAG_NEED_RTNL,
6346 + },
6347 + {
6348 +@@ -6065,7 +6070,7 @@ static struct genl_ops nl80211_ops[] = {
6349 + .doit = nl80211_set_bss,
6350 + .policy = nl80211_policy,
6351 + .flags = GENL_ADMIN_PERM,
6352 +- .internal_flags = NL80211_FLAG_NEED_NETDEV |
6353 ++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6354 + NL80211_FLAG_NEED_RTNL,
6355 + },
6356 + {
6357 +@@ -6091,7 +6096,7 @@ static struct genl_ops nl80211_ops[] = {
6358 + .doit = nl80211_get_mesh_config,
6359 + .policy = nl80211_policy,
6360 + /* can be retrieved by unprivileged users */
6361 +- .internal_flags = NL80211_FLAG_NEED_NETDEV |
6362 ++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6363 + NL80211_FLAG_NEED_RTNL,
6364 + },
6365 + {
6366 +@@ -6224,7 +6229,7 @@ static struct genl_ops nl80211_ops[] = {
6367 + .doit = nl80211_setdel_pmksa,
6368 + .policy = nl80211_policy,
6369 + .flags = GENL_ADMIN_PERM,
6370 +- .internal_flags = NL80211_FLAG_NEED_NETDEV |
6371 ++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6372 + NL80211_FLAG_NEED_RTNL,
6373 + },
6374 + {
6375 +@@ -6232,7 +6237,7 @@ static struct genl_ops nl80211_ops[] = {
6376 + .doit = nl80211_setdel_pmksa,
6377 + .policy = nl80211_policy,
6378 + .flags = GENL_ADMIN_PERM,
6379 +- .internal_flags = NL80211_FLAG_NEED_NETDEV |
6380 ++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6381 + NL80211_FLAG_NEED_RTNL,
6382 + },
6383 + {
6384 +@@ -6240,7 +6245,7 @@ static struct genl_ops nl80211_ops[] = {
6385 + .doit = nl80211_flush_pmksa,
6386 + .policy = nl80211_policy,
6387 + .flags = GENL_ADMIN_PERM,
6388 +- .internal_flags = NL80211_FLAG_NEED_NETDEV |
6389 ++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6390 + NL80211_FLAG_NEED_RTNL,
6391 + },
6392 + {
6393 +@@ -6328,7 +6333,7 @@ static struct genl_ops nl80211_ops[] = {
6394 + .doit = nl80211_set_wds_peer,
6395 + .policy = nl80211_policy,
6396 + .flags = GENL_ADMIN_PERM,
6397 +- .internal_flags = NL80211_FLAG_NEED_NETDEV |
6398 ++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6399 + NL80211_FLAG_NEED_RTNL,
6400 + },
6401 + {
6402 +diff --git a/net/wireless/util.c b/net/wireless/util.c
6403 +index 4dde429..8bf8902 100644
6404 +--- a/net/wireless/util.c
6405 ++++ b/net/wireless/util.c
6406 +@@ -996,7 +996,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
6407 + if (rdev->wiphy.software_iftypes & BIT(iftype))
6408 + continue;
6409 + for (j = 0; j < c->n_limits; j++) {
6410 +- if (!(limits[j].types & iftype))
6411 ++ if (!(limits[j].types & BIT(iftype)))
6412 + continue;
6413 + if (limits[j].max < num[iftype])
6414 + goto cont;
6415 +diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
6416 +index f936d1f..d1d0ae8 100644
6417 +--- a/scripts/mod/file2alias.c
6418 ++++ b/scripts/mod/file2alias.c
6419 +@@ -926,6 +926,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
6420 + if (!sym->st_shndx || get_secindex(info, sym) >= info->num_sections)
6421 + return;
6422 +
6423 ++ /* We're looking for an object */
6424 ++ if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT)
6425 ++ return;
6426 ++
6427 + /* Handle all-NULL symbols allocated into .bss */
6428 + if (info->sechdrs[get_secindex(info, sym)].sh_type & SHT_NOBITS) {
6429 + zeros = calloc(1, sym->st_size);
6430 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
6431 +index ae94929..51a1afc 100644
6432 +--- a/sound/pci/hda/patch_conexant.c
6433 ++++ b/sound/pci/hda/patch_conexant.c
6434 +@@ -4003,9 +4003,14 @@ static void cx_auto_init_output(struct hda_codec *codec)
6435 + int i;
6436 +
6437 + mute_outputs(codec, spec->multiout.num_dacs, spec->multiout.dac_nids);
6438 +- for (i = 0; i < cfg->hp_outs; i++)
6439 ++ for (i = 0; i < cfg->hp_outs; i++) {
6440 ++ unsigned int val = PIN_OUT;
6441 ++ if (snd_hda_query_pin_caps(codec, cfg->hp_pins[i]) &
6442 ++ AC_PINCAP_HP_DRV)
6443 ++ val |= AC_PINCTL_HP_EN;
6444 + snd_hda_codec_write(codec, cfg->hp_pins[i], 0,
6445 +- AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP);
6446 ++ AC_VERB_SET_PIN_WIDGET_CONTROL, val);
6447 ++ }
6448 + mute_outputs(codec, cfg->hp_outs, cfg->hp_pins);
6449 + mute_outputs(codec, cfg->line_outs, cfg->line_out_pins);
6450 + mute_outputs(codec, cfg->speaker_outs, cfg->speaker_pins);
6451 +@@ -4408,8 +4413,10 @@ static void apply_pin_fixup(struct hda_codec *codec,
6452 +
6453 + enum {
6454 + CXT_PINCFG_LENOVO_X200,
6455 ++ CXT_PINCFG_LENOVO_TP410,
6456 + };
6457 +
6458 ++/* ThinkPad X200 & co with cxt5051 */
6459 + static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = {
6460 + { 0x16, 0x042140ff }, /* HP (seq# overridden) */
6461 + { 0x17, 0x21a11000 }, /* dock-mic */
6462 +@@ -4417,15 +4424,33 @@ static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = {
6463 + {}
6464 + };
6465 +
6466 ++/* ThinkPad 410/420/510/520, X201 & co with cxt5066 */
6467 ++static const struct cxt_pincfg cxt_pincfg_lenovo_tp410[] = {
6468 ++ { 0x19, 0x042110ff }, /* HP (seq# overridden) */
6469 ++ { 0x1a, 0x21a190f0 }, /* dock-mic */
6470 ++ { 0x1c, 0x212140ff }, /* dock-HP */
6471 ++ {}
6472 ++};
6473 ++
6474 + static const struct cxt_pincfg *cxt_pincfg_tbl[] = {
6475 + [CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200,
6476 ++ [CXT_PINCFG_LENOVO_TP410] = cxt_pincfg_lenovo_tp410,
6477 + };
6478 +
6479 +-static const struct snd_pci_quirk cxt_fixups[] = {
6480 ++static const struct snd_pci_quirk cxt5051_fixups[] = {
6481 + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
6482 + {}
6483 + };
6484 +
6485 ++static const struct snd_pci_quirk cxt5066_fixups[] = {
6486 ++ SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
6487 ++ SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
6488 ++ SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
6489 ++ SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
6490 ++ SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
6491 ++ {}
6492 ++};
6493 ++
6494 + /* add "fake" mute amp-caps to DACs on cx5051 so that mixer mute switches
6495 + * can be created (bko#42825)
6496 + */
6497 +@@ -4462,11 +4487,13 @@ static int patch_conexant_auto(struct hda_codec *codec)
6498 + break;
6499 + case 0x14f15051:
6500 + add_cx5051_fake_mutes(codec);
6501 ++ apply_pin_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl);
6502 ++ break;
6503 ++ default:
6504 ++ apply_pin_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl);
6505 + break;
6506 + }
6507 +
6508 +- apply_pin_fixup(codec, cxt_fixups, cxt_pincfg_tbl);
6509 +-
6510 + err = cx_auto_search_adcs(codec);
6511 + if (err < 0)
6512 + return err;
6513 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6514 +index dc8a6fc..0bc5a46 100644
6515 +--- a/sound/pci/hda/patch_realtek.c
6516 ++++ b/sound/pci/hda/patch_realtek.c
6517 +@@ -5032,6 +5032,7 @@ static const struct alc_fixup alc269_fixups[] = {
6518 + };
6519 +
6520 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6521 ++ SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC),
6522 + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
6523 + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
6524 + SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
6525 +diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
6526 +index 336de8f..0e7e26e 100644
6527 +--- a/sound/soc/codecs/tlv320aic23.c
6528 ++++ b/sound/soc/codecs/tlv320aic23.c
6529 +@@ -473,7 +473,7 @@ static int tlv320aic23_set_dai_sysclk(struct snd_soc_dai *codec_dai,
6530 + static int tlv320aic23_set_bias_level(struct snd_soc_codec *codec,
6531 + enum snd_soc_bias_level level)
6532 + {
6533 +- u16 reg = snd_soc_read(codec, TLV320AIC23_PWR) & 0xff7f;
6534 ++ u16 reg = snd_soc_read(codec, TLV320AIC23_PWR) & 0x17f;
6535 +
6536 + switch (level) {
6537 + case SND_SOC_BIAS_ON:
6538 +@@ -492,7 +492,7 @@ static int tlv320aic23_set_bias_level(struct snd_soc_codec *codec,
6539 + case SND_SOC_BIAS_OFF:
6540 + /* everything off, dac mute, inactive */
6541 + snd_soc_write(codec, TLV320AIC23_ACTIVE, 0x0);
6542 +- snd_soc_write(codec, TLV320AIC23_PWR, 0xffff);
6543 ++ snd_soc_write(codec, TLV320AIC23_PWR, 0x1ff);
6544 + break;
6545 + }
6546 + codec->dapm.bias_level = level;
6547 +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
6548 +index 2f1f5f8..7806301 100644
6549 +--- a/sound/soc/codecs/wm8994.c
6550 ++++ b/sound/soc/codecs/wm8994.c
6551 +@@ -883,61 +883,170 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec)
6552 + }
6553 + }
6554 +
6555 +-static int late_enable_ev(struct snd_soc_dapm_widget *w,
6556 +- struct snd_kcontrol *kcontrol, int event)
6557 ++static int aif1clk_ev(struct snd_soc_dapm_widget *w,
6558 ++ struct snd_kcontrol *kcontrol, int event)
6559 + {
6560 + struct snd_soc_codec *codec = w->codec;
6561 +- struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
6562 ++ struct wm8994 *control = codec->control_data;
6563 ++ int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
6564 ++ int dac;
6565 ++ int adc;
6566 ++ int val;
6567 ++
6568 ++ switch (control->type) {
6569 ++ case WM8994:
6570 ++ case WM8958:
6571 ++ mask |= WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA;
6572 ++ break;
6573 ++ default:
6574 ++ break;
6575 ++ }
6576 +
6577 + switch (event) {
6578 + case SND_SOC_DAPM_PRE_PMU:
6579 +- if (wm8994->aif1clk_enable) {
6580 +- snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
6581 +- WM8994_AIF1CLK_ENA_MASK,
6582 +- WM8994_AIF1CLK_ENA);
6583 +- wm8994->aif1clk_enable = 0;
6584 +- }
6585 +- if (wm8994->aif2clk_enable) {
6586 +- snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
6587 +- WM8994_AIF2CLK_ENA_MASK,
6588 +- WM8994_AIF2CLK_ENA);
6589 +- wm8994->aif2clk_enable = 0;
6590 +- }
6591 ++ val = snd_soc_read(codec, WM8994_AIF1_CONTROL_1);
6592 ++ if ((val & WM8994_AIF1ADCL_SRC) &&
6593 ++ (val & WM8994_AIF1ADCR_SRC))
6594 ++ adc = WM8994_AIF1ADC1R_ENA | WM8994_AIF1ADC2R_ENA;
6595 ++ else if (!(val & WM8994_AIF1ADCL_SRC) &&
6596 ++ !(val & WM8994_AIF1ADCR_SRC))
6597 ++ adc = WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC2L_ENA;
6598 ++ else
6599 ++ adc = WM8994_AIF1ADC1R_ENA | WM8994_AIF1ADC2R_ENA |
6600 ++ WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC2L_ENA;
6601 ++
6602 ++ val = snd_soc_read(codec, WM8994_AIF1_CONTROL_2);
6603 ++ if ((val & WM8994_AIF1DACL_SRC) &&
6604 ++ (val & WM8994_AIF1DACR_SRC))
6605 ++ dac = WM8994_AIF1DAC1R_ENA | WM8994_AIF1DAC2R_ENA;
6606 ++ else if (!(val & WM8994_AIF1DACL_SRC) &&
6607 ++ !(val & WM8994_AIF1DACR_SRC))
6608 ++ dac = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC2L_ENA;
6609 ++ else
6610 ++ dac = WM8994_AIF1DAC1R_ENA | WM8994_AIF1DAC2R_ENA |
6611 ++ WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC2L_ENA;
6612 ++
6613 ++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
6614 ++ mask, adc);
6615 ++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
6616 ++ mask, dac);
6617 ++ snd_soc_update_bits(codec, WM8994_CLOCKING_1,
6618 ++ WM8994_AIF1DSPCLK_ENA |
6619 ++ WM8994_SYSDSPCLK_ENA,
6620 ++ WM8994_AIF1DSPCLK_ENA |
6621 ++ WM8994_SYSDSPCLK_ENA);
6622 ++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4, mask,
6623 ++ WM8994_AIF1ADC1R_ENA |
6624 ++ WM8994_AIF1ADC1L_ENA |
6625 ++ WM8994_AIF1ADC2R_ENA |
6626 ++ WM8994_AIF1ADC2L_ENA);
6627 ++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, mask,
6628 ++ WM8994_AIF1DAC1R_ENA |
6629 ++ WM8994_AIF1DAC1L_ENA |
6630 ++ WM8994_AIF1DAC2R_ENA |
6631 ++ WM8994_AIF1DAC2L_ENA);
6632 ++ break;
6633 ++
6634 ++ case SND_SOC_DAPM_PRE_PMD:
6635 ++ case SND_SOC_DAPM_POST_PMD:
6636 ++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
6637 ++ mask, 0);
6638 ++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
6639 ++ mask, 0);
6640 ++
6641 ++ val = snd_soc_read(codec, WM8994_CLOCKING_1);
6642 ++ if (val & WM8994_AIF2DSPCLK_ENA)
6643 ++ val = WM8994_SYSDSPCLK_ENA;
6644 ++ else
6645 ++ val = 0;
6646 ++ snd_soc_update_bits(codec, WM8994_CLOCKING_1,
6647 ++ WM8994_SYSDSPCLK_ENA |
6648 ++ WM8994_AIF1DSPCLK_ENA, val);
6649 + break;
6650 + }
6651 +
6652 +- /* We may also have postponed startup of DSP, handle that. */
6653 +- wm8958_aif_ev(w, kcontrol, event);
6654 +-
6655 + return 0;
6656 + }
6657 +
6658 +-static int late_disable_ev(struct snd_soc_dapm_widget *w,
6659 +- struct snd_kcontrol *kcontrol, int event)
6660 ++static int aif2clk_ev(struct snd_soc_dapm_widget *w,
6661 ++ struct snd_kcontrol *kcontrol, int event)
6662 + {
6663 + struct snd_soc_codec *codec = w->codec;
6664 +- struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
6665 ++ int dac;
6666 ++ int adc;
6667 ++ int val;
6668 +
6669 + switch (event) {
6670 ++ case SND_SOC_DAPM_PRE_PMU:
6671 ++ val = snd_soc_read(codec, WM8994_AIF2_CONTROL_1);
6672 ++ if ((val & WM8994_AIF2ADCL_SRC) &&
6673 ++ (val & WM8994_AIF2ADCR_SRC))
6674 ++ adc = WM8994_AIF2ADCR_ENA;
6675 ++ else if (!(val & WM8994_AIF2ADCL_SRC) &&
6676 ++ !(val & WM8994_AIF2ADCR_SRC))
6677 ++ adc = WM8994_AIF2ADCL_ENA;
6678 ++ else
6679 ++ adc = WM8994_AIF2ADCL_ENA | WM8994_AIF2ADCR_ENA;
6680 ++
6681 ++
6682 ++ val = snd_soc_read(codec, WM8994_AIF2_CONTROL_2);
6683 ++ if ((val & WM8994_AIF2DACL_SRC) &&
6684 ++ (val & WM8994_AIF2DACR_SRC))
6685 ++ dac = WM8994_AIF2DACR_ENA;
6686 ++ else if (!(val & WM8994_AIF2DACL_SRC) &&
6687 ++ !(val & WM8994_AIF2DACR_SRC))
6688 ++ dac = WM8994_AIF2DACL_ENA;
6689 ++ else
6690 ++ dac = WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA;
6691 ++
6692 ++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
6693 ++ WM8994_AIF2ADCL_ENA |
6694 ++ WM8994_AIF2ADCR_ENA, adc);
6695 ++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
6696 ++ WM8994_AIF2DACL_ENA |
6697 ++ WM8994_AIF2DACR_ENA, dac);
6698 ++ snd_soc_update_bits(codec, WM8994_CLOCKING_1,
6699 ++ WM8994_AIF2DSPCLK_ENA |
6700 ++ WM8994_SYSDSPCLK_ENA,
6701 ++ WM8994_AIF2DSPCLK_ENA |
6702 ++ WM8994_SYSDSPCLK_ENA);
6703 ++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
6704 ++ WM8994_AIF2ADCL_ENA |
6705 ++ WM8994_AIF2ADCR_ENA,
6706 ++ WM8994_AIF2ADCL_ENA |
6707 ++ WM8994_AIF2ADCR_ENA);
6708 ++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
6709 ++ WM8994_AIF2DACL_ENA |
6710 ++ WM8994_AIF2DACR_ENA,
6711 ++ WM8994_AIF2DACL_ENA |
6712 ++ WM8994_AIF2DACR_ENA);
6713 ++ break;
6714 ++
6715 ++ case SND_SOC_DAPM_PRE_PMD:
6716 + case SND_SOC_DAPM_POST_PMD:
6717 +- if (wm8994->aif1clk_disable) {
6718 +- snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
6719 +- WM8994_AIF1CLK_ENA_MASK, 0);
6720 +- wm8994->aif1clk_disable = 0;
6721 +- }
6722 +- if (wm8994->aif2clk_disable) {
6723 +- snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
6724 +- WM8994_AIF2CLK_ENA_MASK, 0);
6725 +- wm8994->aif2clk_disable = 0;
6726 +- }
6727 ++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
6728 ++ WM8994_AIF2DACL_ENA |
6729 ++ WM8994_AIF2DACR_ENA, 0);
6730 ++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
6731 ++ WM8994_AIF2ADCL_ENA |
6732 ++ WM8994_AIF2ADCR_ENA, 0);
6733 ++
6734 ++ val = snd_soc_read(codec, WM8994_CLOCKING_1);
6735 ++ if (val & WM8994_AIF1DSPCLK_ENA)
6736 ++ val = WM8994_SYSDSPCLK_ENA;
6737 ++ else
6738 ++ val = 0;
6739 ++ snd_soc_update_bits(codec, WM8994_CLOCKING_1,
6740 ++ WM8994_SYSDSPCLK_ENA |
6741 ++ WM8994_AIF2DSPCLK_ENA, val);
6742 + break;
6743 + }
6744 +
6745 + return 0;
6746 + }
6747 +
6748 +-static int aif1clk_ev(struct snd_soc_dapm_widget *w,
6749 +- struct snd_kcontrol *kcontrol, int event)
6750 ++static int aif1clk_late_ev(struct snd_soc_dapm_widget *w,
6751 ++ struct snd_kcontrol *kcontrol, int event)
6752 + {
6753 + struct snd_soc_codec *codec = w->codec;
6754 + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
6755 +@@ -954,8 +1063,8 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
6756 + return 0;
6757 + }
6758 +
6759 +-static int aif2clk_ev(struct snd_soc_dapm_widget *w,
6760 +- struct snd_kcontrol *kcontrol, int event)
6761 ++static int aif2clk_late_ev(struct snd_soc_dapm_widget *w,
6762 ++ struct snd_kcontrol *kcontrol, int event)
6763 + {
6764 + struct snd_soc_codec *codec = w->codec;
6765 + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
6766 +@@ -972,6 +1081,63 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
6767 + return 0;
6768 + }
6769 +
6770 ++static int late_enable_ev(struct snd_soc_dapm_widget *w,
6771 ++ struct snd_kcontrol *kcontrol, int event)
6772 ++{
6773 ++ struct snd_soc_codec *codec = w->codec;
6774 ++ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
6775 ++
6776 ++ switch (event) {
6777 ++ case SND_SOC_DAPM_PRE_PMU:
6778 ++ if (wm8994->aif1clk_enable) {
6779 ++ aif1clk_ev(w, kcontrol, event);
6780 ++ snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
6781 ++ WM8994_AIF1CLK_ENA_MASK,
6782 ++ WM8994_AIF1CLK_ENA);
6783 ++ wm8994->aif1clk_enable = 0;
6784 ++ }
6785 ++ if (wm8994->aif2clk_enable) {
6786 ++ aif2clk_ev(w, kcontrol, event);
6787 ++ snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
6788 ++ WM8994_AIF2CLK_ENA_MASK,
6789 ++ WM8994_AIF2CLK_ENA);
6790 ++ wm8994->aif2clk_enable = 0;
6791 ++ }
6792 ++ break;
6793 ++ }
6794 ++
6795 ++ /* We may also have postponed startup of DSP, handle that. */
6796 ++ wm8958_aif_ev(w, kcontrol, event);
6797 ++
6798 ++ return 0;
6799 ++}
6800 ++
6801 ++static int late_disable_ev(struct snd_soc_dapm_widget *w,
6802 ++ struct snd_kcontrol *kcontrol, int event)
6803 ++{
6804 ++ struct snd_soc_codec *codec = w->codec;
6805 ++ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
6806 ++
6807 ++ switch (event) {
6808 ++ case SND_SOC_DAPM_POST_PMD:
6809 ++ if (wm8994->aif1clk_disable) {
6810 ++ snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
6811 ++ WM8994_AIF1CLK_ENA_MASK, 0);
6812 ++ aif1clk_ev(w, kcontrol, event);
6813 ++ wm8994->aif1clk_disable = 0;
6814 ++ }
6815 ++ if (wm8994->aif2clk_disable) {
6816 ++ snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
6817 ++ WM8994_AIF2CLK_ENA_MASK, 0);
6818 ++ aif2clk_ev(w, kcontrol, event);
6819 ++ wm8994->aif2clk_disable = 0;
6820 ++ }
6821 ++ break;
6822 ++ }
6823 ++
6824 ++ return 0;
6825 ++}
6826 ++
6827 + static int adc_mux_ev(struct snd_soc_dapm_widget *w,
6828 + struct snd_kcontrol *kcontrol, int event)
6829 + {
6830 +@@ -1268,9 +1434,9 @@ static const struct snd_kcontrol_new aif2dacr_src_mux =
6831 + SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
6832 +
6833 + static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = {
6834 +-SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev,
6835 ++SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_late_ev,
6836 + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
6837 +-SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev,
6838 ++SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_late_ev,
6839 + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
6840 +
6841 + SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
6842 +@@ -1299,8 +1465,10 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
6843 + };
6844 +
6845 + static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
6846 +-SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
6847 +-SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
6848 ++SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev,
6849 ++ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
6850 ++SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev,
6851 ++ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
6852 + SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
6853 + SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0,
6854 + left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
6855 +@@ -1353,30 +1521,30 @@ SND_SOC_DAPM_SUPPLY("VMID", SND_SOC_NOPM, 0, 0, vmid_event,
6856 + SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event,
6857 + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
6858 +
6859 +-SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0),
6860 +-SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0),
6861 +-SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
6862 ++SND_SOC_DAPM_SUPPLY("DSP1CLK", SND_SOC_NOPM, 3, 0, NULL, 0),
6863 ++SND_SOC_DAPM_SUPPLY("DSP2CLK", SND_SOC_NOPM, 2, 0, NULL, 0),
6864 ++SND_SOC_DAPM_SUPPLY("DSPINTCLK", SND_SOC_NOPM, 1, 0, NULL, 0),
6865 +
6866 + SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
6867 +- 0, WM8994_POWER_MANAGEMENT_4, 9, 0),
6868 ++ 0, SND_SOC_NOPM, 9, 0),
6869 + SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
6870 +- 0, WM8994_POWER_MANAGEMENT_4, 8, 0),
6871 ++ 0, SND_SOC_NOPM, 8, 0),
6872 + SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0,
6873 +- WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev,
6874 ++ SND_SOC_NOPM, 9, 0, wm8958_aif_ev,
6875 + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
6876 + SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0,
6877 +- WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev,
6878 ++ SND_SOC_NOPM, 8, 0, wm8958_aif_ev,
6879 + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
6880 +
6881 + SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL,
6882 +- 0, WM8994_POWER_MANAGEMENT_4, 11, 0),
6883 ++ 0, SND_SOC_NOPM, 11, 0),
6884 + SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL,
6885 +- 0, WM8994_POWER_MANAGEMENT_4, 10, 0),
6886 ++ 0, SND_SOC_NOPM, 10, 0),
6887 + SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0,
6888 +- WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev,
6889 ++ SND_SOC_NOPM, 11, 0, wm8958_aif_ev,
6890 + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
6891 + SND_SOC_DAPM_AIF_IN_E("AIF1DAC2R", NULL, 0,
6892 +- WM8994_POWER_MANAGEMENT_5, 10, 0, wm8958_aif_ev,
6893 ++ SND_SOC_NOPM, 10, 0, wm8958_aif_ev,
6894 + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
6895 +
6896 + SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
6897 +@@ -1403,14 +1571,14 @@ SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0,
6898 + dac1r_mix, ARRAY_SIZE(dac1r_mix)),
6899 +
6900 + SND_SOC_DAPM_AIF_OUT("AIF2ADCL", NULL, 0,
6901 +- WM8994_POWER_MANAGEMENT_4, 13, 0),
6902 ++ SND_SOC_NOPM, 13, 0),
6903 + SND_SOC_DAPM_AIF_OUT("AIF2ADCR", NULL, 0,
6904 +- WM8994_POWER_MANAGEMENT_4, 12, 0),
6905 ++ SND_SOC_NOPM, 12, 0),
6906 + SND_SOC_DAPM_AIF_IN_E("AIF2DACL", NULL, 0,
6907 +- WM8994_POWER_MANAGEMENT_5, 13, 0, wm8958_aif_ev,
6908 ++ SND_SOC_NOPM, 13, 0, wm8958_aif_ev,
6909 + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
6910 + SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0,
6911 +- WM8994_POWER_MANAGEMENT_5, 12, 0, wm8958_aif_ev,
6912 ++ SND_SOC_NOPM, 12, 0, wm8958_aif_ev,
6913 + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
6914 +
6915 + SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
6916 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
6917 +index ea909c5..90e93bf 100644
6918 +--- a/sound/soc/soc-dapm.c
6919 ++++ b/sound/soc/soc-dapm.c
6920 +@@ -69,6 +69,7 @@ static int dapm_up_seq[] = {
6921 + [snd_soc_dapm_out_drv] = 10,
6922 + [snd_soc_dapm_hp] = 10,
6923 + [snd_soc_dapm_spk] = 10,
6924 ++ [snd_soc_dapm_line] = 10,
6925 + [snd_soc_dapm_post] = 11,
6926 + };
6927 +
6928 +@@ -77,6 +78,7 @@ static int dapm_down_seq[] = {
6929 + [snd_soc_dapm_adc] = 1,
6930 + [snd_soc_dapm_hp] = 2,
6931 + [snd_soc_dapm_spk] = 2,
6932 ++ [snd_soc_dapm_line] = 2,
6933 + [snd_soc_dapm_out_drv] = 2,
6934 + [snd_soc_dapm_pga] = 4,
6935 + [snd_soc_dapm_mixer_named_ctl] = 5,
6936 +diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
6937 +index adb372d..e0a0970 100644
6938 +--- a/tools/perf/util/hist.c
6939 ++++ b/tools/perf/util/hist.c
6940 +@@ -237,8 +237,8 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
6941 + * mis-adjust symbol addresses when computing
6942 + * the history counter to increment.
6943 + */
6944 +- if (he->ms.map != entry->ms.map) {
6945 +- he->ms.map = entry->ms.map;
6946 ++ if (he->ms.map != entry.ms.map) {
6947 ++ he->ms.map = entry.ms.map;
6948 + if (he->ms.map)
6949 + he->ms.map->referenced = true;
6950 + }
6951 +diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
6952 +index a195c07..fd817a2 100644
6953 +--- a/virt/kvm/iommu.c
6954 ++++ b/virt/kvm/iommu.c
6955 +@@ -309,6 +309,11 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
6956 + }
6957 + }
6958 +
6959 ++void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
6960 ++{
6961 ++ kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
6962 ++}
6963 ++
6964 + static int kvm_iommu_unmap_memslots(struct kvm *kvm)
6965 + {
6966 + int i, idx;
6967 +@@ -317,10 +322,9 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
6968 + idx = srcu_read_lock(&kvm->srcu);
6969 + slots = kvm_memslots(kvm);
6970 +
6971 +- for (i = 0; i < slots->nmemslots; i++) {
6972 +- kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
6973 +- slots->memslots[i].npages);
6974 +- }
6975 ++ for (i = 0; i < slots->nmemslots; i++)
6976 ++ kvm_iommu_unmap_pages(kvm, &slots->memslots[i]);
6977 ++
6978 + srcu_read_unlock(&kvm->srcu, idx);
6979 +
6980 + return 0;
6981 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
6982 +index d9cfb78..e401c1b 100644
6983 +--- a/virt/kvm/kvm_main.c
6984 ++++ b/virt/kvm/kvm_main.c
6985 +@@ -802,12 +802,13 @@ skip_lpage:
6986 + if (r)
6987 + goto out_free;
6988 +
6989 +- /* map the pages in iommu page table */
6990 ++ /* map/unmap the pages in iommu page table */
6991 + if (npages) {
6992 + r = kvm_iommu_map_pages(kvm, &new);
6993 + if (r)
6994 + goto out_free;
6995 +- }
6996 ++ } else
6997 ++ kvm_iommu_unmap_pages(kvm, &old);
6998 +
6999 + r = -ENOMEM;
7000 + slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
7001
7002 diff --git a/3.2.16/4420_grsecurity-2.9-3.2.16-201205071838.patch b/3.2.17/4420_grsecurity-2.9-3.2.17-201205131657.patch
7003 similarity index 99%
7004 rename from 3.2.16/4420_grsecurity-2.9-3.2.16-201205071838.patch
7005 rename to 3.2.17/4420_grsecurity-2.9-3.2.17-201205131657.patch
7006 index 390b567..8ddeecb 100644
7007 --- a/3.2.16/4420_grsecurity-2.9-3.2.16-201205071838.patch
7008 +++ b/3.2.17/4420_grsecurity-2.9-3.2.17-201205131657.patch
7009 @@ -195,7 +195,7 @@ index 81c287f..d456d02 100644
7010
7011 pcd. [PARIDE]
7012 diff --git a/Makefile b/Makefile
7013 -index 3da29cb..47b7468 100644
7014 +index 4c4efa3..1171c69 100644
7015 --- a/Makefile
7016 +++ b/Makefile
7017 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
7018 @@ -1454,6 +1454,34 @@ index 984014b..a6d914f 100644
7019 #endif /* __ASSEMBLY__ */
7020
7021 #define arch_align_stack(x) (x)
7022 +diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
7023 +index 7b5cc8d..5d70d88 100644
7024 +--- a/arch/arm/include/asm/thread_info.h
7025 ++++ b/arch/arm/include/asm/thread_info.h
7026 +@@ -139,6 +139,12 @@ extern void vfp_flush_hwstate(struct thread_info *);
7027 + #define TIF_NEED_RESCHED 1
7028 + #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
7029 + #define TIF_SYSCALL_TRACE 8
7030 ++
7031 ++/* within 8 bits of TIF_SYSCALL_TRACE
7032 ++ to meet flexible second operand requirements
7033 ++*/
7034 ++#define TIF_GRSEC_SETXID 9
7035 ++
7036 + #define TIF_POLLING_NRFLAG 16
7037 + #define TIF_USING_IWMMXT 17
7038 + #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
7039 +@@ -155,6 +161,10 @@ extern void vfp_flush_hwstate(struct thread_info *);
7040 + #define _TIF_FREEZE (1 << TIF_FREEZE)
7041 + #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
7042 + #define _TIF_SECCOMP (1 << TIF_SECCOMP)
7043 ++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
7044 ++
7045 ++/* Checks for any syscall work in entry-common.S */
7046 ++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_GRSEC_SETXID)
7047 +
7048 + /*
7049 + * Change these and you break ASM code in entry-common.S
7050 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
7051 index b293616..96310e5 100644
7052 --- a/arch/arm/include/asm/uaccess.h
7053 @@ -1528,6 +1556,28 @@ index 5b0bce6..becd81c 100644
7054 EXPORT_SYMBOL(__clear_user);
7055
7056 EXPORT_SYMBOL(__get_user_1);
7057 +diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
7058 +index b2a27b6..520889c 100644
7059 +--- a/arch/arm/kernel/entry-common.S
7060 ++++ b/arch/arm/kernel/entry-common.S
7061 +@@ -87,7 +87,7 @@ ENTRY(ret_from_fork)
7062 + get_thread_info tsk
7063 + ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
7064 + mov why, #1
7065 +- tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
7066 ++ tst r1, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
7067 + beq ret_slow_syscall
7068 + mov r1, sp
7069 + mov r0, #1 @ trace exit [IP = 1]
7070 +@@ -443,7 +443,7 @@ ENTRY(vector_swi)
7071 + 1:
7072 + #endif
7073 +
7074 +- tst r10, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
7075 ++ tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
7076 + bne __sys_trace
7077 +
7078 + cmp scno, #NR_syscalls @ check upper syscall limit
7079 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
7080 index 3d0c6fb..9d326fa 100644
7081 --- a/arch/arm/kernel/process.c
7082 @@ -1579,6 +1629,30 @@ index 3d0c6fb..9d326fa 100644
7083 #ifdef CONFIG_MMU
7084 /*
7085 * The vectors page is always readable from user space for the
7086 +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
7087 +index 90fa8b3..a3a2212 100644
7088 +--- a/arch/arm/kernel/ptrace.c
7089 ++++ b/arch/arm/kernel/ptrace.c
7090 +@@ -904,10 +904,19 @@ long arch_ptrace(struct task_struct *child, long request,
7091 + return ret;
7092 + }
7093 +
7094 ++#ifdef CONFIG_GRKERNSEC_SETXID
7095 ++extern void gr_delayed_cred_worker(void);
7096 ++#endif
7097 ++
7098 + asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
7099 + {
7100 + unsigned long ip;
7101 +
7102 ++#ifdef CONFIG_GRKERNSEC_SETXID
7103 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7104 ++ gr_delayed_cred_worker();
7105 ++#endif
7106 ++
7107 + if (!test_thread_flag(TIF_SYSCALL_TRACE))
7108 + return scno;
7109 + if (!(current->ptrace & PT_PTRACED))
7110 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
7111 index 8fc2c8f..064c150 100644
7112 --- a/arch/arm/kernel/setup.c
7113 @@ -2779,6 +2853,40 @@ index 6018c80..7c37203 100644
7114 +#define arch_align_stack(x) ((x) & ~0xfUL)
7115
7116 #endif /* _ASM_SYSTEM_H */
7117 +diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
7118 +index 97f8bf6..3986751 100644
7119 +--- a/arch/mips/include/asm/thread_info.h
7120 ++++ b/arch/mips/include/asm/thread_info.h
7121 +@@ -124,6 +124,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
7122 + #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
7123 + #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
7124 + #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
7125 ++/* li takes a 32bit immediate */
7126 ++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
7127 + #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
7128 +
7129 + #ifdef CONFIG_MIPS32_O32
7130 +@@ -148,15 +150,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
7131 + #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
7132 + #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
7133 + #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
7134 ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7135 ++
7136 ++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
7137 +
7138 + /* work to do in syscall_trace_leave() */
7139 +-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
7140 ++#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
7141 +
7142 + /* work to do on interrupt/exception return */
7143 + #define _TIF_WORK_MASK (0x0000ffef & \
7144 + ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
7145 + /* work to do on any return to u-space */
7146 +-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
7147 ++#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
7148 +
7149 + #endif /* __KERNEL__ */
7150 +
7151 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
7152 index 9fdd8bc..4bd7f1a 100644
7153 --- a/arch/mips/kernel/binfmt_elfn32.c
7154 @@ -2835,6 +2943,85 @@ index c47f96e..661d418 100644
7155 -
7156 - return sp & ALMASK;
7157 -}
7158 +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
7159 +index 4e6ea1f..0922422 100644
7160 +--- a/arch/mips/kernel/ptrace.c
7161 ++++ b/arch/mips/kernel/ptrace.c
7162 +@@ -529,6 +529,10 @@ static inline int audit_arch(void)
7163 + return arch;
7164 + }
7165 +
7166 ++#ifdef CONFIG_GRKERNSEC_SETXID
7167 ++extern void gr_delayed_cred_worker(void);
7168 ++#endif
7169 ++
7170 + /*
7171 + * Notification of system call entry/exit
7172 + * - triggered by current->work.syscall_trace
7173 +@@ -538,6 +542,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
7174 + /* do the secure computing check first */
7175 + secure_computing(regs->regs[2]);
7176 +
7177 ++#ifdef CONFIG_GRKERNSEC_SETXID
7178 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7179 ++ gr_delayed_cred_worker();
7180 ++#endif
7181 ++
7182 + if (!(current->ptrace & PT_PTRACED))
7183 + goto out;
7184 +
7185 +diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
7186 +index a632bc1..0b77c7c 100644
7187 +--- a/arch/mips/kernel/scall32-o32.S
7188 ++++ b/arch/mips/kernel/scall32-o32.S
7189 +@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
7190 +
7191 + stack_done:
7192 + lw t0, TI_FLAGS($28) # syscall tracing enabled?
7193 +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
7194 ++ li t1, _TIF_SYSCALL_WORK
7195 + and t0, t1
7196 + bnez t0, syscall_trace_entry # -> yes
7197 +
7198 +diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
7199 +index 3b5a5e9..e1ee86d 100644
7200 +--- a/arch/mips/kernel/scall64-64.S
7201 ++++ b/arch/mips/kernel/scall64-64.S
7202 +@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
7203 +
7204 + sd a3, PT_R26(sp) # save a3 for syscall restarting
7205 +
7206 +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
7207 ++ li t1, _TIF_SYSCALL_WORK
7208 + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
7209 + and t0, t1, t0
7210 + bnez t0, syscall_trace_entry
7211 +diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
7212 +index 6be6f70..1859577 100644
7213 +--- a/arch/mips/kernel/scall64-n32.S
7214 ++++ b/arch/mips/kernel/scall64-n32.S
7215 +@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
7216 +
7217 + sd a3, PT_R26(sp) # save a3 for syscall restarting
7218 +
7219 +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
7220 ++ li t1, _TIF_SYSCALL_WORK
7221 + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
7222 + and t0, t1, t0
7223 + bnez t0, n32_syscall_trace_entry
7224 +diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
7225 +index 5422855..74e63a3 100644
7226 +--- a/arch/mips/kernel/scall64-o32.S
7227 ++++ b/arch/mips/kernel/scall64-o32.S
7228 +@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
7229 + PTR 4b, bad_stack
7230 + .previous
7231 +
7232 +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
7233 ++ li t1, _TIF_SYSCALL_WORK
7234 + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
7235 + and t0, t1, t0
7236 + bnez t0, trace_a_syscall
7237 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7238 index 937cf33..adb39bb 100644
7239 --- a/arch/mips/mm/fault.c
7240 @@ -3677,6 +3864,41 @@ index e30a13d..2b7d994 100644
7241
7242 /* Used in very early kernel initialization. */
7243 extern unsigned long reloc_offset(void);
7244 +diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
7245 +index 836f231..8403cfb 100644
7246 +--- a/arch/powerpc/include/asm/thread_info.h
7247 ++++ b/arch/powerpc/include/asm/thread_info.h
7248 +@@ -104,7 +104,6 @@ static inline struct thread_info *current_thread_info(void)
7249 + #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
7250 + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
7251 + #define TIF_SINGLESTEP 8 /* singlestepping active */
7252 +-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
7253 + #define TIF_SECCOMP 10 /* secure computing */
7254 + #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
7255 + #define TIF_NOERROR 12 /* Force successful syscall return */
7256 +@@ -112,6 +111,9 @@ static inline struct thread_info *current_thread_info(void)
7257 + #define TIF_FREEZE 14 /* Freezing for suspend */
7258 + #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
7259 + #define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
7260 ++#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
7261 ++/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
7262 ++#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
7263 +
7264 + /* as above, but as bit values */
7265 + #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
7266 +@@ -130,8 +132,11 @@ static inline struct thread_info *current_thread_info(void)
7267 + #define _TIF_FREEZE (1<<TIF_FREEZE)
7268 + #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7269 + #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
7270 ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7271 ++
7272 + #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
7273 +- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
7274 ++ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT \
7275 ++ _TIF_GRSEC_SETXID)
7276 +
7277 + #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
7278 + _TIF_NOTIFY_RESUME)
7279 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
7280 index bd0fb84..a42a14b 100644
7281 --- a/arch/powerpc/include/asm/uaccess.h
7282 @@ -4053,6 +4275,45 @@ index 6457574..08b28d3 100644
7283 -
7284 - return ret;
7285 -}
7286 +diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
7287 +index 5de73db..a05f61c 100644
7288 +--- a/arch/powerpc/kernel/ptrace.c
7289 ++++ b/arch/powerpc/kernel/ptrace.c
7290 +@@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
7291 + return ret;
7292 + }
7293 +
7294 ++#ifdef CONFIG_GRKERNSEC_SETXID
7295 ++extern void gr_delayed_cred_worker(void);
7296 ++#endif
7297 ++
7298 + /*
7299 + * We must return the syscall number to actually look up in the table.
7300 + * This can be -1L to skip running any syscall at all.
7301 +@@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
7302 +
7303 + secure_computing(regs->gpr[0]);
7304 +
7305 ++#ifdef CONFIG_GRKERNSEC_SETXID
7306 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7307 ++ gr_delayed_cred_worker();
7308 ++#endif
7309 ++
7310 + if (test_thread_flag(TIF_SYSCALL_TRACE) &&
7311 + tracehook_report_syscall_entry(regs))
7312 + /*
7313 +@@ -1748,6 +1757,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
7314 + {
7315 + int step;
7316 +
7317 ++#ifdef CONFIG_GRKERNSEC_SETXID
7318 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7319 ++ gr_delayed_cred_worker();
7320 ++#endif
7321 ++
7322 + if (unlikely(current->audit_context))
7323 + audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
7324 + regs->result);
7325 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
7326 index 836a5a1..27289a3 100644
7327 --- a/arch/powerpc/kernel/signal_32.c
7328 @@ -5278,7 +5539,7 @@ index fa57532..e1a4c53 100644
7329
7330 /*
7331 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
7332 -index 60d86be..952dea1 100644
7333 +index 60d86be..6389ac8 100644
7334 --- a/arch/sparc/include/asm/thread_info_64.h
7335 +++ b/arch/sparc/include/asm/thread_info_64.h
7336 @@ -63,6 +63,8 @@ struct thread_info {
7337 @@ -5290,6 +5551,38 @@ index 60d86be..952dea1 100644
7338 unsigned long fpregs[0] __attribute__ ((aligned(64)));
7339 };
7340
7341 +@@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
7342 + #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
7343 + /* flag bit 6 is available */
7344 + #define TIF_32BIT 7 /* 32-bit binary */
7345 +-/* flag bit 8 is available */
7346 ++#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
7347 + #define TIF_SECCOMP 9 /* secure computing */
7348 + #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
7349 + #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
7350 ++
7351 + /* NOTE: Thread flags >= 12 should be ones we have no interest
7352 + * in using in assembly, else we can't use the mask as
7353 + * an immediate value in instructions such as andcc.
7354 +@@ -238,12 +241,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
7355 + #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7356 + #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
7357 + #define _TIF_FREEZE (1<<TIF_FREEZE)
7358 ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7359 +
7360 + #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
7361 + _TIF_DO_NOTIFY_RESUME_MASK | \
7362 + _TIF_NEED_RESCHED)
7363 + #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
7364 +
7365 ++#define _TIF_WORK_SYSCALL \
7366 ++ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
7367 ++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7368 ++
7369 ++
7370 + /*
7371 + * Thread-synchronous status.
7372 + *
7373 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
7374 index e88fbe5..96b0ce5 100644
7375 --- a/arch/sparc/include/asm/uaccess.h
7376 @@ -5500,6 +5793,45 @@ index 3739a06..48b2ff0 100644
7377 (void *) gp->tpc,
7378 (void *) gp->o7,
7379 (void *) gp->i7,
7380 +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
7381 +index 96ee50a..68ce124 100644
7382 +--- a/arch/sparc/kernel/ptrace_64.c
7383 ++++ b/arch/sparc/kernel/ptrace_64.c
7384 +@@ -1058,6 +1058,10 @@ long arch_ptrace(struct task_struct *child, long request,
7385 + return ret;
7386 + }
7387 +
7388 ++#ifdef CONFIG_GRKERNSEC_SETXID
7389 ++extern void gr_delayed_cred_worker(void);
7390 ++#endif
7391 ++
7392 + asmlinkage int syscall_trace_enter(struct pt_regs *regs)
7393 + {
7394 + int ret = 0;
7395 +@@ -1065,6 +1069,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
7396 + /* do the secure computing check first */
7397 + secure_computing(regs->u_regs[UREG_G1]);
7398 +
7399 ++#ifdef CONFIG_GRKERNSEC_SETXID
7400 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7401 ++ gr_delayed_cred_worker();
7402 ++#endif
7403 ++
7404 + if (test_thread_flag(TIF_SYSCALL_TRACE))
7405 + ret = tracehook_report_syscall_entry(regs);
7406 +
7407 +@@ -1086,6 +1095,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
7408 +
7409 + asmlinkage void syscall_trace_leave(struct pt_regs *regs)
7410 + {
7411 ++#ifdef CONFIG_GRKERNSEC_SETXID
7412 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7413 ++ gr_delayed_cred_worker();
7414 ++#endif
7415 ++
7416 + #ifdef CONFIG_AUDITSYSCALL
7417 + if (unlikely(current->audit_context)) {
7418 + unsigned long tstate = regs->tstate;
7419 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
7420 index 42b282f..28ce9f2 100644
7421 --- a/arch/sparc/kernel/sys_sparc_32.c
7422 @@ -5673,6 +6005,55 @@ index 441521a..b767073 100644
7423 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7424 mm->unmap_area = arch_unmap_area_topdown;
7425 }
7426 +diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
7427 +index 1d7e274..b39c527 100644
7428 +--- a/arch/sparc/kernel/syscalls.S
7429 ++++ b/arch/sparc/kernel/syscalls.S
7430 +@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
7431 + #endif
7432 + .align 32
7433 + 1: ldx [%g6 + TI_FLAGS], %l5
7434 +- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
7435 ++ andcc %l5, _TIF_WORK_SYSCALL, %g0
7436 + be,pt %icc, rtrap
7437 + nop
7438 + call syscall_trace_leave
7439 +@@ -179,7 +179,7 @@ linux_sparc_syscall32:
7440 +
7441 + srl %i5, 0, %o5 ! IEU1
7442 + srl %i2, 0, %o2 ! IEU0 Group
7443 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
7444 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0
7445 + bne,pn %icc, linux_syscall_trace32 ! CTI
7446 + mov %i0, %l5 ! IEU1
7447 + call %l7 ! CTI Group brk forced
7448 +@@ -202,7 +202,7 @@ linux_sparc_syscall:
7449 +
7450 + mov %i3, %o3 ! IEU1
7451 + mov %i4, %o4 ! IEU0 Group
7452 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
7453 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0
7454 + bne,pn %icc, linux_syscall_trace ! CTI Group
7455 + mov %i0, %l5 ! IEU0
7456 + 2: call %l7 ! CTI Group brk forced
7457 +@@ -226,7 +226,7 @@ ret_sys_call:
7458 +
7459 + cmp %o0, -ERESTART_RESTARTBLOCK
7460 + bgeu,pn %xcc, 1f
7461 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
7462 ++ andcc %l0, _TIF_WORK_SYSCALL, %l6
7463 + 80:
7464 + /* System call success, clear Carry condition code. */
7465 + andn %g3, %g2, %g3
7466 +@@ -241,7 +241,7 @@ ret_sys_call:
7467 + /* System call failure, set Carry condition code.
7468 + * Also, get abs(errno) to return to the process.
7469 + */
7470 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
7471 ++ andcc %l0, _TIF_WORK_SYSCALL, %l6
7472 + sub %g0, %o0, %o0
7473 + or %g3, %g2, %g3
7474 + stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
7475 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
7476 index 591f20c..0f1b925 100644
7477 --- a/arch/sparc/kernel/traps_32.c
7478 @@ -7544,7 +7925,7 @@ index 3a19d04..7c1d55a 100644
7479 #endif
7480
7481 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7482 -index 89bbf4e..869908e 100644
7483 +index e77f4e4..17e511f 100644
7484 --- a/arch/x86/boot/compressed/relocs.c
7485 +++ b/arch/x86/boot/compressed/relocs.c
7486 @@ -13,8 +13,11 @@
7487 @@ -7649,7 +8030,7 @@ index 89bbf4e..869908e 100644
7488 rel->r_info = elf32_to_cpu(rel->r_info);
7489 }
7490 }
7491 -@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
7492 +@@ -396,13 +440,13 @@ static void read_relocs(FILE *fp)
7493
7494 static void print_absolute_symbols(void)
7495 {
7496 @@ -7660,13 +8041,12 @@ index 89bbf4e..869908e 100644
7497 for (i = 0; i < ehdr.e_shnum; i++) {
7498 struct section *sec = &secs[i];
7499 char *sym_strtab;
7500 - Elf32_Sym *sh_symtab;
7501 - int j;
7502 + unsigned int j;
7503
7504 if (sec->shdr.sh_type != SHT_SYMTAB) {
7505 continue;
7506 -@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
7507 +@@ -429,14 +473,14 @@ static void print_absolute_symbols(void)
7508
7509 static void print_absolute_relocs(void)
7510 {
7511 @@ -7683,7 +8063,7 @@ index 89bbf4e..869908e 100644
7512 if (sec->shdr.sh_type != SHT_REL) {
7513 continue;
7514 }
7515 -@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
7516 +@@ -497,13 +541,13 @@ static void print_absolute_relocs(void)
7517
7518 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7519 {
7520 @@ -7699,7 +8079,7 @@ index 89bbf4e..869908e 100644
7521 struct section *sec = &secs[i];
7522
7523 if (sec->shdr.sh_type != SHT_REL) {
7524 -@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7525 +@@ -528,6 +572,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7526 !is_rel_reloc(sym_name(sym_strtab, sym))) {
7527 continue;
7528 }
7529 @@ -7722,7 +8102,7 @@ index 89bbf4e..869908e 100644
7530 switch (r_type) {
7531 case R_386_NONE:
7532 case R_386_PC32:
7533 -@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
7534 +@@ -569,7 +629,7 @@ static int cmp_relocs(const void *va, const void *vb)
7535
7536 static void emit_relocs(int as_text)
7537 {
7538 @@ -7731,7 +8111,7 @@ index 89bbf4e..869908e 100644
7539 /* Count how many relocations I have and allocate space for them. */
7540 reloc_count = 0;
7541 walk_relocs(count_reloc);
7542 -@@ -665,6 +725,7 @@ int main(int argc, char **argv)
7543 +@@ -663,6 +723,7 @@ int main(int argc, char **argv)
7544 fname, strerror(errno));
7545 }
7546 read_ehdr(fp);
7547 @@ -12161,7 +12541,7 @@ index 2d2f01c..f985723 100644
7548 /*
7549 * Force strict CPU ordering.
7550 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
7551 -index d7ef849..6af292e 100644
7552 +index d7ef849..b1b009a 100644
7553 --- a/arch/x86/include/asm/thread_info.h
7554 +++ b/arch/x86/include/asm/thread_info.h
7555 @@ -10,6 +10,7 @@
7556 @@ -12210,7 +12590,45 @@ index d7ef849..6af292e 100644
7557 #define init_stack (init_thread_union.stack)
7558
7559 #else /* !__ASSEMBLY__ */
7560 -@@ -170,45 +164,40 @@ struct thread_info {
7561 +@@ -95,6 +89,7 @@ struct thread_info {
7562 + #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
7563 + #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
7564 + #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
7565 ++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
7566 +
7567 + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
7568 + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
7569 +@@ -117,16 +112,17 @@ struct thread_info {
7570 + #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
7571 + #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
7572 + #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
7573 ++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
7574 +
7575 + /* work to do in syscall_trace_enter() */
7576 + #define _TIF_WORK_SYSCALL_ENTRY \
7577 + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
7578 +- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
7579 ++ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7580 +
7581 + /* work to do in syscall_trace_leave() */
7582 + #define _TIF_WORK_SYSCALL_EXIT \
7583 + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
7584 +- _TIF_SYSCALL_TRACEPOINT)
7585 ++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7586 +
7587 + /* work to do on interrupt/exception return */
7588 + #define _TIF_WORK_MASK \
7589 +@@ -136,7 +132,8 @@ struct thread_info {
7590 +
7591 + /* work to do on any return to user space */
7592 + #define _TIF_ALLWORK_MASK \
7593 +- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
7594 ++ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
7595 ++ _TIF_GRSEC_SETXID)
7596 +
7597 + /* Only used for 64 bit */
7598 + #define _TIF_DO_NOTIFY_MASK \
7599 +@@ -170,45 +167,40 @@ struct thread_info {
7600 ret; \
7601 })
7602
7603 @@ -12281,7 +12699,7 @@ index d7ef849..6af292e 100644
7604 /*
7605 * macros/functions for gaining access to the thread information structure
7606 * preempt_count needs to be 1 initially, until the scheduler is functional.
7607 -@@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
7608 +@@ -216,21 +208,8 @@ static inline struct thread_info *current_thread_info(void)
7609 #ifndef __ASSEMBLY__
7610 DECLARE_PER_CPU(unsigned long, kernel_stack);
7611
7612 @@ -12305,7 +12723,7 @@ index d7ef849..6af292e 100644
7613 #endif
7614
7615 #endif /* !X86_32 */
7616 -@@ -264,5 +240,16 @@ extern void arch_task_cache_init(void);
7617 +@@ -264,5 +243,16 @@ extern void arch_task_cache_init(void);
7618 extern void free_thread_info(struct thread_info *ti);
7619 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
7620 #define arch_task_cache_init arch_task_cache_init
7621 @@ -13612,7 +14030,7 @@ index 1f84794..e23f862 100644
7622 }
7623
7624 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
7625 -index f98d84c..e402a69 100644
7626 +index c4e3581..7e2f9d0 100644
7627 --- a/arch/x86/kernel/apic/apic.c
7628 +++ b/arch/x86/kernel/apic/apic.c
7629 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
7630 @@ -13624,7 +14042,7 @@ index f98d84c..e402a69 100644
7631
7632 int pic_mode;
7633
7634 -@@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
7635 +@@ -1857,7 +1857,7 @@ void smp_error_interrupt(struct pt_regs *regs)
7636 apic_write(APIC_ESR, 0);
7637 v1 = apic_read(APIC_ESR);
7638 ack_APIC_irq();
7639 @@ -14623,7 +15041,7 @@ index cd28a35..c72ed9a 100644
7640 #include <asm/processor.h>
7641 #include <asm/fcntl.h>
7642 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
7643 -index bcda816..b0cbdf9 100644
7644 +index bcda816..5c89791 100644
7645 --- a/arch/x86/kernel/entry_32.S
7646 +++ b/arch/x86/kernel/entry_32.S
7647 @@ -180,13 +180,146 @@
7648 @@ -14816,7 +15234,7 @@ index bcda816..b0cbdf9 100644
7649 +#ifdef CONFIG_PAX_KERNEXEC
7650 + jae resume_userspace
7651 +
7652 -+ PAX_EXIT_KERNEL
7653 ++ pax_exit_kernel
7654 + jmp resume_kernel
7655 +#else
7656 jb resume_kernel # not returning to v8086 or userspace
7657 @@ -18551,7 +18969,7 @@ index 6a364a6..b147d11 100644
7658 ip = *(u64 *)(fp+8);
7659 if (!in_sched_functions(ip))
7660 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
7661 -index 8252879..f367ec9 100644
7662 +index 8252879..39d15fc 100644
7663 --- a/arch/x86/kernel/ptrace.c
7664 +++ b/arch/x86/kernel/ptrace.c
7665 @@ -791,6 +791,10 @@ static int ioperm_active(struct task_struct *target,
7666 @@ -18600,6 +19018,41 @@ index 8252879..f367ec9 100644
7667 }
7668
7669 void user_single_step_siginfo(struct task_struct *tsk,
7670 +@@ -1360,6 +1364,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
7671 + # define IS_IA32 0
7672 + #endif
7673 +
7674 ++#ifdef CONFIG_GRKERNSEC_SETXID
7675 ++extern void gr_delayed_cred_worker(void);
7676 ++#endif
7677 ++
7678 + /*
7679 + * We must return the syscall number to actually look up in the table.
7680 + * This can be -1L to skip running any syscall at all.
7681 +@@ -1368,6 +1376,11 @@ long syscall_trace_enter(struct pt_regs *regs)
7682 + {
7683 + long ret = 0;
7684 +
7685 ++#ifdef CONFIG_GRKERNSEC_SETXID
7686 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7687 ++ gr_delayed_cred_worker();
7688 ++#endif
7689 ++
7690 + /*
7691 + * If we stepped into a sysenter/syscall insn, it trapped in
7692 + * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
7693 +@@ -1413,6 +1426,11 @@ void syscall_trace_leave(struct pt_regs *regs)
7694 + {
7695 + bool step;
7696 +
7697 ++#ifdef CONFIG_GRKERNSEC_SETXID
7698 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7699 ++ gr_delayed_cred_worker();
7700 ++#endif
7701 ++
7702 + if (unlikely(current->audit_context))
7703 + audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
7704 +
7705 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
7706 index 42eb330..139955c 100644
7707 --- a/arch/x86/kernel/pvclock.c
7708 @@ -18838,7 +19291,7 @@ index cf0ef98..e3f780b 100644
7709 bss_resource.start = virt_to_phys(&__bss_start);
7710 bss_resource.end = virt_to_phys(&__bss_stop)-1;
7711 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
7712 -index 71f4727..217419b 100644
7713 +index 5a98aa2..848d2be 100644
7714 --- a/arch/x86/kernel/setup_percpu.c
7715 +++ b/arch/x86/kernel/setup_percpu.c
7716 @@ -21,19 +21,17 @@
7717 @@ -18897,7 +19350,7 @@ index 71f4727..217419b 100644
7718 write_gdt_entry(get_cpu_gdt_table(cpu),
7719 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
7720 #endif
7721 -@@ -207,6 +209,11 @@ void __init setup_per_cpu_areas(void)
7722 +@@ -219,6 +221,11 @@ void __init setup_per_cpu_areas(void)
7723 /* alrighty, percpu areas up and running */
7724 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
7725 for_each_possible_cpu(cpu) {
7726 @@ -18909,7 +19362,7 @@ index 71f4727..217419b 100644
7727 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
7728 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
7729 per_cpu(cpu_number, cpu) = cpu;
7730 -@@ -247,6 +254,12 @@ void __init setup_per_cpu_areas(void)
7731 +@@ -259,6 +266,12 @@ void __init setup_per_cpu_areas(void)
7732 */
7733 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
7734 #endif
7735 @@ -20979,7 +21432,7 @@ index e8e7e0d..56fd1b0 100644
7736 movl %eax, (v)
7737 movl %edx, 4(v)
7738 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
7739 -index 391a083..d658e9f 100644
7740 +index 391a083..3a2cf39 100644
7741 --- a/arch/x86/lib/atomic64_cx8_32.S
7742 +++ b/arch/x86/lib/atomic64_cx8_32.S
7743 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
7744 @@ -21090,7 +21543,7 @@ index 391a083..d658e9f 100644
7745
7746 -.macro incdec_return func ins insc
7747 -ENTRY(atomic64_\func\()_return_cx8)
7748 -+.macro incdec_return func ins insc unchecked
7749 ++.macro incdec_return func ins insc unchecked=""
7750 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
7751 CFI_STARTPROC
7752 SAVE ebx
7753 @@ -24383,7 +24836,7 @@ index f4f29b1..5cac4fb 100644
7754
7755 return (void *)vaddr;
7756 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
7757 -index f581a18..29efd37 100644
7758 +index f581a18..a269cab 100644
7759 --- a/arch/x86/mm/hugetlbpage.c
7760 +++ b/arch/x86/mm/hugetlbpage.c
7761 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
7762 @@ -24459,7 +24912,7 @@ index f581a18..29efd37 100644
7763
7764 /* don't allow allocations above current base */
7765 if (mm->free_area_cache > base)
7766 -@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
7767 +@@ -321,64 +328,68 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
7768 largest_hole = 0;
7769 mm->free_area_cache = base;
7770 }
7771 @@ -24474,15 +24927,16 @@ index f581a18..29efd37 100644
7772 + addr = (mm->free_area_cache - len);
7773 do {
7774 + addr &= huge_page_mask(h);
7775 -+ vma = find_vma(mm, addr);
7776 /*
7777 * Lookup failure means no vma is above this address,
7778 * i.e. return with success:
7779 -- */
7780 + */
7781 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
7782 -- return addr;
7783 --
7784 -- /*
7785 ++ vma = find_vma(mm, addr);
7786 ++ if (!vma)
7787 + return addr;
7788 +
7789 + /*
7790 * new region fits between prev_vma->vm_end and
7791 * vma->vm_start, use it:
7792 */
7793 @@ -24554,7 +25008,7 @@ index f581a18..29efd37 100644
7794 mm->cached_hole_size = ~0UL;
7795 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
7796 len, pgoff, flags);
7797 -@@ -386,6 +392,7 @@ fail:
7798 +@@ -386,6 +397,7 @@ fail:
7799 /*
7800 * Restore the topdown base:
7801 */
7802 @@ -24562,7 +25016,7 @@ index f581a18..29efd37 100644
7803 mm->free_area_cache = base;
7804 mm->cached_hole_size = ~0UL;
7805
7806 -@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7807 +@@ -399,10 +411,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7808 struct hstate *h = hstate_file(file);
7809 struct mm_struct *mm = current->mm;
7810 struct vm_area_struct *vma;
7811 @@ -24583,7 +25037,7 @@ index f581a18..29efd37 100644
7812 return -ENOMEM;
7813
7814 if (flags & MAP_FIXED) {
7815 -@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7816 +@@ -414,8 +435,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7817 if (addr) {
7818 addr = ALIGN(addr, huge_page_size(h));
7819 vma = find_vma(mm, addr);
7820 @@ -25011,7 +25465,7 @@ index 29f7c6d..b46b35b 100644
7821 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
7822 size >> 10);
7823 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
7824 -index bbaaa00..0ad4539 100644
7825 +index bbaaa00..020e913 100644
7826 --- a/arch/x86/mm/init_64.c
7827 +++ b/arch/x86/mm/init_64.c
7828 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
7829 @@ -25128,6 +25582,15 @@ index bbaaa00..0ad4539 100644
7830 adr = (void *)(((unsigned long)adr) | left);
7831
7832 return adr;
7833 +@@ -546,7 +560,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
7834 + unmap_low_page(pmd);
7835 +
7836 + spin_lock(&init_mm.page_table_lock);
7837 +- pud_populate(&init_mm, pud, __va(pmd_phys));
7838 ++ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
7839 + spin_unlock(&init_mm.page_table_lock);
7840 + }
7841 + __flush_tlb_all();
7842 @@ -592,7 +606,7 @@ kernel_physical_mapping_init(unsigned long start,
7843 unmap_low_page(pud);
7844
7845 @@ -26908,10 +27371,10 @@ index 153407c..611cba9 100644
7846 -}
7847 -__setup("vdso=", vdso_setup);
7848 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
7849 -index 1f92865..c843b20 100644
7850 +index e7c920b..c9bdcf7 100644
7851 --- a/arch/x86/xen/enlighten.c
7852 +++ b/arch/x86/xen/enlighten.c
7853 -@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
7854 +@@ -86,8 +86,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
7855
7856 struct shared_info xen_dummy_shared_info;
7857
7858 @@ -26920,7 +27383,7 @@ index 1f92865..c843b20 100644
7859 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
7860 __read_mostly int xen_have_vector_callback;
7861 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
7862 -@@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
7863 +@@ -1030,7 +1028,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
7864 #endif
7865 };
7866
7867 @@ -26929,7 +27392,7 @@ index 1f92865..c843b20 100644
7868 {
7869 struct sched_shutdown r = { .reason = reason };
7870
7871 -@@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
7872 +@@ -1038,17 +1036,17 @@ static void xen_reboot(int reason)
7873 BUG();
7874 }
7875
7876 @@ -26950,7 +27413,7 @@ index 1f92865..c843b20 100644
7877 {
7878 xen_reboot(SHUTDOWN_poweroff);
7879 }
7880 -@@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
7881 +@@ -1154,7 +1152,17 @@ asmlinkage void __init xen_start_kernel(void)
7882 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
7883
7884 /* Work out if we support NX */
7885 @@ -26969,7 +27432,7 @@ index 1f92865..c843b20 100644
7886
7887 xen_setup_features();
7888
7889 -@@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
7890 +@@ -1185,13 +1193,6 @@ asmlinkage void __init xen_start_kernel(void)
7891
7892 machine_ops = xen_machine_ops;
7893
7894 @@ -26984,10 +27447,10 @@ index 1f92865..c843b20 100644
7895
7896 #ifdef CONFIG_ACPI_NUMA
7897 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
7898 -index 87f6673..e2555a6 100644
7899 +index ec3d603..fa4ed1b 100644
7900 --- a/arch/x86/xen/mmu.c
7901 +++ b/arch/x86/xen/mmu.c
7902 -@@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
7903 +@@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
7904 convert_pfn_mfn(init_level4_pgt);
7905 convert_pfn_mfn(level3_ident_pgt);
7906 convert_pfn_mfn(level3_kernel_pgt);
7907 @@ -26997,7 +27460,7 @@ index 87f6673..e2555a6 100644
7908
7909 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
7910 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
7911 -@@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
7912 +@@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
7913 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
7914 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
7915 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
7916 @@ -27009,7 +27472,7 @@ index 87f6673..e2555a6 100644
7917 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
7918 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
7919
7920 -@@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
7921 +@@ -1967,6 +1974,7 @@ static void __init xen_post_allocator_init(void)
7922 pv_mmu_ops.set_pud = xen_set_pud;
7923 #if PAGETABLE_LEVELS == 4
7924 pv_mmu_ops.set_pgd = xen_set_pgd;
7925 @@ -27017,7 +27480,7 @@ index 87f6673..e2555a6 100644
7926 #endif
7927
7928 /* This will work as long as patching hasn't happened yet
7929 -@@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
7930 +@@ -2048,6 +2056,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
7931 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
7932 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
7933 .set_pgd = xen_set_pgd_hyper,
7934 @@ -27026,10 +27489,10 @@ index 87f6673..e2555a6 100644
7935 .alloc_pud = xen_alloc_pmd_init,
7936 .release_pud = xen_release_pmd_init,
7937 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
7938 -index 041d4fe..7666b7e 100644
7939 +index 9a23fff..9dfee11ca 100644
7940 --- a/arch/x86/xen/smp.c
7941 +++ b/arch/x86/xen/smp.c
7942 -@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
7943 +@@ -209,11 +209,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
7944 {
7945 BUG_ON(smp_processor_id() != 0);
7946 native_smp_prepare_boot_cpu();
7947 @@ -27041,7 +27504,7 @@ index 041d4fe..7666b7e 100644
7948 xen_filter_cpu_maps();
7949 xen_setup_vcpu_info_placement();
7950 }
7951 -@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
7952 +@@ -290,12 +285,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
7953 gdt = get_cpu_gdt_table(cpu);
7954
7955 ctxt->flags = VGCF_IN_KERNEL;
7956 @@ -27057,7 +27520,7 @@ index 041d4fe..7666b7e 100644
7957 #else
7958 ctxt->gs_base_kernel = per_cpu_offset(cpu);
7959 #endif
7960 -@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
7961 +@@ -346,13 +341,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
7962 int rc;
7963
7964 per_cpu(current_task, cpu) = idle;
7965 @@ -27073,19 +27536,6 @@ index 041d4fe..7666b7e 100644
7966 #endif
7967 xen_setup_runstate_info(cpu);
7968 xen_setup_timer(cpu);
7969 -diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
7970 -index 79d7362..3e45aa0 100644
7971 ---- a/arch/x86/xen/xen-asm.S
7972 -+++ b/arch/x86/xen/xen-asm.S
7973 -@@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct)
7974 -
7975 - /* check for unmasked and pending */
7976 - cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
7977 -- jz 1f
7978 -+ jnz 1f
7979 - 2: call check_events
7980 - 1:
7981 - ENDPATCH(xen_restore_fl_direct)
7982 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
7983 index b040b0e..8cc4fe0 100644
7984 --- a/arch/x86/xen/xen-asm_32.S
7985 @@ -30676,7 +31126,7 @@ index ae294a0..1755461 100644
7986 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
7987 }
7988 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
7989 -index b9da890..cad1d98 100644
7990 +index a6c2f7a..0eea25d 100644
7991 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
7992 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
7993 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
7994 @@ -33705,7 +34155,7 @@ index 4720f68..78d1df7 100644
7995
7996 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
7997 diff --git a/drivers/md/md.c b/drivers/md/md.c
7998 -index 6f37aa4..8d49123 100644
7999 +index 065ab4f..653e6d8 100644
8000 --- a/drivers/md/md.c
8001 +++ b/drivers/md/md.c
8002 @@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
8003 @@ -35613,10 +36063,10 @@ index 1b7082d..c786773 100644
8004 if ((num_pages != size) ||
8005 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
8006 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
8007 -index 486b404..0d6677d 100644
8008 +index 3ed983c..a1bb418 100644
8009 --- a/drivers/net/ppp/ppp_generic.c
8010 +++ b/drivers/net/ppp/ppp_generic.c
8011 -@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8012 +@@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8013 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
8014 struct ppp_stats stats;
8015 struct ppp_comp_stats cstats;
8016 @@ -35624,7 +36074,7 @@ index 486b404..0d6677d 100644
8017
8018 switch (cmd) {
8019 case SIOCGPPPSTATS:
8020 -@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8021 +@@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8022 break;
8023
8024 case SIOCGPPPVER:
8025 @@ -37836,7 +38286,7 @@ index f64250e..1ee3049 100644
8026 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
8027 {},
8028 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
8029 -index 77eae99..b7cdcc9 100644
8030 +index b2ccdea..84cde75 100644
8031 --- a/drivers/spi/spi.c
8032 +++ b/drivers/spi/spi.c
8033 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
8034 @@ -42484,7 +42934,7 @@ index 7ee7ba4..0c61a60 100644
8035 goto out_sig;
8036 if (offset > inode->i_sb->s_maxbytes)
8037 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
8038 -index 6861f61..a25f010 100644
8039 +index e1fbdee..cd5ea56 100644
8040 --- a/fs/autofs4/waitq.c
8041 +++ b/fs/autofs4/waitq.c
8042 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
8043 @@ -44516,7 +44966,7 @@ index 608c1c3..7d040a8 100644
8044 return rc;
8045 }
8046 diff --git a/fs/exec.c b/fs/exec.c
8047 -index 3625464..ff895b9 100644
8048 +index 160cd2f..e74d2a6 100644
8049 --- a/fs/exec.c
8050 +++ b/fs/exec.c
8051 @@ -55,12 +55,28 @@
8052 @@ -44771,7 +45221,7 @@ index 3625464..ff895b9 100644
8053 set_fs(old_fs);
8054 return result;
8055 }
8056 -@@ -1067,6 +1099,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
8057 +@@ -1070,6 +1102,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
8058 perf_event_comm(tsk);
8059 }
8060
8061 @@ -44793,7 +45243,7 @@ index 3625464..ff895b9 100644
8062 int flush_old_exec(struct linux_binprm * bprm)
8063 {
8064 int retval;
8065 -@@ -1081,6 +1128,7 @@ int flush_old_exec(struct linux_binprm * bprm)
8066 +@@ -1084,6 +1131,7 @@ int flush_old_exec(struct linux_binprm * bprm)
8067
8068 set_mm_exe_file(bprm->mm, bprm->file);
8069
8070 @@ -44801,7 +45251,7 @@ index 3625464..ff895b9 100644
8071 /*
8072 * Release all of the old mmap stuff
8073 */
8074 -@@ -1112,10 +1160,6 @@ EXPORT_SYMBOL(would_dump);
8075 +@@ -1115,10 +1163,6 @@ EXPORT_SYMBOL(would_dump);
8076
8077 void setup_new_exec(struct linux_binprm * bprm)
8078 {
8079 @@ -44812,7 +45262,7 @@ index 3625464..ff895b9 100644
8080 arch_pick_mmap_layout(current->mm);
8081
8082 /* This is the point of no return */
8083 -@@ -1126,18 +1170,7 @@ void setup_new_exec(struct linux_binprm * bprm)
8084 +@@ -1129,18 +1173,7 @@ void setup_new_exec(struct linux_binprm * bprm)
8085 else
8086 set_dumpable(current->mm, suid_dumpable);
8087
8088 @@ -44832,7 +45282,7 @@ index 3625464..ff895b9 100644
8089
8090 /* Set the new mm task size. We have to do that late because it may
8091 * depend on TIF_32BIT which is only updated in flush_thread() on
8092 -@@ -1247,7 +1280,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
8093 +@@ -1250,7 +1283,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
8094 }
8095 rcu_read_unlock();
8096
8097 @@ -44841,7 +45291,7 @@ index 3625464..ff895b9 100644
8098 bprm->unsafe |= LSM_UNSAFE_SHARE;
8099 } else {
8100 res = -EAGAIN;
8101 -@@ -1442,6 +1475,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
8102 +@@ -1445,6 +1478,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
8103
8104 EXPORT_SYMBOL(search_binary_handler);
8105
8106 @@ -44870,7 +45320,7 @@ index 3625464..ff895b9 100644
8107 /*
8108 * sys_execve() executes a new program.
8109 */
8110 -@@ -1450,6 +1505,11 @@ static int do_execve_common(const char *filename,
8111 +@@ -1453,6 +1508,11 @@ static int do_execve_common(const char *filename,
8112 struct user_arg_ptr envp,
8113 struct pt_regs *regs)
8114 {
8115 @@ -44882,7 +45332,7 @@ index 3625464..ff895b9 100644
8116 struct linux_binprm *bprm;
8117 struct file *file;
8118 struct files_struct *displaced;
8119 -@@ -1457,6 +1517,8 @@ static int do_execve_common(const char *filename,
8120 +@@ -1460,6 +1520,8 @@ static int do_execve_common(const char *filename,
8121 int retval;
8122 const struct cred *cred = current_cred();
8123
8124 @@ -44891,7 +45341,7 @@ index 3625464..ff895b9 100644
8125 /*
8126 * We move the actual failure in case of RLIMIT_NPROC excess from
8127 * set*uid() to execve() because too many poorly written programs
8128 -@@ -1497,12 +1559,27 @@ static int do_execve_common(const char *filename,
8129 +@@ -1500,12 +1562,27 @@ static int do_execve_common(const char *filename,
8130 if (IS_ERR(file))
8131 goto out_unmark;
8132
8133 @@ -44919,7 +45369,7 @@ index 3625464..ff895b9 100644
8134 retval = bprm_mm_init(bprm);
8135 if (retval)
8136 goto out_file;
8137 -@@ -1519,24 +1596,65 @@ static int do_execve_common(const char *filename,
8138 +@@ -1522,24 +1599,65 @@ static int do_execve_common(const char *filename,
8139 if (retval < 0)
8140 goto out;
8141
8142 @@ -44989,7 +45439,7 @@ index 3625464..ff895b9 100644
8143 current->fs->in_exec = 0;
8144 current->in_execve = 0;
8145 acct_update_integrals(current);
8146 -@@ -1545,6 +1663,14 @@ static int do_execve_common(const char *filename,
8147 +@@ -1548,6 +1666,14 @@ static int do_execve_common(const char *filename,
8148 put_files_struct(displaced);
8149 return retval;
8150
8151 @@ -45004,7 +45454,7 @@ index 3625464..ff895b9 100644
8152 out:
8153 if (bprm->mm) {
8154 acct_arg_size(bprm, 0);
8155 -@@ -1618,7 +1744,7 @@ static int expand_corename(struct core_name *cn)
8156 +@@ -1621,7 +1747,7 @@ static int expand_corename(struct core_name *cn)
8157 {
8158 char *old_corename = cn->corename;
8159
8160 @@ -45013,7 +45463,7 @@ index 3625464..ff895b9 100644
8161 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
8162
8163 if (!cn->corename) {
8164 -@@ -1715,7 +1841,7 @@ static int format_corename(struct core_name *cn, long signr)
8165 +@@ -1718,7 +1844,7 @@ static int format_corename(struct core_name *cn, long signr)
8166 int pid_in_pattern = 0;
8167 int err = 0;
8168
8169 @@ -45022,7 +45472,7 @@ index 3625464..ff895b9 100644
8170 cn->corename = kmalloc(cn->size, GFP_KERNEL);
8171 cn->used = 0;
8172
8173 -@@ -1812,6 +1938,228 @@ out:
8174 +@@ -1815,6 +1941,228 @@ out:
8175 return ispipe;
8176 }
8177
8178 @@ -45251,7 +45701,7 @@ index 3625464..ff895b9 100644
8179 static int zap_process(struct task_struct *start, int exit_code)
8180 {
8181 struct task_struct *t;
8182 -@@ -2023,17 +2371,17 @@ static void wait_for_dump_helpers(struct file *file)
8183 +@@ -2026,17 +2374,17 @@ static void wait_for_dump_helpers(struct file *file)
8184 pipe = file->f_path.dentry->d_inode->i_pipe;
8185
8186 pipe_lock(pipe);
8187 @@ -45274,7 +45724,7 @@ index 3625464..ff895b9 100644
8188 pipe_unlock(pipe);
8189
8190 }
8191 -@@ -2094,7 +2442,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
8192 +@@ -2097,7 +2445,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
8193 int retval = 0;
8194 int flag = 0;
8195 int ispipe;
8196 @@ -45283,7 +45733,7 @@ index 3625464..ff895b9 100644
8197 struct coredump_params cprm = {
8198 .signr = signr,
8199 .regs = regs,
8200 -@@ -2109,6 +2457,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
8201 +@@ -2112,6 +2460,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
8202
8203 audit_core_dumps(signr);
8204
8205 @@ -45293,7 +45743,7 @@ index 3625464..ff895b9 100644
8206 binfmt = mm->binfmt;
8207 if (!binfmt || !binfmt->core_dump)
8208 goto fail;
8209 -@@ -2176,7 +2527,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
8210 +@@ -2179,7 +2530,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
8211 }
8212 cprm.limit = RLIM_INFINITY;
8213
8214 @@ -45302,7 +45752,7 @@ index 3625464..ff895b9 100644
8215 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
8216 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
8217 task_tgid_vnr(current), current->comm);
8218 -@@ -2203,6 +2554,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
8219 +@@ -2206,6 +2557,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
8220 } else {
8221 struct inode *inode;
8222
8223 @@ -45311,7 +45761,7 @@ index 3625464..ff895b9 100644
8224 if (cprm.limit < binfmt->min_coredump)
8225 goto fail_unlock;
8226
8227 -@@ -2246,7 +2599,7 @@ close_fail:
8228 +@@ -2249,7 +2602,7 @@ close_fail:
8229 filp_close(cprm.file, NULL);
8230 fail_dropcount:
8231 if (ispipe)
8232 @@ -45320,7 +45770,7 @@ index 3625464..ff895b9 100644
8233 fail_unlock:
8234 kfree(cn.corename);
8235 fail_corename:
8236 -@@ -2265,7 +2618,7 @@ fail:
8237 +@@ -2268,7 +2621,7 @@ fail:
8238 */
8239 int dump_write(struct file *file, const void *addr, int nr)
8240 {
8241 @@ -47143,50 +47593,6 @@ index cfd4959..a780959 100644
8242 if (!IS_ERR(s))
8243 kfree(s);
8244 }
8245 -diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
8246 -index 4dfbfec..ec2a9c2 100644
8247 ---- a/fs/hfsplus/catalog.c
8248 -+++ b/fs/hfsplus/catalog.c
8249 -@@ -366,6 +366,10 @@ int hfsplus_rename_cat(u32 cnid,
8250 - err = hfs_brec_find(&src_fd);
8251 - if (err)
8252 - goto out;
8253 -+ if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
8254 -+ err = -EIO;
8255 -+ goto out;
8256 -+ }
8257 -
8258 - hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
8259 - src_fd.entrylength);
8260 -diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
8261 -index 4536cd3..5adb740 100644
8262 ---- a/fs/hfsplus/dir.c
8263 -+++ b/fs/hfsplus/dir.c
8264 -@@ -150,6 +150,11 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
8265 - filp->f_pos++;
8266 - /* fall through */
8267 - case 1:
8268 -+ if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
8269 -+ err = -EIO;
8270 -+ goto out;
8271 -+ }
8272 -+
8273 - hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
8274 - fd.entrylength);
8275 - if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
8276 -@@ -181,6 +186,12 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
8277 - err = -EIO;
8278 - goto out;
8279 - }
8280 -+
8281 -+ if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
8282 -+ err = -EIO;
8283 -+ goto out;
8284 -+ }
8285 -+
8286 - hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
8287 - fd.entrylength);
8288 - type = be16_to_cpu(entry.type);
8289 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
8290 index 2d0ca24..c4b8676511 100644
8291 --- a/fs/hugetlbfs/inode.c
8292 @@ -47965,7 +48371,7 @@ index 50a15fa..ca113f9 100644
8293
8294 void nfs_fattr_init(struct nfs_fattr *fattr)
8295 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
8296 -index 7a2e442..8e544cc 100644
8297 +index 5c3cd82..ed535e5 100644
8298 --- a/fs/nfsd/vfs.c
8299 +++ b/fs/nfsd/vfs.c
8300 @@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
8301 @@ -48109,7 +48515,7 @@ index d355e6e..578d905 100644
8302
8303 enum ocfs2_local_alloc_state
8304 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
8305 -index ba5d97e..c77db25 100644
8306 +index f169da4..9112253 100644
8307 --- a/fs/ocfs2/suballoc.c
8308 +++ b/fs/ocfs2/suballoc.c
8309 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
8310 @@ -48345,10 +48751,10 @@ index bd8ae78..539d250 100644
8311 ldm_crit ("Out of memory.");
8312 return false;
8313 diff --git a/fs/pipe.c b/fs/pipe.c
8314 -index 4065f07..68c0706 100644
8315 +index 05ed5ca..ab15592 100644
8316 --- a/fs/pipe.c
8317 +++ b/fs/pipe.c
8318 -@@ -420,9 +420,9 @@ redo:
8319 +@@ -437,9 +437,9 @@ redo:
8320 }
8321 if (bufs) /* More to do? */
8322 continue;
8323 @@ -48360,7 +48766,7 @@ index 4065f07..68c0706 100644
8324 /* syscall merging: Usually we must not sleep
8325 * if O_NONBLOCK is set, or if we got some data.
8326 * But if a writer sleeps in kernel space, then
8327 -@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
8328 +@@ -503,7 +503,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
8329 mutex_lock(&inode->i_mutex);
8330 pipe = inode->i_pipe;
8331
8332 @@ -48369,7 +48775,7 @@ index 4065f07..68c0706 100644
8333 send_sig(SIGPIPE, current, 0);
8334 ret = -EPIPE;
8335 goto out;
8336 -@@ -530,7 +530,7 @@ redo1:
8337 +@@ -552,7 +552,7 @@ redo1:
8338 for (;;) {
8339 int bufs;
8340
8341 @@ -48378,7 +48784,7 @@ index 4065f07..68c0706 100644
8342 send_sig(SIGPIPE, current, 0);
8343 if (!ret)
8344 ret = -EPIPE;
8345 -@@ -616,9 +616,9 @@ redo2:
8346 +@@ -643,9 +643,9 @@ redo2:
8347 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
8348 do_wakeup = 0;
8349 }
8350 @@ -48390,7 +48796,7 @@ index 4065f07..68c0706 100644
8351 }
8352 out:
8353 mutex_unlock(&inode->i_mutex);
8354 -@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
8355 +@@ -712,7 +712,7 @@ pipe_poll(struct file *filp, poll_table *wait)
8356 mask = 0;
8357 if (filp->f_mode & FMODE_READ) {
8358 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
8359 @@ -48399,7 +48805,7 @@ index 4065f07..68c0706 100644
8360 mask |= POLLHUP;
8361 }
8362
8363 -@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
8364 +@@ -722,7 +722,7 @@ pipe_poll(struct file *filp, poll_table *wait)
8365 * Most Unices do not set POLLERR for FIFOs but on Linux they
8366 * behave exactly like pipes for poll().
8367 */
8368 @@ -48408,7 +48814,7 @@ index 4065f07..68c0706 100644
8369 mask |= POLLERR;
8370 }
8371
8372 -@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
8373 +@@ -736,10 +736,10 @@ pipe_release(struct inode *inode, int decr, int decw)
8374
8375 mutex_lock(&inode->i_mutex);
8376 pipe = inode->i_pipe;
8377 @@ -48422,7 +48828,7 @@ index 4065f07..68c0706 100644
8378 free_pipe_info(inode);
8379 } else {
8380 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
8381 -@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
8382 +@@ -829,7 +829,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
8383
8384 if (inode->i_pipe) {
8385 ret = 0;
8386 @@ -48431,7 +48837,7 @@ index 4065f07..68c0706 100644
8387 }
8388
8389 mutex_unlock(&inode->i_mutex);
8390 -@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
8391 +@@ -846,7 +846,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
8392
8393 if (inode->i_pipe) {
8394 ret = 0;
8395 @@ -48440,7 +48846,7 @@ index 4065f07..68c0706 100644
8396 }
8397
8398 mutex_unlock(&inode->i_mutex);
8399 -@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
8400 +@@ -864,9 +864,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
8401 if (inode->i_pipe) {
8402 ret = 0;
8403 if (filp->f_mode & FMODE_READ)
8404 @@ -48452,7 +48858,7 @@ index 4065f07..68c0706 100644
8405 }
8406
8407 mutex_unlock(&inode->i_mutex);
8408 -@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
8409 +@@ -958,7 +958,7 @@ void free_pipe_info(struct inode *inode)
8410 inode->i_pipe = NULL;
8411 }
8412
8413 @@ -48461,7 +48867,7 @@ index 4065f07..68c0706 100644
8414
8415 /*
8416 * pipefs_dname() is called from d_path().
8417 -@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
8418 +@@ -988,7 +988,8 @@ static struct inode * get_pipe_inode(void)
8419 goto fail_iput;
8420 inode->i_pipe = pipe;
8421
8422 @@ -49865,10 +50271,10 @@ index dba43c3..4b3f701 100644
8423
8424 if (op) {
8425 diff --git a/fs/splice.c b/fs/splice.c
8426 -index fa2defa..8601650 100644
8427 +index 6d0dfb8..115bb3a 100644
8428 --- a/fs/splice.c
8429 +++ b/fs/splice.c
8430 -@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
8431 +@@ -195,7 +195,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
8432 pipe_lock(pipe);
8433
8434 for (;;) {
8435 @@ -49877,7 +50283,7 @@ index fa2defa..8601650 100644
8436 send_sig(SIGPIPE, current, 0);
8437 if (!ret)
8438 ret = -EPIPE;
8439 -@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
8440 +@@ -249,9 +249,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
8441 do_wakeup = 0;
8442 }
8443
8444 @@ -49889,7 +50295,7 @@ index fa2defa..8601650 100644
8445 }
8446
8447 pipe_unlock(pipe);
8448 -@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
8449 +@@ -561,7 +561,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
8450 old_fs = get_fs();
8451 set_fs(get_ds());
8452 /* The cast to a user pointer is valid due to the set_fs() */
8453 @@ -49898,7 +50304,7 @@ index fa2defa..8601650 100644
8454 set_fs(old_fs);
8455
8456 return res;
8457 -@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
8458 +@@ -576,7 +576,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
8459 old_fs = get_fs();
8460 set_fs(get_ds());
8461 /* The cast to a user pointer is valid due to the set_fs() */
8462 @@ -49907,7 +50313,7 @@ index fa2defa..8601650 100644
8463 set_fs(old_fs);
8464
8465 return res;
8466 -@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
8467 +@@ -627,7 +627,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
8468 goto err;
8469
8470 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
8471 @@ -49916,7 +50322,7 @@ index fa2defa..8601650 100644
8472 vec[i].iov_len = this_len;
8473 spd.pages[i] = page;
8474 spd.nr_pages++;
8475 -@@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
8476 +@@ -849,10 +849,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
8477 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
8478 {
8479 while (!pipe->nrbufs) {
8480 @@ -49929,7 +50335,7 @@ index fa2defa..8601650 100644
8481 return 0;
8482
8483 if (sd->flags & SPLICE_F_NONBLOCK)
8484 -@@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
8485 +@@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
8486 * out of the pipe right after the splice_to_pipe(). So set
8487 * PIPE_READERS appropriately.
8488 */
8489 @@ -49938,7 +50344,7 @@ index fa2defa..8601650 100644
8490
8491 current->splice_pipe = pipe;
8492 }
8493 -@@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
8494 +@@ -1737,9 +1737,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
8495 ret = -ERESTARTSYS;
8496 break;
8497 }
8498 @@ -49950,7 +50356,7 @@ index fa2defa..8601650 100644
8499 if (flags & SPLICE_F_NONBLOCK) {
8500 ret = -EAGAIN;
8501 break;
8502 -@@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
8503 +@@ -1771,7 +1771,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
8504 pipe_lock(pipe);
8505
8506 while (pipe->nrbufs >= pipe->buffers) {
8507 @@ -49959,7 +50365,7 @@ index fa2defa..8601650 100644
8508 send_sig(SIGPIPE, current, 0);
8509 ret = -EPIPE;
8510 break;
8511 -@@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
8512 +@@ -1784,9 +1784,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
8513 ret = -ERESTARTSYS;
8514 break;
8515 }
8516 @@ -49971,7 +50377,7 @@ index fa2defa..8601650 100644
8517 }
8518
8519 pipe_unlock(pipe);
8520 -@@ -1819,14 +1819,14 @@ retry:
8521 +@@ -1822,14 +1822,14 @@ retry:
8522 pipe_double_lock(ipipe, opipe);
8523
8524 do {
8525 @@ -49988,7 +50394,7 @@ index fa2defa..8601650 100644
8526 break;
8527
8528 /*
8529 -@@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
8530 +@@ -1926,7 +1926,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
8531 pipe_double_lock(ipipe, opipe);
8532
8533 do {
8534 @@ -49997,7 +50403,7 @@ index fa2defa..8601650 100644
8535 send_sig(SIGPIPE, current, 0);
8536 if (!ret)
8537 ret = -EPIPE;
8538 -@@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
8539 +@@ -1971,7 +1971,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
8540 * return EAGAIN if we have the potential of some data in the
8541 * future, otherwise just return 0
8542 */
8543 @@ -50306,10 +50712,10 @@ index 23ce927..e274cc1 100644
8544 kfree(s);
8545 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
8546 new file mode 100644
8547 -index 0000000..4089e05
8548 +index 0000000..2645296
8549 --- /dev/null
8550 +++ b/grsecurity/Kconfig
8551 -@@ -0,0 +1,1078 @@
8552 +@@ -0,0 +1,1079 @@
8553 +#
8554 +# grecurity configuration
8555 +#
8556 @@ -50444,7 +50850,7 @@ index 0000000..4089e05
8557 + select GRKERNSEC_PROC_ADD
8558 + select GRKERNSEC_CHROOT_CHMOD
8559 + select GRKERNSEC_CHROOT_NICE
8560 -+ select GRKERNSEC_SETXID
8561 ++ select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS)
8562 + select GRKERNSEC_AUDIT_MOUNT
8563 + select GRKERNSEC_MODHARDEN if (MODULES)
8564 + select GRKERNSEC_HARDEN_PTRACE
8565 @@ -51139,6 +51545,7 @@ index 0000000..4089e05
8566 +
8567 +config GRKERNSEC_SETXID
8568 + bool "Enforce consistent multithreaded privileges"
8569 ++ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
8570 + help
8571 + If you say Y here, a change from a root uid to a non-root uid
8572 + in a multithreaded application will cause the resulting uids,
8573 @@ -51434,10 +51841,10 @@ index 0000000..1b9afa9
8574 +endif
8575 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
8576 new file mode 100644
8577 -index 0000000..50b4257
8578 +index 0000000..e22066e
8579 --- /dev/null
8580 +++ b/grsecurity/gracl.c
8581 -@@ -0,0 +1,4185 @@
8582 +@@ -0,0 +1,4186 @@
8583 +#include <linux/kernel.h>
8584 +#include <linux/module.h>
8585 +#include <linux/sched.h>
8586 @@ -55288,21 +55695,22 @@ index 0000000..50b4257
8587 + if (unlikely(!(gr_status & GR_READY)))
8588 + return 0;
8589 +#endif
8590 ++ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
8591 ++ read_lock(&tasklist_lock);
8592 ++ while (tmp->pid > 0) {
8593 ++ if (tmp == curtemp)
8594 ++ break;
8595 ++ tmp = tmp->real_parent;
8596 ++ }
8597 +
8598 -+ read_lock(&tasklist_lock);
8599 -+ while (tmp->pid > 0) {
8600 -+ if (tmp == curtemp)
8601 -+ break;
8602 -+ tmp = tmp->real_parent;
8603 -+ }
8604 -+
8605 -+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
8606 -+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
8607 ++ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
8608 ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
8609 ++ read_unlock(&tasklist_lock);
8610 ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
8611 ++ return 1;
8612 ++ }
8613 + read_unlock(&tasklist_lock);
8614 -+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
8615 -+ return 1;
8616 + }
8617 -+ read_unlock(&tasklist_lock);
8618 +
8619 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
8620 + if (!(gr_status & GR_READY))
8621 @@ -61396,10 +61804,10 @@ index e13117c..e9fc938 100644
8622 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
8623
8624 diff --git a/include/linux/efi.h b/include/linux/efi.h
8625 -index 2362a0b..cfaf8fcc 100644
8626 +index 1328d8c..2cd894c 100644
8627 --- a/include/linux/efi.h
8628 +++ b/include/linux/efi.h
8629 -@@ -446,7 +446,7 @@ struct efivar_operations {
8630 +@@ -457,7 +457,7 @@ struct efivar_operations {
8631 efi_get_variable_t *get_variable;
8632 efi_get_next_variable_t *get_next_variable;
8633 efi_set_variable_t *set_variable;
8634 @@ -62939,7 +63347,7 @@ index b16f653..eb908f4 100644
8635 #define request_module_nowait(mod...) __request_module(false, mod)
8636 #define try_then_request_module(x, mod...) \
8637 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
8638 -index d526231..c9599fc 100644
8639 +index 35410ef..9f98b23 100644
8640 --- a/include/linux/kvm_host.h
8641 +++ b/include/linux/kvm_host.h
8642 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
8643 @@ -62987,7 +63395,7 @@ index d526231..c9599fc 100644
8644 void kvm_arch_exit(void);
8645
8646 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
8647 -@@ -690,7 +690,7 @@ int kvm_setup_default_irq_routing(struct kvm *kvm);
8648 +@@ -696,7 +696,7 @@ int kvm_setup_default_irq_routing(struct kvm *kvm);
8649 int kvm_set_irq_routing(struct kvm *kvm,
8650 const struct kvm_irq_routing_entry *entries,
8651 unsigned nr,
8652 @@ -63521,7 +63929,7 @@ index ffc0213..2c1f2cb 100644
8653 return nd->saved_names[nd->depth];
8654 }
8655 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
8656 -index a82ad4d..90d15b7 100644
8657 +index cbeb586..eba9b27 100644
8658 --- a/include/linux/netdevice.h
8659 +++ b/include/linux/netdevice.h
8660 @@ -949,6 +949,7 @@ struct net_device_ops {
8661 @@ -63646,10 +64054,10 @@ index 8fc7dd1a..c19d89e 100644
8662
8663 /*
8664 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
8665 -index 77257c9..51d473a 100644
8666 +index 0072a53..c5dcca5 100644
8667 --- a/include/linux/pipe_fs_i.h
8668 +++ b/include/linux/pipe_fs_i.h
8669 -@@ -46,9 +46,9 @@ struct pipe_buffer {
8670 +@@ -47,9 +47,9 @@ struct pipe_buffer {
8671 struct pipe_inode_info {
8672 wait_queue_head_t wait;
8673 unsigned int nrbufs, curbuf, buffers;
8674 @@ -64223,10 +64631,10 @@ index 92808b8..c28cac4 100644
8675
8676 /* shm_mode upper byte flags */
8677 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
8678 -index 6cf8b53..bcce844 100644
8679 +index e689b47..3404939 100644
8680 --- a/include/linux/skbuff.h
8681 +++ b/include/linux/skbuff.h
8682 -@@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
8683 +@@ -643,7 +643,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
8684 */
8685 static inline int skb_queue_empty(const struct sk_buff_head *list)
8686 {
8687 @@ -64235,7 +64643,7 @@ index 6cf8b53..bcce844 100644
8688 }
8689
8690 /**
8691 -@@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
8692 +@@ -656,7 +656,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
8693 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
8694 const struct sk_buff *skb)
8695 {
8696 @@ -64244,7 +64652,7 @@ index 6cf8b53..bcce844 100644
8697 }
8698
8699 /**
8700 -@@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
8701 +@@ -669,7 +669,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
8702 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
8703 const struct sk_buff *skb)
8704 {
8705 @@ -64253,7 +64661,7 @@ index 6cf8b53..bcce844 100644
8706 }
8707
8708 /**
8709 -@@ -1533,7 +1533,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
8710 +@@ -1546,7 +1546,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
8711 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
8712 */
8713 #ifndef NET_SKB_PAD
8714 @@ -66500,7 +66908,7 @@ index 42e8fa0..9e7406b 100644
8715 return -ENOMEM;
8716
8717 diff --git a/kernel/cred.c b/kernel/cred.c
8718 -index 48c6fd3..3342f00 100644
8719 +index 48c6fd3..8398912 100644
8720 --- a/kernel/cred.c
8721 +++ b/kernel/cred.c
8722 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
8723 @@ -66537,7 +66945,7 @@ index 48c6fd3..3342f00 100644
8724 /* dumpability changes */
8725 if (old->euid != new->euid ||
8726 old->egid != new->egid ||
8727 -@@ -540,6 +551,92 @@ int commit_creds(struct cred *new)
8728 +@@ -540,6 +551,101 @@ int commit_creds(struct cred *new)
8729 put_cred(old);
8730 return 0;
8731 }
8732 @@ -66603,6 +67011,8 @@ index 48c6fd3..3342f00 100644
8733 +int commit_creds(struct cred *new)
8734 +{
8735 +#ifdef CONFIG_GRKERNSEC_SETXID
8736 ++ int ret;
8737 ++ int schedule_it = 0;
8738 + struct task_struct *t;
8739 +
8740 + /* we won't get called with tasklist_lock held for writing
8741 @@ -66611,20 +67021,27 @@ index 48c6fd3..3342f00 100644
8742 + */
8743 + if (grsec_enable_setxid && !current_is_single_threaded() &&
8744 + !current_uid() && new->uid) {
8745 ++ schedule_it = 1;
8746 ++ }
8747 ++ ret = __commit_creds(new);
8748 ++ if (schedule_it) {
8749 + rcu_read_lock();
8750 + read_lock(&tasklist_lock);
8751 + for (t = next_thread(current); t != current;
8752 + t = next_thread(t)) {
8753 + if (t->delayed_cred == NULL) {
8754 + t->delayed_cred = get_cred(new);
8755 ++ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
8756 + set_tsk_need_resched(t);
8757 + }
8758 + }
8759 + read_unlock(&tasklist_lock);
8760 + rcu_read_unlock();
8761 + }
8762 -+#endif
8763 ++ return ret;
8764 ++#else
8765 + return __commit_creds(new);
8766 ++#endif
8767 +}
8768 +
8769 EXPORT_SYMBOL(commit_creds);
8770 @@ -66816,7 +67233,7 @@ index 58690af..d903d75 100644
8771
8772 /*
8773 diff --git a/kernel/exit.c b/kernel/exit.c
8774 -index e6e01b9..0a21b0a 100644
8775 +index 5a8a66e..ded4680 100644
8776 --- a/kernel/exit.c
8777 +++ b/kernel/exit.c
8778 @@ -57,6 +57,10 @@
8779 @@ -66868,7 +67285,7 @@ index e6e01b9..0a21b0a 100644
8780 /*
8781 * If we were started as result of loading a module, close all of the
8782 * user space pages. We don't need them, and if we didn't close them
8783 -@@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
8784 +@@ -874,6 +893,8 @@ NORET_TYPE void do_exit(long code)
8785 struct task_struct *tsk = current;
8786 int group_dead;
8787
8788 @@ -66877,7 +67294,7 @@ index e6e01b9..0a21b0a 100644
8789 profile_task_exit(tsk);
8790
8791 WARN_ON(blk_needs_flush_plug(tsk));
8792 -@@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
8793 +@@ -890,7 +911,6 @@ NORET_TYPE void do_exit(long code)
8794 * mm_release()->clear_child_tid() from writing to a user-controlled
8795 * kernel address.
8796 */
8797 @@ -66885,7 +67302,7 @@ index e6e01b9..0a21b0a 100644
8798
8799 ptrace_event(PTRACE_EVENT_EXIT, code);
8800
8801 -@@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
8802 +@@ -952,6 +972,9 @@ NORET_TYPE void do_exit(long code)
8803 tsk->exit_code = code;
8804 taskstats_exit(tsk, group_dead);
8805
8806 @@ -66895,7 +67312,7 @@ index e6e01b9..0a21b0a 100644
8807 exit_mm(tsk);
8808
8809 if (group_dead)
8810 -@@ -1068,7 +1091,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
8811 +@@ -1049,7 +1072,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
8812 * Take down every thread in the group. This is called by fatal signals
8813 * as well as by sys_exit_group (below).
8814 */
8815 @@ -69537,39 +69954,10 @@ index 3d9f31c..7fefc9e 100644
8816
8817 default:
8818 diff --git a/kernel/sched.c b/kernel/sched.c
8819 -index d6b149c..896cbb8 100644
8820 +index 299f55c..2b2e317 100644
8821 --- a/kernel/sched.c
8822 +++ b/kernel/sched.c
8823 -@@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
8824 - BUG(); /* the idle class will always have a runnable task */
8825 - }
8826 -
8827 -+#ifdef CONFIG_GRKERNSEC_SETXID
8828 -+extern void gr_delayed_cred_worker(void);
8829 -+static inline void gr_cred_schedule(void)
8830 -+{
8831 -+ if (unlikely(current->delayed_cred))
8832 -+ gr_delayed_cred_worker();
8833 -+}
8834 -+#else
8835 -+static inline void gr_cred_schedule(void)
8836 -+{
8837 -+}
8838 -+#endif
8839 -+
8840 - /*
8841 - * __schedule() is the main scheduler function.
8842 - */
8843 -@@ -4408,6 +4421,8 @@ need_resched:
8844 -
8845 - schedule_debug(prev);
8846 -
8847 -+ gr_cred_schedule();
8848 -+
8849 - if (sched_feat(HRTICK))
8850 - hrtick_clear(rq);
8851 -
8852 -@@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
8853 +@@ -5097,6 +5097,8 @@ int can_nice(const struct task_struct *p, const int nice)
8854 /* convert nice value [19,-20] to rlimit style value [1,40] */
8855 int nice_rlim = 20 - nice;
8856
8857 @@ -69578,7 +69966,7 @@ index d6b149c..896cbb8 100644
8858 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
8859 capable(CAP_SYS_NICE));
8860 }
8861 -@@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
8862 +@@ -5130,7 +5132,8 @@ SYSCALL_DEFINE1(nice, int, increment)
8863 if (nice > 19)
8864 nice = 19;
8865
8866 @@ -69588,7 +69976,7 @@ index d6b149c..896cbb8 100644
8867 return -EPERM;
8868
8869 retval = security_task_setnice(current, nice);
8870 -@@ -5288,6 +5306,7 @@ recheck:
8871 +@@ -5287,6 +5290,7 @@ recheck:
8872 unsigned long rlim_rtprio =
8873 task_rlimit(p, RLIMIT_RTPRIO);
8874
8875 @@ -69632,7 +70020,7 @@ index 8a39fa3..34f3dbc 100644
8876 int this_cpu = smp_processor_id();
8877 struct rq *this_rq = cpu_rq(this_cpu);
8878 diff --git a/kernel/signal.c b/kernel/signal.c
8879 -index 2065515..aed2987 100644
8880 +index 08e0b97..cdf6f49 100644
8881 --- a/kernel/signal.c
8882 +++ b/kernel/signal.c
8883 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
8884 @@ -69741,7 +70129,7 @@ index 2065515..aed2987 100644
8885
8886 return ret;
8887 }
8888 -@@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
8889 +@@ -2763,7 +2786,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
8890 int error = -ESRCH;
8891
8892 rcu_read_lock();
8893 @@ -70729,7 +71117,7 @@ index fd3c8aa..5f324a6 100644
8894 }
8895 entry = ring_buffer_event_data(event);
8896 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
8897 -index 5199930..26c73a0 100644
8898 +index 1dcf253..b31d45c 100644
8899 --- a/kernel/trace/trace_output.c
8900 +++ b/kernel/trace/trace_output.c
8901 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
8902 @@ -70934,6 +71322,28 @@ index 013a761..c28f3fc 100644
8903 #define free(a) kfree(a)
8904 #endif
8905
8906 +diff --git a/lib/ioremap.c b/lib/ioremap.c
8907 +index da4e2ad..6373b5f 100644
8908 +--- a/lib/ioremap.c
8909 ++++ b/lib/ioremap.c
8910 +@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
8911 + unsigned long next;
8912 +
8913 + phys_addr -= addr;
8914 +- pmd = pmd_alloc(&init_mm, pud, addr);
8915 ++ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
8916 + if (!pmd)
8917 + return -ENOMEM;
8918 + do {
8919 +@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
8920 + unsigned long next;
8921 +
8922 + phys_addr -= addr;
8923 +- pud = pud_alloc(&init_mm, pgd, addr);
8924 ++ pud = pud_alloc_kernel(&init_mm, pgd, addr);
8925 + if (!pud)
8926 + return -ENOMEM;
8927 + do {
8928 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
8929 index bd2bea9..6b3c95e 100644
8930 --- a/lib/is_single_threaded.c
8931 @@ -71500,7 +71910,7 @@ index 06d3479..0778eef 100644
8932 /* keep elevated page count for bad page */
8933 return ret;
8934 diff --git a/mm/memory.c b/mm/memory.c
8935 -index 1b1ca17..d49bd61 100644
8936 +index 1b1ca17..e6715dd 100644
8937 --- a/mm/memory.c
8938 +++ b/mm/memory.c
8939 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
8940 @@ -71627,7 +72037,29 @@ index 1b1ca17..d49bd61 100644
8941
8942 if (addr < vma->vm_start || addr >= vma->vm_end)
8943 return -EFAULT;
8944 -@@ -2453,6 +2466,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
8945 +@@ -2345,7 +2358,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
8946 +
8947 + BUG_ON(pud_huge(*pud));
8948 +
8949 +- pmd = pmd_alloc(mm, pud, addr);
8950 ++ pmd = (mm == &init_mm) ?
8951 ++ pmd_alloc_kernel(mm, pud, addr) :
8952 ++ pmd_alloc(mm, pud, addr);
8953 + if (!pmd)
8954 + return -ENOMEM;
8955 + do {
8956 +@@ -2365,7 +2380,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
8957 + unsigned long next;
8958 + int err;
8959 +
8960 +- pud = pud_alloc(mm, pgd, addr);
8961 ++ pud = (mm == &init_mm) ?
8962 ++ pud_alloc_kernel(mm, pgd, addr) :
8963 ++ pud_alloc(mm, pgd, addr);
8964 + if (!pud)
8965 + return -ENOMEM;
8966 + do {
8967 +@@ -2453,6 +2470,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
8968 copy_user_highpage(dst, src, va, vma);
8969 }
8970
8971 @@ -71814,7 +72246,7 @@ index 1b1ca17..d49bd61 100644
8972 /*
8973 * This routine handles present pages, when users try to write
8974 * to a shared page. It is done by copying the page to a new address
8975 -@@ -2664,6 +2857,12 @@ gotten:
8976 +@@ -2664,6 +2861,12 @@ gotten:
8977 */
8978 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
8979 if (likely(pte_same(*page_table, orig_pte))) {
8980 @@ -71827,7 +72259,7 @@ index 1b1ca17..d49bd61 100644
8981 if (old_page) {
8982 if (!PageAnon(old_page)) {
8983 dec_mm_counter_fast(mm, MM_FILEPAGES);
8984 -@@ -2715,6 +2914,10 @@ gotten:
8985 +@@ -2715,6 +2918,10 @@ gotten:
8986 page_remove_rmap(old_page);
8987 }
8988
8989 @@ -71838,7 +72270,7 @@ index 1b1ca17..d49bd61 100644
8990 /* Free the old page.. */
8991 new_page = old_page;
8992 ret |= VM_FAULT_WRITE;
8993 -@@ -2994,6 +3197,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
8994 +@@ -2994,6 +3201,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
8995 swap_free(entry);
8996 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
8997 try_to_free_swap(page);
8998 @@ -71850,7 +72282,7 @@ index 1b1ca17..d49bd61 100644
8999 unlock_page(page);
9000 if (swapcache) {
9001 /*
9002 -@@ -3017,6 +3225,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
9003 +@@ -3017,6 +3229,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
9004
9005 /* No need to invalidate - it was non-present before */
9006 update_mmu_cache(vma, address, page_table);
9007 @@ -71862,7 +72294,7 @@ index 1b1ca17..d49bd61 100644
9008 unlock:
9009 pte_unmap_unlock(page_table, ptl);
9010 out:
9011 -@@ -3036,40 +3249,6 @@ out_release:
9012 +@@ -3036,40 +3253,6 @@ out_release:
9013 }
9014
9015 /*
9016 @@ -71903,7 +72335,7 @@ index 1b1ca17..d49bd61 100644
9017 * We enter with non-exclusive mmap_sem (to exclude vma changes,
9018 * but allow concurrent faults), and pte mapped but not yet locked.
9019 * We return with mmap_sem still held, but pte unmapped and unlocked.
9020 -@@ -3078,27 +3257,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
9021 +@@ -3078,27 +3261,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
9022 unsigned long address, pte_t *page_table, pmd_t *pmd,
9023 unsigned int flags)
9024 {
9025 @@ -71936,7 +72368,7 @@ index 1b1ca17..d49bd61 100644
9026 if (unlikely(anon_vma_prepare(vma)))
9027 goto oom;
9028 page = alloc_zeroed_user_highpage_movable(vma, address);
9029 -@@ -3117,6 +3292,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
9030 +@@ -3117,6 +3296,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
9031 if (!pte_none(*page_table))
9032 goto release;
9033
9034 @@ -71948,7 +72380,7 @@ index 1b1ca17..d49bd61 100644
9035 inc_mm_counter_fast(mm, MM_ANONPAGES);
9036 page_add_new_anon_rmap(page, vma, address);
9037 setpte:
9038 -@@ -3124,6 +3304,12 @@ setpte:
9039 +@@ -3124,6 +3308,12 @@ setpte:
9040
9041 /* No need to invalidate - it was non-present before */
9042 update_mmu_cache(vma, address, page_table);
9043 @@ -71961,7 +72393,7 @@ index 1b1ca17..d49bd61 100644
9044 unlock:
9045 pte_unmap_unlock(page_table, ptl);
9046 return 0;
9047 -@@ -3267,6 +3453,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
9048 +@@ -3267,6 +3457,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
9049 */
9050 /* Only go through if we didn't race with anybody else... */
9051 if (likely(pte_same(*page_table, orig_pte))) {
9052 @@ -71974,7 +72406,7 @@ index 1b1ca17..d49bd61 100644
9053 flush_icache_page(vma, page);
9054 entry = mk_pte(page, vma->vm_page_prot);
9055 if (flags & FAULT_FLAG_WRITE)
9056 -@@ -3286,6 +3478,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
9057 +@@ -3286,6 +3482,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
9058
9059 /* no need to invalidate: a not-present page won't be cached */
9060 update_mmu_cache(vma, address, page_table);
9061 @@ -71989,7 +72421,7 @@ index 1b1ca17..d49bd61 100644
9062 } else {
9063 if (cow_page)
9064 mem_cgroup_uncharge_page(cow_page);
9065 -@@ -3439,6 +3639,12 @@ int handle_pte_fault(struct mm_struct *mm,
9066 +@@ -3439,6 +3643,12 @@ int handle_pte_fault(struct mm_struct *mm,
9067 if (flags & FAULT_FLAG_WRITE)
9068 flush_tlb_fix_spurious_fault(vma, address);
9069 }
9070 @@ -72002,7 +72434,7 @@ index 1b1ca17..d49bd61 100644
9071 unlock:
9072 pte_unmap_unlock(pte, ptl);
9073 return 0;
9074 -@@ -3455,6 +3661,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
9075 +@@ -3455,6 +3665,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
9076 pmd_t *pmd;
9077 pte_t *pte;
9078
9079 @@ -72013,7 +72445,7 @@ index 1b1ca17..d49bd61 100644
9080 __set_current_state(TASK_RUNNING);
9081
9082 count_vm_event(PGFAULT);
9083 -@@ -3466,6 +3676,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
9084 +@@ -3466,6 +3680,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
9085 if (unlikely(is_vm_hugetlb_page(vma)))
9086 return hugetlb_fault(mm, vma, address, flags);
9087
9088 @@ -72048,7 +72480,7 @@ index 1b1ca17..d49bd61 100644
9089 pgd = pgd_offset(mm, address);
9090 pud = pud_alloc(mm, pgd, address);
9091 if (!pud)
9092 -@@ -3495,7 +3733,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
9093 +@@ -3495,7 +3737,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
9094 * run pte_offset_map on the pmd, if an huge pmd could
9095 * materialize from under us from a different thread.
9096 */
9097 @@ -72057,7 +72489,7 @@ index 1b1ca17..d49bd61 100644
9098 return VM_FAULT_OOM;
9099 /* if an huge pmd materialized from under us just retry later */
9100 if (unlikely(pmd_trans_huge(*pmd)))
9101 -@@ -3532,6 +3770,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
9102 +@@ -3532,6 +3774,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
9103 spin_unlock(&mm->page_table_lock);
9104 return 0;
9105 }
9106 @@ -72081,7 +72513,7 @@ index 1b1ca17..d49bd61 100644
9107 #endif /* __PAGETABLE_PUD_FOLDED */
9108
9109 #ifndef __PAGETABLE_PMD_FOLDED
9110 -@@ -3562,6 +3817,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
9111 +@@ -3562,6 +3821,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
9112 spin_unlock(&mm->page_table_lock);
9113 return 0;
9114 }
9115 @@ -72112,7 +72544,7 @@ index 1b1ca17..d49bd61 100644
9116 #endif /* __PAGETABLE_PMD_FOLDED */
9117
9118 int make_pages_present(unsigned long addr, unsigned long end)
9119 -@@ -3599,7 +3878,7 @@ static int __init gate_vma_init(void)
9120 +@@ -3599,7 +3882,7 @@ static int __init gate_vma_init(void)
9121 gate_vma.vm_start = FIXADDR_USER_START;
9122 gate_vma.vm_end = FIXADDR_USER_END;
9123 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
9124 @@ -75714,10 +76146,10 @@ index 17b5b1c..826d872 100644
9125 }
9126 }
9127 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
9128 -index 8eb6b15..e3db7ab 100644
9129 +index 5ac1811..7eb2320 100644
9130 --- a/net/bridge/br_multicast.c
9131 +++ b/net/bridge/br_multicast.c
9132 -@@ -1488,7 +1488,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
9133 +@@ -1408,7 +1408,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
9134 nexthdr = ip6h->nexthdr;
9135 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
9136
9137 @@ -76073,7 +76505,7 @@ index 68bbf9f..5ef0d12 100644
9138
9139 return err;
9140 diff --git a/net/core/dev.c b/net/core/dev.c
9141 -index 55cd370..672cffa 100644
9142 +index cd5050e..b1b4530 100644
9143 --- a/net/core/dev.c
9144 +++ b/net/core/dev.c
9145 @@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
9146 @@ -76154,7 +76586,7 @@ index 55cd370..672cffa 100644
9147 {
9148 struct softnet_data *sd = &__get_cpu_var(softnet_data);
9149 unsigned long time_limit = jiffies + 2;
9150 -@@ -5956,7 +5960,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
9151 +@@ -5924,7 +5928,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
9152 } else {
9153 netdev_stats_to_stats64(storage, &dev->stats);
9154 }
9155 @@ -76278,28 +76710,6 @@ index ff52ad0..aff1c0f 100644
9156 i++, cmfptr++)
9157 {
9158 int new_fd;
9159 -diff --git a/net/core/skbuff.c b/net/core/skbuff.c
9160 -index 3c30ee4..29cb392 100644
9161 ---- a/net/core/skbuff.c
9162 -+++ b/net/core/skbuff.c
9163 -@@ -3111,6 +3111,8 @@ static void sock_rmem_free(struct sk_buff *skb)
9164 - */
9165 - int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
9166 - {
9167 -+ int len = skb->len;
9168 -+
9169 - if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
9170 - (unsigned)sk->sk_rcvbuf)
9171 - return -ENOMEM;
9172 -@@ -3125,7 +3127,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
9173 -
9174 - skb_queue_tail(&sk->sk_error_queue, skb);
9175 - if (!sock_flag(sk, SOCK_DEAD))
9176 -- sk->sk_data_ready(sk, skb->len);
9177 -+ sk->sk_data_ready(sk, len);
9178 - return 0;
9179 - }
9180 - EXPORT_SYMBOL(sock_queue_err_skb);
9181 diff --git a/net/core/sock.c b/net/core/sock.c
9182 index b23f174..b9a0d26 100644
9183 --- a/net/core/sock.c
9184 @@ -77312,7 +77722,7 @@ index 361ebf3..d5628fb 100644
9185
9186 static int raw6_seq_show(struct seq_file *seq, void *v)
9187 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
9188 -index b859e4a..f9d1589 100644
9189 +index 4a56574..9745b8a 100644
9190 --- a/net/ipv6/tcp_ipv6.c
9191 +++ b/net/ipv6/tcp_ipv6.c
9192 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
9193 @@ -77326,7 +77736,7 @@ index b859e4a..f9d1589 100644
9194 static void tcp_v6_hash(struct sock *sk)
9195 {
9196 if (sk->sk_state != TCP_CLOSE) {
9197 -@@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
9198 +@@ -1655,6 +1659,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
9199 return 0;
9200
9201 reset:
9202 @@ -77336,7 +77746,7 @@ index b859e4a..f9d1589 100644
9203 tcp_v6_send_reset(sk, skb);
9204 discard:
9205 if (opt_skb)
9206 -@@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
9207 +@@ -1734,12 +1741,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
9208 TCP_SKB_CB(skb)->sacked = 0;
9209
9210 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
9211 @@ -77359,7 +77769,7 @@ index b859e4a..f9d1589 100644
9212
9213 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
9214 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
9215 -@@ -1783,6 +1798,10 @@ no_tcp_socket:
9216 +@@ -1787,6 +1802,10 @@ no_tcp_socket:
9217 bad_packet:
9218 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9219 } else {
9220 @@ -77370,7 +77780,7 @@ index b859e4a..f9d1589 100644
9221 tcp_v6_send_reset(NULL, skb);
9222 }
9223
9224 -@@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
9225 +@@ -2047,7 +2066,13 @@ static void get_openreq6(struct seq_file *seq,
9226 uid,
9227 0, /* non standard timer */
9228 0, /* open_requests have no inode */
9229 @@ -77385,7 +77795,7 @@ index b859e4a..f9d1589 100644
9230 }
9231
9232 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
9233 -@@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
9234 +@@ -2097,7 +2122,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
9235 sock_i_uid(sp),
9236 icsk->icsk_probes_out,
9237 sock_i_ino(sp),
9238 @@ -77399,7 +77809,7 @@ index b859e4a..f9d1589 100644
9239 jiffies_to_clock_t(icsk->icsk_rto),
9240 jiffies_to_clock_t(icsk->icsk_ack.ato),
9241 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
9242 -@@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
9243 +@@ -2132,7 +2162,13 @@ static void get_timewait6_sock(struct seq_file *seq,
9244 dest->s6_addr32[2], dest->s6_addr32[3], destp,
9245 tw->tw_substate, 0, 0,
9246 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
9247 @@ -78097,7 +78507,7 @@ index 4fe4fb4..87a89e5 100644
9248 return 0;
9249 }
9250 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
9251 -index 1201b6d..bcff8c6 100644
9252 +index a99fb41..740c2a4 100644
9253 --- a/net/netlink/af_netlink.c
9254 +++ b/net/netlink/af_netlink.c
9255 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
9256 @@ -78109,7 +78519,7 @@ index 1201b6d..bcff8c6 100644
9257 }
9258
9259 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
9260 -@@ -1999,7 +1999,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
9261 +@@ -2001,7 +2001,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
9262 sk_wmem_alloc_get(s),
9263 nlk->cb,
9264 atomic_read(&s->sk_refcnt),
9265 @@ -78201,7 +78611,7 @@ index d65f699..05aa6ce 100644
9266
9267 err = proto_register(pp->prot, 1);
9268 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
9269 -index 2ba6e9f..409573f 100644
9270 +index 007546d..9a8e5c6 100644
9271 --- a/net/phonet/pep.c
9272 +++ b/net/phonet/pep.c
9273 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
9274 @@ -78679,10 +79089,10 @@ index 1e2eee8..ce3967e 100644
9275 assoc->assoc_id,
9276 assoc->sndbuf_used,
9277 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
9278 -index 54a7cd2..944edae 100644
9279 +index 0075554..42d36a1 100644
9280 --- a/net/sctp/socket.c
9281 +++ b/net/sctp/socket.c
9282 -@@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
9283 +@@ -4575,7 +4575,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
9284 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
9285 if (space_left < addrlen)
9286 return -ENOMEM;
9287 @@ -78692,7 +79102,7 @@ index 54a7cd2..944edae 100644
9288 to += addrlen;
9289 cnt++;
9290 diff --git a/net/socket.c b/net/socket.c
9291 -index 2dce67a..1e91168 100644
9292 +index 273cbce..fd1e8ff 100644
9293 --- a/net/socket.c
9294 +++ b/net/socket.c
9295 @@ -88,6 +88,7 @@
9296 @@ -79535,7 +79945,7 @@ index 0000000..8729101
9297 +#!/bin/sh
9298 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
9299 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
9300 -index f936d1f..a66d95f 100644
9301 +index d1d0ae8..6b73b2a 100644
9302 --- a/scripts/mod/file2alias.c
9303 +++ b/scripts/mod/file2alias.c
9304 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
9305 @@ -87078,21 +87488,6 @@ index 0000000..b87ec9d
9306 +
9307 + return 0;
9308 +}
9309 -diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
9310 -index adb372d..e0a0970 100644
9311 ---- a/tools/perf/util/hist.c
9312 -+++ b/tools/perf/util/hist.c
9313 -@@ -237,8 +237,8 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
9314 - * mis-adjust symbol addresses when computing
9315 - * the history counter to increment.
9316 - */
9317 -- if (he->ms.map != entry->ms.map) {
9318 -- he->ms.map = entry->ms.map;
9319 -+ if (he->ms.map != entry.ms.map) {
9320 -+ he->ms.map = entry.ms.map;
9321 - if (he->ms.map)
9322 - he->ms.map->referenced = true;
9323 - }
9324 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
9325 index 6789d78..4afd019 100644
9326 --- a/tools/perf/util/include/asm/alternative-asm.h
9327 @@ -87132,7 +87527,7 @@ index af0f22f..9a7d479 100644
9328 break;
9329 }
9330 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
9331 -index d9cfb78..4f27c10 100644
9332 +index e401c1b..8d4d5fa 100644
9333 --- a/virt/kvm/kvm_main.c
9334 +++ b/virt/kvm/kvm_main.c
9335 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
9336 @@ -87144,7 +87539,7 @@ index d9cfb78..4f27c10 100644
9337
9338 struct kmem_cache *kvm_vcpu_cache;
9339 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
9340 -@@ -2268,7 +2268,7 @@ static void hardware_enable_nolock(void *junk)
9341 +@@ -2269,7 +2269,7 @@ static void hardware_enable_nolock(void *junk)
9342
9343 if (r) {
9344 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
9345 @@ -87153,7 +87548,7 @@ index d9cfb78..4f27c10 100644
9346 printk(KERN_INFO "kvm: enabling virtualization on "
9347 "CPU%d failed\n", cpu);
9348 }
9349 -@@ -2322,10 +2322,10 @@ static int hardware_enable_all(void)
9350 +@@ -2323,10 +2323,10 @@ static int hardware_enable_all(void)
9351
9352 kvm_usage_count++;
9353 if (kvm_usage_count == 1) {
9354 @@ -87166,7 +87561,7 @@ index d9cfb78..4f27c10 100644
9355 hardware_disable_all_nolock();
9356 r = -EBUSY;
9357 }
9358 -@@ -2676,7 +2676,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
9359 +@@ -2677,7 +2677,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
9360 kvm_arch_vcpu_put(vcpu);
9361 }
9362
9363 @@ -87175,7 +87570,7 @@ index d9cfb78..4f27c10 100644
9364 struct module *module)
9365 {
9366 int r;
9367 -@@ -2739,7 +2739,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
9368 +@@ -2740,7 +2740,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
9369 if (!vcpu_align)
9370 vcpu_align = __alignof__(struct kvm_vcpu);
9371 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
9372 @@ -87184,7 +87579,7 @@ index d9cfb78..4f27c10 100644
9373 if (!kvm_vcpu_cache) {
9374 r = -ENOMEM;
9375 goto out_free_3;
9376 -@@ -2749,9 +2749,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
9377 +@@ -2750,9 +2750,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
9378 if (r)
9379 goto out_free;
9380
9381
9382 diff --git a/3.2.16/4430_grsec-remove-localversion-grsec.patch b/3.2.17/4430_grsec-remove-localversion-grsec.patch
9383 similarity index 100%
9384 rename from 3.2.16/4430_grsec-remove-localversion-grsec.patch
9385 rename to 3.2.17/4430_grsec-remove-localversion-grsec.patch
9386
9387 diff --git a/3.2.16/4435_grsec-mute-warnings.patch b/3.2.17/4435_grsec-mute-warnings.patch
9388 similarity index 100%
9389 rename from 3.2.16/4435_grsec-mute-warnings.patch
9390 rename to 3.2.17/4435_grsec-mute-warnings.patch
9391
9392 diff --git a/3.2.16/4440_grsec-remove-protected-paths.patch b/3.2.17/4440_grsec-remove-protected-paths.patch
9393 similarity index 100%
9394 rename from 3.2.16/4440_grsec-remove-protected-paths.patch
9395 rename to 3.2.17/4440_grsec-remove-protected-paths.patch
9396
9397 diff --git a/3.2.16/4445_grsec-pax-without-grsec.patch b/3.2.17/4445_grsec-pax-without-grsec.patch
9398 similarity index 100%
9399 rename from 3.2.16/4445_grsec-pax-without-grsec.patch
9400 rename to 3.2.17/4445_grsec-pax-without-grsec.patch
9401
9402 diff --git a/3.2.16/4450_grsec-kconfig-default-gids.patch b/3.2.17/4450_grsec-kconfig-default-gids.patch
9403 similarity index 100%
9404 rename from 3.2.16/4450_grsec-kconfig-default-gids.patch
9405 rename to 3.2.17/4450_grsec-kconfig-default-gids.patch
9406
9407 diff --git a/3.2.16/4455_grsec-kconfig-gentoo.patch b/3.2.17/4455_grsec-kconfig-gentoo.patch
9408 similarity index 100%
9409 rename from 3.2.16/4455_grsec-kconfig-gentoo.patch
9410 rename to 3.2.17/4455_grsec-kconfig-gentoo.patch
9411
9412 diff --git a/3.2.16/4460-grsec-kconfig-proc-user.patch b/3.2.17/4460-grsec-kconfig-proc-user.patch
9413 similarity index 100%
9414 rename from 3.2.16/4460-grsec-kconfig-proc-user.patch
9415 rename to 3.2.17/4460-grsec-kconfig-proc-user.patch
9416
9417 diff --git a/3.2.16/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.17/4465_selinux-avc_audit-log-curr_ip.patch
9418 similarity index 100%
9419 rename from 3.2.16/4465_selinux-avc_audit-log-curr_ip.patch
9420 rename to 3.2.17/4465_selinux-avc_audit-log-curr_ip.patch
9421
9422 diff --git a/3.2.16/4470_disable-compat_vdso.patch b/3.2.17/4470_disable-compat_vdso.patch
9423 similarity index 100%
9424 rename from 3.2.16/4470_disable-compat_vdso.patch
9425 rename to 3.2.17/4470_disable-compat_vdso.patch
9426
9427 diff --git a/3.3.5/1004_linux-3.3.5.patch b/3.3.5/1004_linux-3.3.5.patch
9428 deleted file mode 100644
9429 index a1fa635..0000000
9430 --- a/3.3.5/1004_linux-3.3.5.patch
9431 +++ /dev/null
9432 @@ -1,3285 +0,0 @@
9433 -diff --git a/Makefile b/Makefile
9434 -index 44ef766..64615e9 100644
9435 ---- a/Makefile
9436 -+++ b/Makefile
9437 -@@ -1,6 +1,6 @@
9438 - VERSION = 3
9439 - PATCHLEVEL = 3
9440 --SUBLEVEL = 4
9441 -+SUBLEVEL = 5
9442 - EXTRAVERSION =
9443 - NAME = Saber-toothed Squirrel
9444 -
9445 -diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
9446 -index dfb0312..dedb885 100644
9447 ---- a/arch/arm/Kconfig
9448 -+++ b/arch/arm/Kconfig
9449 -@@ -1163,6 +1163,15 @@ if !MMU
9450 - source "arch/arm/Kconfig-nommu"
9451 - endif
9452 -
9453 -+config ARM_ERRATA_326103
9454 -+ bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
9455 -+ depends on CPU_V6
9456 -+ help
9457 -+ Executing a SWP instruction to read-only memory does not set bit 11
9458 -+ of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to
9459 -+ treat the access as a read, preventing a COW from occurring and
9460 -+ causing the faulting task to livelock.
9461 -+
9462 - config ARM_ERRATA_411920
9463 - bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
9464 - depends on CPU_V6 || CPU_V6K
9465 -diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
9466 -index 60843eb..73409e6 100644
9467 ---- a/arch/arm/include/asm/tls.h
9468 -+++ b/arch/arm/include/asm/tls.h
9469 -@@ -7,6 +7,8 @@
9470 -
9471 - .macro set_tls_v6k, tp, tmp1, tmp2
9472 - mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
9473 -+ mov \tmp1, #0
9474 -+ mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
9475 - .endm
9476 -
9477 - .macro set_tls_v6, tp, tmp1, tmp2
9478 -@@ -15,6 +17,8 @@
9479 - mov \tmp2, #0xffff0fff
9480 - tst \tmp1, #HWCAP_TLS @ hardware TLS available?
9481 - mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
9482 -+ movne \tmp1, #0
9483 -+ mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
9484 - streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
9485 - .endm
9486 -
9487 -diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
9488 -index 3efd82c..87c8be5 100644
9489 ---- a/arch/arm/kernel/irq.c
9490 -+++ b/arch/arm/kernel/irq.c
9491 -@@ -156,10 +156,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
9492 - }
9493 -
9494 - c = irq_data_get_irq_chip(d);
9495 -- if (c->irq_set_affinity)
9496 -- c->irq_set_affinity(d, affinity, true);
9497 -- else
9498 -+ if (!c->irq_set_affinity)
9499 - pr_debug("IRQ%u: unable to set affinity\n", d->irq);
9500 -+ else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
9501 -+ cpumask_copy(d->affinity, affinity);
9502 -
9503 - return ret;
9504 - }
9505 -diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
9506 -index ff1f7cc..8074199 100644
9507 ---- a/arch/arm/mm/abort-ev6.S
9508 -+++ b/arch/arm/mm/abort-ev6.S
9509 -@@ -26,18 +26,23 @@ ENTRY(v6_early_abort)
9510 - mrc p15, 0, r1, c5, c0, 0 @ get FSR
9511 - mrc p15, 0, r0, c6, c0, 0 @ get FAR
9512 - /*
9513 -- * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103).
9514 -- * The test below covers all the write situations, including Java bytecodes
9515 -+ * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
9516 - */
9517 -- bic r1, r1, #1 << 11 @ clear bit 11 of FSR
9518 -+#ifdef CONFIG_ARM_ERRATA_326103
9519 -+ ldr ip, =0x4107b36
9520 -+ mrc p15, 0, r3, c0, c0, 0 @ get processor id
9521 -+ teq ip, r3, lsr #4 @ r0 ARM1136?
9522 -+ bne do_DataAbort
9523 - tst r5, #PSR_J_BIT @ Java?
9524 -+ tsteq r5, #PSR_T_BIT @ Thumb?
9525 - bne do_DataAbort
9526 -- do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
9527 -- ldreq r3, [r4] @ read aborted ARM instruction
9528 -+ bic r1, r1, #1 << 11 @ clear bit 11 of FSR
9529 -+ ldr r3, [r4] @ read aborted ARM instruction
9530 - #ifdef CONFIG_CPU_ENDIAN_BE8
9531 -- reveq r3, r3
9532 -+ rev r3, r3
9533 - #endif
9534 - do_ldrd_abort tmp=ip, insn=r3
9535 - tst r3, #1 << 20 @ L = 0 -> write
9536 - orreq r1, r1, #1 << 11 @ yes.
9537 -+#endif
9538 - b do_DataAbort
9539 -diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c
9540 -index e215070..9c717bf 100644
9541 ---- a/arch/mips/ath79/dev-wmac.c
9542 -+++ b/arch/mips/ath79/dev-wmac.c
9543 -@@ -58,8 +58,8 @@ static void __init ar913x_wmac_setup(void)
9544 -
9545 - static int ar933x_wmac_reset(void)
9546 - {
9547 -- ath79_device_reset_clear(AR933X_RESET_WMAC);
9548 - ath79_device_reset_set(AR933X_RESET_WMAC);
9549 -+ ath79_device_reset_clear(AR933X_RESET_WMAC);
9550 -
9551 - return 0;
9552 - }
9553 -diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
9554 -index 9fef530..67dac22 100644
9555 ---- a/arch/powerpc/platforms/85xx/common.c
9556 -+++ b/arch/powerpc/platforms/85xx/common.c
9557 -@@ -21,6 +21,12 @@ static struct of_device_id __initdata mpc85xx_common_ids[] = {
9558 - { .compatible = "fsl,qe", },
9559 - { .compatible = "fsl,cpm2", },
9560 - { .compatible = "fsl,srio", },
9561 -+ /* So that the DMA channel nodes can be probed individually: */
9562 -+ { .compatible = "fsl,eloplus-dma", },
9563 -+ /* For the PMC driver */
9564 -+ { .compatible = "fsl,mpc8548-guts", },
9565 -+ /* Probably unnecessary? */
9566 -+ { .compatible = "gpio-leds", },
9567 - {},
9568 - };
9569 -
9570 -diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
9571 -index 1d15a0c..b498864 100644
9572 ---- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
9573 -+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
9574 -@@ -405,12 +405,6 @@ static int __init board_fixups(void)
9575 - machine_arch_initcall(mpc8568_mds, board_fixups);
9576 - machine_arch_initcall(mpc8569_mds, board_fixups);
9577 -
9578 --static struct of_device_id mpc85xx_ids[] = {
9579 -- { .compatible = "fsl,mpc8548-guts", },
9580 -- { .compatible = "gpio-leds", },
9581 -- {},
9582 --};
9583 --
9584 - static int __init mpc85xx_publish_devices(void)
9585 - {
9586 - if (machine_is(mpc8568_mds))
9587 -@@ -418,10 +412,7 @@ static int __init mpc85xx_publish_devices(void)
9588 - if (machine_is(mpc8569_mds))
9589 - simple_gpiochip_init("fsl,mpc8569mds-bcsr-gpio");
9590 -
9591 -- mpc85xx_common_publish_devices();
9592 -- of_platform_bus_probe(NULL, mpc85xx_ids, NULL);
9593 --
9594 -- return 0;
9595 -+ return mpc85xx_common_publish_devices();
9596 - }
9597 -
9598 - machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices);
9599 -diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
9600 -index b0984ad..cc79cad8 100644
9601 ---- a/arch/powerpc/platforms/85xx/p1022_ds.c
9602 -+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
9603 -@@ -303,18 +303,7 @@ static void __init p1022_ds_setup_arch(void)
9604 - pr_info("Freescale P1022 DS reference board\n");
9605 - }
9606 -
9607 --static struct of_device_id __initdata p1022_ds_ids[] = {
9608 -- /* So that the DMA channel nodes can be probed individually: */
9609 -- { .compatible = "fsl,eloplus-dma", },
9610 -- {},
9611 --};
9612 --
9613 --static int __init p1022_ds_publish_devices(void)
9614 --{
9615 -- mpc85xx_common_publish_devices();
9616 -- return of_platform_bus_probe(NULL, p1022_ds_ids, NULL);
9617 --}
9618 --machine_device_initcall(p1022_ds, p1022_ds_publish_devices);
9619 -+machine_device_initcall(p1022_ds, mpc85xx_common_publish_devices);
9620 -
9621 - machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier);
9622 -
9623 -diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
9624 -index 95365a8..5a747dd 100644
9625 ---- a/arch/x86/boot/Makefile
9626 -+++ b/arch/x86/boot/Makefile
9627 -@@ -37,7 +37,8 @@ setup-y += video-bios.o
9628 - targets += $(setup-y)
9629 - hostprogs-y := mkcpustr tools/build
9630 -
9631 --HOST_EXTRACFLAGS += $(LINUXINCLUDE)
9632 -+HOST_EXTRACFLAGS += -I$(srctree)/tools/include $(LINUXINCLUDE) \
9633 -+ -D__EXPORTED_HEADERS__
9634 -
9635 - $(obj)/cpu.o: $(obj)/cpustr.h
9636 -
9637 -diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
9638 -index b123b9a..fd55a2f 100644
9639 ---- a/arch/x86/boot/compressed/Makefile
9640 -+++ b/arch/x86/boot/compressed/Makefile
9641 -@@ -22,6 +22,7 @@ LDFLAGS := -m elf_$(UTS_MACHINE)
9642 - LDFLAGS_vmlinux := -T
9643 -
9644 - hostprogs-y := mkpiggy
9645 -+HOST_EXTRACFLAGS += -I$(srctree)/tools/include
9646 -
9647 - VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
9648 - $(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \
9649 -diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
9650 -index fec216f..0cdfc0d 100644
9651 ---- a/arch/x86/boot/compressed/eboot.c
9652 -+++ b/arch/x86/boot/compressed/eboot.c
9653 -@@ -539,7 +539,7 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
9654 - struct initrd *initrd;
9655 - efi_file_handle_t *h;
9656 - efi_file_info_t *info;
9657 -- efi_char16_t filename[256];
9658 -+ efi_char16_t filename_16[256];
9659 - unsigned long info_sz;
9660 - efi_guid_t info_guid = EFI_FILE_INFO_ID;
9661 - efi_char16_t *p;
9662 -@@ -552,14 +552,14 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
9663 - str += 7;
9664 -
9665 - initrd = &initrds[i];
9666 -- p = filename;
9667 -+ p = filename_16;
9668 -
9669 - /* Skip any leading slashes */
9670 - while (*str == '/' || *str == '\\')
9671 - str++;
9672 -
9673 - while (*str && *str != ' ' && *str != '\n') {
9674 -- if (p >= filename + sizeof(filename))
9675 -+ if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
9676 - break;
9677 -
9678 - *p++ = *str++;
9679 -@@ -583,7 +583,7 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
9680 - goto free_initrds;
9681 - }
9682 -
9683 -- status = efi_call_phys5(fh->open, fh, &h, filename,
9684 -+ status = efi_call_phys5(fh->open, fh, &h, filename_16,
9685 - EFI_FILE_MODE_READ, (u64)0);
9686 - if (status != EFI_SUCCESS)
9687 - goto close_handles;
9688 -diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
9689 -index a055993..c85e3ac 100644
9690 ---- a/arch/x86/boot/compressed/head_32.S
9691 -+++ b/arch/x86/boot/compressed/head_32.S
9692 -@@ -33,6 +33,9 @@
9693 - __HEAD
9694 - ENTRY(startup_32)
9695 - #ifdef CONFIG_EFI_STUB
9696 -+ jmp preferred_addr
9697 -+
9698 -+ .balign 0x10
9699 - /*
9700 - * We don't need the return address, so set up the stack so
9701 - * efi_main() can find its arugments.
9702 -@@ -41,12 +44,17 @@ ENTRY(startup_32)
9703 -
9704 - call efi_main
9705 - cmpl $0, %eax
9706 -- je preferred_addr
9707 - movl %eax, %esi
9708 -- call 1f
9709 -+ jne 2f
9710 - 1:
9711 -+ /* EFI init failed, so hang. */
9712 -+ hlt
9713 -+ jmp 1b
9714 -+2:
9715 -+ call 3f
9716 -+3:
9717 - popl %eax
9718 -- subl $1b, %eax
9719 -+ subl $3b, %eax
9720 - subl BP_pref_address(%esi), %eax
9721 - add BP_code32_start(%esi), %eax
9722 - leal preferred_addr(%eax), %eax
9723 -diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
9724 -index 558d76c..87e03a1 100644
9725 ---- a/arch/x86/boot/compressed/head_64.S
9726 -+++ b/arch/x86/boot/compressed/head_64.S
9727 -@@ -200,18 +200,28 @@ ENTRY(startup_64)
9728 - * entire text+data+bss and hopefully all of memory.
9729 - */
9730 - #ifdef CONFIG_EFI_STUB
9731 -- pushq %rsi
9732 -+ /*
9733 -+ * The entry point for the PE/COFF executable is 0x210, so only
9734 -+ * legacy boot loaders will execute this jmp.
9735 -+ */
9736 -+ jmp preferred_addr
9737 -+
9738 -+ .org 0x210
9739 - mov %rcx, %rdi
9740 - mov %rdx, %rsi
9741 - call efi_main
9742 -- popq %rsi
9743 -- cmpq $0,%rax
9744 -- je preferred_addr
9745 - movq %rax,%rsi
9746 -- call 1f
9747 -+ cmpq $0,%rax
9748 -+ jne 2f
9749 - 1:
9750 -+ /* EFI init failed, so hang. */
9751 -+ hlt
9752 -+ jmp 1b
9753 -+2:
9754 -+ call 3f
9755 -+3:
9756 - popq %rax
9757 -- subq $1b, %rax
9758 -+ subq $3b, %rax
9759 - subq BP_pref_address(%rsi), %rax
9760 - add BP_code32_start(%esi), %eax
9761 - leaq preferred_addr(%rax), %rax
9762 -diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
9763 -index 46a8238..958a641 100644
9764 ---- a/arch/x86/boot/compressed/mkpiggy.c
9765 -+++ b/arch/x86/boot/compressed/mkpiggy.c
9766 -@@ -29,14 +29,7 @@
9767 - #include <stdio.h>
9768 - #include <string.h>
9769 - #include <inttypes.h>
9770 --
9771 --static uint32_t getle32(const void *p)
9772 --{
9773 -- const uint8_t *cp = p;
9774 --
9775 -- return (uint32_t)cp[0] + ((uint32_t)cp[1] << 8) +
9776 -- ((uint32_t)cp[2] << 16) + ((uint32_t)cp[3] << 24);
9777 --}
9778 -+#include <tools/le_byteshift.h>
9779 -
9780 - int main(int argc, char *argv[])
9781 - {
9782 -@@ -69,7 +62,7 @@ int main(int argc, char *argv[])
9783 - }
9784 -
9785 - ilen = ftell(f);
9786 -- olen = getle32(&olen);
9787 -+ olen = get_unaligned_le32(&olen);
9788 - fclose(f);
9789 -
9790 - /*
9791 -diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
9792 -index 4e9bd6b..09ce870 100644
9793 ---- a/arch/x86/boot/tools/build.c
9794 -+++ b/arch/x86/boot/tools/build.c
9795 -@@ -34,6 +34,7 @@
9796 - #include <fcntl.h>
9797 - #include <sys/mman.h>
9798 - #include <asm/boot.h>
9799 -+#include <tools/le_byteshift.h>
9800 -
9801 - typedef unsigned char u8;
9802 - typedef unsigned short u16;
9803 -@@ -41,6 +42,7 @@ typedef unsigned long u32;
9804 -
9805 - #define DEFAULT_MAJOR_ROOT 0
9806 - #define DEFAULT_MINOR_ROOT 0
9807 -+#define DEFAULT_ROOT_DEV (DEFAULT_MAJOR_ROOT << 8 | DEFAULT_MINOR_ROOT)
9808 -
9809 - /* Minimal number of setup sectors */
9810 - #define SETUP_SECT_MIN 5
9811 -@@ -159,7 +161,7 @@ int main(int argc, char ** argv)
9812 - die("read-error on `setup'");
9813 - if (c < 1024)
9814 - die("The setup must be at least 1024 bytes");
9815 -- if (buf[510] != 0x55 || buf[511] != 0xaa)
9816 -+ if (get_unaligned_le16(&buf[510]) != 0xAA55)
9817 - die("Boot block hasn't got boot flag (0xAA55)");
9818 - fclose(file);
9819 -
9820 -@@ -171,8 +173,7 @@ int main(int argc, char ** argv)
9821 - memset(buf+c, 0, i-c);
9822 -
9823 - /* Set the default root device */
9824 -- buf[508] = DEFAULT_MINOR_ROOT;
9825 -- buf[509] = DEFAULT_MAJOR_ROOT;
9826 -+ put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
9827 -
9828 - fprintf(stderr, "Setup is %d bytes (padded to %d bytes).\n", c, i);
9829 -
9830 -@@ -192,44 +193,49 @@ int main(int argc, char ** argv)
9831 -
9832 - /* Patch the setup code with the appropriate size parameters */
9833 - buf[0x1f1] = setup_sectors-1;
9834 -- buf[0x1f4] = sys_size;
9835 -- buf[0x1f5] = sys_size >> 8;
9836 -- buf[0x1f6] = sys_size >> 16;
9837 -- buf[0x1f7] = sys_size >> 24;
9838 -+ put_unaligned_le32(sys_size, &buf[0x1f4]);
9839 -
9840 - #ifdef CONFIG_EFI_STUB
9841 - file_sz = sz + i + ((sys_size * 16) - sz);
9842 -
9843 -- pe_header = *(unsigned int *)&buf[0x3c];
9844 -+ pe_header = get_unaligned_le32(&buf[0x3c]);
9845 -
9846 - /* Size of code */
9847 -- *(unsigned int *)&buf[pe_header + 0x1c] = file_sz;
9848 -+ put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]);
9849 -
9850 - /* Size of image */
9851 -- *(unsigned int *)&buf[pe_header + 0x50] = file_sz;
9852 -+ put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
9853 -
9854 - #ifdef CONFIG_X86_32
9855 -- /* Address of entry point */
9856 -- *(unsigned int *)&buf[pe_header + 0x28] = i;
9857 -+ /*
9858 -+ * Address of entry point.
9859 -+ *
9860 -+ * The EFI stub entry point is +16 bytes from the start of
9861 -+ * the .text section.
9862 -+ */
9863 -+ put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
9864 -
9865 - /* .text size */
9866 -- *(unsigned int *)&buf[pe_header + 0xb0] = file_sz;
9867 -+ put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
9868 -
9869 - /* .text size of initialised data */
9870 -- *(unsigned int *)&buf[pe_header + 0xb8] = file_sz;
9871 -+ put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]);
9872 - #else
9873 - /*
9874 - * Address of entry point. startup_32 is at the beginning and
9875 - * the 64-bit entry point (startup_64) is always 512 bytes
9876 -- * after.
9877 -+ * after. The EFI stub entry point is 16 bytes after that, as
9878 -+ * the first instruction allows legacy loaders to jump over
9879 -+ * the EFI stub initialisation
9880 - */
9881 -- *(unsigned int *)&buf[pe_header + 0x28] = i + 512;
9882 -+ put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
9883 -
9884 - /* .text size */
9885 -- *(unsigned int *)&buf[pe_header + 0xc0] = file_sz;
9886 -+ put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
9887 -
9888 - /* .text size of initialised data */
9889 -- *(unsigned int *)&buf[pe_header + 0xc8] = file_sz;
9890 -+ put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]);
9891 -+
9892 - #endif /* CONFIG_X86_32 */
9893 - #endif /* CONFIG_EFI_STUB */
9894 -
9895 -diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
9896 -index 517d476..a609c39 100644
9897 ---- a/arch/x86/include/asm/x86_init.h
9898 -+++ b/arch/x86/include/asm/x86_init.h
9899 -@@ -189,6 +189,5 @@ extern struct x86_msi_ops x86_msi;
9900 -
9901 - extern void x86_init_noop(void);
9902 - extern void x86_init_uint_noop(unsigned int unused);
9903 --extern void x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node);
9904 -
9905 - #endif
9906 -diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
9907 -index 2eec05b..5b3f88e 100644
9908 ---- a/arch/x86/kernel/apic/apic.c
9909 -+++ b/arch/x86/kernel/apic/apic.c
9910 -@@ -1632,9 +1632,11 @@ static int __init apic_verify(void)
9911 - mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
9912 -
9913 - /* The BIOS may have set up the APIC at some other address */
9914 -- rdmsr(MSR_IA32_APICBASE, l, h);
9915 -- if (l & MSR_IA32_APICBASE_ENABLE)
9916 -- mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
9917 -+ if (boot_cpu_data.x86 >= 6) {
9918 -+ rdmsr(MSR_IA32_APICBASE, l, h);
9919 -+ if (l & MSR_IA32_APICBASE_ENABLE)
9920 -+ mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
9921 -+ }
9922 -
9923 - pr_info("Found and enabled local APIC!\n");
9924 - return 0;
9925 -@@ -1652,13 +1654,15 @@ int __init apic_force_enable(unsigned long addr)
9926 - * MSR. This can only be done in software for Intel P6 or later
9927 - * and AMD K7 (Model > 1) or later.
9928 - */
9929 -- rdmsr(MSR_IA32_APICBASE, l, h);
9930 -- if (!(l & MSR_IA32_APICBASE_ENABLE)) {
9931 -- pr_info("Local APIC disabled by BIOS -- reenabling.\n");
9932 -- l &= ~MSR_IA32_APICBASE_BASE;
9933 -- l |= MSR_IA32_APICBASE_ENABLE | addr;
9934 -- wrmsr(MSR_IA32_APICBASE, l, h);
9935 -- enabled_via_apicbase = 1;
9936 -+ if (boot_cpu_data.x86 >= 6) {
9937 -+ rdmsr(MSR_IA32_APICBASE, l, h);
9938 -+ if (!(l & MSR_IA32_APICBASE_ENABLE)) {
9939 -+ pr_info("Local APIC disabled by BIOS -- reenabling.\n");
9940 -+ l &= ~MSR_IA32_APICBASE_BASE;
9941 -+ l |= MSR_IA32_APICBASE_ENABLE | addr;
9942 -+ wrmsr(MSR_IA32_APICBASE, l, h);
9943 -+ enabled_via_apicbase = 1;
9944 -+ }
9945 - }
9946 - return apic_verify();
9947 - }
9948 -@@ -2204,10 +2208,12 @@ static void lapic_resume(void)
9949 - * FIXME! This will be wrong if we ever support suspend on
9950 - * SMP! We'll need to do this as part of the CPU restore!
9951 - */
9952 -- rdmsr(MSR_IA32_APICBASE, l, h);
9953 -- l &= ~MSR_IA32_APICBASE_BASE;
9954 -- l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
9955 -- wrmsr(MSR_IA32_APICBASE, l, h);
9956 -+ if (boot_cpu_data.x86 >= 6) {
9957 -+ rdmsr(MSR_IA32_APICBASE, l, h);
9958 -+ l &= ~MSR_IA32_APICBASE_BASE;
9959 -+ l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
9960 -+ wrmsr(MSR_IA32_APICBASE, l, h);
9961 -+ }
9962 - }
9963 -
9964 - maxlvt = lapic_get_maxlvt();
9965 -diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
9966 -index 09d3d8c..ade0182 100644
9967 ---- a/arch/x86/kernel/apic/apic_numachip.c
9968 -+++ b/arch/x86/kernel/apic/apic_numachip.c
9969 -@@ -201,8 +201,11 @@ static void __init map_csrs(void)
9970 -
9971 - static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
9972 - {
9973 -- c->phys_proc_id = node;
9974 -- per_cpu(cpu_llc_id, smp_processor_id()) = node;
9975 -+
9976 -+ if (c->phys_proc_id != node) {
9977 -+ c->phys_proc_id = node;
9978 -+ per_cpu(cpu_llc_id, smp_processor_id()) = node;
9979 -+ }
9980 - }
9981 -
9982 - static int __init numachip_system_init(void)
9983 -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
9984 -index f4773f4..80ab83d 100644
9985 ---- a/arch/x86/kernel/cpu/amd.c
9986 -+++ b/arch/x86/kernel/cpu/amd.c
9987 -@@ -352,10 +352,11 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
9988 - node = per_cpu(cpu_llc_id, cpu);
9989 -
9990 - /*
9991 -- * If core numbers are inconsistent, it's likely a multi-fabric platform,
9992 -- * so invoke platform-specific handler
9993 -+ * On multi-fabric platform (e.g. Numascale NumaChip) a
9994 -+ * platform-specific handler needs to be called to fixup some
9995 -+ * IDs of the CPU.
9996 - */
9997 -- if (c->phys_proc_id != node)
9998 -+ if (x86_cpuinit.fixup_cpu_id)
9999 - x86_cpuinit.fixup_cpu_id(c, node);
10000 -
10001 - if (!node_online(node)) {
10002 -diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
10003 -index c0f7d68..1a810e4 100644
10004 ---- a/arch/x86/kernel/cpu/common.c
10005 -+++ b/arch/x86/kernel/cpu/common.c
10006 -@@ -1163,15 +1163,6 @@ static void dbg_restore_debug_regs(void)
10007 - #endif /* ! CONFIG_KGDB */
10008 -
10009 - /*
10010 -- * Prints an error where the NUMA and configured core-number mismatch and the
10011 -- * platform didn't override this to fix it up
10012 -- */
10013 --void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node)
10014 --{
10015 -- pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id);
10016 --}
10017 --
10018 --/*
10019 - * cpu_init() initializes state that is per-CPU. Some data is already
10020 - * initialized (naturally) in the bootstrap process, such as the GDT
10021 - * and IDT. We reload them nevertheless, this function acts as a
10022 -diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
10023 -index 739d859..f239f30 100644
10024 ---- a/arch/x86/kernel/i387.c
10025 -+++ b/arch/x86/kernel/i387.c
10026 -@@ -154,6 +154,7 @@ int init_fpu(struct task_struct *tsk)
10027 - if (tsk_used_math(tsk)) {
10028 - if (HAVE_HWFP && tsk == current)
10029 - unlazy_fpu(tsk);
10030 -+ tsk->thread.fpu.last_cpu = ~0;
10031 - return 0;
10032 - }
10033 -
10034 -diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
10035 -index 73465aa..8a2ce8f 100644
10036 ---- a/arch/x86/kernel/microcode_amd.c
10037 -+++ b/arch/x86/kernel/microcode_amd.c
10038 -@@ -82,11 +82,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
10039 - {
10040 - struct cpuinfo_x86 *c = &cpu_data(cpu);
10041 -
10042 -- if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
10043 -- pr_warning("CPU%d: family %d not supported\n", cpu, c->x86);
10044 -- return -1;
10045 -- }
10046 --
10047 - csig->rev = c->microcode;
10048 - pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
10049 -
10050 -@@ -380,6 +375,13 @@ static struct microcode_ops microcode_amd_ops = {
10051 -
10052 - struct microcode_ops * __init init_amd_microcode(void)
10053 - {
10054 -+ struct cpuinfo_x86 *c = &cpu_data(0);
10055 -+
10056 -+ if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
10057 -+ pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
10058 -+ return NULL;
10059 -+ }
10060 -+
10061 - patch = (void *)get_zeroed_page(GFP_KERNEL);
10062 - if (!patch)
10063 - return NULL;
10064 -diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
10065 -index fda91c3..50a5875 100644
10066 ---- a/arch/x86/kernel/microcode_core.c
10067 -+++ b/arch/x86/kernel/microcode_core.c
10068 -@@ -418,10 +418,8 @@ static int mc_device_add(struct device *dev, struct subsys_interface *sif)
10069 - if (err)
10070 - return err;
10071 -
10072 -- if (microcode_init_cpu(cpu) == UCODE_ERROR) {
10073 -- sysfs_remove_group(&dev->kobj, &mc_attr_group);
10074 -+ if (microcode_init_cpu(cpu) == UCODE_ERROR)
10075 - return -EINVAL;
10076 -- }
10077 -
10078 - return err;
10079 - }
10080 -@@ -513,11 +511,11 @@ static int __init microcode_init(void)
10081 - microcode_ops = init_intel_microcode();
10082 - else if (c->x86_vendor == X86_VENDOR_AMD)
10083 - microcode_ops = init_amd_microcode();
10084 --
10085 -- if (!microcode_ops) {
10086 -+ else
10087 - pr_err("no support for this CPU vendor\n");
10088 -+
10089 -+ if (!microcode_ops)
10090 - return -ENODEV;
10091 -- }
10092 -
10093 - microcode_pdev = platform_device_register_simple("microcode", -1,
10094 - NULL, 0);
10095 -diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
10096 -index 947a06c..83b05ad 100644
10097 ---- a/arch/x86/kernel/x86_init.c
10098 -+++ b/arch/x86/kernel/x86_init.c
10099 -@@ -92,7 +92,6 @@ struct x86_init_ops x86_init __initdata = {
10100 -
10101 - struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
10102 - .setup_percpu_clockev = setup_secondary_APIC_clock,
10103 -- .fixup_cpu_id = x86_default_fixup_cpu_id,
10104 - };
10105 -
10106 - static void default_nmi_init(void) { };
10107 -diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
10108 -index 501d4e0..f2ce60a 100644
10109 ---- a/arch/x86/xen/smp.c
10110 -+++ b/arch/x86/xen/smp.c
10111 -@@ -172,6 +172,7 @@ static void __init xen_fill_possible_map(void)
10112 - static void __init xen_filter_cpu_maps(void)
10113 - {
10114 - int i, rc;
10115 -+ unsigned int subtract = 0;
10116 -
10117 - if (!xen_initial_domain())
10118 - return;
10119 -@@ -186,8 +187,22 @@ static void __init xen_filter_cpu_maps(void)
10120 - } else {
10121 - set_cpu_possible(i, false);
10122 - set_cpu_present(i, false);
10123 -+ subtract++;
10124 - }
10125 - }
10126 -+#ifdef CONFIG_HOTPLUG_CPU
10127 -+ /* This is akin to using 'nr_cpus' on the Linux command line.
10128 -+ * Which is OK as when we use 'dom0_max_vcpus=X' we can only
10129 -+ * have up to X, while nr_cpu_ids is greater than X. This
10130 -+ * normally is not a problem, except when CPU hotplugging
10131 -+ * is involved and then there might be more than X CPUs
10132 -+ * in the guest - which will not work as there is no
10133 -+ * hypercall to expand the max number of VCPUs an already
10134 -+ * running guest has. So cap it up to X. */
10135 -+ if (subtract)
10136 -+ nr_cpu_ids = nr_cpu_ids - subtract;
10137 -+#endif
10138 -+
10139 - }
10140 -
10141 - static void __init xen_smp_prepare_boot_cpu(void)
10142 -diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
10143 -index 79d7362..3e45aa0 100644
10144 ---- a/arch/x86/xen/xen-asm.S
10145 -+++ b/arch/x86/xen/xen-asm.S
10146 -@@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct)
10147 -
10148 - /* check for unmasked and pending */
10149 - cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
10150 -- jz 1f
10151 -+ jnz 1f
10152 - 2: call check_events
10153 - 1:
10154 - ENDPATCH(xen_restore_fl_direct)
10155 -diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
10156 -index a9b2820..58db834 100644
10157 ---- a/drivers/ata/libata-eh.c
10158 -+++ b/drivers/ata/libata-eh.c
10159 -@@ -3500,7 +3500,8 @@ static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg
10160 - u64 now = get_jiffies_64();
10161 - int *trials = void_arg;
10162 -
10163 -- if (ent->timestamp < now - min(now, interval))
10164 -+ if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
10165 -+ (ent->timestamp < now - min(now, interval)))
10166 - return -1;
10167 -
10168 - (*trials)++;
10169 -diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
10170 -index 2d8c789..b28dbfa 100644
10171 ---- a/drivers/crypto/talitos.c
10172 -+++ b/drivers/crypto/talitos.c
10173 -@@ -124,6 +124,9 @@ struct talitos_private {
10174 - void __iomem *reg;
10175 - int irq[2];
10176 -
10177 -+ /* SEC global registers lock */
10178 -+ spinlock_t reg_lock ____cacheline_aligned;
10179 -+
10180 - /* SEC version geometry (from device tree node) */
10181 - unsigned int num_channels;
10182 - unsigned int chfifo_len;
10183 -@@ -412,6 +415,7 @@ static void talitos_done_##name(unsigned long data) \
10184 - { \
10185 - struct device *dev = (struct device *)data; \
10186 - struct talitos_private *priv = dev_get_drvdata(dev); \
10187 -+ unsigned long flags; \
10188 - \
10189 - if (ch_done_mask & 1) \
10190 - flush_channel(dev, 0, 0, 0); \
10191 -@@ -427,8 +431,10 @@ static void talitos_done_##name(unsigned long data) \
10192 - out: \
10193 - /* At this point, all completed channels have been processed */ \
10194 - /* Unmask done interrupts for channels completed later on. */ \
10195 -+ spin_lock_irqsave(&priv->reg_lock, flags); \
10196 - setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
10197 - setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \
10198 -+ spin_unlock_irqrestore(&priv->reg_lock, flags); \
10199 - }
10200 - DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
10201 - DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
10202 -@@ -619,22 +625,28 @@ static irqreturn_t talitos_interrupt_##name(int irq, void *data) \
10203 - struct device *dev = data; \
10204 - struct talitos_private *priv = dev_get_drvdata(dev); \
10205 - u32 isr, isr_lo; \
10206 -+ unsigned long flags; \
10207 - \
10208 -+ spin_lock_irqsave(&priv->reg_lock, flags); \
10209 - isr = in_be32(priv->reg + TALITOS_ISR); \
10210 - isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
10211 - /* Acknowledge interrupt */ \
10212 - out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
10213 - out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
10214 - \
10215 -- if (unlikely((isr & ~TALITOS_ISR_4CHDONE) & ch_err_mask || isr_lo)) \
10216 -- talitos_error(dev, isr, isr_lo); \
10217 -- else \
10218 -+ if (unlikely(isr & ch_err_mask || isr_lo)) { \
10219 -+ spin_unlock_irqrestore(&priv->reg_lock, flags); \
10220 -+ talitos_error(dev, isr & ch_err_mask, isr_lo); \
10221 -+ } \
10222 -+ else { \
10223 - if (likely(isr & ch_done_mask)) { \
10224 - /* mask further done interrupts. */ \
10225 - clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
10226 - /* done_task will unmask done interrupts at exit */ \
10227 - tasklet_schedule(&priv->done_task[tlet]); \
10228 - } \
10229 -+ spin_unlock_irqrestore(&priv->reg_lock, flags); \
10230 -+ } \
10231 - \
10232 - return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
10233 - IRQ_NONE; \
10234 -@@ -2718,6 +2730,8 @@ static int talitos_probe(struct platform_device *ofdev)
10235 -
10236 - priv->ofdev = ofdev;
10237 -
10238 -+ spin_lock_init(&priv->reg_lock);
10239 -+
10240 - err = talitos_probe_irq(ofdev);
10241 - if (err)
10242 - goto err_out;
10243 -diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
10244 -index f4aed5f..a342873 100644
10245 ---- a/drivers/dma/at_hdmac.c
10246 -+++ b/drivers/dma/at_hdmac.c
10247 -@@ -241,10 +241,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
10248 -
10249 - vdbg_dump_regs(atchan);
10250 -
10251 -- /* clear any pending interrupt */
10252 -- while (dma_readl(atdma, EBCISR))
10253 -- cpu_relax();
10254 --
10255 - channel_writel(atchan, SADDR, 0);
10256 - channel_writel(atchan, DADDR, 0);
10257 - channel_writel(atchan, CTRLA, 0);
10258 -diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
10259 -index d25599f..47408e8 100644
10260 ---- a/drivers/firmware/efivars.c
10261 -+++ b/drivers/firmware/efivars.c
10262 -@@ -191,6 +191,190 @@ utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
10263 - }
10264 - }
10265 -
10266 -+static bool
10267 -+validate_device_path(struct efi_variable *var, int match, u8 *buffer,
10268 -+ unsigned long len)
10269 -+{
10270 -+ struct efi_generic_dev_path *node;
10271 -+ int offset = 0;
10272 -+
10273 -+ node = (struct efi_generic_dev_path *)buffer;
10274 -+
10275 -+ if (len < sizeof(*node))
10276 -+ return false;
10277 -+
10278 -+ while (offset <= len - sizeof(*node) &&
10279 -+ node->length >= sizeof(*node) &&
10280 -+ node->length <= len - offset) {
10281 -+ offset += node->length;
10282 -+
10283 -+ if ((node->type == EFI_DEV_END_PATH ||
10284 -+ node->type == EFI_DEV_END_PATH2) &&
10285 -+ node->sub_type == EFI_DEV_END_ENTIRE)
10286 -+ return true;
10287 -+
10288 -+ node = (struct efi_generic_dev_path *)(buffer + offset);
10289 -+ }
10290 -+
10291 -+ /*
10292 -+ * If we're here then either node->length pointed past the end
10293 -+ * of the buffer or we reached the end of the buffer without
10294 -+ * finding a device path end node.
10295 -+ */
10296 -+ return false;
10297 -+}
10298 -+
10299 -+static bool
10300 -+validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
10301 -+ unsigned long len)
10302 -+{
10303 -+ /* An array of 16-bit integers */
10304 -+ if ((len % 2) != 0)
10305 -+ return false;
10306 -+
10307 -+ return true;
10308 -+}
10309 -+
10310 -+static bool
10311 -+validate_load_option(struct efi_variable *var, int match, u8 *buffer,
10312 -+ unsigned long len)
10313 -+{
10314 -+ u16 filepathlength;
10315 -+ int i, desclength = 0, namelen;
10316 -+
10317 -+ namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
10318 -+
10319 -+ /* Either "Boot" or "Driver" followed by four digits of hex */
10320 -+ for (i = match; i < match+4; i++) {
10321 -+ if (var->VariableName[i] > 127 ||
10322 -+ hex_to_bin(var->VariableName[i] & 0xff) < 0)
10323 -+ return true;
10324 -+ }
10325 -+
10326 -+ /* Reject it if there's 4 digits of hex and then further content */
10327 -+ if (namelen > match + 4)
10328 -+ return false;
10329 -+
10330 -+ /* A valid entry must be at least 8 bytes */
10331 -+ if (len < 8)
10332 -+ return false;
10333 -+
10334 -+ filepathlength = buffer[4] | buffer[5] << 8;
10335 -+
10336 -+ /*
10337 -+ * There's no stored length for the description, so it has to be
10338 -+ * found by hand
10339 -+ */
10340 -+ desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
10341 -+
10342 -+ /* Each boot entry must have a descriptor */
10343 -+ if (!desclength)
10344 -+ return false;
10345 -+
10346 -+ /*
10347 -+ * If the sum of the length of the description, the claimed filepath
10348 -+ * length and the original header are greater than the length of the
10349 -+ * variable, it's malformed
10350 -+ */
10351 -+ if ((desclength + filepathlength + 6) > len)
10352 -+ return false;
10353 -+
10354 -+ /*
10355 -+ * And, finally, check the filepath
10356 -+ */
10357 -+ return validate_device_path(var, match, buffer + desclength + 6,
10358 -+ filepathlength);
10359 -+}
10360 -+
10361 -+static bool
10362 -+validate_uint16(struct efi_variable *var, int match, u8 *buffer,
10363 -+ unsigned long len)
10364 -+{
10365 -+ /* A single 16-bit integer */
10366 -+ if (len != 2)
10367 -+ return false;
10368 -+
10369 -+ return true;
10370 -+}
10371 -+
10372 -+static bool
10373 -+validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
10374 -+ unsigned long len)
10375 -+{
10376 -+ int i;
10377 -+
10378 -+ for (i = 0; i < len; i++) {
10379 -+ if (buffer[i] > 127)
10380 -+ return false;
10381 -+
10382 -+ if (buffer[i] == 0)
10383 -+ return true;
10384 -+ }
10385 -+
10386 -+ return false;
10387 -+}
10388 -+
10389 -+struct variable_validate {
10390 -+ char *name;
10391 -+ bool (*validate)(struct efi_variable *var, int match, u8 *data,
10392 -+ unsigned long len);
10393 -+};
10394 -+
10395 -+static const struct variable_validate variable_validate[] = {
10396 -+ { "BootNext", validate_uint16 },
10397 -+ { "BootOrder", validate_boot_order },
10398 -+ { "DriverOrder", validate_boot_order },
10399 -+ { "Boot*", validate_load_option },
10400 -+ { "Driver*", validate_load_option },
10401 -+ { "ConIn", validate_device_path },
10402 -+ { "ConInDev", validate_device_path },
10403 -+ { "ConOut", validate_device_path },
10404 -+ { "ConOutDev", validate_device_path },
10405 -+ { "ErrOut", validate_device_path },
10406 -+ { "ErrOutDev", validate_device_path },
10407 -+ { "Timeout", validate_uint16 },
10408 -+ { "Lang", validate_ascii_string },
10409 -+ { "PlatformLang", validate_ascii_string },
10410 -+ { "", NULL },
10411 -+};
10412 -+
10413 -+static bool
10414 -+validate_var(struct efi_variable *var, u8 *data, unsigned long len)
10415 -+{
10416 -+ int i;
10417 -+ u16 *unicode_name = var->VariableName;
10418 -+
10419 -+ for (i = 0; variable_validate[i].validate != NULL; i++) {
10420 -+ const char *name = variable_validate[i].name;
10421 -+ int match;
10422 -+
10423 -+ for (match = 0; ; match++) {
10424 -+ char c = name[match];
10425 -+ u16 u = unicode_name[match];
10426 -+
10427 -+ /* All special variables are plain ascii */
10428 -+ if (u > 127)
10429 -+ return true;
10430 -+
10431 -+ /* Wildcard in the matching name means we've matched */
10432 -+ if (c == '*')
10433 -+ return variable_validate[i].validate(var,
10434 -+ match, data, len);
10435 -+
10436 -+ /* Case sensitive match */
10437 -+ if (c != u)
10438 -+ break;
10439 -+
10440 -+ /* Reached the end of the string while matching */
10441 -+ if (!c)
10442 -+ return variable_validate[i].validate(var,
10443 -+ match, data, len);
10444 -+ }
10445 -+ }
10446 -+
10447 -+ return true;
10448 -+}
10449 -+
10450 - static efi_status_t
10451 - get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
10452 - {
10453 -@@ -324,6 +508,12 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
10454 - return -EINVAL;
10455 - }
10456 -
10457 -+ if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
10458 -+ validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
10459 -+ printk(KERN_ERR "efivars: Malformed variable content\n");
10460 -+ return -EINVAL;
10461 -+ }
10462 -+
10463 - spin_lock(&efivars->lock);
10464 - status = efivars->ops->set_variable(new_var->VariableName,
10465 - &new_var->VendorGuid,
10466 -@@ -626,6 +816,12 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
10467 - if (!capable(CAP_SYS_ADMIN))
10468 - return -EACCES;
10469 -
10470 -+ if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
10471 -+ validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
10472 -+ printk(KERN_ERR "efivars: Malformed variable content\n");
10473 -+ return -EINVAL;
10474 -+ }
10475 -+
10476 - spin_lock(&efivars->lock);
10477 -
10478 - /*
10479 -diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
10480 -index 65e1f00..e159e33 100644
10481 ---- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
10482 -+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
10483 -@@ -1082,6 +1082,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
10484 - return -EINVAL;
10485 - }
10486 -
10487 -+ if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
10488 -+ DRM_DEBUG("execbuf with %u cliprects\n",
10489 -+ args->num_cliprects);
10490 -+ return -EINVAL;
10491 -+ }
10492 - cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
10493 - GFP_KERNEL);
10494 - if (cliprects == NULL) {
10495 -@@ -1353,7 +1358,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
10496 - struct drm_i915_gem_exec_object2 *exec2_list = NULL;
10497 - int ret;
10498 -
10499 -- if (args->buffer_count < 1) {
10500 -+ if (args->buffer_count < 1 ||
10501 -+ args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
10502 - DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
10503 - return -EINVAL;
10504 - }
10505 -diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
10506 -index 3e6429a..ac38d21 100644
10507 ---- a/drivers/gpu/drm/i915/i915_reg.h
10508 -+++ b/drivers/gpu/drm/i915/i915_reg.h
10509 -@@ -523,6 +523,7 @@
10510 - #define CM0_MASK_SHIFT 16
10511 - #define CM0_IZ_OPT_DISABLE (1<<6)
10512 - #define CM0_ZR_OPT_DISABLE (1<<5)
10513 -+#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
10514 - #define CM0_DEPTH_EVICT_DISABLE (1<<4)
10515 - #define CM0_COLOR_EVICT_DISABLE (1<<3)
10516 - #define CM0_DEPTH_WRITE_DISABLE (1<<1)
10517 -diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
10518 -index cbc3c04..99f71af 100644
10519 ---- a/drivers/gpu/drm/i915/intel_ringbuffer.c
10520 -+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
10521 -@@ -417,6 +417,14 @@ static int init_render_ring(struct intel_ring_buffer *ring)
10522 - if (INTEL_INFO(dev)->gen >= 6) {
10523 - I915_WRITE(INSTPM,
10524 - INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
10525 -+
10526 -+ /* From the Sandybridge PRM, volume 1 part 3, page 24:
10527 -+ * "If this bit is set, STCunit will have LRA as replacement
10528 -+ * policy. [...] This bit must be reset. LRA replacement
10529 -+ * policy is not supported."
10530 -+ */
10531 -+ I915_WRITE(CACHE_MODE_0,
10532 -+ CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
10533 - }
10534 -
10535 - return ret;
10536 -diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
10537 -index e334ec3..0a877dd 100644
10538 ---- a/drivers/gpu/drm/i915/intel_sdvo.c
10539 -+++ b/drivers/gpu/drm/i915/intel_sdvo.c
10540 -@@ -731,6 +731,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
10541 - uint16_t width, height;
10542 - uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
10543 - uint16_t h_sync_offset, v_sync_offset;
10544 -+ int mode_clock;
10545 -
10546 - width = mode->crtc_hdisplay;
10547 - height = mode->crtc_vdisplay;
10548 -@@ -745,7 +746,11 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
10549 - h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
10550 - v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
10551 -
10552 -- dtd->part1.clock = mode->clock / 10;
10553 -+ mode_clock = mode->clock;
10554 -+ mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
10555 -+ mode_clock /= 10;
10556 -+ dtd->part1.clock = mode_clock;
10557 -+
10558 - dtd->part1.h_active = width & 0xff;
10559 - dtd->part1.h_blank = h_blank_len & 0xff;
10560 - dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
10561 -@@ -997,7 +1002,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
10562 - struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
10563 - u32 sdvox;
10564 - struct intel_sdvo_in_out_map in_out;
10565 -- struct intel_sdvo_dtd input_dtd;
10566 -+ struct intel_sdvo_dtd input_dtd, output_dtd;
10567 - int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
10568 - int rate;
10569 -
10570 -@@ -1022,20 +1027,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
10571 - intel_sdvo->attached_output))
10572 - return;
10573 -
10574 -- /* We have tried to get input timing in mode_fixup, and filled into
10575 -- * adjusted_mode.
10576 -- */
10577 -- if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
10578 -- input_dtd = intel_sdvo->input_dtd;
10579 -- } else {
10580 -- /* Set the output timing to the screen */
10581 -- if (!intel_sdvo_set_target_output(intel_sdvo,
10582 -- intel_sdvo->attached_output))
10583 -- return;
10584 --
10585 -- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
10586 -- (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
10587 -- }
10588 -+ /* lvds has a special fixed output timing. */
10589 -+ if (intel_sdvo->is_lvds)
10590 -+ intel_sdvo_get_dtd_from_mode(&output_dtd,
10591 -+ intel_sdvo->sdvo_lvds_fixed_mode);
10592 -+ else
10593 -+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
10594 -+ (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
10595 -
10596 - /* Set the input timing to the screen. Assume always input 0. */
10597 - if (!intel_sdvo_set_target_input(intel_sdvo))
10598 -@@ -1053,6 +1051,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
10599 - !intel_sdvo_set_tv_format(intel_sdvo))
10600 - return;
10601 -
10602 -+ /* We have tried to get input timing in mode_fixup, and filled into
10603 -+ * adjusted_mode.
10604 -+ */
10605 -+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
10606 - (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
10607 -
10608 - switch (pixel_multiplier) {
10609 -diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
10610 -index 7814a76..284bd25 100644
10611 ---- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
10612 -+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
10613 -@@ -270,7 +270,7 @@ static bool nouveau_dsm_detect(void)
10614 - struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
10615 - struct pci_dev *pdev = NULL;
10616 - int has_dsm = 0;
10617 -- int has_optimus;
10618 -+ int has_optimus = 0;
10619 - int vga_count = 0;
10620 - bool guid_valid;
10621 - int retval;
10622 -diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
10623 -index 24ed306..2dab552 100644
10624 ---- a/drivers/gpu/drm/radeon/atombios_crtc.c
10625 -+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
10626 -@@ -912,8 +912,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
10627 - break;
10628 - }
10629 -
10630 -- if (radeon_encoder->active_device &
10631 -- (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
10632 -+ if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
10633 -+ (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
10634 - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
10635 - struct drm_connector *connector =
10636 - radeon_get_connector_for_encoder(encoder);
10637 -diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
10638 -index a6c6ec3..1248ee4 100644
10639 ---- a/drivers/hwmon/coretemp.c
10640 -+++ b/drivers/hwmon/coretemp.c
10641 -@@ -51,7 +51,7 @@ module_param_named(tjmax, force_tjmax, int, 0444);
10642 - MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
10643 -
10644 - #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
10645 --#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
10646 -+#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
10647 - #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
10648 - #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
10649 - #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
10650 -@@ -708,6 +708,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
10651 -
10652 - indx = TO_ATTR_NO(cpu);
10653 -
10654 -+ /* The core id is too big, just return */
10655 -+ if (indx > MAX_CORE_DATA - 1)
10656 -+ return;
10657 -+
10658 - if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
10659 - coretemp_remove_core(pdata, &pdev->dev, indx);
10660 -
10661 -diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
10662 -index 930370d..9a4c3ab 100644
10663 ---- a/drivers/hwmon/fam15h_power.c
10664 -+++ b/drivers/hwmon/fam15h_power.c
10665 -@@ -122,6 +122,41 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
10666 - return true;
10667 - }
10668 -
10669 -+/*
10670 -+ * Newer BKDG versions have an updated recommendation on how to properly
10671 -+ * initialize the running average range (was: 0xE, now: 0x9). This avoids
10672 -+ * counter saturations resulting in bogus power readings.
10673 -+ * We correct this value ourselves to cope with older BIOSes.
10674 -+ */
10675 -+static DEFINE_PCI_DEVICE_TABLE(affected_device) = {
10676 -+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
10677 -+ { 0 }
10678 -+};
10679 -+
10680 -+static void __devinit tweak_runavg_range(struct pci_dev *pdev)
10681 -+{
10682 -+ u32 val;
10683 -+
10684 -+ /*
10685 -+ * let this quirk apply only to the current version of the
10686 -+ * northbridge, since future versions may change the behavior
10687 -+ */
10688 -+ if (!pci_match_id(affected_device, pdev))
10689 -+ return;
10690 -+
10691 -+ pci_bus_read_config_dword(pdev->bus,
10692 -+ PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
10693 -+ REG_TDP_RUNNING_AVERAGE, &val);
10694 -+ if ((val & 0xf) != 0xe)
10695 -+ return;
10696 -+
10697 -+ val &= ~0xf;
10698 -+ val |= 0x9;
10699 -+ pci_bus_write_config_dword(pdev->bus,
10700 -+ PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
10701 -+ REG_TDP_RUNNING_AVERAGE, val);
10702 -+}
10703 -+
10704 - static void __devinit fam15h_power_init_data(struct pci_dev *f4,
10705 - struct fam15h_power_data *data)
10706 - {
10707 -@@ -155,6 +190,13 @@ static int __devinit fam15h_power_probe(struct pci_dev *pdev,
10708 - struct device *dev;
10709 - int err;
10710 -
10711 -+ /*
10712 -+ * though we ignore every other northbridge, we still have to
10713 -+ * do the tweaking on _each_ node in MCM processors as the counters
10714 -+ * are working hand-in-hand
10715 -+ */
10716 -+ tweak_runavg_range(pdev);
10717 -+
10718 - if (!fam15h_power_is_internal_node0(pdev)) {
10719 - err = -ENODEV;
10720 - goto exit;
10721 -diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
10722 -index 04be9f8..eb8ad53 100644
10723 ---- a/drivers/i2c/busses/i2c-pnx.c
10724 -+++ b/drivers/i2c/busses/i2c-pnx.c
10725 -@@ -546,8 +546,7 @@ static int i2c_pnx_controller_suspend(struct platform_device *pdev,
10726 - {
10727 - struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
10728 -
10729 -- /* FIXME: shouldn't this be clk_disable? */
10730 -- clk_enable(alg_data->clk);
10731 -+ clk_disable(alg_data->clk);
10732 -
10733 - return 0;
10734 - }
10735 -diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
10736 -index 8081a0a..a4b14a4 100644
10737 ---- a/drivers/input/mouse/synaptics.c
10738 -+++ b/drivers/input/mouse/synaptics.c
10739 -@@ -274,7 +274,8 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
10740 - static unsigned char param = 0xc8;
10741 - struct synaptics_data *priv = psmouse->private;
10742 -
10743 -- if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c))
10744 -+ if (!(SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
10745 -+ SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)))
10746 - return 0;
10747 -
10748 - if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
10749 -diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
10750 -index 360f2b9..d1162e5 100644
10751 ---- a/drivers/md/raid5.c
10752 -+++ b/drivers/md/raid5.c
10753 -@@ -3277,12 +3277,14 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
10754 - /* If there is a failed device being replaced,
10755 - * we must be recovering.
10756 - * else if we are after recovery_cp, we must be syncing
10757 -+ * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
10758 - * else we can only be replacing
10759 - * sync and recovery both need to read all devices, and so
10760 - * use the same flag.
10761 - */
10762 - if (do_recovery ||
10763 -- sh->sector >= conf->mddev->recovery_cp)
10764 -+ sh->sector >= conf->mddev->recovery_cp ||
10765 -+ test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
10766 - s->syncing = 1;
10767 - else
10768 - s->replacing = 1;
10769 -diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
10770 -index 23ffb1b..11ab4a4 100644
10771 ---- a/drivers/net/wireless/b43/main.c
10772 -+++ b/drivers/net/wireless/b43/main.c
10773 -@@ -4841,8 +4841,14 @@ static int b43_op_start(struct ieee80211_hw *hw)
10774 - out_mutex_unlock:
10775 - mutex_unlock(&wl->mutex);
10776 -
10777 -- /* reload configuration */
10778 -- b43_op_config(hw, ~0);
10779 -+ /*
10780 -+ * Configuration may have been overwritten during initialization.
10781 -+ * Reload the configuration, but only if initialization was
10782 -+ * successful. Reloading the configuration after a failed init
10783 -+ * may hang the system.
10784 -+ */
10785 -+ if (!err)
10786 -+ b43_op_config(hw, ~0);
10787 -
10788 - return err;
10789 - }
10790 -diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
10791 -index 4fcdac6..cb33e6c 100644
10792 ---- a/drivers/net/wireless/ipw2x00/ipw2200.c
10793 -+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
10794 -@@ -2191,6 +2191,7 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
10795 - {
10796 - int rc = 0;
10797 - unsigned long flags;
10798 -+ unsigned long now, end;
10799 -
10800 - spin_lock_irqsave(&priv->lock, flags);
10801 - if (priv->status & STATUS_HCMD_ACTIVE) {
10802 -@@ -2232,10 +2233,20 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
10803 - }
10804 - spin_unlock_irqrestore(&priv->lock, flags);
10805 -
10806 -+ now = jiffies;
10807 -+ end = now + HOST_COMPLETE_TIMEOUT;
10808 -+again:
10809 - rc = wait_event_interruptible_timeout(priv->wait_command_queue,
10810 - !(priv->
10811 - status & STATUS_HCMD_ACTIVE),
10812 -- HOST_COMPLETE_TIMEOUT);
10813 -+ end - now);
10814 -+ if (rc < 0) {
10815 -+ now = jiffies;
10816 -+ if (time_before(now, end))
10817 -+ goto again;
10818 -+ rc = 0;
10819 -+ }
10820 -+
10821 - if (rc == 0) {
10822 - spin_lock_irqsave(&priv->lock, flags);
10823 - if (priv->status & STATUS_HCMD_ACTIVE) {
10824 -diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
10825 -index 1ef7bfc..9fcd417 100644
10826 ---- a/drivers/net/wireless/iwlwifi/iwl-1000.c
10827 -+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
10828 -@@ -45,8 +45,8 @@
10829 - #include "iwl-cfg.h"
10830 -
10831 - /* Highest firmware API version supported */
10832 --#define IWL1000_UCODE_API_MAX 6
10833 --#define IWL100_UCODE_API_MAX 6
10834 -+#define IWL1000_UCODE_API_MAX 5
10835 -+#define IWL100_UCODE_API_MAX 5
10836 -
10837 - /* Oldest version we won't warn about */
10838 - #define IWL1000_UCODE_API_OK 5
10839 -@@ -235,5 +235,5 @@ struct iwl_cfg iwl100_bg_cfg = {
10840 - IWL_DEVICE_100,
10841 - };
10842 -
10843 --MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
10844 --MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
10845 -+MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
10846 -+MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
10847 -diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
10848 -index 0946933..369d6b1 100644
10849 ---- a/drivers/net/wireless/iwlwifi/iwl-2000.c
10850 -+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
10851 -@@ -51,10 +51,10 @@
10852 - #define IWL135_UCODE_API_MAX 6
10853 -
10854 - /* Oldest version we won't warn about */
10855 --#define IWL2030_UCODE_API_OK 5
10856 --#define IWL2000_UCODE_API_OK 5
10857 --#define IWL105_UCODE_API_OK 5
10858 --#define IWL135_UCODE_API_OK 5
10859 -+#define IWL2030_UCODE_API_OK 6
10860 -+#define IWL2000_UCODE_API_OK 6
10861 -+#define IWL105_UCODE_API_OK 6
10862 -+#define IWL135_UCODE_API_OK 6
10863 -
10864 - /* Lowest firmware API version supported */
10865 - #define IWL2030_UCODE_API_MIN 5
10866 -@@ -338,7 +338,7 @@ struct iwl_cfg iwl135_bgn_cfg = {
10867 - .ht_params = &iwl2000_ht_params,
10868 - };
10869 -
10870 --MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
10871 --MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
10872 --MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
10873 --MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
10874 -+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
10875 -+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
10876 -+MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
10877 -+MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
10878 -diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
10879 -index b3a365f..3ce542e 100644
10880 ---- a/drivers/net/wireless/iwlwifi/iwl-5000.c
10881 -+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
10882 -@@ -50,6 +50,10 @@
10883 - #define IWL5000_UCODE_API_MAX 5
10884 - #define IWL5150_UCODE_API_MAX 2
10885 -
10886 -+/* Oldest version we won't warn about */
10887 -+#define IWL5000_UCODE_API_OK 5
10888 -+#define IWL5150_UCODE_API_OK 2
10889 -+
10890 - /* Lowest firmware API version supported */
10891 - #define IWL5000_UCODE_API_MIN 1
10892 - #define IWL5150_UCODE_API_MIN 1
10893 -@@ -359,6 +363,7 @@ static struct iwl_ht_params iwl5000_ht_params = {
10894 - #define IWL_DEVICE_5000 \
10895 - .fw_name_pre = IWL5000_FW_PRE, \
10896 - .ucode_api_max = IWL5000_UCODE_API_MAX, \
10897 -+ .ucode_api_ok = IWL5000_UCODE_API_OK, \
10898 - .ucode_api_min = IWL5000_UCODE_API_MIN, \
10899 - .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
10900 - .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
10901 -@@ -402,6 +407,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
10902 - .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
10903 - .fw_name_pre = IWL5000_FW_PRE,
10904 - .ucode_api_max = IWL5000_UCODE_API_MAX,
10905 -+ .ucode_api_ok = IWL5000_UCODE_API_OK,
10906 - .ucode_api_min = IWL5000_UCODE_API_MIN,
10907 - .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
10908 - .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
10909 -@@ -415,6 +421,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
10910 - #define IWL_DEVICE_5150 \
10911 - .fw_name_pre = IWL5150_FW_PRE, \
10912 - .ucode_api_max = IWL5150_UCODE_API_MAX, \
10913 -+ .ucode_api_ok = IWL5150_UCODE_API_OK, \
10914 - .ucode_api_min = IWL5150_UCODE_API_MIN, \
10915 - .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
10916 - .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
10917 -@@ -436,5 +443,5 @@ struct iwl_cfg iwl5150_abg_cfg = {
10918 - IWL_DEVICE_5150,
10919 - };
10920 -
10921 --MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
10922 --MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
10923 -+MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
10924 -+MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
10925 -diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
10926 -index 54b7533..cf806ae 100644
10927 ---- a/drivers/net/wireless/iwlwifi/iwl-6000.c
10928 -+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
10929 -@@ -53,6 +53,8 @@
10930 - /* Oldest version we won't warn about */
10931 - #define IWL6000_UCODE_API_OK 4
10932 - #define IWL6000G2_UCODE_API_OK 5
10933 -+#define IWL6050_UCODE_API_OK 5
10934 -+#define IWL6000G2B_UCODE_API_OK 6
10935 -
10936 - /* Lowest firmware API version supported */
10937 - #define IWL6000_UCODE_API_MIN 4
10938 -@@ -389,7 +391,7 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
10939 - #define IWL_DEVICE_6030 \
10940 - .fw_name_pre = IWL6030_FW_PRE, \
10941 - .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
10942 -- .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
10943 -+ .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
10944 - .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
10945 - .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
10946 - .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
10947 -@@ -548,6 +550,6 @@ struct iwl_cfg iwl6000_3agn_cfg = {
10948 - };
10949 -
10950 - MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
10951 --MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
10952 --MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
10953 --MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
10954 -+MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
10955 -+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
10956 -+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
10957 -diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
10958 -index b5c7c5f..2db9cd7 100644
10959 ---- a/drivers/net/wireless/iwlwifi/iwl-agn.c
10960 -+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
10961 -@@ -1403,7 +1403,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
10962 -
10963 - void iwlagn_prepare_restart(struct iwl_priv *priv)
10964 - {
10965 -- struct iwl_rxon_context *ctx;
10966 - bool bt_full_concurrent;
10967 - u8 bt_ci_compliance;
10968 - u8 bt_load;
10969 -@@ -1412,8 +1411,6 @@ void iwlagn_prepare_restart(struct iwl_priv *priv)
10970 -
10971 - lockdep_assert_held(&priv->shrd->mutex);
10972 -
10973 -- for_each_context(priv, ctx)
10974 -- ctx->vif = NULL;
10975 - priv->is_open = 0;
10976 -
10977 - /*
10978 -diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
10979 -index 5bede9d..aae992a 100644
10980 ---- a/drivers/net/wireless/iwlwifi/iwl-fh.h
10981 -+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
10982 -@@ -104,15 +104,29 @@
10983 - * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
10984 - * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
10985 - * aligned (address bits 0-7 must be 0).
10986 -+ * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
10987 -+ * for them are in different places.
10988 - *
10989 - * Bit fields in each pointer register:
10990 - * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
10991 - */
10992 --#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
10993 --#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
10994 --
10995 --/* Find TFD CB base pointer for given queue (range 0-15). */
10996 --#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
10997 -+#define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
10998 -+#define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
10999 -+#define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0)
11000 -+#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
11001 -+#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
11002 -+#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
11003 -+
11004 -+/* Find TFD CB base pointer for given queue */
11005 -+static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
11006 -+{
11007 -+ if (chnl < 16)
11008 -+ return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
11009 -+ if (chnl < 20)
11010 -+ return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
11011 -+ WARN_ON_ONCE(chnl >= 32);
11012 -+ return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
11013 -+}
11014 -
11015 -
11016 - /**
11017 -diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
11018 -index f980e57..4fd5199 100644
11019 ---- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
11020 -+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
11021 -@@ -1226,6 +1226,7 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
11022 - struct iwl_rxon_context *tmp, *ctx = NULL;
11023 - int err;
11024 - enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
11025 -+ bool reset = false;
11026 -
11027 - IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
11028 - viftype, vif->addr);
11029 -@@ -1247,6 +1248,13 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
11030 - tmp->interface_modes | tmp->exclusive_interface_modes;
11031 -
11032 - if (tmp->vif) {
11033 -+ /* On reset we need to add the same interface again */
11034 -+ if (tmp->vif == vif) {
11035 -+ reset = true;
11036 -+ ctx = tmp;
11037 -+ break;
11038 -+ }
11039 -+
11040 - /* check if this busy context is exclusive */
11041 - if (tmp->exclusive_interface_modes &
11042 - BIT(tmp->vif->type)) {
11043 -@@ -1273,7 +1281,7 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
11044 - ctx->vif = vif;
11045 -
11046 - err = iwl_setup_interface(priv, ctx);
11047 -- if (!err)
11048 -+ if (!err || reset)
11049 - goto out;
11050 -
11051 - ctx->vif = NULL;
11052 -diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
11053 -index bebdd82..d9b089e 100644
11054 ---- a/drivers/net/wireless/iwlwifi/iwl-prph.h
11055 -+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
11056 -@@ -227,12 +227,33 @@
11057 - #define SCD_AIT (SCD_BASE + 0x0c)
11058 - #define SCD_TXFACT (SCD_BASE + 0x10)
11059 - #define SCD_ACTIVE (SCD_BASE + 0x14)
11060 --#define SCD_QUEUE_WRPTR(x) (SCD_BASE + 0x18 + (x) * 4)
11061 --#define SCD_QUEUE_RDPTR(x) (SCD_BASE + 0x68 + (x) * 4)
11062 - #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
11063 - #define SCD_AGGR_SEL (SCD_BASE + 0x248)
11064 - #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
11065 --#define SCD_QUEUE_STATUS_BITS(x) (SCD_BASE + 0x10c + (x) * 4)
11066 -+
11067 -+static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
11068 -+{
11069 -+ if (chnl < 20)
11070 -+ return SCD_BASE + 0x18 + chnl * 4;
11071 -+ WARN_ON_ONCE(chnl >= 32);
11072 -+ return SCD_BASE + 0x284 + (chnl - 20) * 4;
11073 -+}
11074 -+
11075 -+static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
11076 -+{
11077 -+ if (chnl < 20)
11078 -+ return SCD_BASE + 0x68 + chnl * 4;
11079 -+ WARN_ON_ONCE(chnl >= 32);
11080 -+ return SCD_BASE + 0x2B4 + (chnl - 20) * 4;
11081 -+}
11082 -+
11083 -+static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
11084 -+{
11085 -+ if (chnl < 20)
11086 -+ return SCD_BASE + 0x10c + chnl * 4;
11087 -+ WARN_ON_ONCE(chnl >= 32);
11088 -+ return SCD_BASE + 0x384 + (chnl - 20) * 4;
11089 -+}
11090 -
11091 - /*********************** END TX SCHEDULER *************************************/
11092 -
11093 -diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
11094 -index c694cae..b588ca8 100644
11095 ---- a/drivers/net/wireless/rtlwifi/pci.c
11096 -+++ b/drivers/net/wireless/rtlwifi/pci.c
11097 -@@ -1955,6 +1955,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
11098 - rtl_deinit_deferred_work(hw);
11099 - rtlpriv->intf_ops->adapter_stop(hw);
11100 - }
11101 -+ rtlpriv->cfg->ops->disable_interrupt(hw);
11102 -
11103 - /*deinit rfkill */
11104 - rtl_deinit_rfkill(hw);
11105 -diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
11106 -index ba3268e..40c1574 100644
11107 ---- a/drivers/net/wireless/wl1251/main.c
11108 -+++ b/drivers/net/wireless/wl1251/main.c
11109 -@@ -479,6 +479,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
11110 - cancel_work_sync(&wl->irq_work);
11111 - cancel_work_sync(&wl->tx_work);
11112 - cancel_work_sync(&wl->filter_work);
11113 -+ cancel_delayed_work_sync(&wl->elp_work);
11114 -
11115 - mutex_lock(&wl->mutex);
11116 -
11117 -diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
11118 -index f786942..1b851f6 100644
11119 ---- a/drivers/net/wireless/wl1251/sdio.c
11120 -+++ b/drivers/net/wireless/wl1251/sdio.c
11121 -@@ -315,8 +315,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
11122 -
11123 - if (wl->irq)
11124 - free_irq(wl->irq, wl);
11125 -- kfree(wl_sdio);
11126 - wl1251_free_hw(wl);
11127 -+ kfree(wl_sdio);
11128 -
11129 - sdio_claim_host(func);
11130 - sdio_release_irq(func);
11131 -diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
11132 -index 92e42d4..1d3bcce 100644
11133 ---- a/drivers/platform/x86/dell-laptop.c
11134 -+++ b/drivers/platform/x86/dell-laptop.c
11135 -@@ -211,6 +211,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
11136 - },
11137 - .driver_data = &quirk_dell_vostro_v130,
11138 - },
11139 -+ { }
11140 - };
11141 -
11142 - static struct calling_interface_buffer *buffer;
11143 -diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
11144 -index 1b831c5..e48ba4b 100644
11145 ---- a/drivers/scsi/libsas/sas_expander.c
11146 -+++ b/drivers/scsi/libsas/sas_expander.c
11147 -@@ -192,7 +192,14 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
11148 - phy->attached_sata_ps = dr->attached_sata_ps;
11149 - phy->attached_iproto = dr->iproto << 1;
11150 - phy->attached_tproto = dr->tproto << 1;
11151 -- memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
11152 -+ /* help some expanders that fail to zero sas_address in the 'no
11153 -+ * device' case
11154 -+ */
11155 -+ if (phy->attached_dev_type == NO_DEVICE ||
11156 -+ phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
11157 -+ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
11158 -+ else
11159 -+ memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
11160 - phy->attached_phy_id = dr->attached_phy_id;
11161 - phy->phy_change_count = dr->change_count;
11162 - phy->routing_attr = dr->routing_attr;
11163 -@@ -1643,9 +1650,17 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
11164 - int phy_change_count = 0;
11165 -
11166 - res = sas_get_phy_change_count(dev, i, &phy_change_count);
11167 -- if (res)
11168 -- goto out;
11169 -- else if (phy_change_count != ex->ex_phy[i].phy_change_count) {
11170 -+ switch (res) {
11171 -+ case SMP_RESP_PHY_VACANT:
11172 -+ case SMP_RESP_NO_PHY:
11173 -+ continue;
11174 -+ case SMP_RESP_FUNC_ACC:
11175 -+ break;
11176 -+ default:
11177 -+ return res;
11178 -+ }
11179 -+
11180 -+ if (phy_change_count != ex->ex_phy[i].phy_change_count) {
11181 - if (update)
11182 - ex->ex_phy[i].phy_change_count =
11183 - phy_change_count;
11184 -@@ -1653,8 +1668,7 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
11185 - return 0;
11186 - }
11187 - }
11188 --out:
11189 -- return res;
11190 -+ return 0;
11191 - }
11192 -
11193 - static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
11194 -diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
11195 -index 2f085fb..7b45f66 100644
11196 ---- a/drivers/usb/class/cdc-wdm.c
11197 -+++ b/drivers/usb/class/cdc-wdm.c
11198 -@@ -108,8 +108,9 @@ static void wdm_out_callback(struct urb *urb)
11199 - spin_lock(&desc->iuspin);
11200 - desc->werr = urb->status;
11201 - spin_unlock(&desc->iuspin);
11202 -- clear_bit(WDM_IN_USE, &desc->flags);
11203 - kfree(desc->outbuf);
11204 -+ desc->outbuf = NULL;
11205 -+ clear_bit(WDM_IN_USE, &desc->flags);
11206 - wake_up(&desc->wait);
11207 - }
11208 -
11209 -@@ -312,7 +313,7 @@ static ssize_t wdm_write
11210 - if (we < 0)
11211 - return -EIO;
11212 -
11213 -- desc->outbuf = buf = kmalloc(count, GFP_KERNEL);
11214 -+ buf = kmalloc(count, GFP_KERNEL);
11215 - if (!buf) {
11216 - rv = -ENOMEM;
11217 - goto outnl;
11218 -@@ -376,10 +377,12 @@ static ssize_t wdm_write
11219 - req->wIndex = desc->inum;
11220 - req->wLength = cpu_to_le16(count);
11221 - set_bit(WDM_IN_USE, &desc->flags);
11222 -+ desc->outbuf = buf;
11223 -
11224 - rv = usb_submit_urb(desc->command, GFP_KERNEL);
11225 - if (rv < 0) {
11226 - kfree(buf);
11227 -+ desc->outbuf = NULL;
11228 - clear_bit(WDM_IN_USE, &desc->flags);
11229 - dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
11230 - } else {
11231 -diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
11232 -index 81e2c0d..c4dfcc0 100644
11233 ---- a/drivers/usb/core/hcd-pci.c
11234 -+++ b/drivers/usb/core/hcd-pci.c
11235 -@@ -491,6 +491,15 @@ static int hcd_pci_suspend_noirq(struct device *dev)
11236 -
11237 - pci_save_state(pci_dev);
11238 -
11239 -+ /*
11240 -+ * Some systems crash if an EHCI controller is in D3 during
11241 -+ * a sleep transition. We have to leave such controllers in D0.
11242 -+ */
11243 -+ if (hcd->broken_pci_sleep) {
11244 -+ dev_dbg(dev, "Staying in PCI D0\n");
11245 -+ return retval;
11246 -+ }
11247 -+
11248 - /* If the root hub is dead rather than suspended, disallow remote
11249 - * wakeup. usb_hc_died() should ensure that both hosts are marked as
11250 - * dying, so we only need to check the primary roothub.
11251 -diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
11252 -index db815c2..9098642 100644
11253 ---- a/drivers/usb/gadget/dummy_hcd.c
11254 -+++ b/drivers/usb/gadget/dummy_hcd.c
11255 -@@ -924,7 +924,6 @@ static int dummy_udc_stop(struct usb_gadget *g,
11256 -
11257 - dum->driver = NULL;
11258 -
11259 -- dummy_pullup(&dum->gadget, 0);
11260 - return 0;
11261 - }
11262 -
11263 -diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
11264 -index ee8ceec..1d7682d 100644
11265 ---- a/drivers/usb/gadget/f_mass_storage.c
11266 -+++ b/drivers/usb/gadget/f_mass_storage.c
11267 -@@ -2190,7 +2190,7 @@ unknown_cmnd:
11268 - common->data_size_from_cmnd = 0;
11269 - sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
11270 - reply = check_command(common, common->cmnd_size,
11271 -- DATA_DIR_UNKNOWN, 0xff, 0, unknown);
11272 -+ DATA_DIR_UNKNOWN, ~0, 0, unknown);
11273 - if (reply == 0) {
11274 - common->curlun->sense_data = SS_INVALID_COMMAND;
11275 - reply = -EINVAL;
11276 -diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
11277 -index 47766f0..18d96e0 100644
11278 ---- a/drivers/usb/gadget/file_storage.c
11279 -+++ b/drivers/usb/gadget/file_storage.c
11280 -@@ -2579,7 +2579,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
11281 - fsg->data_size_from_cmnd = 0;
11282 - sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
11283 - if ((reply = check_command(fsg, fsg->cmnd_size,
11284 -- DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
11285 -+ DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) {
11286 - fsg->curlun->sense_data = SS_INVALID_COMMAND;
11287 - reply = -EINVAL;
11288 - }
11289 -diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h
11290 -index bc78c60..ca4e03a 100644
11291 ---- a/drivers/usb/gadget/uvc.h
11292 -+++ b/drivers/usb/gadget/uvc.h
11293 -@@ -28,7 +28,7 @@
11294 -
11295 - struct uvc_request_data
11296 - {
11297 -- unsigned int length;
11298 -+ __s32 length;
11299 - __u8 data[60];
11300 - };
11301 -
11302 -diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
11303 -index f6e083b..54d7ca5 100644
11304 ---- a/drivers/usb/gadget/uvc_v4l2.c
11305 -+++ b/drivers/usb/gadget/uvc_v4l2.c
11306 -@@ -39,7 +39,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
11307 - if (data->length < 0)
11308 - return usb_ep_set_halt(cdev->gadget->ep0);
11309 -
11310 -- req->length = min(uvc->event_length, data->length);
11311 -+ req->length = min_t(unsigned int, uvc->event_length, data->length);
11312 - req->zero = data->length < uvc->event_length;
11313 - req->dma = DMA_ADDR_INVALID;
11314 -
11315 -diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
11316 -index 01bb7241d..fe8dc06 100644
11317 ---- a/drivers/usb/host/ehci-pci.c
11318 -+++ b/drivers/usb/host/ehci-pci.c
11319 -@@ -144,6 +144,14 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
11320 - hcd->has_tt = 1;
11321 - tdi_reset(ehci);
11322 - }
11323 -+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
11324 -+ /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
11325 -+ if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
11326 -+ ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
11327 -+ hcd->broken_pci_sleep = 1;
11328 -+ device_set_wakeup_capable(&pdev->dev, false);
11329 -+ }
11330 -+ }
11331 - break;
11332 - case PCI_VENDOR_ID_TDI:
11333 - if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
11334 -diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
11335 -index dbc7fe8..de36b8c 100644
11336 ---- a/drivers/usb/host/ehci-tegra.c
11337 -+++ b/drivers/usb/host/ehci-tegra.c
11338 -@@ -601,7 +601,6 @@ static int setup_vbus_gpio(struct platform_device *pdev)
11339 - dev_err(&pdev->dev, "can't enable vbus\n");
11340 - return err;
11341 - }
11342 -- gpio_set_value(gpio, 1);
11343 -
11344 - return err;
11345 - }
11346 -diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
11347 -index eb1cc92..908e184 100644
11348 ---- a/fs/autofs4/autofs_i.h
11349 -+++ b/fs/autofs4/autofs_i.h
11350 -@@ -110,7 +110,6 @@ struct autofs_sb_info {
11351 - int sub_version;
11352 - int min_proto;
11353 - int max_proto;
11354 -- int compat_daemon;
11355 - unsigned long exp_timeout;
11356 - unsigned int type;
11357 - int reghost_enabled;
11358 -@@ -270,6 +269,17 @@ int autofs4_fill_super(struct super_block *, void *, int);
11359 - struct autofs_info *autofs4_new_ino(struct autofs_sb_info *);
11360 - void autofs4_clean_ino(struct autofs_info *);
11361 -
11362 -+static inline int autofs_prepare_pipe(struct file *pipe)
11363 -+{
11364 -+ if (!pipe->f_op || !pipe->f_op->write)
11365 -+ return -EINVAL;
11366 -+ if (!S_ISFIFO(pipe->f_dentry->d_inode->i_mode))
11367 -+ return -EINVAL;
11368 -+ /* We want a packet pipe */
11369 -+ pipe->f_flags |= O_DIRECT;
11370 -+ return 0;
11371 -+}
11372 -+
11373 - /* Queue management functions */
11374 -
11375 - int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify);
11376 -diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
11377 -index 85f1fcd..d06d95a 100644
11378 ---- a/fs/autofs4/dev-ioctl.c
11379 -+++ b/fs/autofs4/dev-ioctl.c
11380 -@@ -376,7 +376,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
11381 - err = -EBADF;
11382 - goto out;
11383 - }
11384 -- if (!pipe->f_op || !pipe->f_op->write) {
11385 -+ if (autofs_prepare_pipe(pipe) < 0) {
11386 - err = -EPIPE;
11387 - fput(pipe);
11388 - goto out;
11389 -@@ -385,7 +385,6 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
11390 - sbi->pipefd = pipefd;
11391 - sbi->pipe = pipe;
11392 - sbi->catatonic = 0;
11393 -- sbi->compat_daemon = is_compat_task();
11394 - }
11395 - out:
11396 - mutex_unlock(&sbi->wq_mutex);
11397 -diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
11398 -index 06858d9..9ef53a6 100644
11399 ---- a/fs/autofs4/inode.c
11400 -+++ b/fs/autofs4/inode.c
11401 -@@ -19,7 +19,6 @@
11402 - #include <linux/parser.h>
11403 - #include <linux/bitops.h>
11404 - #include <linux/magic.h>
11405 --#include <linux/compat.h>
11406 - #include "autofs_i.h"
11407 - #include <linux/module.h>
11408 -
11409 -@@ -225,7 +224,6 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
11410 - set_autofs_type_indirect(&sbi->type);
11411 - sbi->min_proto = 0;
11412 - sbi->max_proto = 0;
11413 -- sbi->compat_daemon = is_compat_task();
11414 - mutex_init(&sbi->wq_mutex);
11415 - mutex_init(&sbi->pipe_mutex);
11416 - spin_lock_init(&sbi->fs_lock);
11417 -@@ -295,7 +293,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
11418 - printk("autofs: could not open pipe file descriptor\n");
11419 - goto fail_dput;
11420 - }
11421 -- if (!pipe->f_op || !pipe->f_op->write)
11422 -+ if (autofs_prepare_pipe(pipe) < 0)
11423 - goto fail_fput;
11424 - sbi->pipe = pipe;
11425 - sbi->pipefd = pipefd;
11426 -diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
11427 -index 9c098db..f624cd0 100644
11428 ---- a/fs/autofs4/waitq.c
11429 -+++ b/fs/autofs4/waitq.c
11430 -@@ -92,23 +92,6 @@ static int autofs4_write(struct autofs_sb_info *sbi,
11431 - return (bytes > 0);
11432 - }
11433 -
11434 --/*
11435 -- * The autofs_v5 packet was misdesigned.
11436 -- *
11437 -- * The packets are identical on x86-32 and x86-64, but have different
11438 -- * alignment. Which means that 'sizeof()' will give different results.
11439 -- * Fix it up for the case of running 32-bit user mode on a 64-bit kernel.
11440 -- */
11441 --static noinline size_t autofs_v5_packet_size(struct autofs_sb_info *sbi)
11442 --{
11443 -- size_t pktsz = sizeof(struct autofs_v5_packet);
11444 --#if defined(CONFIG_X86_64) && defined(CONFIG_COMPAT)
11445 -- if (sbi->compat_daemon > 0)
11446 -- pktsz -= 4;
11447 --#endif
11448 -- return pktsz;
11449 --}
11450 --
11451 - static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
11452 - struct autofs_wait_queue *wq,
11453 - int type)
11454 -@@ -172,7 +155,8 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
11455 - {
11456 - struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
11457 -
11458 -- pktsz = autofs_v5_packet_size(sbi);
11459 -+ pktsz = sizeof(*packet);
11460 -+
11461 - packet->wait_queue_token = wq->wait_queue_token;
11462 - packet->len = wq->name.len;
11463 - memcpy(packet->name, wq->name.name, wq->name.len);
11464 -diff --git a/fs/exec.c b/fs/exec.c
11465 -index 153dee1..ae42277 100644
11466 ---- a/fs/exec.c
11467 -+++ b/fs/exec.c
11468 -@@ -975,6 +975,9 @@ static int de_thread(struct task_struct *tsk)
11469 - sig->notify_count = 0;
11470 -
11471 - no_thread_group:
11472 -+ /* we have changed execution domain */
11473 -+ tsk->exit_signal = SIGCHLD;
11474 -+
11475 - if (current->mm)
11476 - setmax_mm_hiwater_rss(&sig->maxrss, current->mm);
11477 -
11478 -diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
11479 -index 4dfbfec..ec2a9c2 100644
11480 ---- a/fs/hfsplus/catalog.c
11481 -+++ b/fs/hfsplus/catalog.c
11482 -@@ -366,6 +366,10 @@ int hfsplus_rename_cat(u32 cnid,
11483 - err = hfs_brec_find(&src_fd);
11484 - if (err)
11485 - goto out;
11486 -+ if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
11487 -+ err = -EIO;
11488 -+ goto out;
11489 -+ }
11490 -
11491 - hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
11492 - src_fd.entrylength);
11493 -diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
11494 -index 88e155f..26b53fb 100644
11495 ---- a/fs/hfsplus/dir.c
11496 -+++ b/fs/hfsplus/dir.c
11497 -@@ -150,6 +150,11 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
11498 - filp->f_pos++;
11499 - /* fall through */
11500 - case 1:
11501 -+ if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
11502 -+ err = -EIO;
11503 -+ goto out;
11504 -+ }
11505 -+
11506 - hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
11507 - fd.entrylength);
11508 - if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
11509 -@@ -181,6 +186,12 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
11510 - err = -EIO;
11511 - goto out;
11512 - }
11513 -+
11514 -+ if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
11515 -+ err = -EIO;
11516 -+ goto out;
11517 -+ }
11518 -+
11519 - hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
11520 - fd.entrylength);
11521 - type = be16_to_cpu(entry.type);
11522 -diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
11523 -index 9a54c9e..2612223 100644
11524 ---- a/fs/nfs/nfs4proc.c
11525 -+++ b/fs/nfs/nfs4proc.c
11526 -@@ -4460,7 +4460,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
11527 - static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
11528 - {
11529 - struct nfs_server *server = NFS_SERVER(state->inode);
11530 -- struct nfs4_exception exception = { };
11531 -+ struct nfs4_exception exception = {
11532 -+ .inode = state->inode,
11533 -+ };
11534 - int err;
11535 -
11536 - do {
11537 -@@ -4478,7 +4480,9 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
11538 - static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
11539 - {
11540 - struct nfs_server *server = NFS_SERVER(state->inode);
11541 -- struct nfs4_exception exception = { };
11542 -+ struct nfs4_exception exception = {
11543 -+ .inode = state->inode,
11544 -+ };
11545 - int err;
11546 -
11547 - err = nfs4_set_lock_state(state, request);
11548 -@@ -4558,6 +4562,7 @@ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *
11549 - {
11550 - struct nfs4_exception exception = {
11551 - .state = state,
11552 -+ .inode = state->inode,
11553 - };
11554 - int err;
11555 -
11556 -@@ -4603,6 +4608,20 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
11557 -
11558 - if (state == NULL)
11559 - return -ENOLCK;
11560 -+ /*
11561 -+ * Don't rely on the VFS having checked the file open mode,
11562 -+ * since it won't do this for flock() locks.
11563 -+ */
11564 -+ switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
11565 -+ case F_RDLCK:
11566 -+ if (!(filp->f_mode & FMODE_READ))
11567 -+ return -EBADF;
11568 -+ break;
11569 -+ case F_WRLCK:
11570 -+ if (!(filp->f_mode & FMODE_WRITE))
11571 -+ return -EBADF;
11572 -+ }
11573 -+
11574 - do {
11575 - status = nfs4_proc_setlk(state, cmd, request);
11576 - if ((status != -EAGAIN) || IS_SETLK(cmd))
11577 -diff --git a/fs/nfs/read.c b/fs/nfs/read.c
11578 -index cfa175c..41bae32 100644
11579 ---- a/fs/nfs/read.c
11580 -+++ b/fs/nfs/read.c
11581 -@@ -324,7 +324,7 @@ out_bad:
11582 - while (!list_empty(res)) {
11583 - data = list_entry(res->next, struct nfs_read_data, list);
11584 - list_del(&data->list);
11585 -- nfs_readdata_free(data);
11586 -+ nfs_readdata_release(data);
11587 - }
11588 - nfs_readpage_release(req);
11589 - return -ENOMEM;
11590 -diff --git a/fs/nfs/super.c b/fs/nfs/super.c
11591 -index 3dfa4f1..e4622ee 100644
11592 ---- a/fs/nfs/super.c
11593 -+++ b/fs/nfs/super.c
11594 -@@ -2707,11 +2707,15 @@ static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
11595 - char *root_devname;
11596 - size_t len;
11597 -
11598 -- len = strlen(hostname) + 3;
11599 -+ len = strlen(hostname) + 5;
11600 - root_devname = kmalloc(len, GFP_KERNEL);
11601 - if (root_devname == NULL)
11602 - return ERR_PTR(-ENOMEM);
11603 -- snprintf(root_devname, len, "%s:/", hostname);
11604 -+ /* Does hostname needs to be enclosed in brackets? */
11605 -+ if (strchr(hostname, ':'))
11606 -+ snprintf(root_devname, len, "[%s]:/", hostname);
11607 -+ else
11608 -+ snprintf(root_devname, len, "%s:/", hostname);
11609 - root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
11610 - kfree(root_devname);
11611 - return root_mnt;
11612 -diff --git a/fs/nfs/write.c b/fs/nfs/write.c
11613 -index 834f0fe..8fcc23a 100644
11614 ---- a/fs/nfs/write.c
11615 -+++ b/fs/nfs/write.c
11616 -@@ -974,7 +974,7 @@ out_bad:
11617 - while (!list_empty(res)) {
11618 - data = list_entry(res->next, struct nfs_write_data, list);
11619 - list_del(&data->list);
11620 -- nfs_writedata_free(data);
11621 -+ nfs_writedata_release(data);
11622 - }
11623 - nfs_redirty_request(req);
11624 - return -ENOMEM;
11625 -diff --git a/fs/pipe.c b/fs/pipe.c
11626 -index a932ced..82e651b 100644
11627 ---- a/fs/pipe.c
11628 -+++ b/fs/pipe.c
11629 -@@ -345,6 +345,16 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
11630 - .get = generic_pipe_buf_get,
11631 - };
11632 -
11633 -+static const struct pipe_buf_operations packet_pipe_buf_ops = {
11634 -+ .can_merge = 0,
11635 -+ .map = generic_pipe_buf_map,
11636 -+ .unmap = generic_pipe_buf_unmap,
11637 -+ .confirm = generic_pipe_buf_confirm,
11638 -+ .release = anon_pipe_buf_release,
11639 -+ .steal = generic_pipe_buf_steal,
11640 -+ .get = generic_pipe_buf_get,
11641 -+};
11642 -+
11643 - static ssize_t
11644 - pipe_read(struct kiocb *iocb, const struct iovec *_iov,
11645 - unsigned long nr_segs, loff_t pos)
11646 -@@ -406,6 +416,13 @@ redo:
11647 - ret += chars;
11648 - buf->offset += chars;
11649 - buf->len -= chars;
11650 -+
11651 -+ /* Was it a packet buffer? Clean up and exit */
11652 -+ if (buf->flags & PIPE_BUF_FLAG_PACKET) {
11653 -+ total_len = chars;
11654 -+ buf->len = 0;
11655 -+ }
11656 -+
11657 - if (!buf->len) {
11658 - buf->ops = NULL;
11659 - ops->release(pipe, buf);
11660 -@@ -458,6 +475,11 @@ redo:
11661 - return ret;
11662 - }
11663 -
11664 -+static inline int is_packetized(struct file *file)
11665 -+{
11666 -+ return (file->f_flags & O_DIRECT) != 0;
11667 -+}
11668 -+
11669 - static ssize_t
11670 - pipe_write(struct kiocb *iocb, const struct iovec *_iov,
11671 - unsigned long nr_segs, loff_t ppos)
11672 -@@ -592,6 +614,11 @@ redo2:
11673 - buf->ops = &anon_pipe_buf_ops;
11674 - buf->offset = 0;
11675 - buf->len = chars;
11676 -+ buf->flags = 0;
11677 -+ if (is_packetized(filp)) {
11678 -+ buf->ops = &packet_pipe_buf_ops;
11679 -+ buf->flags = PIPE_BUF_FLAG_PACKET;
11680 -+ }
11681 - pipe->nrbufs = ++bufs;
11682 - pipe->tmp_page = NULL;
11683 -
11684 -@@ -1012,7 +1039,7 @@ struct file *create_write_pipe(int flags)
11685 - goto err_dentry;
11686 - f->f_mapping = inode->i_mapping;
11687 -
11688 -- f->f_flags = O_WRONLY | (flags & O_NONBLOCK);
11689 -+ f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
11690 - f->f_version = 0;
11691 -
11692 - return f;
11693 -@@ -1056,7 +1083,7 @@ int do_pipe_flags(int *fd, int flags)
11694 - int error;
11695 - int fdw, fdr;
11696 -
11697 -- if (flags & ~(O_CLOEXEC | O_NONBLOCK))
11698 -+ if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
11699 - return -EINVAL;
11700 -
11701 - fw = create_write_pipe(flags);
11702 -diff --git a/include/linux/efi.h b/include/linux/efi.h
11703 -index 37c3007..7cce0ea 100644
11704 ---- a/include/linux/efi.h
11705 -+++ b/include/linux/efi.h
11706 -@@ -510,7 +510,18 @@ extern int __init efi_setup_pcdp_console(char *);
11707 - #define EFI_VARIABLE_NON_VOLATILE 0x0000000000000001
11708 - #define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002
11709 - #define EFI_VARIABLE_RUNTIME_ACCESS 0x0000000000000004
11710 --
11711 -+#define EFI_VARIABLE_HARDWARE_ERROR_RECORD 0x0000000000000008
11712 -+#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS 0x0000000000000010
11713 -+#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020
11714 -+#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040
11715 -+
11716 -+#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
11717 -+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
11718 -+ EFI_VARIABLE_RUNTIME_ACCESS | \
11719 -+ EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
11720 -+ EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | \
11721 -+ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \
11722 -+ EFI_VARIABLE_APPEND_WRITE)
11723 - /*
11724 - * The type of search to perform when calling boottime->locate_handle
11725 - */
11726 -diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
11727 -index 77257c9..0072a53 100644
11728 ---- a/include/linux/pipe_fs_i.h
11729 -+++ b/include/linux/pipe_fs_i.h
11730 -@@ -8,6 +8,7 @@
11731 - #define PIPE_BUF_FLAG_LRU 0x01 /* page is on the LRU */
11732 - #define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */
11733 - #define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */
11734 -+#define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */
11735 -
11736 - /**
11737 - * struct pipe_buffer - a linux kernel pipe buffer
11738 -diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
11739 -index b2f62f3..05695ba 100644
11740 ---- a/include/linux/usb/hcd.h
11741 -+++ b/include/linux/usb/hcd.h
11742 -@@ -126,6 +126,8 @@ struct usb_hcd {
11743 - unsigned wireless:1; /* Wireless USB HCD */
11744 - unsigned authorized_default:1;
11745 - unsigned has_tt:1; /* Integrated TT in root hub */
11746 -+ unsigned broken_pci_sleep:1; /* Don't put the
11747 -+ controller in PCI-D3 for system sleep */
11748 -
11749 - int irq; /* irq allocated */
11750 - void __iomem *regs; /* device memory/io */
11751 -diff --git a/kernel/exit.c b/kernel/exit.c
11752 -index 4b4042f..46c8b14 100644
11753 ---- a/kernel/exit.c
11754 -+++ b/kernel/exit.c
11755 -@@ -818,25 +818,6 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
11756 - if (group_dead)
11757 - kill_orphaned_pgrp(tsk->group_leader, NULL);
11758 -
11759 -- /* Let father know we died
11760 -- *
11761 -- * Thread signals are configurable, but you aren't going to use
11762 -- * that to send signals to arbitrary processes.
11763 -- * That stops right now.
11764 -- *
11765 -- * If the parent exec id doesn't match the exec id we saved
11766 -- * when we started then we know the parent has changed security
11767 -- * domain.
11768 -- *
11769 -- * If our self_exec id doesn't match our parent_exec_id then
11770 -- * we have changed execution domain as these two values started
11771 -- * the same after a fork.
11772 -- */
11773 -- if (thread_group_leader(tsk) && tsk->exit_signal != SIGCHLD &&
11774 -- (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
11775 -- tsk->self_exec_id != tsk->parent_exec_id))
11776 -- tsk->exit_signal = SIGCHLD;
11777 --
11778 - if (unlikely(tsk->ptrace)) {
11779 - int sig = thread_group_leader(tsk) &&
11780 - thread_group_empty(tsk) &&
11781 -diff --git a/kernel/power/swap.c b/kernel/power/swap.c
11782 -index 8742fd0..eef311a 100644
11783 ---- a/kernel/power/swap.c
11784 -+++ b/kernel/power/swap.c
11785 -@@ -51,6 +51,23 @@
11786 -
11787 - #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
11788 -
11789 -+/*
11790 -+ * Number of free pages that are not high.
11791 -+ */
11792 -+static inline unsigned long low_free_pages(void)
11793 -+{
11794 -+ return nr_free_pages() - nr_free_highpages();
11795 -+}
11796 -+
11797 -+/*
11798 -+ * Number of pages required to be kept free while writing the image. Always
11799 -+ * half of all available low pages before the writing starts.
11800 -+ */
11801 -+static inline unsigned long reqd_free_pages(void)
11802 -+{
11803 -+ return low_free_pages() / 2;
11804 -+}
11805 -+
11806 - struct swap_map_page {
11807 - sector_t entries[MAP_PAGE_ENTRIES];
11808 - sector_t next_swap;
11809 -@@ -72,7 +89,7 @@ struct swap_map_handle {
11810 - sector_t cur_swap;
11811 - sector_t first_sector;
11812 - unsigned int k;
11813 -- unsigned long nr_free_pages, written;
11814 -+ unsigned long reqd_free_pages;
11815 - u32 crc32;
11816 - };
11817 -
11818 -@@ -316,8 +333,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
11819 - goto err_rel;
11820 - }
11821 - handle->k = 0;
11822 -- handle->nr_free_pages = nr_free_pages() >> 1;
11823 -- handle->written = 0;
11824 -+ handle->reqd_free_pages = reqd_free_pages();
11825 - handle->first_sector = handle->cur_swap;
11826 - return 0;
11827 - err_rel:
11828 -@@ -352,11 +368,11 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
11829 - handle->cur_swap = offset;
11830 - handle->k = 0;
11831 - }
11832 -- if (bio_chain && ++handle->written > handle->nr_free_pages) {
11833 -+ if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
11834 - error = hib_wait_on_bio_chain(bio_chain);
11835 - if (error)
11836 - goto out;
11837 -- handle->written = 0;
11838 -+ handle->reqd_free_pages = reqd_free_pages();
11839 - }
11840 - out:
11841 - return error;
11842 -@@ -618,7 +634,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
11843 - * Adjust number of free pages after all allocations have been done.
11844 - * We don't want to run out of pages when writing.
11845 - */
11846 -- handle->nr_free_pages = nr_free_pages() >> 1;
11847 -+ handle->reqd_free_pages = reqd_free_pages();
11848 -
11849 - /*
11850 - * Start the CRC32 thread.
11851 -diff --git a/kernel/sched/core.c b/kernel/sched/core.c
11852 -index b342f57..478a04c 100644
11853 ---- a/kernel/sched/core.c
11854 -+++ b/kernel/sched/core.c
11855 -@@ -2266,13 +2266,10 @@ calc_load_n(unsigned long load, unsigned long exp,
11856 - * Once we've updated the global active value, we need to apply the exponential
11857 - * weights adjusted to the number of cycles missed.
11858 - */
11859 --static void calc_global_nohz(unsigned long ticks)
11860 -+static void calc_global_nohz(void)
11861 - {
11862 - long delta, active, n;
11863 -
11864 -- if (time_before(jiffies, calc_load_update))
11865 -- return;
11866 --
11867 - /*
11868 - * If we crossed a calc_load_update boundary, make sure to fold
11869 - * any pending idle changes, the respective CPUs might have
11870 -@@ -2284,31 +2281,25 @@ static void calc_global_nohz(unsigned long ticks)
11871 - atomic_long_add(delta, &calc_load_tasks);
11872 -
11873 - /*
11874 -- * If we were idle for multiple load cycles, apply them.
11875 -+ * It could be the one fold was all it took, we done!
11876 - */
11877 -- if (ticks >= LOAD_FREQ) {
11878 -- n = ticks / LOAD_FREQ;
11879 -+ if (time_before(jiffies, calc_load_update + 10))
11880 -+ return;
11881 -
11882 -- active = atomic_long_read(&calc_load_tasks);
11883 -- active = active > 0 ? active * FIXED_1 : 0;
11884 -+ /*
11885 -+ * Catch-up, fold however many we are behind still
11886 -+ */
11887 -+ delta = jiffies - calc_load_update - 10;
11888 -+ n = 1 + (delta / LOAD_FREQ);
11889 -
11890 -- avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
11891 -- avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
11892 -- avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
11893 -+ active = atomic_long_read(&calc_load_tasks);
11894 -+ active = active > 0 ? active * FIXED_1 : 0;
11895 -
11896 -- calc_load_update += n * LOAD_FREQ;
11897 -- }
11898 -+ avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
11899 -+ avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
11900 -+ avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
11901 -
11902 -- /*
11903 -- * Its possible the remainder of the above division also crosses
11904 -- * a LOAD_FREQ period, the regular check in calc_global_load()
11905 -- * which comes after this will take care of that.
11906 -- *
11907 -- * Consider us being 11 ticks before a cycle completion, and us
11908 -- * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
11909 -- * age us 4 cycles, and the test in calc_global_load() will
11910 -- * pick up the final one.
11911 -- */
11912 -+ calc_load_update += n * LOAD_FREQ;
11913 - }
11914 - #else
11915 - void calc_load_account_idle(struct rq *this_rq)
11916 -@@ -2320,7 +2311,7 @@ static inline long calc_load_fold_idle(void)
11917 - return 0;
11918 - }
11919 -
11920 --static void calc_global_nohz(unsigned long ticks)
11921 -+static void calc_global_nohz(void)
11922 - {
11923 - }
11924 - #endif
11925 -@@ -2348,8 +2339,6 @@ void calc_global_load(unsigned long ticks)
11926 - {
11927 - long active;
11928 -
11929 -- calc_global_nohz(ticks);
11930 --
11931 - if (time_before(jiffies, calc_load_update + 10))
11932 - return;
11933 -
11934 -@@ -2361,6 +2350,16 @@ void calc_global_load(unsigned long ticks)
11935 - avenrun[2] = calc_load(avenrun[2], EXP_15, active);
11936 -
11937 - calc_load_update += LOAD_FREQ;
11938 -+
11939 -+ /*
11940 -+ * Account one period with whatever state we found before
11941 -+ * folding in the nohz state and ageing the entire idle period.
11942 -+ *
11943 -+ * This avoids loosing a sample when we go idle between
11944 -+ * calc_load_account_active() (10 ticks ago) and now and thus
11945 -+ * under-accounting.
11946 -+ */
11947 -+ calc_global_nohz();
11948 - }
11949 -
11950 - /*
11951 -@@ -6334,16 +6333,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
11952 - struct sd_data *sdd = &tl->data;
11953 -
11954 - for_each_cpu(j, cpu_map) {
11955 -- struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
11956 -- if (sd && (sd->flags & SD_OVERLAP))
11957 -- free_sched_groups(sd->groups, 0);
11958 -- kfree(*per_cpu_ptr(sdd->sd, j));
11959 -- kfree(*per_cpu_ptr(sdd->sg, j));
11960 -- kfree(*per_cpu_ptr(sdd->sgp, j));
11961 -+ struct sched_domain *sd;
11962 -+
11963 -+ if (sdd->sd) {
11964 -+ sd = *per_cpu_ptr(sdd->sd, j);
11965 -+ if (sd && (sd->flags & SD_OVERLAP))
11966 -+ free_sched_groups(sd->groups, 0);
11967 -+ kfree(*per_cpu_ptr(sdd->sd, j));
11968 -+ }
11969 -+
11970 -+ if (sdd->sg)
11971 -+ kfree(*per_cpu_ptr(sdd->sg, j));
11972 -+ if (sdd->sgp)
11973 -+ kfree(*per_cpu_ptr(sdd->sgp, j));
11974 - }
11975 - free_percpu(sdd->sd);
11976 -+ sdd->sd = NULL;
11977 - free_percpu(sdd->sg);
11978 -+ sdd->sg = NULL;
11979 - free_percpu(sdd->sgp);
11980 -+ sdd->sgp = NULL;
11981 - }
11982 - }
11983 -
11984 -diff --git a/kernel/signal.c b/kernel/signal.c
11985 -index c73c428..b09cf3b 100644
11986 ---- a/kernel/signal.c
11987 -+++ b/kernel/signal.c
11988 -@@ -1642,6 +1642,15 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
11989 - BUG_ON(!tsk->ptrace &&
11990 - (tsk->group_leader != tsk || !thread_group_empty(tsk)));
11991 -
11992 -+ if (sig != SIGCHLD) {
11993 -+ /*
11994 -+ * This is only possible if parent == real_parent.
11995 -+ * Check if it has changed security domain.
11996 -+ */
11997 -+ if (tsk->parent_exec_id != tsk->parent->self_exec_id)
11998 -+ sig = SIGCHLD;
11999 -+ }
12000 -+
12001 - info.si_signo = sig;
12002 - info.si_errno = 0;
12003 - /*
12004 -diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
12005 -index 0d6ff35..d9c07f0 100644
12006 ---- a/kernel/trace/trace_output.c
12007 -+++ b/kernel/trace/trace_output.c
12008 -@@ -650,6 +650,8 @@ int trace_print_lat_context(struct trace_iterator *iter)
12009 - {
12010 - u64 next_ts;
12011 - int ret;
12012 -+ /* trace_find_next_entry will reset ent_size */
12013 -+ int ent_size = iter->ent_size;
12014 - struct trace_seq *s = &iter->seq;
12015 - struct trace_entry *entry = iter->ent,
12016 - *next_entry = trace_find_next_entry(iter, NULL,
12017 -@@ -658,6 +660,9 @@ int trace_print_lat_context(struct trace_iterator *iter)
12018 - unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
12019 - unsigned long rel_usecs;
12020 -
12021 -+ /* Restore the original ent_size */
12022 -+ iter->ent_size = ent_size;
12023 -+
12024 - if (!next_entry)
12025 - next_ts = iter->ts;
12026 - rel_usecs = ns2usecs(next_ts - iter->ts);
12027 -diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
12028 -index e05667c..6a31cea 100644
12029 ---- a/net/mac80211/tx.c
12030 -+++ b/net/mac80211/tx.c
12031 -@@ -1144,7 +1144,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
12032 - tx->sta = rcu_dereference(sdata->u.vlan.sta);
12033 - if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
12034 - return TX_DROP;
12035 -- } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
12036 -+ } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
12037 -+ tx->sdata->control_port_protocol == tx->skb->protocol) {
12038 - tx->sta = sta_info_get_bss(sdata, hdr->addr1);
12039 - }
12040 - if (!tx->sta)
12041 -diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
12042 -index afeea32..bf945c9 100644
12043 ---- a/net/wireless/nl80211.c
12044 -+++ b/net/wireless/nl80211.c
12045 -@@ -1293,6 +1293,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
12046 - goto bad_res;
12047 - }
12048 -
12049 -+ if (!netif_running(netdev)) {
12050 -+ result = -ENETDOWN;
12051 -+ goto bad_res;
12052 -+ }
12053 -+
12054 - nla_for_each_nested(nl_txq_params,
12055 - info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
12056 - rem_txq_params) {
12057 -@@ -6262,7 +6267,7 @@ static struct genl_ops nl80211_ops[] = {
12058 - .doit = nl80211_get_key,
12059 - .policy = nl80211_policy,
12060 - .flags = GENL_ADMIN_PERM,
12061 -- .internal_flags = NL80211_FLAG_NEED_NETDEV |
12062 -+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
12063 - NL80211_FLAG_NEED_RTNL,
12064 - },
12065 - {
12066 -@@ -6294,7 +6299,7 @@ static struct genl_ops nl80211_ops[] = {
12067 - .policy = nl80211_policy,
12068 - .flags = GENL_ADMIN_PERM,
12069 - .doit = nl80211_addset_beacon,
12070 -- .internal_flags = NL80211_FLAG_NEED_NETDEV |
12071 -+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
12072 - NL80211_FLAG_NEED_RTNL,
12073 - },
12074 - {
12075 -@@ -6302,7 +6307,7 @@ static struct genl_ops nl80211_ops[] = {
12076 - .policy = nl80211_policy,
12077 - .flags = GENL_ADMIN_PERM,
12078 - .doit = nl80211_addset_beacon,
12079 -- .internal_flags = NL80211_FLAG_NEED_NETDEV |
12080 -+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
12081 - NL80211_FLAG_NEED_RTNL,
12082 - },
12083 - {
12084 -@@ -6326,7 +6331,7 @@ static struct genl_ops nl80211_ops[] = {
12085 - .doit = nl80211_set_station,
12086 - .policy = nl80211_policy,
12087 - .flags = GENL_ADMIN_PERM,
12088 -- .internal_flags = NL80211_FLAG_NEED_NETDEV |
12089 -+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
12090 - NL80211_FLAG_NEED_RTNL,
12091 - },
12092 - {
12093 -@@ -6342,7 +6347,7 @@ static struct genl_ops nl80211_ops[] = {
12094 - .doit = nl80211_del_station,
12095 - .policy = nl80211_policy,
12096 - .flags = GENL_ADMIN_PERM,
12097 -- .internal_flags = NL80211_FLAG_NEED_NETDEV |
12098 -+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
12099 - NL80211_FLAG_NEED_RTNL,
12100 - },
12101 - {
12102 -@@ -6375,7 +6380,7 @@ static struct genl_ops nl80211_ops[] = {
12103 - .doit = nl80211_del_mpath,
12104 - .policy = nl80211_policy,
12105 - .flags = GENL_ADMIN_PERM,
12106 -- .internal_flags = NL80211_FLAG_NEED_NETDEV |
12107 -+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
12108 - NL80211_FLAG_NEED_RTNL,
12109 - },
12110 - {
12111 -@@ -6383,7 +6388,7 @@ static struct genl_ops nl80211_ops[] = {
12112 - .doit = nl80211_set_bss,
12113 - .policy = nl80211_policy,
12114 - .flags = GENL_ADMIN_PERM,
12115 -- .internal_flags = NL80211_FLAG_NEED_NETDEV |
12116 -+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
12117 - NL80211_FLAG_NEED_RTNL,
12118 - },
12119 - {
12120 -@@ -6409,7 +6414,7 @@ static struct genl_ops nl80211_ops[] = {
12121 - .doit = nl80211_get_mesh_config,
12122 - .policy = nl80211_policy,
12123 - /* can be retrieved by unprivileged users */
12124 -- .internal_flags = NL80211_FLAG_NEED_NETDEV |
12125 -+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
12126 - NL80211_FLAG_NEED_RTNL,
12127 - },
12128 - {
12129 -@@ -6542,7 +6547,7 @@ static struct genl_ops nl80211_ops[] = {
12130 - .doit = nl80211_setdel_pmksa,
12131 - .policy = nl80211_policy,
12132 - .flags = GENL_ADMIN_PERM,
12133 -- .internal_flags = NL80211_FLAG_NEED_NETDEV |
12134 -+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
12135 - NL80211_FLAG_NEED_RTNL,
12136 - },
12137 - {
12138 -@@ -6550,7 +6555,7 @@ static struct genl_ops nl80211_ops[] = {
12139 - .doit = nl80211_setdel_pmksa,
12140 - .policy = nl80211_policy,
12141 - .flags = GENL_ADMIN_PERM,
12142 -- .internal_flags = NL80211_FLAG_NEED_NETDEV |
12143 -+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
12144 - NL80211_FLAG_NEED_RTNL,
12145 - },
12146 - {
12147 -@@ -6558,7 +6563,7 @@ static struct genl_ops nl80211_ops[] = {
12148 - .doit = nl80211_flush_pmksa,
12149 - .policy = nl80211_policy,
12150 - .flags = GENL_ADMIN_PERM,
12151 -- .internal_flags = NL80211_FLAG_NEED_NETDEV |
12152 -+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
12153 - NL80211_FLAG_NEED_RTNL,
12154 - },
12155 - {
12156 -@@ -6718,7 +6723,7 @@ static struct genl_ops nl80211_ops[] = {
12157 - .doit = nl80211_probe_client,
12158 - .policy = nl80211_policy,
12159 - .flags = GENL_ADMIN_PERM,
12160 -- .internal_flags = NL80211_FLAG_NEED_NETDEV |
12161 -+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
12162 - NL80211_FLAG_NEED_RTNL,
12163 - },
12164 - {
12165 -diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
12166 -index e5153ea..0960ece 100644
12167 ---- a/sound/pci/hda/patch_realtek.c
12168 -+++ b/sound/pci/hda/patch_realtek.c
12169 -@@ -5402,6 +5402,7 @@ static const struct alc_fixup alc269_fixups[] = {
12170 - };
12171 -
12172 - static const struct snd_pci_quirk alc269_fixup_tbl[] = {
12173 -+ SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC),
12174 - SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
12175 - SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
12176 - SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
12177 -diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
12178 -index 900c91b..e5cc616 100644
12179 ---- a/sound/soc/codecs/wm8994.c
12180 -+++ b/sound/soc/codecs/wm8994.c
12181 -@@ -929,61 +929,170 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec)
12182 - }
12183 - }
12184 -
12185 --static int late_enable_ev(struct snd_soc_dapm_widget *w,
12186 -- struct snd_kcontrol *kcontrol, int event)
12187 -+static int aif1clk_ev(struct snd_soc_dapm_widget *w,
12188 -+ struct snd_kcontrol *kcontrol, int event)
12189 - {
12190 - struct snd_soc_codec *codec = w->codec;
12191 -- struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
12192 -+ struct wm8994 *control = codec->control_data;
12193 -+ int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
12194 -+ int dac;
12195 -+ int adc;
12196 -+ int val;
12197 -+
12198 -+ switch (control->type) {
12199 -+ case WM8994:
12200 -+ case WM8958:
12201 -+ mask |= WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA;
12202 -+ break;
12203 -+ default:
12204 -+ break;
12205 -+ }
12206 -
12207 - switch (event) {
12208 - case SND_SOC_DAPM_PRE_PMU:
12209 -- if (wm8994->aif1clk_enable) {
12210 -- snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
12211 -- WM8994_AIF1CLK_ENA_MASK,
12212 -- WM8994_AIF1CLK_ENA);
12213 -- wm8994->aif1clk_enable = 0;
12214 -- }
12215 -- if (wm8994->aif2clk_enable) {
12216 -- snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
12217 -- WM8994_AIF2CLK_ENA_MASK,
12218 -- WM8994_AIF2CLK_ENA);
12219 -- wm8994->aif2clk_enable = 0;
12220 -- }
12221 -+ val = snd_soc_read(codec, WM8994_AIF1_CONTROL_1);
12222 -+ if ((val & WM8994_AIF1ADCL_SRC) &&
12223 -+ (val & WM8994_AIF1ADCR_SRC))
12224 -+ adc = WM8994_AIF1ADC1R_ENA | WM8994_AIF1ADC2R_ENA;
12225 -+ else if (!(val & WM8994_AIF1ADCL_SRC) &&
12226 -+ !(val & WM8994_AIF1ADCR_SRC))
12227 -+ adc = WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC2L_ENA;
12228 -+ else
12229 -+ adc = WM8994_AIF1ADC1R_ENA | WM8994_AIF1ADC2R_ENA |
12230 -+ WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC2L_ENA;
12231 -+
12232 -+ val = snd_soc_read(codec, WM8994_AIF1_CONTROL_2);
12233 -+ if ((val & WM8994_AIF1DACL_SRC) &&
12234 -+ (val & WM8994_AIF1DACR_SRC))
12235 -+ dac = WM8994_AIF1DAC1R_ENA | WM8994_AIF1DAC2R_ENA;
12236 -+ else if (!(val & WM8994_AIF1DACL_SRC) &&
12237 -+ !(val & WM8994_AIF1DACR_SRC))
12238 -+ dac = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC2L_ENA;
12239 -+ else
12240 -+ dac = WM8994_AIF1DAC1R_ENA | WM8994_AIF1DAC2R_ENA |
12241 -+ WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC2L_ENA;
12242 -+
12243 -+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
12244 -+ mask, adc);
12245 -+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
12246 -+ mask, dac);
12247 -+ snd_soc_update_bits(codec, WM8994_CLOCKING_1,
12248 -+ WM8994_AIF1DSPCLK_ENA |
12249 -+ WM8994_SYSDSPCLK_ENA,
12250 -+ WM8994_AIF1DSPCLK_ENA |
12251 -+ WM8994_SYSDSPCLK_ENA);
12252 -+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4, mask,
12253 -+ WM8994_AIF1ADC1R_ENA |
12254 -+ WM8994_AIF1ADC1L_ENA |
12255 -+ WM8994_AIF1ADC2R_ENA |
12256 -+ WM8994_AIF1ADC2L_ENA);
12257 -+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, mask,
12258 -+ WM8994_AIF1DAC1R_ENA |
12259 -+ WM8994_AIF1DAC1L_ENA |
12260 -+ WM8994_AIF1DAC2R_ENA |
12261 -+ WM8994_AIF1DAC2L_ENA);
12262 -+ break;
12263 -+
12264 -+ case SND_SOC_DAPM_PRE_PMD:
12265 -+ case SND_SOC_DAPM_POST_PMD:
12266 -+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
12267 -+ mask, 0);
12268 -+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
12269 -+ mask, 0);
12270 -+
12271 -+ val = snd_soc_read(codec, WM8994_CLOCKING_1);
12272 -+ if (val & WM8994_AIF2DSPCLK_ENA)
12273 -+ val = WM8994_SYSDSPCLK_ENA;
12274 -+ else
12275 -+ val = 0;
12276 -+ snd_soc_update_bits(codec, WM8994_CLOCKING_1,
12277 -+ WM8994_SYSDSPCLK_ENA |
12278 -+ WM8994_AIF1DSPCLK_ENA, val);
12279 - break;
12280 - }
12281 -
12282 -- /* We may also have postponed startup of DSP, handle that. */
12283 -- wm8958_aif_ev(w, kcontrol, event);
12284 --
12285 - return 0;
12286 - }
12287 -
12288 --static int late_disable_ev(struct snd_soc_dapm_widget *w,
12289 -- struct snd_kcontrol *kcontrol, int event)
12290 -+static int aif2clk_ev(struct snd_soc_dapm_widget *w,
12291 -+ struct snd_kcontrol *kcontrol, int event)
12292 - {
12293 - struct snd_soc_codec *codec = w->codec;
12294 -- struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
12295 -+ int dac;
12296 -+ int adc;
12297 -+ int val;
12298 -
12299 - switch (event) {
12300 -+ case SND_SOC_DAPM_PRE_PMU:
12301 -+ val = snd_soc_read(codec, WM8994_AIF2_CONTROL_1);
12302 -+ if ((val & WM8994_AIF2ADCL_SRC) &&
12303 -+ (val & WM8994_AIF2ADCR_SRC))
12304 -+ adc = WM8994_AIF2ADCR_ENA;
12305 -+ else if (!(val & WM8994_AIF2ADCL_SRC) &&
12306 -+ !(val & WM8994_AIF2ADCR_SRC))
12307 -+ adc = WM8994_AIF2ADCL_ENA;
12308 -+ else
12309 -+ adc = WM8994_AIF2ADCL_ENA | WM8994_AIF2ADCR_ENA;
12310 -+
12311 -+
12312 -+ val = snd_soc_read(codec, WM8994_AIF2_CONTROL_2);
12313 -+ if ((val & WM8994_AIF2DACL_SRC) &&
12314 -+ (val & WM8994_AIF2DACR_SRC))
12315 -+ dac = WM8994_AIF2DACR_ENA;
12316 -+ else if (!(val & WM8994_AIF2DACL_SRC) &&
12317 -+ !(val & WM8994_AIF2DACR_SRC))
12318 -+ dac = WM8994_AIF2DACL_ENA;
12319 -+ else
12320 -+ dac = WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA;
12321 -+
12322 -+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
12323 -+ WM8994_AIF2ADCL_ENA |
12324 -+ WM8994_AIF2ADCR_ENA, adc);
12325 -+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
12326 -+ WM8994_AIF2DACL_ENA |
12327 -+ WM8994_AIF2DACR_ENA, dac);
12328 -+ snd_soc_update_bits(codec, WM8994_CLOCKING_1,
12329 -+ WM8994_AIF2DSPCLK_ENA |
12330 -+ WM8994_SYSDSPCLK_ENA,
12331 -+ WM8994_AIF2DSPCLK_ENA |
12332 -+ WM8994_SYSDSPCLK_ENA);
12333 -+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
12334 -+ WM8994_AIF2ADCL_ENA |
12335 -+ WM8994_AIF2ADCR_ENA,
12336 -+ WM8994_AIF2ADCL_ENA |
12337 -+ WM8994_AIF2ADCR_ENA);
12338 -+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
12339 -+ WM8994_AIF2DACL_ENA |
12340 -+ WM8994_AIF2DACR_ENA,
12341 -+ WM8994_AIF2DACL_ENA |
12342 -+ WM8994_AIF2DACR_ENA);
12343 -+ break;
12344 -+
12345 -+ case SND_SOC_DAPM_PRE_PMD:
12346 - case SND_SOC_DAPM_POST_PMD:
12347 -- if (wm8994->aif1clk_disable) {
12348 -- snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
12349 -- WM8994_AIF1CLK_ENA_MASK, 0);
12350 -- wm8994->aif1clk_disable = 0;
12351 -- }
12352 -- if (wm8994->aif2clk_disable) {
12353 -- snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
12354 -- WM8994_AIF2CLK_ENA_MASK, 0);
12355 -- wm8994->aif2clk_disable = 0;
12356 -- }
12357 -+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
12358 -+ WM8994_AIF2DACL_ENA |
12359 -+ WM8994_AIF2DACR_ENA, 0);
12360 -+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
12361 -+ WM8994_AIF2ADCL_ENA |
12362 -+ WM8994_AIF2ADCR_ENA, 0);
12363 -+
12364 -+ val = snd_soc_read(codec, WM8994_CLOCKING_1);
12365 -+ if (val & WM8994_AIF1DSPCLK_ENA)
12366 -+ val = WM8994_SYSDSPCLK_ENA;
12367 -+ else
12368 -+ val = 0;
12369 -+ snd_soc_update_bits(codec, WM8994_CLOCKING_1,
12370 -+ WM8994_SYSDSPCLK_ENA |
12371 -+ WM8994_AIF2DSPCLK_ENA, val);
12372 - break;
12373 - }
12374 -
12375 - return 0;
12376 - }
12377 -
12378 --static int aif1clk_ev(struct snd_soc_dapm_widget *w,
12379 -- struct snd_kcontrol *kcontrol, int event)
12380 -+static int aif1clk_late_ev(struct snd_soc_dapm_widget *w,
12381 -+ struct snd_kcontrol *kcontrol, int event)
12382 - {
12383 - struct snd_soc_codec *codec = w->codec;
12384 - struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
12385 -@@ -1000,8 +1109,8 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
12386 - return 0;
12387 - }
12388 -
12389 --static int aif2clk_ev(struct snd_soc_dapm_widget *w,
12390 -- struct snd_kcontrol *kcontrol, int event)
12391 -+static int aif2clk_late_ev(struct snd_soc_dapm_widget *w,
12392 -+ struct snd_kcontrol *kcontrol, int event)
12393 - {
12394 - struct snd_soc_codec *codec = w->codec;
12395 - struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
12396 -@@ -1018,6 +1127,63 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
12397 - return 0;
12398 - }
12399 -
12400 -+static int late_enable_ev(struct snd_soc_dapm_widget *w,
12401 -+ struct snd_kcontrol *kcontrol, int event)
12402 -+{
12403 -+ struct snd_soc_codec *codec = w->codec;
12404 -+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
12405 -+
12406 -+ switch (event) {
12407 -+ case SND_SOC_DAPM_PRE_PMU:
12408 -+ if (wm8994->aif1clk_enable) {
12409 -+ aif1clk_ev(w, kcontrol, event);
12410 -+ snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
12411 -+ WM8994_AIF1CLK_ENA_MASK,
12412 -+ WM8994_AIF1CLK_ENA);
12413 -+ wm8994->aif1clk_enable = 0;
12414 -+ }
12415 -+ if (wm8994->aif2clk_enable) {
12416 -+ aif2clk_ev(w, kcontrol, event);
12417 -+ snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
12418 -+ WM8994_AIF2CLK_ENA_MASK,
12419 -+ WM8994_AIF2CLK_ENA);
12420 -+ wm8994->aif2clk_enable = 0;
12421 -+ }
12422 -+ break;
12423 -+ }
12424 -+
12425 -+ /* We may also have postponed startup of DSP, handle that. */
12426 -+ wm8958_aif_ev(w, kcontrol, event);
12427 -+
12428 -+ return 0;
12429 -+}
12430 -+
12431 -+static int late_disable_ev(struct snd_soc_dapm_widget *w,
12432 -+ struct snd_kcontrol *kcontrol, int event)
12433 -+{
12434 -+ struct snd_soc_codec *codec = w->codec;
12435 -+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
12436 -+
12437 -+ switch (event) {
12438 -+ case SND_SOC_DAPM_POST_PMD:
12439 -+ if (wm8994->aif1clk_disable) {
12440 -+ snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
12441 -+ WM8994_AIF1CLK_ENA_MASK, 0);
12442 -+ aif1clk_ev(w, kcontrol, event);
12443 -+ wm8994->aif1clk_disable = 0;
12444 -+ }
12445 -+ if (wm8994->aif2clk_disable) {
12446 -+ snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
12447 -+ WM8994_AIF2CLK_ENA_MASK, 0);
12448 -+ aif2clk_ev(w, kcontrol, event);
12449 -+ wm8994->aif2clk_disable = 0;
12450 -+ }
12451 -+ break;
12452 -+ }
12453 -+
12454 -+ return 0;
12455 -+}
12456 -+
12457 - static int adc_mux_ev(struct snd_soc_dapm_widget *w,
12458 - struct snd_kcontrol *kcontrol, int event)
12459 - {
12460 -@@ -1314,9 +1480,9 @@ static const struct snd_kcontrol_new aif2dacr_src_mux =
12461 - SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
12462 -
12463 - static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = {
12464 --SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev,
12465 -+SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_late_ev,
12466 - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
12467 --SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev,
12468 -+SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_late_ev,
12469 - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
12470 -
12471 - SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
12472 -@@ -1345,8 +1511,10 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
12473 - };
12474 -
12475 - static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
12476 --SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
12477 --SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
12478 -+SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev,
12479 -+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
12480 -+SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev,
12481 -+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
12482 - SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
12483 - SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0,
12484 - left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
12485 -@@ -1399,30 +1567,30 @@ SND_SOC_DAPM_SUPPLY("VMID", SND_SOC_NOPM, 0, 0, vmid_event,
12486 - SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event,
12487 - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
12488 -
12489 --SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0),
12490 --SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0),
12491 --SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
12492 -+SND_SOC_DAPM_SUPPLY("DSP1CLK", SND_SOC_NOPM, 3, 0, NULL, 0),
12493 -+SND_SOC_DAPM_SUPPLY("DSP2CLK", SND_SOC_NOPM, 2, 0, NULL, 0),
12494 -+SND_SOC_DAPM_SUPPLY("DSPINTCLK", SND_SOC_NOPM, 1, 0, NULL, 0),
12495 -
12496 - SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
12497 -- 0, WM8994_POWER_MANAGEMENT_4, 9, 0),
12498 -+ 0, SND_SOC_NOPM, 9, 0),
12499 - SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
12500 -- 0, WM8994_POWER_MANAGEMENT_4, 8, 0),
12501 -+ 0, SND_SOC_NOPM, 8, 0),
12502 - SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0,
12503 -- WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev,
12504 -+ SND_SOC_NOPM, 9, 0, wm8958_aif_ev,
12505 - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
12506 - SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0,
12507 -- WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev,
12508 -+ SND_SOC_NOPM, 8, 0, wm8958_aif_ev,
12509 - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
12510 -
12511 - SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL,
12512 -- 0, WM8994_POWER_MANAGEMENT_4, 11, 0),
12513 -+ 0, SND_SOC_NOPM, 11, 0),
12514 - SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL,
12515 -- 0, WM8994_POWER_MANAGEMENT_4, 10, 0),
12516 -+ 0, SND_SOC_NOPM, 10, 0),
12517 - SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0,
12518 -- WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev,
12519 -+ SND_SOC_NOPM, 11, 0, wm8958_aif_ev,
12520 - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
12521 - SND_SOC_DAPM_AIF_IN_E("AIF1DAC2R", NULL, 0,
12522 -- WM8994_POWER_MANAGEMENT_5, 10, 0, wm8958_aif_ev,
12523 -+ SND_SOC_NOPM, 10, 0, wm8958_aif_ev,
12524 - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
12525 -
12526 - SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
12527 -@@ -1449,14 +1617,14 @@ SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0,
12528 - dac1r_mix, ARRAY_SIZE(dac1r_mix)),
12529 -
12530 - SND_SOC_DAPM_AIF_OUT("AIF2ADCL", NULL, 0,
12531 -- WM8994_POWER_MANAGEMENT_4, 13, 0),
12532 -+ SND_SOC_NOPM, 13, 0),
12533 - SND_SOC_DAPM_AIF_OUT("AIF2ADCR", NULL, 0,
12534 -- WM8994_POWER_MANAGEMENT_4, 12, 0),
12535 -+ SND_SOC_NOPM, 12, 0),
12536 - SND_SOC_DAPM_AIF_IN_E("AIF2DACL", NULL, 0,
12537 -- WM8994_POWER_MANAGEMENT_5, 13, 0, wm8958_aif_ev,
12538 -+ SND_SOC_NOPM, 13, 0, wm8958_aif_ev,
12539 - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
12540 - SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0,
12541 -- WM8994_POWER_MANAGEMENT_5, 12, 0, wm8958_aif_ev,
12542 -+ SND_SOC_NOPM, 12, 0, wm8958_aif_ev,
12543 - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
12544 -
12545 - SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
12546 -diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
12547 -index 1315663..ac6b869 100644
12548 ---- a/sound/soc/soc-dapm.c
12549 -+++ b/sound/soc/soc-dapm.c
12550 -@@ -70,6 +70,7 @@ static int dapm_up_seq[] = {
12551 - [snd_soc_dapm_out_drv] = 10,
12552 - [snd_soc_dapm_hp] = 10,
12553 - [snd_soc_dapm_spk] = 10,
12554 -+ [snd_soc_dapm_line] = 10,
12555 - [snd_soc_dapm_post] = 11,
12556 - };
12557 -
12558 -@@ -78,6 +79,7 @@ static int dapm_down_seq[] = {
12559 - [snd_soc_dapm_adc] = 1,
12560 - [snd_soc_dapm_hp] = 2,
12561 - [snd_soc_dapm_spk] = 2,
12562 -+ [snd_soc_dapm_line] = 2,
12563 - [snd_soc_dapm_out_drv] = 2,
12564 - [snd_soc_dapm_pga] = 4,
12565 - [snd_soc_dapm_mixer_named_ctl] = 5,
12566 -diff --git a/tools/include/tools/be_byteshift.h b/tools/include/tools/be_byteshift.h
12567 -new file mode 100644
12568 -index 0000000..f4912e2
12569 ---- /dev/null
12570 -+++ b/tools/include/tools/be_byteshift.h
12571 -@@ -0,0 +1,70 @@
12572 -+#ifndef _TOOLS_BE_BYTESHIFT_H
12573 -+#define _TOOLS_BE_BYTESHIFT_H
12574 -+
12575 -+#include <linux/types.h>
12576 -+
12577 -+static inline __u16 __get_unaligned_be16(const __u8 *p)
12578 -+{
12579 -+ return p[0] << 8 | p[1];
12580 -+}
12581 -+
12582 -+static inline __u32 __get_unaligned_be32(const __u8 *p)
12583 -+{
12584 -+ return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
12585 -+}
12586 -+
12587 -+static inline __u64 __get_unaligned_be64(const __u8 *p)
12588 -+{
12589 -+ return (__u64)__get_unaligned_be32(p) << 32 |
12590 -+ __get_unaligned_be32(p + 4);
12591 -+}
12592 -+
12593 -+static inline void __put_unaligned_be16(__u16 val, __u8 *p)
12594 -+{
12595 -+ *p++ = val >> 8;
12596 -+ *p++ = val;
12597 -+}
12598 -+
12599 -+static inline void __put_unaligned_be32(__u32 val, __u8 *p)
12600 -+{
12601 -+ __put_unaligned_be16(val >> 16, p);
12602 -+ __put_unaligned_be16(val, p + 2);
12603 -+}
12604 -+
12605 -+static inline void __put_unaligned_be64(__u64 val, __u8 *p)
12606 -+{
12607 -+ __put_unaligned_be32(val >> 32, p);
12608 -+ __put_unaligned_be32(val, p + 4);
12609 -+}
12610 -+
12611 -+static inline __u16 get_unaligned_be16(const void *p)
12612 -+{
12613 -+ return __get_unaligned_be16((const __u8 *)p);
12614 -+}
12615 -+
12616 -+static inline __u32 get_unaligned_be32(const void *p)
12617 -+{
12618 -+ return __get_unaligned_be32((const __u8 *)p);
12619 -+}
12620 -+
12621 -+static inline __u64 get_unaligned_be64(const void *p)
12622 -+{
12623 -+ return __get_unaligned_be64((const __u8 *)p);
12624 -+}
12625 -+
12626 -+static inline void put_unaligned_be16(__u16 val, void *p)
12627 -+{
12628 -+ __put_unaligned_be16(val, p);
12629 -+}
12630 -+
12631 -+static inline void put_unaligned_be32(__u32 val, void *p)
12632 -+{
12633 -+ __put_unaligned_be32(val, p);
12634 -+}
12635 -+
12636 -+static inline void put_unaligned_be64(__u64 val, void *p)
12637 -+{
12638 -+ __put_unaligned_be64(val, p);
12639 -+}
12640 -+
12641 -+#endif /* _TOOLS_BE_BYTESHIFT_H */
12642 -diff --git a/tools/include/tools/le_byteshift.h b/tools/include/tools/le_byteshift.h
12643 -new file mode 100644
12644 -index 0000000..c99d45a
12645 ---- /dev/null
12646 -+++ b/tools/include/tools/le_byteshift.h
12647 -@@ -0,0 +1,70 @@
12648 -+#ifndef _TOOLS_LE_BYTESHIFT_H
12649 -+#define _TOOLS_LE_BYTESHIFT_H
12650 -+
12651 -+#include <linux/types.h>
12652 -+
12653 -+static inline __u16 __get_unaligned_le16(const __u8 *p)
12654 -+{
12655 -+ return p[0] | p[1] << 8;
12656 -+}
12657 -+
12658 -+static inline __u32 __get_unaligned_le32(const __u8 *p)
12659 -+{
12660 -+ return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
12661 -+}
12662 -+
12663 -+static inline __u64 __get_unaligned_le64(const __u8 *p)
12664 -+{
12665 -+ return (__u64)__get_unaligned_le32(p + 4) << 32 |
12666 -+ __get_unaligned_le32(p);
12667 -+}
12668 -+
12669 -+static inline void __put_unaligned_le16(__u16 val, __u8 *p)
12670 -+{
12671 -+ *p++ = val;
12672 -+ *p++ = val >> 8;
12673 -+}
12674 -+
12675 -+static inline void __put_unaligned_le32(__u32 val, __u8 *p)
12676 -+{
12677 -+ __put_unaligned_le16(val >> 16, p + 2);
12678 -+ __put_unaligned_le16(val, p);
12679 -+}
12680 -+
12681 -+static inline void __put_unaligned_le64(__u64 val, __u8 *p)
12682 -+{
12683 -+ __put_unaligned_le32(val >> 32, p + 4);
12684 -+ __put_unaligned_le32(val, p);
12685 -+}
12686 -+
12687 -+static inline __u16 get_unaligned_le16(const void *p)
12688 -+{
12689 -+ return __get_unaligned_le16((const __u8 *)p);
12690 -+}
12691 -+
12692 -+static inline __u32 get_unaligned_le32(const void *p)
12693 -+{
12694 -+ return __get_unaligned_le32((const __u8 *)p);
12695 -+}
12696 -+
12697 -+static inline __u64 get_unaligned_le64(const void *p)
12698 -+{
12699 -+ return __get_unaligned_le64((const __u8 *)p);
12700 -+}
12701 -+
12702 -+static inline void put_unaligned_le16(__u16 val, void *p)
12703 -+{
12704 -+ __put_unaligned_le16(val, p);
12705 -+}
12706 -+
12707 -+static inline void put_unaligned_le32(__u32 val, void *p)
12708 -+{
12709 -+ __put_unaligned_le32(val, p);
12710 -+}
12711 -+
12712 -+static inline void put_unaligned_le64(__u64 val, void *p)
12713 -+{
12714 -+ __put_unaligned_le64(val, p);
12715 -+}
12716 -+
12717 -+#endif /* _TOOLS_LE_BYTESHIFT_H */
12718
12719 diff --git a/3.3.5/0000_README b/3.3.6/0000_README
12720 similarity index 95%
12721 rename from 3.3.5/0000_README
12722 rename to 3.3.6/0000_README
12723 index 9dc6525..f827d9b 100644
12724 --- a/3.3.5/0000_README
12725 +++ b/3.3.6/0000_README
12726 @@ -2,11 +2,11 @@ README
12727 -----------------------------------------------------------------------------
12728 Individual Patch Descriptions:
12729 -----------------------------------------------------------------------------
12730 -Patch: 1004_linux-3.3.5.patch
12731 +Patch: 1005_linux-3.3.6.patch
12732 From: http://www.kernel.org
12733 -Desc: Linux 3.3.5
12734 +Desc: Linux 3.3.6
12735
12736 -Patch: 4420_grsecurity-2.9-3.3.5-201205071839.patch
12737 +Patch: 4420_grsecurity-2.9-3.3.6-201205131658.patch
12738 From: http://www.grsecurity.net
12739 Desc: hardened-sources base patch from upstream grsecurity
12740
12741
12742 diff --git a/3.3.6/1005_linux-3.3.6.patch b/3.3.6/1005_linux-3.3.6.patch
12743 new file mode 100644
12744 index 0000000..f02721b
12745 --- /dev/null
12746 +++ b/3.3.6/1005_linux-3.3.6.patch
12747 @@ -0,0 +1,1832 @@
12748 +diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
12749 +index ad3e80e..d18bbac 100644
12750 +--- a/Documentation/networking/ip-sysctl.txt
12751 ++++ b/Documentation/networking/ip-sysctl.txt
12752 +@@ -147,7 +147,7 @@ tcp_adv_win_scale - INTEGER
12753 + (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
12754 + if it is <= 0.
12755 + Possible values are [-31, 31], inclusive.
12756 +- Default: 2
12757 ++ Default: 1
12758 +
12759 + tcp_allowed_congestion_control - STRING
12760 + Show/set the congestion control choices available to non-privileged
12761 +@@ -410,7 +410,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
12762 + net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables
12763 + automatic tuning of that socket's receive buffer size, in which
12764 + case this value is ignored.
12765 +- Default: between 87380B and 4MB, depending on RAM size.
12766 ++ Default: between 87380B and 6MB, depending on RAM size.
12767 +
12768 + tcp_sack - BOOLEAN
12769 + Enable select acknowledgments (SACKS).
12770 +diff --git a/Makefile b/Makefile
12771 +index 64615e9..9cd6941 100644
12772 +--- a/Makefile
12773 ++++ b/Makefile
12774 +@@ -1,6 +1,6 @@
12775 + VERSION = 3
12776 + PATCHLEVEL = 3
12777 +-SUBLEVEL = 5
12778 ++SUBLEVEL = 6
12779 + EXTRAVERSION =
12780 + NAME = Saber-toothed Squirrel
12781 +
12782 +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
12783 +index ede6443..f5ce8ab 100644
12784 +--- a/arch/arm/kernel/ptrace.c
12785 ++++ b/arch/arm/kernel/ptrace.c
12786 +@@ -905,27 +905,14 @@ long arch_ptrace(struct task_struct *child, long request,
12787 + return ret;
12788 + }
12789 +
12790 +-#ifdef __ARMEB__
12791 +-#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB
12792 +-#else
12793 +-#define AUDIT_ARCH_NR AUDIT_ARCH_ARM
12794 +-#endif
12795 +-
12796 + asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
12797 + {
12798 + unsigned long ip;
12799 +
12800 +- /*
12801 +- * Save IP. IP is used to denote syscall entry/exit:
12802 +- * IP = 0 -> entry, = 1 -> exit
12803 +- */
12804 +- ip = regs->ARM_ip;
12805 +- regs->ARM_ip = why;
12806 +-
12807 +- if (!ip)
12808 ++ if (why)
12809 + audit_syscall_exit(regs);
12810 + else
12811 +- audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0,
12812 ++ audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
12813 + regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
12814 +
12815 + if (!test_thread_flag(TIF_SYSCALL_TRACE))
12816 +@@ -935,6 +922,13 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
12817 +
12818 + current_thread_info()->syscall = scno;
12819 +
12820 ++ /*
12821 ++ * IP is used to denote syscall entry/exit:
12822 ++ * IP = 0 -> entry, =1 -> exit
12823 ++ */
12824 ++ ip = regs->ARM_ip;
12825 ++ regs->ARM_ip = why;
12826 ++
12827 + /* the 0x80 provides a way for the tracing parent to distinguish
12828 + between a syscall stop and SIGTRAP delivery */
12829 + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
12830 +diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
12831 +index cdeb727..31c2567 100644
12832 +--- a/arch/arm/kernel/smp.c
12833 ++++ b/arch/arm/kernel/smp.c
12834 +@@ -255,8 +255,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
12835 + struct mm_struct *mm = &init_mm;
12836 + unsigned int cpu = smp_processor_id();
12837 +
12838 +- printk("CPU%u: Booted secondary processor\n", cpu);
12839 +-
12840 + /*
12841 + * All kernel threads share the same mm context; grab a
12842 + * reference and switch to it.
12843 +@@ -268,6 +266,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
12844 + enter_lazy_tlb(mm, current);
12845 + local_flush_tlb_all();
12846 +
12847 ++ printk("CPU%u: Booted secondary processor\n", cpu);
12848 ++
12849 + cpu_init();
12850 + preempt_disable();
12851 + trace_hardirqs_off();
12852 +diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
12853 +index d2b1779..76cbb05 100644
12854 +--- a/arch/arm/kernel/sys_arm.c
12855 ++++ b/arch/arm/kernel/sys_arm.c
12856 +@@ -115,7 +115,7 @@ int kernel_execve(const char *filename,
12857 + "Ir" (THREAD_START_SP - sizeof(regs)),
12858 + "r" (&regs),
12859 + "Ir" (sizeof(regs))
12860 +- : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
12861 ++ : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
12862 +
12863 + out:
12864 + return ret;
12865 +diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
12866 +index 1e2d332..c88420d 100644
12867 +--- a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
12868 ++++ b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
12869 +@@ -941,10 +941,10 @@
12870 + #define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29)
12871 + #define OMAP4_DSI1_LANEENABLE_SHIFT 24
12872 + #define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24)
12873 +-#define OMAP4_DSI2_PIPD_SHIFT 19
12874 +-#define OMAP4_DSI2_PIPD_MASK (0x1f << 19)
12875 +-#define OMAP4_DSI1_PIPD_SHIFT 14
12876 +-#define OMAP4_DSI1_PIPD_MASK (0x1f << 14)
12877 ++#define OMAP4_DSI1_PIPD_SHIFT 19
12878 ++#define OMAP4_DSI1_PIPD_MASK (0x1f << 19)
12879 ++#define OMAP4_DSI2_PIPD_SHIFT 14
12880 ++#define OMAP4_DSI2_PIPD_MASK (0x1f << 14)
12881 +
12882 + /* CONTROL_MCBSPLP */
12883 + #define OMAP4_ALBCTRLRX_FSX_SHIFT 31
12884 +diff --git a/arch/arm/mach-orion5x/mpp.h b/arch/arm/mach-orion5x/mpp.h
12885 +index eac6897..db70e79 100644
12886 +--- a/arch/arm/mach-orion5x/mpp.h
12887 ++++ b/arch/arm/mach-orion5x/mpp.h
12888 +@@ -65,8 +65,8 @@
12889 + #define MPP8_GIGE MPP(8, 0x1, 0, 0, 1, 1, 1)
12890 +
12891 + #define MPP9_UNUSED MPP(9, 0x0, 0, 0, 1, 1, 1)
12892 +-#define MPP9_GPIO MPP(9, 0x0, 0, 0, 1, 1, 1)
12893 +-#define MPP9_GIGE MPP(9, 0x1, 1, 1, 1, 1, 1)
12894 ++#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1, 1, 1)
12895 ++#define MPP9_GIGE MPP(9, 0x1, 0, 0, 1, 1, 1)
12896 +
12897 + #define MPP10_UNUSED MPP(10, 0x0, 0, 0, 1, 1, 1)
12898 + #define MPP10_GPIO MPP(10, 0x0, 1, 1, 1, 1, 1)
12899 +diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
12900 +index b1e192b..db7bcc0 100644
12901 +--- a/arch/arm/mm/cache-l2x0.c
12902 ++++ b/arch/arm/mm/cache-l2x0.c
12903 +@@ -32,6 +32,7 @@ static void __iomem *l2x0_base;
12904 + static DEFINE_RAW_SPINLOCK(l2x0_lock);
12905 + static uint32_t l2x0_way_mask; /* Bitmask of active ways */
12906 + static uint32_t l2x0_size;
12907 ++static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
12908 +
12909 + struct l2x0_regs l2x0_saved_regs;
12910 +
12911 +@@ -61,12 +62,7 @@ static inline void cache_sync(void)
12912 + {
12913 + void __iomem *base = l2x0_base;
12914 +
12915 +-#ifdef CONFIG_PL310_ERRATA_753970
12916 +- /* write to an unmmapped register */
12917 +- writel_relaxed(0, base + L2X0_DUMMY_REG);
12918 +-#else
12919 +- writel_relaxed(0, base + L2X0_CACHE_SYNC);
12920 +-#endif
12921 ++ writel_relaxed(0, base + sync_reg_offset);
12922 + cache_wait(base + L2X0_CACHE_SYNC, 1);
12923 + }
12924 +
12925 +@@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr)
12926 + }
12927 +
12928 + #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
12929 ++static inline void debug_writel(unsigned long val)
12930 ++{
12931 ++ if (outer_cache.set_debug)
12932 ++ outer_cache.set_debug(val);
12933 ++}
12934 +
12935 +-#define debug_writel(val) outer_cache.set_debug(val)
12936 +-
12937 +-static void l2x0_set_debug(unsigned long val)
12938 ++static void pl310_set_debug(unsigned long val)
12939 + {
12940 + writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
12941 + }
12942 +@@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val)
12943 + {
12944 + }
12945 +
12946 +-#define l2x0_set_debug NULL
12947 ++#define pl310_set_debug NULL
12948 + #endif
12949 +
12950 + #ifdef CONFIG_PL310_ERRATA_588369
12951 +@@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
12952 + else
12953 + ways = 8;
12954 + type = "L310";
12955 ++#ifdef CONFIG_PL310_ERRATA_753970
12956 ++ /* Unmapped register. */
12957 ++ sync_reg_offset = L2X0_DUMMY_REG;
12958 ++#endif
12959 ++ outer_cache.set_debug = pl310_set_debug;
12960 + break;
12961 + case L2X0_CACHE_ID_PART_L210:
12962 + ways = (aux >> 13) & 0xf;
12963 +@@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
12964 + outer_cache.flush_all = l2x0_flush_all;
12965 + outer_cache.inv_all = l2x0_inv_all;
12966 + outer_cache.disable = l2x0_disable;
12967 +- outer_cache.set_debug = l2x0_set_debug;
12968 +
12969 + printk(KERN_INFO "%s cache controller enabled\n", type);
12970 + printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
12971 +diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
12972 +index 4050520..8c25855 100644
12973 +--- a/arch/ia64/kvm/kvm-ia64.c
12974 ++++ b/arch/ia64/kvm/kvm-ia64.c
12975 +@@ -1169,6 +1169,11 @@ out:
12976 +
12977 + #define PALE_RESET_ENTRY 0x80000000ffffffb0UL
12978 +
12979 ++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
12980 ++{
12981 ++ return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL);
12982 ++}
12983 ++
12984 + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
12985 + {
12986 + struct kvm_vcpu *v;
12987 +diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
12988 +index 0243454..a5f6eff 100644
12989 +--- a/arch/s390/kvm/intercept.c
12990 ++++ b/arch/s390/kvm/intercept.c
12991 +@@ -133,13 +133,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
12992 +
12993 + vcpu->stat.exit_stop_request++;
12994 + spin_lock_bh(&vcpu->arch.local_int.lock);
12995 +- if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
12996 +- vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
12997 +- rc = kvm_s390_vcpu_store_status(vcpu,
12998 +- KVM_S390_STORE_STATUS_NOADDR);
12999 +- if (rc >= 0)
13000 +- rc = -EOPNOTSUPP;
13001 +- }
13002 +
13003 + if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
13004 + vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
13005 +@@ -155,7 +148,18 @@ static int handle_stop(struct kvm_vcpu *vcpu)
13006 + rc = -EOPNOTSUPP;
13007 + }
13008 +
13009 +- spin_unlock_bh(&vcpu->arch.local_int.lock);
13010 ++ if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
13011 ++ vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
13012 ++ /* store status must be called unlocked. Since local_int.lock
13013 ++ * only protects local_int.* and not guest memory we can give
13014 ++ * up the lock here */
13015 ++ spin_unlock_bh(&vcpu->arch.local_int.lock);
13016 ++ rc = kvm_s390_vcpu_store_status(vcpu,
13017 ++ KVM_S390_STORE_STATUS_NOADDR);
13018 ++ if (rc >= 0)
13019 ++ rc = -EOPNOTSUPP;
13020 ++ } else
13021 ++ spin_unlock_bh(&vcpu->arch.local_int.lock);
13022 + return rc;
13023 + }
13024 +
13025 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
13026 +index d1c44573..d3cb86c 100644
13027 +--- a/arch/s390/kvm/kvm-s390.c
13028 ++++ b/arch/s390/kvm/kvm-s390.c
13029 +@@ -418,7 +418,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
13030 + int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
13031 + {
13032 + memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
13033 +- vcpu->arch.guest_fpregs.fpc = fpu->fpc;
13034 ++ vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
13035 + restore_fp_regs(&vcpu->arch.guest_fpregs);
13036 + return 0;
13037 + }
13038 +diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
13039 +index 89bbf4e..e77f4e4 100644
13040 +--- a/arch/x86/boot/compressed/relocs.c
13041 ++++ b/arch/x86/boot/compressed/relocs.c
13042 +@@ -402,13 +402,11 @@ static void print_absolute_symbols(void)
13043 + for (i = 0; i < ehdr.e_shnum; i++) {
13044 + struct section *sec = &secs[i];
13045 + char *sym_strtab;
13046 +- Elf32_Sym *sh_symtab;
13047 + int j;
13048 +
13049 + if (sec->shdr.sh_type != SHT_SYMTAB) {
13050 + continue;
13051 + }
13052 +- sh_symtab = sec->symtab;
13053 + sym_strtab = sec->link->strtab;
13054 + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
13055 + Elf32_Sym *sym;
13056 +diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
13057 +index 71f4727..5a98aa2 100644
13058 +--- a/arch/x86/kernel/setup_percpu.c
13059 ++++ b/arch/x86/kernel/setup_percpu.c
13060 +@@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void)
13061 + #endif
13062 + rc = -EINVAL;
13063 + if (pcpu_chosen_fc != PCPU_FC_PAGE) {
13064 +- const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
13065 + const size_t dyn_size = PERCPU_MODULE_RESERVE +
13066 + PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
13067 ++ size_t atom_size;
13068 +
13069 ++ /*
13070 ++ * On 64bit, use PMD_SIZE for atom_size so that embedded
13071 ++ * percpu areas are aligned to PMD. This, in the future,
13072 ++ * can also allow using PMD mappings in vmalloc area. Use
13073 ++ * PAGE_SIZE on 32bit as vmalloc space is highly contended
13074 ++ * and large vmalloc area allocs can easily fail.
13075 ++ */
13076 ++#ifdef CONFIG_X86_64
13077 ++ atom_size = PMD_SIZE;
13078 ++#else
13079 ++ atom_size = PAGE_SIZE;
13080 ++#endif
13081 + rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
13082 + dyn_size, atom_size,
13083 + pcpu_cpu_distance,
13084 +diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
13085 +index 7aad544..3e48c1d 100644
13086 +--- a/arch/x86/kvm/pmu.c
13087 ++++ b/arch/x86/kvm/pmu.c
13088 +@@ -413,7 +413,7 @@ int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
13089 + struct kvm_pmc *counters;
13090 + u64 ctr;
13091 +
13092 +- pmc &= (3u << 30) - 1;
13093 ++ pmc &= ~(3u << 30);
13094 + if (!fixed && pmc >= pmu->nr_arch_gp_counters)
13095 + return 1;
13096 + if (fixed && pmc >= pmu->nr_arch_fixed_counters)
13097 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
13098 +index 3b4c8d8..a7a6f60 100644
13099 +--- a/arch/x86/kvm/vmx.c
13100 ++++ b/arch/x86/kvm/vmx.c
13101 +@@ -1678,7 +1678,7 @@ static int nested_pf_handled(struct kvm_vcpu *vcpu)
13102 + struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
13103 +
13104 + /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
13105 +- if (!(vmcs12->exception_bitmap & PF_VECTOR))
13106 ++ if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
13107 + return 0;
13108 +
13109 + nested_vmx_vmexit(vcpu);
13110 +@@ -2219,6 +2219,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
13111 + msr = find_msr_entry(vmx, msr_index);
13112 + if (msr) {
13113 + msr->data = data;
13114 ++ if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
13115 ++ preempt_disable();
13116 ++ kvm_set_shared_msr(msr->index, msr->data,
13117 ++ msr->mask);
13118 ++ preempt_enable();
13119 ++ }
13120 + break;
13121 + }
13122 + ret = kvm_set_msr_common(vcpu, msr_index, data);
13123 +@@ -3915,7 +3921,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
13124 + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
13125 +
13126 + vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
13127 ++ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
13128 + vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
13129 ++ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
13130 + vmx_set_cr4(&vmx->vcpu, 0);
13131 + vmx_set_efer(&vmx->vcpu, 0);
13132 + vmx_fpu_activate(&vmx->vcpu);
13133 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
13134 +index 9cbfc06..8d1c6c6 100644
13135 +--- a/arch/x86/kvm/x86.c
13136 ++++ b/arch/x86/kvm/x86.c
13137 +@@ -2997,6 +2997,8 @@ static void write_protect_slot(struct kvm *kvm,
13138 + unsigned long *dirty_bitmap,
13139 + unsigned long nr_dirty_pages)
13140 + {
13141 ++ spin_lock(&kvm->mmu_lock);
13142 ++
13143 + /* Not many dirty pages compared to # of shadow pages. */
13144 + if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
13145 + unsigned long gfn_offset;
13146 +@@ -3004,16 +3006,13 @@ static void write_protect_slot(struct kvm *kvm,
13147 + for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
13148 + unsigned long gfn = memslot->base_gfn + gfn_offset;
13149 +
13150 +- spin_lock(&kvm->mmu_lock);
13151 + kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
13152 +- spin_unlock(&kvm->mmu_lock);
13153 + }
13154 + kvm_flush_remote_tlbs(kvm);
13155 +- } else {
13156 +- spin_lock(&kvm->mmu_lock);
13157 ++ } else
13158 + kvm_mmu_slot_remove_write_access(kvm, memslot->id);
13159 +- spin_unlock(&kvm->mmu_lock);
13160 +- }
13161 ++
13162 ++ spin_unlock(&kvm->mmu_lock);
13163 + }
13164 +
13165 + /*
13166 +@@ -3132,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
13167 + r = -EEXIST;
13168 + if (kvm->arch.vpic)
13169 + goto create_irqchip_unlock;
13170 ++ r = -EINVAL;
13171 ++ if (atomic_read(&kvm->online_vcpus))
13172 ++ goto create_irqchip_unlock;
13173 + r = -ENOMEM;
13174 + vpic = kvm_create_pic(kvm);
13175 + if (vpic) {
13176 +@@ -5957,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
13177 + kvm_x86_ops->check_processor_compatibility(rtn);
13178 + }
13179 +
13180 ++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
13181 ++{
13182 ++ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
13183 ++}
13184 ++
13185 + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
13186 + {
13187 + struct page *page;
13188 +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
13189 +index 4172af8..4e517d4 100644
13190 +--- a/arch/x86/xen/enlighten.c
13191 ++++ b/arch/x86/xen/enlighten.c
13192 +@@ -62,6 +62,7 @@
13193 + #include <asm/reboot.h>
13194 + #include <asm/stackprotector.h>
13195 + #include <asm/hypervisor.h>
13196 ++#include <asm/pci_x86.h>
13197 +
13198 + #include "xen-ops.h"
13199 + #include "mmu.h"
13200 +@@ -1274,8 +1275,10 @@ asmlinkage void __init xen_start_kernel(void)
13201 + /* Make sure ACS will be enabled */
13202 + pci_request_acs();
13203 + }
13204 +-
13205 +-
13206 ++#ifdef CONFIG_PCI
13207 ++ /* PCI BIOS service won't work from a PV guest. */
13208 ++ pci_probe &= ~PCI_PROBE_BIOS;
13209 ++#endif
13210 + xen_raw_console_write("about to get started...\n");
13211 +
13212 + xen_setup_runstate_info(0);
13213 +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
13214 +index 95c1cf6..dc19347 100644
13215 +--- a/arch/x86/xen/mmu.c
13216 ++++ b/arch/x86/xen/mmu.c
13217 +@@ -353,8 +353,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
13218 + {
13219 + if (val & _PAGE_PRESENT) {
13220 + unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
13221 ++ unsigned long pfn = mfn_to_pfn(mfn);
13222 ++
13223 + pteval_t flags = val & PTE_FLAGS_MASK;
13224 +- val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
13225 ++ if (unlikely(pfn == ~0))
13226 ++ val = flags & ~_PAGE_PRESENT;
13227 ++ else
13228 ++ val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
13229 + }
13230 +
13231 + return val;
13232 +diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig
13233 +index b5dd14e..0ba837f 100644
13234 +--- a/drivers/block/mtip32xx/Kconfig
13235 ++++ b/drivers/block/mtip32xx/Kconfig
13236 +@@ -4,6 +4,6 @@
13237 +
13238 + config BLK_DEV_PCIESSD_MTIP32XX
13239 + tristate "Block Device Driver for Micron PCIe SSDs"
13240 +- depends on HOTPLUG_PCI_PCIE
13241 ++ depends on PCI
13242 + help
13243 + This enables the block driver for Micron PCIe SSDs.
13244 +diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
13245 +index 8eb81c9..c37073d 100644
13246 +--- a/drivers/block/mtip32xx/mtip32xx.c
13247 ++++ b/drivers/block/mtip32xx/mtip32xx.c
13248 +@@ -422,6 +422,10 @@ static void mtip_init_port(struct mtip_port *port)
13249 + /* Clear any pending interrupts for this port */
13250 + writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
13251 +
13252 ++ /* Clear any pending interrupts on the HBA. */
13253 ++ writel(readl(port->dd->mmio + HOST_IRQ_STAT),
13254 ++ port->dd->mmio + HOST_IRQ_STAT);
13255 ++
13256 + /* Enable port interrupts */
13257 + writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
13258 + }
13259 +@@ -490,11 +494,9 @@ static void mtip_restart_port(struct mtip_port *port)
13260 + dev_warn(&port->dd->pdev->dev,
13261 + "COM reset failed\n");
13262 +
13263 +- /* Clear SError, the PxSERR.DIAG.x should be set so clear it */
13264 +- writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
13265 ++ mtip_init_port(port);
13266 ++ mtip_start_port(port);
13267 +
13268 +- /* Enable the DMA engine */
13269 +- mtip_enable_engine(port, 1);
13270 + }
13271 +
13272 + /*
13273 +@@ -3359,9 +3361,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
13274 + return -ENOMEM;
13275 + }
13276 +
13277 +- /* Set the atomic variable as 1 in case of SRSI */
13278 +- atomic_set(&dd->drv_cleanup_done, true);
13279 +-
13280 + atomic_set(&dd->resumeflag, false);
13281 +
13282 + /* Attach the private data to this PCI device. */
13283 +@@ -3434,8 +3433,8 @@ iomap_err:
13284 + pci_set_drvdata(pdev, NULL);
13285 + return rv;
13286 + done:
13287 +- /* Set the atomic variable as 0 in case of SRSI */
13288 +- atomic_set(&dd->drv_cleanup_done, true);
13289 ++ /* Set the atomic variable as 0 */
13290 ++ atomic_set(&dd->drv_cleanup_done, false);
13291 +
13292 + return rv;
13293 + }
13294 +@@ -3463,8 +3462,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
13295 + }
13296 + }
13297 + }
13298 +- /* Set the atomic variable as 1 in case of SRSI */
13299 +- atomic_set(&dd->drv_cleanup_done, true);
13300 +
13301 + /* Clean up the block layer. */
13302 + mtip_block_remove(dd);
13303 +@@ -3608,18 +3605,25 @@ MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);
13304 + */
13305 + static int __init mtip_init(void)
13306 + {
13307 ++ int error;
13308 ++
13309 + printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
13310 +
13311 + /* Allocate a major block device number to use with this driver. */
13312 +- mtip_major = register_blkdev(0, MTIP_DRV_NAME);
13313 +- if (mtip_major < 0) {
13314 ++ error = register_blkdev(0, MTIP_DRV_NAME);
13315 ++ if (error <= 0) {
13316 + printk(KERN_ERR "Unable to register block device (%d)\n",
13317 +- mtip_major);
13318 ++ error);
13319 + return -EBUSY;
13320 + }
13321 ++ mtip_major = error;
13322 +
13323 + /* Register our PCI operations. */
13324 +- return pci_register_driver(&mtip_pci_driver);
13325 ++ error = pci_register_driver(&mtip_pci_driver);
13326 ++ if (error)
13327 ++ unregister_blkdev(mtip_major, MTIP_DRV_NAME);
13328 ++
13329 ++ return error;
13330 + }
13331 +
13332 + /*
13333 +diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
13334 +index 64541f7..9cd81ba 100644
13335 +--- a/drivers/gpu/drm/i915/intel_hdmi.c
13336 ++++ b/drivers/gpu/drm/i915/intel_hdmi.c
13337 +@@ -136,7 +136,7 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
13338 +
13339 + val &= ~VIDEO_DIP_SELECT_MASK;
13340 +
13341 +- I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
13342 ++ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
13343 +
13344 + for (i = 0; i < len; i += 4) {
13345 + I915_WRITE(VIDEO_DIP_DATA, *data);
13346 +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
13347 +index 99f71af..6753f59 100644
13348 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
13349 ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
13350 +@@ -414,10 +414,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
13351 + return ret;
13352 + }
13353 +
13354 +- if (INTEL_INFO(dev)->gen >= 6) {
13355 +- I915_WRITE(INSTPM,
13356 +- INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
13357 +
13358 ++ if (IS_GEN6(dev)) {
13359 + /* From the Sandybridge PRM, volume 1 part 3, page 24:
13360 + * "If this bit is set, STCunit will have LRA as replacement
13361 + * policy. [...] This bit must be reset. LRA replacement
13362 +@@ -427,6 +425,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
13363 + CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
13364 + }
13365 +
13366 ++ if (INTEL_INFO(dev)->gen >= 6) {
13367 ++ I915_WRITE(INSTPM,
13368 ++ INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
13369 ++ }
13370 ++
13371 + return ret;
13372 + }
13373 +
13374 +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
13375 +index 0a877dd..8eddcca 100644
13376 +--- a/drivers/gpu/drm/i915/intel_sdvo.c
13377 ++++ b/drivers/gpu/drm/i915/intel_sdvo.c
13378 +@@ -1221,8 +1221,14 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
13379 +
13380 + static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
13381 + {
13382 ++ struct drm_device *dev = intel_sdvo->base.base.dev;
13383 + u8 response[2];
13384 +
13385 ++ /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
13386 ++ * on the line. */
13387 ++ if (IS_I945G(dev) || IS_I945GM(dev))
13388 ++ return false;
13389 ++
13390 + return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
13391 + &response, 2) && response[0];
13392 + }
13393 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
13394 +index 83047783..ecbd765 100644
13395 +--- a/drivers/net/ethernet/broadcom/tg3.c
13396 ++++ b/drivers/net/ethernet/broadcom/tg3.c
13397 +@@ -879,8 +879,13 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
13398 + if (sblk->status & SD_STATUS_LINK_CHG)
13399 + work_exists = 1;
13400 + }
13401 +- /* check for RX/TX work to do */
13402 +- if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
13403 ++
13404 ++ /* check for TX work to do */
13405 ++ if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
13406 ++ work_exists = 1;
13407 ++
13408 ++ /* check for RX work to do */
13409 ++ if (tnapi->rx_rcb_prod_idx &&
13410 + *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
13411 + work_exists = 1;
13412 +
13413 +@@ -5877,6 +5882,9 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
13414 + return work_done;
13415 + }
13416 +
13417 ++ if (!tnapi->rx_rcb_prod_idx)
13418 ++ return work_done;
13419 ++
13420 + /* run RX thread, within the bounds set by NAPI.
13421 + * All RX "locking" is done by ensuring outside
13422 + * code synchronizes with tg3->napi.poll()
13423 +@@ -7428,6 +7436,12 @@ static int tg3_alloc_consistent(struct tg3 *tp)
13424 + */
13425 + switch (i) {
13426 + default:
13427 ++ if (tg3_flag(tp, ENABLE_RSS)) {
13428 ++ tnapi->rx_rcb_prod_idx = NULL;
13429 ++ break;
13430 ++ }
13431 ++ /* Fall through */
13432 ++ case 1:
13433 + tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
13434 + break;
13435 + case 2:
13436 +diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
13437 +index d94d64b..b444f21 100644
13438 +--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
13439 ++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
13440 +@@ -164,6 +164,8 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
13441 + static bool e1000_vlan_used(struct e1000_adapter *adapter);
13442 + static void e1000_vlan_mode(struct net_device *netdev,
13443 + netdev_features_t features);
13444 ++static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
13445 ++ bool filter_on);
13446 + static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
13447 + static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
13448 + static void e1000_restore_vlan(struct e1000_adapter *adapter);
13449 +@@ -1213,7 +1215,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
13450 + if (err)
13451 + goto err_register;
13452 +
13453 +- e1000_vlan_mode(netdev, netdev->features);
13454 ++ e1000_vlan_filter_on_off(adapter, false);
13455 +
13456 + /* print bus type/speed/width info */
13457 + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
13458 +@@ -4549,6 +4551,22 @@ static bool e1000_vlan_used(struct e1000_adapter *adapter)
13459 + return false;
13460 + }
13461 +
13462 ++static void __e1000_vlan_mode(struct e1000_adapter *adapter,
13463 ++ netdev_features_t features)
13464 ++{
13465 ++ struct e1000_hw *hw = &adapter->hw;
13466 ++ u32 ctrl;
13467 ++
13468 ++ ctrl = er32(CTRL);
13469 ++ if (features & NETIF_F_HW_VLAN_RX) {
13470 ++ /* enable VLAN tag insert/strip */
13471 ++ ctrl |= E1000_CTRL_VME;
13472 ++ } else {
13473 ++ /* disable VLAN tag insert/strip */
13474 ++ ctrl &= ~E1000_CTRL_VME;
13475 ++ }
13476 ++ ew32(CTRL, ctrl);
13477 ++}
13478 + static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
13479 + bool filter_on)
13480 + {
13481 +@@ -4558,6 +4576,7 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
13482 + if (!test_bit(__E1000_DOWN, &adapter->flags))
13483 + e1000_irq_disable(adapter);
13484 +
13485 ++ __e1000_vlan_mode(adapter, adapter->netdev->features);
13486 + if (filter_on) {
13487 + /* enable VLAN receive filtering */
13488 + rctl = er32(RCTL);
13489 +@@ -4578,24 +4597,14 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
13490 + }
13491 +
13492 + static void e1000_vlan_mode(struct net_device *netdev,
13493 +- netdev_features_t features)
13494 ++ netdev_features_t features)
13495 + {
13496 + struct e1000_adapter *adapter = netdev_priv(netdev);
13497 +- struct e1000_hw *hw = &adapter->hw;
13498 +- u32 ctrl;
13499 +
13500 + if (!test_bit(__E1000_DOWN, &adapter->flags))
13501 + e1000_irq_disable(adapter);
13502 +
13503 +- ctrl = er32(CTRL);
13504 +- if (features & NETIF_F_HW_VLAN_RX) {
13505 +- /* enable VLAN tag insert/strip */
13506 +- ctrl |= E1000_CTRL_VME;
13507 +- } else {
13508 +- /* disable VLAN tag insert/strip */
13509 +- ctrl &= ~E1000_CTRL_VME;
13510 +- }
13511 +- ew32(CTRL, ctrl);
13512 ++ __e1000_vlan_mode(adapter, features);
13513 +
13514 + if (!test_bit(__E1000_DOWN, &adapter->flags))
13515 + e1000_irq_enable(adapter);
13516 +diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
13517 +index ec6136f..1d04182 100644
13518 +--- a/drivers/net/ethernet/marvell/sky2.c
13519 ++++ b/drivers/net/ethernet/marvell/sky2.c
13520 +@@ -2483,8 +2483,13 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
13521 + skb_copy_from_linear_data(re->skb, skb->data, length);
13522 + skb->ip_summed = re->skb->ip_summed;
13523 + skb->csum = re->skb->csum;
13524 ++ skb->rxhash = re->skb->rxhash;
13525 ++ skb->vlan_tci = re->skb->vlan_tci;
13526 ++
13527 + pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
13528 + length, PCI_DMA_FROMDEVICE);
13529 ++ re->skb->vlan_tci = 0;
13530 ++ re->skb->rxhash = 0;
13531 + re->skb->ip_summed = CHECKSUM_NONE;
13532 + skb_put(skb, length);
13533 + }
13534 +@@ -2569,9 +2574,6 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
13535 + struct sk_buff *skb = NULL;
13536 + u16 count = (status & GMR_FS_LEN) >> 16;
13537 +
13538 +- if (status & GMR_FS_VLAN)
13539 +- count -= VLAN_HLEN; /* Account for vlan tag */
13540 +-
13541 + netif_printk(sky2, rx_status, KERN_DEBUG, dev,
13542 + "rx slot %u status 0x%x len %d\n",
13543 + sky2->rx_next, status, length);
13544 +@@ -2579,6 +2581,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
13545 + sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
13546 + prefetch(sky2->rx_ring + sky2->rx_next);
13547 +
13548 ++ if (vlan_tx_tag_present(re->skb))
13549 ++ count -= VLAN_HLEN; /* Account for vlan tag */
13550 ++
13551 + /* This chip has hardware problems that generates bogus status.
13552 + * So do only marginal checking and expect higher level protocols
13553 + * to handle crap frames.
13554 +@@ -2636,11 +2641,8 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
13555 + }
13556 +
13557 + static inline void sky2_skb_rx(const struct sky2_port *sky2,
13558 +- u32 status, struct sk_buff *skb)
13559 ++ struct sk_buff *skb)
13560 + {
13561 +- if (status & GMR_FS_VLAN)
13562 +- __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
13563 +-
13564 + if (skb->ip_summed == CHECKSUM_NONE)
13565 + netif_receive_skb(skb);
13566 + else
13567 +@@ -2694,6 +2696,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
13568 + }
13569 + }
13570 +
13571 ++static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
13572 ++{
13573 ++ struct sk_buff *skb;
13574 ++
13575 ++ skb = sky2->rx_ring[sky2->rx_next].skb;
13576 ++ __vlan_hwaccel_put_tag(skb, be16_to_cpu(length));
13577 ++}
13578 ++
13579 + static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
13580 + {
13581 + struct sk_buff *skb;
13582 +@@ -2752,8 +2762,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
13583 + }
13584 +
13585 + skb->protocol = eth_type_trans(skb, dev);
13586 +-
13587 +- sky2_skb_rx(sky2, status, skb);
13588 ++ sky2_skb_rx(sky2, skb);
13589 +
13590 + /* Stop after net poll weight */
13591 + if (++work_done >= to_do)
13592 +@@ -2761,11 +2770,11 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
13593 + break;
13594 +
13595 + case OP_RXVLAN:
13596 +- sky2->rx_tag = length;
13597 ++ sky2_rx_tag(sky2, length);
13598 + break;
13599 +
13600 + case OP_RXCHKSVLAN:
13601 +- sky2->rx_tag = length;
13602 ++ sky2_rx_tag(sky2, length);
13603 + /* fall through */
13604 + case OP_RXCHKS:
13605 + if (likely(dev->features & NETIF_F_RXCSUM))
13606 +diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
13607 +index ff6f58b..3c896ce 100644
13608 +--- a/drivers/net/ethernet/marvell/sky2.h
13609 ++++ b/drivers/net/ethernet/marvell/sky2.h
13610 +@@ -2241,7 +2241,6 @@ struct sky2_port {
13611 + u16 rx_pending;
13612 + u16 rx_data_size;
13613 + u16 rx_nfrags;
13614 +- u16 rx_tag;
13615 +
13616 + struct {
13617 + unsigned long last;
13618 +diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
13619 +index 31441a8..d14a011 100644
13620 +--- a/drivers/net/ethernet/sun/sungem.c
13621 ++++ b/drivers/net/ethernet/sun/sungem.c
13622 +@@ -2340,7 +2340,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
13623 + netif_device_detach(dev);
13624 +
13625 + /* Switch off chip, remember WOL setting */
13626 +- gp->asleep_wol = gp->wake_on_lan;
13627 ++ gp->asleep_wol = !!gp->wake_on_lan;
13628 + gem_do_stop(dev, gp->asleep_wol);
13629 +
13630 + /* Unlock the network stack */
13631 +diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
13632 +index d6da5ee..c7ada22 100644
13633 +--- a/drivers/net/usb/asix.c
13634 ++++ b/drivers/net/usb/asix.c
13635 +@@ -403,7 +403,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
13636 + u32 packet_len;
13637 + u32 padbytes = 0xffff0000;
13638 +
13639 +- padlen = ((skb->len + 4) % 512) ? 0 : 4;
13640 ++ padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
13641 +
13642 + if ((!skb_cloned(skb)) &&
13643 + ((headroom + tailroom) >= (4 + padlen))) {
13644 +@@ -425,7 +425,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
13645 + cpu_to_le32s(&packet_len);
13646 + skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
13647 +
13648 +- if ((skb->len % 512) == 0) {
13649 ++ if (padlen) {
13650 + cpu_to_le32s(&padbytes);
13651 + memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
13652 + skb_put(skb, sizeof(padbytes));
13653 +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
13654 +index d45520e..f1e77b1 100644
13655 +--- a/drivers/net/usb/smsc95xx.c
13656 ++++ b/drivers/net/usb/smsc95xx.c
13657 +@@ -1191,7 +1191,7 @@ static const struct driver_info smsc95xx_info = {
13658 + .rx_fixup = smsc95xx_rx_fixup,
13659 + .tx_fixup = smsc95xx_tx_fixup,
13660 + .status = smsc95xx_status,
13661 +- .flags = FLAG_ETHER | FLAG_SEND_ZLP,
13662 ++ .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
13663 + };
13664 +
13665 + static const struct usb_device_id products[] = {
13666 +diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
13667 +index c006dee..40c4705 100644
13668 +--- a/drivers/platform/x86/sony-laptop.c
13669 ++++ b/drivers/platform/x86/sony-laptop.c
13670 +@@ -127,7 +127,7 @@ MODULE_PARM_DESC(minor,
13671 + "default is -1 (automatic)");
13672 + #endif
13673 +
13674 +-static int kbd_backlight; /* = 1 */
13675 ++static int kbd_backlight = 1;
13676 + module_param(kbd_backlight, int, 0444);
13677 + MODULE_PARM_DESC(kbd_backlight,
13678 + "set this to 0 to disable keyboard backlight, "
13679 +diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
13680 +index d26e864..cf73ab2 100644
13681 +--- a/drivers/regulator/max8997.c
13682 ++++ b/drivers/regulator/max8997.c
13683 +@@ -689,7 +689,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
13684 + }
13685 +
13686 + new_val++;
13687 +- } while (desc->min + desc->step + new_val <= desc->max);
13688 ++ } while (desc->min + desc->step * new_val <= desc->max);
13689 +
13690 + new_idx = tmp_idx;
13691 + new_val = tmp_val;
13692 +diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
13693 +index ec02ed0..4e2e13e 100644
13694 +--- a/drivers/usb/gadget/udc-core.c
13695 ++++ b/drivers/usb/gadget/udc-core.c
13696 +@@ -211,8 +211,8 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
13697 +
13698 + if (udc_is_newstyle(udc)) {
13699 + udc->driver->disconnect(udc->gadget);
13700 +- udc->driver->unbind(udc->gadget);
13701 + usb_gadget_disconnect(udc->gadget);
13702 ++ udc->driver->unbind(udc->gadget);
13703 + usb_gadget_udc_stop(udc->gadget, udc->driver);
13704 + } else {
13705 + usb_gadget_stop(udc->gadget, udc->driver);
13706 +@@ -363,9 +363,9 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
13707 + usb_gadget_udc_start(udc->gadget, udc->driver);
13708 + usb_gadget_connect(udc->gadget);
13709 + } else if (sysfs_streq(buf, "disconnect")) {
13710 ++ usb_gadget_disconnect(udc->gadget);
13711 + if (udc_is_newstyle(udc))
13712 + usb_gadget_udc_stop(udc->gadget, udc->driver);
13713 +- usb_gadget_disconnect(udc->gadget);
13714 + } else {
13715 + dev_err(dev, "unsupported command '%s'\n", buf);
13716 + return -EINVAL;
13717 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
13718 +index cd66b76..1250bba 100644
13719 +--- a/fs/cifs/cifssmb.c
13720 ++++ b/fs/cifs/cifssmb.c
13721 +@@ -4831,8 +4831,12 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
13722 + max_len = data_end - temp;
13723 + node->node_name = cifs_strndup_from_utf16(temp, max_len,
13724 + is_unicode, nls_codepage);
13725 +- if (!node->node_name)
13726 ++ if (!node->node_name) {
13727 + rc = -ENOMEM;
13728 ++ goto parse_DFS_referrals_exit;
13729 ++ }
13730 ++
13731 ++ ref++;
13732 + }
13733 +
13734 + parse_DFS_referrals_exit:
13735 +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
13736 +index 3645cd3..c60267e 100644
13737 +--- a/fs/hugetlbfs/inode.c
13738 ++++ b/fs/hugetlbfs/inode.c
13739 +@@ -600,9 +600,15 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
13740 + spin_lock(&sbinfo->stat_lock);
13741 + /* If no limits set, just report 0 for max/free/used
13742 + * blocks, like simple_statfs() */
13743 +- if (sbinfo->max_blocks >= 0) {
13744 +- buf->f_blocks = sbinfo->max_blocks;
13745 +- buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
13746 ++ if (sbinfo->spool) {
13747 ++ long free_pages;
13748 ++
13749 ++ spin_lock(&sbinfo->spool->lock);
13750 ++ buf->f_blocks = sbinfo->spool->max_hpages;
13751 ++ free_pages = sbinfo->spool->max_hpages
13752 ++ - sbinfo->spool->used_hpages;
13753 ++ buf->f_bavail = buf->f_bfree = free_pages;
13754 ++ spin_unlock(&sbinfo->spool->lock);
13755 + buf->f_files = sbinfo->max_inodes;
13756 + buf->f_ffree = sbinfo->free_inodes;
13757 + }
13758 +@@ -618,6 +624,10 @@ static void hugetlbfs_put_super(struct super_block *sb)
13759 +
13760 + if (sbi) {
13761 + sb->s_fs_info = NULL;
13762 ++
13763 ++ if (sbi->spool)
13764 ++ hugepage_put_subpool(sbi->spool);
13765 ++
13766 + kfree(sbi);
13767 + }
13768 + }
13769 +@@ -848,10 +858,14 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
13770 + sb->s_fs_info = sbinfo;
13771 + sbinfo->hstate = config.hstate;
13772 + spin_lock_init(&sbinfo->stat_lock);
13773 +- sbinfo->max_blocks = config.nr_blocks;
13774 +- sbinfo->free_blocks = config.nr_blocks;
13775 + sbinfo->max_inodes = config.nr_inodes;
13776 + sbinfo->free_inodes = config.nr_inodes;
13777 ++ sbinfo->spool = NULL;
13778 ++ if (config.nr_blocks != -1) {
13779 ++ sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
13780 ++ if (!sbinfo->spool)
13781 ++ goto out_free;
13782 ++ }
13783 + sb->s_maxbytes = MAX_LFS_FILESIZE;
13784 + sb->s_blocksize = huge_page_size(config.hstate);
13785 + sb->s_blocksize_bits = huge_page_shift(config.hstate);
13786 +@@ -870,38 +884,12 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
13787 + sb->s_root = root;
13788 + return 0;
13789 + out_free:
13790 ++ if (sbinfo->spool)
13791 ++ kfree(sbinfo->spool);
13792 + kfree(sbinfo);
13793 + return -ENOMEM;
13794 + }
13795 +
13796 +-int hugetlb_get_quota(struct address_space *mapping, long delta)
13797 +-{
13798 +- int ret = 0;
13799 +- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
13800 +-
13801 +- if (sbinfo->free_blocks > -1) {
13802 +- spin_lock(&sbinfo->stat_lock);
13803 +- if (sbinfo->free_blocks - delta >= 0)
13804 +- sbinfo->free_blocks -= delta;
13805 +- else
13806 +- ret = -ENOMEM;
13807 +- spin_unlock(&sbinfo->stat_lock);
13808 +- }
13809 +-
13810 +- return ret;
13811 +-}
13812 +-
13813 +-void hugetlb_put_quota(struct address_space *mapping, long delta)
13814 +-{
13815 +- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
13816 +-
13817 +- if (sbinfo->free_blocks > -1) {
13818 +- spin_lock(&sbinfo->stat_lock);
13819 +- sbinfo->free_blocks += delta;
13820 +- spin_unlock(&sbinfo->stat_lock);
13821 +- }
13822 +-}
13823 +-
13824 + static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
13825 + int flags, const char *dev_name, void *data)
13826 + {
13827 +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
13828 +index de3fa1a..2c1244b 100644
13829 +--- a/fs/nfsd/nfs4proc.c
13830 ++++ b/fs/nfsd/nfs4proc.c
13831 +@@ -231,17 +231,17 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
13832 + */
13833 + if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0)
13834 + open->op_bmval[1] = (FATTR4_WORD1_TIME_ACCESS |
13835 +- FATTR4_WORD1_TIME_MODIFY);
13836 ++ FATTR4_WORD1_TIME_MODIFY);
13837 + } else {
13838 + status = nfsd_lookup(rqstp, current_fh,
13839 + open->op_fname.data, open->op_fname.len, &resfh);
13840 + fh_unlock(current_fh);
13841 +- if (status)
13842 +- goto out;
13843 +- status = nfsd_check_obj_isreg(&resfh);
13844 + }
13845 + if (status)
13846 + goto out;
13847 ++ status = nfsd_check_obj_isreg(&resfh);
13848 ++ if (status)
13849 ++ goto out;
13850 +
13851 + if (is_create_with_attrs(open) && open->op_acl != NULL)
13852 + do_set_nfs4_acl(rqstp, &resfh, open->op_acl, open->op_bmval);
13853 +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
13854 +index edf6d3e..b96fe94 100644
13855 +--- a/fs/nfsd/vfs.c
13856 ++++ b/fs/nfsd/vfs.c
13857 +@@ -1450,7 +1450,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
13858 + switch (createmode) {
13859 + case NFS3_CREATE_UNCHECKED:
13860 + if (! S_ISREG(dchild->d_inode->i_mode))
13861 +- err = nfserr_exist;
13862 ++ goto out;
13863 + else if (truncp) {
13864 + /* in nfsv4, we need to treat this case a little
13865 + * differently. we don't want to truncate the
13866 +diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
13867 +index 0fd28e0..c749af9 100644
13868 +--- a/include/asm-generic/statfs.h
13869 ++++ b/include/asm-generic/statfs.h
13870 +@@ -15,7 +15,7 @@ typedef __kernel_fsid_t fsid_t;
13871 + * with a 10' pole.
13872 + */
13873 + #ifndef __statfs_word
13874 +-#if BITS_PER_LONG == 64
13875 ++#if __BITS_PER_LONG == 64
13876 + #define __statfs_word long
13877 + #else
13878 + #define __statfs_word __u32
13879 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
13880 +index d9d6c86..c5ed2f1 100644
13881 +--- a/include/linux/hugetlb.h
13882 ++++ b/include/linux/hugetlb.h
13883 +@@ -14,6 +14,15 @@ struct user_struct;
13884 + #include <linux/shm.h>
13885 + #include <asm/tlbflush.h>
13886 +
13887 ++struct hugepage_subpool {
13888 ++ spinlock_t lock;
13889 ++ long count;
13890 ++ long max_hpages, used_hpages;
13891 ++};
13892 ++
13893 ++struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
13894 ++void hugepage_put_subpool(struct hugepage_subpool *spool);
13895 ++
13896 + int PageHuge(struct page *page);
13897 +
13898 + void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
13899 +@@ -138,12 +147,11 @@ struct hugetlbfs_config {
13900 + };
13901 +
13902 + struct hugetlbfs_sb_info {
13903 +- long max_blocks; /* blocks allowed */
13904 +- long free_blocks; /* blocks free */
13905 + long max_inodes; /* inodes allowed */
13906 + long free_inodes; /* inodes free */
13907 + spinlock_t stat_lock;
13908 + struct hstate *hstate;
13909 ++ struct hugepage_subpool *spool;
13910 + };
13911 +
13912 +
13913 +@@ -166,8 +174,6 @@ extern const struct file_operations hugetlbfs_file_operations;
13914 + extern const struct vm_operations_struct hugetlb_vm_ops;
13915 + struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
13916 + struct user_struct **user, int creat_flags);
13917 +-int hugetlb_get_quota(struct address_space *mapping, long delta);
13918 +-void hugetlb_put_quota(struct address_space *mapping, long delta);
13919 +
13920 + static inline int is_file_hugepages(struct file *file)
13921 + {
13922 +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
13923 +index bc21720..4c4e83d 100644
13924 +--- a/include/linux/kvm_host.h
13925 ++++ b/include/linux/kvm_host.h
13926 +@@ -775,6 +775,13 @@ static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
13927 + {
13928 + return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
13929 + }
13930 ++
13931 ++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
13932 ++
13933 ++#else
13934 ++
13935 ++static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
13936 ++
13937 + #endif
13938 +
13939 + #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
13940 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
13941 +index 4f3b01a..7e472b7 100644
13942 +--- a/include/linux/netdevice.h
13943 ++++ b/include/linux/netdevice.h
13944 +@@ -1898,12 +1898,22 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
13945 + {
13946 + #ifdef CONFIG_BQL
13947 + dql_queued(&dev_queue->dql, bytes);
13948 +- if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
13949 +- set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
13950 +- if (unlikely(dql_avail(&dev_queue->dql) >= 0))
13951 +- clear_bit(__QUEUE_STATE_STACK_XOFF,
13952 +- &dev_queue->state);
13953 +- }
13954 ++
13955 ++ if (likely(dql_avail(&dev_queue->dql) >= 0))
13956 ++ return;
13957 ++
13958 ++ set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
13959 ++
13960 ++ /*
13961 ++ * The XOFF flag must be set before checking the dql_avail below,
13962 ++ * because in netdev_tx_completed_queue we update the dql_completed
13963 ++ * before checking the XOFF flag.
13964 ++ */
13965 ++ smp_mb();
13966 ++
13967 ++ /* check again in case another CPU has just made room avail */
13968 ++ if (unlikely(dql_avail(&dev_queue->dql) >= 0))
13969 ++ clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
13970 + #endif
13971 + }
13972 +
13973 +@@ -1916,16 +1926,23 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
13974 + unsigned pkts, unsigned bytes)
13975 + {
13976 + #ifdef CONFIG_BQL
13977 +- if (likely(bytes)) {
13978 +- dql_completed(&dev_queue->dql, bytes);
13979 +- if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
13980 +- &dev_queue->state) &&
13981 +- dql_avail(&dev_queue->dql) >= 0)) {
13982 +- if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
13983 +- &dev_queue->state))
13984 +- netif_schedule_queue(dev_queue);
13985 +- }
13986 +- }
13987 ++ if (unlikely(!bytes))
13988 ++ return;
13989 ++
13990 ++ dql_completed(&dev_queue->dql, bytes);
13991 ++
13992 ++ /*
13993 ++ * Without the memory barrier there is a small possiblity that
13994 ++ * netdev_tx_sent_queue will miss the update and cause the queue to
13995 ++ * be stopped forever
13996 ++ */
13997 ++ smp_mb();
13998 ++
13999 ++ if (dql_avail(&dev_queue->dql) < 0)
14000 ++ return;
14001 ++
14002 ++ if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
14003 ++ netif_schedule_queue(dev_queue);
14004 + #endif
14005 + }
14006 +
14007 +@@ -1938,6 +1955,7 @@ static inline void netdev_completed_queue(struct net_device *dev,
14008 + static inline void netdev_tx_reset_queue(struct netdev_queue *q)
14009 + {
14010 + #ifdef CONFIG_BQL
14011 ++ clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
14012 + dql_reset(&q->dql);
14013 + #endif
14014 + }
14015 +diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
14016 +index c6db9fb..bb1fac5 100644
14017 +--- a/include/linux/seqlock.h
14018 ++++ b/include/linux/seqlock.h
14019 +@@ -141,7 +141,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
14020 + unsigned ret;
14021 +
14022 + repeat:
14023 +- ret = s->sequence;
14024 ++ ret = ACCESS_ONCE(s->sequence);
14025 + if (unlikely(ret & 1)) {
14026 + cpu_relax();
14027 + goto repeat;
14028 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
14029 +index a7cf829..24b1787 100644
14030 +--- a/mm/hugetlb.c
14031 ++++ b/mm/hugetlb.c
14032 +@@ -53,6 +53,84 @@ static unsigned long __initdata default_hstate_size;
14033 + */
14034 + static DEFINE_SPINLOCK(hugetlb_lock);
14035 +
14036 ++static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
14037 ++{
14038 ++ bool free = (spool->count == 0) && (spool->used_hpages == 0);
14039 ++
14040 ++ spin_unlock(&spool->lock);
14041 ++
14042 ++ /* If no pages are used, and no other handles to the subpool
14043 ++ * remain, free the subpool the subpool remain */
14044 ++ if (free)
14045 ++ kfree(spool);
14046 ++}
14047 ++
14048 ++struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
14049 ++{
14050 ++ struct hugepage_subpool *spool;
14051 ++
14052 ++ spool = kmalloc(sizeof(*spool), GFP_KERNEL);
14053 ++ if (!spool)
14054 ++ return NULL;
14055 ++
14056 ++ spin_lock_init(&spool->lock);
14057 ++ spool->count = 1;
14058 ++ spool->max_hpages = nr_blocks;
14059 ++ spool->used_hpages = 0;
14060 ++
14061 ++ return spool;
14062 ++}
14063 ++
14064 ++void hugepage_put_subpool(struct hugepage_subpool *spool)
14065 ++{
14066 ++ spin_lock(&spool->lock);
14067 ++ BUG_ON(!spool->count);
14068 ++ spool->count--;
14069 ++ unlock_or_release_subpool(spool);
14070 ++}
14071 ++
14072 ++static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
14073 ++ long delta)
14074 ++{
14075 ++ int ret = 0;
14076 ++
14077 ++ if (!spool)
14078 ++ return 0;
14079 ++
14080 ++ spin_lock(&spool->lock);
14081 ++ if ((spool->used_hpages + delta) <= spool->max_hpages) {
14082 ++ spool->used_hpages += delta;
14083 ++ } else {
14084 ++ ret = -ENOMEM;
14085 ++ }
14086 ++ spin_unlock(&spool->lock);
14087 ++
14088 ++ return ret;
14089 ++}
14090 ++
14091 ++static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
14092 ++ long delta)
14093 ++{
14094 ++ if (!spool)
14095 ++ return;
14096 ++
14097 ++ spin_lock(&spool->lock);
14098 ++ spool->used_hpages -= delta;
14099 ++ /* If hugetlbfs_put_super couldn't free spool due to
14100 ++ * an outstanding quota reference, free it now. */
14101 ++ unlock_or_release_subpool(spool);
14102 ++}
14103 ++
14104 ++static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
14105 ++{
14106 ++ return HUGETLBFS_SB(inode->i_sb)->spool;
14107 ++}
14108 ++
14109 ++static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
14110 ++{
14111 ++ return subpool_inode(vma->vm_file->f_dentry->d_inode);
14112 ++}
14113 ++
14114 + /*
14115 + * Region tracking -- allows tracking of reservations and instantiated pages
14116 + * across the pages in a mapping.
14117 +@@ -533,9 +611,9 @@ static void free_huge_page(struct page *page)
14118 + */
14119 + struct hstate *h = page_hstate(page);
14120 + int nid = page_to_nid(page);
14121 +- struct address_space *mapping;
14122 ++ struct hugepage_subpool *spool =
14123 ++ (struct hugepage_subpool *)page_private(page);
14124 +
14125 +- mapping = (struct address_space *) page_private(page);
14126 + set_page_private(page, 0);
14127 + page->mapping = NULL;
14128 + BUG_ON(page_count(page));
14129 +@@ -551,8 +629,7 @@ static void free_huge_page(struct page *page)
14130 + enqueue_huge_page(h, page);
14131 + }
14132 + spin_unlock(&hugetlb_lock);
14133 +- if (mapping)
14134 +- hugetlb_put_quota(mapping, 1);
14135 ++ hugepage_subpool_put_pages(spool, 1);
14136 + }
14137 +
14138 + static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
14139 +@@ -966,11 +1043,12 @@ static void return_unused_surplus_pages(struct hstate *h,
14140 + /*
14141 + * Determine if the huge page at addr within the vma has an associated
14142 + * reservation. Where it does not we will need to logically increase
14143 +- * reservation and actually increase quota before an allocation can occur.
14144 +- * Where any new reservation would be required the reservation change is
14145 +- * prepared, but not committed. Once the page has been quota'd allocated
14146 +- * an instantiated the change should be committed via vma_commit_reservation.
14147 +- * No action is required on failure.
14148 ++ * reservation and actually increase subpool usage before an allocation
14149 ++ * can occur. Where any new reservation would be required the
14150 ++ * reservation change is prepared, but not committed. Once the page
14151 ++ * has been allocated from the subpool and instantiated the change should
14152 ++ * be committed via vma_commit_reservation. No action is required on
14153 ++ * failure.
14154 + */
14155 + static long vma_needs_reservation(struct hstate *h,
14156 + struct vm_area_struct *vma, unsigned long addr)
14157 +@@ -1019,24 +1097,24 @@ static void vma_commit_reservation(struct hstate *h,
14158 + static struct page *alloc_huge_page(struct vm_area_struct *vma,
14159 + unsigned long addr, int avoid_reserve)
14160 + {
14161 ++ struct hugepage_subpool *spool = subpool_vma(vma);
14162 + struct hstate *h = hstate_vma(vma);
14163 + struct page *page;
14164 +- struct address_space *mapping = vma->vm_file->f_mapping;
14165 +- struct inode *inode = mapping->host;
14166 + long chg;
14167 +
14168 + /*
14169 +- * Processes that did not create the mapping will have no reserves and
14170 +- * will not have accounted against quota. Check that the quota can be
14171 +- * made before satisfying the allocation
14172 +- * MAP_NORESERVE mappings may also need pages and quota allocated
14173 +- * if no reserve mapping overlaps.
14174 ++ * Processes that did not create the mapping will have no
14175 ++ * reserves and will not have accounted against subpool
14176 ++ * limit. Check that the subpool limit can be made before
14177 ++ * satisfying the allocation MAP_NORESERVE mappings may also
14178 ++ * need pages and subpool limit allocated allocated if no reserve
14179 ++ * mapping overlaps.
14180 + */
14181 + chg = vma_needs_reservation(h, vma, addr);
14182 + if (chg < 0)
14183 + return ERR_PTR(-VM_FAULT_OOM);
14184 + if (chg)
14185 +- if (hugetlb_get_quota(inode->i_mapping, chg))
14186 ++ if (hugepage_subpool_get_pages(spool, chg))
14187 + return ERR_PTR(-VM_FAULT_SIGBUS);
14188 +
14189 + spin_lock(&hugetlb_lock);
14190 +@@ -1046,12 +1124,12 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
14191 + if (!page) {
14192 + page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
14193 + if (!page) {
14194 +- hugetlb_put_quota(inode->i_mapping, chg);
14195 ++ hugepage_subpool_put_pages(spool, chg);
14196 + return ERR_PTR(-VM_FAULT_SIGBUS);
14197 + }
14198 + }
14199 +
14200 +- set_page_private(page, (unsigned long) mapping);
14201 ++ set_page_private(page, (unsigned long)spool);
14202 +
14203 + vma_commit_reservation(h, vma, addr);
14204 +
14205 +@@ -2072,6 +2150,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
14206 + {
14207 + struct hstate *h = hstate_vma(vma);
14208 + struct resv_map *reservations = vma_resv_map(vma);
14209 ++ struct hugepage_subpool *spool = subpool_vma(vma);
14210 + unsigned long reserve;
14211 + unsigned long start;
14212 + unsigned long end;
14213 +@@ -2087,7 +2166,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
14214 +
14215 + if (reserve) {
14216 + hugetlb_acct_memory(h, -reserve);
14217 +- hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
14218 ++ hugepage_subpool_put_pages(spool, reserve);
14219 + }
14220 + }
14221 + }
14222 +@@ -2316,7 +2395,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
14223 + */
14224 + address = address & huge_page_mask(h);
14225 + pgoff = vma_hugecache_offset(h, vma, address);
14226 +- mapping = (struct address_space *)page_private(page);
14227 ++ mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
14228 +
14229 + /*
14230 + * Take the mapping lock for the duration of the table walk. As
14231 +@@ -2871,11 +2950,12 @@ int hugetlb_reserve_pages(struct inode *inode,
14232 + {
14233 + long ret, chg;
14234 + struct hstate *h = hstate_inode(inode);
14235 ++ struct hugepage_subpool *spool = subpool_inode(inode);
14236 +
14237 + /*
14238 + * Only apply hugepage reservation if asked. At fault time, an
14239 + * attempt will be made for VM_NORESERVE to allocate a page
14240 +- * and filesystem quota without using reserves
14241 ++ * without using reserves
14242 + */
14243 + if (vm_flags & VM_NORESERVE)
14244 + return 0;
14245 +@@ -2902,17 +2982,17 @@ int hugetlb_reserve_pages(struct inode *inode,
14246 + if (chg < 0)
14247 + return chg;
14248 +
14249 +- /* There must be enough filesystem quota for the mapping */
14250 +- if (hugetlb_get_quota(inode->i_mapping, chg))
14251 ++ /* There must be enough pages in the subpool for the mapping */
14252 ++ if (hugepage_subpool_get_pages(spool, chg))
14253 + return -ENOSPC;
14254 +
14255 + /*
14256 + * Check enough hugepages are available for the reservation.
14257 +- * Hand back the quota if there are not
14258 ++ * Hand the pages back to the subpool if there are not
14259 + */
14260 + ret = hugetlb_acct_memory(h, chg);
14261 + if (ret < 0) {
14262 +- hugetlb_put_quota(inode->i_mapping, chg);
14263 ++ hugepage_subpool_put_pages(spool, chg);
14264 + return ret;
14265 + }
14266 +
14267 +@@ -2936,12 +3016,13 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
14268 + {
14269 + struct hstate *h = hstate_inode(inode);
14270 + long chg = region_truncate(&inode->i_mapping->private_list, offset);
14271 ++ struct hugepage_subpool *spool = subpool_inode(inode);
14272 +
14273 + spin_lock(&inode->i_lock);
14274 + inode->i_blocks -= (blocks_per_huge_page(h) * freed);
14275 + spin_unlock(&inode->i_lock);
14276 +
14277 +- hugetlb_put_quota(inode->i_mapping, (chg - freed));
14278 ++ hugepage_subpool_put_pages(spool, (chg - freed));
14279 + hugetlb_acct_memory(h, -(chg - freed));
14280 + }
14281 +
14282 +diff --git a/net/core/dev.c b/net/core/dev.c
14283 +index 7f72c9c..0336374 100644
14284 +--- a/net/core/dev.c
14285 ++++ b/net/core/dev.c
14286 +@@ -1412,14 +1412,34 @@ EXPORT_SYMBOL(register_netdevice_notifier);
14287 + * register_netdevice_notifier(). The notifier is unlinked into the
14288 + * kernel structures and may then be reused. A negative errno code
14289 + * is returned on a failure.
14290 ++ *
14291 ++ * After unregistering unregister and down device events are synthesized
14292 ++ * for all devices on the device list to the removed notifier to remove
14293 ++ * the need for special case cleanup code.
14294 + */
14295 +
14296 + int unregister_netdevice_notifier(struct notifier_block *nb)
14297 + {
14298 ++ struct net_device *dev;
14299 ++ struct net *net;
14300 + int err;
14301 +
14302 + rtnl_lock();
14303 + err = raw_notifier_chain_unregister(&netdev_chain, nb);
14304 ++ if (err)
14305 ++ goto unlock;
14306 ++
14307 ++ for_each_net(net) {
14308 ++ for_each_netdev(net, dev) {
14309 ++ if (dev->flags & IFF_UP) {
14310 ++ nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
14311 ++ nb->notifier_call(nb, NETDEV_DOWN, dev);
14312 ++ }
14313 ++ nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
14314 ++ nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
14315 ++ }
14316 ++ }
14317 ++unlock:
14318 + rtnl_unlock();
14319 + return err;
14320 + }
14321 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
14322 +index 8c85021..e2327db 100644
14323 +--- a/net/ipv4/tcp.c
14324 ++++ b/net/ipv4/tcp.c
14325 +@@ -3240,7 +3240,7 @@ void __init tcp_init(void)
14326 + {
14327 + struct sk_buff *skb = NULL;
14328 + unsigned long limit;
14329 +- int max_share, cnt;
14330 ++ int max_rshare, max_wshare, cnt;
14331 + unsigned int i;
14332 + unsigned long jiffy = jiffies;
14333 +
14334 +@@ -3300,15 +3300,16 @@ void __init tcp_init(void)
14335 + tcp_init_mem(&init_net);
14336 + /* Set per-socket limits to no more than 1/128 the pressure threshold */
14337 + limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
14338 +- max_share = min(4UL*1024*1024, limit);
14339 ++ max_wshare = min(4UL*1024*1024, limit);
14340 ++ max_rshare = min(6UL*1024*1024, limit);
14341 +
14342 + sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
14343 + sysctl_tcp_wmem[1] = 16*1024;
14344 +- sysctl_tcp_wmem[2] = max(64*1024, max_share);
14345 ++ sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
14346 +
14347 + sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
14348 + sysctl_tcp_rmem[1] = 87380;
14349 +- sysctl_tcp_rmem[2] = max(87380, max_share);
14350 ++ sysctl_tcp_rmem[2] = max(87380, max_rshare);
14351 +
14352 + printk(KERN_INFO "TCP: Hash tables configured "
14353 + "(established %u bind %u)\n",
14354 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
14355 +index 1c30511..169f3a6 100644
14356 +--- a/net/ipv4/tcp_input.c
14357 ++++ b/net/ipv4/tcp_input.c
14358 +@@ -83,7 +83,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
14359 + EXPORT_SYMBOL(sysctl_tcp_ecn);
14360 + int sysctl_tcp_dsack __read_mostly = 1;
14361 + int sysctl_tcp_app_win __read_mostly = 31;
14362 +-int sysctl_tcp_adv_win_scale __read_mostly = 2;
14363 ++int sysctl_tcp_adv_win_scale __read_mostly = 1;
14364 + EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
14365 +
14366 + int sysctl_tcp_stdurg __read_mostly;
14367 +@@ -2866,11 +2866,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
14368 +
14369 + /* Do not moderate cwnd if it's already undone in cwr or recovery. */
14370 + if (tp->undo_marker) {
14371 +- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
14372 ++ if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
14373 + tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
14374 +- else /* PRR */
14375 ++ tp->snd_cwnd_stamp = tcp_time_stamp;
14376 ++ } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
14377 ++ /* PRR algorithm. */
14378 + tp->snd_cwnd = tp->snd_ssthresh;
14379 +- tp->snd_cwnd_stamp = tcp_time_stamp;
14380 ++ tp->snd_cwnd_stamp = tcp_time_stamp;
14381 ++ }
14382 + }
14383 + tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
14384 + }
14385 +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
14386 +index 55670ec..2a2a3e7 100644
14387 +--- a/net/l2tp/l2tp_ip.c
14388 ++++ b/net/l2tp/l2tp_ip.c
14389 +@@ -441,8 +441,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
14390 +
14391 + daddr = lip->l2tp_addr.s_addr;
14392 + } else {
14393 ++ rc = -EDESTADDRREQ;
14394 + if (sk->sk_state != TCP_ESTABLISHED)
14395 +- return -EDESTADDRREQ;
14396 ++ goto out;
14397 +
14398 + daddr = inet->inet_daddr;
14399 + connected = 1;
14400 +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
14401 +index 5da548f..ebd2296 100644
14402 +--- a/net/sched/sch_netem.c
14403 ++++ b/net/sched/sch_netem.c
14404 +@@ -408,10 +408,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
14405 + if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
14406 + if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
14407 + (skb->ip_summed == CHECKSUM_PARTIAL &&
14408 +- skb_checksum_help(skb))) {
14409 +- sch->qstats.drops++;
14410 +- return NET_XMIT_DROP;
14411 +- }
14412 ++ skb_checksum_help(skb)))
14413 ++ return qdisc_drop(skb, sch);
14414 +
14415 + skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
14416 + }
14417 +diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
14418 +index dfa41a9..e7de911 100644
14419 +--- a/sound/soc/codecs/tlv320aic23.c
14420 ++++ b/sound/soc/codecs/tlv320aic23.c
14421 +@@ -472,7 +472,7 @@ static int tlv320aic23_set_dai_sysclk(struct snd_soc_dai *codec_dai,
14422 + static int tlv320aic23_set_bias_level(struct snd_soc_codec *codec,
14423 + enum snd_soc_bias_level level)
14424 + {
14425 +- u16 reg = snd_soc_read(codec, TLV320AIC23_PWR) & 0xff7f;
14426 ++ u16 reg = snd_soc_read(codec, TLV320AIC23_PWR) & 0x17f;
14427 +
14428 + switch (level) {
14429 + case SND_SOC_BIAS_ON:
14430 +@@ -491,7 +491,7 @@ static int tlv320aic23_set_bias_level(struct snd_soc_codec *codec,
14431 + case SND_SOC_BIAS_OFF:
14432 + /* everything off, dac mute, inactive */
14433 + snd_soc_write(codec, TLV320AIC23_ACTIVE, 0x0);
14434 +- snd_soc_write(codec, TLV320AIC23_PWR, 0xffff);
14435 ++ snd_soc_write(codec, TLV320AIC23_PWR, 0x1ff);
14436 + break;
14437 + }
14438 + codec->dapm.bias_level = level;
14439 +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
14440 +index 92cee24..48e91cd 100644
14441 +--- a/sound/soc/soc-core.c
14442 ++++ b/sound/soc/soc-core.c
14443 +@@ -3420,10 +3420,10 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
14444 + int i, ret;
14445 +
14446 + num_routes = of_property_count_strings(np, propname);
14447 +- if (num_routes & 1) {
14448 ++ if (num_routes < 0 || num_routes & 1) {
14449 + dev_err(card->dev,
14450 +- "Property '%s's length is not even\n",
14451 +- propname);
14452 ++ "Property '%s' does not exist or its length is not even\n",
14453 ++ propname);
14454 + return -EINVAL;
14455 + }
14456 + num_routes /= 2;
14457 +diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
14458 +index fec1723..e9fff98 100644
14459 +--- a/virt/kvm/iommu.c
14460 ++++ b/virt/kvm/iommu.c
14461 +@@ -240,9 +240,13 @@ int kvm_iommu_map_guest(struct kvm *kvm)
14462 + return -ENODEV;
14463 + }
14464 +
14465 ++ mutex_lock(&kvm->slots_lock);
14466 ++
14467 + kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
14468 +- if (!kvm->arch.iommu_domain)
14469 +- return -ENOMEM;
14470 ++ if (!kvm->arch.iommu_domain) {
14471 ++ r = -ENOMEM;
14472 ++ goto out_unlock;
14473 ++ }
14474 +
14475 + if (!allow_unsafe_assigned_interrupts &&
14476 + !iommu_domain_has_cap(kvm->arch.iommu_domain,
14477 +@@ -253,17 +257,16 @@ int kvm_iommu_map_guest(struct kvm *kvm)
14478 + " module option.\n", __func__);
14479 + iommu_domain_free(kvm->arch.iommu_domain);
14480 + kvm->arch.iommu_domain = NULL;
14481 +- return -EPERM;
14482 ++ r = -EPERM;
14483 ++ goto out_unlock;
14484 + }
14485 +
14486 + r = kvm_iommu_map_memslots(kvm);
14487 + if (r)
14488 +- goto out_unmap;
14489 +-
14490 +- return 0;
14491 ++ kvm_iommu_unmap_memslots(kvm);
14492 +
14493 +-out_unmap:
14494 +- kvm_iommu_unmap_memslots(kvm);
14495 ++out_unlock:
14496 ++ mutex_unlock(&kvm->slots_lock);
14497 + return r;
14498 + }
14499 +
14500 +@@ -340,7 +343,11 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
14501 + if (!domain)
14502 + return 0;
14503 +
14504 ++ mutex_lock(&kvm->slots_lock);
14505 + kvm_iommu_unmap_memslots(kvm);
14506 ++ kvm->arch.iommu_domain = NULL;
14507 ++ mutex_unlock(&kvm->slots_lock);
14508 ++
14509 + iommu_domain_free(domain);
14510 + return 0;
14511 + }
14512 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
14513 +index c4ac57e..7858228 100644
14514 +--- a/virt/kvm/kvm_main.c
14515 ++++ b/virt/kvm/kvm_main.c
14516 +@@ -289,15 +289,15 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
14517 + */
14518 + idx = srcu_read_lock(&kvm->srcu);
14519 + spin_lock(&kvm->mmu_lock);
14520 ++
14521 + kvm->mmu_notifier_seq++;
14522 + need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
14523 +- spin_unlock(&kvm->mmu_lock);
14524 +- srcu_read_unlock(&kvm->srcu, idx);
14525 +-
14526 + /* we've to flush the tlb before the pages can be freed */
14527 + if (need_tlb_flush)
14528 + kvm_flush_remote_tlbs(kvm);
14529 +
14530 ++ spin_unlock(&kvm->mmu_lock);
14531 ++ srcu_read_unlock(&kvm->srcu, idx);
14532 + }
14533 +
14534 + static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
14535 +@@ -335,12 +335,12 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
14536 + for (; start < end; start += PAGE_SIZE)
14537 + need_tlb_flush |= kvm_unmap_hva(kvm, start);
14538 + need_tlb_flush |= kvm->tlbs_dirty;
14539 +- spin_unlock(&kvm->mmu_lock);
14540 +- srcu_read_unlock(&kvm->srcu, idx);
14541 +-
14542 + /* we've to flush the tlb before the pages can be freed */
14543 + if (need_tlb_flush)
14544 + kvm_flush_remote_tlbs(kvm);
14545 ++
14546 ++ spin_unlock(&kvm->mmu_lock);
14547 ++ srcu_read_unlock(&kvm->srcu, idx);
14548 + }
14549 +
14550 + static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
14551 +@@ -378,13 +378,14 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
14552 +
14553 + idx = srcu_read_lock(&kvm->srcu);
14554 + spin_lock(&kvm->mmu_lock);
14555 +- young = kvm_age_hva(kvm, address);
14556 +- spin_unlock(&kvm->mmu_lock);
14557 +- srcu_read_unlock(&kvm->srcu, idx);
14558 +
14559 ++ young = kvm_age_hva(kvm, address);
14560 + if (young)
14561 + kvm_flush_remote_tlbs(kvm);
14562 +
14563 ++ spin_unlock(&kvm->mmu_lock);
14564 ++ srcu_read_unlock(&kvm->srcu, idx);
14565 ++
14566 + return young;
14567 + }
14568 +
14569 +@@ -1719,6 +1720,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
14570 + goto vcpu_destroy;
14571 +
14572 + mutex_lock(&kvm->lock);
14573 ++ if (!kvm_vcpu_compatible(vcpu)) {
14574 ++ r = -EINVAL;
14575 ++ goto unlock_vcpu_destroy;
14576 ++ }
14577 + if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
14578 + r = -EINVAL;
14579 + goto unlock_vcpu_destroy;
14580
14581 diff --git a/3.3.5/4420_grsecurity-2.9-3.3.5-201205071839.patch b/3.3.6/4420_grsecurity-2.9-3.3.6-201205131658.patch
14582 similarity index 99%
14583 rename from 3.3.5/4420_grsecurity-2.9-3.3.5-201205071839.patch
14584 rename to 3.3.6/4420_grsecurity-2.9-3.3.6-201205131658.patch
14585 index 222eccd..0bad506 100644
14586 --- a/3.3.5/4420_grsecurity-2.9-3.3.5-201205071839.patch
14587 +++ b/3.3.6/4420_grsecurity-2.9-3.3.6-201205131658.patch
14588 @@ -195,7 +195,7 @@ index d99fd9c..8689fef 100644
14589
14590 pcd. [PARIDE]
14591 diff --git a/Makefile b/Makefile
14592 -index 64615e9..64d72ce 100644
14593 +index 9cd6941..92e68ff 100644
14594 --- a/Makefile
14595 +++ b/Makefile
14596 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
14597 @@ -1457,6 +1457,36 @@ index e4c96cc..1145653 100644
14598 #endif /* __ASSEMBLY__ */
14599
14600 #define arch_align_stack(x) (x)
14601 +diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
14602 +index d4c24d4..4ac53e8 100644
14603 +--- a/arch/arm/include/asm/thread_info.h
14604 ++++ b/arch/arm/include/asm/thread_info.h
14605 +@@ -141,6 +141,12 @@ extern void vfp_flush_hwstate(struct thread_info *);
14606 + #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
14607 + #define TIF_SYSCALL_TRACE 8
14608 + #define TIF_SYSCALL_AUDIT 9
14609 ++
14610 ++/* within 8 bits of TIF_SYSCALL_TRACE
14611 ++ to meet flexible second operand requirements
14612 ++*/
14613 ++#define TIF_GRSEC_SETXID 10
14614 ++
14615 + #define TIF_POLLING_NRFLAG 16
14616 + #define TIF_USING_IWMMXT 17
14617 + #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
14618 +@@ -156,9 +162,11 @@ extern void vfp_flush_hwstate(struct thread_info *);
14619 + #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
14620 + #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
14621 + #define _TIF_SECCOMP (1 << TIF_SECCOMP)
14622 ++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
14623 +
14624 + /* Checks for any syscall work in entry-common.S */
14625 +-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
14626 ++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
14627 ++ _TIF_GRSEC_SETXID)
14628 +
14629 + /*
14630 + * Change these and you break ASM code in entry-common.S
14631 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
14632 index 2958976..12ccac4 100644
14633 --- a/arch/arm/include/asm/uaccess.h
14634 @@ -1568,6 +1598,30 @@ index 971d65c..cc936fb 100644
14635 #ifdef CONFIG_MMU
14636 /*
14637 * The vectors page is always readable from user space for the
14638 +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
14639 +index f5ce8ab..4b73893 100644
14640 +--- a/arch/arm/kernel/ptrace.c
14641 ++++ b/arch/arm/kernel/ptrace.c
14642 +@@ -905,10 +905,19 @@ long arch_ptrace(struct task_struct *child, long request,
14643 + return ret;
14644 + }
14645 +
14646 ++#ifdef CONFIG_GRKERNSEC_SETXID
14647 ++extern void gr_delayed_cred_worker(void);
14648 ++#endif
14649 ++
14650 + asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
14651 + {
14652 + unsigned long ip;
14653 +
14654 ++#ifdef CONFIG_GRKERNSEC_SETXID
14655 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
14656 ++ gr_delayed_cred_worker();
14657 ++#endif
14658 ++
14659 + if (why)
14660 + audit_syscall_exit(regs);
14661 + else
14662 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
14663 index a255c39..4a19b25 100644
14664 --- a/arch/arm/kernel/setup.c
14665 @@ -2791,6 +2845,40 @@ index 6018c80..7c37203 100644
14666 +#define arch_align_stack(x) ((x) & ~0xfUL)
14667
14668 #endif /* _ASM_SYSTEM_H */
14669 +diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
14670 +index 0d85d8e..ec71487 100644
14671 +--- a/arch/mips/include/asm/thread_info.h
14672 ++++ b/arch/mips/include/asm/thread_info.h
14673 +@@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
14674 + #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
14675 + #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
14676 + #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
14677 ++/* li takes a 32bit immediate */
14678 ++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
14679 + #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
14680 +
14681 + #ifdef CONFIG_MIPS32_O32
14682 +@@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
14683 + #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
14684 + #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
14685 + #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
14686 ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
14687 ++
14688 ++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
14689 +
14690 + /* work to do in syscall_trace_leave() */
14691 +-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
14692 ++#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
14693 +
14694 + /* work to do on interrupt/exception return */
14695 + #define _TIF_WORK_MASK (0x0000ffef & \
14696 + ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
14697 + /* work to do on any return to u-space */
14698 +-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
14699 ++#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
14700 +
14701 + #endif /* __KERNEL__ */
14702 +
14703 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
14704 index 9fdd8bc..4bd7f1a 100644
14705 --- a/arch/mips/kernel/binfmt_elfn32.c
14706 @@ -2847,6 +2935,85 @@ index 7955409..ceaea7c 100644
14707 -
14708 - return sp & ALMASK;
14709 -}
14710 +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
14711 +index 7786b60..3e38c72 100644
14712 +--- a/arch/mips/kernel/ptrace.c
14713 ++++ b/arch/mips/kernel/ptrace.c
14714 +@@ -529,6 +529,10 @@ static inline int audit_arch(void)
14715 + return arch;
14716 + }
14717 +
14718 ++#ifdef CONFIG_GRKERNSEC_SETXID
14719 ++extern void gr_delayed_cred_worker(void);
14720 ++#endif
14721 ++
14722 + /*
14723 + * Notification of system call entry/exit
14724 + * - triggered by current->work.syscall_trace
14725 +@@ -538,6 +542,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
14726 + /* do the secure computing check first */
14727 + secure_computing(regs->regs[2]);
14728 +
14729 ++#ifdef CONFIG_GRKERNSEC_SETXID
14730 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
14731 ++ gr_delayed_cred_worker();
14732 ++#endif
14733 ++
14734 + if (!(current->ptrace & PT_PTRACED))
14735 + goto out;
14736 +
14737 +diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
14738 +index a632bc1..0b77c7c 100644
14739 +--- a/arch/mips/kernel/scall32-o32.S
14740 ++++ b/arch/mips/kernel/scall32-o32.S
14741 +@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
14742 +
14743 + stack_done:
14744 + lw t0, TI_FLAGS($28) # syscall tracing enabled?
14745 +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
14746 ++ li t1, _TIF_SYSCALL_WORK
14747 + and t0, t1
14748 + bnez t0, syscall_trace_entry # -> yes
14749 +
14750 +diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
14751 +index 3b5a5e9..e1ee86d 100644
14752 +--- a/arch/mips/kernel/scall64-64.S
14753 ++++ b/arch/mips/kernel/scall64-64.S
14754 +@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
14755 +
14756 + sd a3, PT_R26(sp) # save a3 for syscall restarting
14757 +
14758 +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
14759 ++ li t1, _TIF_SYSCALL_WORK
14760 + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
14761 + and t0, t1, t0
14762 + bnez t0, syscall_trace_entry
14763 +diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
14764 +index 6be6f70..1859577 100644
14765 +--- a/arch/mips/kernel/scall64-n32.S
14766 ++++ b/arch/mips/kernel/scall64-n32.S
14767 +@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
14768 +
14769 + sd a3, PT_R26(sp) # save a3 for syscall restarting
14770 +
14771 +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
14772 ++ li t1, _TIF_SYSCALL_WORK
14773 + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
14774 + and t0, t1, t0
14775 + bnez t0, n32_syscall_trace_entry
14776 +diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
14777 +index 5422855..74e63a3 100644
14778 +--- a/arch/mips/kernel/scall64-o32.S
14779 ++++ b/arch/mips/kernel/scall64-o32.S
14780 +@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
14781 + PTR 4b, bad_stack
14782 + .previous
14783 +
14784 +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
14785 ++ li t1, _TIF_SYSCALL_WORK
14786 + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
14787 + and t0, t1, t0
14788 + bnez t0, trace_a_syscall
14789 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
14790 index 69ebd58..e4bff83 100644
14791 --- a/arch/mips/mm/fault.c
14792 @@ -3689,6 +3856,40 @@ index c377457..3c69fbc 100644
14793
14794 /* Used in very early kernel initialization. */
14795 extern unsigned long reloc_offset(void);
14796 +diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
14797 +index 96471494..60ed5a2 100644
14798 +--- a/arch/powerpc/include/asm/thread_info.h
14799 ++++ b/arch/powerpc/include/asm/thread_info.h
14800 +@@ -104,13 +104,15 @@ static inline struct thread_info *current_thread_info(void)
14801 + #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
14802 + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
14803 + #define TIF_SINGLESTEP 8 /* singlestepping active */
14804 +-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
14805 + #define TIF_SECCOMP 10 /* secure computing */
14806 + #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
14807 + #define TIF_NOERROR 12 /* Force successful syscall return */
14808 + #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
14809 + #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
14810 + #define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
14811 ++#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
14812 ++/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
14813 ++#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
14814 +
14815 + /* as above, but as bit values */
14816 + #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
14817 +@@ -128,8 +130,11 @@ static inline struct thread_info *current_thread_info(void)
14818 + #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
14819 + #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
14820 + #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
14821 ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
14822 ++
14823 + #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
14824 +- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
14825 ++ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT \
14826 ++ _TIF_GRSEC_SETXID)
14827 +
14828 + #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
14829 + _TIF_NOTIFY_RESUME)
14830 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
14831 index bd0fb84..a42a14b 100644
14832 --- a/arch/powerpc/include/asm/uaccess.h
14833 @@ -4065,6 +4266,45 @@ index d817ab0..b23b18e 100644
14834 -
14835 - return ret;
14836 -}
14837 +diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
14838 +index 5b43325..94a5bb4 100644
14839 +--- a/arch/powerpc/kernel/ptrace.c
14840 ++++ b/arch/powerpc/kernel/ptrace.c
14841 +@@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
14842 + return ret;
14843 + }
14844 +
14845 ++#ifdef CONFIG_GRKERNSEC_SETXID
14846 ++extern void gr_delayed_cred_worker(void);
14847 ++#endif
14848 ++
14849 + /*
14850 + * We must return the syscall number to actually look up in the table.
14851 + * This can be -1L to skip running any syscall at all.
14852 +@@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
14853 +
14854 + secure_computing(regs->gpr[0]);
14855 +
14856 ++#ifdef CONFIG_GRKERNSEC_SETXID
14857 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
14858 ++ gr_delayed_cred_worker();
14859 ++#endif
14860 ++
14861 + if (test_thread_flag(TIF_SYSCALL_TRACE) &&
14862 + tracehook_report_syscall_entry(regs))
14863 + /*
14864 +@@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
14865 + {
14866 + int step;
14867 +
14868 ++#ifdef CONFIG_GRKERNSEC_SETXID
14869 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
14870 ++ gr_delayed_cred_worker();
14871 ++#endif
14872 ++
14873 + audit_syscall_exit(regs);
14874 +
14875 + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
14876 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
14877 index 836a5a1..27289a3 100644
14878 --- a/arch/powerpc/kernel/signal_32.c
14879 @@ -5253,7 +5493,7 @@ index c2a1080..21ed218 100644
14880
14881 /*
14882 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
14883 -index 01d057f..0a02f7e 100644
14884 +index 01d057f..13a7d2f 100644
14885 --- a/arch/sparc/include/asm/thread_info_64.h
14886 +++ b/arch/sparc/include/asm/thread_info_64.h
14887 @@ -63,6 +63,8 @@ struct thread_info {
14888 @@ -5265,6 +5505,38 @@ index 01d057f..0a02f7e 100644
14889 unsigned long fpregs[0] __attribute__ ((aligned(64)));
14890 };
14891
14892 +@@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
14893 + #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
14894 + /* flag bit 6 is available */
14895 + #define TIF_32BIT 7 /* 32-bit binary */
14896 +-/* flag bit 8 is available */
14897 ++#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
14898 + #define TIF_SECCOMP 9 /* secure computing */
14899 + #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
14900 + #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
14901 ++
14902 + /* NOTE: Thread flags >= 12 should be ones we have no interest
14903 + * in using in assembly, else we can't use the mask as
14904 + * an immediate value in instructions such as andcc.
14905 +@@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
14906 + #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
14907 + #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
14908 + #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
14909 ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
14910 +
14911 + #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
14912 + _TIF_DO_NOTIFY_RESUME_MASK | \
14913 + _TIF_NEED_RESCHED)
14914 + #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
14915 +
14916 ++#define _TIF_WORK_SYSCALL \
14917 ++ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
14918 ++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
14919 ++
14920 ++
14921 + /*
14922 + * Thread-synchronous status.
14923 + *
14924 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
14925 index e88fbe5..96b0ce5 100644
14926 --- a/arch/sparc/include/asm/uaccess.h
14927 @@ -5475,6 +5747,45 @@ index 39d8b05..d1a7d90 100644
14928 (void *) gp->tpc,
14929 (void *) gp->o7,
14930 (void *) gp->i7,
14931 +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
14932 +index 9388844..0075fd2 100644
14933 +--- a/arch/sparc/kernel/ptrace_64.c
14934 ++++ b/arch/sparc/kernel/ptrace_64.c
14935 +@@ -1058,6 +1058,10 @@ long arch_ptrace(struct task_struct *child, long request,
14936 + return ret;
14937 + }
14938 +
14939 ++#ifdef CONFIG_GRKERNSEC_SETXID
14940 ++extern void gr_delayed_cred_worker(void);
14941 ++#endif
14942 ++
14943 + asmlinkage int syscall_trace_enter(struct pt_regs *regs)
14944 + {
14945 + int ret = 0;
14946 +@@ -1065,6 +1069,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
14947 + /* do the secure computing check first */
14948 + secure_computing(regs->u_regs[UREG_G1]);
14949 +
14950 ++#ifdef CONFIG_GRKERNSEC_SETXID
14951 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
14952 ++ gr_delayed_cred_worker();
14953 ++#endif
14954 ++
14955 + if (test_thread_flag(TIF_SYSCALL_TRACE))
14956 + ret = tracehook_report_syscall_entry(regs);
14957 +
14958 +@@ -1085,6 +1094,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
14959 +
14960 + asmlinkage void syscall_trace_leave(struct pt_regs *regs)
14961 + {
14962 ++#ifdef CONFIG_GRKERNSEC_SETXID
14963 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
14964 ++ gr_delayed_cred_worker();
14965 ++#endif
14966 ++
14967 + audit_syscall_exit(regs);
14968 +
14969 + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
14970 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
14971 index 42b282f..28ce9f2 100644
14972 --- a/arch/sparc/kernel/sys_sparc_32.c
14973 @@ -5648,6 +5959,55 @@ index 232df99..cee1f9c 100644
14974 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
14975 mm->unmap_area = arch_unmap_area_topdown;
14976 }
14977 +diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
14978 +index 1d7e274..b39c527 100644
14979 +--- a/arch/sparc/kernel/syscalls.S
14980 ++++ b/arch/sparc/kernel/syscalls.S
14981 +@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
14982 + #endif
14983 + .align 32
14984 + 1: ldx [%g6 + TI_FLAGS], %l5
14985 +- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
14986 ++ andcc %l5, _TIF_WORK_SYSCALL, %g0
14987 + be,pt %icc, rtrap
14988 + nop
14989 + call syscall_trace_leave
14990 +@@ -179,7 +179,7 @@ linux_sparc_syscall32:
14991 +
14992 + srl %i5, 0, %o5 ! IEU1
14993 + srl %i2, 0, %o2 ! IEU0 Group
14994 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
14995 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0
14996 + bne,pn %icc, linux_syscall_trace32 ! CTI
14997 + mov %i0, %l5 ! IEU1
14998 + call %l7 ! CTI Group brk forced
14999 +@@ -202,7 +202,7 @@ linux_sparc_syscall:
15000 +
15001 + mov %i3, %o3 ! IEU1
15002 + mov %i4, %o4 ! IEU0 Group
15003 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
15004 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0
15005 + bne,pn %icc, linux_syscall_trace ! CTI Group
15006 + mov %i0, %l5 ! IEU0
15007 + 2: call %l7 ! CTI Group brk forced
15008 +@@ -226,7 +226,7 @@ ret_sys_call:
15009 +
15010 + cmp %o0, -ERESTART_RESTARTBLOCK
15011 + bgeu,pn %xcc, 1f
15012 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
15013 ++ andcc %l0, _TIF_WORK_SYSCALL, %l6
15014 + 80:
15015 + /* System call success, clear Carry condition code. */
15016 + andn %g3, %g2, %g3
15017 +@@ -241,7 +241,7 @@ ret_sys_call:
15018 + /* System call failure, set Carry condition code.
15019 + * Also, get abs(errno) to return to the process.
15020 + */
15021 +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
15022 ++ andcc %l0, _TIF_WORK_SYSCALL, %l6
15023 + sub %g0, %o0, %o0
15024 + or %g3, %g2, %g3
15025 + stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
15026 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
15027 index 591f20c..0f1b925 100644
15028 --- a/arch/sparc/kernel/traps_32.c
15029 @@ -7519,7 +7879,7 @@ index 7116dcb..d9ae1d7 100644
15030 #endif
15031
15032 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
15033 -index 89bbf4e..869908e 100644
15034 +index e77f4e4..17e511f 100644
15035 --- a/arch/x86/boot/compressed/relocs.c
15036 +++ b/arch/x86/boot/compressed/relocs.c
15037 @@ -13,8 +13,11 @@
15038 @@ -7624,7 +7984,7 @@ index 89bbf4e..869908e 100644
15039 rel->r_info = elf32_to_cpu(rel->r_info);
15040 }
15041 }
15042 -@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
15043 +@@ -396,13 +440,13 @@ static void read_relocs(FILE *fp)
15044
15045 static void print_absolute_symbols(void)
15046 {
15047 @@ -7635,13 +7995,12 @@ index 89bbf4e..869908e 100644
15048 for (i = 0; i < ehdr.e_shnum; i++) {
15049 struct section *sec = &secs[i];
15050 char *sym_strtab;
15051 - Elf32_Sym *sh_symtab;
15052 - int j;
15053 + unsigned int j;
15054
15055 if (sec->shdr.sh_type != SHT_SYMTAB) {
15056 continue;
15057 -@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
15058 +@@ -429,14 +473,14 @@ static void print_absolute_symbols(void)
15059
15060 static void print_absolute_relocs(void)
15061 {
15062 @@ -7658,7 +8017,7 @@ index 89bbf4e..869908e 100644
15063 if (sec->shdr.sh_type != SHT_REL) {
15064 continue;
15065 }
15066 -@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
15067 +@@ -497,13 +541,13 @@ static void print_absolute_relocs(void)
15068
15069 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
15070 {
15071 @@ -7674,7 +8033,7 @@ index 89bbf4e..869908e 100644
15072 struct section *sec = &secs[i];
15073
15074 if (sec->shdr.sh_type != SHT_REL) {
15075 -@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
15076 +@@ -528,6 +572,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
15077 !is_rel_reloc(sym_name(sym_strtab, sym))) {
15078 continue;
15079 }
15080 @@ -7697,7 +8056,7 @@ index 89bbf4e..869908e 100644
15081 switch (r_type) {
15082 case R_386_NONE:
15083 case R_386_PC32:
15084 -@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
15085 +@@ -569,7 +629,7 @@ static int cmp_relocs(const void *va, const void *vb)
15086
15087 static void emit_relocs(int as_text)
15088 {
15089 @@ -7706,7 +8065,7 @@ index 89bbf4e..869908e 100644
15090 /* Count how many relocations I have and allocate space for them. */
15091 reloc_count = 0;
15092 walk_relocs(count_reloc);
15093 -@@ -665,6 +725,7 @@ int main(int argc, char **argv)
15094 +@@ -663,6 +723,7 @@ int main(int argc, char **argv)
15095 fname, strerror(errno));
15096 }
15097 read_ehdr(fp);
15098 @@ -12132,7 +12491,7 @@ index 2d2f01c..f985723 100644
15099 /*
15100 * Force strict CPU ordering.
15101 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
15102 -index cfd8144..1b1127d 100644
15103 +index cfd8144..664ac89 100644
15104 --- a/arch/x86/include/asm/thread_info.h
15105 +++ b/arch/x86/include/asm/thread_info.h
15106 @@ -10,6 +10,7 @@
15107 @@ -12182,7 +12541,45 @@ index cfd8144..1b1127d 100644
15108 #define init_stack (init_thread_union.stack)
15109
15110 #else /* !__ASSEMBLY__ */
15111 -@@ -169,45 +163,40 @@ struct thread_info {
15112 +@@ -95,6 +89,7 @@ struct thread_info {
15113 + #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
15114 + #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
15115 + #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
15116 ++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
15117 +
15118 + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
15119 + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
15120 +@@ -116,16 +111,17 @@ struct thread_info {
15121 + #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
15122 + #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
15123 + #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
15124 ++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
15125 +
15126 + /* work to do in syscall_trace_enter() */
15127 + #define _TIF_WORK_SYSCALL_ENTRY \
15128 + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
15129 +- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
15130 ++ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
15131 +
15132 + /* work to do in syscall_trace_leave() */
15133 + #define _TIF_WORK_SYSCALL_EXIT \
15134 + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
15135 +- _TIF_SYSCALL_TRACEPOINT)
15136 ++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
15137 +
15138 + /* work to do on interrupt/exception return */
15139 + #define _TIF_WORK_MASK \
15140 +@@ -135,7 +131,8 @@ struct thread_info {
15141 +
15142 + /* work to do on any return to user space */
15143 + #define _TIF_ALLWORK_MASK \
15144 +- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
15145 ++ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
15146 ++ _TIF_GRSEC_SETXID)
15147 +
15148 + /* Only used for 64 bit */
15149 + #define _TIF_DO_NOTIFY_MASK \
15150 +@@ -169,45 +166,40 @@ struct thread_info {
15151 ret; \
15152 })
15153
15154 @@ -12253,7 +12650,7 @@ index cfd8144..1b1127d 100644
15155 /*
15156 * macros/functions for gaining access to the thread information structure
15157 * preempt_count needs to be 1 initially, until the scheduler is functional.
15158 -@@ -215,27 +204,8 @@ static inline struct thread_info *current_thread_info(void)
15159 +@@ -215,27 +207,8 @@ static inline struct thread_info *current_thread_info(void)
15160 #ifndef __ASSEMBLY__
15161 DECLARE_PER_CPU(unsigned long, kernel_stack);
15162
15163 @@ -12283,7 +12680,7 @@ index cfd8144..1b1127d 100644
15164 #endif
15165
15166 #endif /* !X86_32 */
15167 -@@ -269,5 +239,16 @@ extern void arch_task_cache_init(void);
15168 +@@ -269,5 +242,16 @@ extern void arch_task_cache_init(void);
15169 extern void free_thread_info(struct thread_info *ti);
15170 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
15171 #define arch_task_cache_init arch_task_cache_init
15172 @@ -14606,7 +15003,7 @@ index 9b9f18b..9fcaa04 100644
15173 #include <asm/processor.h>
15174 #include <asm/fcntl.h>
15175 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
15176 -index 7b784f4..76aaad7 100644
15177 +index 7b784f4..db6b628 100644
15178 --- a/arch/x86/kernel/entry_32.S
15179 +++ b/arch/x86/kernel/entry_32.S
15180 @@ -179,13 +179,146 @@
15181 @@ -14799,7 +15196,7 @@ index 7b784f4..76aaad7 100644
15182 +#ifdef CONFIG_PAX_KERNEXEC
15183 + jae resume_userspace
15184 +
15185 -+ PAX_EXIT_KERNEL
15186 ++ pax_exit_kernel
15187 + jmp resume_kernel
15188 +#else
15189 jb resume_kernel # not returning to v8086 or userspace
15190 @@ -18533,7 +18930,7 @@ index cfa5c90..4facd28 100644
15191 ip = *(u64 *)(fp+8);
15192 if (!in_sched_functions(ip))
15193 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
15194 -index 5026738..e1b5aa8 100644
15195 +index 5026738..574f70a 100644
15196 --- a/arch/x86/kernel/ptrace.c
15197 +++ b/arch/x86/kernel/ptrace.c
15198 @@ -792,6 +792,10 @@ static int ioperm_active(struct task_struct *target,
15199 @@ -18582,6 +18979,41 @@ index 5026738..e1b5aa8 100644
15200 }
15201
15202 void user_single_step_siginfo(struct task_struct *tsk,
15203 +@@ -1361,6 +1365,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
15204 + # define IS_IA32 0
15205 + #endif
15206 +
15207 ++#ifdef CONFIG_GRKERNSEC_SETXID
15208 ++extern void gr_delayed_cred_worker(void);
15209 ++#endif
15210 ++
15211 + /*
15212 + * We must return the syscall number to actually look up in the table.
15213 + * This can be -1L to skip running any syscall at all.
15214 +@@ -1369,6 +1377,11 @@ long syscall_trace_enter(struct pt_regs *regs)
15215 + {
15216 + long ret = 0;
15217 +
15218 ++#ifdef CONFIG_GRKERNSEC_SETXID
15219 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
15220 ++ gr_delayed_cred_worker();
15221 ++#endif
15222 ++
15223 + /*
15224 + * If we stepped into a sysenter/syscall insn, it trapped in
15225 + * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
15226 +@@ -1412,6 +1425,11 @@ void syscall_trace_leave(struct pt_regs *regs)
15227 + {
15228 + bool step;
15229 +
15230 ++#ifdef CONFIG_GRKERNSEC_SETXID
15231 ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
15232 ++ gr_delayed_cred_worker();
15233 ++#endif
15234 ++
15235 + audit_syscall_exit(regs);
15236 +
15237 + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
15238 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
15239 index 42eb330..139955c 100644
15240 --- a/arch/x86/kernel/pvclock.c
15241 @@ -18820,7 +19252,7 @@ index d7d5099..28555d0 100644
15242 bss_resource.start = virt_to_phys(&__bss_start);
15243 bss_resource.end = virt_to_phys(&__bss_stop)-1;
15244 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
15245 -index 71f4727..217419b 100644
15246 +index 5a98aa2..848d2be 100644
15247 --- a/arch/x86/kernel/setup_percpu.c
15248 +++ b/arch/x86/kernel/setup_percpu.c
15249 @@ -21,19 +21,17 @@
15250 @@ -18879,7 +19311,7 @@ index 71f4727..217419b 100644
15251 write_gdt_entry(get_cpu_gdt_table(cpu),
15252 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
15253 #endif
15254 -@@ -207,6 +209,11 @@ void __init setup_per_cpu_areas(void)
15255 +@@ -219,6 +221,11 @@ void __init setup_per_cpu_areas(void)
15256 /* alrighty, percpu areas up and running */
15257 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
15258 for_each_possible_cpu(cpu) {
15259 @@ -18891,7 +19323,7 @@ index 71f4727..217419b 100644
15260 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
15261 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
15262 per_cpu(cpu_number, cpu) = cpu;
15263 -@@ -247,6 +254,12 @@ void __init setup_per_cpu_areas(void)
15264 +@@ -259,6 +266,12 @@ void __init setup_per_cpu_areas(void)
15265 */
15266 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
15267 #endif
15268 @@ -20334,7 +20766,7 @@ index e385214..f8df033 100644
15269
15270 local_irq_disable();
15271 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
15272 -index 3b4c8d8..f457b63 100644
15273 +index a7a6f60..04b745a 100644
15274 --- a/arch/x86/kvm/vmx.c
15275 +++ b/arch/x86/kvm/vmx.c
15276 @@ -1306,7 +1306,11 @@ static void reload_tss(void)
15277 @@ -20349,7 +20781,7 @@ index 3b4c8d8..f457b63 100644
15278 load_TR_desc();
15279 }
15280
15281 -@@ -2631,8 +2635,11 @@ static __init int hardware_setup(void)
15282 +@@ -2637,8 +2641,11 @@ static __init int hardware_setup(void)
15283 if (!cpu_has_vmx_flexpriority())
15284 flexpriority_enabled = 0;
15285
15286 @@ -20363,7 +20795,7 @@ index 3b4c8d8..f457b63 100644
15287
15288 if (enable_ept && !cpu_has_vmx_ept_2m_page())
15289 kvm_disable_largepages();
15290 -@@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
15291 +@@ -3654,7 +3661,7 @@ static void vmx_set_constant_host_state(void)
15292 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
15293
15294 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
15295 @@ -20372,7 +20804,7 @@ index 3b4c8d8..f457b63 100644
15296
15297 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
15298 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
15299 -@@ -6184,6 +6191,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
15300 +@@ -6192,6 +6199,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
15301 "jmp .Lkvm_vmx_return \n\t"
15302 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
15303 ".Lkvm_vmx_return: "
15304 @@ -20385,7 +20817,7 @@ index 3b4c8d8..f457b63 100644
15305 /* Save guest registers, load host registers, keep flags */
15306 "mov %0, %c[wordsize](%%"R"sp) \n\t"
15307 "pop %0 \n\t"
15308 -@@ -6232,6 +6245,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
15309 +@@ -6240,6 +6253,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
15310 #endif
15311 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
15312 [wordsize]"i"(sizeof(ulong))
15313 @@ -20397,7 +20829,7 @@ index 3b4c8d8..f457b63 100644
15314 : "cc", "memory"
15315 , R"ax", R"bx", R"di", R"si"
15316 #ifdef CONFIG_X86_64
15317 -@@ -6260,7 +6278,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
15318 +@@ -6268,7 +6286,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
15319 }
15320 }
15321
15322 @@ -20416,7 +20848,7 @@ index 3b4c8d8..f457b63 100644
15323
15324 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
15325 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
15326 -index 9cbfc06..943ffa6 100644
15327 +index 8d1c6c6..6e6d611 100644
15328 --- a/arch/x86/kvm/x86.c
15329 +++ b/arch/x86/kvm/x86.c
15330 @@ -873,6 +873,7 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
15331 @@ -20461,7 +20893,7 @@ index 9cbfc06..943ffa6 100644
15332 return -EINVAL;
15333 if (irqchip_in_kernel(vcpu->kvm))
15334 return -ENXIO;
15335 -@@ -3497,6 +3501,9 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
15336 +@@ -3499,6 +3503,9 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
15337
15338 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
15339 struct kvm_vcpu *vcpu, u32 access,
15340 @@ -20471,7 +20903,7 @@ index 9cbfc06..943ffa6 100644
15341 struct x86_exception *exception)
15342 {
15343 void *data = val;
15344 -@@ -3528,6 +3535,9 @@ out:
15345 +@@ -3530,6 +3537,9 @@ out:
15346 /* used for instruction fetching */
15347 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
15348 gva_t addr, void *val, unsigned int bytes,
15349 @@ -20481,7 +20913,7 @@ index 9cbfc06..943ffa6 100644
15350 struct x86_exception *exception)
15351 {
15352 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
15353 -@@ -3552,6 +3562,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
15354 +@@ -3554,6 +3564,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
15355
15356 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
15357 gva_t addr, void *val, unsigned int bytes,
15358 @@ -20491,7 +20923,7 @@ index 9cbfc06..943ffa6 100644
15359 struct x86_exception *exception)
15360 {
15361 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
15362 -@@ -3665,12 +3678,16 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
15363 +@@ -3667,12 +3680,16 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
15364 }
15365
15366 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
15367 @@ -20508,7 +20940,7 @@ index 9cbfc06..943ffa6 100644
15368 void *val, int bytes)
15369 {
15370 return emulator_write_phys(vcpu, gpa, val, bytes);
15371 -@@ -3821,6 +3838,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
15372 +@@ -3823,6 +3840,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
15373 const void *old,
15374 const void *new,
15375 unsigned int bytes,
15376 @@ -20521,7 +20953,7 @@ index 9cbfc06..943ffa6 100644
15377 struct x86_exception *exception)
15378 {
15379 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
15380 -@@ -4780,7 +4803,7 @@ static void kvm_set_mmio_spte_mask(void)
15381 +@@ -4782,7 +4805,7 @@ static void kvm_set_mmio_spte_mask(void)
15382 kvm_mmu_set_mmio_spte_mask(mask);
15383 }
15384
15385 @@ -20906,7 +21338,7 @@ index e8e7e0d..56fd1b0 100644
15386 movl %eax, (v)
15387 movl %edx, 4(v)
15388 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
15389 -index 391a083..d658e9f 100644
15390 +index 391a083..3a2cf39 100644
15391 --- a/arch/x86/lib/atomic64_cx8_32.S
15392 +++ b/arch/x86/lib/atomic64_cx8_32.S
15393 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
15394 @@ -21017,7 +21449,7 @@ index 391a083..d658e9f 100644
15395
15396 -.macro incdec_return func ins insc
15397 -ENTRY(atomic64_\func\()_return_cx8)
15398 -+.macro incdec_return func ins insc unchecked
15399 ++.macro incdec_return func ins insc unchecked=""
15400 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
15401 CFI_STARTPROC
15402 SAVE ebx
15403 @@ -24310,7 +24742,7 @@ index f4f29b1..5cac4fb 100644
15404
15405 return (void *)vaddr;
15406 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
15407 -index 8ecbb4b..29efd37 100644
15408 +index 8ecbb4b..a269cab 100644
15409 --- a/arch/x86/mm/hugetlbpage.c
15410 +++ b/arch/x86/mm/hugetlbpage.c
15411 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
15412 @@ -24386,7 +24818,7 @@ index 8ecbb4b..29efd37 100644
15413
15414 /* don't allow allocations above current base */
15415 if (mm->free_area_cache > base)
15416 -@@ -321,66 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
15417 +@@ -321,14 +328,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
15418 largest_hole = 0;
15419 mm->free_area_cache = base;
15420 }
15421 @@ -24401,16 +24833,10 @@ index 8ecbb4b..29efd37 100644
15422 + addr = (mm->free_area_cache - len);
15423 do {
15424 + addr &= huge_page_mask(h);
15425 -+ vma = find_vma(mm, addr);
15426 /*
15427 * Lookup failure means no vma is above this address,
15428 * i.e. return with success:
15429 -- */
15430 -- vma = find_vma(mm, addr);
15431 -- if (!vma)
15432 -- return addr;
15433 --
15434 -- /*
15435 +@@ -341,46 +349,47 @@ try_again:
15436 * new region fits between prev_vma->vm_end and
15437 * vma->vm_start, use it:
15438 */
15439 @@ -24483,7 +24909,7 @@ index 8ecbb4b..29efd37 100644
15440 mm->cached_hole_size = ~0UL;
15441 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
15442 len, pgoff, flags);
15443 -@@ -388,6 +392,7 @@ fail:
15444 +@@ -388,6 +397,7 @@ fail:
15445 /*
15446 * Restore the topdown base:
15447 */
15448 @@ -24491,7 +24917,7 @@ index 8ecbb4b..29efd37 100644
15449 mm->free_area_cache = base;
15450 mm->cached_hole_size = ~0UL;
15451
15452 -@@ -401,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
15453 +@@ -401,10 +411,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
15454 struct hstate *h = hstate_file(file);
15455 struct mm_struct *mm = current->mm;
15456 struct vm_area_struct *vma;
15457 @@ -24512,7 +24938,7 @@ index 8ecbb4b..29efd37 100644
15458 return -ENOMEM;
15459
15460 if (flags & MAP_FIXED) {
15461 -@@ -416,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
15462 +@@ -416,8 +435,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
15463 if (addr) {
15464 addr = ALIGN(addr, huge_page_size(h));
15465 vma = find_vma(mm, addr);
15466 @@ -24940,7 +25366,7 @@ index 8663f6c..829ae76 100644
15467 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
15468 size >> 10);
15469 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
15470 -index 436a030..2b60088 100644
15471 +index 436a030..4f97ffc 100644
15472 --- a/arch/x86/mm/init_64.c
15473 +++ b/arch/x86/mm/init_64.c
15474 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
15475 @@ -25057,6 +25483,15 @@ index 436a030..2b60088 100644
15476 adr = (void *)(((unsigned long)adr) | left);
15477
15478 return adr;
15479 +@@ -546,7 +560,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
15480 + unmap_low_page(pmd);
15481 +
15482 + spin_lock(&init_mm.page_table_lock);
15483 +- pud_populate(&init_mm, pud, __va(pmd_phys));
15484 ++ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
15485 + spin_unlock(&init_mm.page_table_lock);
15486 + }
15487 + __flush_tlb_all();
15488 @@ -592,7 +606,7 @@ kernel_physical_mapping_init(unsigned long start,
15489 unmap_low_page(pud);
15490
15491 @@ -26837,10 +27272,10 @@ index 153407c..611cba9 100644
15492 -}
15493 -__setup("vdso=", vdso_setup);
15494 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
15495 -index 4172af8..2c8ed7f 100644
15496 +index 4e517d4..68a48f5 100644
15497 --- a/arch/x86/xen/enlighten.c
15498 +++ b/arch/x86/xen/enlighten.c
15499 -@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
15500 +@@ -86,8 +86,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
15501
15502 struct shared_info xen_dummy_shared_info;
15503
15504 @@ -26849,7 +27284,7 @@ index 4172af8..2c8ed7f 100644
15505 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
15506 __read_mostly int xen_have_vector_callback;
15507 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
15508 -@@ -1029,30 +1027,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
15509 +@@ -1030,30 +1028,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
15510 #endif
15511 };
15512
15513 @@ -26887,7 +27322,7 @@ index 4172af8..2c8ed7f 100644
15514 {
15515 if (pm_power_off)
15516 pm_power_off();
15517 -@@ -1155,7 +1153,17 @@ asmlinkage void __init xen_start_kernel(void)
15518 +@@ -1156,7 +1154,17 @@ asmlinkage void __init xen_start_kernel(void)
15519 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
15520
15521 /* Work out if we support NX */
15522 @@ -26906,7 +27341,7 @@ index 4172af8..2c8ed7f 100644
15523
15524 xen_setup_features();
15525
15526 -@@ -1186,13 +1194,6 @@ asmlinkage void __init xen_start_kernel(void)
15527 +@@ -1187,13 +1195,6 @@ asmlinkage void __init xen_start_kernel(void)
15528
15529 machine_ops = xen_machine_ops;
15530
15531 @@ -26921,10 +27356,10 @@ index 4172af8..2c8ed7f 100644
15532
15533 #ifdef CONFIG_ACPI_NUMA
15534 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
15535 -index 95c1cf6..4bfa5be 100644
15536 +index dc19347..1b07a2c 100644
15537 --- a/arch/x86/xen/mmu.c
15538 +++ b/arch/x86/xen/mmu.c
15539 -@@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
15540 +@@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
15541 convert_pfn_mfn(init_level4_pgt);
15542 convert_pfn_mfn(level3_ident_pgt);
15543 convert_pfn_mfn(level3_kernel_pgt);
15544 @@ -26934,7 +27369,7 @@ index 95c1cf6..4bfa5be 100644
15545
15546 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
15547 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
15548 -@@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
15549 +@@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
15550 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
15551 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
15552 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
15553 @@ -26946,7 +27381,7 @@ index 95c1cf6..4bfa5be 100644
15554 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
15555 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
15556
15557 -@@ -1958,6 +1965,7 @@ static void __init xen_post_allocator_init(void)
15558 +@@ -1963,6 +1970,7 @@ static void __init xen_post_allocator_init(void)
15559 pv_mmu_ops.set_pud = xen_set_pud;
15560 #if PAGETABLE_LEVELS == 4
15561 pv_mmu_ops.set_pgd = xen_set_pgd;
15562 @@ -26954,7 +27389,7 @@ index 95c1cf6..4bfa5be 100644
15563 #endif
15564
15565 /* This will work as long as patching hasn't happened yet
15566 -@@ -2039,6 +2047,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
15567 +@@ -2044,6 +2052,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
15568 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
15569 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
15570 .set_pgd = xen_set_pgd_hyper,
15571 @@ -46851,10 +47286,10 @@ index 5698746..6086012 100644
15572 kfree(s);
15573 }
15574 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
15575 -index 3645cd3..786809c 100644
15576 +index c60267e..193d9e4 100644
15577 --- a/fs/hugetlbfs/inode.c
15578 +++ b/fs/hugetlbfs/inode.c
15579 -@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs_fs_type = {
15580 +@@ -902,7 +902,7 @@ static struct file_system_type hugetlbfs_fs_type = {
15581 .kill_sb = kill_litter_super,
15582 };
15583
15584 @@ -47597,7 +48032,7 @@ index f649fba..236bf92 100644
15585
15586 void nfs_fattr_init(struct nfs_fattr *fattr)
15587 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
15588 -index edf6d3e..bdd1da7 100644
15589 +index b96fe94..a4dbece 100644
15590 --- a/fs/nfsd/vfs.c
15591 +++ b/fs/nfsd/vfs.c
15592 @@ -925,7 +925,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
15593 @@ -49831,10 +50266,10 @@ index ab30253..4d86958 100644
15594 kfree(s);
15595 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
15596 new file mode 100644
15597 -index 0000000..4089e05
15598 +index 0000000..2645296
15599 --- /dev/null
15600 +++ b/grsecurity/Kconfig
15601 -@@ -0,0 +1,1078 @@
15602 +@@ -0,0 +1,1079 @@
15603 +#
15604 +# grecurity configuration
15605 +#
15606 @@ -49969,7 +50404,7 @@ index 0000000..4089e05
15607 + select GRKERNSEC_PROC_ADD
15608 + select GRKERNSEC_CHROOT_CHMOD
15609 + select GRKERNSEC_CHROOT_NICE
15610 -+ select GRKERNSEC_SETXID
15611 ++ select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS)
15612 + select GRKERNSEC_AUDIT_MOUNT
15613 + select GRKERNSEC_MODHARDEN if (MODULES)
15614 + select GRKERNSEC_HARDEN_PTRACE
15615 @@ -50664,6 +51099,7 @@ index 0000000..4089e05
15616 +
15617 +config GRKERNSEC_SETXID
15618 + bool "Enforce consistent multithreaded privileges"
15619 ++ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
15620 + help
15621 + If you say Y here, a change from a root uid to a non-root uid
15622 + in a multithreaded application will cause the resulting uids,
15623 @@ -50959,10 +51395,10 @@ index 0000000..1b9afa9
15624 +endif
15625 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
15626 new file mode 100644
15627 -index 0000000..42813ac
15628 +index 0000000..a6d83f0
15629 --- /dev/null
15630 +++ b/grsecurity/gracl.c
15631 -@@ -0,0 +1,4192 @@
15632 +@@ -0,0 +1,4193 @@
15633 +#include <linux/kernel.h>
15634 +#include <linux/module.h>
15635 +#include <linux/sched.h>
15636 @@ -54820,21 +55256,22 @@ index 0000000..42813ac
15637 + if (unlikely(!(gr_status & GR_READY)))
15638 + return 0;
15639 +#endif
15640 ++ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
15641 ++ read_lock(&tasklist_lock);
15642 ++ while (tmp->pid > 0) {
15643 ++ if (tmp == curtemp)
15644 ++ break;
15645 ++ tmp = tmp->real_parent;
15646 ++ }
15647 +
15648 -+ read_lock(&tasklist_lock);
15649 -+ while (tmp->pid > 0) {
15650 -+ if (tmp == curtemp)
15651 -+ break;
15652 -+ tmp = tmp->real_parent;
15653 -+ }
15654 -+
15655 -+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
15656 -+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
15657 ++ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
15658 ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
15659 ++ read_unlock(&tasklist_lock);
15660 ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
15661 ++ return 1;
15662 ++ }
15663 + read_unlock(&tasklist_lock);
15664 -+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
15665 -+ return 1;
15666 + }
15667 -+ read_unlock(&tasklist_lock);
15668 +
15669 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
15670 + if (!(gr_status & GR_READY))
15671 @@ -62544,7 +62981,7 @@ index 9c07dce..a92fa71 100644
15672 if (atomic_sub_and_test((int) count, &kref->refcount)) {
15673 release(kref);
15674 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
15675 -index bc21720..098aefa 100644
15676 +index 4c4e83d..5f16617 100644
15677 --- a/include/linux/kvm_host.h
15678 +++ b/include/linux/kvm_host.h
15679 @@ -326,7 +326,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
15680 @@ -63114,7 +63551,7 @@ index ffc0213..2c1f2cb 100644
15681 return nd->saved_names[nd->depth];
15682 }
15683 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
15684 -index 4f3b01a..8256d1a 100644
15685 +index 7e472b7..212d381 100644
15686 --- a/include/linux/netdevice.h
15687 +++ b/include/linux/netdevice.h
15688 @@ -1002,6 +1002,7 @@ struct net_device_ops {
15689 @@ -66076,7 +66513,7 @@ index 42e8fa0..9e7406b 100644
15690 return -ENOMEM;
15691
15692 diff --git a/kernel/cred.c b/kernel/cred.c
15693 -index 48c6fd3..3342f00 100644
15694 +index 48c6fd3..8398912 100644
15695 --- a/kernel/cred.c
15696 +++ b/kernel/cred.c
15697 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
15698 @@ -66113,7 +66550,7 @@ index 48c6fd3..3342f00 100644
15699 /* dumpability changes */
15700 if (old->euid != new->euid ||
15701 old->egid != new->egid ||
15702 -@@ -540,6 +551,92 @@ int commit_creds(struct cred *new)
15703 +@@ -540,6 +551,101 @@ int commit_creds(struct cred *new)
15704 put_cred(old);
15705 return 0;
15706 }
15707 @@ -66179,6 +66616,8 @@ index 48c6fd3..3342f00 100644
15708 +int commit_creds(struct cred *new)
15709 +{
15710 +#ifdef CONFIG_GRKERNSEC_SETXID
15711 ++ int ret;
15712 ++ int schedule_it = 0;
15713 + struct task_struct *t;
15714 +
15715 + /* we won't get called with tasklist_lock held for writing
15716 @@ -66187,20 +66626,27 @@ index 48c6fd3..3342f00 100644
15717 + */
15718 + if (grsec_enable_setxid && !current_is_single_threaded() &&
15719 + !current_uid() && new->uid) {
15720 ++ schedule_it = 1;
15721 ++ }
15722 ++ ret = __commit_creds(new);
15723 ++ if (schedule_it) {
15724 + rcu_read_lock();
15725 + read_lock(&tasklist_lock);
15726 + for (t = next_thread(current); t != current;
15727 + t = next_thread(t)) {
15728 + if (t->delayed_cred == NULL) {
15729 + t->delayed_cred = get_cred(new);
15730 ++ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
15731 + set_tsk_need_resched(t);
15732 + }
15733 + }
15734 + read_unlock(&tasklist_lock);
15735 + rcu_read_unlock();
15736 + }
15737 -+#endif
15738 ++ return ret;
15739 ++#else
15740 + return __commit_creds(new);
15741 ++#endif
15742 +}
15743 +
15744 EXPORT_SYMBOL(commit_creds);
15745 @@ -69073,39 +69519,10 @@ index e8a1f83..363d17d 100644
15746 #ifdef CONFIG_RT_GROUP_SCHED
15747 /*
15748 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
15749 -index 478a04c..6970d99 100644
15750 +index 478a04c..e16339a 100644
15751 --- a/kernel/sched/core.c
15752 +++ b/kernel/sched/core.c
15753 -@@ -3142,6 +3142,19 @@ pick_next_task(struct rq *rq)
15754 - BUG(); /* the idle class will always have a runnable task */
15755 - }
15756 -
15757 -+#ifdef CONFIG_GRKERNSEC_SETXID
15758 -+extern void gr_delayed_cred_worker(void);
15759 -+static inline void gr_cred_schedule(void)
15760 -+{
15761 -+ if (unlikely(current->delayed_cred))
15762 -+ gr_delayed_cred_worker();
15763 -+}
15764 -+#else
15765 -+static inline void gr_cred_schedule(void)
15766 -+{
15767 -+}
15768 -+#endif
15769 -+
15770 - /*
15771 - * __schedule() is the main scheduler function.
15772 - */
15773 -@@ -3161,6 +3174,8 @@ need_resched:
15774 -
15775 - schedule_debug(prev);
15776 -
15777 -+ gr_cred_schedule();
15778 -+
15779 - if (sched_feat(HRTICK))
15780 - hrtick_clear(rq);
15781 -
15782 -@@ -3851,6 +3866,8 @@ int can_nice(const struct task_struct *p, const int nice)
15783 +@@ -3851,6 +3851,8 @@ int can_nice(const struct task_struct *p, const int nice)
15784 /* convert nice value [19,-20] to rlimit style value [1,40] */
15785 int nice_rlim = 20 - nice;
15786
15787 @@ -69114,7 +69531,7 @@ index 478a04c..6970d99 100644
15788 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
15789 capable(CAP_SYS_NICE));
15790 }
15791 -@@ -3884,7 +3901,8 @@ SYSCALL_DEFINE1(nice, int, increment)
15792 +@@ -3884,7 +3886,8 @@ SYSCALL_DEFINE1(nice, int, increment)
15793 if (nice > 19)
15794 nice = 19;
15795
15796 @@ -69124,7 +69541,7 @@ index 478a04c..6970d99 100644
15797 return -EPERM;
15798
15799 retval = security_task_setnice(current, nice);
15800 -@@ -4041,6 +4059,7 @@ recheck:
15801 +@@ -4041,6 +4044,7 @@ recheck:
15802 unsigned long rlim_rtprio =
15803 task_rlimit(p, RLIMIT_RTPRIO);
15804
15805 @@ -70448,6 +70865,28 @@ index 013a761..c28f3fc 100644
15806 #define free(a) kfree(a)
15807 #endif
15808
15809 +diff --git a/lib/ioremap.c b/lib/ioremap.c
15810 +index da4e2ad..6373b5f 100644
15811 +--- a/lib/ioremap.c
15812 ++++ b/lib/ioremap.c
15813 +@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
15814 + unsigned long next;
15815 +
15816 + phys_addr -= addr;
15817 +- pmd = pmd_alloc(&init_mm, pud, addr);
15818 ++ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
15819 + if (!pmd)
15820 + return -ENOMEM;
15821 + do {
15822 +@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
15823 + unsigned long next;
15824 +
15825 + phys_addr -= addr;
15826 +- pud = pud_alloc(&init_mm, pgd, addr);
15827 ++ pud = pud_alloc_kernel(&init_mm, pgd, addr);
15828 + if (!pud)
15829 + return -ENOMEM;
15830 + do {
15831 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
15832 index bd2bea9..6b3c95e 100644
15833 --- a/lib/is_single_threaded.c
15834 @@ -70677,10 +71116,10 @@ index 8f7fc39..69bf1e9 100644
15835 /* if an huge pmd materialized from under us just retry later */
15836 if (unlikely(pmd_trans_huge(*pmd)))
15837 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
15838 -index a7cf829..d60e0e1 100644
15839 +index 24b1787..e0fbc01 100644
15840 --- a/mm/hugetlb.c
15841 +++ b/mm/hugetlb.c
15842 -@@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
15843 +@@ -2425,6 +2425,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
15844 return 1;
15845 }
15846
15847 @@ -70708,7 +71147,7 @@ index a7cf829..d60e0e1 100644
15848 /*
15849 * Hugetlb_cow() should be called with page lock of the original hugepage held.
15850 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
15851 -@@ -2459,6 +2480,11 @@ retry_avoidcopy:
15852 +@@ -2538,6 +2559,11 @@ retry_avoidcopy:
15853 make_huge_pte(vma, new_page, 1));
15854 page_remove_rmap(old_page);
15855 hugepage_add_new_anon_rmap(new_page, vma, address);
15856 @@ -70720,7 +71159,7 @@ index a7cf829..d60e0e1 100644
15857 /* Make the old page be freed below */
15858 new_page = old_page;
15859 mmu_notifier_invalidate_range_end(mm,
15860 -@@ -2613,6 +2639,10 @@ retry:
15861 +@@ -2692,6 +2718,10 @@ retry:
15862 && (vma->vm_flags & VM_SHARED)));
15863 set_huge_pte_at(mm, address, ptep, new_pte);
15864
15865 @@ -70731,7 +71170,7 @@ index a7cf829..d60e0e1 100644
15866 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
15867 /* Optimization, do the COW without a second fault */
15868 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
15869 -@@ -2642,6 +2672,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
15870 +@@ -2721,6 +2751,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
15871 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
15872 struct hstate *h = hstate_vma(vma);
15873
15874 @@ -70742,7 +71181,7 @@ index a7cf829..d60e0e1 100644
15875 address &= huge_page_mask(h);
15876
15877 ptep = huge_pte_offset(mm, address);
15878 -@@ -2655,6 +2689,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
15879 +@@ -2734,6 +2768,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
15880 VM_FAULT_SET_HINDEX(h - hstates);
15881 }
15882
15883 @@ -70982,7 +71421,7 @@ index 56080ea..115071e 100644
15884 /* keep elevated page count for bad page */
15885 return ret;
15886 diff --git a/mm/memory.c b/mm/memory.c
15887 -index 10b4dda..b1f60ad 100644
15888 +index 10b4dda..06857f3 100644
15889 --- a/mm/memory.c
15890 +++ b/mm/memory.c
15891 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
15892 @@ -71109,7 +71548,29 @@ index 10b4dda..b1f60ad 100644
15893
15894 if (addr < vma->vm_start || addr >= vma->vm_end)
15895 return -EFAULT;
15896 -@@ -2472,6 +2485,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
15897 +@@ -2364,7 +2377,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
15898 +
15899 + BUG_ON(pud_huge(*pud));
15900 +
15901 +- pmd = pmd_alloc(mm, pud, addr);
15902 ++ pmd = (mm == &init_mm) ?
15903 ++ pmd_alloc_kernel(mm, pud, addr) :
15904 ++ pmd_alloc(mm, pud, addr);
15905 + if (!pmd)
15906 + return -ENOMEM;
15907 + do {
15908 +@@ -2384,7 +2399,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
15909 + unsigned long next;
15910 + int err;
15911 +
15912 +- pud = pud_alloc(mm, pgd, addr);
15913 ++ pud = (mm == &init_mm) ?
15914 ++ pud_alloc_kernel(mm, pgd, addr) :
15915 ++ pud_alloc(mm, pgd, addr);
15916 + if (!pud)
15917 + return -ENOMEM;
15918 + do {
15919 +@@ -2472,6 +2489,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
15920 copy_user_highpage(dst, src, va, vma);
15921 }
15922
15923 @@ -71296,7 +71757,7 @@ index 10b4dda..b1f60ad 100644
15924 /*
15925 * This routine handles present pages, when users try to write
15926 * to a shared page. It is done by copying the page to a new address
15927 -@@ -2683,6 +2876,12 @@ gotten:
15928 +@@ -2683,6 +2880,12 @@ gotten:
15929 */
15930 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
15931 if (likely(pte_same(*page_table, orig_pte))) {
15932 @@ -71309,7 +71770,7 @@ index 10b4dda..b1f60ad 100644
15933 if (old_page) {
15934 if (!PageAnon(old_page)) {
15935 dec_mm_counter_fast(mm, MM_FILEPAGES);
15936 -@@ -2734,6 +2933,10 @@ gotten:
15937 +@@ -2734,6 +2937,10 @@ gotten:
15938 page_remove_rmap(old_page);
15939 }
15940
15941 @@ -71320,7 +71781,7 @@ index 10b4dda..b1f60ad 100644
15942 /* Free the old page.. */
15943 new_page = old_page;
15944 ret |= VM_FAULT_WRITE;
15945 -@@ -3013,6 +3216,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
15946 +@@ -3013,6 +3220,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
15947 swap_free(entry);
15948 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
15949 try_to_free_swap(page);
15950 @@ -71332,7 +71793,7 @@ index 10b4dda..b1f60ad 100644
15951 unlock_page(page);
15952 if (swapcache) {
15953 /*
15954 -@@ -3036,6 +3244,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
15955 +@@ -3036,6 +3248,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
15956
15957 /* No need to invalidate - it was non-present before */
15958 update_mmu_cache(vma, address, page_table);
15959 @@ -71344,7 +71805,7 @@ index 10b4dda..b1f60ad 100644
15960 unlock:
15961 pte_unmap_unlock(page_table, ptl);
15962 out:
15963 -@@ -3055,40 +3268,6 @@ out_release:
15964 +@@ -3055,40 +3272,6 @@ out_release:
15965 }
15966
15967 /*
15968 @@ -71385,7 +71846,7 @@ index 10b4dda..b1f60ad 100644
15969 * We enter with non-exclusive mmap_sem (to exclude vma changes,
15970 * but allow concurrent faults), and pte mapped but not yet locked.
15971 * We return with mmap_sem still held, but pte unmapped and unlocked.
15972 -@@ -3097,27 +3276,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
15973 +@@ -3097,27 +3280,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
15974 unsigned long address, pte_t *page_table, pmd_t *pmd,
15975 unsigned int flags)
15976 {
15977 @@ -71418,7 +71879,7 @@ index 10b4dda..b1f60ad 100644
15978 if (unlikely(anon_vma_prepare(vma)))
15979 goto oom;
15980 page = alloc_zeroed_user_highpage_movable(vma, address);
15981 -@@ -3136,6 +3311,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
15982 +@@ -3136,6 +3315,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
15983 if (!pte_none(*page_table))
15984 goto release;
15985
15986 @@ -71430,7 +71891,7 @@ index 10b4dda..b1f60ad 100644
15987 inc_mm_counter_fast(mm, MM_ANONPAGES);
15988 page_add_new_anon_rmap(page, vma, address);
15989 setpte:
15990 -@@ -3143,6 +3323,12 @@ setpte:
15991 +@@ -3143,6 +3327,12 @@ setpte:
15992
15993 /* No need to invalidate - it was non-present before */
15994 update_mmu_cache(vma, address, page_table);
15995 @@ -71443,7 +71904,7 @@ index 10b4dda..b1f60ad 100644
15996 unlock:
15997 pte_unmap_unlock(page_table, ptl);
15998 return 0;
15999 -@@ -3286,6 +3472,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
16000 +@@ -3286,6 +3476,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
16001 */
16002 /* Only go through if we didn't race with anybody else... */
16003 if (likely(pte_same(*page_table, orig_pte))) {
16004 @@ -71456,7 +71917,7 @@ index 10b4dda..b1f60ad 100644
16005 flush_icache_page(vma, page);
16006 entry = mk_pte(page, vma->vm_page_prot);
16007 if (flags & FAULT_FLAG_WRITE)
16008 -@@ -3305,6 +3497,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
16009 +@@ -3305,6 +3501,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
16010
16011 /* no need to invalidate: a not-present page won't be cached */
16012 update_mmu_cache(vma, address, page_table);
16013 @@ -71471,7 +71932,7 @@ index 10b4dda..b1f60ad 100644
16014 } else {
16015 if (cow_page)
16016 mem_cgroup_uncharge_page(cow_page);
16017 -@@ -3458,6 +3658,12 @@ int handle_pte_fault(struct mm_struct *mm,
16018 +@@ -3458,6 +3662,12 @@ int handle_pte_fault(struct mm_struct *mm,
16019 if (flags & FAULT_FLAG_WRITE)
16020 flush_tlb_fix_spurious_fault(vma, address);
16021 }
16022 @@ -71484,7 +71945,7 @@ index 10b4dda..b1f60ad 100644
16023 unlock:
16024 pte_unmap_unlock(pte, ptl);
16025 return 0;
16026 -@@ -3474,6 +3680,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
16027 +@@ -3474,6 +3684,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
16028 pmd_t *pmd;
16029 pte_t *pte;
16030
16031 @@ -71495,7 +71956,7 @@ index 10b4dda..b1f60ad 100644
16032 __set_current_state(TASK_RUNNING);
16033
16034 count_vm_event(PGFAULT);
16035 -@@ -3485,6 +3695,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
16036 +@@ -3485,6 +3699,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
16037 if (unlikely(is_vm_hugetlb_page(vma)))
16038 return hugetlb_fault(mm, vma, address, flags);
16039
16040 @@ -71530,7 +71991,7 @@ index 10b4dda..b1f60ad 100644
16041 pgd = pgd_offset(mm, address);
16042 pud = pud_alloc(mm, pgd, address);
16043 if (!pud)
16044 -@@ -3514,7 +3752,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
16045 +@@ -3514,7 +3756,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
16046 * run pte_offset_map on the pmd, if an huge pmd could
16047 * materialize from under us from a different thread.
16048 */
16049 @@ -71539,7 +72000,7 @@ index 10b4dda..b1f60ad 100644
16050 return VM_FAULT_OOM;
16051 /* if an huge pmd materialized from under us just retry later */
16052 if (unlikely(pmd_trans_huge(*pmd)))
16053 -@@ -3551,6 +3789,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
16054 +@@ -3551,6 +3793,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
16055 spin_unlock(&mm->page_table_lock);
16056 return 0;
16057 }
16058 @@ -71563,7 +72024,7 @@ index 10b4dda..b1f60ad 100644
16059 #endif /* __PAGETABLE_PUD_FOLDED */
16060
16061 #ifndef __PAGETABLE_PMD_FOLDED
16062 -@@ -3581,6 +3836,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
16063 +@@ -3581,6 +3840,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
16064 spin_unlock(&mm->page_table_lock);
16065 return 0;
16066 }
16067 @@ -71594,7 +72055,7 @@ index 10b4dda..b1f60ad 100644
16068 #endif /* __PAGETABLE_PMD_FOLDED */
16069
16070 int make_pages_present(unsigned long addr, unsigned long end)
16071 -@@ -3618,7 +3897,7 @@ static int __init gate_vma_init(void)
16072 +@@ -3618,7 +3901,7 @@ static int __init gate_vma_init(void)
16073 gate_vma.vm_start = FIXADDR_USER_START;
16074 gate_vma.vm_end = FIXADDR_USER_END;
16075 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
16076 @@ -75428,7 +75889,7 @@ index 68bbf9f..5ef0d12 100644
16077
16078 return err;
16079 diff --git a/net/core/dev.c b/net/core/dev.c
16080 -index 7f72c9c..e29943b 100644
16081 +index 0336374..659088a 100644
16082 --- a/net/core/dev.c
16083 +++ b/net/core/dev.c
16084 @@ -1138,10 +1138,14 @@ void dev_load(struct net *net, const char *name)
16085 @@ -75446,7 +75907,7 @@ index 7f72c9c..e29943b 100644
16086 }
16087 }
16088 EXPORT_SYMBOL(dev_load);
16089 -@@ -1585,7 +1589,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
16090 +@@ -1605,7 +1609,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
16091 {
16092 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
16093 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
16094 @@ -75455,7 +75916,7 @@ index 7f72c9c..e29943b 100644
16095 kfree_skb(skb);
16096 return NET_RX_DROP;
16097 }
16098 -@@ -1595,7 +1599,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
16099 +@@ -1615,7 +1619,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
16100 nf_reset(skb);
16101
16102 if (unlikely(!is_skb_forwardable(dev, skb))) {
16103 @@ -75464,7 +75925,7 @@ index 7f72c9c..e29943b 100644
16104 kfree_skb(skb);
16105 return NET_RX_DROP;
16106 }
16107 -@@ -2057,7 +2061,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
16108 +@@ -2077,7 +2081,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
16109
16110 struct dev_gso_cb {
16111 void (*destructor)(struct sk_buff *skb);
16112 @@ -75473,7 +75934,7 @@ index 7f72c9c..e29943b 100644
16113
16114 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
16115
16116 -@@ -2913,7 +2917,7 @@ enqueue:
16117 +@@ -2933,7 +2937,7 @@ enqueue:
16118
16119 local_irq_restore(flags);
16120
16121 @@ -75482,7 +75943,7 @@ index 7f72c9c..e29943b 100644
16122 kfree_skb(skb);
16123 return NET_RX_DROP;
16124 }
16125 -@@ -2985,7 +2989,7 @@ int netif_rx_ni(struct sk_buff *skb)
16126 +@@ -3005,7 +3009,7 @@ int netif_rx_ni(struct sk_buff *skb)
16127 }
16128 EXPORT_SYMBOL(netif_rx_ni);
16129
16130 @@ -75491,7 +75952,7 @@ index 7f72c9c..e29943b 100644
16131 {
16132 struct softnet_data *sd = &__get_cpu_var(softnet_data);
16133
16134 -@@ -3273,7 +3277,7 @@ ncls:
16135 +@@ -3293,7 +3297,7 @@ ncls:
16136 if (pt_prev) {
16137 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
16138 } else {
16139 @@ -75500,7 +75961,7 @@ index 7f72c9c..e29943b 100644
16140 kfree_skb(skb);
16141 /* Jamal, now you will not able to escape explaining
16142 * me how you were going to use this. :-)
16143 -@@ -3833,7 +3837,7 @@ void netif_napi_del(struct napi_struct *napi)
16144 +@@ -3853,7 +3857,7 @@ void netif_napi_del(struct napi_struct *napi)
16145 }
16146 EXPORT_SYMBOL(netif_napi_del);
16147
16148 @@ -75509,7 +75970,7 @@ index 7f72c9c..e29943b 100644
16149 {
16150 struct softnet_data *sd = &__get_cpu_var(softnet_data);
16151 unsigned long time_limit = jiffies + 2;
16152 -@@ -5858,7 +5862,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
16153 +@@ -5878,7 +5882,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
16154 } else {
16155 netdev_stats_to_stats64(storage, &dev->stats);
16156 }
16157 @@ -86454,7 +86915,7 @@ index af0f22f..9a7d479 100644
16158 break;
16159 }
16160 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
16161 -index c4ac57e..527711d 100644
16162 +index 7858228..2919715 100644
16163 --- a/virt/kvm/kvm_main.c
16164 +++ b/virt/kvm/kvm_main.c
16165 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
16166 @@ -86466,7 +86927,7 @@ index c4ac57e..527711d 100644
16167
16168 struct kmem_cache *kvm_vcpu_cache;
16169 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
16170 -@@ -2313,7 +2313,7 @@ static void hardware_enable_nolock(void *junk)
16171 +@@ -2318,7 +2318,7 @@ static void hardware_enable_nolock(void *junk)
16172
16173 if (r) {
16174 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
16175 @@ -86475,7 +86936,7 @@ index c4ac57e..527711d 100644
16176 printk(KERN_INFO "kvm: enabling virtualization on "
16177 "CPU%d failed\n", cpu);
16178 }
16179 -@@ -2367,10 +2367,10 @@ static int hardware_enable_all(void)
16180 +@@ -2372,10 +2372,10 @@ static int hardware_enable_all(void)
16181
16182 kvm_usage_count++;
16183 if (kvm_usage_count == 1) {
16184 @@ -86488,7 +86949,7 @@ index c4ac57e..527711d 100644
16185 hardware_disable_all_nolock();
16186 r = -EBUSY;
16187 }
16188 -@@ -2733,7 +2733,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
16189 +@@ -2738,7 +2738,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
16190 kvm_arch_vcpu_put(vcpu);
16191 }
16192
16193 @@ -86497,7 +86958,7 @@ index c4ac57e..527711d 100644
16194 struct module *module)
16195 {
16196 int r;
16197 -@@ -2796,7 +2796,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
16198 +@@ -2801,7 +2801,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
16199 if (!vcpu_align)
16200 vcpu_align = __alignof__(struct kvm_vcpu);
16201 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
16202 @@ -86506,7 +86967,7 @@ index c4ac57e..527711d 100644
16203 if (!kvm_vcpu_cache) {
16204 r = -ENOMEM;
16205 goto out_free_3;
16206 -@@ -2806,9 +2806,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
16207 +@@ -2811,9 +2811,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
16208 if (r)
16209 goto out_free;
16210
16211
16212 diff --git a/3.3.5/4430_grsec-remove-localversion-grsec.patch b/3.3.6/4430_grsec-remove-localversion-grsec.patch
16213 similarity index 100%
16214 rename from 3.3.5/4430_grsec-remove-localversion-grsec.patch
16215 rename to 3.3.6/4430_grsec-remove-localversion-grsec.patch
16216
16217 diff --git a/3.3.5/4435_grsec-mute-warnings.patch b/3.3.6/4435_grsec-mute-warnings.patch
16218 similarity index 100%
16219 rename from 3.3.5/4435_grsec-mute-warnings.patch
16220 rename to 3.3.6/4435_grsec-mute-warnings.patch
16221
16222 diff --git a/3.3.5/4440_grsec-remove-protected-paths.patch b/3.3.6/4440_grsec-remove-protected-paths.patch
16223 similarity index 100%
16224 rename from 3.3.5/4440_grsec-remove-protected-paths.patch
16225 rename to 3.3.6/4440_grsec-remove-protected-paths.patch
16226
16227 diff --git a/3.3.5/4445_grsec-pax-without-grsec.patch b/3.3.6/4445_grsec-pax-without-grsec.patch
16228 similarity index 100%
16229 rename from 3.3.5/4445_grsec-pax-without-grsec.patch
16230 rename to 3.3.6/4445_grsec-pax-without-grsec.patch
16231
16232 diff --git a/3.3.5/4450_grsec-kconfig-default-gids.patch b/3.3.6/4450_grsec-kconfig-default-gids.patch
16233 similarity index 100%
16234 rename from 3.3.5/4450_grsec-kconfig-default-gids.patch
16235 rename to 3.3.6/4450_grsec-kconfig-default-gids.patch
16236
16237 diff --git a/3.3.5/4455_grsec-kconfig-gentoo.patch b/3.3.6/4455_grsec-kconfig-gentoo.patch
16238 similarity index 100%
16239 rename from 3.3.5/4455_grsec-kconfig-gentoo.patch
16240 rename to 3.3.6/4455_grsec-kconfig-gentoo.patch
16241
16242 diff --git a/3.3.5/4460-grsec-kconfig-proc-user.patch b/3.3.6/4460-grsec-kconfig-proc-user.patch
16243 similarity index 100%
16244 rename from 3.3.5/4460-grsec-kconfig-proc-user.patch
16245 rename to 3.3.6/4460-grsec-kconfig-proc-user.patch
16246
16247 diff --git a/3.3.5/4465_selinux-avc_audit-log-curr_ip.patch b/3.3.6/4465_selinux-avc_audit-log-curr_ip.patch
16248 similarity index 100%
16249 rename from 3.3.5/4465_selinux-avc_audit-log-curr_ip.patch
16250 rename to 3.3.6/4465_selinux-avc_audit-log-curr_ip.patch
16251
16252 diff --git a/3.3.5/4470_disable-compat_vdso.patch b/3.3.6/4470_disable-compat_vdso.patch
16253 similarity index 100%
16254 rename from 3.3.5/4470_disable-compat_vdso.patch
16255 rename to 3.3.6/4470_disable-compat_vdso.patch