Gentoo Archives: gentoo-commits

From: "Mike Frysinger (vapier)" <vapier@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] gentoo commit in src/patchsets/gentoo-headers/3.3: 90_all_x32-3.3.patch
Date: Mon, 02 Apr 2012 06:02:44
Message-Id: 20120402060231.51D5A2004B@flycatcher.gentoo.org
1 vapier 12/04/02 06:02:31
2
3 Added: 90_all_x32-3.3.patch
4 Log:
5 add new x32 syscall patch based on v3.4-rc1
6
7 Revision Changes Path
8 1.1 src/patchsets/gentoo-headers/3.3/90_all_x32-3.3.patch
9
10 file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/gentoo-headers/3.3/90_all_x32-3.3.patch?rev=1.1&view=markup
11 plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/gentoo-headers/3.3/90_all_x32-3.3.patch?rev=1.1&content-type=text/plain
12
13 Index: 90_all_x32-3.3.patch
14 ===================================================================
15 created via filtering:
16 git diff v3.3...v3.4-rc1 arch/x86/
17
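     For reference, a minimal sketch of how a filtered diff like the one above
     can be regenerated from an upstream kernel git checkout; the tree path and
     output filename are illustrative, and the tags simply mirror the command
     shown above:

       # sketch only: rebuild the filtered x86 diff from a kernel git tree
       # (assumes a clone of the upstream kernel with the v3.3 and v3.4-rc1 tags)
       cd linux
       git diff v3.3...v3.4-rc1 -- arch/x86/ > 90_all_x32-3.3.patch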
18 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
19 index 209ba12..968dbe2 100644
20 --- a/arch/x86/Makefile
21 +++ b/arch/x86/Makefile
22 @@ -82,6 +82,22 @@ ifdef CONFIG_CC_STACKPROTECTOR
23 endif
24 endif
25
26 +ifdef CONFIG_X86_X32
27 + x32_ld_ok := $(call try-run,\
28 + /bin/echo -e '1: .quad 1b' | \
29 + $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" - && \
30 + $(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMPO" && \
31 + $(LD) -m elf32_x86_64 "$$TMPO" -o "$$TMP",y,n)
32 + ifeq ($(x32_ld_ok),y)
33 + CONFIG_X86_X32_ABI := y
34 + KBUILD_AFLAGS += -DCONFIG_X86_X32_ABI
35 + KBUILD_CFLAGS += -DCONFIG_X86_X32_ABI
36 + else
37 + $(warning CONFIG_X86_X32 enabled but no binutils support)
38 + endif
39 +endif
40 +export CONFIG_X86_X32_ABI
41 +
42 # Don't unroll struct assignments with kmemcheck enabled
43 ifeq ($(CONFIG_KMEMCHECK),y)
44 KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
45 diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
46 index 36ddec6..4be406a 100644
47 --- a/arch/x86/Makefile.um
48 +++ b/arch/x86/Makefile.um
49 @@ -8,15 +8,11 @@ ELF_ARCH := i386
50 ELF_FORMAT := elf32-i386
51 CHECKFLAGS += -D__i386__
52
53 -ifeq ("$(origin SUBARCH)", "command line")
54 -ifneq ("$(shell uname -m | sed -e s/i.86/i386/)", "$(SUBARCH)")
55 KBUILD_CFLAGS += $(call cc-option,-m32)
56 KBUILD_AFLAGS += $(call cc-option,-m32)
57 LINK-y += $(call cc-option,-m32)
58
59 export LDFLAGS
60 -endif
61 -endif
62
63 # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
64 include $(srctree)/arch/x86/Makefile_32.cpu
65 diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
66 index b57e6a4..f9c0d3b 100644
67 --- a/arch/x86/include/asm/Kbuild
68 +++ b/arch/x86/include/asm/Kbuild
69 @@ -14,6 +14,7 @@ header-y += msr.h
70 header-y += mtrr.h
71 header-y += posix_types_32.h
72 header-y += posix_types_64.h
73 +header-y += posix_types_x32.h
74 header-y += prctl.h
75 header-y += processor-flags.h
76 header-y += ptrace-abi.h
77 @@ -24,3 +25,4 @@ header-y += vsyscall.h
78
79 genhdr-y += unistd_32.h
80 genhdr-y += unistd_64.h
81 +genhdr-y += unistd_x32.h
82 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
83 index 37ad100..49331be 100644
84 --- a/arch/x86/include/asm/alternative.h
85 +++ b/arch/x86/include/asm/alternative.h
86 @@ -145,6 +145,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
87 */
88 #define ASM_OUTPUT2(a...) a
89
90 +/*
91 + * use this macro if you need clobbers but no inputs in
92 + * alternative_{input,io,call}()
93 + */
94 +#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
95 +
96 struct paravirt_patch_site;
97 #ifdef CONFIG_PARAVIRT
98 void apply_paravirt(struct paravirt_patch_site *start,
99 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
100 index 3ab9bdd..d854101 100644
101 --- a/arch/x86/include/asm/apic.h
102 +++ b/arch/x86/include/asm/apic.h
103 @@ -11,7 +11,6 @@
104 #include <linux/atomic.h>
105 #include <asm/fixmap.h>
106 #include <asm/mpspec.h>
107 -#include <asm/system.h>
108 #include <asm/msr.h>
109
110 #define ARCH_APICTIMER_STOPS_ON_C3 1
111 @@ -288,6 +287,7 @@ struct apic {
112
113 int (*probe)(void);
114 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
115 + int (*apic_id_valid)(int apicid);
116 int (*apic_id_registered)(void);
117
118 u32 irq_delivery_mode;
119 @@ -532,6 +532,11 @@ static inline unsigned int read_apic_id(void)
120 return apic->get_apic_id(reg);
121 }
122
123 +static inline int default_apic_id_valid(int apicid)
124 +{
125 + return (apicid < 255);
126 +}
127 +
128 extern void default_setup_apic_routing(void);
129
130 extern struct apic apic_noop;
131 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
132 index fa13f0e..1981199 100644
133 --- a/arch/x86/include/asm/atomic64_32.h
134 +++ b/arch/x86/include/asm/atomic64_32.h
135 @@ -14,13 +14,52 @@ typedef struct {
136
137 #define ATOMIC64_INIT(val) { (val) }
138
139 +#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
140 +#ifndef ATOMIC64_EXPORT
141 +#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
142 +#else
143 +#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
144 + ATOMIC64_EXPORT(atomic64_##sym)
145 +#endif
146 +
147 #ifdef CONFIG_X86_CMPXCHG64
148 -#define ATOMIC64_ALTERNATIVE_(f, g) "call atomic64_" #g "_cx8"
149 +#define __alternative_atomic64(f, g, out, in...) \
150 + asm volatile("call %P[func]" \
151 + : out : [func] "i" (atomic64_##g##_cx8), ## in)
152 +
153 +#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
154 #else
155 -#define ATOMIC64_ALTERNATIVE_(f, g) ALTERNATIVE("call atomic64_" #f "_386", "call atomic64_" #g "_cx8", X86_FEATURE_CX8)
156 +#define __alternative_atomic64(f, g, out, in...) \
157 + alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
158 + X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)
159 +
160 +#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
161 + ATOMIC64_DECL_ONE(sym##_386)
162 +
163 +ATOMIC64_DECL_ONE(add_386);
164 +ATOMIC64_DECL_ONE(sub_386);
165 +ATOMIC64_DECL_ONE(inc_386);
166 +ATOMIC64_DECL_ONE(dec_386);
167 #endif
168
169 -#define ATOMIC64_ALTERNATIVE(f) ATOMIC64_ALTERNATIVE_(f, f)
170 +#define alternative_atomic64(f, out, in...) \
171 + __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
172 +
173 +ATOMIC64_DECL(read);
174 +ATOMIC64_DECL(set);
175 +ATOMIC64_DECL(xchg);
176 +ATOMIC64_DECL(add_return);
177 +ATOMIC64_DECL(sub_return);
178 +ATOMIC64_DECL(inc_return);
179 +ATOMIC64_DECL(dec_return);
180 +ATOMIC64_DECL(dec_if_positive);
181 +ATOMIC64_DECL(inc_not_zero);
182 +ATOMIC64_DECL(add_unless);
183 +
184 +#undef ATOMIC64_DECL
185 +#undef ATOMIC64_DECL_ONE
186 +#undef __ATOMIC64_DECL
187 +#undef ATOMIC64_EXPORT
188
189 /**
190 * atomic64_cmpxchg - cmpxchg atomic64 variable
191 @@ -50,11 +89,9 @@ static inline long long atomic64_xchg(atomic64_t *v, long long n)
192 long long o;
193 unsigned high = (unsigned)(n >> 32);
194 unsigned low = (unsigned)n;
195 - asm volatile(ATOMIC64_ALTERNATIVE(xchg)
196 - : "=A" (o), "+b" (low), "+c" (high)
197 - : "S" (v)
198 - : "memory"
199 - );
200 + alternative_atomic64(xchg, "=&A" (o),
201 + "S" (v), "b" (low), "c" (high)
202 + : "memory");
203 return o;
204 }
205
206 @@ -69,11 +106,9 @@ static inline void atomic64_set(atomic64_t *v, long long i)
207 {
208 unsigned high = (unsigned)(i >> 32);
209 unsigned low = (unsigned)i;
210 - asm volatile(ATOMIC64_ALTERNATIVE(set)
211 - : "+b" (low), "+c" (high)
212 - : "S" (v)
213 - : "eax", "edx", "memory"
214 - );
215 + alternative_atomic64(set, /* no output */,
216 + "S" (v), "b" (low), "c" (high)
217 + : "eax", "edx", "memory");
218 }
219
220 /**
221 @@ -85,10 +120,7 @@ static inline void atomic64_set(atomic64_t *v, long long i)
222 static inline long long atomic64_read(const atomic64_t *v)
223 {
224 long long r;
225 - asm volatile(ATOMIC64_ALTERNATIVE(read)
226 - : "=A" (r), "+c" (v)
227 - : : "memory"
228 - );
229 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
230 return r;
231 }
232
233 @@ -101,10 +133,9 @@ static inline long long atomic64_read(const atomic64_t *v)
234 */
235 static inline long long atomic64_add_return(long long i, atomic64_t *v)
236 {
237 - asm volatile(ATOMIC64_ALTERNATIVE(add_return)
238 - : "+A" (i), "+c" (v)
239 - : : "memory"
240 - );
241 + alternative_atomic64(add_return,
242 + ASM_OUTPUT2("+A" (i), "+c" (v)),
243 + ASM_NO_INPUT_CLOBBER("memory"));
244 return i;
245 }
246
247 @@ -113,32 +144,25 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
248 */
249 static inline long long atomic64_sub_return(long long i, atomic64_t *v)
250 {
251 - asm volatile(ATOMIC64_ALTERNATIVE(sub_return)
252 - : "+A" (i), "+c" (v)
253 - : : "memory"
254 - );
255 + alternative_atomic64(sub_return,
256 + ASM_OUTPUT2("+A" (i), "+c" (v)),
257 + ASM_NO_INPUT_CLOBBER("memory"));
258 return i;
259 }
260
261 static inline long long atomic64_inc_return(atomic64_t *v)
262 {
263 long long a;
264 - asm volatile(ATOMIC64_ALTERNATIVE(inc_return)
265 - : "=A" (a)
266 - : "S" (v)
267 - : "memory", "ecx"
268 - );
269 + alternative_atomic64(inc_return, "=&A" (a),
270 + "S" (v) : "memory", "ecx");
271 return a;
272 }
273
274 static inline long long atomic64_dec_return(atomic64_t *v)
275 {
276 long long a;
277 - asm volatile(ATOMIC64_ALTERNATIVE(dec_return)
278 - : "=A" (a)
279 - : "S" (v)
280 - : "memory", "ecx"
281 - );
282 + alternative_atomic64(dec_return, "=&A" (a),
283 + "S" (v) : "memory", "ecx");
284 return a;
285 }
286
287 @@ -151,10 +175,9 @@ static inline long long atomic64_dec_return(atomic64_t *v)
288 */
289 static inline long long atomic64_add(long long i, atomic64_t *v)
290 {
291 - asm volatile(ATOMIC64_ALTERNATIVE_(add, add_return)
292 - : "+A" (i), "+c" (v)
293 - : : "memory"
294 - );
295 + __alternative_atomic64(add, add_return,
296 + ASM_OUTPUT2("+A" (i), "+c" (v)),
297 + ASM_NO_INPUT_CLOBBER("memory"));
298 return i;
299 }
300
301 @@ -167,10 +190,9 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
302 */
303 static inline long long atomic64_sub(long long i, atomic64_t *v)
304 {
305 - asm volatile(ATOMIC64_ALTERNATIVE_(sub, sub_return)
306 - : "+A" (i), "+c" (v)
307 - : : "memory"
308 - );
309 + __alternative_atomic64(sub, sub_return,
310 + ASM_OUTPUT2("+A" (i), "+c" (v)),
311 + ASM_NO_INPUT_CLOBBER("memory"));
312 return i;
313 }
314
315 @@ -196,10 +218,8 @@ static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
316 */
317 static inline void atomic64_inc(atomic64_t *v)
318 {
319 - asm volatile(ATOMIC64_ALTERNATIVE_(inc, inc_return)
320 - : : "S" (v)
321 - : "memory", "eax", "ecx", "edx"
322 - );
323 + __alternative_atomic64(inc, inc_return, /* no output */,
324 + "S" (v) : "memory", "eax", "ecx", "edx");
325 }
326
327 /**
328 @@ -210,10 +230,8 @@ static inline void atomic64_inc(atomic64_t *v)
329 */
330 static inline void atomic64_dec(atomic64_t *v)
331 {
332 - asm volatile(ATOMIC64_ALTERNATIVE_(dec, dec_return)
333 - : : "S" (v)
334 - : "memory", "eax", "ecx", "edx"
335 - );
336 + __alternative_atomic64(dec, dec_return, /* no output */,
337 + "S" (v) : "memory", "eax", "ecx", "edx");
338 }
339
340 /**
341 @@ -263,15 +281,15 @@ static inline int atomic64_add_negative(long long i, atomic64_t *v)
342 * @u: ...unless v is equal to u.
343 *
344 * Atomically adds @a to @v, so long as it was not @u.
345 - * Returns the old value of @v.
346 + * Returns non-zero if the add was done, zero otherwise.
347 */
348 static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
349 {
350 unsigned low = (unsigned)u;
351 unsigned high = (unsigned)(u >> 32);
352 - asm volatile(ATOMIC64_ALTERNATIVE(add_unless) "\n\t"
353 - : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
354 - : : "memory");
355 + alternative_atomic64(add_unless,
356 + ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
357 + "S" (v) : "memory");
358 return (int)a;
359 }
360
361 @@ -279,26 +297,20 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
362 static inline int atomic64_inc_not_zero(atomic64_t *v)
363 {
364 int r;
365 - asm volatile(ATOMIC64_ALTERNATIVE(inc_not_zero)
366 - : "=a" (r)
367 - : "S" (v)
368 - : "ecx", "edx", "memory"
369 - );
370 + alternative_atomic64(inc_not_zero, "=&a" (r),
371 + "S" (v) : "ecx", "edx", "memory");
372 return r;
373 }
374
375 static inline long long atomic64_dec_if_positive(atomic64_t *v)
376 {
377 long long r;
378 - asm volatile(ATOMIC64_ALTERNATIVE(dec_if_positive)
379 - : "=A" (r)
380 - : "S" (v)
381 - : "ecx", "memory"
382 - );
383 + alternative_atomic64(dec_if_positive, "=&A" (r),
384 + "S" (v) : "ecx", "memory");
385 return r;
386 }
387
388 -#undef ATOMIC64_ALTERNATIVE
389 -#undef ATOMIC64_ALTERNATIVE_
390 +#undef alternative_atomic64
391 +#undef __alternative_atomic64
392
393 #endif /* _ASM_X86_ATOMIC64_32_H */
394 diff --git a/arch/x86/include/asm/auxvec.h b/arch/x86/include/asm/auxvec.h
395 index 1316b4c..77203ac 100644
396 --- a/arch/x86/include/asm/auxvec.h
397 +++ b/arch/x86/include/asm/auxvec.h
398 @@ -9,4 +9,11 @@
399 #endif
400 #define AT_SYSINFO_EHDR 33
401
402 +/* entries in ARCH_DLINFO: */
403 +#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
404 +# define AT_VECTOR_SIZE_ARCH 2
405 +#else /* else it's non-compat x86-64 */
406 +# define AT_VECTOR_SIZE_ARCH 1
407 +#endif
408 +
409 #endif /* _ASM_X86_AUXVEC_H */
410 diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
411 new file mode 100644
412 index 0000000..c6cd358
413 --- /dev/null
414 +++ b/arch/x86/include/asm/barrier.h
415 @@ -0,0 +1,116 @@
416 +#ifndef _ASM_X86_BARRIER_H
417 +#define _ASM_X86_BARRIER_H
418 +
419 +#include <asm/alternative.h>
420 +#include <asm/nops.h>
421 +
422 +/*
423 + * Force strict CPU ordering.
424 + * And yes, this is required on UP too when we're talking
425 + * to devices.
426 + */
427 +
428 +#ifdef CONFIG_X86_32
429 +/*
430 + * Some non-Intel clones support out of order store. wmb() ceases to be a
431 + * nop for these.
432 + */
433 +#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
434 +#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
435 +#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
436 +#else
437 +#define mb() asm volatile("mfence":::"memory")
438 +#define rmb() asm volatile("lfence":::"memory")
439 +#define wmb() asm volatile("sfence" ::: "memory")
440 +#endif
441 +
442 +/**
443 + * read_barrier_depends - Flush all pending reads that subsequent reads
444 + * depend on.
445 + *
446 + * No data-dependent reads from memory-like regions are ever reordered
447 + * over this barrier. All reads preceding this primitive are guaranteed
448 + * to access memory (but not necessarily other CPUs' caches) before any
449 + * reads following this primitive that depend on the data returned by
450 + * any of the preceding reads. This primitive is much lighter weight than
451 + * rmb() on most CPUs, and is never heavier weight than is
452 + * rmb().
453 + *
454 + * These ordering constraints are respected by both the local CPU
455 + * and the compiler.
456 + *
457 + * Ordering is not guaranteed by anything other than these primitives,
458 + * not even by data dependencies. See the documentation for
459 + * memory_barrier() for examples and URLs to more information.
460 + *
461 + * For example, the following code would force ordering (the initial
462 + * value of "a" is zero, "b" is one, and "p" is "&a"):
463 + *
464 + * <programlisting>
465 + * CPU 0 CPU 1
466 + *
467 + * b = 2;
468 + * memory_barrier();
469 + * p = &b; q = p;
470 + * read_barrier_depends();
471 + * d = *q;
472 + * </programlisting>
473 + *
474 + * because the read of "*q" depends on the read of "p" and these
475 + * two reads are separated by a read_barrier_depends(). However,
476 + * the following code, with the same initial values for "a" and "b":
477 + *
478 + * <programlisting>
479 + * CPU 0 CPU 1
480 + *
481 + * a = 2;
482 + * memory_barrier();
483 + * b = 3; y = b;
484 + * read_barrier_depends();
485 + * x = a;
486 + * </programlisting>
487 + *
488 + * does not enforce ordering, since there is no data dependency between
489 + * the read of "a" and the read of "b". Therefore, on some CPUs, such
490 + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
491 + * in cases like this where there are no data dependencies.
492 + **/
493 +
494 +#define read_barrier_depends() do { } while (0)
495 +
496 +#ifdef CONFIG_SMP
497 +#define smp_mb() mb()
498 +#ifdef CONFIG_X86_PPRO_FENCE
499 +# define smp_rmb() rmb()
500 +#else
501 +# define smp_rmb() barrier()
502 +#endif
503 +#ifdef CONFIG_X86_OOSTORE
504 +# define smp_wmb() wmb()
505 +#else
506 +# define smp_wmb() barrier()
507 +#endif
508 +#define smp_read_barrier_depends() read_barrier_depends()
509 +#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
510 +#else
511 +#define smp_mb() barrier()
512 +#define smp_rmb() barrier()
513 +#define smp_wmb() barrier()
514 +#define smp_read_barrier_depends() do { } while (0)
515 +#define set_mb(var, value) do { var = value; barrier(); } while (0)
516 +#endif
517 +
518 +/*
519 + * Stop RDTSC speculation. This is needed when you need to use RDTSC
520 + * (or get_cycles or vread that possibly accesses the TSC) in a defined
521 + * code region.
522 + *
523 + * (Could use an alternative three way for this if there was one.)
524 + */
525 +static __always_inline void rdtsc_barrier(void)
526 +{
527 + alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
528 + alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
529 +}
530 +
531 +#endif /* _ASM_X86_BARRIER_H */
532 diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
533 index f654d1b..11e1152 100644
534 --- a/arch/x86/include/asm/bug.h
535 +++ b/arch/x86/include/asm/bug.h
536 @@ -36,4 +36,8 @@ do { \
537 #endif /* !CONFIG_BUG */
538
539 #include <asm-generic/bug.h>
540 +
541 +
542 +extern void show_regs_common(void);
543 +
544 #endif /* _ASM_X86_BUG_H */
545 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
546 index 4e12668..9863ee3 100644
547 --- a/arch/x86/include/asm/cacheflush.h
548 +++ b/arch/x86/include/asm/cacheflush.h
549 @@ -3,6 +3,7 @@
550
551 /* Caches aren't brain-dead on the intel. */
552 #include <asm-generic/cacheflush.h>
553 +#include <asm/special_insns.h>
554
555 #ifdef CONFIG_X86_PAT
556 /*
557 diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
558 index 30d737e..d680579 100644
559 --- a/arch/x86/include/asm/compat.h
560 +++ b/arch/x86/include/asm/compat.h
561 @@ -6,7 +6,9 @@
562 */
563 #include <linux/types.h>
564 #include <linux/sched.h>
565 +#include <asm/processor.h>
566 #include <asm/user32.h>
567 +#include <asm/unistd.h>
568
569 #define COMPAT_USER_HZ 100
570 #define COMPAT_UTS_MACHINE "i686\0\0"
571 @@ -186,7 +188,20 @@ struct compat_shmid64_ds {
572 /*
573 * The type of struct elf_prstatus.pr_reg in compatible core dumps.
574 */
575 +#ifdef CONFIG_X86_X32_ABI
576 +typedef struct user_regs_struct compat_elf_gregset_t;
577 +
578 +#define PR_REG_SIZE(S) (test_thread_flag(TIF_IA32) ? 68 : 216)
579 +#define PRSTATUS_SIZE(S) (test_thread_flag(TIF_IA32) ? 144 : 296)
580 +#define SET_PR_FPVALID(S,V) \
581 + do { *(int *) (((void *) &((S)->pr_reg)) + PR_REG_SIZE(0)) = (V); } \
582 + while (0)
583 +
584 +#define COMPAT_USE_64BIT_TIME \
585 + (!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
586 +#else
587 typedef struct user_regs_struct32 compat_elf_gregset_t;
588 +#endif
589
590 /*
591 * A pointer passed in from user mode. This should not
592 @@ -208,13 +223,30 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
593
594 static inline void __user *arch_compat_alloc_user_space(long len)
595 {
596 - struct pt_regs *regs = task_pt_regs(current);
597 - return (void __user *)regs->sp - len;
598 + compat_uptr_t sp;
599 +
600 + if (test_thread_flag(TIF_IA32)) {
601 + sp = task_pt_regs(current)->sp;
602 + } else {
603 + /* -128 for the x32 ABI redzone */
604 + sp = percpu_read(old_rsp) - 128;
605 + }
606 +
607 + return (void __user *)round_down(sp - len, 16);
608 +}
609 +
610 +static inline bool is_x32_task(void)
611 +{
612 +#ifdef CONFIG_X86_X32_ABI
613 + if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)
614 + return true;
615 +#endif
616 + return false;
617 }
618
619 -static inline int is_compat_task(void)
620 +static inline bool is_compat_task(void)
621 {
622 - return current_thread_info()->status & TS_COMPAT;
623 + return is_ia32_task() || is_x32_task();
624 }
625
626 #endif /* _ASM_X86_COMPAT_H */
627 diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
628 new file mode 100644
629 index 0000000..ff501e5
630 --- /dev/null
631 +++ b/arch/x86/include/asm/cpu_device_id.h
632 @@ -0,0 +1,13 @@
633 +#ifndef _CPU_DEVICE_ID
634 +#define _CPU_DEVICE_ID 1
635 +
636 +/*
637 + * Declare drivers belonging to specific x86 CPUs
638 + * Similar in spirit to pci_device_id and related PCI functions
639 + */
640 +
641 +#include <linux/mod_devicetable.h>
642 +
643 +extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
644 +
645 +#endif
646 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
647 index 8d67d42..340ee49 100644
648 --- a/arch/x86/include/asm/cpufeature.h
649 +++ b/arch/x86/include/asm/cpufeature.h
650 @@ -177,6 +177,7 @@
651 #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
652 #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
653 #define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
654 +#define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */
655
656 /* Virtualization flags: Linux defined, word 8 */
657 #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
658 @@ -199,10 +200,13 @@
659 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
660 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
661 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
662 +#define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
663 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
664 #define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
665 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
666 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
667 +#define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
668 +#define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
669
670 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
671
672 diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
673 index b903d5e..2d91580 100644
674 --- a/arch/x86/include/asm/debugreg.h
675 +++ b/arch/x86/include/asm/debugreg.h
676 @@ -78,8 +78,75 @@
677 */
678 #ifdef __KERNEL__
679
680 +#include <linux/bug.h>
681 +
682 DECLARE_PER_CPU(unsigned long, cpu_dr7);
683
684 +#ifndef CONFIG_PARAVIRT
685 +/*
686 + * These special macros can be used to get or set a debugging register
687 + */
688 +#define get_debugreg(var, register) \
689 + (var) = native_get_debugreg(register)
690 +#define set_debugreg(value, register) \
691 + native_set_debugreg(register, value)
692 +#endif
693 +
694 +static inline unsigned long native_get_debugreg(int regno)
695 +{
696 + unsigned long val = 0; /* Damn you, gcc! */
697 +
698 + switch (regno) {
699 + case 0:
700 + asm("mov %%db0, %0" :"=r" (val));
701 + break;
702 + case 1:
703 + asm("mov %%db1, %0" :"=r" (val));
704 + break;
705 + case 2:
706 + asm("mov %%db2, %0" :"=r" (val));
707 + break;
708 + case 3:
709 + asm("mov %%db3, %0" :"=r" (val));
710 + break;
711 + case 6:
712 + asm("mov %%db6, %0" :"=r" (val));
713 + break;
714 + case 7:
715 + asm("mov %%db7, %0" :"=r" (val));
716 + break;
717 + default:
718 + BUG();
719 + }
720 + return val;
721 +}
722 +
723 +static inline void native_set_debugreg(int regno, unsigned long value)
724 +{
725 + switch (regno) {
726 + case 0:
727 + asm("mov %0, %%db0" ::"r" (value));
728 + break;
729 + case 1:
730 + asm("mov %0, %%db1" ::"r" (value));
731 + break;
732 + case 2:
733 + asm("mov %0, %%db2" ::"r" (value));
734 + break;
735 + case 3:
736 + asm("mov %0, %%db3" ::"r" (value));
737 + break;
738 + case 6:
739 + asm("mov %0, %%db6" ::"r" (value));
740 + break;
741 + case 7:
742 + asm("mov %0, %%db7" ::"r" (value));
743 + break;
744 + default:
745 + BUG();
746 + }
747 +}
748 +
749 static inline void hw_breakpoint_disable(void)
750 {
751 /* Zero the control register for HW Breakpoint */
752 diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
753 index 844f735..c9dcc18 100644
754 --- a/arch/x86/include/asm/efi.h
755 +++ b/arch/x86/include/asm/efi.h
756 @@ -95,7 +95,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
757
758 extern int add_efi_memmap;
759 extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
760 -extern void efi_memblock_x86_reserve_range(void);
761 +extern int efi_memblock_x86_reserve_range(void);
762 extern void efi_call_phys_prelog(void);
763 extern void efi_call_phys_epilog(void);
764
765 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
766 index 5f962df..5939f44 100644
767 --- a/arch/x86/include/asm/elf.h
768 +++ b/arch/x86/include/asm/elf.h
769 @@ -84,7 +84,6 @@ extern unsigned int vdso_enabled;
770 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
771
772 #include <asm/processor.h>
773 -#include <asm/system.h>
774
775 #ifdef CONFIG_X86_32
776 #include <asm/desc.h>
777 @@ -156,7 +155,12 @@ do { \
778 #define elf_check_arch(x) \
779 ((x)->e_machine == EM_X86_64)
780
781 -#define compat_elf_check_arch(x) elf_check_arch_ia32(x)
782 +#define compat_elf_check_arch(x) \
783 + (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
784 +
785 +#if __USER32_DS != __USER_DS
786 +# error "The following code assumes __USER32_DS == __USER_DS"
787 +#endif
788
789 static inline void elf_common_init(struct thread_struct *t,
790 struct pt_regs *regs, const u16 ds)
791 @@ -179,8 +183,9 @@ static inline void elf_common_init(struct thread_struct *t,
792 void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp);
793 #define compat_start_thread start_thread_ia32
794
795 -void set_personality_ia32(void);
796 -#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32()
797 +void set_personality_ia32(bool);
798 +#define COMPAT_SET_PERSONALITY(ex) \
799 + set_personality_ia32((ex).e_machine == EM_X86_64)
800
801 #define COMPAT_ELF_PLATFORM ("i686")
802
803 @@ -287,7 +292,7 @@ do { \
804 #define VDSO_HIGH_BASE 0xffffe000U /* CONFIG_COMPAT_VDSO address */
805
806 /* 1GB for 64bit, 8MB for 32bit */
807 -#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
808 +#define STACK_RND_MASK (test_thread_flag(TIF_ADDR32) ? 0x7ff : 0x3fffff)
809
810 #define ARCH_DLINFO \
811 do { \
812 @@ -296,9 +301,20 @@ do { \
813 (unsigned long)current->mm->context.vdso); \
814 } while (0)
815
816 +#define ARCH_DLINFO_X32 \
817 +do { \
818 + if (vdso_enabled) \
819 + NEW_AUX_ENT(AT_SYSINFO_EHDR, \
820 + (unsigned long)current->mm->context.vdso); \
821 +} while (0)
822 +
823 #define AT_SYSINFO 32
824
825 -#define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(sysctl_vsyscall32)
826 +#define COMPAT_ARCH_DLINFO \
827 +if (test_thread_flag(TIF_X32)) \
828 + ARCH_DLINFO_X32; \
829 +else \
830 + ARCH_DLINFO_IA32(sysctl_vsyscall32)
831
832 #define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
833
834 @@ -314,6 +330,8 @@ struct linux_binprm;
835 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
836 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
837 int uses_interp);
838 +extern int x32_setup_additional_pages(struct linux_binprm *bprm,
839 + int uses_interp);
840
841 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
842 #define compat_arch_setup_additional_pages syscall32_setup_pages
843 @@ -330,7 +348,7 @@ static inline int mmap_is_ia32(void)
844 return 1;
845 #endif
846 #ifdef CONFIG_IA32_EMULATION
847 - if (test_thread_flag(TIF_IA32))
848 + if (test_thread_flag(TIF_ADDR32))
849 return 1;
850 #endif
851 return 0;
852 diff --git a/arch/x86/include/asm/exec.h b/arch/x86/include/asm/exec.h
853 new file mode 100644
854 index 0000000..54c2e1d
855 --- /dev/null
856 +++ b/arch/x86/include/asm/exec.h
857 @@ -0,0 +1 @@
858 +/* define arch_align_stack() here */
859 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
860 new file mode 100644
861 index 0000000..4fa8815
862 --- /dev/null
863 +++ b/arch/x86/include/asm/fpu-internal.h
864 @@ -0,0 +1,520 @@
865 +/*
866 + * Copyright (C) 1994 Linus Torvalds
867 + *
868 + * Pentium III FXSR, SSE support
869 + * General FPU state handling cleanups
870 + * Gareth Hughes <gareth@×××××××.com>, May 2000
871 + * x86-64 work by Andi Kleen 2002
872 + */
873 +
874 +#ifndef _FPU_INTERNAL_H
875 +#define _FPU_INTERNAL_H
876 +
877 +#include <linux/kernel_stat.h>
878 +#include <linux/regset.h>
879 +#include <linux/slab.h>
880 +#include <asm/asm.h>
881 +#include <asm/cpufeature.h>
882 +#include <asm/processor.h>
883 +#include <asm/sigcontext.h>
884 +#include <asm/user.h>
885 +#include <asm/uaccess.h>
886 +#include <asm/xsave.h>
887 +
888 +extern unsigned int sig_xstate_size;
889 +extern void fpu_init(void);
890 +
891 +DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
892 +
893 +extern user_regset_active_fn fpregs_active, xfpregs_active;
894 +extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
895 + xstateregs_get;
896 +extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
897 + xstateregs_set;
898 +
899 +
900 +/*
901 + * xstateregs_active == fpregs_active. Please refer to the comment
902 + * at the definition of fpregs_active.
903 + */
904 +#define xstateregs_active fpregs_active
905 +
906 +extern struct _fpx_sw_bytes fx_sw_reserved;
907 +#ifdef CONFIG_IA32_EMULATION
908 +extern unsigned int sig_xstate_ia32_size;
909 +extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
910 +struct _fpstate_ia32;
911 +struct _xstate_ia32;
912 +extern int save_i387_xstate_ia32(void __user *buf);
913 +extern int restore_i387_xstate_ia32(void __user *buf);
914 +#endif
915 +
916 +#ifdef CONFIG_MATH_EMULATION
917 +extern void finit_soft_fpu(struct i387_soft_struct *soft);
918 +#else
919 +static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
920 +#endif
921 +
922 +#define X87_FSW_ES (1 << 7) /* Exception Summary */
923 +
924 +static __always_inline __pure bool use_xsaveopt(void)
925 +{
926 + return static_cpu_has(X86_FEATURE_XSAVEOPT);
927 +}
928 +
929 +static __always_inline __pure bool use_xsave(void)
930 +{
931 + return static_cpu_has(X86_FEATURE_XSAVE);
932 +}
933 +
934 +static __always_inline __pure bool use_fxsr(void)
935 +{
936 + return static_cpu_has(X86_FEATURE_FXSR);
937 +}
938 +
939 +extern void __sanitize_i387_state(struct task_struct *);
940 +
941 +static inline void sanitize_i387_state(struct task_struct *tsk)
942 +{
943 + if (!use_xsaveopt())
944 + return;
945 + __sanitize_i387_state(tsk);
946 +}
947 +
948 +#ifdef CONFIG_X86_64
949 +static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
950 +{
951 + int err;
952 +
953 + /* See comment in fxsave() below. */
954 +#ifdef CONFIG_AS_FXSAVEQ
955 + asm volatile("1: fxrstorq %[fx]\n\t"
956 + "2:\n"
957 + ".section .fixup,\"ax\"\n"
958 + "3: movl $-1,%[err]\n"
959 + " jmp 2b\n"
960 + ".previous\n"
961 + _ASM_EXTABLE(1b, 3b)
962 + : [err] "=r" (err)
963 + : [fx] "m" (*fx), "0" (0));
964 +#else
965 + asm volatile("1: rex64/fxrstor (%[fx])\n\t"
966 + "2:\n"
967 + ".section .fixup,\"ax\"\n"
968 + "3: movl $-1,%[err]\n"
969 + " jmp 2b\n"
970 + ".previous\n"
971 + _ASM_EXTABLE(1b, 3b)
972 + : [err] "=r" (err)
973 + : [fx] "R" (fx), "m" (*fx), "0" (0));
974 +#endif
975 + return err;
976 +}
977 +
978 +static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
979 +{
980 + int err;
981 +
982 + /*
983 + * Clear the bytes not touched by the fxsave and reserved
984 + * for the SW usage.
985 + */
986 + err = __clear_user(&fx->sw_reserved,
987 + sizeof(struct _fpx_sw_bytes));
988 + if (unlikely(err))
989 + return -EFAULT;
990 +
991 + /* See comment in fxsave() below. */
992 +#ifdef CONFIG_AS_FXSAVEQ
993 + asm volatile("1: fxsaveq %[fx]\n\t"
994 + "2:\n"
995 + ".section .fixup,\"ax\"\n"
996 + "3: movl $-1,%[err]\n"
997 + " jmp 2b\n"
998 + ".previous\n"
999 + _ASM_EXTABLE(1b, 3b)
1000 + : [err] "=r" (err), [fx] "=m" (*fx)
1001 + : "0" (0));
1002 +#else
1003 + asm volatile("1: rex64/fxsave (%[fx])\n\t"
1004 + "2:\n"
1005 + ".section .fixup,\"ax\"\n"
1006 + "3: movl $-1,%[err]\n"
1007 + " jmp 2b\n"
1008 + ".previous\n"
1009 + _ASM_EXTABLE(1b, 3b)
1010 + : [err] "=r" (err), "=m" (*fx)
1011 + : [fx] "R" (fx), "0" (0));
1012 +#endif
1013 + if (unlikely(err) &&
1014 + __clear_user(fx, sizeof(struct i387_fxsave_struct)))
1015 + err = -EFAULT;
1016 + /* No need to clear here because the caller clears USED_MATH */
1017 + return err;
1018 +}
1019 +
1020 +static inline void fpu_fxsave(struct fpu *fpu)
1021 +{
1022 + /* Using "rex64; fxsave %0" is broken because, if the memory operand
1023 + uses any extended registers for addressing, a second REX prefix
1024 + will be generated (to the assembler, rex64 followed by semicolon
1025 + is a separate instruction), and hence the 64-bitness is lost. */
1026 +
1027 +#ifdef CONFIG_AS_FXSAVEQ
1028 + /* Using "fxsaveq %0" would be the ideal choice, but is only supported
1029 + starting with gas 2.16. */
1030 + __asm__ __volatile__("fxsaveq %0"
1031 + : "=m" (fpu->state->fxsave));
1032 +#else
1033 + /* Using, as a workaround, the properly prefixed form below isn't
1034 + accepted by any binutils version so far released, complaining that
1035 + the same type of prefix is used twice if an extended register is
1036 + needed for addressing (fix submitted to mainline 2005-11-21).
1037 + asm volatile("rex64/fxsave %0"
1038 + : "=m" (fpu->state->fxsave));
1039 + This, however, we can work around by forcing the compiler to select
1040 + an addressing mode that doesn't require extended registers. */
1041 + asm volatile("rex64/fxsave (%[fx])"
1042 + : "=m" (fpu->state->fxsave)
1043 + : [fx] "R" (&fpu->state->fxsave));
1044 +#endif
1045 +}
1046 +
1047 +#else /* CONFIG_X86_32 */
1048 +
1049 +/* perform fxrstor iff the processor has extended states, otherwise frstor */
1050 +static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
1051 +{
1052 + /*
1053 + * The "nop" is needed to make the instructions the same
1054 + * length.
1055 + */
1056 + alternative_input(
1057 + "nop ; frstor %1",
1058 + "fxrstor %1",
1059 + X86_FEATURE_FXSR,
1060 + "m" (*fx));
1061 +
1062 + return 0;
1063 +}
1064 +
1065 +static inline void fpu_fxsave(struct fpu *fpu)
1066 +{
1067 + asm volatile("fxsave %[fx]"
1068 + : [fx] "=m" (fpu->state->fxsave));
1069 +}
1070 +
1071 +#endif /* CONFIG_X86_64 */
1072 +
1073 +/*
1074 + * These must be called with preempt disabled. Returns
1075 + * 'true' if the FPU state is still intact.
1076 + */
1077 +static inline int fpu_save_init(struct fpu *fpu)
1078 +{
1079 + if (use_xsave()) {
1080 + fpu_xsave(fpu);
1081 +
1082 + /*
1083 + * xsave header may indicate the init state of the FP.
1084 + */
1085 + if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
1086 + return 1;
1087 + } else if (use_fxsr()) {
1088 + fpu_fxsave(fpu);
1089 + } else {
1090 + asm volatile("fnsave %[fx]; fwait"
1091 + : [fx] "=m" (fpu->state->fsave));
1092 + return 0;
1093 + }
1094 +
1095 + /*
1096 + * If exceptions are pending, we need to clear them so
1097 + * that we don't randomly get exceptions later.
1098 + *
1099 + * FIXME! Is this perhaps only true for the old-style
1100 + * irq13 case? Maybe we could leave the x87 state
1101 + * intact otherwise?
1102 + */
1103 + if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
1104 + asm volatile("fnclex");
1105 + return 0;
1106 + }
1107 + return 1;
1108 +}
1109 +
1110 +static inline int __save_init_fpu(struct task_struct *tsk)
1111 +{
1112 + return fpu_save_init(&tsk->thread.fpu);
1113 +}
1114 +
1115 +static inline int fpu_fxrstor_checking(struct fpu *fpu)
1116 +{
1117 + return fxrstor_checking(&fpu->state->fxsave);
1118 +}
1119 +
1120 +static inline int fpu_restore_checking(struct fpu *fpu)
1121 +{
1122 + if (use_xsave())
1123 + return fpu_xrstor_checking(fpu);
1124 + else
1125 + return fpu_fxrstor_checking(fpu);
1126 +}
1127 +
1128 +static inline int restore_fpu_checking(struct task_struct *tsk)
1129 +{
1130 + /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
1131 + is pending. Clear the x87 state here by setting it to fixed
1132 + values. "m" is a random variable that should be in L1 */
1133 + alternative_input(
1134 + ASM_NOP8 ASM_NOP2,
1135 + "emms\n\t" /* clear stack tags */
1136 + "fildl %P[addr]", /* set F?P to defined value */
1137 + X86_FEATURE_FXSAVE_LEAK,
1138 + [addr] "m" (tsk->thread.fpu.has_fpu));
1139 +
1140 + return fpu_restore_checking(&tsk->thread.fpu);
1141 +}
1142 +
1143 +/*
1144 + * Software FPU state helpers. Careful: these need to
1145 + * be preemption protection *and* they need to be
1146 + * properly paired with the CR0.TS changes!
1147 + */
1148 +static inline int __thread_has_fpu(struct task_struct *tsk)
1149 +{
1150 + return tsk->thread.fpu.has_fpu;
1151 +}
1152 +
1153 +/* Must be paired with an 'stts' after! */
1154 +static inline void __thread_clear_has_fpu(struct task_struct *tsk)
1155 +{
1156 + tsk->thread.fpu.has_fpu = 0;
1157 + percpu_write(fpu_owner_task, NULL);
1158 +}
1159 +
1160 +/* Must be paired with a 'clts' before! */
1161 +static inline void __thread_set_has_fpu(struct task_struct *tsk)
1162 +{
1163 + tsk->thread.fpu.has_fpu = 1;
1164 + percpu_write(fpu_owner_task, tsk);
1165 +}
1166 +
1167 +/*
1168 + * Encapsulate the CR0.TS handling together with the
1169 + * software flag.
1170 + *
1171 + * These generally need preemption protection to work,
1172 + * do try to avoid using these on their own.
1173 + */
1174 +static inline void __thread_fpu_end(struct task_struct *tsk)
1175 +{
1176 + __thread_clear_has_fpu(tsk);
1177 + stts();
1178 +}
1179 +
1180 +static inline void __thread_fpu_begin(struct task_struct *tsk)
1181 +{
1182 + clts();
1183 + __thread_set_has_fpu(tsk);
1184 +}
1185 +
1186 +/*
1187 + * FPU state switching for scheduling.
1188 + *
1189 + * This is a two-stage process:
1190 + *
1191 + * - switch_fpu_prepare() saves the old state and
1192 + * sets the new state of the CR0.TS bit. This is
1193 + * done within the context of the old process.
1194 + *
1195 + * - switch_fpu_finish() restores the new state as
1196 + * necessary.
1197 + */
1198 +typedef struct { int preload; } fpu_switch_t;
1199 +
1200 +/*
1201 + * FIXME! We could do a totally lazy restore, but we need to
1202 + * add a per-cpu "this was the task that last touched the FPU
1203 + * on this CPU" variable, and the task needs to have a "I last
1204 + * touched the FPU on this CPU" and check them.
1205 + *
1206 + * We don't do that yet, so "fpu_lazy_restore()" always returns
1207 + * false, but some day..
1208 + */
1209 +static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
1210 +{
1211 + return new == percpu_read_stable(fpu_owner_task) &&
1212 + cpu == new->thread.fpu.last_cpu;
1213 +}
1214 +
1215 +static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
1216 +{
1217 + fpu_switch_t fpu;
1218 +
1219 + fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
1220 + if (__thread_has_fpu(old)) {
1221 + if (!__save_init_fpu(old))
1222 + cpu = ~0;
1223 + old->thread.fpu.last_cpu = cpu;
1224 + old->thread.fpu.has_fpu = 0; /* But leave fpu_owner_task! */
1225 +
1226 + /* Don't change CR0.TS if we just switch! */
1227 + if (fpu.preload) {
1228 + new->fpu_counter++;
1229 + __thread_set_has_fpu(new);
1230 + prefetch(new->thread.fpu.state);
1231 + } else
1232 + stts();
1233 + } else {
1234 + old->fpu_counter = 0;
1235 + old->thread.fpu.last_cpu = ~0;
1236 + if (fpu.preload) {
1237 + new->fpu_counter++;
1238 + if (fpu_lazy_restore(new, cpu))
1239 + fpu.preload = 0;
1240 + else
1241 + prefetch(new->thread.fpu.state);
1242 + __thread_fpu_begin(new);
1243 + }
1244 + }
1245 + return fpu;
1246 +}
1247 +
1248 +/*
1249 + * By the time this gets called, we've already cleared CR0.TS and
1250 + * given the process the FPU if we are going to preload the FPU
1251 + * state - all we need to do is to conditionally restore the register
1252 + * state itself.
1253 + */
1254 +static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
1255 +{
1256 + if (fpu.preload) {
1257 + if (unlikely(restore_fpu_checking(new)))
1258 + __thread_fpu_end(new);
1259 + }
1260 +}
1261 +
1262 +/*
1263 + * Signal frame handlers...
1264 + */
1265 +extern int save_i387_xstate(void __user *buf);
1266 +extern int restore_i387_xstate(void __user *buf);
1267 +
1268 +static inline void __clear_fpu(struct task_struct *tsk)
1269 +{
1270 + if (__thread_has_fpu(tsk)) {
1271 + /* Ignore delayed exceptions from user space */
1272 + asm volatile("1: fwait\n"
1273 + "2:\n"
1274 + _ASM_EXTABLE(1b, 2b));
1275 + __thread_fpu_end(tsk);
1276 + }
1277 +}
1278 +
1279 +/*
1280 + * The actual user_fpu_begin/end() functions
1281 + * need to be preemption-safe.
1282 + *
1283 + * NOTE! user_fpu_end() must be used only after you
1284 + * have saved the FP state, and user_fpu_begin() must
1285 + * be used only immediately before restoring it.
1286 + * These functions do not do any save/restore on
1287 + * their own.
1288 + */
1289 +static inline void user_fpu_end(void)
1290 +{
1291 + preempt_disable();
1292 + __thread_fpu_end(current);
1293 + preempt_enable();
1294 +}
1295 +
1296 +static inline void user_fpu_begin(void)
1297 +{
1298 + preempt_disable();
1299 + if (!user_has_fpu())
1300 + __thread_fpu_begin(current);
1301 + preempt_enable();
1302 +}
1303 +
1304 +/*
1305 + * These disable preemption on their own and are safe
1306 + */
1307 +static inline void save_init_fpu(struct task_struct *tsk)
1308 +{
1309 + WARN_ON_ONCE(!__thread_has_fpu(tsk));
1310 + preempt_disable();
1311 + __save_init_fpu(tsk);
1312 + __thread_fpu_end(tsk);
1313 + preempt_enable();
1314 +}
1315 +
1316 +static inline void clear_fpu(struct task_struct *tsk)
1317 +{
1318 + preempt_disable();
1319 + __clear_fpu(tsk);
1320 + preempt_enable();
1321 +}
1322 +
1323 +/*
1324 + * i387 state interaction
1325 + */
1326 +static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
1327 +{
1328 + if (cpu_has_fxsr) {
1329 + return tsk->thread.fpu.state->fxsave.cwd;
1330 + } else {
1331 + return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
1332 + }
1333 +}
1334 +
1335 +static inline unsigned short get_fpu_swd(struct task_struct *tsk)
1336 +{
1337 + if (cpu_has_fxsr) {
1338 + return tsk->thread.fpu.state->fxsave.swd;
1339 + } else {
1340 + return (unsigned short)tsk->thread.fpu.state->fsave.swd;
1341 + }
1342 +}
1343 +
1344 +static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
1345 +{
1346 + if (cpu_has_xmm) {
1347 + return tsk->thread.fpu.state->fxsave.mxcsr;
1348 + } else {
1349 + return MXCSR_DEFAULT;
1350 + }
1351 +}
1352 +
1353 +static bool fpu_allocated(struct fpu *fpu)
1354 +{
1355 + return fpu->state != NULL;
1356 +}
1357 +
1358 +static inline int fpu_alloc(struct fpu *fpu)
1359 +{
1360 + if (fpu_allocated(fpu))
1361 + return 0;
1362 + fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
1363 + if (!fpu->state)
1364 + return -ENOMEM;
1365 + WARN_ON((unsigned long)fpu->state & 15);
1366 + return 0;
1367 +}
1368 +
1369 +static inline void fpu_free(struct fpu *fpu)
1370 +{
1371 + if (fpu->state) {
1372 + kmem_cache_free(task_xstate_cachep, fpu->state);
1373 + fpu->state = NULL;
1374 + }
1375 +}
1376 +
1377 +static inline void fpu_copy(struct fpu *dst, struct fpu *src)
1378 +{
1379 + memcpy(dst->state, src->state, xstate_size);
1380 +}
1381 +
1382 +extern void fpu_finit(struct fpu *fpu);
1383 +
1384 +#endif
1385 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
1386 index d09bb03..71ecbcba 100644
1387 --- a/arch/x86/include/asm/futex.h
1388 +++ b/arch/x86/include/asm/futex.h
1389 @@ -9,7 +9,6 @@
1390 #include <asm/asm.h>
1391 #include <asm/errno.h>
1392 #include <asm/processor.h>
1393 -#include <asm/system.h>
1394
1395 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
1396 asm volatile("1:\t" insn "\n" \
1397 diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
1398 index da0b3ca..382f75d 100644
1399 --- a/arch/x86/include/asm/hardirq.h
1400 +++ b/arch/x86/include/asm/hardirq.h
1401 @@ -7,7 +7,6 @@
1402 typedef struct {
1403 unsigned int __softirq_pending;
1404 unsigned int __nmi_count; /* arch dependent */
1405 - unsigned int irq0_irqs;
1406 #ifdef CONFIG_X86_LOCAL_APIC
1407 unsigned int apic_timer_irqs; /* arch dependent */
1408 unsigned int irq_spurious_count;
1409 diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
1410 index 3bd0402..302a323 100644
1411 --- a/arch/x86/include/asm/highmem.h
1412 +++ b/arch/x86/include/asm/highmem.h
1413 @@ -61,7 +61,7 @@ void *kmap(struct page *page);
1414 void kunmap(struct page *page);
1415
1416 void *kmap_atomic_prot(struct page *page, pgprot_t prot);
1417 -void *__kmap_atomic(struct page *page);
1418 +void *kmap_atomic(struct page *page);
1419 void __kunmap_atomic(void *kvaddr);
1420 void *kmap_atomic_pfn(unsigned long pfn);
1421 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
1422 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
1423 index 2479049..257d9cc 100644
1424 --- a/arch/x86/include/asm/i387.h
1425 +++ b/arch/x86/include/asm/i387.h
1426 @@ -13,476 +13,18 @@
1427 #ifndef __ASSEMBLY__
1428
1429 #include <linux/sched.h>
1430 -#include <linux/kernel_stat.h>
1431 -#include <linux/regset.h>
1432 #include <linux/hardirq.h>
1433 -#include <linux/slab.h>
1434 -#include <asm/asm.h>
1435 -#include <asm/cpufeature.h>
1436 -#include <asm/processor.h>
1437 -#include <asm/sigcontext.h>
1438 -#include <asm/user.h>
1439 -#include <asm/uaccess.h>
1440 -#include <asm/xsave.h>
1441
1442 -extern unsigned int sig_xstate_size;
1443 -extern void fpu_init(void);
1444 -extern void mxcsr_feature_mask_init(void);
1445 +struct pt_regs;
1446 +struct user_i387_struct;
1447 +
1448 extern int init_fpu(struct task_struct *child);
1449 -extern void math_state_restore(void);
1450 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
1451 +extern void math_state_restore(void);
1452
1453 -DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
1454 -
1455 -extern user_regset_active_fn fpregs_active, xfpregs_active;
1456 -extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
1457 - xstateregs_get;
1458 -extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
1459 - xstateregs_set;
1460 -
1461 -/*
1462 - * xstateregs_active == fpregs_active. Please refer to the comment
1463 - * at the definition of fpregs_active.
1464 - */
1465 -#define xstateregs_active fpregs_active
1466 -
1467 -extern struct _fpx_sw_bytes fx_sw_reserved;
1468 -#ifdef CONFIG_IA32_EMULATION
1469 -extern unsigned int sig_xstate_ia32_size;
1470 -extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
1471 -struct _fpstate_ia32;
1472 -struct _xstate_ia32;
1473 -extern int save_i387_xstate_ia32(void __user *buf);
1474 -extern int restore_i387_xstate_ia32(void __user *buf);
1475 -#endif
1476 -
1477 -#ifdef CONFIG_MATH_EMULATION
1478 -extern void finit_soft_fpu(struct i387_soft_struct *soft);
1479 -#else
1480 -static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
1481 -#endif
1482 -
1483 -#define X87_FSW_ES (1 << 7) /* Exception Summary */
1484 -
1485 -static __always_inline __pure bool use_xsaveopt(void)
1486 -{
1487 - return static_cpu_has(X86_FEATURE_XSAVEOPT);
1488 -}
1489 -
1490 -static __always_inline __pure bool use_xsave(void)
1491 -{
1492 - return static_cpu_has(X86_FEATURE_XSAVE);
1493 -}
1494 -
1495 -static __always_inline __pure bool use_fxsr(void)
1496 -{
1497 - return static_cpu_has(X86_FEATURE_FXSR);
1498 -}
1499 -
1500 -extern void __sanitize_i387_state(struct task_struct *);
1501 -
1502 -static inline void sanitize_i387_state(struct task_struct *tsk)
1503 -{
1504 - if (!use_xsaveopt())
1505 - return;
1506 - __sanitize_i387_state(tsk);
1507 -}
1508 -
1509 -#ifdef CONFIG_X86_64
1510 -static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
1511 -{
1512 - int err;
1513 -
1514 - /* See comment in fxsave() below. */
1515 -#ifdef CONFIG_AS_FXSAVEQ
1516 - asm volatile("1: fxrstorq %[fx]\n\t"
1517 - "2:\n"
1518 - ".section .fixup,\"ax\"\n"
1519 - "3: movl $-1,%[err]\n"
1520 - " jmp 2b\n"
1521 - ".previous\n"
1522 - _ASM_EXTABLE(1b, 3b)
1523 - : [err] "=r" (err)
1524 - : [fx] "m" (*fx), "0" (0));
1525 -#else
1526 - asm volatile("1: rex64/fxrstor (%[fx])\n\t"
1527 - "2:\n"
1528 - ".section .fixup,\"ax\"\n"
1529 - "3: movl $-1,%[err]\n"
1530 - " jmp 2b\n"
1531 - ".previous\n"
1532 - _ASM_EXTABLE(1b, 3b)
1533 - : [err] "=r" (err)
1534 - : [fx] "R" (fx), "m" (*fx), "0" (0));
1535 -#endif
1536 - return err;
1537 -}
1538 -
1539 -static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
1540 -{
1541 - int err;
1542 -
1543 - /*
1544 - * Clear the bytes not touched by the fxsave and reserved
1545 - * for the SW usage.
1546 - */
1547 - err = __clear_user(&fx->sw_reserved,
1548 - sizeof(struct _fpx_sw_bytes));
1549 - if (unlikely(err))
1550 - return -EFAULT;
1551 -
1552 - /* See comment in fxsave() below. */
1553 -#ifdef CONFIG_AS_FXSAVEQ
1554 - asm volatile("1: fxsaveq %[fx]\n\t"
1555 - "2:\n"
1556 - ".section .fixup,\"ax\"\n"
1557 - "3: movl $-1,%[err]\n"
1558 - " jmp 2b\n"
1559 - ".previous\n"
1560 - _ASM_EXTABLE(1b, 3b)
1561 - : [err] "=r" (err), [fx] "=m" (*fx)
1562 - : "0" (0));
1563 -#else
1564 - asm volatile("1: rex64/fxsave (%[fx])\n\t"
1565 - "2:\n"
1566 - ".section .fixup,\"ax\"\n"
1567 - "3: movl $-1,%[err]\n"
1568 - " jmp 2b\n"
1569 - ".previous\n"
1570 - _ASM_EXTABLE(1b, 3b)
1571 - : [err] "=r" (err), "=m" (*fx)
1572 - : [fx] "R" (fx), "0" (0));
1573 -#endif
1574 - if (unlikely(err) &&
1575 - __clear_user(fx, sizeof(struct i387_fxsave_struct)))
1576 - err = -EFAULT;
1577 - /* No need to clear here because the caller clears USED_MATH */
1578 - return err;
1579 -}
1580 -
1581 -static inline void fpu_fxsave(struct fpu *fpu)
1582 -{
1583 - /* Using "rex64; fxsave %0" is broken because, if the memory operand
1584 - uses any extended registers for addressing, a second REX prefix
1585 - will be generated (to the assembler, rex64 followed by semicolon
1586 - is a separate instruction), and hence the 64-bitness is lost. */
1587 -
1588 -#ifdef CONFIG_AS_FXSAVEQ
1589 - /* Using "fxsaveq %0" would be the ideal choice, but is only supported
1590 - starting with gas 2.16. */
1591 - __asm__ __volatile__("fxsaveq %0"
1592 - : "=m" (fpu->state->fxsave));
1593 -#else
1594 - /* Using, as a workaround, the properly prefixed form below isn't
1595 - accepted by any binutils version so far released, complaining that
1596 - the same type of prefix is used twice if an extended register is
1597 - needed for addressing (fix submitted to mainline 2005-11-21).
1598 - asm volatile("rex64/fxsave %0"
1599 - : "=m" (fpu->state->fxsave));
1600 - This, however, we can work around by forcing the compiler to select
1601 - an addressing mode that doesn't require extended registers. */
1602 - asm volatile("rex64/fxsave (%[fx])"
1603 - : "=m" (fpu->state->fxsave)
1604 - : [fx] "R" (&fpu->state->fxsave));
1605 -#endif
1606 -}
1607 -
1608 -#else /* CONFIG_X86_32 */
1609 -
1610 -/* perform fxrstor iff the processor has extended states, otherwise frstor */
1611 -static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
1612 -{
1613 - /*
1614 - * The "nop" is needed to make the instructions the same
1615 - * length.
1616 - */
1617 - alternative_input(
1618 - "nop ; frstor %1",
1619 - "fxrstor %1",
1620 - X86_FEATURE_FXSR,
1621 - "m" (*fx));
1622 -
1623 - return 0;
1624 -}
1625 -
1626 -static inline void fpu_fxsave(struct fpu *fpu)
1627 -{
1628 - asm volatile("fxsave %[fx]"
1629 - : [fx] "=m" (fpu->state->fxsave));
1630 -}
1631 -
1632 -#endif /* CONFIG_X86_64 */
1633 -
1634 -/*
1635 - * These must be called with preempt disabled. Returns
1636 - * 'true' if the FPU state is still intact.
1637 - */
1638 -static inline int fpu_save_init(struct fpu *fpu)
1639 -{
1640 - if (use_xsave()) {
1641 - fpu_xsave(fpu);
1642 -
1643 - /*
1644 - * xsave header may indicate the init state of the FP.
1645 - */
1646 - if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
1647 - return 1;
1648 - } else if (use_fxsr()) {
1649 - fpu_fxsave(fpu);
1650 - } else {
1651 - asm volatile("fnsave %[fx]; fwait"
1652 - : [fx] "=m" (fpu->state->fsave));
1653 - return 0;
1654 - }
1655 -
1656 - /*
1657 - * If exceptions are pending, we need to clear them so
1658 - * that we don't randomly get exceptions later.
1659 - *
1660 - * FIXME! Is this perhaps only true for the old-style
1661 - * irq13 case? Maybe we could leave the x87 state
1662 - * intact otherwise?
1663 - */
1664 - if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
1665 - asm volatile("fnclex");
1666 - return 0;
1667 - }
1668 - return 1;
1669 -}
1670 -
1671 -static inline int __save_init_fpu(struct task_struct *tsk)
1672 -{
1673 - return fpu_save_init(&tsk->thread.fpu);
1674 -}
1675 -
1676 -static inline int fpu_fxrstor_checking(struct fpu *fpu)
1677 -{
1678 - return fxrstor_checking(&fpu->state->fxsave);
1679 -}
1680 -
1681 -static inline int fpu_restore_checking(struct fpu *fpu)
1682 -{
1683 - if (use_xsave())
1684 - return fpu_xrstor_checking(fpu);
1685 - else
1686 - return fpu_fxrstor_checking(fpu);
1687 -}
1688 -
1689 -static inline int restore_fpu_checking(struct task_struct *tsk)
1690 -{
1691 - /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
1692 - is pending. Clear the x87 state here by setting it to fixed
1693 - values. "m" is a random variable that should be in L1 */
1694 - alternative_input(
1695 - ASM_NOP8 ASM_NOP2,
1696 - "emms\n\t" /* clear stack tags */
1697 - "fildl %P[addr]", /* set F?P to defined value */
1698 - X86_FEATURE_FXSAVE_LEAK,
1699 - [addr] "m" (tsk->thread.fpu.has_fpu));
1700 -
1701 - return fpu_restore_checking(&tsk->thread.fpu);
1702 -}
1703 -
1704 -/*
1705 - * Software FPU state helpers. Careful: these need to
1706 - * be preemption protection *and* they need to be
1707 - * properly paired with the CR0.TS changes!
1708 - */
1709 -static inline int __thread_has_fpu(struct task_struct *tsk)
1710 -{
1711 - return tsk->thread.fpu.has_fpu;
1712 -}
1713 -
1714 -/* Must be paired with an 'stts' after! */
1715 -static inline void __thread_clear_has_fpu(struct task_struct *tsk)
1716 -{
1717 - tsk->thread.fpu.has_fpu = 0;
1718 - percpu_write(fpu_owner_task, NULL);
1719 -}
1720 -
1721 -/* Must be paired with a 'clts' before! */
1722 -static inline void __thread_set_has_fpu(struct task_struct *tsk)
1723 -{
1724 - tsk->thread.fpu.has_fpu = 1;
1725 - percpu_write(fpu_owner_task, tsk);
1726 -}
1727 -
1728 -/*
1729 - * Encapsulate the CR0.TS handling together with the
1730 - * software flag.
1731 - *
1732 - * These generally need preemption protection to work,
1733 - * do try to avoid using these on their own.
1734 - */
1735 -static inline void __thread_fpu_end(struct task_struct *tsk)
1736 -{
1737 - __thread_clear_has_fpu(tsk);
1738 - stts();
1739 -}
1740 -
1741 -static inline void __thread_fpu_begin(struct task_struct *tsk)
1742 -{
1743 - clts();
1744 - __thread_set_has_fpu(tsk);
1745 -}
1746 -
1747 -/*
1748 - * FPU state switching for scheduling.
1749 - *
1750 - * This is a two-stage process:
1751 - *
1752 - * - switch_fpu_prepare() saves the old state and
1753 - * sets the new state of the CR0.TS bit. This is
1754 - * done within the context of the old process.
1755 - *
1756 - * - switch_fpu_finish() restores the new state as
1757 - * necessary.
1758 - */
1759 -typedef struct { int preload; } fpu_switch_t;
1760 -
1761 -/*
1762 - * FIXME! We could do a totally lazy restore, but we need to
1763 - * add a per-cpu "this was the task that last touched the FPU
1764 - * on this CPU" variable, and the task needs to have a "I last
1765 - * touched the FPU on this CPU" and check them.
1766 - *
1767 - * We don't do that yet, so "fpu_lazy_restore()" always returns
1768 - * false, but some day..
1769 - */
1770 -static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
1771 -{
1772 - return new == percpu_read_stable(fpu_owner_task) &&
1773 - cpu == new->thread.fpu.last_cpu;
1774 -}
1775 -
1776 -static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
1777 -{
1778 - fpu_switch_t fpu;
1779 -
1780 - fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
1781 - if (__thread_has_fpu(old)) {
1782 - if (!__save_init_fpu(old))
1783 - cpu = ~0;
1784 - old->thread.fpu.last_cpu = cpu;
1785 - old->thread.fpu.has_fpu = 0; /* But leave fpu_owner_task! */
1786 -
1787 - /* Don't change CR0.TS if we just switch! */
1788 - if (fpu.preload) {
1789 - new->fpu_counter++;
1790 - __thread_set_has_fpu(new);
1791 - prefetch(new->thread.fpu.state);
1792 - } else
1793 - stts();
1794 - } else {
1795 - old->fpu_counter = 0;
1796 - old->thread.fpu.last_cpu = ~0;
1797 - if (fpu.preload) {
1798 - new->fpu_counter++;
1799 - if (fpu_lazy_restore(new, cpu))
1800 - fpu.preload = 0;
1801 - else
1802 - prefetch(new->thread.fpu.state);
1803 - __thread_fpu_begin(new);
1804 - }
1805 - }
1806 - return fpu;
1807 -}
1808 -
1809 -/*
1810 - * By the time this gets called, we've already cleared CR0.TS and
1811 - * given the process the FPU if we are going to preload the FPU
1812 - * state - all we need to do is to conditionally restore the register
1813 - * state itself.
1814 - */
1815 -static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
1816 -{
1817 - if (fpu.preload) {
1818 - if (unlikely(restore_fpu_checking(new)))
1819 - __thread_fpu_end(new);
1820 - }
1821 -}
1822 -
1823 -/*
1824 - * Signal frame handlers...
1825 - */
1826 -extern int save_i387_xstate(void __user *buf);
1827 -extern int restore_i387_xstate(void __user *buf);
1828 -
1829 -static inline void __clear_fpu(struct task_struct *tsk)
1830 -{
1831 - if (__thread_has_fpu(tsk)) {
1832 - /* Ignore delayed exceptions from user space */
1833 - asm volatile("1: fwait\n"
1834 - "2:\n"
1835 - _ASM_EXTABLE(1b, 2b));
1836 - __thread_fpu_end(tsk);
1837 - }
1838 -}
1839 -
1840 -/*
1841 - * Were we in an interrupt that interrupted kernel mode?
1842 - *
1843 - * We can do a kernel_fpu_begin/end() pair *ONLY* if that
1844 - * pair does nothing at all: the thread must not have fpu (so
1845 - * that we don't try to save the FPU state), and TS must
1846 - * be set (so that the clts/stts pair does nothing that is
1847 - * visible in the interrupted kernel thread).
1848 - */
1849 -static inline bool interrupted_kernel_fpu_idle(void)
1850 -{
1851 - return !__thread_has_fpu(current) &&
1852 - (read_cr0() & X86_CR0_TS);
1853 -}
1854 -
1855 -/*
1856 - * Were we in user mode (or vm86 mode) when we were
1857 - * interrupted?
1858 - *
1859 - * Doing kernel_fpu_begin/end() is ok if we are running
1860 - * in an interrupt context from user mode - we'll just
1861 - * save the FPU state as required.
1862 - */
1863 -static inline bool interrupted_user_mode(void)
1864 -{
1865 - struct pt_regs *regs = get_irq_regs();
1866 - return regs && user_mode_vm(regs);
1867 -}
1868 -
1869 -/*
1870 - * Can we use the FPU in kernel mode with the
1871 - * whole "kernel_fpu_begin/end()" sequence?
1872 - *
1873 - * It's always ok in process context (ie "not interrupt")
1874 - * but it is sometimes ok even from an irq.
1875 - */
1876 -static inline bool irq_fpu_usable(void)
1877 -{
1878 - return !in_interrupt() ||
1879 - interrupted_user_mode() ||
1880 - interrupted_kernel_fpu_idle();
1881 -}
1882 -
1883 -static inline void kernel_fpu_begin(void)
1884 -{
1885 - struct task_struct *me = current;
1886 -
1887 - WARN_ON_ONCE(!irq_fpu_usable());
1888 - preempt_disable();
1889 - if (__thread_has_fpu(me)) {
1890 - __save_init_fpu(me);
1891 - __thread_clear_has_fpu(me);
1892 - /* We do 'stts()' in kernel_fpu_end() */
1893 - } else {
1894 - percpu_write(fpu_owner_task, NULL);
1895 - clts();
1896 - }
1897 -}
1898 -
1899 -static inline void kernel_fpu_end(void)
1900 -{
1901 - stts();
1902 - preempt_enable();
1903 -}
1904 +extern bool irq_fpu_usable(void);
1905 +extern void kernel_fpu_begin(void);
1906 +extern void kernel_fpu_end(void);
1907
1908 /*
1909 * Some instructions like VIA's padlock instructions generate a spurious
1910 @@ -524,126 +66,13 @@ static inline void irq_ts_restore(int TS_state)
1911 * we can just assume we have FPU access - typically
1912 * to save the FP state - we'll just take a #NM
1913 * fault and get the FPU access back.
1914 - *
1915 - * The actual user_fpu_begin/end() functions
1916 - * need to be preemption-safe, though.
1917 - *
1918 - * NOTE! user_fpu_end() must be used only after you
1919 - * have saved the FP state, and user_fpu_begin() must
1920 - * be used only immediately before restoring it.
1921 - * These functions do not do any save/restore on
1922 - * their own.
1923 */
1924 static inline int user_has_fpu(void)
1925 {
1926 - return __thread_has_fpu(current);
1927 -}
1928 -
1929 -static inline void user_fpu_end(void)
1930 -{
1931 - preempt_disable();
1932 - __thread_fpu_end(current);
1933 - preempt_enable();
1934 -}
1935 -
1936 -static inline void user_fpu_begin(void)
1937 -{
1938 - preempt_disable();
1939 - if (!user_has_fpu())
1940 - __thread_fpu_begin(current);
1941 - preempt_enable();
1942 -}
1943 -
1944 -/*
1945 - * These disable preemption on their own and are safe
1946 - */
1947 -static inline void save_init_fpu(struct task_struct *tsk)
1948 -{
1949 - WARN_ON_ONCE(!__thread_has_fpu(tsk));
1950 - preempt_disable();
1951 - __save_init_fpu(tsk);
1952 - __thread_fpu_end(tsk);
1953 - preempt_enable();
1954 -}
1955 -
1956 -static inline void unlazy_fpu(struct task_struct *tsk)
1957 -{
1958 - preempt_disable();
1959 - if (__thread_has_fpu(tsk)) {
1960 - __save_init_fpu(tsk);
1961 - __thread_fpu_end(tsk);
1962 - } else
1963 - tsk->fpu_counter = 0;
1964 - preempt_enable();
1965 -}
1966 -
1967 -static inline void clear_fpu(struct task_struct *tsk)
1968 -{
1969 - preempt_disable();
1970 - __clear_fpu(tsk);
1971 - preempt_enable();
1972 -}
1973 -
1974 -/*
1975 - * i387 state interaction
1976 - */
1977 -static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
1978 -{
1979 - if (cpu_has_fxsr) {
1980 - return tsk->thread.fpu.state->fxsave.cwd;
1981 - } else {
1982 - return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
1983 - }
1984 -}
1985 -
1986 -static inline unsigned short get_fpu_swd(struct task_struct *tsk)
1987 -{
1988 - if (cpu_has_fxsr) {
1989 - return tsk->thread.fpu.state->fxsave.swd;
1990 - } else {
1991 - return (unsigned short)tsk->thread.fpu.state->fsave.swd;
1992 - }
1993 -}
1994 -
1995 -static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
1996 -{
1997 - if (cpu_has_xmm) {
1998 - return tsk->thread.fpu.state->fxsave.mxcsr;
1999 - } else {
2000 - return MXCSR_DEFAULT;
2001 - }
2002 -}
2003 -
2004 -static bool fpu_allocated(struct fpu *fpu)
2005 -{
2006 - return fpu->state != NULL;
2007 -}
2008 -
2009 -static inline int fpu_alloc(struct fpu *fpu)
2010 -{
2011 - if (fpu_allocated(fpu))
2012 - return 0;
2013 - fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
2014 - if (!fpu->state)
2015 - return -ENOMEM;
2016 - WARN_ON((unsigned long)fpu->state & 15);
2017 - return 0;
2018 -}
2019 -
2020 -static inline void fpu_free(struct fpu *fpu)
2021 -{
2022 - if (fpu->state) {
2023 - kmem_cache_free(task_xstate_cachep, fpu->state);
2024 - fpu->state = NULL;
2025 - }
2026 -}
2027 -
2028 -static inline void fpu_copy(struct fpu *dst, struct fpu *src)
2029 -{
2030 - memcpy(dst->state, src->state, xstate_size);
2031 + return current->thread.fpu.has_fpu;
2032 }
2033
2034 -extern void fpu_finit(struct fpu *fpu);
2035 +extern void unlazy_fpu(struct task_struct *tsk);
2036
2037 #endif /* __ASSEMBLY__ */
2038
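Note on the i387.h hunk above: most of the lazy-FPU switching helpers are no longer exposed by this header and move into kernel-internal code; only the exported entry points remain. A hedged sketch of how those survivors are typically used, e.g. by SSE-accelerated crypto code (the fallback function is hypothetical):

    #include <asm/i387.h>

    static void do_sse_work(const u8 *src, u8 *dst, size_t len)
    {
        if (!irq_fpu_usable()) {
            do_scalar_fallback(src, dst, len);   /* hypothetical scalar path */
            return;
        }
        kernel_fpu_begin();
        /* ... SSE/AVX instructions may be used here ... */
        kernel_fpu_end();
    }
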
2039 diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
2040 index 1f7e625..ee52760 100644
2041 --- a/arch/x86/include/asm/ia32.h
2042 +++ b/arch/x86/include/asm/ia32.h
2043 @@ -43,6 +43,15 @@ struct ucontext_ia32 {
2044 compat_sigset_t uc_sigmask; /* mask last for extensibility */
2045 };
2046
2047 +struct ucontext_x32 {
2048 + unsigned int uc_flags;
2049 + unsigned int uc_link;
2050 + stack_ia32_t uc_stack;
2051 + unsigned int uc__pad0; /* needed for alignment */
2052 + struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */
2053 + compat_sigset_t uc_sigmask; /* mask last for extensibility */
2054 +};
2055 +
2056 /* This matches struct stat64 in glibc2.2, hence the absolutely
2057 * insane amounts of padding around dev_t's.
2058 */
2059 @@ -116,6 +125,15 @@ typedef struct compat_siginfo {
2060 compat_clock_t _stime;
2061 } _sigchld;
2062
2063 + /* SIGCHLD (x32 version) */
2064 + struct {
2065 + unsigned int _pid; /* which child */
2066 + unsigned int _uid; /* sender's uid */
2067 + int _status; /* exit code */
2068 + compat_s64 _utime;
2069 + compat_s64 _stime;
2070 + } _sigchld_x32;
2071 +
2072 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
2073 struct {
2074 unsigned int _addr; /* faulting insn/memory ref. */
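Note on the ia32.h hunk above: x32 reuses the compat (32-bit) signal plumbing, but with 64-bit time fields and the full 64-bit sigcontext. User code can detect the x32 ABI at compile time because the compiler defines both __x86_64__ and __ILP32__; a small self-contained check:

    #include <stdio.h>

    int main(void)
    {
    #if defined(__x86_64__) && defined(__ILP32__)
        puts("x32 ABI: 64-bit registers, 32-bit pointers");
    #elif defined(__x86_64__)
        puts("classic LP64 x86-64 ABI");
    #else
        puts("ia32 or other ABI");
    #endif
        printf("sizeof(void *) = %zu, sizeof(long) = %zu\n",
               sizeof(void *), sizeof(long));
        return 0;
    }
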
2075 diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
2076 index f49253d7..c5d1785 100644
2077 --- a/arch/x86/include/asm/idle.h
2078 +++ b/arch/x86/include/asm/idle.h
2079 @@ -14,6 +14,7 @@ void exit_idle(void);
2080 #else /* !CONFIG_X86_64 */
2081 static inline void enter_idle(void) { }
2082 static inline void exit_idle(void) { }
2083 +static inline void __exit_idle(void) { }
2084 #endif /* CONFIG_X86_64 */
2085
2086 void amd_e400_remove_cpu(int cpu);
2087 diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h
2088 index 205b063..74a2e31 100644
2089 --- a/arch/x86/include/asm/inat.h
2090 +++ b/arch/x86/include/asm/inat.h
2091 @@ -97,11 +97,12 @@
2092
2093 /* Attribute search APIs */
2094 extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
2095 +extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
2096 extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
2097 - insn_byte_t last_pfx,
2098 + int lpfx_id,
2099 insn_attr_t esc_attr);
2100 extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
2101 - insn_byte_t last_pfx,
2102 + int lpfx_id,
2103 insn_attr_t esc_attr);
2104 extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
2105 insn_byte_t vex_m,
2106 diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
2107 index 74df3f1..48eb30a 100644
2108 --- a/arch/x86/include/asm/insn.h
2109 +++ b/arch/x86/include/asm/insn.h
2110 @@ -96,12 +96,6 @@ struct insn {
2111 #define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */
2112 #define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
2113
2114 -/* The last prefix is needed for two-byte and three-byte opcodes */
2115 -static inline insn_byte_t insn_last_prefix(struct insn *insn)
2116 -{
2117 - return insn->prefixes.bytes[3];
2118 -}
2119 -
2120 extern void insn_init(struct insn *insn, const void *kaddr, int x86_64);
2121 extern void insn_get_prefixes(struct insn *insn);
2122 extern void insn_get_opcode(struct insn *insn);
2123 @@ -160,6 +154,18 @@ static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
2124 return X86_VEX_P(insn->vex_prefix.bytes[2]);
2125 }
2126
2127 +/* Get the last prefix id from last prefix or VEX prefix */
2128 +static inline int insn_last_prefix_id(struct insn *insn)
2129 +{
2130 + if (insn_is_avx(insn))
2131 + return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */
2132 +
2133 + if (insn->prefixes.bytes[3])
2134 + return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
2135 +
2136 + return 0;
2137 +}
2138 +
2139 /* Offset of each field from kaddr */
2140 static inline int insn_offset_rex_prefix(struct insn *insn)
2141 {
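Note on the inat.h/insn.h hunks above: the decoder now converts the last legacy prefix (or the VEX pp field) into a small table index once, instead of passing the raw prefix byte around. A hedged sketch of the resulting call pattern; the variable names below are illustrative, not taken from insn.c:

    struct insn insn;
    insn_attr_t attr;
    int lpfx_id;

    insn_init(&insn, kaddr, 1 /* 64-bit mode */);
    insn_get_prefixes(&insn);
    lpfx_id = insn_last_prefix_id(&insn);               /* 0 means "no SIMD prefix" */
    attr = inat_get_escape_attribute(opcode_byte, lpfx_id, insn.attr);
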
2142 diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
2143 index 690d1cc..2c4943d 100644
2144 --- a/arch/x86/include/asm/io_apic.h
2145 +++ b/arch/x86/include/asm/io_apic.h
2146 @@ -21,6 +21,15 @@
2147 #define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15)
2148 #define IO_APIC_REDIR_MASKED (1 << 16)
2149
2150 +struct io_apic_ops {
2151 + void (*init) (void);
2152 + unsigned int (*read) (unsigned int apic, unsigned int reg);
2153 + void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
2154 + void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
2155 +};
2156 +
2157 +void __init set_io_apic_ops(const struct io_apic_ops *);
2158 +
2159 /*
2160 * The structure of the IO-APIC:
2161 */
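Note on the io_apic.h hunk above: the new io_apic_ops structure lets a platform replace the raw IO-APIC register accessors. A hedged sketch of how a platform layer might hook them -- every callback name and body below is made up for illustration; only set_io_apic_ops() comes from the patch:

    static unsigned int my_io_apic_read(unsigned int apic, unsigned int reg)
    {
        return my_backend_read(apic, reg);   /* hypothetical */
    }

    static const struct io_apic_ops my_io_apic_ops = {
        .init   = my_io_apic_init,           /* hypothetical */
        .read   = my_io_apic_read,
        .write  = my_io_apic_write,          /* hypothetical */
        .modify = my_io_apic_modify,         /* hypothetical */
    };

    void __init my_platform_setup(void)
    {
        set_io_apic_ops(&my_io_apic_ops);
    }
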
2162 diff --git a/arch/x86/include/asm/irq_controller.h b/arch/x86/include/asm/irq_controller.h
2163 deleted file mode 100644
2164 index 423bbbd..0000000
2165 --- a/arch/x86/include/asm/irq_controller.h
2166 +++ /dev/null
2167 @@ -1,12 +0,0 @@
2168 -#ifndef __IRQ_CONTROLLER__
2169 -#define __IRQ_CONTROLLER__
2170 -
2171 -struct irq_domain {
2172 - int (*xlate)(struct irq_domain *h, const u32 *intspec, u32 intsize,
2173 - u32 *out_hwirq, u32 *out_type);
2174 - void *priv;
2175 - struct device_node *controller;
2176 - struct list_head l;
2177 -};
2178 -
2179 -#endif
2180 diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
2181 index a32b18c..3a16c14 100644
2182 --- a/arch/x86/include/asm/jump_label.h
2183 +++ b/arch/x86/include/asm/jump_label.h
2184 @@ -9,12 +9,12 @@
2185
2186 #define JUMP_LABEL_NOP_SIZE 5
2187
2188 -#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
2189 +#define STATIC_KEY_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
2190
2191 -static __always_inline bool arch_static_branch(struct jump_label_key *key)
2192 +static __always_inline bool arch_static_branch(struct static_key *key)
2193 {
2194 asm goto("1:"
2195 - JUMP_LABEL_INITIAL_NOP
2196 + STATIC_KEY_INITIAL_NOP
2197 ".pushsection __jump_table, \"aw\" \n\t"
2198 _ASM_ALIGN "\n\t"
2199 _ASM_PTR "1b, %l[l_yes], %c0 \n\t"
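Note on the jump_label.h hunk above: this is the 3.4-cycle rename of "jump label keys" to "static keys"; only the type and the initial-NOP macro change here, the generated code stays the same. A hedged sketch of the consumer-side API as it looks in v3.4-rc1 (do_rare_thing() is hypothetical):

    #include <linux/jump_label.h>

    static struct static_key my_feature = STATIC_KEY_INIT_FALSE;

    void hot_path(void)
    {
        if (static_key_false(&my_feature))   /* a patched NOP until the key is enabled */
            do_rare_thing();                 /* hypothetical slow path */
    }

    void enable_my_feature(void)
    {
        static_key_slow_inc(&my_feature);    /* rewrites the jump sites at runtime */
    }
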
2200 diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
2201 index 77e95f5..332f98c 100644
2202 --- a/arch/x86/include/asm/kgdb.h
2203 +++ b/arch/x86/include/asm/kgdb.h
2204 @@ -64,11 +64,15 @@ enum regnames {
2205 GDB_PS, /* 17 */
2206 GDB_CS, /* 18 */
2207 GDB_SS, /* 19 */
2208 + GDB_DS, /* 20 */
2209 + GDB_ES, /* 21 */
2210 + GDB_FS, /* 22 */
2211 + GDB_GS, /* 23 */
2212 };
2213 #define GDB_ORIG_AX 57
2214 -#define DBG_MAX_REG_NUM 20
2215 -/* 17 64 bit regs and 3 32 bit regs */
2216 -#define NUMREGBYTES ((17 * 8) + (3 * 4))
2217 +#define DBG_MAX_REG_NUM 24
2218 +/* 17 64 bit regs and 5 32 bit regs */
2219 +#define NUMREGBYTES ((17 * 8) + (5 * 4))
2220 #endif /* ! CONFIG_X86_32 */
2221
2222 static inline void arch_kgdb_breakpoint(void)
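Note on the kgdb.h hunk above: adding DS/ES/FS/GS to the 64-bit register file grows the GDB register buffer from 17*8 + 3*4 = 148 bytes to 17*8 + 5*4 = 156 bytes, which is what the new NUMREGBYTES value encodes.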
2223 diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
2224 index 4d8dcbd..e7d1c19 100644
2225 --- a/arch/x86/include/asm/kvm.h
2226 +++ b/arch/x86/include/asm/kvm.h
2227 @@ -321,4 +321,8 @@ struct kvm_xcrs {
2228 __u64 padding[16];
2229 };
2230
2231 +/* definition of registers in kvm_run */
2232 +struct kvm_sync_regs {
2233 +};
2234 +
2235 #endif /* _ASM_X86_KVM_H */
2236 diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
2237 index 7b9cfc4..c222e1a 100644
2238 --- a/arch/x86/include/asm/kvm_emulate.h
2239 +++ b/arch/x86/include/asm/kvm_emulate.h
2240 @@ -176,6 +176,7 @@ struct x86_emulate_ops {
2241 void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
2242 ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
2243 int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
2244 + void (*set_rflags)(struct x86_emulate_ctxt *ctxt, ulong val);
2245 int (*cpl)(struct x86_emulate_ctxt *ctxt);
2246 int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
2247 int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
2248 @@ -388,7 +389,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
2249 #define EMULATION_INTERCEPTED 2
2250 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
2251 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2252 - u16 tss_selector, int reason,
2253 + u16 tss_selector, int idt_index, int reason,
2254 bool has_error_code, u32 error_code);
2255 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
2256 #endif /* _ASM_X86_KVM_X86_EMULATE_H */
2257 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
2258 index 52d6640..e216ba0 100644
2259 --- a/arch/x86/include/asm/kvm_host.h
2260 +++ b/arch/x86/include/asm/kvm_host.h
2261 @@ -29,7 +29,7 @@
2262 #include <asm/msr-index.h>
2263
2264 #define KVM_MAX_VCPUS 254
2265 -#define KVM_SOFT_MAX_VCPUS 64
2266 +#define KVM_SOFT_MAX_VCPUS 160
2267 #define KVM_MEMORY_SLOTS 32
2268 /* memory slots that does not exposed to userspace */
2269 #define KVM_PRIVATE_MEM_SLOTS 4
2270 @@ -181,13 +181,6 @@ struct kvm_mmu_memory_cache {
2271 void *objects[KVM_NR_MEM_OBJS];
2272 };
2273
2274 -#define NR_PTE_CHAIN_ENTRIES 5
2275 -
2276 -struct kvm_pte_chain {
2277 - u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
2278 - struct hlist_node link;
2279 -};
2280 -
2281 /*
2282 * kvm_mmu_page_role, below, is defined as:
2283 *
2284 @@ -427,12 +420,16 @@ struct kvm_vcpu_arch {
2285
2286 u64 last_guest_tsc;
2287 u64 last_kernel_ns;
2288 - u64 last_tsc_nsec;
2289 - u64 last_tsc_write;
2290 - u32 virtual_tsc_khz;
2291 + u64 last_host_tsc;
2292 + u64 tsc_offset_adjustment;
2293 + u64 this_tsc_nsec;
2294 + u64 this_tsc_write;
2295 + u8 this_tsc_generation;
2296 bool tsc_catchup;
2297 - u32 tsc_catchup_mult;
2298 - s8 tsc_catchup_shift;
2299 + bool tsc_always_catchup;
2300 + s8 virtual_tsc_shift;
2301 + u32 virtual_tsc_mult;
2302 + u32 virtual_tsc_khz;
2303
2304 atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
2305 unsigned nmi_pending; /* NMI queued after currently running handler */
2306 @@ -478,6 +475,21 @@ struct kvm_vcpu_arch {
2307 u32 id;
2308 bool send_user_only;
2309 } apf;
2310 +
2311 + /* OSVW MSRs (AMD only) */
2312 + struct {
2313 + u64 length;
2314 + u64 status;
2315 + } osvw;
2316 +};
2317 +
2318 +struct kvm_lpage_info {
2319 + unsigned long rmap_pde;
2320 + int write_count;
2321 +};
2322 +
2323 +struct kvm_arch_memory_slot {
2324 + struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
2325 };
2326
2327 struct kvm_arch {
2328 @@ -511,8 +523,12 @@ struct kvm_arch {
2329 s64 kvmclock_offset;
2330 raw_spinlock_t tsc_write_lock;
2331 u64 last_tsc_nsec;
2332 - u64 last_tsc_offset;
2333 u64 last_tsc_write;
2334 + u32 last_tsc_khz;
2335 + u64 cur_tsc_nsec;
2336 + u64 cur_tsc_write;
2337 + u64 cur_tsc_offset;
2338 + u8 cur_tsc_generation;
2339
2340 struct kvm_xen_hvm_config xen_hvm_config;
2341
2342 @@ -644,7 +660,7 @@ struct kvm_x86_ops {
2343 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
2344 int (*get_lpage_level)(void);
2345 bool (*rdtscp_supported)(void);
2346 - void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
2347 + void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
2348
2349 void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
2350
2351 @@ -652,7 +668,7 @@ struct kvm_x86_ops {
2352
2353 bool (*has_wbinvd_exit)(void);
2354
2355 - void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
2356 + void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
2357 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
2358
2359 u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
2360 @@ -674,6 +690,17 @@ struct kvm_arch_async_pf {
2361
2362 extern struct kvm_x86_ops *kvm_x86_ops;
2363
2364 +static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
2365 + s64 adjustment)
2366 +{
2367 + kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
2368 +}
2369 +
2370 +static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
2371 +{
2372 + kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
2373 +}
2374 +
2375 int kvm_mmu_module_init(void);
2376 void kvm_mmu_module_exit(void);
2377
2378 @@ -741,8 +768,8 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
2379 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
2380 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
2381
2382 -int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
2383 - bool has_error_code, u32 error_code);
2384 +int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
2385 + int reason, bool has_error_code, u32 error_code);
2386
2387 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
2388 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
2389 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
2390 index 9cdae5d..c8bed0d 100644
2391 --- a/arch/x86/include/asm/local.h
2392 +++ b/arch/x86/include/asm/local.h
2393 @@ -3,7 +3,6 @@
2394
2395 #include <linux/percpu.h>
2396
2397 -#include <asm/system.h>
2398 #include <linux/atomic.h>
2399 #include <asm/asm.h>
2400
2401 diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
2402 index 0e8e85b..d354fb7 100644
2403 --- a/arch/x86/include/asm/mc146818rtc.h
2404 +++ b/arch/x86/include/asm/mc146818rtc.h
2405 @@ -5,7 +5,6 @@
2406 #define _ASM_X86_MC146818RTC_H
2407
2408 #include <asm/io.h>
2409 -#include <asm/system.h>
2410 #include <asm/processor.h>
2411 #include <linux/mc146818rtc.h>
2412
2413 diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
2414 index 6aefb14..441520e 100644
2415 --- a/arch/x86/include/asm/mce.h
2416 +++ b/arch/x86/include/asm/mce.h
2417 @@ -151,7 +151,7 @@ static inline void enable_p5_mce(void) {}
2418
2419 void mce_setup(struct mce *m);
2420 void mce_log(struct mce *m);
2421 -extern struct device *mce_device[CONFIG_NR_CPUS];
2422 +DECLARE_PER_CPU(struct device *, mce_device);
2423
2424 /*
2425 * Maximum banks number.
2426 diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
2427 index 0a0a954..fc18bf3 100644
2428 --- a/arch/x86/include/asm/mrst.h
2429 +++ b/arch/x86/include/asm/mrst.h
2430 @@ -26,8 +26,8 @@ extern struct sfi_rtc_table_entry sfi_mrtc_array[];
2431 * identified via MSRs.
2432 */
2433 enum mrst_cpu_type {
2434 - MRST_CPU_CHIP_LINCROFT = 1,
2435 - MRST_CPU_CHIP_PENWELL,
2436 + /* 1 was Moorestown */
2437 + MRST_CPU_CHIP_PENWELL = 2,
2438 };
2439
2440 extern enum mrst_cpu_type __mrst_cpu_chip;
2441 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
2442 index a6962d9..ccb8059 100644
2443 --- a/arch/x86/include/asm/msr-index.h
2444 +++ b/arch/x86/include/asm/msr-index.h
2445 @@ -56,6 +56,13 @@
2446 #define MSR_OFFCORE_RSP_0 0x000001a6
2447 #define MSR_OFFCORE_RSP_1 0x000001a7
2448
2449 +#define MSR_LBR_SELECT 0x000001c8
2450 +#define MSR_LBR_TOS 0x000001c9
2451 +#define MSR_LBR_NHM_FROM 0x00000680
2452 +#define MSR_LBR_NHM_TO 0x000006c0
2453 +#define MSR_LBR_CORE_FROM 0x00000040
2454 +#define MSR_LBR_CORE_TO 0x00000060
2455 +
2456 #define MSR_IA32_PEBS_ENABLE 0x000003f1
2457 #define MSR_IA32_DS_AREA 0x00000600
2458 #define MSR_IA32_PERF_CAPABILITIES 0x00000345
2459 diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
2460 index 4365ffd..7e3f17f 100644
2461 --- a/arch/x86/include/asm/mtrr.h
2462 +++ b/arch/x86/include/asm/mtrr.h
2463 @@ -29,18 +29,18 @@
2464
2465 #define MTRR_IOCTL_BASE 'M'
2466
2467 -struct mtrr_sentry {
2468 - unsigned long base; /* Base address */
2469 - unsigned int size; /* Size of region */
2470 - unsigned int type; /* Type of region */
2471 -};
2472 -
2473 /* Warning: this structure has a different order from i386
2474 on x86-64. The 32bit emulation code takes care of that.
2475 But you need to use this for 64bit, otherwise your X server
2476 will break. */
2477
2478 #ifdef __i386__
2479 +struct mtrr_sentry {
2480 + unsigned long base; /* Base address */
2481 + unsigned int size; /* Size of region */
2482 + unsigned int type; /* Type of region */
2483 +};
2484 +
2485 struct mtrr_gentry {
2486 unsigned int regnum; /* Register number */
2487 unsigned long base; /* Base address */
2488 @@ -50,12 +50,20 @@ struct mtrr_gentry {
2489
2490 #else /* __i386__ */
2491
2492 +struct mtrr_sentry {
2493 + __u64 base; /* Base address */
2494 + __u32 size; /* Size of region */
2495 + __u32 type; /* Type of region */
2496 +};
2497 +
2498 struct mtrr_gentry {
2499 - unsigned long base; /* Base address */
2500 - unsigned int size; /* Size of region */
2501 - unsigned int regnum; /* Register number */
2502 - unsigned int type; /* Type of region */
2503 + __u64 base; /* Base address */
2504 + __u32 size; /* Size of region */
2505 + __u32 regnum; /* Register number */
2506 + __u32 type; /* Type of region */
2507 + __u32 _pad; /* Unused */
2508 };
2509 +
2510 #endif /* !__i386__ */
2511
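Note on the mtrr.h hunk above: the ioctl structures now use fixed-width __u64/__u32 fields on 64-bit kernels, so x32 user space (where unsigned long is 32-bit) sees the same layout the kernel writes. A hedged user-space sketch of the ABI in question, following the usual /proc/mtrr + MTRRIOC_GET_ENTRY pattern (requires root; error handling trimmed):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <asm/mtrr.h>

    int main(void)
    {
        struct mtrr_gentry g = { .regnum = 0 };
        int fd = open("/proc/mtrr", O_RDONLY);

        if (fd < 0 || ioctl(fd, MTRRIOC_GET_ENTRY, &g) < 0) {
            perror("mtrr");
            return 1;
        }
        printf("reg0: base=0x%llx size=0x%x type=%u\n",
               (unsigned long long)g.base, (unsigned int)g.size,
               (unsigned int)g.type);
        close(fd);
        return 0;
    }
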
2512 struct mtrr_var_range {
2513 diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
2514 index bce688d..e21fdd1 100644
2515 --- a/arch/x86/include/asm/page_types.h
2516 +++ b/arch/x86/include/asm/page_types.h
2517 @@ -55,7 +55,6 @@ extern unsigned long init_memory_mapping(unsigned long start,
2518 unsigned long end);
2519
2520 extern void initmem_init(void);
2521 -extern void free_initmem(void);
2522
2523 #endif /* !__ASSEMBLY__ */
2524
2525 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
2526 index a7d2db9..aa0f913 100644
2527 --- a/arch/x86/include/asm/paravirt.h
2528 +++ b/arch/x86/include/asm/paravirt.h
2529 @@ -10,6 +10,7 @@
2530 #include <asm/paravirt_types.h>
2531
2532 #ifndef __ASSEMBLY__
2533 +#include <linux/bug.h>
2534 #include <linux/types.h>
2535 #include <linux/cpumask.h>
2536
2537 @@ -230,9 +231,9 @@ static inline unsigned long long paravirt_sched_clock(void)
2538 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
2539 }
2540
2541 -struct jump_label_key;
2542 -extern struct jump_label_key paravirt_steal_enabled;
2543 -extern struct jump_label_key paravirt_steal_rq_enabled;
2544 +struct static_key;
2545 +extern struct static_key paravirt_steal_enabled;
2546 +extern struct static_key paravirt_steal_rq_enabled;
2547
2548 static inline u64 paravirt_steal_clock(int cpu)
2549 {
2550 diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
2551 index 461ce43..2291895 100644
2552 --- a/arch/x86/include/asm/perf_event.h
2553 +++ b/arch/x86/include/asm/perf_event.h
2554 @@ -23,6 +23,7 @@
2555 #define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
2556 #define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
2557 #define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
2558 +#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL (1ULL << 19)
2559 #define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
2560 #define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
2561 #define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
2562 @@ -188,8 +189,6 @@ extern u32 get_ibs_caps(void);
2563 #ifdef CONFIG_PERF_EVENTS
2564 extern void perf_events_lapic_init(void);
2565
2566 -#define PERF_EVENT_INDEX_OFFSET 0
2567 -
2568 /*
2569 * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
2570 * This flag is otherwise unused and ABI specified to be 0, so nobody should
2571 diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
2572 index bb7133d..3427b77 100644
2573 --- a/arch/x86/include/asm/posix_types.h
2574 +++ b/arch/x86/include/asm/posix_types.h
2575 @@ -7,7 +7,9 @@
2576 #else
2577 # ifdef __i386__
2578 # include "posix_types_32.h"
2579 -# else
2580 +# elif defined(__LP64__)
2581 # include "posix_types_64.h"
2582 +# else
2583 +# include "posix_types_x32.h"
2584 # endif
2585 #endif
2586 diff --git a/arch/x86/include/asm/posix_types_32.h b/arch/x86/include/asm/posix_types_32.h
2587 index f7d9adf..99f262e 100644
2588 --- a/arch/x86/include/asm/posix_types_32.h
2589 +++ b/arch/x86/include/asm/posix_types_32.h
2590 @@ -7,79 +7,22 @@
2591 * assume GCC is being used.
2592 */
2593
2594 -typedef unsigned long __kernel_ino_t;
2595 typedef unsigned short __kernel_mode_t;
2596 +#define __kernel_mode_t __kernel_mode_t
2597 +
2598 typedef unsigned short __kernel_nlink_t;
2599 -typedef long __kernel_off_t;
2600 -typedef int __kernel_pid_t;
2601 +#define __kernel_nlink_t __kernel_nlink_t
2602 +
2603 typedef unsigned short __kernel_ipc_pid_t;
2604 +#define __kernel_ipc_pid_t __kernel_ipc_pid_t
2605 +
2606 typedef unsigned short __kernel_uid_t;
2607 typedef unsigned short __kernel_gid_t;
2608 -typedef unsigned int __kernel_size_t;
2609 -typedef int __kernel_ssize_t;
2610 -typedef int __kernel_ptrdiff_t;
2611 -typedef long __kernel_time_t;
2612 -typedef long __kernel_suseconds_t;
2613 -typedef long __kernel_clock_t;
2614 -typedef int __kernel_timer_t;
2615 -typedef int __kernel_clockid_t;
2616 -typedef int __kernel_daddr_t;
2617 -typedef char * __kernel_caddr_t;
2618 -typedef unsigned short __kernel_uid16_t;
2619 -typedef unsigned short __kernel_gid16_t;
2620 -typedef unsigned int __kernel_uid32_t;
2621 -typedef unsigned int __kernel_gid32_t;
2622 +#define __kernel_uid_t __kernel_uid_t
2623
2624 -typedef unsigned short __kernel_old_uid_t;
2625 -typedef unsigned short __kernel_old_gid_t;
2626 typedef unsigned short __kernel_old_dev_t;
2627 +#define __kernel_old_dev_t __kernel_old_dev_t
2628
2629 -#ifdef __GNUC__
2630 -typedef long long __kernel_loff_t;
2631 -#endif
2632 -
2633 -typedef struct {
2634 - int val[2];
2635 -} __kernel_fsid_t;
2636 -
2637 -#if defined(__KERNEL__)
2638 -
2639 -#undef __FD_SET
2640 -#define __FD_SET(fd,fdsetp) \
2641 - asm volatile("btsl %1,%0": \
2642 - "+m" (*(__kernel_fd_set *)(fdsetp)) \
2643 - : "r" ((int)(fd)))
2644 -
2645 -#undef __FD_CLR
2646 -#define __FD_CLR(fd,fdsetp) \
2647 - asm volatile("btrl %1,%0": \
2648 - "+m" (*(__kernel_fd_set *)(fdsetp)) \
2649 - : "r" ((int) (fd)))
2650 -
2651 -#undef __FD_ISSET
2652 -#define __FD_ISSET(fd,fdsetp) \
2653 - (__extension__ \
2654 - ({ \
2655 - unsigned char __result; \
2656 - asm volatile("btl %1,%2 ; setb %0" \
2657 - : "=q" (__result) \
2658 - : "r" ((int)(fd)), \
2659 - "m" (*(__kernel_fd_set *)(fdsetp))); \
2660 - __result; \
2661 -}))
2662 -
2663 -#undef __FD_ZERO
2664 -#define __FD_ZERO(fdsetp) \
2665 -do { \
2666 - int __d0, __d1; \
2667 - asm volatile("cld ; rep ; stosl" \
2668 - : "=m" (*(__kernel_fd_set *)(fdsetp)), \
2669 - "=&c" (__d0), "=&D" (__d1) \
2670 - : "a" (0), "1" (__FDSET_LONGS), \
2671 - "2" ((__kernel_fd_set *)(fdsetp)) \
2672 - : "memory"); \
2673 -} while (0)
2674 -
2675 -#endif /* defined(__KERNEL__) */
2676 +#include <asm-generic/posix_types.h>
2677
2678 #endif /* _ASM_X86_POSIX_TYPES_32_H */
2679 diff --git a/arch/x86/include/asm/posix_types_64.h b/arch/x86/include/asm/posix_types_64.h
2680 index eb8d2d9..cba0c1e 100644
2681 --- a/arch/x86/include/asm/posix_types_64.h
2682 +++ b/arch/x86/include/asm/posix_types_64.h
2683 @@ -7,113 +7,13 @@
2684 * assume GCC is being used.
2685 */
2686
2687 -typedef unsigned long __kernel_ino_t;
2688 -typedef unsigned int __kernel_mode_t;
2689 -typedef unsigned long __kernel_nlink_t;
2690 -typedef long __kernel_off_t;
2691 -typedef int __kernel_pid_t;
2692 -typedef int __kernel_ipc_pid_t;
2693 -typedef unsigned int __kernel_uid_t;
2694 -typedef unsigned int __kernel_gid_t;
2695 -typedef unsigned long __kernel_size_t;
2696 -typedef long __kernel_ssize_t;
2697 -typedef long __kernel_ptrdiff_t;
2698 -typedef long __kernel_time_t;
2699 -typedef long __kernel_suseconds_t;
2700 -typedef long __kernel_clock_t;
2701 -typedef int __kernel_timer_t;
2702 -typedef int __kernel_clockid_t;
2703 -typedef int __kernel_daddr_t;
2704 -typedef char * __kernel_caddr_t;
2705 -typedef unsigned short __kernel_uid16_t;
2706 -typedef unsigned short __kernel_gid16_t;
2707 -
2708 -#ifdef __GNUC__
2709 -typedef long long __kernel_loff_t;
2710 -#endif
2711 -
2712 -typedef struct {
2713 - int val[2];
2714 -} __kernel_fsid_t;
2715 -
2716 typedef unsigned short __kernel_old_uid_t;
2717 typedef unsigned short __kernel_old_gid_t;
2718 -typedef __kernel_uid_t __kernel_uid32_t;
2719 -typedef __kernel_gid_t __kernel_gid32_t;
2720 +#define __kernel_old_uid_t __kernel_old_uid_t
2721
2722 typedef unsigned long __kernel_old_dev_t;
2723 +#define __kernel_old_dev_t __kernel_old_dev_t
2724
2725 -#ifdef __KERNEL__
2726 -
2727 -#undef __FD_SET
2728 -static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
2729 -{
2730 - unsigned long _tmp = fd / __NFDBITS;
2731 - unsigned long _rem = fd % __NFDBITS;
2732 - fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
2733 -}
2734 -
2735 -#undef __FD_CLR
2736 -static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
2737 -{
2738 - unsigned long _tmp = fd / __NFDBITS;
2739 - unsigned long _rem = fd % __NFDBITS;
2740 - fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
2741 -}
2742 -
2743 -#undef __FD_ISSET
2744 -static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
2745 -{
2746 - unsigned long _tmp = fd / __NFDBITS;
2747 - unsigned long _rem = fd % __NFDBITS;
2748 - return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
2749 -}
2750 -
2751 -/*
2752 - * This will unroll the loop for the normal constant cases (8 or 32 longs,
2753 - * for 256 and 1024-bit fd_sets respectively)
2754 - */
2755 -#undef __FD_ZERO
2756 -static inline void __FD_ZERO(__kernel_fd_set *p)
2757 -{
2758 - unsigned long *tmp = p->fds_bits;
2759 - int i;
2760 -
2761 - if (__builtin_constant_p(__FDSET_LONGS)) {
2762 - switch (__FDSET_LONGS) {
2763 - case 32:
2764 - tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
2765 - tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
2766 - tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
2767 - tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
2768 - tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
2769 - tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
2770 - tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
2771 - tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
2772 - return;
2773 - case 16:
2774 - tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
2775 - tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
2776 - tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
2777 - tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
2778 - return;
2779 - case 8:
2780 - tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
2781 - tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
2782 - return;
2783 - case 4:
2784 - tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
2785 - return;
2786 - }
2787 - }
2788 - i = __FDSET_LONGS;
2789 - while (i) {
2790 - i--;
2791 - *tmp = 0;
2792 - tmp++;
2793 - }
2794 -}
2795 -
2796 -#endif /* defined(__KERNEL__) */
2797 +#include <asm-generic/posix_types.h>
2798
2799 #endif /* _ASM_X86_POSIX_TYPES_64_H */
2800 diff --git a/arch/x86/include/asm/posix_types_x32.h b/arch/x86/include/asm/posix_types_x32.h
2801 new file mode 100644
2802 index 0000000..85f9bda
2803 --- /dev/null
2804 +++ b/arch/x86/include/asm/posix_types_x32.h
2805 @@ -0,0 +1,19 @@
2806 +#ifndef _ASM_X86_POSIX_TYPES_X32_H
2807 +#define _ASM_X86_POSIX_TYPES_X32_H
2808 +
2809 +/*
2810 + * This file is only used by user-level software, so you need to
2811 + * be a little careful about namespace pollution etc. Also, we cannot
2812 + * assume GCC is being used.
2813 + *
2814 + * These types should generally match the ones used by the 64-bit kernel,
2815 + *
2816 + */
2817 +
2818 +typedef long long __kernel_long_t;
2819 +typedef unsigned long long __kernel_ulong_t;
2820 +#define __kernel_long_t __kernel_long_t
2821 +
2822 +#include <asm/posix_types_64.h>
2823 +
2824 +#endif /* _ASM_X86_POSIX_TYPES_X32_H */
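Note on the new posix_types_x32.h above: the key point is that __kernel_long_t is "long long" on x32, so kernel-facing types keep their 64-bit width even though user-space long is 32-bit. A small check, assuming 3.4-era (or newer) installed kernel headers that provide __kernel_long_t:

    #include <stdio.h>
    #include <asm/posix_types.h>

    int main(void)
    {
        printf("sizeof(long)            = %zu\n", sizeof(long));
        printf("sizeof(__kernel_long_t) = %zu\n", sizeof(__kernel_long_t));
        /* expected on x32: 4 vs 8; on LP64 x86-64: 8 vs 8 */
        return 0;
    }
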
2825 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
2826 index 58545c9..7284c9a 100644
2827 --- a/arch/x86/include/asm/processor.h
2828 +++ b/arch/x86/include/asm/processor.h
2829 @@ -14,13 +14,13 @@ struct mm_struct;
2830 #include <asm/sigcontext.h>
2831 #include <asm/current.h>
2832 #include <asm/cpufeature.h>
2833 -#include <asm/system.h>
2834 #include <asm/page.h>
2835 #include <asm/pgtable_types.h>
2836 #include <asm/percpu.h>
2837 #include <asm/msr.h>
2838 #include <asm/desc_defs.h>
2839 #include <asm/nops.h>
2840 +#include <asm/special_insns.h>
2841
2842 #include <linux/personality.h>
2843 #include <linux/cpumask.h>
2844 @@ -29,6 +29,15 @@ struct mm_struct;
2845 #include <linux/math64.h>
2846 #include <linux/init.h>
2847 #include <linux/err.h>
2848 +#include <linux/irqflags.h>
2849 +
2850 +/*
2851 + * We handle most unaligned accesses in hardware. On the other hand
2852 + * unaligned DMA can be quite expensive on some Nehalem processors.
2853 + *
2854 + * Based on this we disable the IP header alignment in network drivers.
2855 + */
2856 +#define NET_IP_ALIGN 0
2857
2858 #define HBP_NUM 4
2859 /*
2860 @@ -162,6 +171,7 @@ extern void early_cpu_init(void);
2861 extern void identify_boot_cpu(void);
2862 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
2863 extern void print_cpu_info(struct cpuinfo_x86 *);
2864 +void print_cpu_msr(struct cpuinfo_x86 *);
2865 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
2866 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
2867 extern unsigned short num_cache_leaves;
2868 @@ -453,7 +463,7 @@ struct thread_struct {
2869 unsigned long ptrace_dr7;
2870 /* Fault info: */
2871 unsigned long cr2;
2872 - unsigned long trap_no;
2873 + unsigned long trap_nr;
2874 unsigned long error_code;
2875 /* floating point and extended processor state */
2876 struct fpu fpu;
2877 @@ -474,61 +484,6 @@ struct thread_struct {
2878 unsigned io_bitmap_max;
2879 };
2880
2881 -static inline unsigned long native_get_debugreg(int regno)
2882 -{
2883 - unsigned long val = 0; /* Damn you, gcc! */
2884 -
2885 - switch (regno) {
2886 - case 0:
2887 - asm("mov %%db0, %0" :"=r" (val));
2888 - break;
2889 - case 1:
2890 - asm("mov %%db1, %0" :"=r" (val));
2891 - break;
2892 - case 2:
2893 - asm("mov %%db2, %0" :"=r" (val));
2894 - break;
2895 - case 3:
2896 - asm("mov %%db3, %0" :"=r" (val));
2897 - break;
2898 - case 6:
2899 - asm("mov %%db6, %0" :"=r" (val));
2900 - break;
2901 - case 7:
2902 - asm("mov %%db7, %0" :"=r" (val));
2903 - break;
2904 - default:
2905 - BUG();
2906 - }
2907 - return val;
2908 -}
2909 -
2910 -static inline void native_set_debugreg(int regno, unsigned long value)
2911 -{
2912 - switch (regno) {
2913 - case 0:
2914 - asm("mov %0, %%db0" ::"r" (value));
2915 - break;
2916 - case 1:
2917 - asm("mov %0, %%db1" ::"r" (value));
2918 - break;
2919 - case 2:
2920 - asm("mov %0, %%db2" ::"r" (value));
2921 - break;
2922 - case 3:
2923 - asm("mov %0, %%db3" ::"r" (value));
2924 - break;
2925 - case 6:
2926 - asm("mov %0, %%db6" ::"r" (value));
2927 - break;
2928 - case 7:
2929 - asm("mov %0, %%db7" ::"r" (value));
2930 - break;
2931 - default:
2932 - BUG();
2933 - }
2934 -}
2935 -
2936 /*
2937 * Set IOPL bits in EFLAGS from given mask
2938 */
2939 @@ -574,14 +529,6 @@ static inline void native_swapgs(void)
2940 #define __cpuid native_cpuid
2941 #define paravirt_enabled() 0
2942
2943 -/*
2944 - * These special macros can be used to get or set a debugging register
2945 - */
2946 -#define get_debugreg(var, register) \
2947 - (var) = native_get_debugreg(register)
2948 -#define set_debugreg(value, register) \
2949 - native_set_debugreg(register, value)
2950 -
2951 static inline void load_sp0(struct tss_struct *tss,
2952 struct thread_struct *thread)
2953 {
2954 @@ -926,9 +873,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
2955 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
2956 0xc0000000 : 0xFFFFe000)
2957
2958 -#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
2959 +#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
2960 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
2961 -#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
2962 +#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
2963 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
2964
2965 #define STACK_TOP TASK_SIZE
2966 @@ -950,6 +897,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
2967
2968 #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
2969 extern unsigned long KSTK_ESP(struct task_struct *task);
2970 +
2971 +/*
2972 + * User space RSP while inside the SYSCALL fast path
2973 + */
2974 +DECLARE_PER_CPU(unsigned long, old_rsp);
2975 +
2976 #endif /* CONFIG_X86_64 */
2977
2978 extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
2979 @@ -1021,4 +974,24 @@ extern bool cpu_has_amd_erratum(const int *);
2980 #define cpu_has_amd_erratum(x) (false)
2981 #endif /* CONFIG_CPU_SUP_AMD */
2982
2983 +#ifdef CONFIG_X86_32
2984 +/*
2985 + * disable hlt during certain critical i/o operations
2986 + */
2987 +#define HAVE_DISABLE_HLT
2988 +#endif
2989 +
2990 +void disable_hlt(void);
2991 +void enable_hlt(void);
2992 +
2993 +void cpu_idle_wait(void);
2994 +
2995 +extern unsigned long arch_align_stack(unsigned long sp);
2996 +extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
2997 +
2998 +void default_idle(void);
2999 +bool set_pm_idle_to_default(void);
3000 +
3001 +void stop_this_cpu(void *dummy);
3002 +
3003 #endif /* _ASM_X86_PROCESSOR_H */
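Note on the processor.h hunk above: besides the TIF_IA32 -> TIF_ADDR32 and trap_no -> trap_nr renames, NET_IP_ALIGN is now defined here as 0, since x86 handles unaligned loads cheaply and 0 keeps DMA buffers cacheline-aligned. For context, a hedged fragment of the driver-side pattern that consumes this constant (a sketch, not code from this patch):

    /* typical RX buffer allocation in a network driver */
    skb = netdev_alloc_skb(dev, buf_len + NET_IP_ALIGN);
    if (skb)
        skb_reserve(skb, NET_IP_ALIGN);   /* 0 on x86, 2 on most other arches */
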
3004 diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
3005 index 644dd885..60bef66 100644
3006 --- a/arch/x86/include/asm/prom.h
3007 +++ b/arch/x86/include/asm/prom.h
3008 @@ -21,7 +21,6 @@
3009 #include <asm/irq.h>
3010 #include <linux/atomic.h>
3011 #include <asm/setup.h>
3012 -#include <asm/irq_controller.h>
3013
3014 #ifdef CONFIG_OF
3015 extern int of_ioapic;
3016 @@ -43,15 +42,6 @@ extern char cmd_line[COMMAND_LINE_SIZE];
3017 #define pci_address_to_pio pci_address_to_pio
3018 unsigned long pci_address_to_pio(phys_addr_t addr);
3019
3020 -/**
3021 - * irq_dispose_mapping - Unmap an interrupt
3022 - * @virq: linux virq number of the interrupt to unmap
3023 - *
3024 - * FIXME: We really should implement proper virq handling like power,
3025 - * but that's going to be major surgery.
3026 - */
3027 -static inline void irq_dispose_mapping(unsigned int virq) { }
3028 -
3029 #define HAVE_ARCH_DEVTREE_FIXUPS
3030
3031 #endif /* __ASSEMBLY__ */
3032 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
3033 index 3566454..dcfde52 100644
3034 --- a/arch/x86/include/asm/ptrace.h
3035 +++ b/arch/x86/include/asm/ptrace.h
3036 @@ -145,7 +145,6 @@ extern unsigned long
3037 convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
3038 extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
3039 int error_code, int si_code);
3040 -void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
3041
3042 extern long syscall_trace_enter(struct pt_regs *);
3043 extern void syscall_trace_leave(struct pt_regs *);
3044 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
3045 index 5e64171..1654662 100644
3046 --- a/arch/x86/include/asm/segment.h
3047 +++ b/arch/x86/include/asm/segment.h
3048 @@ -212,7 +212,61 @@
3049 #ifdef __KERNEL__
3050 #ifndef __ASSEMBLY__
3051 extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
3052 -#endif
3053 -#endif
3054 +
3055 +/*
3056 + * Load a segment. Fall back on loading the zero
3057 + * segment if something goes wrong..
3058 + */
3059 +#define loadsegment(seg, value) \
3060 +do { \
3061 + unsigned short __val = (value); \
3062 + \
3063 + asm volatile(" \n" \
3064 + "1: movl %k0,%%" #seg " \n" \
3065 + \
3066 + ".section .fixup,\"ax\" \n" \
3067 + "2: xorl %k0,%k0 \n" \
3068 + " jmp 1b \n" \
3069 + ".previous \n" \
3070 + \
3071 + _ASM_EXTABLE(1b, 2b) \
3072 + \
3073 + : "+r" (__val) : : "memory"); \
3074 +} while (0)
3075 +
3076 +/*
3077 + * Save a segment register away
3078 + */
3079 +#define savesegment(seg, value) \
3080 + asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
3081 +
3082 +/*
3083 + * x86_32 user gs accessors.
3084 + */
3085 +#ifdef CONFIG_X86_32
3086 +#ifdef CONFIG_X86_32_LAZY_GS
3087 +#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
3088 +#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
3089 +#define task_user_gs(tsk) ((tsk)->thread.gs)
3090 +#define lazy_save_gs(v) savesegment(gs, (v))
3091 +#define lazy_load_gs(v) loadsegment(gs, (v))
3092 +#else /* X86_32_LAZY_GS */
3093 +#define get_user_gs(regs) (u16)((regs)->gs)
3094 +#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
3095 +#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
3096 +#define lazy_save_gs(v) do { } while (0)
3097 +#define lazy_load_gs(v) do { } while (0)
3098 +#endif /* X86_32_LAZY_GS */
3099 +#endif /* X86_32 */
3100 +
3101 +static inline unsigned long get_limit(unsigned long segment)
3102 +{
3103 + unsigned long __limit;
3104 + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
3105 + return __limit + 1;
3106 +}
3107 +
3108 +#endif /* !__ASSEMBLY__ */
3109 +#endif /* __KERNEL__ */
3110
3111 #endif /* _ASM_X86_SEGMENT_H */
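Note on the segment.h hunk above: loadsegment()/savesegment(), the lazy-GS helpers and get_limit() move here as part of breaking up asm/system.h; the macros themselves are unchanged. A hedged usage sketch (kernel context; __USER_DS is shown only as an example selector):

    unsigned short sel;

    savesegment(fs, sel);              /* plain mov, never faults */
    if (sel != __USER_DS)
        loadsegment(fs, __USER_DS);    /* exception fixup falls back to the null selector */
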
3112 diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
3113 index 04459d2..4a08538 100644
3114 --- a/arch/x86/include/asm/sigcontext.h
3115 +++ b/arch/x86/include/asm/sigcontext.h
3116 @@ -230,34 +230,37 @@ struct sigcontext {
3117 * User-space might still rely on the old definition:
3118 */
3119 struct sigcontext {
3120 - unsigned long r8;
3121 - unsigned long r9;
3122 - unsigned long r10;
3123 - unsigned long r11;
3124 - unsigned long r12;
3125 - unsigned long r13;
3126 - unsigned long r14;
3127 - unsigned long r15;
3128 - unsigned long rdi;
3129 - unsigned long rsi;
3130 - unsigned long rbp;
3131 - unsigned long rbx;
3132 - unsigned long rdx;
3133 - unsigned long rax;
3134 - unsigned long rcx;
3135 - unsigned long rsp;
3136 - unsigned long rip;
3137 - unsigned long eflags; /* RFLAGS */
3138 - unsigned short cs;
3139 - unsigned short gs;
3140 - unsigned short fs;
3141 - unsigned short __pad0;
3142 - unsigned long err;
3143 - unsigned long trapno;
3144 - unsigned long oldmask;
3145 - unsigned long cr2;
3146 + __u64 r8;
3147 + __u64 r9;
3148 + __u64 r10;
3149 + __u64 r11;
3150 + __u64 r12;
3151 + __u64 r13;
3152 + __u64 r14;
3153 + __u64 r15;
3154 + __u64 rdi;
3155 + __u64 rsi;
3156 + __u64 rbp;
3157 + __u64 rbx;
3158 + __u64 rdx;
3159 + __u64 rax;
3160 + __u64 rcx;
3161 + __u64 rsp;
3162 + __u64 rip;
3163 + __u64 eflags; /* RFLAGS */
3164 + __u16 cs;
3165 + __u16 gs;
3166 + __u16 fs;
3167 + __u16 __pad0;
3168 + __u64 err;
3169 + __u64 trapno;
3170 + __u64 oldmask;
3171 + __u64 cr2;
3172 struct _fpstate __user *fpstate; /* zero when no FPU context */
3173 - unsigned long reserved1[8];
3174 +#ifndef __LP64__
3175 + __u32 __fpstate_pad;
3176 +#endif
3177 + __u64 reserved1[8];
3178 };
3179 #endif /* !__KERNEL__ */
3180
3181 diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h
3182 index 4e0fe26..7c7c27c 100644
3183 --- a/arch/x86/include/asm/sigframe.h
3184 +++ b/arch/x86/include/asm/sigframe.h
3185 @@ -59,12 +59,25 @@ struct rt_sigframe_ia32 {
3186 #endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */
3187
3188 #ifdef CONFIG_X86_64
3189 +
3190 struct rt_sigframe {
3191 char __user *pretcode;
3192 struct ucontext uc;
3193 struct siginfo info;
3194 /* fp state follows here */
3195 };
3196 +
3197 +#ifdef CONFIG_X86_X32_ABI
3198 +
3199 +struct rt_sigframe_x32 {
3200 + u64 pretcode;
3201 + struct ucontext_x32 uc;
3202 + compat_siginfo_t info;
3203 + /* fp state follows here */
3204 +};
3205 +
3206 +#endif /* CONFIG_X86_X32_ABI */
3207 +
3208 #endif /* CONFIG_X86_64 */
3209
3210 #endif /* _ASM_X86_SIGFRAME_H */
3211 diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
3212 new file mode 100644
3213 index 0000000..ada93b3
3214 --- /dev/null
3215 +++ b/arch/x86/include/asm/sighandling.h
3216 @@ -0,0 +1,24 @@
3217 +#ifndef _ASM_X86_SIGHANDLING_H
3218 +#define _ASM_X86_SIGHANDLING_H
3219 +
3220 +#include <linux/compiler.h>
3221 +#include <linux/ptrace.h>
3222 +#include <linux/signal.h>
3223 +
3224 +#include <asm/processor-flags.h>
3225 +
3226 +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
3227 +
3228 +#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
3229 + X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
3230 + X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
3231 + X86_EFLAGS_CF)
3232 +
3233 +void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
3234 +
3235 +int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
3236 + unsigned long *pax);
3237 +int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
3238 + struct pt_regs *regs, unsigned long mask);
3239 +
3240 +#endif /* _ASM_X86_SIGHANDLING_H */
3241 diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
3242 new file mode 100644
3243 index 0000000..41fc93a
3244 --- /dev/null
3245 +++ b/arch/x86/include/asm/special_insns.h
3246 @@ -0,0 +1,199 @@
3247 +#ifndef _ASM_X86_SPECIAL_INSNS_H
3248 +#define _ASM_X86_SPECIAL_INSNS_H
3249 +
3250 +
3251 +#ifdef __KERNEL__
3252 +
3253 +static inline void native_clts(void)
3254 +{
3255 + asm volatile("clts");
3256 +}
3257 +
3258 +/*
3259 + * Volatile isn't enough to prevent the compiler from reordering the
3260 + * read/write functions for the control registers and messing everything up.
3261 + * A memory clobber would solve the problem, but would prevent reordering of
3262 + * all loads stores around it, which can hurt performance. Solution is to
3263 + * use a variable and mimic reads and writes to it to enforce serialization
3264 + */
3265 +static unsigned long __force_order;
3266 +
3267 +static inline unsigned long native_read_cr0(void)
3268 +{
3269 + unsigned long val;
3270 + asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
3271 + return val;
3272 +}
3273 +
3274 +static inline void native_write_cr0(unsigned long val)
3275 +{
3276 + asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
3277 +}
3278 +
3279 +static inline unsigned long native_read_cr2(void)
3280 +{
3281 + unsigned long val;
3282 + asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
3283 + return val;
3284 +}
3285 +
3286 +static inline void native_write_cr2(unsigned long val)
3287 +{
3288 + asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
3289 +}
3290 +
3291 +static inline unsigned long native_read_cr3(void)
3292 +{
3293 + unsigned long val;
3294 + asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
3295 + return val;
3296 +}
3297 +
3298 +static inline void native_write_cr3(unsigned long val)
3299 +{
3300 + asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
3301 +}
3302 +
3303 +static inline unsigned long native_read_cr4(void)
3304 +{
3305 + unsigned long val;
3306 + asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
3307 + return val;
3308 +}
3309 +
3310 +static inline unsigned long native_read_cr4_safe(void)
3311 +{
3312 + unsigned long val;
3313 + /* This could fault if %cr4 does not exist. In x86_64, a cr4 always
3314 + * exists, so it will never fail. */
3315 +#ifdef CONFIG_X86_32
3316 + asm volatile("1: mov %%cr4, %0\n"
3317 + "2:\n"
3318 + _ASM_EXTABLE(1b, 2b)
3319 + : "=r" (val), "=m" (__force_order) : "0" (0));
3320 +#else
3321 + val = native_read_cr4();
3322 +#endif
3323 + return val;
3324 +}
3325 +
3326 +static inline void native_write_cr4(unsigned long val)
3327 +{
3328 + asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
3329 +}
3330 +
3331 +#ifdef CONFIG_X86_64
3332 +static inline unsigned long native_read_cr8(void)
3333 +{
3334 + unsigned long cr8;
3335 + asm volatile("movq %%cr8,%0" : "=r" (cr8));
3336 + return cr8;
3337 +}
3338 +
3339 +static inline void native_write_cr8(unsigned long val)
3340 +{
3341 + asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
3342 +}
3343 +#endif
3344 +
3345 +static inline void native_wbinvd(void)
3346 +{
3347 + asm volatile("wbinvd": : :"memory");
3348 +}
3349 +
3350 +extern void native_load_gs_index(unsigned);
3351 +
3352 +#ifdef CONFIG_PARAVIRT
3353 +#include <asm/paravirt.h>
3354 +#else
3355 +
3356 +static inline unsigned long read_cr0(void)
3357 +{
3358 + return native_read_cr0();
3359 +}
3360 +
3361 +static inline void write_cr0(unsigned long x)
3362 +{
3363 + native_write_cr0(x);
3364 +}
3365 +
3366 +static inline unsigned long read_cr2(void)
3367 +{
3368 + return native_read_cr2();
3369 +}
3370 +
3371 +static inline void write_cr2(unsigned long x)
3372 +{
3373 + native_write_cr2(x);
3374 +}
3375 +
3376 +static inline unsigned long read_cr3(void)
3377 +{
3378 + return native_read_cr3();
3379 +}
3380 +
3381 +static inline void write_cr3(unsigned long x)
3382 +{
3383 + native_write_cr3(x);
3384 +}
3385 +
3386 +static inline unsigned long read_cr4(void)
3387 +{
3388 + return native_read_cr4();
3389 +}
3390 +
3391 +static inline unsigned long read_cr4_safe(void)
3392 +{
3393 + return native_read_cr4_safe();
3394 +}
3395 +
3396 +static inline void write_cr4(unsigned long x)
3397 +{
3398 + native_write_cr4(x);
3399 +}
3400 +
3401 +static inline void wbinvd(void)
3402 +{
3403 + native_wbinvd();
3404 +}
3405 +
3406 +#ifdef CONFIG_X86_64
3407 +
3408 +static inline unsigned long read_cr8(void)
3409 +{
3410 + return native_read_cr8();
3411 +}
3412 +
3413 +static inline void write_cr8(unsigned long x)
3414 +{
3415 + native_write_cr8(x);
3416 +}
3417 +
3418 +static inline void load_gs_index(unsigned selector)
3419 +{
3420 + native_load_gs_index(selector);
3421 +}
3422 +
3423 +#endif
3424 +
3425 +/* Clear the 'TS' bit */
3426 +static inline void clts(void)
3427 +{
3428 + native_clts();
3429 +}
3430 +
3431 +#endif/* CONFIG_PARAVIRT */
3432 +
3433 +#define stts() write_cr0(read_cr0() | X86_CR0_TS)
3434 +
3435 +static inline void clflush(volatile void *__p)
3436 +{
3437 + asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
3438 +}
3439 +
3440 +#define nop() asm volatile ("nop")
3441 +
3442 +
3443 +#endif /* __KERNEL__ */
3444 +
3445 +#endif /* _ASM_X86_SPECIAL_INSNS_H */
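Note on the new special_insns.h above: this collects the CR0-CR8 accessors, clts/stts, wbinvd, clflush and nop() that previously lived in asm/system.h; callers normally pick them up indirectly through asm/processor.h. A hedged sketch of a typical consumer:

    #include <asm/special_insns.h>

    static void flush_one_line(void *p)
    {
        /* real users go through clflush_cache_range(), which adds the
           memory barriers that clflush needs for ordering */
        clflush(p);    /* write back + invalidate the cache line holding p */
    }
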
3446 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
3447 index a82c2bf..76bfa2c 100644
3448 --- a/arch/x86/include/asm/spinlock.h
3449 +++ b/arch/x86/include/asm/spinlock.h
3450 @@ -88,14 +88,14 @@ static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
3451 {
3452 struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
3453
3454 - return !!(tmp.tail ^ tmp.head);
3455 + return tmp.tail != tmp.head;
3456 }
3457
3458 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
3459 {
3460 struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
3461
3462 - return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
3463 + return (__ticket_t)(tmp.tail - tmp.head) > 1;
3464 }
3465
3466 #ifndef CONFIG_PARAVIRT_SPINLOCKS
3467 diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
3468 index 8ebd5df..ad0ad07 100644
3469 --- a/arch/x86/include/asm/spinlock_types.h
3470 +++ b/arch/x86/include/asm/spinlock_types.h
3471 @@ -16,7 +16,6 @@ typedef u32 __ticketpair_t;
3472 #endif
3473
3474 #define TICKET_SHIFT (sizeof(__ticket_t) * 8)
3475 -#define TICKET_MASK ((__ticket_t)((1 << TICKET_SHIFT) - 1))
3476
3477 typedef struct arch_spinlock {
3478 union {
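Note on the spinlock.h/spinlock_types.h hunks above: because __ticket_t is an unsigned type sized to the ticket fields, plain unsigned subtraction already wraps correctly, so TICKET_MASK can be dropped. A minimal illustration of why the cast alone is enough:

    /* __ticket_t is u8 when NR_CPUS < 256, mirroring the kernel's definition */
    typedef unsigned char __ticket_t;

    int is_contended(__ticket_t head, __ticket_t tail)
    {
        return (__ticket_t)(tail - head) > 1;
    }
    /* e.g. head = 0xfe, tail = 0x01: (0x01 - 0xfe) truncated to 8 bits is 3,
       so contention is still reported correctly across wraparound */
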
3479 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
3480 index 1575177..b5d9533 100644
3481 --- a/arch/x86/include/asm/stackprotector.h
3482 +++ b/arch/x86/include/asm/stackprotector.h
3483 @@ -38,7 +38,6 @@
3484 #include <asm/tsc.h>
3485 #include <asm/processor.h>
3486 #include <asm/percpu.h>
3487 -#include <asm/system.h>
3488 #include <asm/desc.h>
3489 #include <linux/random.h>
3490
3491 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
3492 new file mode 100644
3493 index 0000000..4ec45b3
3494 --- /dev/null
3495 +++ b/arch/x86/include/asm/switch_to.h
3496 @@ -0,0 +1,129 @@
3497 +#ifndef _ASM_X86_SWITCH_TO_H
3498 +#define _ASM_X86_SWITCH_TO_H
3499 +
3500 +struct task_struct; /* one of the stranger aspects of C forward declarations */
3501 +struct task_struct *__switch_to(struct task_struct *prev,
3502 + struct task_struct *next);
3503 +struct tss_struct;
3504 +void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
3505 + struct tss_struct *tss);
3506 +
3507 +#ifdef CONFIG_X86_32
3508 +
3509 +#ifdef CONFIG_CC_STACKPROTECTOR
3510 +#define __switch_canary \
3511 + "movl %P[task_canary](%[next]), %%ebx\n\t" \
3512 + "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
3513 +#define __switch_canary_oparam \
3514 + , [stack_canary] "=m" (stack_canary.canary)
3515 +#define __switch_canary_iparam \
3516 + , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
3517 +#else /* CC_STACKPROTECTOR */
3518 +#define __switch_canary
3519 +#define __switch_canary_oparam
3520 +#define __switch_canary_iparam
3521 +#endif /* CC_STACKPROTECTOR */
3522 +
3523 +/*
3524 + * Saving eflags is important. It switches not only IOPL between tasks,
3525 + * it also protects other tasks from NT leaking through sysenter etc.
3526 + */
3527 +#define switch_to(prev, next, last) \
3528 +do { \
3529 + /* \
3530 + * Context-switching clobbers all registers, so we clobber \
3531 + * them explicitly, via unused output variables. \
3532 + * (EAX and EBP is not listed because EBP is saved/restored \
3533 + * explicitly for wchan access and EAX is the return value of \
3534 + * __switch_to()) \
3535 + */ \
3536 + unsigned long ebx, ecx, edx, esi, edi; \
3537 + \
3538 + asm volatile("pushfl\n\t" /* save flags */ \
3539 + "pushl %%ebp\n\t" /* save EBP */ \
3540 + "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
3541 + "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
3542 + "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
3543 + "pushl %[next_ip]\n\t" /* restore EIP */ \
3544 + __switch_canary \
3545 + "jmp __switch_to\n" /* regparm call */ \
3546 + "1:\t" \
3547 + "popl %%ebp\n\t" /* restore EBP */ \
3548 + "popfl\n" /* restore flags */ \
3549 + \
3550 + /* output parameters */ \
3551 + : [prev_sp] "=m" (prev->thread.sp), \
3552 + [prev_ip] "=m" (prev->thread.ip), \
3553 + "=a" (last), \
3554 + \
3555 + /* clobbered output registers: */ \
3556 + "=b" (ebx), "=c" (ecx), "=d" (edx), \
3557 + "=S" (esi), "=D" (edi) \
3558 + \
3559 + __switch_canary_oparam \
3560 + \
3561 + /* input parameters: */ \
3562 + : [next_sp] "m" (next->thread.sp), \
3563 + [next_ip] "m" (next->thread.ip), \
3564 + \
3565 + /* regparm parameters for __switch_to(): */ \
3566 + [prev] "a" (prev), \
3567 + [next] "d" (next) \
3568 + \
3569 + __switch_canary_iparam \
3570 + \
3571 + : /* reloaded segment registers */ \
3572 + "memory"); \
3573 +} while (0)
3574 +
3575 +#else /* CONFIG_X86_32 */
3576 +
3577 +/* frame pointer must be last for get_wchan */
3578 +#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
3579 +#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
3580 +
3581 +#define __EXTRA_CLOBBER \
3582 + , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
3583 + "r12", "r13", "r14", "r15"
3584 +
3585 +#ifdef CONFIG_CC_STACKPROTECTOR
3586 +#define __switch_canary \
3587 + "movq %P[task_canary](%%rsi),%%r8\n\t" \
3588 + "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
3589 +#define __switch_canary_oparam \
3590 + , [gs_canary] "=m" (irq_stack_union.stack_canary)
3591 +#define __switch_canary_iparam \
3592 + , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
3593 +#else /* CC_STACKPROTECTOR */
3594 +#define __switch_canary
3595 +#define __switch_canary_oparam
3596 +#define __switch_canary_iparam
3597 +#endif /* CC_STACKPROTECTOR */
3598 +
3599 +/* Save restore flags to clear handle leaking NT */
3600 +#define switch_to(prev, next, last) \
3601 + asm volatile(SAVE_CONTEXT \
3602 + "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
3603 + "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
3604 + "call __switch_to\n\t" \
3605 + "movq "__percpu_arg([current_task])",%%rsi\n\t" \
3606 + __switch_canary \
3607 + "movq %P[thread_info](%%rsi),%%r8\n\t" \
3608 + "movq %%rax,%%rdi\n\t" \
3609 + "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
3610 + "jnz ret_from_fork\n\t" \
3611 + RESTORE_CONTEXT \
3612 + : "=a" (last) \
3613 + __switch_canary_oparam \
3614 + : [next] "S" (next), [prev] "D" (prev), \
3615 + [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
3616 + [ti_flags] "i" (offsetof(struct thread_info, flags)), \
3617 + [_tif_fork] "i" (_TIF_FORK), \
3618 + [thread_info] "i" (offsetof(struct task_struct, stack)), \
3619 + [current_task] "m" (current_task) \
3620 + __switch_canary_iparam \
3621 + : "memory", "cc" __EXTRA_CLOBBER)
3622 +
3623 +#endif /* CONFIG_X86_32 */
3624 +
3625 +#endif /* _ASM_X86_SWITCH_TO_H */
3626 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
3627 index cb23852..3fda9db4 100644
3628 --- a/arch/x86/include/asm/sys_ia32.h
3629 +++ b/arch/x86/include/asm/sys_ia32.h
3630 @@ -10,6 +10,8 @@
3631 #ifndef _ASM_X86_SYS_IA32_H
3632 #define _ASM_X86_SYS_IA32_H
3633
3634 +#ifdef CONFIG_COMPAT
3635 +
3636 #include <linux/compiler.h>
3637 #include <linux/linkage.h>
3638 #include <linux/types.h>
3639 @@ -36,8 +38,6 @@ asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *,
3640 struct sigaction32 __user *, unsigned int);
3641 asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
3642 struct old_sigaction32 __user *);
3643 -asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
3644 - compat_sigset_t __user *, unsigned int);
3645 asmlinkage long sys32_alarm(unsigned int);
3646
3647 asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
3648 @@ -83,4 +83,7 @@ asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32);
3649
3650 asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
3651 const char __user *);
3652 +
3653 +#endif /* CONFIG_COMPAT */
3654 +
3655 #endif /* _ASM_X86_SYS_IA32_H */
3656 diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
3657 index d962e56..386b786 100644
3658 --- a/arch/x86/include/asm/syscall.h
3659 +++ b/arch/x86/include/asm/syscall.h
3660 @@ -16,6 +16,7 @@
3661 #include <linux/sched.h>
3662 #include <linux/err.h>
3663 #include <asm/asm-offsets.h> /* For NR_syscalls */
3664 +#include <asm/unistd.h>
3665
3666 extern const unsigned long sys_call_table[];
3667
3668 @@ -26,13 +27,13 @@ extern const unsigned long sys_call_table[];
3669 */
3670 static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
3671 {
3672 - return regs->orig_ax;
3673 + return regs->orig_ax & __SYSCALL_MASK;
3674 }
3675
3676 static inline void syscall_rollback(struct task_struct *task,
3677 struct pt_regs *regs)
3678 {
3679 - regs->ax = regs->orig_ax;
3680 + regs->ax = regs->orig_ax & __SYSCALL_MASK;
3681 }
3682
3683 static inline long syscall_get_error(struct task_struct *task,
3684 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
3685 deleted file mode 100644
3686 index 2d2f01c..0000000
3687 --- a/arch/x86/include/asm/system.h
3688 +++ /dev/null
3689 @@ -1,523 +0,0 @@
3690 -#ifndef _ASM_X86_SYSTEM_H
3691 -#define _ASM_X86_SYSTEM_H
3692 -
3693 -#include <asm/asm.h>
3694 -#include <asm/segment.h>
3695 -#include <asm/cpufeature.h>
3696 -#include <asm/cmpxchg.h>
3697 -#include <asm/nops.h>
3698 -
3699 -#include <linux/kernel.h>
3700 -#include <linux/irqflags.h>
3701 -
3702 -/* entries in ARCH_DLINFO: */
3703 -#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
3704 -# define AT_VECTOR_SIZE_ARCH 2
3705 -#else /* else it's non-compat x86-64 */
3706 -# define AT_VECTOR_SIZE_ARCH 1
3707 -#endif
3708 -
3709 -struct task_struct; /* one of the stranger aspects of C forward declarations */
3710 -struct task_struct *__switch_to(struct task_struct *prev,
3711 - struct task_struct *next);
3712 -struct tss_struct;
3713 -void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
3714 - struct tss_struct *tss);
3715 -extern void show_regs_common(void);
3716 -
3717 -#ifdef CONFIG_X86_32
3718 -
3719 -#ifdef CONFIG_CC_STACKPROTECTOR
3720 -#define __switch_canary \
3721 - "movl %P[task_canary](%[next]), %%ebx\n\t" \
3722 - "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
3723 -#define __switch_canary_oparam \
3724 - , [stack_canary] "=m" (stack_canary.canary)
3725 -#define __switch_canary_iparam \
3726 - , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
3727 -#else /* CC_STACKPROTECTOR */
3728 -#define __switch_canary
3729 -#define __switch_canary_oparam
3730 -#define __switch_canary_iparam
3731 -#endif /* CC_STACKPROTECTOR */
3732 -
3733 -/*
3734 - * Saving eflags is important. It switches not only IOPL between tasks,
3735 - * it also protects other tasks from NT leaking through sysenter etc.
3736 - */
3737 -#define switch_to(prev, next, last) \
3738 -do { \
3739 - /* \
3740 - * Context-switching clobbers all registers, so we clobber \
3741 - * them explicitly, via unused output variables. \
3742 - * (EAX and EBP is not listed because EBP is saved/restored \
3743 - * explicitly for wchan access and EAX is the return value of \
3744 - * __switch_to()) \
3745 - */ \
3746 - unsigned long ebx, ecx, edx, esi, edi; \
3747 - \
3748 - asm volatile("pushfl\n\t" /* save flags */ \
3749 - "pushl %%ebp\n\t" /* save EBP */ \
3750 - "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
3751 - "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
3752 - "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
3753 - "pushl %[next_ip]\n\t" /* restore EIP */ \
3754 - __switch_canary \
3755 - "jmp __switch_to\n" /* regparm call */ \
3756 - "1:\t" \
3757 - "popl %%ebp\n\t" /* restore EBP */ \
3758 - "popfl\n" /* restore flags */ \
3759 - \
3760 - /* output parameters */ \
3761 - : [prev_sp] "=m" (prev->thread.sp), \
3762 - [prev_ip] "=m" (prev->thread.ip), \
3763 - "=a" (last), \
3764 - \
3765 - /* clobbered output registers: */ \
3766 - "=b" (ebx), "=c" (ecx), "=d" (edx), \
3767 - "=S" (esi), "=D" (edi) \
3768 - \
3769 - __switch_canary_oparam \
3770 - \
3771 - /* input parameters: */ \
3772 - : [next_sp] "m" (next->thread.sp), \
3773 - [next_ip] "m" (next->thread.ip), \
3774 - \
3775 - /* regparm parameters for __switch_to(): */ \
3776 - [prev] "a" (prev), \
3777 - [next] "d" (next) \
3778 - \
3779 - __switch_canary_iparam \
3780 - \
3781 - : /* reloaded segment registers */ \
3782 - "memory"); \
3783 -} while (0)
3784 -
3785 -/*
3786 - * disable hlt during certain critical i/o operations
3787 - */
3788 -#define HAVE_DISABLE_HLT
3789 -#else
3790 -
3791 -/* frame pointer must be last for get_wchan */
3792 -#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
3793 -#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
3794 -
3795 -#define __EXTRA_CLOBBER \
3796 - , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
3797 - "r12", "r13", "r14", "r15"
3798 -
3799 -#ifdef CONFIG_CC_STACKPROTECTOR
3800 -#define __switch_canary \
3801 - "movq %P[task_canary](%%rsi),%%r8\n\t" \
3802 - "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
3803 -#define __switch_canary_oparam \
3804 - , [gs_canary] "=m" (irq_stack_union.stack_canary)
3805 -#define __switch_canary_iparam \
3806 - , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
3807 -#else /* CC_STACKPROTECTOR */
3808 -#define __switch_canary
3809 -#define __switch_canary_oparam
3810 -#define __switch_canary_iparam
3811 -#endif /* CC_STACKPROTECTOR */
3812 -
3813 -/* Save restore flags to clear handle leaking NT */
3814 -#define switch_to(prev, next, last) \
3815 - asm volatile(SAVE_CONTEXT \
3816 - "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
3817 - "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
3818 - "call __switch_to\n\t" \
3819 - "movq "__percpu_arg([current_task])",%%rsi\n\t" \
3820 - __switch_canary \
3821 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
3822 - "movq %%rax,%%rdi\n\t" \
3823 - "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
3824 - "jnz ret_from_fork\n\t" \
3825 - RESTORE_CONTEXT \
3826 - : "=a" (last) \
3827 - __switch_canary_oparam \
3828 - : [next] "S" (next), [prev] "D" (prev), \
3829 - [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
3830 - [ti_flags] "i" (offsetof(struct thread_info, flags)), \
3831 - [_tif_fork] "i" (_TIF_FORK), \
3832 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
3833 - [current_task] "m" (current_task) \
3834 - __switch_canary_iparam \
3835 - : "memory", "cc" __EXTRA_CLOBBER)
3836 -#endif
3837 -
3838 -#ifdef __KERNEL__
3839 -
3840 -extern void native_load_gs_index(unsigned);
3841 -
3842 -/*
3843 - * Load a segment. Fall back on loading the zero
3844 - * segment if something goes wrong..
3845 - */
3846 -#define loadsegment(seg, value) \
3847 -do { \
3848 - unsigned short __val = (value); \
3849 - \
3850 - asm volatile(" \n" \
3851 - "1: movl %k0,%%" #seg " \n" \
3852 - \
3853 - ".section .fixup,\"ax\" \n" \
3854 - "2: xorl %k0,%k0 \n" \
3855 - " jmp 1b \n" \
3856 - ".previous \n" \
3857 - \
3858 - _ASM_EXTABLE(1b, 2b) \
3859 - \
3860 - : "+r" (__val) : : "memory"); \
3861 -} while (0)
3862 -
3863 -/*
3864 - * Save a segment register away
3865 - */
3866 -#define savesegment(seg, value) \
3867 - asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
3868 -
3869 -/*
3870 - * x86_32 user gs accessors.
3871 - */
3872 -#ifdef CONFIG_X86_32
3873 -#ifdef CONFIG_X86_32_LAZY_GS
3874 -#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
3875 -#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
3876 -#define task_user_gs(tsk) ((tsk)->thread.gs)
3877 -#define lazy_save_gs(v) savesegment(gs, (v))
3878 -#define lazy_load_gs(v) loadsegment(gs, (v))
3879 -#else /* X86_32_LAZY_GS */
3880 -#define get_user_gs(regs) (u16)((regs)->gs)
3881 -#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
3882 -#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
3883 -#define lazy_save_gs(v) do { } while (0)
3884 -#define lazy_load_gs(v) do { } while (0)
3885 -#endif /* X86_32_LAZY_GS */
3886 -#endif /* X86_32 */
3887 -
3888 -static inline unsigned long get_limit(unsigned long segment)
3889 -{
3890 - unsigned long __limit;
3891 - asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
3892 - return __limit + 1;
3893 -}
3894 -
3895 -static inline void native_clts(void)
3896 -{
3897 - asm volatile("clts");
3898 -}
3899 -
3900 -/*
3901 - * Volatile isn't enough to prevent the compiler from reordering the
3902 - * read/write functions for the control registers and messing everything up.
3903 - * A memory clobber would solve the problem, but would prevent reordering of
3904 - * all loads stores around it, which can hurt performance. Solution is to
3905 - * use a variable and mimic reads and writes to it to enforce serialization
3906 - */
3907 -static unsigned long __force_order;
3908 -
3909 -static inline unsigned long native_read_cr0(void)
3910 -{
3911 - unsigned long val;
3912 - asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
3913 - return val;
3914 -}
3915 -
3916 -static inline void native_write_cr0(unsigned long val)
3917 -{
3918 - asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
3919 -}
3920 -
3921 -static inline unsigned long native_read_cr2(void)
3922 -{
3923 - unsigned long val;
3924 - asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
3925 - return val;
3926 -}
3927 -
3928 -static inline void native_write_cr2(unsigned long val)
3929 -{
3930 - asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
3931 -}
3932 -
3933 -static inline unsigned long native_read_cr3(void)
3934 -{
3935 - unsigned long val;
3936 - asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
3937 - return val;
3938 -}
3939 -
3940 -static inline void native_write_cr3(unsigned long val)
3941 -{
3942 - asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
3943 -}
3944 -
3945 -static inline unsigned long native_read_cr4(void)
3946 -{
3947 - unsigned long val;
3948 - asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
3949 - return val;
3950 -}
3951 -
3952 -static inline unsigned long native_read_cr4_safe(void)
3953 -{
3954 - unsigned long val;
3955 - /* This could fault if %cr4 does not exist. In x86_64, a cr4 always
3956 - * exists, so it will never fail. */
3957 -#ifdef CONFIG_X86_32
3958 - asm volatile("1: mov %%cr4, %0\n"
3959 - "2:\n"
3960 - _ASM_EXTABLE(1b, 2b)
3961 - : "=r" (val), "=m" (__force_order) : "0" (0));
3962 -#else
3963 - val = native_read_cr4();
3964 -#endif
3965 - return val;
3966 -}
3967 -
3968 -static inline void native_write_cr4(unsigned long val)
3969 -{
3970 - asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
3971 -}
3972 -
3973 -#ifdef CONFIG_X86_64
3974 -static inline unsigned long native_read_cr8(void)
3975 -{
3976 - unsigned long cr8;
3977 - asm volatile("movq %%cr8,%0" : "=r" (cr8));
3978 - return cr8;
3979 -}
3980 -
3981 -static inline void native_write_cr8(unsigned long val)
3982 -{
3983 - asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
3984 -}
3985 -#endif
3986 -
3987 -static inline void native_wbinvd(void)
3988 -{
3989 - asm volatile("wbinvd": : :"memory");
3990 -}
3991 -
3992 -#ifdef CONFIG_PARAVIRT
3993 -#include <asm/paravirt.h>
3994 -#else
3995 -
3996 -static inline unsigned long read_cr0(void)
3997 -{
3998 - return native_read_cr0();
3999 -}
4000 -
4001 -static inline void write_cr0(unsigned long x)
4002 -{
4003 - native_write_cr0(x);
4004 -}
4005 -
4006 -static inline unsigned long read_cr2(void)
4007 -{
4008 - return native_read_cr2();
4009 -}
4010 -
4011 -static inline void write_cr2(unsigned long x)
4012 -{
4013 - native_write_cr2(x);
4014 -}
4015 -
4016 -static inline unsigned long read_cr3(void)
4017 -{
4018 - return native_read_cr3();
4019 -}
4020 -
4021 -static inline void write_cr3(unsigned long x)
4022 -{
4023 - native_write_cr3(x);
4024 -}
4025 -
4026 -static inline unsigned long read_cr4(void)
4027 -{
4028 - return native_read_cr4();
4029 -}
4030 -
4031 -static inline unsigned long read_cr4_safe(void)
4032 -{
4033 - return native_read_cr4_safe();
4034 -}
4035 -
4036 -static inline void write_cr4(unsigned long x)
4037 -{
4038 - native_write_cr4(x);
4039 -}
4040 -
4041 -static inline void wbinvd(void)
4042 -{
4043 - native_wbinvd();
4044 -}
4045 -
4046 -#ifdef CONFIG_X86_64
4047 -
4048 -static inline unsigned long read_cr8(void)
4049 -{
4050 - return native_read_cr8();
4051 -}
4052 -
4053 -static inline void write_cr8(unsigned long x)
4054 -{
4055 - native_write_cr8(x);
4056 -}
4057 -
4058 -static inline void load_gs_index(unsigned selector)
4059 -{
4060 - native_load_gs_index(selector);
4061 -}
4062 -
4063 -#endif
4064 -
4065 -/* Clear the 'TS' bit */
4066 -static inline void clts(void)
4067 -{
4068 - native_clts();
4069 -}
4070 -
4071 -#endif/* CONFIG_PARAVIRT */
4072 -
4073 -#define stts() write_cr0(read_cr0() | X86_CR0_TS)
4074 -
4075 -#endif /* __KERNEL__ */
4076 -
4077 -static inline void clflush(volatile void *__p)
4078 -{
4079 - asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
4080 -}
4081 -
4082 -#define nop() asm volatile ("nop")
4083 -
4084 -void disable_hlt(void);
4085 -void enable_hlt(void);
4086 -
4087 -void cpu_idle_wait(void);
4088 -
4089 -extern unsigned long arch_align_stack(unsigned long sp);
4090 -extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
4091 -
4092 -void default_idle(void);
4093 -bool set_pm_idle_to_default(void);
4094 -
4095 -void stop_this_cpu(void *dummy);
4096 -
4097 -/*
4098 - * Force strict CPU ordering.
4099 - * And yes, this is required on UP too when we're talking
4100 - * to devices.
4101 - */
4102 -#ifdef CONFIG_X86_32
4103 -/*
4104 - * Some non-Intel clones support out of order store. wmb() ceases to be a
4105 - * nop for these.
4106 - */
4107 -#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
4108 -#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
4109 -#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
4110 -#else
4111 -#define mb() asm volatile("mfence":::"memory")
4112 -#define rmb() asm volatile("lfence":::"memory")
4113 -#define wmb() asm volatile("sfence" ::: "memory")
4114 -#endif
4115 -
4116 -/**
4117 - * read_barrier_depends - Flush all pending reads that subsequents reads
4118 - * depend on.
4119 - *
4120 - * No data-dependent reads from memory-like regions are ever reordered
4121 - * over this barrier. All reads preceding this primitive are guaranteed
4122 - * to access memory (but not necessarily other CPUs' caches) before any
4123 - * reads following this primitive that depend on the data return by
4124 - * any of the preceding reads. This primitive is much lighter weight than
4125 - * rmb() on most CPUs, and is never heavier weight than is
4126 - * rmb().
4127 - *
4128 - * These ordering constraints are respected by both the local CPU
4129 - * and the compiler.
4130 - *
4131 - * Ordering is not guaranteed by anything other than these primitives,
4132 - * not even by data dependencies. See the documentation for
4133 - * memory_barrier() for examples and URLs to more information.
4134 - *
4135 - * For example, the following code would force ordering (the initial
4136 - * value of "a" is zero, "b" is one, and "p" is "&a"):
4137 - *
4138 - * <programlisting>
4139 - * CPU 0 CPU 1
4140 - *
4141 - * b = 2;
4142 - * memory_barrier();
4143 - * p = &b; q = p;
4144 - * read_barrier_depends();
4145 - * d = *q;
4146 - * </programlisting>
4147 - *
4148 - * because the read of "*q" depends on the read of "p" and these
4149 - * two reads are separated by a read_barrier_depends(). However,
4150 - * the following code, with the same initial values for "a" and "b":
4151 - *
4152 - * <programlisting>
4153 - * CPU 0 CPU 1
4154 - *
4155 - * a = 2;
4156 - * memory_barrier();
4157 - * b = 3; y = b;
4158 - * read_barrier_depends();
4159 - * x = a;
4160 - * </programlisting>
4161 - *
4162 - * does not enforce ordering, since there is no data dependency between
4163 - * the read of "a" and the read of "b". Therefore, on some CPUs, such
4164 - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
4165 - * in cases like this where there are no data dependencies.
4166 - **/
4167 -
4168 -#define read_barrier_depends() do { } while (0)
4169 -
4170 -#ifdef CONFIG_SMP
4171 -#define smp_mb() mb()
4172 -#ifdef CONFIG_X86_PPRO_FENCE
4173 -# define smp_rmb() rmb()
4174 -#else
4175 -# define smp_rmb() barrier()
4176 -#endif
4177 -#ifdef CONFIG_X86_OOSTORE
4178 -# define smp_wmb() wmb()
4179 -#else
4180 -# define smp_wmb() barrier()
4181 -#endif
4182 -#define smp_read_barrier_depends() read_barrier_depends()
4183 -#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
4184 -#else
4185 -#define smp_mb() barrier()
4186 -#define smp_rmb() barrier()
4187 -#define smp_wmb() barrier()
4188 -#define smp_read_barrier_depends() do { } while (0)
4189 -#define set_mb(var, value) do { var = value; barrier(); } while (0)
4190 -#endif
4191 -
4192 -/*
4193 - * Stop RDTSC speculation. This is needed when you need to use RDTSC
4194 - * (or get_cycles or vread that possibly accesses the TSC) in a defined
4195 - * code region.
4196 - *
4197 - * (Could use an alternative three way for this if there was one.)
4198 - */
4199 -static __always_inline void rdtsc_barrier(void)
4200 -{
4201 - alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
4202 - alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
4203 -}
4204 -
4205 -/*
4206 - * We handle most unaligned accesses in hardware. On the other hand
4207 - * unaligned DMA can be quite expensive on some Nehalem processors.
4208 - *
4209 - * Based on this we disable the IP header alignment in network drivers.
4210 - */
4211 -#define NET_IP_ALIGN 0
4212 -#endif /* _ASM_X86_SYSTEM_H */
4213 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
4214 index cfd8144..ad6df8c 100644
4215 --- a/arch/x86/include/asm/thread_info.h
4216 +++ b/arch/x86/include/asm/thread_info.h
4217 @@ -86,7 +86,7 @@ struct thread_info {
4218 #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
4219 #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
4220 #define TIF_NOTSC 16 /* TSC is not accessible in userland */
4221 -#define TIF_IA32 17 /* 32bit process */
4222 +#define TIF_IA32 17 /* IA32 compatibility process */
4223 #define TIF_FORK 18 /* ret_from_fork */
4224 #define TIF_MEMDIE 20 /* is terminating due to OOM killer */
4225 #define TIF_DEBUG 21 /* uses debug registers */
4226 @@ -95,6 +95,8 @@ struct thread_info {
4227 #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
4228 #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
4229 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
4230 +#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
4231 +#define TIF_X32 30 /* 32-bit native x86-64 binary */
4232
4233 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
4234 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
4235 @@ -116,6 +118,8 @@ struct thread_info {
4236 #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
4237 #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
4238 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
4239 +#define _TIF_ADDR32 (1 << TIF_ADDR32)
4240 +#define _TIF_X32 (1 << TIF_X32)
4241
4242 /* work to do in syscall_trace_enter() */
4243 #define _TIF_WORK_SYSCALL_ENTRY \
4244 @@ -262,6 +266,18 @@ static inline void set_restore_sigmask(void)
4245 ti->status |= TS_RESTORE_SIGMASK;
4246 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
4247 }
4248 +
4249 +static inline bool is_ia32_task(void)
4250 +{
4251 +#ifdef CONFIG_X86_32
4252 + return true;
4253 +#endif
4254 +#ifdef CONFIG_IA32_EMULATION
4255 + if (current_thread_info()->status & TS_COMPAT)
4256 + return true;
4257 +#endif
4258 + return false;
4259 +}
4260 #endif /* !__ASSEMBLY__ */
4261
4262 #ifndef __ASSEMBLY__
4263 diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
4264 index 431793e..34baa0e 100644
4265 --- a/arch/x86/include/asm/timer.h
4266 +++ b/arch/x86/include/asm/timer.h
4267 @@ -57,14 +57,10 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
4268
4269 static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
4270 {
4271 - unsigned long long quot;
4272 - unsigned long long rem;
4273 int cpu = smp_processor_id();
4274 unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
4275 - quot = (cyc >> CYC2NS_SCALE_FACTOR);
4276 - rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
4277 - ns += quot * per_cpu(cyc2ns, cpu) +
4278 - ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
4279 + ns += mult_frac(cyc, per_cpu(cyc2ns, cpu),
4280 + (1UL << CYC2NS_SCALE_FACTOR));
4281 return ns;
4282 }
4283
4284 diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
4285 index 169be89..c0e108e 100644
4286 --- a/arch/x86/include/asm/tlbflush.h
4287 +++ b/arch/x86/include/asm/tlbflush.h
4288 @@ -5,7 +5,7 @@
4289 #include <linux/sched.h>
4290
4291 #include <asm/processor.h>
4292 -#include <asm/system.h>
4293 +#include <asm/special_insns.h>
4294
4295 #ifdef CONFIG_PARAVIRT
4296 #include <asm/paravirt.h>
4297 diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
4298 index 0012d09..88eae2a 100644
4299 --- a/arch/x86/include/asm/traps.h
4300 +++ b/arch/x86/include/asm/traps.h
4301 @@ -89,4 +89,29 @@ asmlinkage void smp_thermal_interrupt(void);
4302 asmlinkage void mce_threshold_interrupt(void);
4303 #endif
4304
4305 +/* Interrupts/Exceptions */
4306 +enum {
4307 + X86_TRAP_DE = 0, /* 0, Divide-by-zero */
4308 + X86_TRAP_DB, /* 1, Debug */
4309 + X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
4310 + X86_TRAP_BP, /* 3, Breakpoint */
4311 + X86_TRAP_OF, /* 4, Overflow */
4312 + X86_TRAP_BR, /* 5, Bound Range Exceeded */
4313 + X86_TRAP_UD, /* 6, Invalid Opcode */
4314 + X86_TRAP_NM, /* 7, Device Not Available */
4315 + X86_TRAP_DF, /* 8, Double Fault */
4316 + X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
4317 + X86_TRAP_TS, /* 10, Invalid TSS */
4318 + X86_TRAP_NP, /* 11, Segment Not Present */
4319 + X86_TRAP_SS, /* 12, Stack Segment Fault */
4320 + X86_TRAP_GP, /* 13, General Protection Fault */
4321 + X86_TRAP_PF, /* 14, Page Fault */
4322 + X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
4323 + X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
4324 + X86_TRAP_AC, /* 17, Alignment Check */
4325 + X86_TRAP_MC, /* 18, Machine Check */
4326 + X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
4327 + X86_TRAP_IRET = 32, /* 32, IRET Exception */
4328 +};
4329 +
4330 #endif /* _ASM_X86_TRAPS_H */
4331 diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
4332 index 15d9915..c91e8b9 100644
4333 --- a/arch/x86/include/asm/tsc.h
4334 +++ b/arch/x86/include/asm/tsc.h
4335 @@ -61,7 +61,7 @@ extern void check_tsc_sync_source(int cpu);
4336 extern void check_tsc_sync_target(void);
4337
4338 extern int notsc_setup(char *);
4339 -extern void save_sched_clock_state(void);
4340 -extern void restore_sched_clock_state(void);
4341 +extern void tsc_save_sched_clock_state(void);
4342 +extern void tsc_restore_sched_clock_state(void);
4343
4344 #endif /* _ASM_X86_TSC_H */
4345 diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
4346 index 21f77b8..37cdc9d 100644
4347 --- a/arch/x86/include/asm/unistd.h
4348 +++ b/arch/x86/include/asm/unistd.h
4349 @@ -1,7 +1,17 @@
4350 #ifndef _ASM_X86_UNISTD_H
4351 #define _ASM_X86_UNISTD_H 1
4352
4353 +/* x32 syscall flag bit */
4354 +#define __X32_SYSCALL_BIT 0x40000000
4355 +
4356 #ifdef __KERNEL__
4357 +
4358 +# ifdef CONFIG_X86_X32_ABI
4359 +# define __SYSCALL_MASK (~(__X32_SYSCALL_BIT))
4360 +# else
4361 +# define __SYSCALL_MASK (~0)
4362 +# endif
4363 +
4364 # ifdef CONFIG_X86_32
4365
4366 # include <asm/unistd_32.h>
4367 @@ -14,6 +24,7 @@
4368 # else
4369
4370 # include <asm/unistd_64.h>
4371 +# include <asm/unistd_64_x32.h>
4372 # define __ARCH_WANT_COMPAT_SYS_TIME
4373
4374 # endif
4375 @@ -52,8 +63,10 @@
4376 #else
4377 # ifdef __i386__
4378 # include <asm/unistd_32.h>
4379 -# else
4380 +# elif defined(__LP64__)
4381 # include <asm/unistd_64.h>
4382 +# else
4383 +# include <asm/unistd_x32.h>
4384 # endif
4385 #endif
4386
4387 diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
4388 index 815285b..8b38be2 100644
4389 --- a/arch/x86/include/asm/vgtod.h
4390 +++ b/arch/x86/include/asm/vgtod.h
4391 @@ -5,13 +5,8 @@
4392 #include <linux/clocksource.h>
4393
4394 struct vsyscall_gtod_data {
4395 - seqlock_t lock;
4396 + seqcount_t seq;
4397
4398 - /* open coded 'struct timespec' */
4399 - time_t wall_time_sec;
4400 - u32 wall_time_nsec;
4401 -
4402 - struct timezone sys_tz;
4403 struct { /* extract of a clocksource struct */
4404 int vclock_mode;
4405 cycle_t cycle_last;
4406 @@ -19,8 +14,16 @@ struct vsyscall_gtod_data {
4407 u32 mult;
4408 u32 shift;
4409 } clock;
4410 - struct timespec wall_to_monotonic;
4411 +
4412 + /* open coded 'struct timespec' */
4413 + time_t wall_time_sec;
4414 + u32 wall_time_nsec;
4415 + u32 monotonic_time_nsec;
4416 + time_t monotonic_time_sec;
4417 +
4418 + struct timezone sys_tz;
4419 struct timespec wall_time_coarse;
4420 + struct timespec monotonic_time_coarse;
4421 };
4422 extern struct vsyscall_gtod_data vsyscall_gtod_data;
4423
4424 diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
4425 index e0f9aa1..5da71c2 100644
4426 --- a/arch/x86/include/asm/virtext.h
4427 +++ b/arch/x86/include/asm/virtext.h
4428 @@ -16,7 +16,6 @@
4429 #define _ASM_X86_VIRTEX_H
4430
4431 #include <asm/processor.h>
4432 -#include <asm/system.h>
4433
4434 #include <asm/vmx.h>
4435 #include <asm/svm.h>
4436 diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h
4437 index 6bf5b8e..92e54ab 100644
4438 --- a/arch/x86/include/asm/x2apic.h
4439 +++ b/arch/x86/include/asm/x2apic.h
4440 @@ -18,6 +18,11 @@ static const struct cpumask *x2apic_target_cpus(void)
4441 return cpu_online_mask;
4442 }
4443
4444 +static int x2apic_apic_id_valid(int apicid)
4445 +{
4446 + return 1;
4447 +}
4448 +
4449 static int x2apic_apic_id_registered(void)
4450 {
4451 return 1;
4452 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
4453 index 517d476..baaca8d 100644
4454 --- a/arch/x86/include/asm/x86_init.h
4455 +++ b/arch/x86/include/asm/x86_init.h
4456 @@ -145,9 +145,11 @@ struct x86_init_ops {
4457 /**
4458 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
4459 * @setup_percpu_clockev: set up the per cpu clock event device
4460 + * @early_percpu_clock_init: early init of the per cpu clock event device
4461 */
4462 struct x86_cpuinit_ops {
4463 void (*setup_percpu_clockev)(void);
4464 + void (*early_percpu_clock_init)(void);
4465 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
4466 };
4467
4468 @@ -160,6 +162,8 @@ struct x86_cpuinit_ops {
4469 * @is_untracked_pat_range exclude from PAT logic
4470 * @nmi_init enable NMI on cpus
4471 * @i8042_detect pre-detect if i8042 controller exists
4472 + * @save_sched_clock_state: save state for sched_clock() on suspend
4473 + * @restore_sched_clock_state: restore state for sched_clock() on resume
4474 */
4475 struct x86_platform_ops {
4476 unsigned long (*calibrate_tsc)(void);
4477 @@ -171,6 +175,8 @@ struct x86_platform_ops {
4478 void (*nmi_init)(void);
4479 unsigned char (*get_nmi_reason)(void);
4480 int (*i8042_detect)(void);
4481 + void (*save_sched_clock_state)(void);
4482 + void (*restore_sched_clock_state)(void);
4483 };
4484
4485 struct pci_dev;
4486 diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
4487 index a1f2db5..cbf0c9d 100644
4488 --- a/arch/x86/include/asm/xen/interface.h
4489 +++ b/arch/x86/include/asm/xen/interface.h
4490 @@ -56,6 +56,7 @@ DEFINE_GUEST_HANDLE(int);
4491 DEFINE_GUEST_HANDLE(long);
4492 DEFINE_GUEST_HANDLE(void);
4493 DEFINE_GUEST_HANDLE(uint64_t);
4494 +DEFINE_GUEST_HANDLE(uint32_t);
4495 #endif
4496
4497 #ifndef HYPERVISOR_VIRT_START
4498 diff --git a/arch/x86/syscalls/Makefile b/arch/x86/syscalls/Makefile
4499 index 564b247..3236aeb 100644
4500 --- a/arch/x86/syscalls/Makefile
4501 +++ b/arch/x86/syscalls/Makefile
4502 @@ -10,8 +10,10 @@ syshdr := $(srctree)/$(src)/syscallhdr.sh
4503 systbl := $(srctree)/$(src)/syscalltbl.sh
4504
4505 quiet_cmd_syshdr = SYSHDR $@
4506 - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' $< $@ \
4507 - $(syshdr_abi_$(basetarget)) $(syshdr_pfx_$(basetarget))
4508 + cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
4509 + '$(syshdr_abi_$(basetarget))' \
4510 + '$(syshdr_pfx_$(basetarget))' \
4511 + '$(syshdr_offset_$(basetarget))'
4512 quiet_cmd_systbl = SYSTBL $@
4513 cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@
4514
4515 @@ -24,18 +26,28 @@ syshdr_pfx_unistd_32_ia32 := ia32_
4516 $(out)/unistd_32_ia32.h: $(syscall32) $(syshdr)
4517 $(call if_changed,syshdr)
4518
4519 -syshdr_abi_unistd_64 := 64
4520 +syshdr_abi_unistd_x32 := common,x32
4521 +syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT
4522 +$(out)/unistd_x32.h: $(syscall64) $(syshdr)
4523 + $(call if_changed,syshdr)
4524 +
4525 +syshdr_abi_unistd_64 := common,64
4526 $(out)/unistd_64.h: $(syscall64) $(syshdr)
4527 $(call if_changed,syshdr)
4528
4529 +syshdr_abi_unistd_64_x32 := x32
4530 +syshdr_pfx_unistd_64_x32 := x32_
4531 +$(out)/unistd_64_x32.h: $(syscall64) $(syshdr)
4532 + $(call if_changed,syshdr)
4533 +
4534 $(out)/syscalls_32.h: $(syscall32) $(systbl)
4535 $(call if_changed,systbl)
4536 $(out)/syscalls_64.h: $(syscall64) $(systbl)
4537 $(call if_changed,systbl)
4538
4539 -syshdr-y += unistd_32.h unistd_64.h
4540 +syshdr-y += unistd_32.h unistd_64.h unistd_x32.h
4541 syshdr-y += syscalls_32.h
4542 -syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h
4543 +syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h
4544 syshdr-$(CONFIG_X86_64) += syscalls_64.h
4545
4546 targets += $(syshdr-y)
4547 diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
4548 index ce98e28..29f9f05 100644
4549 --- a/arch/x86/syscalls/syscall_32.tbl
4550 +++ b/arch/x86/syscalls/syscall_32.tbl
4551 @@ -181,7 +181,7 @@
4552 172 i386 prctl sys_prctl
4553 173 i386 rt_sigreturn ptregs_rt_sigreturn stub32_rt_sigreturn
4554 174 i386 rt_sigaction sys_rt_sigaction sys32_rt_sigaction
4555 -175 i386 rt_sigprocmask sys_rt_sigprocmask sys32_rt_sigprocmask
4556 +175 i386 rt_sigprocmask sys_rt_sigprocmask
4557 176 i386 rt_sigpending sys_rt_sigpending sys32_rt_sigpending
4558 177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
4559 178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo sys32_rt_sigqueueinfo
4560 @@ -288,7 +288,7 @@
4561 279 i386 mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
4562 280 i386 mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
4563 281 i386 mq_notify sys_mq_notify compat_sys_mq_notify
4564 -282 i386 mq_getsetaddr sys_mq_getsetattr compat_sys_mq_getsetattr
4565 +282 i386 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
4566 283 i386 kexec_load sys_kexec_load compat_sys_kexec_load
4567 284 i386 waitid sys_waitid compat_sys_waitid
4568 # 285 sys_setaltroot
4569 diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
4570 index b440a8f..dd29a9e 100644
4571 --- a/arch/x86/syscalls/syscall_64.tbl
4572 +++ b/arch/x86/syscalls/syscall_64.tbl
4573 @@ -4,317 +4,350 @@
4574 # The format is:
4575 # <number> <abi> <name> <entry point>
4576 #
4577 -# The abi is always "64" for this file (for now.)
4578 +# The abi is "common", "64" or "x32" for this file.
4579 #
4580 -0 64 read sys_read
4581 -1 64 write sys_write
4582 -2 64 open sys_open
4583 -3 64 close sys_close
4584 -4 64 stat sys_newstat
4585 -5 64 fstat sys_newfstat
4586 -6 64 lstat sys_newlstat
4587 -7 64 poll sys_poll
4588 -8 64 lseek sys_lseek
4589 -9 64 mmap sys_mmap
4590 -10 64 mprotect sys_mprotect
4591 -11 64 munmap sys_munmap
4592 -12 64 brk sys_brk
4593 +0 common read sys_read
4594 +1 common write sys_write
4595 +2 common open sys_open
4596 +3 common close sys_close
4597 +4 common stat sys_newstat
4598 +5 common fstat sys_newfstat
4599 +6 common lstat sys_newlstat
4600 +7 common poll sys_poll
4601 +8 common lseek sys_lseek
4602 +9 common mmap sys_mmap
4603 +10 common mprotect sys_mprotect
4604 +11 common munmap sys_munmap
4605 +12 common brk sys_brk
4606 13 64 rt_sigaction sys_rt_sigaction
4607 -14 64 rt_sigprocmask sys_rt_sigprocmask
4608 +14 common rt_sigprocmask sys_rt_sigprocmask
4609 15 64 rt_sigreturn stub_rt_sigreturn
4610 16 64 ioctl sys_ioctl
4611 -17 64 pread64 sys_pread64
4612 -18 64 pwrite64 sys_pwrite64
4613 +17 common pread64 sys_pread64
4614 +18 common pwrite64 sys_pwrite64
4615 19 64 readv sys_readv
4616 20 64 writev sys_writev
4617 -21 64 access sys_access
4618 -22 64 pipe sys_pipe
4619 -23 64 select sys_select
4620 -24 64 sched_yield sys_sched_yield
4621 -25 64 mremap sys_mremap
4622 -26 64 msync sys_msync
4623 -27 64 mincore sys_mincore
4624 -28 64 madvise sys_madvise
4625 -29 64 shmget sys_shmget
4626 -30 64 shmat sys_shmat
4627 -31 64 shmctl sys_shmctl
4628 -32 64 dup sys_dup
4629 -33 64 dup2 sys_dup2
4630 -34 64 pause sys_pause
4631 -35 64 nanosleep sys_nanosleep
4632 -36 64 getitimer sys_getitimer
4633 -37 64 alarm sys_alarm
4634 -38 64 setitimer sys_setitimer
4635 -39 64 getpid sys_getpid
4636 -40 64 sendfile sys_sendfile64
4637 -41 64 socket sys_socket
4638 -42 64 connect sys_connect
4639 -43 64 accept sys_accept
4640 -44 64 sendto sys_sendto
4641 +21 common access sys_access
4642 +22 common pipe sys_pipe
4643 +23 common select sys_select
4644 +24 common sched_yield sys_sched_yield
4645 +25 common mremap sys_mremap
4646 +26 common msync sys_msync
4647 +27 common mincore sys_mincore
4648 +28 common madvise sys_madvise
4649 +29 common shmget sys_shmget
4650 +30 common shmat sys_shmat
4651 +31 common shmctl sys_shmctl
4652 +32 common dup sys_dup
4653 +33 common dup2 sys_dup2
4654 +34 common pause sys_pause
4655 +35 common nanosleep sys_nanosleep
4656 +36 common getitimer sys_getitimer
4657 +37 common alarm sys_alarm
4658 +38 common setitimer sys_setitimer
4659 +39 common getpid sys_getpid
4660 +40 common sendfile sys_sendfile64
4661 +41 common socket sys_socket
4662 +42 common connect sys_connect
4663 +43 common accept sys_accept
4664 +44 common sendto sys_sendto
4665 45 64 recvfrom sys_recvfrom
4666 46 64 sendmsg sys_sendmsg
4667 47 64 recvmsg sys_recvmsg
4668 -48 64 shutdown sys_shutdown
4669 -49 64 bind sys_bind
4670 -50 64 listen sys_listen
4671 -51 64 getsockname sys_getsockname
4672 -52 64 getpeername sys_getpeername
4673 -53 64 socketpair sys_socketpair
4674 -54 64 setsockopt sys_setsockopt
4675 -55 64 getsockopt sys_getsockopt
4676 -56 64 clone stub_clone
4677 -57 64 fork stub_fork
4678 -58 64 vfork stub_vfork
4679 +48 common shutdown sys_shutdown
4680 +49 common bind sys_bind
4681 +50 common listen sys_listen
4682 +51 common getsockname sys_getsockname
4683 +52 common getpeername sys_getpeername
4684 +53 common socketpair sys_socketpair
4685 +54 common setsockopt sys_setsockopt
4686 +55 common getsockopt sys_getsockopt
4687 +56 common clone stub_clone
4688 +57 common fork stub_fork
4689 +58 common vfork stub_vfork
4690 59 64 execve stub_execve
4691 -60 64 exit sys_exit
4692 -61 64 wait4 sys_wait4
4693 -62 64 kill sys_kill
4694 -63 64 uname sys_newuname
4695 -64 64 semget sys_semget
4696 -65 64 semop sys_semop
4697 -66 64 semctl sys_semctl
4698 -67 64 shmdt sys_shmdt
4699 -68 64 msgget sys_msgget
4700 -69 64 msgsnd sys_msgsnd
4701 -70 64 msgrcv sys_msgrcv
4702 -71 64 msgctl sys_msgctl
4703 -72 64 fcntl sys_fcntl
4704 -73 64 flock sys_flock
4705 -74 64 fsync sys_fsync
4706 -75 64 fdatasync sys_fdatasync
4707 -76 64 truncate sys_truncate
4708 -77 64 ftruncate sys_ftruncate
4709 -78 64 getdents sys_getdents
4710 -79 64 getcwd sys_getcwd
4711 -80 64 chdir sys_chdir
4712 -81 64 fchdir sys_fchdir
4713 -82 64 rename sys_rename
4714 -83 64 mkdir sys_mkdir
4715 -84 64 rmdir sys_rmdir
4716 -85 64 creat sys_creat
4717 -86 64 link sys_link
4718 -87 64 unlink sys_unlink
4719 -88 64 symlink sys_symlink
4720 -89 64 readlink sys_readlink
4721 -90 64 chmod sys_chmod
4722 -91 64 fchmod sys_fchmod
4723 -92 64 chown sys_chown
4724 -93 64 fchown sys_fchown
4725 -94 64 lchown sys_lchown
4726 -95 64 umask sys_umask
4727 -96 64 gettimeofday sys_gettimeofday
4728 -97 64 getrlimit sys_getrlimit
4729 -98 64 getrusage sys_getrusage
4730 -99 64 sysinfo sys_sysinfo
4731 -100 64 times sys_times
4732 +60 common exit sys_exit
4733 +61 common wait4 sys_wait4
4734 +62 common kill sys_kill
4735 +63 common uname sys_newuname
4736 +64 common semget sys_semget
4737 +65 common semop sys_semop
4738 +66 common semctl sys_semctl
4739 +67 common shmdt sys_shmdt
4740 +68 common msgget sys_msgget
4741 +69 common msgsnd sys_msgsnd
4742 +70 common msgrcv sys_msgrcv
4743 +71 common msgctl sys_msgctl
4744 +72 common fcntl sys_fcntl
4745 +73 common flock sys_flock
4746 +74 common fsync sys_fsync
4747 +75 common fdatasync sys_fdatasync
4748 +76 common truncate sys_truncate
4749 +77 common ftruncate sys_ftruncate
4750 +78 common getdents sys_getdents
4751 +79 common getcwd sys_getcwd
4752 +80 common chdir sys_chdir
4753 +81 common fchdir sys_fchdir
4754 +82 common rename sys_rename
4755 +83 common mkdir sys_mkdir
4756 +84 common rmdir sys_rmdir
4757 +85 common creat sys_creat
4758 +86 common link sys_link
4759 +87 common unlink sys_unlink
4760 +88 common symlink sys_symlink
4761 +89 common readlink sys_readlink
4762 +90 common chmod sys_chmod
4763 +91 common fchmod sys_fchmod
4764 +92 common chown sys_chown
4765 +93 common fchown sys_fchown
4766 +94 common lchown sys_lchown
4767 +95 common umask sys_umask
4768 +96 common gettimeofday sys_gettimeofday
4769 +97 common getrlimit sys_getrlimit
4770 +98 common getrusage sys_getrusage
4771 +99 common sysinfo sys_sysinfo
4772 +100 common times sys_times
4773 101 64 ptrace sys_ptrace
4774 -102 64 getuid sys_getuid
4775 -103 64 syslog sys_syslog
4776 -104 64 getgid sys_getgid
4777 -105 64 setuid sys_setuid
4778 -106 64 setgid sys_setgid
4779 -107 64 geteuid sys_geteuid
4780 -108 64 getegid sys_getegid
4781 -109 64 setpgid sys_setpgid
4782 -110 64 getppid sys_getppid
4783 -111 64 getpgrp sys_getpgrp
4784 -112 64 setsid sys_setsid
4785 -113 64 setreuid sys_setreuid
4786 -114 64 setregid sys_setregid
4787 -115 64 getgroups sys_getgroups
4788 -116 64 setgroups sys_setgroups
4789 -117 64 setresuid sys_setresuid
4790 -118 64 getresuid sys_getresuid
4791 -119 64 setresgid sys_setresgid
4792 -120 64 getresgid sys_getresgid
4793 -121 64 getpgid sys_getpgid
4794 -122 64 setfsuid sys_setfsuid
4795 -123 64 setfsgid sys_setfsgid
4796 -124 64 getsid sys_getsid
4797 -125 64 capget sys_capget
4798 -126 64 capset sys_capset
4799 +102 common getuid sys_getuid
4800 +103 common syslog sys_syslog
4801 +104 common getgid sys_getgid
4802 +105 common setuid sys_setuid
4803 +106 common setgid sys_setgid
4804 +107 common geteuid sys_geteuid
4805 +108 common getegid sys_getegid
4806 +109 common setpgid sys_setpgid
4807 +110 common getppid sys_getppid
4808 +111 common getpgrp sys_getpgrp
4809 +112 common setsid sys_setsid
4810 +113 common setreuid sys_setreuid
4811 +114 common setregid sys_setregid
4812 +115 common getgroups sys_getgroups
4813 +116 common setgroups sys_setgroups
4814 +117 common setresuid sys_setresuid
4815 +118 common getresuid sys_getresuid
4816 +119 common setresgid sys_setresgid
4817 +120 common getresgid sys_getresgid
4818 +121 common getpgid sys_getpgid
4819 +122 common setfsuid sys_setfsuid
4820 +123 common setfsgid sys_setfsgid
4821 +124 common getsid sys_getsid
4822 +125 common capget sys_capget
4823 +126 common capset sys_capset
4824 127 64 rt_sigpending sys_rt_sigpending
4825 128 64 rt_sigtimedwait sys_rt_sigtimedwait
4826 129 64 rt_sigqueueinfo sys_rt_sigqueueinfo
4827 -130 64 rt_sigsuspend sys_rt_sigsuspend
4828 +130 common rt_sigsuspend sys_rt_sigsuspend
4829 131 64 sigaltstack stub_sigaltstack
4830 -132 64 utime sys_utime
4831 -133 64 mknod sys_mknod
4832 +132 common utime sys_utime
4833 +133 common mknod sys_mknod
4834 134 64 uselib
4835 -135 64 personality sys_personality
4836 -136 64 ustat sys_ustat
4837 -137 64 statfs sys_statfs
4838 -138 64 fstatfs sys_fstatfs
4839 -139 64 sysfs sys_sysfs
4840 -140 64 getpriority sys_getpriority
4841 -141 64 setpriority sys_setpriority
4842 -142 64 sched_setparam sys_sched_setparam
4843 -143 64 sched_getparam sys_sched_getparam
4844 -144 64 sched_setscheduler sys_sched_setscheduler
4845 -145 64 sched_getscheduler sys_sched_getscheduler
4846 -146 64 sched_get_priority_max sys_sched_get_priority_max
4847 -147 64 sched_get_priority_min sys_sched_get_priority_min
4848 -148 64 sched_rr_get_interval sys_sched_rr_get_interval
4849 -149 64 mlock sys_mlock
4850 -150 64 munlock sys_munlock
4851 -151 64 mlockall sys_mlockall
4852 -152 64 munlockall sys_munlockall
4853 -153 64 vhangup sys_vhangup
4854 -154 64 modify_ldt sys_modify_ldt
4855 -155 64 pivot_root sys_pivot_root
4856 +135 common personality sys_personality
4857 +136 common ustat sys_ustat
4858 +137 common statfs sys_statfs
4859 +138 common fstatfs sys_fstatfs
4860 +139 common sysfs sys_sysfs
4861 +140 common getpriority sys_getpriority
4862 +141 common setpriority sys_setpriority
4863 +142 common sched_setparam sys_sched_setparam
4864 +143 common sched_getparam sys_sched_getparam
4865 +144 common sched_setscheduler sys_sched_setscheduler
4866 +145 common sched_getscheduler sys_sched_getscheduler
4867 +146 common sched_get_priority_max sys_sched_get_priority_max
4868 +147 common sched_get_priority_min sys_sched_get_priority_min
4869 +148 common sched_rr_get_interval sys_sched_rr_get_interval
4870 +149 common mlock sys_mlock
4871 +150 common munlock sys_munlock
4872 +151 common mlockall sys_mlockall
4873 +152 common munlockall sys_munlockall
4874 +153 common vhangup sys_vhangup
4875 +154 common modify_ldt sys_modify_ldt
4876 +155 common pivot_root sys_pivot_root
4877 156 64 _sysctl sys_sysctl
4878 -157 64 prctl sys_prctl
4879 -158 64 arch_prctl sys_arch_prctl
4880 -159 64 adjtimex sys_adjtimex
4881 -160 64 setrlimit sys_setrlimit
4882 -161 64 chroot sys_chroot
4883 -162 64 sync sys_sync
4884 -163 64 acct sys_acct
4885 -164 64 settimeofday sys_settimeofday
4886 -165 64 mount sys_mount
4887 -166 64 umount2 sys_umount
4888 -167 64 swapon sys_swapon
4889 -168 64 swapoff sys_swapoff
4890 -169 64 reboot sys_reboot
4891 -170 64 sethostname sys_sethostname
4892 -171 64 setdomainname sys_setdomainname
4893 -172 64 iopl stub_iopl
4894 -173 64 ioperm sys_ioperm
4895 +157 common prctl sys_prctl
4896 +158 common arch_prctl sys_arch_prctl
4897 +159 common adjtimex sys_adjtimex
4898 +160 common setrlimit sys_setrlimit
4899 +161 common chroot sys_chroot
4900 +162 common sync sys_sync
4901 +163 common acct sys_acct
4902 +164 common settimeofday sys_settimeofday
4903 +165 common mount sys_mount
4904 +166 common umount2 sys_umount
4905 +167 common swapon sys_swapon
4906 +168 common swapoff sys_swapoff
4907 +169 common reboot sys_reboot
4908 +170 common sethostname sys_sethostname
4909 +171 common setdomainname sys_setdomainname
4910 +172 common iopl stub_iopl
4911 +173 common ioperm sys_ioperm
4912 174 64 create_module
4913 -175 64 init_module sys_init_module
4914 -176 64 delete_module sys_delete_module
4915 +175 common init_module sys_init_module
4916 +176 common delete_module sys_delete_module
4917 177 64 get_kernel_syms
4918 178 64 query_module
4919 -179 64 quotactl sys_quotactl
4920 +179 common quotactl sys_quotactl
4921 180 64 nfsservctl
4922 -181 64 getpmsg
4923 -182 64 putpmsg
4924 -183 64 afs_syscall
4925 -184 64 tuxcall
4926 -185 64 security
4927 -186 64 gettid sys_gettid
4928 -187 64 readahead sys_readahead
4929 -188 64 setxattr sys_setxattr
4930 -189 64 lsetxattr sys_lsetxattr
4931 -190 64 fsetxattr sys_fsetxattr
4932 -191 64 getxattr sys_getxattr
4933 -192 64 lgetxattr sys_lgetxattr
4934 -193 64 fgetxattr sys_fgetxattr
4935 -194 64 listxattr sys_listxattr
4936 -195 64 llistxattr sys_llistxattr
4937 -196 64 flistxattr sys_flistxattr
4938 -197 64 removexattr sys_removexattr
4939 -198 64 lremovexattr sys_lremovexattr
4940 -199 64 fremovexattr sys_fremovexattr
4941 -200 64 tkill sys_tkill
4942 -201 64 time sys_time
4943 -202 64 futex sys_futex
4944 -203 64 sched_setaffinity sys_sched_setaffinity
4945 -204 64 sched_getaffinity sys_sched_getaffinity
4946 +181 common getpmsg
4947 +182 common putpmsg
4948 +183 common afs_syscall
4949 +184 common tuxcall
4950 +185 common security
4951 +186 common gettid sys_gettid
4952 +187 common readahead sys_readahead
4953 +188 common setxattr sys_setxattr
4954 +189 common lsetxattr sys_lsetxattr
4955 +190 common fsetxattr sys_fsetxattr
4956 +191 common getxattr sys_getxattr
4957 +192 common lgetxattr sys_lgetxattr
4958 +193 common fgetxattr sys_fgetxattr
4959 +194 common listxattr sys_listxattr
4960 +195 common llistxattr sys_llistxattr
4961 +196 common flistxattr sys_flistxattr
4962 +197 common removexattr sys_removexattr
4963 +198 common lremovexattr sys_lremovexattr
4964 +199 common fremovexattr sys_fremovexattr
4965 +200 common tkill sys_tkill
4966 +201 common time sys_time
4967 +202 common futex sys_futex
4968 +203 common sched_setaffinity sys_sched_setaffinity
4969 +204 common sched_getaffinity sys_sched_getaffinity
4970 205 64 set_thread_area
4971 -206 64 io_setup sys_io_setup
4972 -207 64 io_destroy sys_io_destroy
4973 -208 64 io_getevents sys_io_getevents
4974 -209 64 io_submit sys_io_submit
4975 -210 64 io_cancel sys_io_cancel
4976 +206 common io_setup sys_io_setup
4977 +207 common io_destroy sys_io_destroy
4978 +208 common io_getevents sys_io_getevents
4979 +209 common io_submit sys_io_submit
4980 +210 common io_cancel sys_io_cancel
4981 211 64 get_thread_area
4982 -212 64 lookup_dcookie sys_lookup_dcookie
4983 -213 64 epoll_create sys_epoll_create
4984 +212 common lookup_dcookie sys_lookup_dcookie
4985 +213 common epoll_create sys_epoll_create
4986 214 64 epoll_ctl_old
4987 215 64 epoll_wait_old
4988 -216 64 remap_file_pages sys_remap_file_pages
4989 -217 64 getdents64 sys_getdents64
4990 -218 64 set_tid_address sys_set_tid_address
4991 -219 64 restart_syscall sys_restart_syscall
4992 -220 64 semtimedop sys_semtimedop
4993 -221 64 fadvise64 sys_fadvise64
4994 +216 common remap_file_pages sys_remap_file_pages
4995 +217 common getdents64 sys_getdents64
4996 +218 common set_tid_address sys_set_tid_address
4997 +219 common restart_syscall sys_restart_syscall
4998 +220 common semtimedop sys_semtimedop
4999 +221 common fadvise64 sys_fadvise64
5000 222 64 timer_create sys_timer_create
5001 -223 64 timer_settime sys_timer_settime
5002 -224 64 timer_gettime sys_timer_gettime
5003 -225 64 timer_getoverrun sys_timer_getoverrun
5004 -226 64 timer_delete sys_timer_delete
5005 -227 64 clock_settime sys_clock_settime
5006 -228 64 clock_gettime sys_clock_gettime
5007 -229 64 clock_getres sys_clock_getres
5008 -230 64 clock_nanosleep sys_clock_nanosleep
5009 -231 64 exit_group sys_exit_group
5010 -232 64 epoll_wait sys_epoll_wait
5011 -233 64 epoll_ctl sys_epoll_ctl
5012 -234 64 tgkill sys_tgkill
5013 -235 64 utimes sys_utimes
5014 +223 common timer_settime sys_timer_settime
5015 +224 common timer_gettime sys_timer_gettime
5016 +225 common timer_getoverrun sys_timer_getoverrun
5017 +226 common timer_delete sys_timer_delete
5018 +227 common clock_settime sys_clock_settime
5019 +228 common clock_gettime sys_clock_gettime
5020 +229 common clock_getres sys_clock_getres
5021 +230 common clock_nanosleep sys_clock_nanosleep
5022 +231 common exit_group sys_exit_group
5023 +232 common epoll_wait sys_epoll_wait
5024 +233 common epoll_ctl sys_epoll_ctl
5025 +234 common tgkill sys_tgkill
5026 +235 common utimes sys_utimes
5027 236 64 vserver
5028 -237 64 mbind sys_mbind
5029 -238 64 set_mempolicy sys_set_mempolicy
5030 -239 64 get_mempolicy sys_get_mempolicy
5031 -240 64 mq_open sys_mq_open
5032 -241 64 mq_unlink sys_mq_unlink
5033 -242 64 mq_timedsend sys_mq_timedsend
5034 -243 64 mq_timedreceive sys_mq_timedreceive
5035 +237 common mbind sys_mbind
5036 +238 common set_mempolicy sys_set_mempolicy
5037 +239 common get_mempolicy sys_get_mempolicy
5038 +240 common mq_open sys_mq_open
5039 +241 common mq_unlink sys_mq_unlink
5040 +242 common mq_timedsend sys_mq_timedsend
5041 +243 common mq_timedreceive sys_mq_timedreceive
5042 244 64 mq_notify sys_mq_notify
5043 -245 64 mq_getsetattr sys_mq_getsetattr
5044 +245 common mq_getsetattr sys_mq_getsetattr
5045 246 64 kexec_load sys_kexec_load
5046 247 64 waitid sys_waitid
5047 -248 64 add_key sys_add_key
5048 -249 64 request_key sys_request_key
5049 -250 64 keyctl sys_keyctl
5050 -251 64 ioprio_set sys_ioprio_set
5051 -252 64 ioprio_get sys_ioprio_get
5052 -253 64 inotify_init sys_inotify_init
5053 -254 64 inotify_add_watch sys_inotify_add_watch
5054 -255 64 inotify_rm_watch sys_inotify_rm_watch
5055 -256 64 migrate_pages sys_migrate_pages
5056 -257 64 openat sys_openat
5057 -258 64 mkdirat sys_mkdirat
5058 -259 64 mknodat sys_mknodat
5059 -260 64 fchownat sys_fchownat
5060 -261 64 futimesat sys_futimesat
5061 -262 64 newfstatat sys_newfstatat
5062 -263 64 unlinkat sys_unlinkat
5063 -264 64 renameat sys_renameat
5064 -265 64 linkat sys_linkat
5065 -266 64 symlinkat sys_symlinkat
5066 -267 64 readlinkat sys_readlinkat
5067 -268 64 fchmodat sys_fchmodat
5068 -269 64 faccessat sys_faccessat
5069 -270 64 pselect6 sys_pselect6
5070 -271 64 ppoll sys_ppoll
5071 -272 64 unshare sys_unshare
5072 +248 common add_key sys_add_key
5073 +249 common request_key sys_request_key
5074 +250 common keyctl sys_keyctl
5075 +251 common ioprio_set sys_ioprio_set
5076 +252 common ioprio_get sys_ioprio_get
5077 +253 common inotify_init sys_inotify_init
5078 +254 common inotify_add_watch sys_inotify_add_watch
5079 +255 common inotify_rm_watch sys_inotify_rm_watch
5080 +256 common migrate_pages sys_migrate_pages
5081 +257 common openat sys_openat
5082 +258 common mkdirat sys_mkdirat
5083 +259 common mknodat sys_mknodat
5084 +260 common fchownat sys_fchownat
5085 +261 common futimesat sys_futimesat
5086 +262 common newfstatat sys_newfstatat
5087 +263 common unlinkat sys_unlinkat
5088 +264 common renameat sys_renameat
5089 +265 common linkat sys_linkat
5090 +266 common symlinkat sys_symlinkat
5091 +267 common readlinkat sys_readlinkat
5092 +268 common fchmodat sys_fchmodat
5093 +269 common faccessat sys_faccessat
5094 +270 common pselect6 sys_pselect6
5095 +271 common ppoll sys_ppoll
5096 +272 common unshare sys_unshare
5097 273 64 set_robust_list sys_set_robust_list
5098 274 64 get_robust_list sys_get_robust_list
5099 -275 64 splice sys_splice
5100 -276 64 tee sys_tee
5101 -277 64 sync_file_range sys_sync_file_range
5102 +275 common splice sys_splice
5103 +276 common tee sys_tee
5104 +277 common sync_file_range sys_sync_file_range
5105 278 64 vmsplice sys_vmsplice
5106 279 64 move_pages sys_move_pages
5107 -280 64 utimensat sys_utimensat
5108 -281 64 epoll_pwait sys_epoll_pwait
5109 -282 64 signalfd sys_signalfd
5110 -283 64 timerfd_create sys_timerfd_create
5111 -284 64 eventfd sys_eventfd
5112 -285 64 fallocate sys_fallocate
5113 -286 64 timerfd_settime sys_timerfd_settime
5114 -287 64 timerfd_gettime sys_timerfd_gettime
5115 -288 64 accept4 sys_accept4
5116 -289 64 signalfd4 sys_signalfd4
5117 -290 64 eventfd2 sys_eventfd2
5118 -291 64 epoll_create1 sys_epoll_create1
5119 -292 64 dup3 sys_dup3
5120 -293 64 pipe2 sys_pipe2
5121 -294 64 inotify_init1 sys_inotify_init1
5122 +280 common utimensat sys_utimensat
5123 +281 common epoll_pwait sys_epoll_pwait
5124 +282 common signalfd sys_signalfd
5125 +283 common timerfd_create sys_timerfd_create
5126 +284 common eventfd sys_eventfd
5127 +285 common fallocate sys_fallocate
5128 +286 common timerfd_settime sys_timerfd_settime
5129 +287 common timerfd_gettime sys_timerfd_gettime
5130 +288 common accept4 sys_accept4
5131 +289 common signalfd4 sys_signalfd4
5132 +290 common eventfd2 sys_eventfd2
5133 +291 common epoll_create1 sys_epoll_create1
5134 +292 common dup3 sys_dup3
5135 +293 common pipe2 sys_pipe2
5136 +294 common inotify_init1 sys_inotify_init1
5137 295 64 preadv sys_preadv
5138 296 64 pwritev sys_pwritev
5139 297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
5140 -298 64 perf_event_open sys_perf_event_open
5141 +298 common perf_event_open sys_perf_event_open
5142 299 64 recvmmsg sys_recvmmsg
5143 -300 64 fanotify_init sys_fanotify_init
5144 -301 64 fanotify_mark sys_fanotify_mark
5145 -302 64 prlimit64 sys_prlimit64
5146 -303 64 name_to_handle_at sys_name_to_handle_at
5147 -304 64 open_by_handle_at sys_open_by_handle_at
5148 -305 64 clock_adjtime sys_clock_adjtime
5149 -306 64 syncfs sys_syncfs
5150 +300 common fanotify_init sys_fanotify_init
5151 +301 common fanotify_mark sys_fanotify_mark
5152 +302 common prlimit64 sys_prlimit64
5153 +303 common name_to_handle_at sys_name_to_handle_at
5154 +304 common open_by_handle_at sys_open_by_handle_at
5155 +305 common clock_adjtime sys_clock_adjtime
5156 +306 common syncfs sys_syncfs
5157 307 64 sendmmsg sys_sendmmsg
5158 -308 64 setns sys_setns
5159 -309 64 getcpu sys_getcpu
5160 +308 common setns sys_setns
5161 +309 common getcpu sys_getcpu
5162 310 64 process_vm_readv sys_process_vm_readv
5163 311 64 process_vm_writev sys_process_vm_writev
5164 +#
5165 +# x32-specific system call numbers start at 512 to avoid cache impact
5166 +# for native 64-bit operation.
5167 +#
5168 +512 x32 rt_sigaction sys32_rt_sigaction
5169 +513 x32 rt_sigreturn stub_x32_rt_sigreturn
5170 +514 x32 ioctl compat_sys_ioctl
5171 +515 x32 readv compat_sys_readv
5172 +516 x32 writev compat_sys_writev
5173 +517 x32 recvfrom compat_sys_recvfrom
5174 +518 x32 sendmsg compat_sys_sendmsg
5175 +519 x32 recvmsg compat_sys_recvmsg
5176 +520 x32 execve stub_x32_execve
5177 +521 x32 ptrace compat_sys_ptrace
5178 +522 x32 rt_sigpending sys32_rt_sigpending
5179 +523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait
5180 +524 x32 rt_sigqueueinfo sys32_rt_sigqueueinfo
5181 +525 x32 sigaltstack stub_x32_sigaltstack
5182 +526 x32 timer_create compat_sys_timer_create
5183 +527 x32 mq_notify compat_sys_mq_notify
5184 +528 x32 kexec_load compat_sys_kexec_load
5185 +529 x32 waitid compat_sys_waitid
5186 +530 x32 set_robust_list compat_sys_set_robust_list
5187 +531 x32 get_robust_list compat_sys_get_robust_list
5188 +532 x32 vmsplice compat_sys_vmsplice
5189 +533 x32 move_pages compat_sys_move_pages
5190 +534 x32 preadv compat_sys_preadv64
5191 +535 x32 pwritev compat_sys_pwritev64
5192 +536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
5193 +537 x32 recvmmsg compat_sys_recvmmsg
5194 +538 x32 sendmmsg compat_sys_sendmmsg
5195 +539 x32 process_vm_readv compat_sys_process_vm_readv
5196 +540 x32 process_vm_writev compat_sys_process_vm_writev