
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Tue, 17 Jul 2018 10:24:58
Message-Id: 1531823084.a7f6696c1a465601ed5aa37a060ec155e8e9ec85.mpagano@gentoo
1 commit: a7f6696c1a465601ed5aa37a060ec155e8e9ec85
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Jul 17 10:24:44 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Jul 17 10:24:44 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a7f6696c
7
8 Linux patch 4.4.141
9
10 0000_README | 4 +
11 1140_linux-4.4.141.patch | 2989 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 2993 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 73e6c56..c1babcb 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -603,6 +603,10 @@ Patch: 1139_linux-4.4.140.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.140
21
22 +Patch: 1140_linux-4.4.141.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.141
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1140_linux-4.4.141.patch b/1140_linux-4.4.141.patch
31 new file mode 100644
32 index 0000000..eec959a
33 --- /dev/null
34 +++ b/1140_linux-4.4.141.patch
35 @@ -0,0 +1,2989 @@
36 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
37 +index 4df6bd7d01ed..e60d0b5809c1 100644
38 +--- a/Documentation/kernel-parameters.txt
39 ++++ b/Documentation/kernel-parameters.txt
40 +@@ -652,7 +652,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
41 +
42 + clearcpuid=BITNUM [X86]
43 + Disable CPUID feature X for the kernel. See
44 +- arch/x86/include/asm/cpufeature.h for the valid bit
45 ++ arch/x86/include/asm/cpufeatures.h for the valid bit
46 + numbers. Note the Linux specific bits are not necessarily
47 + stable over kernel options, but the vendor specific
48 + ones should be.
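For illustration (an example, not part of the patch itself): clearcpuid= takes the same word*32+bit numbering that cpufeatures.h uses, so X86_FEATURE_RDRAND, defined there as (4*32+30) = bit 158, can be masked off by booting with clearcpuid=158.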
49 +diff --git a/Makefile b/Makefile
50 +index b842298a5970..3fc39e41dbde 100644
51 +--- a/Makefile
52 ++++ b/Makefile
53 +@@ -1,6 +1,6 @@
54 + VERSION = 4
55 + PATCHLEVEL = 4
56 +-SUBLEVEL = 140
57 ++SUBLEVEL = 141
58 + EXTRAVERSION =
59 + NAME = Blurry Fish Butt
60 +
61 +diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
62 +index 8d5008cbdc0f..a853a83f2944 100644
63 +--- a/arch/mips/mm/ioremap.c
64 ++++ b/arch/mips/mm/ioremap.c
65 +@@ -9,6 +9,7 @@
66 + #include <linux/module.h>
67 + #include <asm/addrspace.h>
68 + #include <asm/byteorder.h>
69 ++#include <linux/ioport.h>
70 + #include <linux/sched.h>
71 + #include <linux/slab.h>
72 + #include <linux/vmalloc.h>
73 +@@ -97,6 +98,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
74 + return error;
75 + }
76 +
77 ++static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
78 ++ void *arg)
79 ++{
80 ++ unsigned long i;
81 ++
82 ++ for (i = 0; i < nr_pages; i++) {
83 ++ if (pfn_valid(start_pfn + i) &&
84 ++ !PageReserved(pfn_to_page(start_pfn + i)))
85 ++ return 1;
86 ++ }
87 ++
88 ++ return 0;
89 ++}
90 ++
91 + /*
92 + * Generic mapping function (not visible outside):
93 + */
94 +@@ -115,8 +130,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
95 +
96 + void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
97 + {
98 ++ unsigned long offset, pfn, last_pfn;
99 + struct vm_struct * area;
100 +- unsigned long offset;
101 + phys_addr_t last_addr;
102 + void * addr;
103 +
104 +@@ -136,18 +151,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
105 + return (void __iomem *) CKSEG1ADDR(phys_addr);
106 +
107 + /*
108 +- * Don't allow anybody to remap normal RAM that we're using..
109 ++ * Don't allow anybody to remap RAM that may be allocated by the page
110 ++ * allocator, since that could lead to races & data clobbering.
111 + */
112 +- if (phys_addr < virt_to_phys(high_memory)) {
113 +- char *t_addr, *t_end;
114 +- struct page *page;
115 +-
116 +- t_addr = __va(phys_addr);
117 +- t_end = t_addr + (size - 1);
118 +-
119 +- for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
120 +- if(!PageReserved(page))
121 +- return NULL;
122 ++ pfn = PFN_DOWN(phys_addr);
123 ++ last_pfn = PFN_DOWN(last_addr);
124 ++ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
125 ++ __ioremap_check_ram) == 1) {
126 ++ WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
127 ++ &phys_addr, &last_addr);
128 ++ return NULL;
129 + }
130 +
131 + /*
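A minimal sketch of what the stricter check above means for callers (hypothetical driver code, assuming 'phys' falls in page-allocator RAM; not part of the patch):

	/* hypothetical: mapping ordinary, non-reserved RAM is now refused */
	void __iomem *regs = ioremap(phys, PAGE_SIZE);

	if (!regs)	/* __ioremap() returns NULL and WARN_ONCE()s on such a range */
		return -ENOMEM;

Previously the PageReserved() walk only ran for addresses below high_memory; the walk_system_ram_range() version is not limited to lowmem and also reports the offending range in the warning.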
132 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
133 +index eab1ef25eecd..d9afe6d40550 100644
134 +--- a/arch/x86/Kconfig
135 ++++ b/arch/x86/Kconfig
136 +@@ -346,6 +346,17 @@ config X86_FEATURE_NAMES
137 +
138 + If in doubt, say Y.
139 +
140 ++config X86_FAST_FEATURE_TESTS
141 ++ bool "Fast CPU feature tests" if EMBEDDED
142 ++ default y
143 ++ ---help---
144 ++ Some fast-paths in the kernel depend on the capabilities of the CPU.
145 ++ Say Y here for the kernel to patch in the appropriate code at runtime
146 ++ based on the capabilities of the CPU. The infrastructure for patching
147 ++ code at runtime takes up some additional space; space-constrained
148 ++ embedded systems may wish to say N here to produce smaller, slightly
149 ++ slower code.
150 ++
151 + config X86_X2APIC
152 + bool "Support x2apic"
153 + depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST)
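The help text above is the user-facing side of the static_cpu_has() machinery that the rest of this patch reworks. As a rough sketch of how such a fast path is written (hypothetical caller and helper, for illustration only):

	/* hypothetical fast path; the branch is patched in once alternatives run */
	if (static_cpu_has(X86_FEATURE_AVX2))
		do_avx2_copy(dst, src, len);	/* assumed helper, not a real kernel API */
	else
		memcpy(dst, src, len);

With X86_FAST_FEATURE_TESTS=n, or with compilers lacking asm goto, static_cpu_has() falls back to boot_cpu_has(), i.e. an ordinary runtime test of boot_cpu_data.x86_capability.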
154 +diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
155 +index da00fe1f48f4..2aa212fb0faf 100644
156 +--- a/arch/x86/Kconfig.debug
157 ++++ b/arch/x86/Kconfig.debug
158 +@@ -367,16 +367,6 @@ config DEBUG_IMR_SELFTEST
159 +
160 + If unsure say N here.
161 +
162 +-config X86_DEBUG_STATIC_CPU_HAS
163 +- bool "Debug alternatives"
164 +- depends on DEBUG_KERNEL
165 +- ---help---
166 +- This option causes additional code to be generated which
167 +- fails if static_cpu_has() is used before alternatives have
168 +- run.
169 +-
170 +- If unsure, say N.
171 +-
172 + config X86_DEBUG_FPU
173 + bool "Debug the x86 FPU code"
174 + depends on DEBUG_KERNEL
175 +diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h
176 +index ea97697e51e4..4cb404fd45ce 100644
177 +--- a/arch/x86/boot/cpuflags.h
178 ++++ b/arch/x86/boot/cpuflags.h
179 +@@ -1,7 +1,7 @@
180 + #ifndef BOOT_CPUFLAGS_H
181 + #define BOOT_CPUFLAGS_H
182 +
183 +-#include <asm/cpufeature.h>
184 ++#include <asm/cpufeatures.h>
185 + #include <asm/processor-flags.h>
186 +
187 + struct cpu_features {
188 +diff --git a/arch/x86/boot/mkcpustr.c b/arch/x86/boot/mkcpustr.c
189 +index 637097e66a62..f72498dc90d2 100644
190 +--- a/arch/x86/boot/mkcpustr.c
191 ++++ b/arch/x86/boot/mkcpustr.c
192 +@@ -17,7 +17,7 @@
193 +
194 + #include "../include/asm/required-features.h"
195 + #include "../include/asm/disabled-features.h"
196 +-#include "../include/asm/cpufeature.h"
197 ++#include "../include/asm/cpufeatures.h"
198 + #include "../kernel/cpu/capflags.c"
199 +
200 + int main(void)
201 +diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
202 +index 07d2c6c86a54..27226df3f7d8 100644
203 +--- a/arch/x86/crypto/crc32-pclmul_glue.c
204 ++++ b/arch/x86/crypto/crc32-pclmul_glue.c
205 +@@ -33,7 +33,7 @@
206 + #include <linux/crc32.h>
207 + #include <crypto/internal/hash.h>
208 +
209 +-#include <asm/cpufeature.h>
210 ++#include <asm/cpufeatures.h>
211 + #include <asm/cpu_device_id.h>
212 + #include <asm/fpu/api.h>
213 +
214 +diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
215 +index 15f5c7675d42..715399b14ed7 100644
216 +--- a/arch/x86/crypto/crc32c-intel_glue.c
217 ++++ b/arch/x86/crypto/crc32c-intel_glue.c
218 +@@ -30,7 +30,7 @@
219 + #include <linux/kernel.h>
220 + #include <crypto/internal/hash.h>
221 +
222 +-#include <asm/cpufeature.h>
223 ++#include <asm/cpufeatures.h>
224 + #include <asm/cpu_device_id.h>
225 + #include <asm/fpu/internal.h>
226 +
227 +diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
228 +index a3fcfc97a311..cd4df9322501 100644
229 +--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
230 ++++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
231 +@@ -30,7 +30,7 @@
232 + #include <linux/string.h>
233 + #include <linux/kernel.h>
234 + #include <asm/fpu/api.h>
235 +-#include <asm/cpufeature.h>
236 ++#include <asm/cpufeatures.h>
237 + #include <asm/cpu_device_id.h>
238 +
239 + asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
240 +diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
241 +index b5eb1cca70a0..071582a3b5c0 100644
242 +--- a/arch/x86/entry/common.c
243 ++++ b/arch/x86/entry/common.c
244 +@@ -27,6 +27,7 @@
245 + #include <asm/traps.h>
246 + #include <asm/vdso.h>
247 + #include <asm/uaccess.h>
248 ++#include <asm/cpufeature.h>
249 +
250 + #define CREATE_TRACE_POINTS
251 + #include <trace/events/syscalls.h>
252 +diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
253 +index d437f3871e53..49a8c9f7a379 100644
254 +--- a/arch/x86/entry/entry_32.S
255 ++++ b/arch/x86/entry/entry_32.S
256 +@@ -40,7 +40,7 @@
257 + #include <asm/processor-flags.h>
258 + #include <asm/ftrace.h>
259 + #include <asm/irq_vectors.h>
260 +-#include <asm/cpufeature.h>
261 ++#include <asm/cpufeatures.h>
262 + #include <asm/alternative-asm.h>
263 + #include <asm/asm.h>
264 + #include <asm/smap.h>
265 +diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
266 +index a7508d7e20b7..3f9d1a83891a 100644
267 +--- a/arch/x86/entry/vdso/vdso32-setup.c
268 ++++ b/arch/x86/entry/vdso/vdso32-setup.c
269 +@@ -11,7 +11,6 @@
270 + #include <linux/kernel.h>
271 + #include <linux/mm_types.h>
272 +
273 +-#include <asm/cpufeature.h>
274 + #include <asm/processor.h>
275 + #include <asm/vdso.h>
276 +
277 +diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
278 +index 3a1d9297074b..0109ac6cb79c 100644
279 +--- a/arch/x86/entry/vdso/vdso32/system_call.S
280 ++++ b/arch/x86/entry/vdso/vdso32/system_call.S
281 +@@ -3,7 +3,7 @@
282 + */
283 +
284 + #include <asm/dwarf2.h>
285 +-#include <asm/cpufeature.h>
286 ++#include <asm/cpufeatures.h>
287 + #include <asm/alternative-asm.h>
288 +
289 + /*
290 +diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
291 +index b8f69e264ac4..6b46648588d8 100644
292 +--- a/arch/x86/entry/vdso/vma.c
293 ++++ b/arch/x86/entry/vdso/vma.c
294 +@@ -20,6 +20,7 @@
295 + #include <asm/page.h>
296 + #include <asm/hpet.h>
297 + #include <asm/desc.h>
298 ++#include <asm/cpufeature.h>
299 +
300 + #if defined(CONFIG_X86_64)
301 + unsigned int __read_mostly vdso64_enabled = 1;
302 +@@ -254,7 +255,7 @@ static void vgetcpu_cpu_init(void *arg)
303 + #ifdef CONFIG_NUMA
304 + node = cpu_to_node(cpu);
305 + #endif
306 +- if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
307 ++ if (static_cpu_has(X86_FEATURE_RDTSCP))
308 + write_rdtscp_aux((node << 12) | cpu);
309 +
310 + /*
311 +diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
312 +index 215ea9214215..002fcd901f07 100644
313 +--- a/arch/x86/include/asm/alternative.h
314 ++++ b/arch/x86/include/asm/alternative.h
315 +@@ -153,12 +153,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
316 + ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
317 + ".popsection\n"
318 +
319 +-/*
320 +- * This must be included *after* the definition of ALTERNATIVE due to
321 +- * <asm/arch_hweight.h>
322 +- */
323 +-#include <asm/cpufeature.h>
324 +-
325 + /*
326 + * Alternative instructions for different CPU types or capabilities.
327 + *
328 +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
329 +index 163769d82475..fd810a57ab1b 100644
330 +--- a/arch/x86/include/asm/apic.h
331 ++++ b/arch/x86/include/asm/apic.h
332 +@@ -6,7 +6,6 @@
333 +
334 + #include <asm/alternative.h>
335 + #include <asm/cpufeature.h>
336 +-#include <asm/processor.h>
337 + #include <asm/apicdef.h>
338 + #include <linux/atomic.h>
339 + #include <asm/fixmap.h>
340 +diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
341 +index 44f825c80ed5..e7cd63175de4 100644
342 +--- a/arch/x86/include/asm/arch_hweight.h
343 ++++ b/arch/x86/include/asm/arch_hweight.h
344 +@@ -1,6 +1,8 @@
345 + #ifndef _ASM_X86_HWEIGHT_H
346 + #define _ASM_X86_HWEIGHT_H
347 +
348 ++#include <asm/cpufeatures.h>
349 ++
350 + #ifdef CONFIG_64BIT
351 + /* popcnt %edi, %eax */
352 + #define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7"
353 +diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
354 +index ae5fb83e6d91..3e8674288198 100644
355 +--- a/arch/x86/include/asm/atomic.h
356 ++++ b/arch/x86/include/asm/atomic.h
357 +@@ -3,7 +3,6 @@
358 +
359 + #include <linux/compiler.h>
360 + #include <linux/types.h>
361 +-#include <asm/processor.h>
362 + #include <asm/alternative.h>
363 + #include <asm/cmpxchg.h>
364 + #include <asm/rmwcc.h>
365 +diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
366 +index a11c30b77fb5..a984111135b1 100644
367 +--- a/arch/x86/include/asm/atomic64_32.h
368 ++++ b/arch/x86/include/asm/atomic64_32.h
369 +@@ -3,7 +3,6 @@
370 +
371 + #include <linux/compiler.h>
372 + #include <linux/types.h>
373 +-#include <asm/processor.h>
374 + //#include <asm/cmpxchg.h>
375 +
376 + /* An 64bit atomic type */
377 +diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
378 +index ad19841eddfe..9733361fed6f 100644
379 +--- a/arch/x86/include/asm/cmpxchg.h
380 ++++ b/arch/x86/include/asm/cmpxchg.h
381 +@@ -2,6 +2,7 @@
382 + #define ASM_X86_CMPXCHG_H
383 +
384 + #include <linux/compiler.h>
385 ++#include <asm/cpufeatures.h>
386 + #include <asm/alternative.h> /* Provides LOCK_PREFIX */
387 +
388 + /*
389 +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
390 +index 232621c5e859..dd0089841a0f 100644
391 +--- a/arch/x86/include/asm/cpufeature.h
392 ++++ b/arch/x86/include/asm/cpufeature.h
393 +@@ -1,294 +1,35 @@
394 +-/*
395 +- * Defines x86 CPU feature bits
396 +- */
397 + #ifndef _ASM_X86_CPUFEATURE_H
398 + #define _ASM_X86_CPUFEATURE_H
399 +
400 +-#ifndef _ASM_X86_REQUIRED_FEATURES_H
401 +-#include <asm/required-features.h>
402 +-#endif
403 +-
404 +-#ifndef _ASM_X86_DISABLED_FEATURES_H
405 +-#include <asm/disabled-features.h>
406 +-#endif
407 +-
408 +-#define NCAPINTS 14 /* N 32-bit words worth of info */
409 +-#define NBUGINTS 1 /* N 32-bit bug flags */
410 +-
411 +-/*
412 +- * Note: If the comment begins with a quoted string, that string is used
413 +- * in /proc/cpuinfo instead of the macro name. If the string is "",
414 +- * this feature bit is not displayed in /proc/cpuinfo at all.
415 +- */
416 +-
417 +-/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
418 +-#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
419 +-#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
420 +-#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
421 +-#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
422 +-#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
423 +-#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
424 +-#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
425 +-#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
426 +-#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
427 +-#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
428 +-#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
429 +-#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
430 +-#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
431 +-#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
432 +-#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
433 +- /* (plus FCMOVcc, FCOMI with FPU) */
434 +-#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
435 +-#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
436 +-#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
437 +-#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
438 +-#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
439 +-#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
440 +-#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
441 +-#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
442 +-#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
443 +-#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
444 +-#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
445 +-#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
446 +-#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
447 +-#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
448 +-#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
449 +-
450 +-/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
451 +-/* Don't duplicate feature flags which are redundant with Intel! */
452 +-#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
453 +-#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
454 +-#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
455 +-#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
456 +-#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
457 +-#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
458 +-#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
459 +-#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
460 +-#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
461 +-#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
462 +-
463 +-/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
464 +-#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
465 +-#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
466 +-#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
467 +-
468 +-/* Other features, Linux-defined mapping, word 3 */
469 +-/* This range is used for feature bits which conflict or are synthesized */
470 +-#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
471 +-#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
472 +-#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
473 +-#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
474 +-/* cpu types for specific tunings: */
475 +-#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
476 +-#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
477 +-#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
478 +-#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
479 +-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
480 +-#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
481 +-/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
482 +-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
483 +-#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
484 +-#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
485 +-#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
486 +-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
487 +-#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
488 +-#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
489 +-#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
490 +-/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
491 +-#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
492 +-#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
493 +-#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
494 +-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
495 +-#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
496 +-/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
497 +-#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
498 +-#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
499 +-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
500 +-/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
501 +-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
502 +-
503 +-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
504 +-#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
505 +-#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
506 +-#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
507 +-#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
508 +-#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
509 +-#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
510 +-#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
511 +-#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
512 +-#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
513 +-#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
514 +-#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
515 +-#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
516 +-#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
517 +-#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
518 +-#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
519 +-#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
520 +-#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
521 +-#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
522 +-#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
523 +-#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
524 +-#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
525 +-#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
526 +-#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
527 +-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
528 +-#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
529 +-#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
530 +-#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
531 +-#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
532 +-#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
533 +-#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
534 +-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
535 +-
536 +-/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
537 +-#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
538 +-#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
539 +-#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
540 +-#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
541 +-#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
542 +-#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
543 +-#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
544 +-#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
545 +-#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
546 +-#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
547 +-
548 +-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
549 +-#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
550 +-#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
551 +-#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
552 +-#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
553 +-#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
554 +-#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
555 +-#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
556 +-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
557 +-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
558 +-#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
559 +-#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
560 +-#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
561 +-#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
562 +-#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
563 +-#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
564 +-#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
565 +-#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
566 +-#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
567 +-#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
568 +-#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
569 +-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
570 +-#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
571 +-#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
572 +-#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
573 +-#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
574 +-
575 +-/*
576 +- * Auxiliary flags: Linux defined - For features scattered in various
577 +- * CPUID levels like 0x6, 0xA etc, word 7
578 +- */
579 +-#define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */
580 +-#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */
581 +-#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
582 +-#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
583 +-#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
584 +-#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */
585 +-#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */
586 +-#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */
587 +-#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
588 +-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
589 +-#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
590 +-#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
591 +-#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
592 +-#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
593 +-#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
594 +-#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
595 +-#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
596 +-
597 +-#define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
598 +-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
599 +-/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
600 +-#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
601 +-
602 +-/* Virtualization flags: Linux defined, word 8 */
603 +-#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
604 +-#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
605 +-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
606 +-#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
607 +-#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
608 +-#define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */
609 +-#define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */
610 +-#define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
611 +-#define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
612 +-#define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
613 +-#define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
614 +-#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */
615 +-#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
616 +-#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
617 +-#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
618 +-#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
619 +-#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
620 +-
621 +-
622 +-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
623 +-#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
624 +-#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
625 +-#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
626 +-#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
627 +-#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
628 +-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
629 +-#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
630 +-#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
631 +-#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
632 +-#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
633 +-#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
634 +-#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
635 +-#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
636 +-#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
637 +-#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
638 +-#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
639 +-#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
640 +-#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
641 +-#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
642 +-#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
643 +-#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
644 +-#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
645 +-#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
646 +-
647 +-/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
648 +-#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
649 +-#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
650 +-#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
651 +-#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
652 +-
653 +-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
654 +-#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
655 +-
656 +-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
657 +-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
658 +-
659 +-/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
660 +-#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
661 +-
662 +-/*
663 +- * BUG word(s)
664 +- */
665 +-#define X86_BUG(x) (NCAPINTS*32 + (x))
666 +-
667 +-#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
668 +-#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
669 +-#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
670 +-#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
671 +-#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
672 +-#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
673 +-#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
674 +-#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
675 +-#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
676 +-#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
677 +-#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
678 +-#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
679 ++#include <asm/processor.h>
680 +
681 + #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
682 +
683 + #include <asm/asm.h>
684 + #include <linux/bitops.h>
685 +
686 ++enum cpuid_leafs
687 ++{
688 ++ CPUID_1_EDX = 0,
689 ++ CPUID_8000_0001_EDX,
690 ++ CPUID_8086_0001_EDX,
691 ++ CPUID_LNX_1,
692 ++ CPUID_1_ECX,
693 ++ CPUID_C000_0001_EDX,
694 ++ CPUID_8000_0001_ECX,
695 ++ CPUID_LNX_2,
696 ++ CPUID_LNX_3,
697 ++ CPUID_7_0_EBX,
698 ++ CPUID_D_1_EAX,
699 ++ CPUID_F_0_EDX,
700 ++ CPUID_F_1_EDX,
701 ++ CPUID_8000_0008_EBX,
702 ++ CPUID_6_EAX,
703 ++ CPUID_8000_000A_EDX,
704 ++ CPUID_7_ECX,
705 ++ CPUID_8000_0007_EBX,
706 ++};
707 ++
708 + #ifdef CONFIG_X86_FEATURE_NAMES
709 + extern const char * const x86_cap_flags[NCAPINTS*32];
710 + extern const char * const x86_power_flags[32];
711 +@@ -308,29 +49,59 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
712 + #define test_cpu_cap(c, bit) \
713 + test_bit(bit, (unsigned long *)((c)->x86_capability))
714 +
715 +-#define REQUIRED_MASK_BIT_SET(bit) \
716 +- ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
717 +- (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
718 +- (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
719 +- (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
720 +- (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
721 +- (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
722 +- (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
723 +- (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \
724 +- (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \
725 +- (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
726 +-
727 +-#define DISABLED_MASK_BIT_SET(bit) \
728 +- ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) || \
729 +- (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) || \
730 +- (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) || \
731 +- (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) || \
732 +- (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) || \
733 +- (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) || \
734 +- (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) || \
735 +- (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) || \
736 +- (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) || \
737 +- (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) )
738 ++/*
739 ++ * There are 32 bits/features in each mask word. The high bits
740 ++ * (selected with (bit>>5) give us the word number and the low 5
741 ++ * bits give us the bit/feature number inside the word.
742 ++ * (1UL<<((bit)&31) gives us a mask for the feature_bit so we can
743 ++ * see if it is set in the mask word.
744 ++ */
745 ++#define CHECK_BIT_IN_MASK_WORD(maskname, word, bit) \
746 ++ (((bit)>>5)==(word) && (1UL<<((bit)&31) & maskname##word ))
747 ++
748 ++#define REQUIRED_MASK_BIT_SET(feature_bit) \
749 ++ ( CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 0, feature_bit) || \
750 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 1, feature_bit) || \
751 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 2, feature_bit) || \
752 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 3, feature_bit) || \
753 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 4, feature_bit) || \
754 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 5, feature_bit) || \
755 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 6, feature_bit) || \
756 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 7, feature_bit) || \
757 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 8, feature_bit) || \
758 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 9, feature_bit) || \
759 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 10, feature_bit) || \
760 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 11, feature_bit) || \
761 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 12, feature_bit) || \
762 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 13, feature_bit) || \
763 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 14, feature_bit) || \
764 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \
765 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
766 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
767 ++ REQUIRED_MASK_CHECK || \
768 ++ BUILD_BUG_ON_ZERO(NCAPINTS != 18))
769 ++
770 ++#define DISABLED_MASK_BIT_SET(feature_bit) \
771 ++ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
772 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 1, feature_bit) || \
773 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 2, feature_bit) || \
774 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 3, feature_bit) || \
775 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 4, feature_bit) || \
776 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 5, feature_bit) || \
777 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 6, feature_bit) || \
778 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 7, feature_bit) || \
779 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 8, feature_bit) || \
780 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 9, feature_bit) || \
781 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 10, feature_bit) || \
782 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 11, feature_bit) || \
783 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 12, feature_bit) || \
784 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 13, feature_bit) || \
785 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 14, feature_bit) || \
786 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \
787 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
788 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
789 ++ DISABLED_MASK_CHECK || \
790 ++ BUILD_BUG_ON_ZERO(NCAPINTS != 18))
791 +
792 + #define cpu_has(c, bit) \
793 + (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
794 +@@ -349,8 +120,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
795 + * is not relevant.
796 + */
797 + #define cpu_feature_enabled(bit) \
798 +- (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : \
799 +- cpu_has(&boot_cpu_data, bit))
800 ++ (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : static_cpu_has(bit))
801 +
802 + #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
803 +
804 +@@ -388,106 +158,19 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
805 + #define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
806 + #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
807 + /*
808 +- * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
809 ++ * Do not add any more of those clumsy macros - use static_cpu_has() for
810 + * fast paths and boot_cpu_has() otherwise!
811 + */
812 +
813 +-#if __GNUC__ >= 4
814 +-extern void warn_pre_alternatives(void);
815 +-extern bool __static_cpu_has_safe(u16 bit);
816 +-
817 ++#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
818 + /*
819 + * Static testing of CPU features. Used the same as boot_cpu_has().
820 +- * These are only valid after alternatives have run, but will statically
821 +- * patch the target code for additional performance.
822 ++ * These will statically patch the target code for additional
823 ++ * performance.
824 + */
825 +-static __always_inline __pure bool __static_cpu_has(u16 bit)
826 +-{
827 +-#ifdef CC_HAVE_ASM_GOTO
828 +-
829 +-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
830 +-
831 +- /*
832 +- * Catch too early usage of this before alternatives
833 +- * have run.
834 +- */
835 +- asm_volatile_goto("1: jmp %l[t_warn]\n"
836 +- "2:\n"
837 +- ".section .altinstructions,\"a\"\n"
838 +- " .long 1b - .\n"
839 +- " .long 0\n" /* no replacement */
840 +- " .word %P0\n" /* 1: do replace */
841 +- " .byte 2b - 1b\n" /* source len */
842 +- " .byte 0\n" /* replacement len */
843 +- " .byte 0\n" /* pad len */
844 +- ".previous\n"
845 +- /* skipping size check since replacement size = 0 */
846 +- : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
847 +-
848 +-#endif
849 +-
850 +- asm_volatile_goto("1: jmp %l[t_no]\n"
851 +- "2:\n"
852 +- ".section .altinstructions,\"a\"\n"
853 +- " .long 1b - .\n"
854 +- " .long 0\n" /* no replacement */
855 +- " .word %P0\n" /* feature bit */
856 +- " .byte 2b - 1b\n" /* source len */
857 +- " .byte 0\n" /* replacement len */
858 +- " .byte 0\n" /* pad len */
859 +- ".previous\n"
860 +- /* skipping size check since replacement size = 0 */
861 +- : : "i" (bit) : : t_no);
862 +- return true;
863 +- t_no:
864 +- return false;
865 +-
866 +-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
867 +- t_warn:
868 +- warn_pre_alternatives();
869 +- return false;
870 +-#endif
871 +-
872 +-#else /* CC_HAVE_ASM_GOTO */
873 +-
874 +- u8 flag;
875 +- /* Open-coded due to __stringify() in ALTERNATIVE() */
876 +- asm volatile("1: movb $0,%0\n"
877 +- "2:\n"
878 +- ".section .altinstructions,\"a\"\n"
879 +- " .long 1b - .\n"
880 +- " .long 3f - .\n"
881 +- " .word %P1\n" /* feature bit */
882 +- " .byte 2b - 1b\n" /* source len */
883 +- " .byte 4f - 3f\n" /* replacement len */
884 +- " .byte 0\n" /* pad len */
885 +- ".previous\n"
886 +- ".section .discard,\"aw\",@progbits\n"
887 +- " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
888 +- ".previous\n"
889 +- ".section .altinstr_replacement,\"ax\"\n"
890 +- "3: movb $1,%0\n"
891 +- "4:\n"
892 +- ".previous\n"
893 +- : "=qm" (flag) : "i" (bit));
894 +- return flag;
895 +-
896 +-#endif /* CC_HAVE_ASM_GOTO */
897 +-}
898 +-
899 +-#define static_cpu_has(bit) \
900 +-( \
901 +- __builtin_constant_p(boot_cpu_has(bit)) ? \
902 +- boot_cpu_has(bit) : \
903 +- __builtin_constant_p(bit) ? \
904 +- __static_cpu_has(bit) : \
905 +- boot_cpu_has(bit) \
906 +-)
907 +-
908 +-static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
909 ++static __always_inline __pure bool _static_cpu_has(u16 bit)
910 + {
911 +-#ifdef CC_HAVE_ASM_GOTO
912 +- asm_volatile_goto("1: jmp %l[t_dynamic]\n"
913 ++ asm_volatile_goto("1: jmp 6f\n"
914 + "2:\n"
915 + ".skip -(((5f-4f) - (2b-1b)) > 0) * "
916 + "((5f-4f) - (2b-1b)),0x90\n"
917 +@@ -512,66 +195,34 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
918 + " .byte 0\n" /* repl len */
919 + " .byte 0\n" /* pad len */
920 + ".previous\n"
921 +- : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
922 +- : : t_dynamic, t_no);
923 ++ ".section .altinstr_aux,\"ax\"\n"
924 ++ "6:\n"
925 ++ " testb %[bitnum],%[cap_byte]\n"
926 ++ " jnz %l[t_yes]\n"
927 ++ " jmp %l[t_no]\n"
928 ++ ".previous\n"
929 ++ : : "i" (bit), "i" (X86_FEATURE_ALWAYS),
930 ++ [bitnum] "i" (1 << (bit & 7)),
931 ++ [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
932 ++ : : t_yes, t_no);
933 ++ t_yes:
934 + return true;
935 + t_no:
936 + return false;
937 +- t_dynamic:
938 +- return __static_cpu_has_safe(bit);
939 +-#else
940 +- u8 flag;
941 +- /* Open-coded due to __stringify() in ALTERNATIVE() */
942 +- asm volatile("1: movb $2,%0\n"
943 +- "2:\n"
944 +- ".section .altinstructions,\"a\"\n"
945 +- " .long 1b - .\n" /* src offset */
946 +- " .long 3f - .\n" /* repl offset */
947 +- " .word %P2\n" /* always replace */
948 +- " .byte 2b - 1b\n" /* source len */
949 +- " .byte 4f - 3f\n" /* replacement len */
950 +- " .byte 0\n" /* pad len */
951 +- ".previous\n"
952 +- ".section .discard,\"aw\",@progbits\n"
953 +- " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
954 +- ".previous\n"
955 +- ".section .altinstr_replacement,\"ax\"\n"
956 +- "3: movb $0,%0\n"
957 +- "4:\n"
958 +- ".previous\n"
959 +- ".section .altinstructions,\"a\"\n"
960 +- " .long 1b - .\n" /* src offset */
961 +- " .long 5f - .\n" /* repl offset */
962 +- " .word %P1\n" /* feature bit */
963 +- " .byte 4b - 3b\n" /* src len */
964 +- " .byte 6f - 5f\n" /* repl len */
965 +- " .byte 0\n" /* pad len */
966 +- ".previous\n"
967 +- ".section .discard,\"aw\",@progbits\n"
968 +- " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
969 +- ".previous\n"
970 +- ".section .altinstr_replacement,\"ax\"\n"
971 +- "5: movb $1,%0\n"
972 +- "6:\n"
973 +- ".previous\n"
974 +- : "=qm" (flag)
975 +- : "i" (bit), "i" (X86_FEATURE_ALWAYS));
976 +- return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
977 +-#endif /* CC_HAVE_ASM_GOTO */
978 + }
979 +
980 +-#define static_cpu_has_safe(bit) \
981 ++#define static_cpu_has(bit) \
982 + ( \
983 + __builtin_constant_p(boot_cpu_has(bit)) ? \
984 + boot_cpu_has(bit) : \
985 +- _static_cpu_has_safe(bit) \
986 ++ _static_cpu_has(bit) \
987 + )
988 + #else
989 + /*
990 +- * gcc 3.x is too stupid to do the static test; fall back to dynamic.
991 ++ * Fall back to dynamic for gcc versions which don't support asm goto. Should be
992 ++ * a minority now anyway.
993 + */
994 + #define static_cpu_has(bit) boot_cpu_has(bit)
995 +-#define static_cpu_has_safe(bit) boot_cpu_has(bit)
996 + #endif
997 +
998 + #define cpu_has_bug(c, bit) cpu_has(c, (bit))
999 +@@ -579,7 +230,6 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
1000 + #define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit))
1001 +
1002 + #define static_cpu_has_bug(bit) static_cpu_has((bit))
1003 +-#define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit))
1004 + #define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
1005 +
1006 + #define MAX_CPU_FEATURES (NCAPINTS * 32)
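To make the CHECK_BIT_IN_MASK_WORD() arithmetic above concrete (a worked example using only definitions from this patch): X86_FEATURE_XMM2 is (0*32+26) = 26, so bit>>5 selects word 0 and (1UL<<(26&31)) = 0x04000000 is tested against REQUIRED_MASK0 / DISABLED_MASK0; X86_FEATURE_FSGSBASE is (9*32+0) = 288, so bit>>5 selects word 9 and bit 0 of REQUIRED_MASK9 / DISABLED_MASK9 is tested. The BUILD_BUG_ON_ZERO(NCAPINTS != 18) term at the end of each macro keeps the hand-unrolled list of 18 words in sync with the new NCAPINTS value.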
1007 +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
1008 +new file mode 100644
1009 +index 000000000000..205ce70c1d6c
1010 +--- /dev/null
1011 ++++ b/arch/x86/include/asm/cpufeatures.h
1012 +@@ -0,0 +1,306 @@
1013 ++#ifndef _ASM_X86_CPUFEATURES_H
1014 ++#define _ASM_X86_CPUFEATURES_H
1015 ++
1016 ++#ifndef _ASM_X86_REQUIRED_FEATURES_H
1017 ++#include <asm/required-features.h>
1018 ++#endif
1019 ++
1020 ++#ifndef _ASM_X86_DISABLED_FEATURES_H
1021 ++#include <asm/disabled-features.h>
1022 ++#endif
1023 ++
1024 ++/*
1025 ++ * Defines x86 CPU feature bits
1026 ++ */
1027 ++#define NCAPINTS 18 /* N 32-bit words worth of info */
1028 ++#define NBUGINTS 1 /* N 32-bit bug flags */
1029 ++
1030 ++/*
1031 ++ * Note: If the comment begins with a quoted string, that string is used
1032 ++ * in /proc/cpuinfo instead of the macro name. If the string is "",
1033 ++ * this feature bit is not displayed in /proc/cpuinfo at all.
1034 ++ */
1035 ++
1036 ++/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
1037 ++#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
1038 ++#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
1039 ++#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
1040 ++#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
1041 ++#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
1042 ++#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
1043 ++#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
1044 ++#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
1045 ++#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
1046 ++#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
1047 ++#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
1048 ++#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
1049 ++#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
1050 ++#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
1051 ++#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
1052 ++ /* (plus FCMOVcc, FCOMI with FPU) */
1053 ++#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
1054 ++#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
1055 ++#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
1056 ++#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
1057 ++#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
1058 ++#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
1059 ++#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
1060 ++#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
1061 ++#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
1062 ++#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
1063 ++#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
1064 ++#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
1065 ++#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
1066 ++#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
1067 ++#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
1068 ++
1069 ++/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
1070 ++/* Don't duplicate feature flags which are redundant with Intel! */
1071 ++#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
1072 ++#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
1073 ++#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
1074 ++#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
1075 ++#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
1076 ++#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
1077 ++#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
1078 ++#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
1079 ++#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
1080 ++#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
1081 ++
1082 ++/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
1083 ++#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
1084 ++#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
1085 ++#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
1086 ++
1087 ++/* Other features, Linux-defined mapping, word 3 */
1088 ++/* This range is used for feature bits which conflict or are synthesized */
1089 ++#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
1090 ++#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
1091 ++#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
1092 ++#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
1093 ++/* cpu types for specific tunings: */
1094 ++#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
1095 ++#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
1096 ++#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
1097 ++#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
1098 ++#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
1099 ++#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
1100 ++/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
1101 ++#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
1102 ++#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
1103 ++#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
1104 ++#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
1105 ++#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
1106 ++#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
1107 ++#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
1108 ++#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
1109 ++/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
1110 ++#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
1111 ++#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
1112 ++#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
1113 ++#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
1114 ++#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
1115 ++/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
1116 ++#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
1117 ++#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
1118 ++#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
1119 ++/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
1120 ++#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
1121 ++
1122 ++/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
1123 ++#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
1124 ++#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
1125 ++#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
1126 ++#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
1127 ++#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
1128 ++#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
1129 ++#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
1130 ++#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
1131 ++#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
1132 ++#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
1133 ++#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
1134 ++#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
1135 ++#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
1136 ++#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
1137 ++#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
1138 ++#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
1139 ++#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
1140 ++#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
1141 ++#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
1142 ++#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
1143 ++#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
1144 ++#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
1145 ++#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
1146 ++#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
1147 ++#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
1148 ++#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
1149 ++#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
1150 ++#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
1151 ++#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
1152 ++#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
1153 ++#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
1154 ++
1155 ++/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
1156 ++#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
1157 ++#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
1158 ++#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
1159 ++#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
1160 ++#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
1161 ++#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
1162 ++#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
1163 ++#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
1164 ++#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
1165 ++#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
1166 ++
1167 ++/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
1168 ++#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
1169 ++#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
1170 ++#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
1171 ++#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
1172 ++#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
1173 ++#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
1174 ++#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
1175 ++#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
1176 ++#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
1177 ++#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
1178 ++#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
1179 ++#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
1180 ++#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
1181 ++#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
1182 ++#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
1183 ++#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
1184 ++#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
1185 ++#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
1186 ++#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
1187 ++#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
1188 ++#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
1189 ++#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
1190 ++#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
1191 ++#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
1192 ++#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
1193 ++
1194 ++/*
1195 ++ * Auxiliary flags: Linux defined - For features scattered in various
1196 ++ * CPUID levels like 0x6, 0xA etc, word 7.
1197 ++ *
1198 ++ * Reuse free bits when adding new feature flags!
1199 ++ */
1200 ++
1201 ++#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
1202 ++#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
1203 ++#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
1204 ++
1205 ++#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
1206 ++#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
1207 ++
1208 ++#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
1209 ++#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
1210 ++
1211 ++#define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
1212 ++#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
1213 ++/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
1214 ++#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
1215 ++
1216 ++/* Virtualization flags: Linux defined, word 8 */
1217 ++#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
1218 ++#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
1219 ++#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
1220 ++#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
1221 ++#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
1222 ++
1223 ++#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
1224 ++#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
1225 ++
1226 ++
1227 ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
1228 ++#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
1229 ++#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
1230 ++#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
1231 ++#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
1232 ++#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
1233 ++#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
1234 ++#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
1235 ++#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
1236 ++#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
1237 ++#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
1238 ++#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
1239 ++#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
1240 ++#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
1241 ++#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
1242 ++#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
1243 ++#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
1244 ++#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
1245 ++#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
1246 ++#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
1247 ++#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
1248 ++#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
1249 ++#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
1250 ++#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
1251 ++
1252 ++/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
1253 ++#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
1254 ++#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
1255 ++#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
1256 ++#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
1257 ++
1258 ++/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
1259 ++#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
1260 ++
1261 ++/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
1262 ++#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
1263 ++
1264 ++/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
1265 ++#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
1266 ++
1267 ++/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
1268 ++#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
1269 ++#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
1270 ++#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
1271 ++#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
1272 ++#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
1273 ++#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
1274 ++#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
1275 ++#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
1276 ++#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
1277 ++#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
1278 ++
1279 ++/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
1280 ++#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
1281 ++#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
1282 ++#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
1283 ++#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
1284 ++#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
1285 ++#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
1286 ++#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
1287 ++#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
1288 ++#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
1289 ++#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
1290 ++
1291 ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
1292 ++#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
1293 ++#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
1294 ++
1295 ++/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
1296 ++#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
1297 ++#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
1298 ++#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */
1299 ++
1300 ++/*
1301 ++ * BUG word(s)
1302 ++ */
1303 ++#define X86_BUG(x) (NCAPINTS*32 + (x))
1304 ++
1305 ++#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
1306 ++#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
1307 ++#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
1308 ++#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
1309 ++#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
1310 ++#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
1311 ++#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
1312 ++#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
1313 ++#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
1314 ++#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
1315 ++#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
1316 ++#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
1317 ++
1318 ++#endif /* _ASM_X86_CPUFEATURES_H */
1319 +diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
1320 +index 8b17c2ad1048..21c5ac15657b 100644
1321 +--- a/arch/x86/include/asm/disabled-features.h
1322 ++++ b/arch/x86/include/asm/disabled-features.h
1323 +@@ -30,6 +30,14 @@
1324 + # define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
1325 + #endif /* CONFIG_X86_64 */
1326 +
1327 ++#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
1328 ++# define DISABLE_PKU 0
1329 ++# define DISABLE_OSPKE 0
1330 ++#else
1331 ++# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31))
1332 ++# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
1333 ++#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
1334 ++
1335 + /*
1336 + * Make sure to add features to the correct mask
1337 + */
1338 +@@ -43,5 +51,14 @@
1339 + #define DISABLED_MASK7 0
1340 + #define DISABLED_MASK8 0
1341 + #define DISABLED_MASK9 (DISABLE_MPX)
1342 ++#define DISABLED_MASK10 0
1343 ++#define DISABLED_MASK11 0
1344 ++#define DISABLED_MASK12 0
1345 ++#define DISABLED_MASK13 0
1346 ++#define DISABLED_MASK14 0
1347 ++#define DISABLED_MASK15 0
1348 ++#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
1349 ++#define DISABLED_MASK17 0
1350 ++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
1351 +
1352 + #endif /* _ASM_X86_DISABLED_FEATURES_H */
1353 +diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
1354 +index 146d838e6ee7..ec2aedb6f92a 100644
1355 +--- a/arch/x86/include/asm/fpu/internal.h
1356 ++++ b/arch/x86/include/asm/fpu/internal.h
1357 +@@ -17,6 +17,7 @@
1358 + #include <asm/user.h>
1359 + #include <asm/fpu/api.h>
1360 + #include <asm/fpu/xstate.h>
1361 ++#include <asm/cpufeature.h>
1362 +
1363 + /*
1364 + * High level FPU state handling functions:
1365 +@@ -63,17 +64,17 @@ static __always_inline __pure bool use_eager_fpu(void)
1366 +
1367 + static __always_inline __pure bool use_xsaveopt(void)
1368 + {
1369 +- return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
1370 ++ return static_cpu_has(X86_FEATURE_XSAVEOPT);
1371 + }
1372 +
1373 + static __always_inline __pure bool use_xsave(void)
1374 + {
1375 +- return static_cpu_has_safe(X86_FEATURE_XSAVE);
1376 ++ return static_cpu_has(X86_FEATURE_XSAVE);
1377 + }
1378 +
1379 + static __always_inline __pure bool use_fxsr(void)
1380 + {
1381 +- return static_cpu_has_safe(X86_FEATURE_FXSR);
1382 ++ return static_cpu_has(X86_FEATURE_FXSR);
1383 + }
1384 +
1385 + /*
1386 +@@ -225,18 +226,67 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
1387 + #define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f"
1388 + #define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
1389 +
1390 +-/* xstate instruction fault handler: */
1391 +-#define xstate_fault(__err) \
1392 +- \
1393 +- ".section .fixup,\"ax\"\n" \
1394 +- \
1395 +- "3: movl $-2,%[_err]\n" \
1396 +- " jmp 2b\n" \
1397 +- \
1398 +- ".previous\n" \
1399 +- \
1400 +- _ASM_EXTABLE(1b, 3b) \
1401 +- : [_err] "=r" (__err)
1402 ++#define XSTATE_OP(op, st, lmask, hmask, err) \
1403 ++ asm volatile("1:" op "\n\t" \
1404 ++ "xor %[err], %[err]\n" \
1405 ++ "2:\n\t" \
1406 ++ ".pushsection .fixup,\"ax\"\n\t" \
1407 ++ "3: movl $-2,%[err]\n\t" \
1408 ++ "jmp 2b\n\t" \
1409 ++ ".popsection\n\t" \
1410 ++ _ASM_EXTABLE(1b, 3b) \
1411 ++ : [err] "=r" (err) \
1412 ++ : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
1413 ++ : "memory")
1414 ++
1415 ++/*
1416 ++ * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
1417 ++ * format and supervisor states in addition to modified optimization in
1418 ++ * XSAVEOPT.
1419 ++ *
1420 ++ * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
1421 ++ * supports modified optimization which is not supported by XSAVE.
1422 ++ *
1423 ++ * We use XSAVE as a fallback.
1424 ++ *
1425 ++ * The 661 label is defined in the ALTERNATIVE* macros as the address of the
1426 ++ * original instruction which gets replaced. We need to use it here as the
1427 ++ * address of the instruction where we might get an exception at.
1428 ++ */
1429 ++#define XSTATE_XSAVE(st, lmask, hmask, err) \
1430 ++ asm volatile(ALTERNATIVE_2(XSAVE, \
1431 ++ XSAVEOPT, X86_FEATURE_XSAVEOPT, \
1432 ++ XSAVES, X86_FEATURE_XSAVES) \
1433 ++ "\n" \
1434 ++ "xor %[err], %[err]\n" \
1435 ++ "3:\n" \
1436 ++ ".pushsection .fixup,\"ax\"\n" \
1437 ++ "4: movl $-2, %[err]\n" \
1438 ++ "jmp 3b\n" \
1439 ++ ".popsection\n" \
1440 ++ _ASM_EXTABLE(661b, 4b) \
1441 ++ : [err] "=r" (err) \
1442 ++ : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
1443 ++ : "memory")
1444 ++
1445 ++/*
1446 ++ * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
1447 ++ * XSAVE area format.
1448 ++ */
1449 ++#define XSTATE_XRESTORE(st, lmask, hmask, err) \
1450 ++ asm volatile(ALTERNATIVE(XRSTOR, \
1451 ++ XRSTORS, X86_FEATURE_XSAVES) \
1452 ++ "\n" \
1453 ++ "xor %[err], %[err]\n" \
1454 ++ "3:\n" \
1455 ++ ".pushsection .fixup,\"ax\"\n" \
1456 ++ "4: movl $-2, %[err]\n" \
1457 ++ "jmp 3b\n" \
1458 ++ ".popsection\n" \
1459 ++ _ASM_EXTABLE(661b, 4b) \
1460 ++ : [err] "=r" (err) \
1461 ++ : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
1462 ++ : "memory")
1463 +
1464 + /*
1465 + * This function is called only during boot time when x86 caps are not set
1466 +@@ -247,22 +297,14 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
1467 + u64 mask = -1;
1468 + u32 lmask = mask;
1469 + u32 hmask = mask >> 32;
1470 +- int err = 0;
1471 ++ int err;
1472 +
1473 + WARN_ON(system_state != SYSTEM_BOOTING);
1474 +
1475 +- if (boot_cpu_has(X86_FEATURE_XSAVES))
1476 +- asm volatile("1:"XSAVES"\n\t"
1477 +- "2:\n\t"
1478 +- xstate_fault(err)
1479 +- : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
1480 +- : "memory");
1481 ++ if (static_cpu_has(X86_FEATURE_XSAVES))
1482 ++ XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
1483 + else
1484 +- asm volatile("1:"XSAVE"\n\t"
1485 +- "2:\n\t"
1486 +- xstate_fault(err)
1487 +- : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
1488 +- : "memory");
1489 ++ XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
1490 +
1491 + /* We should never fault when copying to a kernel buffer: */
1492 + WARN_ON_FPU(err);
1493 +@@ -277,22 +319,14 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
1494 + u64 mask = -1;
1495 + u32 lmask = mask;
1496 + u32 hmask = mask >> 32;
1497 +- int err = 0;
1498 ++ int err;
1499 +
1500 + WARN_ON(system_state != SYSTEM_BOOTING);
1501 +
1502 +- if (boot_cpu_has(X86_FEATURE_XSAVES))
1503 +- asm volatile("1:"XRSTORS"\n\t"
1504 +- "2:\n\t"
1505 +- xstate_fault(err)
1506 +- : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
1507 +- : "memory");
1508 ++ if (static_cpu_has(X86_FEATURE_XSAVES))
1509 ++ XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
1510 + else
1511 +- asm volatile("1:"XRSTOR"\n\t"
1512 +- "2:\n\t"
1513 +- xstate_fault(err)
1514 +- : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
1515 +- : "memory");
1516 ++ XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
1517 +
1518 + /* We should never fault when copying from a kernel buffer: */
1519 + WARN_ON_FPU(err);
1520 +@@ -306,33 +340,11 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
1521 + u64 mask = -1;
1522 + u32 lmask = mask;
1523 + u32 hmask = mask >> 32;
1524 +- int err = 0;
1525 ++ int err;
1526 +
1527 + WARN_ON(!alternatives_patched);
1528 +
1529 +- /*
1530 +- * If xsaves is enabled, xsaves replaces xsaveopt because
1531 +- * it supports compact format and supervisor states in addition to
1532 +- * modified optimization in xsaveopt.
1533 +- *
1534 +- * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
1535 +- * because xsaveopt supports modified optimization which is not
1536 +- * supported by xsave.
1537 +- *
1538 +- * If none of xsaves and xsaveopt is enabled, use xsave.
1539 +- */
1540 +- alternative_input_2(
1541 +- "1:"XSAVE,
1542 +- XSAVEOPT,
1543 +- X86_FEATURE_XSAVEOPT,
1544 +- XSAVES,
1545 +- X86_FEATURE_XSAVES,
1546 +- [xstate] "D" (xstate), "a" (lmask), "d" (hmask) :
1547 +- "memory");
1548 +- asm volatile("2:\n\t"
1549 +- xstate_fault(err)
1550 +- : "0" (err)
1551 +- : "memory");
1552 ++ XSTATE_XSAVE(xstate, lmask, hmask, err);
1553 +
1554 + /* We should never fault when copying to a kernel buffer: */
1555 + WARN_ON_FPU(err);
1556 +@@ -345,23 +357,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
1557 + {
1558 + u32 lmask = mask;
1559 + u32 hmask = mask >> 32;
1560 +- int err = 0;
1561 ++ int err;
1562 +
1563 +- /*
1564 +- * Use xrstors to restore context if it is enabled. xrstors supports
1565 +- * compacted format of xsave area which is not supported by xrstor.
1566 +- */
1567 +- alternative_input(
1568 +- "1: " XRSTOR,
1569 +- XRSTORS,
1570 +- X86_FEATURE_XSAVES,
1571 +- "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask)
1572 +- : "memory");
1573 +-
1574 +- asm volatile("2:\n"
1575 +- xstate_fault(err)
1576 +- : "0" (err)
1577 +- : "memory");
1578 ++ XSTATE_XRESTORE(xstate, lmask, hmask, err);
1579 +
1580 + /* We should never fault when copying from a kernel buffer: */
1581 + WARN_ON_FPU(err);
1582 +@@ -389,12 +387,10 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
1583 + if (unlikely(err))
1584 + return -EFAULT;
1585 +
1586 +- __asm__ __volatile__(ASM_STAC "\n"
1587 +- "1:"XSAVE"\n"
1588 +- "2: " ASM_CLAC "\n"
1589 +- xstate_fault(err)
1590 +- : "D" (buf), "a" (-1), "d" (-1), "0" (err)
1591 +- : "memory");
1592 ++ stac();
1593 ++ XSTATE_OP(XSAVE, buf, -1, -1, err);
1594 ++ clac();
1595 ++
1596 + return err;
1597 + }
1598 +
1599 +@@ -406,14 +402,12 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
1600 + struct xregs_state *xstate = ((__force struct xregs_state *)buf);
1601 + u32 lmask = mask;
1602 + u32 hmask = mask >> 32;
1603 +- int err = 0;
1604 +-
1605 +- __asm__ __volatile__(ASM_STAC "\n"
1606 +- "1:"XRSTOR"\n"
1607 +- "2: " ASM_CLAC "\n"
1608 +- xstate_fault(err)
1609 +- : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
1610 +- : "memory"); /* memory required? */
1611 ++ int err;
1612 ++
1613 ++ stac();
1614 ++ XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
1615 ++ clac();
1616 ++
1617 + return err;
1618 + }
1619 +
1620 +@@ -467,7 +461,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
1621 + * pending. Clear the x87 state here by setting it to fixed values.
1622 + * "m" is a random variable that should be in L1.
1623 + */
1624 +- if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
1625 ++ if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
1626 + asm volatile(
1627 + "fnclex\n\t"
1628 + "emms\n\t"
1629 +diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
1630 +index 78162f8e248b..d0afb05c84fc 100644
1631 +--- a/arch/x86/include/asm/irq_work.h
1632 ++++ b/arch/x86/include/asm/irq_work.h
1633 +@@ -1,7 +1,7 @@
1634 + #ifndef _ASM_IRQ_WORK_H
1635 + #define _ASM_IRQ_WORK_H
1636 +
1637 +-#include <asm/processor.h>
1638 ++#include <asm/cpufeature.h>
1639 +
1640 + static inline bool arch_irq_work_has_interrupt(void)
1641 + {
1642 +diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
1643 +index c70689b5e5aa..0deeb2d26df7 100644
1644 +--- a/arch/x86/include/asm/mwait.h
1645 ++++ b/arch/x86/include/asm/mwait.h
1646 +@@ -3,6 +3,8 @@
1647 +
1648 + #include <linux/sched.h>
1649 +
1650 ++#include <asm/cpufeature.h>
1651 ++
1652 + #define MWAIT_SUBSTATE_MASK 0xf
1653 + #define MWAIT_CSTATE_MASK 0xf
1654 + #define MWAIT_SUBSTATE_SIZE 4
1655 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
1656 +index 249f1c769f21..8b910416243c 100644
1657 +--- a/arch/x86/include/asm/nospec-branch.h
1658 ++++ b/arch/x86/include/asm/nospec-branch.h
1659 +@@ -5,7 +5,7 @@
1660 +
1661 + #include <asm/alternative.h>
1662 + #include <asm/alternative-asm.h>
1663 +-#include <asm/cpufeature.h>
1664 ++#include <asm/cpufeatures.h>
1665 +
1666 + /*
1667 + * Fill the CPU return stack buffer.
1668 +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
1669 +index 9e77cea2a8ef..8e415cf65457 100644
1670 +--- a/arch/x86/include/asm/processor.h
1671 ++++ b/arch/x86/include/asm/processor.h
1672 +@@ -13,7 +13,7 @@ struct vm86;
1673 + #include <asm/types.h>
1674 + #include <uapi/asm/sigcontext.h>
1675 + #include <asm/current.h>
1676 +-#include <asm/cpufeature.h>
1677 ++#include <asm/cpufeatures.h>
1678 + #include <asm/page.h>
1679 + #include <asm/pgtable_types.h>
1680 + #include <asm/percpu.h>
1681 +@@ -24,7 +24,6 @@ struct vm86;
1682 + #include <asm/fpu/types.h>
1683 +
1684 + #include <linux/personality.h>
1685 +-#include <linux/cpumask.h>
1686 + #include <linux/cache.h>
1687 + #include <linux/threads.h>
1688 + #include <linux/math64.h>
1689 +diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
1690 +index 5c6e4fb370f5..fac9a5c0abe9 100644
1691 +--- a/arch/x86/include/asm/required-features.h
1692 ++++ b/arch/x86/include/asm/required-features.h
1693 +@@ -92,5 +92,14 @@
1694 + #define REQUIRED_MASK7 0
1695 + #define REQUIRED_MASK8 0
1696 + #define REQUIRED_MASK9 0
1697 ++#define REQUIRED_MASK10 0
1698 ++#define REQUIRED_MASK11 0
1699 ++#define REQUIRED_MASK12 0
1700 ++#define REQUIRED_MASK13 0
1701 ++#define REQUIRED_MASK14 0
1702 ++#define REQUIRED_MASK15 0
1703 ++#define REQUIRED_MASK16 0
1704 ++#define REQUIRED_MASK17 0
1705 ++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
1706 +
1707 + #endif /* _ASM_X86_REQUIRED_FEATURES_H */
1708 +diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
1709 +index ba665ebd17bb..db333300bd4b 100644
1710 +--- a/arch/x86/include/asm/smap.h
1711 ++++ b/arch/x86/include/asm/smap.h
1712 +@@ -15,7 +15,7 @@
1713 +
1714 + #include <linux/stringify.h>
1715 + #include <asm/nops.h>
1716 +-#include <asm/cpufeature.h>
1717 ++#include <asm/cpufeatures.h>
1718 +
1719 + /* "Raw" instruction opcodes */
1720 + #define __ASM_CLAC .byte 0x0f,0x01,0xca
1721 +diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
1722 +index a438c5598a90..04d6eef5f8a5 100644
1723 +--- a/arch/x86/include/asm/smp.h
1724 ++++ b/arch/x86/include/asm/smp.h
1725 +@@ -16,7 +16,6 @@
1726 + #endif
1727 + #include <asm/thread_info.h>
1728 + #include <asm/cpumask.h>
1729 +-#include <asm/cpufeature.h>
1730 +
1731 + extern int smp_num_siblings;
1732 + extern unsigned int num_processors;
1733 +diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
1734 +index 9b028204685d..18c9aaa8c043 100644
1735 +--- a/arch/x86/include/asm/thread_info.h
1736 ++++ b/arch/x86/include/asm/thread_info.h
1737 +@@ -49,7 +49,7 @@
1738 + */
1739 + #ifndef __ASSEMBLY__
1740 + struct task_struct;
1741 +-#include <asm/processor.h>
1742 ++#include <asm/cpufeature.h>
1743 + #include <linux/atomic.h>
1744 +
1745 + struct thread_info {
1746 +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
1747 +index a691b66cc40a..e2a89d2577fb 100644
1748 +--- a/arch/x86/include/asm/tlbflush.h
1749 ++++ b/arch/x86/include/asm/tlbflush.h
1750 +@@ -5,6 +5,7 @@
1751 + #include <linux/sched.h>
1752 +
1753 + #include <asm/processor.h>
1754 ++#include <asm/cpufeature.h>
1755 + #include <asm/special_insns.h>
1756 + #include <asm/smp.h>
1757 +
1758 +diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
1759 +index f2f9b39b274a..d83a55b95a48 100644
1760 +--- a/arch/x86/include/asm/uaccess_64.h
1761 ++++ b/arch/x86/include/asm/uaccess_64.h
1762 +@@ -8,7 +8,7 @@
1763 + #include <linux/errno.h>
1764 + #include <linux/lockdep.h>
1765 + #include <asm/alternative.h>
1766 +-#include <asm/cpufeature.h>
1767 ++#include <asm/cpufeatures.h>
1768 + #include <asm/page.h>
1769 +
1770 + /*
1771 +diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
1772 +index 2bd2292a316d..bac0805ea1d9 100644
1773 +--- a/arch/x86/kernel/apic/apic_numachip.c
1774 ++++ b/arch/x86/kernel/apic/apic_numachip.c
1775 +@@ -30,7 +30,7 @@ static unsigned int numachip1_get_apic_id(unsigned long x)
1776 + unsigned long value;
1777 + unsigned int id = (x >> 24) & 0xff;
1778 +
1779 +- if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
1780 ++ if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
1781 + rdmsrl(MSR_FAM10H_NODE_ID, value);
1782 + id |= (value << 2) & 0xff00;
1783 + }
1784 +@@ -178,7 +178,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
1785 + this_cpu_write(cpu_llc_id, node);
1786 +
1787 + /* Account for nodes per socket in multi-core-module processors */
1788 +- if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
1789 ++ if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
1790 + rdmsrl(MSR_FAM10H_NODE_ID, val);
1791 + nodes = ((val >> 3) & 7) + 1;
1792 + }
1793 +diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
1794 +index 8f184615053b..924b65794abd 100644
1795 +--- a/arch/x86/kernel/cpu/Makefile
1796 ++++ b/arch/x86/kernel/cpu/Makefile
1797 +@@ -62,7 +62,7 @@ ifdef CONFIG_X86_FEATURE_NAMES
1798 + quiet_cmd_mkcapflags = MKCAP $@
1799 + cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@
1800 +
1801 +-cpufeature = $(src)/../../include/asm/cpufeature.h
1802 ++cpufeature = $(src)/../../include/asm/cpufeatures.h
1803 +
1804 + targets += capflags.c
1805 + $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
1806 +diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
1807 +index d8fba5c15fbd..6608c03c2126 100644
1808 +--- a/arch/x86/kernel/cpu/centaur.c
1809 ++++ b/arch/x86/kernel/cpu/centaur.c
1810 +@@ -1,7 +1,7 @@
1811 + #include <linux/bitops.h>
1812 + #include <linux/kernel.h>
1813 +
1814 +-#include <asm/processor.h>
1815 ++#include <asm/cpufeature.h>
1816 + #include <asm/e820.h>
1817 + #include <asm/mtrr.h>
1818 + #include <asm/msr.h>
1819 +@@ -43,7 +43,7 @@ static void init_c3(struct cpuinfo_x86 *c)
1820 + /* store Centaur Extended Feature Flags as
1821 + * word 5 of the CPU capability bit array
1822 + */
1823 +- c->x86_capability[5] = cpuid_edx(0xC0000001);
1824 ++ c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
1825 + }
1826 + #ifdef CONFIG_X86_32
1827 + /* Cyrix III family needs CX8 & PGE explicitly enabled. */
1828 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
1829 +index 0498ad3702f5..814276d0eed1 100644
1830 +--- a/arch/x86/kernel/cpu/common.c
1831 ++++ b/arch/x86/kernel/cpu/common.c
1832 +@@ -676,50 +676,48 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
1833 +
1834 + void get_cpu_cap(struct cpuinfo_x86 *c)
1835 + {
1836 +- u32 tfms, xlvl;
1837 +- u32 ebx;
1838 ++ u32 eax, ebx, ecx, edx;
1839 +
1840 + /* Intel-defined flags: level 0x00000001 */
1841 + if (c->cpuid_level >= 0x00000001) {
1842 +- u32 capability, excap;
1843 ++ cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
1844 +
1845 +- cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
1846 +- c->x86_capability[0] = capability;
1847 +- c->x86_capability[4] = excap;
1848 ++ c->x86_capability[CPUID_1_ECX] = ecx;
1849 ++ c->x86_capability[CPUID_1_EDX] = edx;
1850 + }
1851 +
1852 + /* Additional Intel-defined flags: level 0x00000007 */
1853 + if (c->cpuid_level >= 0x00000007) {
1854 +- u32 eax, ebx, ecx, edx;
1855 +-
1856 + cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
1857 +
1858 +- c->x86_capability[9] = ebx;
1859 ++ c->x86_capability[CPUID_7_0_EBX] = ebx;
1860 ++
1861 ++ c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
1862 ++ c->x86_capability[CPUID_7_ECX] = ecx;
1863 + }
1864 +
1865 + /* Extended state features: level 0x0000000d */
1866 + if (c->cpuid_level >= 0x0000000d) {
1867 +- u32 eax, ebx, ecx, edx;
1868 +-
1869 + cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
1870 +
1871 +- c->x86_capability[10] = eax;
1872 ++ c->x86_capability[CPUID_D_1_EAX] = eax;
1873 + }
1874 +
1875 + /* Additional Intel-defined flags: level 0x0000000F */
1876 + if (c->cpuid_level >= 0x0000000F) {
1877 +- u32 eax, ebx, ecx, edx;
1878 +
1879 + /* QoS sub-leaf, EAX=0Fh, ECX=0 */
1880 + cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
1881 +- c->x86_capability[11] = edx;
1882 ++ c->x86_capability[CPUID_F_0_EDX] = edx;
1883 ++
1884 + if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
1885 + /* will be overridden if occupancy monitoring exists */
1886 + c->x86_cache_max_rmid = ebx;
1887 +
1888 + /* QoS sub-leaf, EAX=0Fh, ECX=1 */
1889 + cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
1890 +- c->x86_capability[12] = edx;
1891 ++ c->x86_capability[CPUID_F_1_EDX] = edx;
1892 ++
1893 + if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
1894 + c->x86_cache_max_rmid = ecx;
1895 + c->x86_cache_occ_scale = ebx;
1896 +@@ -731,30 +729,39 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
1897 + }
1898 +
1899 + /* AMD-defined flags: level 0x80000001 */
1900 +- xlvl = cpuid_eax(0x80000000);
1901 +- c->extended_cpuid_level = xlvl;
1902 ++ eax = cpuid_eax(0x80000000);
1903 ++ c->extended_cpuid_level = eax;
1904 ++
1905 ++ if ((eax & 0xffff0000) == 0x80000000) {
1906 ++ if (eax >= 0x80000001) {
1907 ++ cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
1908 +
1909 +- if ((xlvl & 0xffff0000) == 0x80000000) {
1910 +- if (xlvl >= 0x80000001) {
1911 +- c->x86_capability[1] = cpuid_edx(0x80000001);
1912 +- c->x86_capability[6] = cpuid_ecx(0x80000001);
1913 ++ c->x86_capability[CPUID_8000_0001_ECX] = ecx;
1914 ++ c->x86_capability[CPUID_8000_0001_EDX] = edx;
1915 + }
1916 + }
1917 +
1918 ++ if (c->extended_cpuid_level >= 0x80000007) {
1919 ++ cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
1920 ++
1921 ++ c->x86_capability[CPUID_8000_0007_EBX] = ebx;
1922 ++ c->x86_power = edx;
1923 ++ }
1924 ++
1925 + if (c->extended_cpuid_level >= 0x80000008) {
1926 +- u32 eax = cpuid_eax(0x80000008);
1927 ++ cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1928 +
1929 + c->x86_virt_bits = (eax >> 8) & 0xff;
1930 + c->x86_phys_bits = eax & 0xff;
1931 +- c->x86_capability[13] = cpuid_ebx(0x80000008);
1932 ++ c->x86_capability[CPUID_8000_0008_EBX] = ebx;
1933 + }
1934 + #ifdef CONFIG_X86_32
1935 + else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
1936 + c->x86_phys_bits = 36;
1937 + #endif
1938 +
1939 +- if (c->extended_cpuid_level >= 0x80000007)
1940 +- c->x86_power = cpuid_edx(0x80000007);
1941 ++ if (c->extended_cpuid_level >= 0x8000000a)
1942 ++ c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
1943 +
1944 + init_scattered_cpuid_features(c);
1945 + }
1946 +@@ -1574,20 +1581,6 @@ void cpu_init(void)
1947 + }
1948 + #endif
1949 +
1950 +-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
1951 +-void warn_pre_alternatives(void)
1952 +-{
1953 +- WARN(1, "You're using static_cpu_has before alternatives have run!\n");
1954 +-}
1955 +-EXPORT_SYMBOL_GPL(warn_pre_alternatives);
1956 +-#endif
1957 +-
1958 +-inline bool __static_cpu_has_safe(u16 bit)
1959 +-{
1960 +- return boot_cpu_has(bit);
1961 +-}
1962 +-EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
1963 +-
1964 + static void bsp_resume(void)
1965 + {
1966 + if (this_cpu->c_bsp_resume)
1967 +diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
1968 +index aaf152e79637..15e47c1cd412 100644
1969 +--- a/arch/x86/kernel/cpu/cyrix.c
1970 ++++ b/arch/x86/kernel/cpu/cyrix.c
1971 +@@ -8,6 +8,7 @@
1972 + #include <linux/timer.h>
1973 + #include <asm/pci-direct.h>
1974 + #include <asm/tsc.h>
1975 ++#include <asm/cpufeature.h>
1976 +
1977 + #include "cpu.h"
1978 +
1979 +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
1980 +index 565648bc1a0a..9299e3bdfad6 100644
1981 +--- a/arch/x86/kernel/cpu/intel.c
1982 ++++ b/arch/x86/kernel/cpu/intel.c
1983 +@@ -8,7 +8,7 @@
1984 + #include <linux/module.h>
1985 + #include <linux/uaccess.h>
1986 +
1987 +-#include <asm/processor.h>
1988 ++#include <asm/cpufeature.h>
1989 + #include <asm/pgtable.h>
1990 + #include <asm/msr.h>
1991 + #include <asm/bugs.h>
1992 +diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
1993 +index 3fa72317ad78..3557b3ceab14 100644
1994 +--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
1995 ++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
1996 +@@ -14,7 +14,7 @@
1997 + #include <linux/sysfs.h>
1998 + #include <linux/pci.h>
1999 +
2000 +-#include <asm/processor.h>
2001 ++#include <asm/cpufeature.h>
2002 + #include <asm/amd_nb.h>
2003 + #include <asm/smp.h>
2004 +
2005 +diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
2006 +index afa9f0d487ea..fbb5e90557a5 100644
2007 +--- a/arch/x86/kernel/cpu/match.c
2008 ++++ b/arch/x86/kernel/cpu/match.c
2009 +@@ -1,5 +1,5 @@
2010 + #include <asm/cpu_device_id.h>
2011 +-#include <asm/processor.h>
2012 ++#include <asm/cpufeature.h>
2013 + #include <linux/cpu.h>
2014 + #include <linux/module.h>
2015 + #include <linux/slab.h>
2016 +diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
2017 +index 3f20710a5b23..6988c74409a8 100644
2018 +--- a/arch/x86/kernel/cpu/mkcapflags.sh
2019 ++++ b/arch/x86/kernel/cpu/mkcapflags.sh
2020 +@@ -1,6 +1,6 @@
2021 + #!/bin/sh
2022 + #
2023 +-# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeature.h
2024 ++# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
2025 + #
2026 +
2027 + IN=$1
2028 +@@ -49,8 +49,8 @@ dump_array()
2029 + trap 'rm "$OUT"' EXIT
2030 +
2031 + (
2032 +- echo "#ifndef _ASM_X86_CPUFEATURE_H"
2033 +- echo "#include <asm/cpufeature.h>"
2034 ++ echo "#ifndef _ASM_X86_CPUFEATURES_H"
2035 ++ echo "#include <asm/cpufeatures.h>"
2036 + echo "#endif"
2037 + echo ""
2038 +
2039 +diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
2040 +index f924f41af89a..49bd700d9b7f 100644
2041 +--- a/arch/x86/kernel/cpu/mtrr/main.c
2042 ++++ b/arch/x86/kernel/cpu/mtrr/main.c
2043 +@@ -47,7 +47,7 @@
2044 + #include <linux/smp.h>
2045 + #include <linux/syscore_ops.h>
2046 +
2047 +-#include <asm/processor.h>
2048 ++#include <asm/cpufeature.h>
2049 + #include <asm/e820.h>
2050 + #include <asm/mtrr.h>
2051 + #include <asm/msr.h>
2052 +diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
2053 +index 608fb26c7254..8cb57df9398d 100644
2054 +--- a/arch/x86/kernel/cpu/scattered.c
2055 ++++ b/arch/x86/kernel/cpu/scattered.c
2056 +@@ -31,32 +31,12 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
2057 + const struct cpuid_bit *cb;
2058 +
2059 + static const struct cpuid_bit cpuid_bits[] = {
2060 +- { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
2061 +- { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
2062 +- { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
2063 +- { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
2064 +- { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 },
2065 +- { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 },
2066 +- { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 },
2067 +- { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 },
2068 +- { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 },
2069 +- { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },
2070 + { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
2071 + { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
2072 + { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
2073 + { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
2074 + { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 },
2075 + { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 },
2076 +- { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 },
2077 +- { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 },
2078 +- { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 },
2079 +- { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 },
2080 +- { X86_FEATURE_TSCRATEMSR, CR_EDX, 4, 0x8000000a, 0 },
2081 +- { X86_FEATURE_VMCBCLEAN, CR_EDX, 5, 0x8000000a, 0 },
2082 +- { X86_FEATURE_FLUSHBYASID, CR_EDX, 6, 0x8000000a, 0 },
2083 +- { X86_FEATURE_DECODEASSISTS, CR_EDX, 7, 0x8000000a, 0 },
2084 +- { X86_FEATURE_PAUSEFILTER, CR_EDX,10, 0x8000000a, 0 },
2085 +- { X86_FEATURE_PFTHRESHOLD, CR_EDX,12, 0x8000000a, 0 },
2086 + { 0, 0, 0, 0, 0 }
2087 + };
2088 +
2089 +diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
2090 +index 3fa0e5ad86b4..a19a663282b5 100644
2091 +--- a/arch/x86/kernel/cpu/transmeta.c
2092 ++++ b/arch/x86/kernel/cpu/transmeta.c
2093 +@@ -1,6 +1,6 @@
2094 + #include <linux/kernel.h>
2095 + #include <linux/mm.h>
2096 +-#include <asm/processor.h>
2097 ++#include <asm/cpufeature.h>
2098 + #include <asm/msr.h>
2099 + #include "cpu.h"
2100 +
2101 +@@ -12,7 +12,7 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
2102 + xlvl = cpuid_eax(0x80860000);
2103 + if ((xlvl & 0xffff0000) == 0x80860000) {
2104 + if (xlvl >= 0x80860001)
2105 +- c->x86_capability[2] = cpuid_edx(0x80860001);
2106 ++ c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
2107 + }
2108 + }
2109 +
2110 +@@ -82,7 +82,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
2111 + /* Unhide possibly hidden capability flags */
2112 + rdmsr(0x80860004, cap_mask, uk);
2113 + wrmsr(0x80860004, ~0, uk);
2114 +- c->x86_capability[0] = cpuid_edx(0x00000001);
2115 ++ c->x86_capability[CPUID_1_EDX] = cpuid_edx(0x00000001);
2116 + wrmsr(0x80860004, cap_mask, uk);
2117 +
2118 + /* All Transmeta CPUs have a constant TSC */
2119 +diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
2120 +index 52a2526c3fbe..19bc19d5e174 100644
2121 +--- a/arch/x86/kernel/e820.c
2122 ++++ b/arch/x86/kernel/e820.c
2123 +@@ -24,6 +24,7 @@
2124 + #include <asm/e820.h>
2125 + #include <asm/proto.h>
2126 + #include <asm/setup.h>
2127 ++#include <asm/cpufeature.h>
2128 +
2129 + /*
2130 + * The e820 map is the map that gets modified e.g. with command line parameters
2131 +diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
2132 +index 70284d38fdc2..1c0b49fd6365 100644
2133 +--- a/arch/x86/kernel/head_32.S
2134 ++++ b/arch/x86/kernel/head_32.S
2135 +@@ -19,7 +19,7 @@
2136 + #include <asm/setup.h>
2137 + #include <asm/processor-flags.h>
2138 + #include <asm/msr-index.h>
2139 +-#include <asm/cpufeature.h>
2140 ++#include <asm/cpufeatures.h>
2141 + #include <asm/percpu.h>
2142 + #include <asm/nops.h>
2143 + #include <asm/bootparam.h>
2144 +diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
2145 +index 4034e905741a..734ba1d0f686 100644
2146 +--- a/arch/x86/kernel/head_64.S
2147 ++++ b/arch/x86/kernel/head_64.S
2148 +@@ -76,9 +76,7 @@ startup_64:
2149 + subq $_text - __START_KERNEL_map, %rbp
2150 +
2151 + /* Is the address not 2M aligned? */
2152 +- movq %rbp, %rax
2153 +- andl $~PMD_PAGE_MASK, %eax
2154 +- testl %eax, %eax
2155 ++ testl $~PMD_PAGE_MASK, %ebp
2156 + jnz bad_address
2157 +
2158 + /*
2159 +diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
2160 +index f48eb8eeefe2..3fdc1e53aaac 100644
2161 +--- a/arch/x86/kernel/hpet.c
2162 ++++ b/arch/x86/kernel/hpet.c
2163 +@@ -12,6 +12,7 @@
2164 + #include <linux/pm.h>
2165 + #include <linux/io.h>
2166 +
2167 ++#include <asm/cpufeature.h>
2168 + #include <asm/irqdomain.h>
2169 + #include <asm/fixmap.h>
2170 + #include <asm/hpet.h>
2171 +diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
2172 +index 113e70784854..f95ac5d435aa 100644
2173 +--- a/arch/x86/kernel/msr.c
2174 ++++ b/arch/x86/kernel/msr.c
2175 +@@ -40,7 +40,7 @@
2176 + #include <linux/uaccess.h>
2177 + #include <linux/gfp.h>
2178 +
2179 +-#include <asm/processor.h>
2180 ++#include <asm/cpufeature.h>
2181 + #include <asm/msr.h>
2182 +
2183 + static struct class *msr_class;
2184 +diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
2185 +index c6aace2bbe08..b8105289c60b 100644
2186 +--- a/arch/x86/kernel/uprobes.c
2187 ++++ b/arch/x86/kernel/uprobes.c
2188 +@@ -290,7 +290,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
2189 + insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
2190 + /* has the side-effect of processing the entire instruction */
2191 + insn_get_length(insn);
2192 +- if (WARN_ON_ONCE(!insn_complete(insn)))
2193 ++ if (!insn_complete(insn))
2194 + return -ENOEXEC;
2195 +
2196 + if (is_prefix_bad(insn))
2197 +diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
2198 +index 4cf401f581e7..b7c9db5deebe 100644
2199 +--- a/arch/x86/kernel/verify_cpu.S
2200 ++++ b/arch/x86/kernel/verify_cpu.S
2201 +@@ -30,7 +30,7 @@
2202 + * appropriately. Either display a message or halt.
2203 + */
2204 +
2205 +-#include <asm/cpufeature.h>
2206 ++#include <asm/cpufeatures.h>
2207 + #include <asm/msr-index.h>
2208 +
2209 + verify_cpu:
2210 +diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
2211 +index d6d64a519559..7f4839ef3608 100644
2212 +--- a/arch/x86/kernel/vm86_32.c
2213 ++++ b/arch/x86/kernel/vm86_32.c
2214 +@@ -358,7 +358,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
2215 + /* make room for real-mode segments */
2216 + tsk->thread.sp0 += 16;
2217 +
2218 +- if (static_cpu_has_safe(X86_FEATURE_SEP))
2219 ++ if (static_cpu_has(X86_FEATURE_SEP))
2220 + tsk->thread.sysenter_cs = 0;
2221 +
2222 + load_sp0(tss, &tsk->thread);
2223 +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
2224 +index e065065a4dfb..a703842b54de 100644
2225 +--- a/arch/x86/kernel/vmlinux.lds.S
2226 ++++ b/arch/x86/kernel/vmlinux.lds.S
2227 +@@ -202,6 +202,17 @@ SECTIONS
2228 + :init
2229 + #endif
2230 +
2231 ++ /*
2232 ++ * Section for code used exclusively before alternatives are run. All
2233 ++ * references to such code must be patched out by alternatives, normally
2234 ++ * by using X86_FEATURE_ALWAYS CPU feature bit.
2235 ++ *
2236 ++ * See static_cpu_has() for an example.
2237 ++ */
2238 ++ .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
2239 ++ *(.altinstr_aux)
2240 ++ }
2241 ++
2242 + INIT_DATA_SECTION(16)
2243 +
2244 + .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
2245 +diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
2246 +index a2fe51b00cce..65be7cfaf947 100644
2247 +--- a/arch/x86/lib/clear_page_64.S
2248 ++++ b/arch/x86/lib/clear_page_64.S
2249 +@@ -1,5 +1,5 @@
2250 + #include <linux/linkage.h>
2251 +-#include <asm/cpufeature.h>
2252 ++#include <asm/cpufeatures.h>
2253 + #include <asm/alternative-asm.h>
2254 +
2255 + /*
2256 +diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
2257 +index 009f98216b7e..24ef1c2104d4 100644
2258 +--- a/arch/x86/lib/copy_page_64.S
2259 ++++ b/arch/x86/lib/copy_page_64.S
2260 +@@ -1,7 +1,7 @@
2261 + /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
2262 +
2263 + #include <linux/linkage.h>
2264 +-#include <asm/cpufeature.h>
2265 ++#include <asm/cpufeatures.h>
2266 + #include <asm/alternative-asm.h>
2267 +
2268 + /*
2269 +diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
2270 +index 423644c230e7..accf7f2f557f 100644
2271 +--- a/arch/x86/lib/copy_user_64.S
2272 ++++ b/arch/x86/lib/copy_user_64.S
2273 +@@ -10,7 +10,7 @@
2274 + #include <asm/current.h>
2275 + #include <asm/asm-offsets.h>
2276 + #include <asm/thread_info.h>
2277 +-#include <asm/cpufeature.h>
2278 ++#include <asm/cpufeatures.h>
2279 + #include <asm/alternative-asm.h>
2280 + #include <asm/asm.h>
2281 + #include <asm/smap.h>
2282 +diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
2283 +index 16698bba87de..a0de849435ad 100644
2284 +--- a/arch/x86/lib/memcpy_64.S
2285 ++++ b/arch/x86/lib/memcpy_64.S
2286 +@@ -1,7 +1,7 @@
2287 + /* Copyright 2002 Andi Kleen */
2288 +
2289 + #include <linux/linkage.h>
2290 +-#include <asm/cpufeature.h>
2291 ++#include <asm/cpufeatures.h>
2292 + #include <asm/alternative-asm.h>
2293 +
2294 + /*
2295 +diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
2296 +index ca2afdd6d98e..90ce01bee00c 100644
2297 +--- a/arch/x86/lib/memmove_64.S
2298 ++++ b/arch/x86/lib/memmove_64.S
2299 +@@ -6,7 +6,7 @@
2300 + * - Copyright 2011 Fenghua Yu <fenghua.yu@×××××.com>
2301 + */
2302 + #include <linux/linkage.h>
2303 +-#include <asm/cpufeature.h>
2304 ++#include <asm/cpufeatures.h>
2305 + #include <asm/alternative-asm.h>
2306 +
2307 + #undef memmove
2308 +diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
2309 +index 2661fad05827..c9c81227ea37 100644
2310 +--- a/arch/x86/lib/memset_64.S
2311 ++++ b/arch/x86/lib/memset_64.S
2312 +@@ -1,7 +1,7 @@
2313 + /* Copyright 2002 Andi Kleen, SuSE Labs */
2314 +
2315 + #include <linux/linkage.h>
2316 +-#include <asm/cpufeature.h>
2317 ++#include <asm/cpufeatures.h>
2318 + #include <asm/alternative-asm.h>
2319 +
2320 + .weak memset
2321 +diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
2322 +index 3d06b482ebc7..7bbb853e36bd 100644
2323 +--- a/arch/x86/lib/retpoline.S
2324 ++++ b/arch/x86/lib/retpoline.S
2325 +@@ -3,7 +3,7 @@
2326 + #include <linux/stringify.h>
2327 + #include <linux/linkage.h>
2328 + #include <asm/dwarf2.h>
2329 +-#include <asm/cpufeature.h>
2330 ++#include <asm/cpufeatures.h>
2331 + #include <asm/alternative-asm.h>
2332 + #include <asm-generic/export.h>
2333 + #include <asm/nospec-branch.h>
2334 +diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
2335 +index 92e2eacb3321..f65a33f505b6 100644
2336 +--- a/arch/x86/mm/setup_nx.c
2337 ++++ b/arch/x86/mm/setup_nx.c
2338 +@@ -4,6 +4,7 @@
2339 +
2340 + #include <asm/pgtable.h>
2341 + #include <asm/proto.h>
2342 ++#include <asm/cpufeature.h>
2343 +
2344 + static int disable_nx;
2345 +
2346 +diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
2347 +index 50d86c0e9ba4..660a83c8287b 100644
2348 +--- a/arch/x86/oprofile/op_model_amd.c
2349 ++++ b/arch/x86/oprofile/op_model_amd.c
2350 +@@ -24,7 +24,6 @@
2351 + #include <asm/nmi.h>
2352 + #include <asm/apic.h>
2353 + #include <asm/processor.h>
2354 +-#include <asm/cpufeature.h>
2355 +
2356 + #include "op_x86_model.h"
2357 + #include "op_counter.h"
2358 +diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
2359 +index 755481f14d90..764ac2fc53fe 100644
2360 +--- a/arch/x86/um/asm/barrier.h
2361 ++++ b/arch/x86/um/asm/barrier.h
2362 +@@ -3,7 +3,7 @@
2363 +
2364 + #include <asm/asm.h>
2365 + #include <asm/segment.h>
2366 +-#include <asm/cpufeature.h>
2367 ++#include <asm/cpufeatures.h>
2368 + #include <asm/cmpxchg.h>
2369 + #include <asm/nops.h>
2370 +
2371 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
2372 +index 5a6a01135470..34fdaa6e99ba 100644
2373 +--- a/drivers/ata/ahci.c
2374 ++++ b/drivers/ata/ahci.c
2375 +@@ -1229,6 +1229,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
2376 + return strcmp(buf, dmi->driver_data) < 0;
2377 + }
2378 +
2379 ++static bool ahci_broken_lpm(struct pci_dev *pdev)
2380 ++{
2381 ++ static const struct dmi_system_id sysids[] = {
2382 ++ /* Various Lenovo 50 series have LPM issues with older BIOSen */
2383 ++ {
2384 ++ .matches = {
2385 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2386 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
2387 ++ },
2388 ++ .driver_data = "20180406", /* 1.31 */
2389 ++ },
2390 ++ {
2391 ++ .matches = {
2392 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2393 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
2394 ++ },
2395 ++ .driver_data = "20180420", /* 1.28 */
2396 ++ },
2397 ++ {
2398 ++ .matches = {
2399 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2400 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
2401 ++ },
2402 ++ .driver_data = "20180315", /* 1.33 */
2403 ++ },
2404 ++ {
2405 ++ .matches = {
2406 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2407 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
2408 ++ },
2409 ++ /*
2410 ++ * Note date based on release notes, 2.35 has been
2411 ++ * reported to be good, but I've been unable to get
2412 ++ * a hold of the reporter to get the DMI BIOS date.
2413 ++ * TODO: fix this.
2414 ++ */
2415 ++ .driver_data = "20180310", /* 2.35 */
2416 ++ },
2417 ++ { } /* terminate list */
2418 ++ };
2419 ++ const struct dmi_system_id *dmi = dmi_first_match(sysids);
2420 ++ int year, month, date;
2421 ++ char buf[9];
2422 ++
2423 ++ if (!dmi)
2424 ++ return false;
2425 ++
2426 ++ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2427 ++ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2428 ++
2429 ++ return strcmp(buf, dmi->driver_data) < 0;
2430 ++}
2431 ++
2432 + static bool ahci_broken_online(struct pci_dev *pdev)
2433 + {
2434 + #define ENCODE_BUSDEVFN(bus, slot, func) \
2435 +@@ -1588,6 +1641,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2436 + "quirky BIOS, skipping spindown on poweroff\n");
2437 + }
2438 +
2439 ++ if (ahci_broken_lpm(pdev)) {
2440 ++ pi.flags |= ATA_FLAG_NO_LPM;
2441 ++ dev_warn(&pdev->dev,
2442 ++ "BIOS update required for Link Power Management support\n");
2443 ++ }
2444 ++
2445 + if (ahci_broken_suspend(pdev)) {
2446 + hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
2447 + dev_warn(&pdev->dev,
2448 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
2449 +index 9afd06ee5b30..ba514fa733de 100644
2450 +--- a/drivers/ata/libata-core.c
2451 ++++ b/drivers/ata/libata-core.c
2452 +@@ -2209,6 +2209,9 @@ int ata_dev_configure(struct ata_device *dev)
2453 + (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2454 + dev->horkage |= ATA_HORKAGE_NOLPM;
2455 +
2456 ++ if (ap->flags & ATA_FLAG_NO_LPM)
2457 ++ dev->horkage |= ATA_HORKAGE_NOLPM;
2458 ++
2459 + if (dev->horkage & ATA_HORKAGE_NOLPM) {
2460 + ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2461 + dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2462 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
2463 +index e8165ec55e6f..da3902ac16c8 100644
2464 +--- a/drivers/block/loop.c
2465 ++++ b/drivers/block/loop.c
2466 +@@ -651,6 +651,36 @@ static void loop_reread_partitions(struct loop_device *lo,
2467 + __func__, lo->lo_number, lo->lo_file_name, rc);
2468 + }
2469 +
2470 ++static inline int is_loop_device(struct file *file)
2471 ++{
2472 ++ struct inode *i = file->f_mapping->host;
2473 ++
2474 ++ return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
2475 ++}
2476 ++
2477 ++static int loop_validate_file(struct file *file, struct block_device *bdev)
2478 ++{
2479 ++ struct inode *inode = file->f_mapping->host;
2480 ++ struct file *f = file;
2481 ++
2482 ++ /* Avoid recursion */
2483 ++ while (is_loop_device(f)) {
2484 ++ struct loop_device *l;
2485 ++
2486 ++ if (f->f_mapping->host->i_bdev == bdev)
2487 ++ return -EBADF;
2488 ++
2489 ++ l = f->f_mapping->host->i_bdev->bd_disk->private_data;
2490 ++ if (l->lo_state == Lo_unbound) {
2491 ++ return -EINVAL;
2492 ++ }
2493 ++ f = l->lo_backing_file;
2494 ++ }
2495 ++ if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
2496 ++ return -EINVAL;
2497 ++ return 0;
2498 ++}
2499 ++
2500 + /*
2501 + * loop_change_fd switched the backing store of a loopback device to
2502 + * a new file. This is useful for operating system installers to free up
2503 +@@ -680,14 +710,15 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
2504 + if (!file)
2505 + goto out;
2506 +
2507 ++ error = loop_validate_file(file, bdev);
2508 ++ if (error)
2509 ++ goto out_putf;
2510 ++
2511 + inode = file->f_mapping->host;
2512 + old_file = lo->lo_backing_file;
2513 +
2514 + error = -EINVAL;
2515 +
2516 +- if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
2517 +- goto out_putf;
2518 +-
2519 + /* size of the new backing store needs to be the same */
2520 + if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
2521 + goto out_putf;
2522 +@@ -708,13 +739,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
2523 + return error;
2524 + }
2525 +
2526 +-static inline int is_loop_device(struct file *file)
2527 +-{
2528 +- struct inode *i = file->f_mapping->host;
2529 +-
2530 +- return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
2531 +-}
2532 +-
2533 + /* loop sysfs attributes */
2534 +
2535 + static ssize_t loop_attr_show(struct device *dev, char *page,
2536 +@@ -811,16 +835,17 @@ static struct attribute_group loop_attribute_group = {
2537 + .attrs= loop_attrs,
2538 + };
2539 +
2540 +-static int loop_sysfs_init(struct loop_device *lo)
2541 ++static void loop_sysfs_init(struct loop_device *lo)
2542 + {
2543 +- return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
2544 +- &loop_attribute_group);
2545 ++ lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
2546 ++ &loop_attribute_group);
2547 + }
2548 +
2549 + static void loop_sysfs_exit(struct loop_device *lo)
2550 + {
2551 +- sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
2552 +- &loop_attribute_group);
2553 ++ if (lo->sysfs_inited)
2554 ++ sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
2555 ++ &loop_attribute_group);
2556 + }
2557 +
2558 + static void loop_config_discard(struct loop_device *lo)
2559 +@@ -872,7 +897,7 @@ static int loop_prepare_queue(struct loop_device *lo)
2560 + static int loop_set_fd(struct loop_device *lo, fmode_t mode,
2561 + struct block_device *bdev, unsigned int arg)
2562 + {
2563 +- struct file *file, *f;
2564 ++ struct file *file;
2565 + struct inode *inode;
2566 + struct address_space *mapping;
2567 + unsigned lo_blocksize;
2568 +@@ -892,29 +917,13 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
2569 + if (lo->lo_state != Lo_unbound)
2570 + goto out_putf;
2571 +
2572 +- /* Avoid recursion */
2573 +- f = file;
2574 +- while (is_loop_device(f)) {
2575 +- struct loop_device *l;
2576 +-
2577 +- if (f->f_mapping->host->i_bdev == bdev)
2578 +- goto out_putf;
2579 +-
2580 +- l = f->f_mapping->host->i_bdev->bd_disk->private_data;
2581 +- if (l->lo_state == Lo_unbound) {
2582 +- error = -EINVAL;
2583 +- goto out_putf;
2584 +- }
2585 +- f = l->lo_backing_file;
2586 +- }
2587 ++ error = loop_validate_file(file, bdev);
2588 ++ if (error)
2589 ++ goto out_putf;
2590 +
2591 + mapping = file->f_mapping;
2592 + inode = mapping->host;
2593 +
2594 +- error = -EINVAL;
2595 +- if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
2596 +- goto out_putf;
2597 +-
2598 + if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
2599 + !file->f_op->write_iter)
2600 + lo_flags |= LO_FLAGS_READ_ONLY;
2601 +diff --git a/drivers/block/loop.h b/drivers/block/loop.h
2602 +index fb2237c73e61..60f0fd2c0c65 100644
2603 +--- a/drivers/block/loop.h
2604 ++++ b/drivers/block/loop.h
2605 +@@ -59,6 +59,7 @@ struct loop_device {
2606 + struct kthread_worker worker;
2607 + struct task_struct *worker_task;
2608 + bool use_dio;
2609 ++ bool sysfs_inited;
2610 +
2611 + struct request_queue *lo_queue;
2612 + struct blk_mq_tag_set tag_set;
2613 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2614 +index b316ab7e8996..60e2c9faa95f 100644
2615 +--- a/drivers/hid/hid-ids.h
2616 ++++ b/drivers/hid/hid-ids.h
2617 +@@ -512,6 +512,9 @@
2618 + #define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615
2619 + #define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070
2620 +
2621 ++#define USB_VENDOR_ID_INNOMEDIA 0x1292
2622 ++#define USB_DEVICE_ID_INNEX_GENESIS_ATARI 0x4745
2623 ++
2624 + #define USB_VENDOR_ID_ITE 0x048d
2625 + #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
2626 + #define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
2627 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
2628 +index ce1543d69acb..c9a11315493b 100644
2629 +--- a/drivers/hid/usbhid/hid-quirks.c
2630 ++++ b/drivers/hid/usbhid/hid-quirks.c
2631 +@@ -152,6 +152,7 @@ static const struct hid_blacklist {
2632 + { USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT },
2633 + { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
2634 + { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
2635 ++ { USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT },
2636 +
2637 + { 0, 0 }
2638 + };
2639 +diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
2640 +index aa26f3c3416b..c151bb625179 100644
2641 +--- a/drivers/infiniband/Kconfig
2642 ++++ b/drivers/infiniband/Kconfig
2643 +@@ -33,6 +33,18 @@ config INFINIBAND_USER_ACCESS
2644 + libibverbs, libibcm and a hardware driver library from
2645 + <http://www.openfabrics.org/git/>.
2646 +
2647 ++config INFINIBAND_USER_ACCESS_UCM
2648 ++ bool "Userspace CM (UCM, DEPRECATED)"
2649 ++ depends on BROKEN
2650 ++ depends on INFINIBAND_USER_ACCESS
2651 ++ help
2652 ++ The UCM module has known security flaws, which no one is
2653 ++ interested to fix. The user-space part of this code was
2654 ++ dropped from the upstream a long time ago.
2655 ++
2656 ++ This option is DEPRECATED and planned to be removed.
2657 ++
2658 ++
2659 + config INFINIBAND_USER_MEM
2660 + bool
2661 + depends on INFINIBAND_USER_ACCESS != n
2662 +diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
2663 +index d43a8994ac5c..737612a442be 100644
2664 +--- a/drivers/infiniband/core/Makefile
2665 ++++ b/drivers/infiniband/core/Makefile
2666 +@@ -5,8 +5,8 @@ obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
2667 + ib_cm.o iw_cm.o ib_addr.o \
2668 + $(infiniband-y)
2669 + obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
2670 +-obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
2671 +- $(user_access-y)
2672 ++obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
2673 ++obj-$(CONFIG_INFINIBAND_USER_ACCESS_UCM) += ib_ucm.o $(user_access-y)
2674 +
2675 + ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
2676 + device.o fmr_pool.o cache.o netlink.o \
2677 +diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
2678 +index e1629ab58db7..8218d714fa01 100644
2679 +--- a/drivers/infiniband/hw/cxgb4/mem.c
2680 ++++ b/drivers/infiniband/hw/cxgb4/mem.c
2681 +@@ -926,7 +926,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
2682 + {
2683 + struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
2684 +
2685 +- if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
2686 ++ if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
2687 + return -ENOMEM;
2688 +
2689 + mhp->mpl[mhp->mpl_len++] = addr;
2690 +diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
2691 +index e8b933111e0d..92109cadc3fc 100644
2692 +--- a/drivers/misc/ibmasm/ibmasmfs.c
2693 ++++ b/drivers/misc/ibmasm/ibmasmfs.c
2694 +@@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file)
2695 + static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
2696 + {
2697 + void __iomem *address = (void __iomem *)file->private_data;
2698 +- unsigned char *page;
2699 +- int retval;
2700 + int len = 0;
2701 + unsigned int value;
2702 +-
2703 +- if (*offset < 0)
2704 +- return -EINVAL;
2705 +- if (count == 0 || count > 1024)
2706 +- return 0;
2707 +- if (*offset != 0)
2708 +- return 0;
2709 +-
2710 +- page = (unsigned char *)__get_free_page(GFP_KERNEL);
2711 +- if (!page)
2712 +- return -ENOMEM;
2713 ++ char lbuf[20];
2714 +
2715 + value = readl(address);
2716 +- len = sprintf(page, "%d\n", value);
2717 +-
2718 +- if (copy_to_user(buf, page, len)) {
2719 +- retval = -EFAULT;
2720 +- goto exit;
2721 +- }
2722 +- *offset += len;
2723 +- retval = len;
2724 ++ len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
2725 +
2726 +-exit:
2727 +- free_page((unsigned long)page);
2728 +- return retval;
2729 ++ return simple_read_from_buffer(buf, count, offset, lbuf, len);
2730 + }
2731 +
2732 + static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
2733 +diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
2734 +index fe90b7e04427..5e047bfc0cc4 100644
2735 +--- a/drivers/misc/vmw_balloon.c
2736 ++++ b/drivers/misc/vmw_balloon.c
2737 +@@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b,
2738 + unsigned int num_pages, bool is_2m_pages, unsigned int *target)
2739 + {
2740 + unsigned long status;
2741 +- unsigned long pfn = page_to_pfn(b->page);
2742 ++ unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
2743 +
2744 + STATS_INC(b->stats.lock[is_2m_pages]);
2745 +
2746 +@@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b,
2747 + unsigned int num_pages, bool is_2m_pages, unsigned int *target)
2748 + {
2749 + unsigned long status;
2750 +- unsigned long pfn = page_to_pfn(b->page);
2751 ++ unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
2752 +
2753 + STATS_INC(b->stats.unlock[is_2m_pages]);
2754 +
2755 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
2756 +index 40ce175655e6..99f67764765f 100644
2757 +--- a/drivers/usb/core/quirks.c
2758 ++++ b/drivers/usb/core/quirks.c
2759 +@@ -231,6 +231,10 @@ static const struct usb_device_id usb_quirk_list[] = {
2760 + /* Corsair K70 RGB */
2761 + { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
2762 +
2763 ++ /* Corsair Strafe */
2764 ++ { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
2765 ++ USB_QUIRK_DELAY_CTRL_MSG },
2766 ++
2767 + /* Corsair Strafe RGB */
2768 + { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
2769 + USB_QUIRK_DELAY_CTRL_MSG },
2770 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
2771 +index e4cf3322bcb3..0ec809a35a3f 100644
2772 +--- a/drivers/usb/host/xhci-mem.c
2773 ++++ b/drivers/usb/host/xhci-mem.c
2774 +@@ -638,7 +638,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
2775 + if (!ep->stream_info)
2776 + return NULL;
2777 +
2778 +- if (stream_id > ep->stream_info->num_streams)
2779 ++ if (stream_id >= ep->stream_info->num_streams)
2780 + return NULL;
2781 + return ep->stream_info->stream_rings[stream_id];
2782 + }
2783 +diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
2784 +index 343fa6ff9f4b..512c84adcace 100644
2785 +--- a/drivers/usb/misc/yurex.c
2786 ++++ b/drivers/usb/misc/yurex.c
2787 +@@ -414,8 +414,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
2788 + loff_t *ppos)
2789 + {
2790 + struct usb_yurex *dev;
2791 +- int retval = 0;
2792 +- int bytes_read = 0;
2793 ++ int len = 0;
2794 + char in_buffer[20];
2795 + unsigned long flags;
2796 +
2797 +@@ -423,26 +422,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
2798 +
2799 + mutex_lock(&dev->io_mutex);
2800 + if (!dev->interface) { /* already disconnected */
2801 +- retval = -ENODEV;
2802 +- goto exit;
2803 ++ mutex_unlock(&dev->io_mutex);
2804 ++ return -ENODEV;
2805 + }
2806 +
2807 + spin_lock_irqsave(&dev->lock, flags);
2808 +- bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
2809 ++ len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
2810 + spin_unlock_irqrestore(&dev->lock, flags);
2811 +-
2812 +- if (*ppos < bytes_read) {
2813 +- if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
2814 +- retval = -EFAULT;
2815 +- else {
2816 +- retval = bytes_read - *ppos;
2817 +- *ppos += bytes_read;
2818 +- }
2819 +- }
2820 +-
2821 +-exit:
2822 + mutex_unlock(&dev->io_mutex);
2823 +- return retval;
2824 ++
2825 ++ return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
2826 + }
2827 +
2828 + static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
2829 +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
2830 +index 71133d96f97d..f73ea14e8173 100644
2831 +--- a/drivers/usb/serial/ch341.c
2832 ++++ b/drivers/usb/serial/ch341.c
2833 +@@ -118,7 +118,7 @@ static int ch341_control_in(struct usb_device *dev,
2834 + r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
2835 + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
2836 + value, index, buf, bufsize, DEFAULT_TIMEOUT);
2837 +- if (r < bufsize) {
2838 ++ if (r < (int)bufsize) {
2839 + if (r >= 0) {
2840 + dev_err(&dev->dev,
2841 + "short control message received (%d < %u)\n",
2842 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2843 +index 73835027a7cc..97382301c393 100644
2844 +--- a/drivers/usb/serial/cp210x.c
2845 ++++ b/drivers/usb/serial/cp210x.c
2846 +@@ -145,6 +145,7 @@ static const struct usb_device_id id_table[] = {
2847 + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
2848 + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
2849 + { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
2850 ++ { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
2851 + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
2852 + { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
2853 + { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
2854 +diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
2855 +index 6b0942428917..8a4047de43dc 100644
2856 +--- a/drivers/usb/serial/keyspan_pda.c
2857 ++++ b/drivers/usb/serial/keyspan_pda.c
2858 +@@ -373,8 +373,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
2859 + 3, /* get pins */
2860 + USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
2861 + 0, 0, data, 1, 2000);
2862 +- if (rc >= 0)
2863 ++ if (rc == 1)
2864 + *value = *data;
2865 ++ else if (rc >= 0)
2866 ++ rc = -EIO;
2867 +
2868 + kfree(data);
2869 + return rc;
2870 +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
2871 +index ed883a7ad533..58ba6904a087 100644
2872 +--- a/drivers/usb/serial/mos7840.c
2873 ++++ b/drivers/usb/serial/mos7840.c
2874 +@@ -482,6 +482,9 @@ static void mos7840_control_callback(struct urb *urb)
2875 + }
2876 +
2877 + dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
2878 ++ if (urb->actual_length < 1)
2879 ++ goto out;
2880 ++
2881 + dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
2882 + mos7840_port->MsrLsr, mos7840_port->port_num);
2883 + data = urb->transfer_buffer;
2884 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2885 +index 7efd70bfeaf7..d106b981d86f 100644
2886 +--- a/fs/btrfs/disk-io.c
2887 ++++ b/fs/btrfs/disk-io.c
2888 +@@ -923,7 +923,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
2889 + if (bio_flags & EXTENT_BIO_TREE_LOG)
2890 + return 0;
2891 + #ifdef CONFIG_X86
2892 +- if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
2893 ++ if (static_cpu_has(X86_FEATURE_XMM4_2))
2894 + return 0;
2895 + #endif
2896 + return 1;
2897 +diff --git a/fs/inode.c b/fs/inode.c
2898 +index b95615f3fc50..a39c2724d8a0 100644
2899 +--- a/fs/inode.c
2900 ++++ b/fs/inode.c
2901 +@@ -1937,8 +1937,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
2902 + inode->i_uid = current_fsuid();
2903 + if (dir && dir->i_mode & S_ISGID) {
2904 + inode->i_gid = dir->i_gid;
2905 ++
2906 ++ /* Directories are special, and always inherit S_ISGID */
2907 + if (S_ISDIR(mode))
2908 + mode |= S_ISGID;
2909 ++ else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
2910 ++ !in_group_p(inode->i_gid) &&
2911 ++ !capable_wrt_inode_uidgid(dir, CAP_FSETID))
2912 ++ mode &= ~S_ISGID;
2913 + } else
2914 + inode->i_gid = current_fsgid();
2915 + inode->i_mode = mode;
2916 +diff --git a/include/linux/libata.h b/include/linux/libata.h
2917 +index b20a2752f934..6428ac4746de 100644
2918 +--- a/include/linux/libata.h
2919 ++++ b/include/linux/libata.h
2920 +@@ -210,6 +210,7 @@ enum {
2921 + ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
2922 + /* (doesn't imply presence) */
2923 + ATA_FLAG_SATA = (1 << 1),
2924 ++ ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */
2925 + ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
2926 + ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
2927 + ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
2928 +diff --git a/kernel/power/user.c b/kernel/power/user.c
2929 +index 526e8911460a..f83c1876b39c 100644
2930 +--- a/kernel/power/user.c
2931 ++++ b/kernel/power/user.c
2932 +@@ -184,6 +184,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
2933 + res = PAGE_SIZE - pg_offp;
2934 + }
2935 +
2936 ++ if (!data_of(data->handle)) {
2937 ++ res = -EINVAL;
2938 ++ goto unlock;
2939 ++ }
2940 ++
2941 + res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
2942 + buf, count);
2943 + if (res > 0)
2944 +diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
2945 +index 83c33a5bcffb..de67fea3cf46 100644
2946 +--- a/lib/atomic64_test.c
2947 ++++ b/lib/atomic64_test.c
2948 +@@ -16,6 +16,10 @@
2949 + #include <linux/kernel.h>
2950 + #include <linux/atomic.h>
2951 +
2952 ++#ifdef CONFIG_X86
2953 ++#include <asm/cpufeature.h> /* for boot_cpu_has below */
2954 ++#endif
2955 ++
2956 + #define TEST(bit, op, c_op, val) \
2957 + do { \
2958 + atomic##bit##_set(&v, v0); \
2959 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2960 +index 9f70c267a7a5..665fd87cc105 100644
2961 +--- a/net/bridge/netfilter/ebtables.c
2962 ++++ b/net/bridge/netfilter/ebtables.c
2963 +@@ -701,6 +701,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
2964 + }
2965 + i = 0;
2966 +
2967 ++ memset(&mtpar, 0, sizeof(mtpar));
2968 ++ memset(&tgpar, 0, sizeof(tgpar));
2969 + mtpar.net = tgpar.net = net;
2970 + mtpar.table = tgpar.table = name;
2971 + mtpar.entryinfo = tgpar.entryinfo = e;
2972 +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
2973 +index dac62b5e7fe3..9363c1a70f16 100644
2974 +--- a/net/ipv4/netfilter/ip_tables.c
2975 ++++ b/net/ipv4/netfilter/ip_tables.c
2976 +@@ -663,6 +663,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
2977 + return -ENOMEM;
2978 +
2979 + j = 0;
2980 ++ memset(&mtpar, 0, sizeof(mtpar));
2981 + mtpar.net = net;
2982 + mtpar.table = name;
2983 + mtpar.entryinfo = &e->ip;
2984 +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
2985 +index 795c343347ec..6cb9e35d23ac 100644
2986 +--- a/net/ipv6/netfilter/ip6_tables.c
2987 ++++ b/net/ipv6/netfilter/ip6_tables.c
2988 +@@ -676,6 +676,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
2989 + return -ENOMEM;
2990 +
2991 + j = 0;
2992 ++ memset(&mtpar, 0, sizeof(mtpar));
2993 + mtpar.net = net;
2994 + mtpar.table = name;
2995 + mtpar.entryinfo = &e->ipv6;
2996 +diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
2997 +index 7edcfda288c4..54cde78c2718 100644
2998 +--- a/net/netfilter/nfnetlink_queue.c
2999 ++++ b/net/netfilter/nfnetlink_queue.c
3000 +@@ -1106,6 +1106,9 @@ nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
3001 + static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
3002 + [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) },
3003 + [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) },
3004 ++ [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
3005 ++ [NFQA_CFG_MASK] = { .type = NLA_U32 },
3006 ++ [NFQA_CFG_FLAGS] = { .type = NLA_U32 },
3007 + };
3008 +
3009 + static const struct nf_queue_handler nfqh = {
3010 +diff --git a/tools/build/Build.include b/tools/build/Build.include
3011 +index 4d000bc959b4..1c570528baf7 100644
3012 +--- a/tools/build/Build.include
3013 ++++ b/tools/build/Build.include
3014 +@@ -62,8 +62,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
3015 + $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \
3016 + rm -f $(depfile); \
3017 + mv -f $(dot-target).tmp $(dot-target).cmd, \
3018 +- printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
3019 +- printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \
3020 ++ printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
3021 ++ printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd; \
3022 + cat $(depfile) >> $(dot-target).cmd; \
3023 + printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
3024 +