
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 21 Nov 2018 15:02:31
Message-Id: 1542812456.b756e9982b6c775189f4d96eba767c23773aebda.mpagano@gentoo
1 commit: b756e9982b6c775189f4d96eba767c23773aebda
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Jul 17 10:24:44 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Nov 21 15:00:56 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b756e998
7
8 Linux patch 4.4.141
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1140_linux-4.4.141.patch | 2989 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2993 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 73e6c56..c1babcb 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -603,6 +603,10 @@ Patch: 1139_linux-4.4.140.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.4.140
23
24 +Patch: 1140_linux-4.4.141.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.4.141
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1140_linux-4.4.141.patch b/1140_linux-4.4.141.patch
33 new file mode 100644
34 index 0000000..eec959a
35 --- /dev/null
36 +++ b/1140_linux-4.4.141.patch
37 @@ -0,0 +1,2989 @@
38 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
39 +index 4df6bd7d01ed..e60d0b5809c1 100644
40 +--- a/Documentation/kernel-parameters.txt
41 ++++ b/Documentation/kernel-parameters.txt
42 +@@ -652,7 +652,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
43 +
44 + clearcpuid=BITNUM [X86]
45 + Disable CPUID feature X for the kernel. See
46 +- arch/x86/include/asm/cpufeature.h for the valid bit
47 ++ arch/x86/include/asm/cpufeatures.h for the valid bit
48 + numbers. Note the Linux specific bits are not necessarily
49 + stable over kernel options, but the vendor specific
50 + ones should be.
51 +diff --git a/Makefile b/Makefile
52 +index b842298a5970..3fc39e41dbde 100644
53 +--- a/Makefile
54 ++++ b/Makefile
55 +@@ -1,6 +1,6 @@
56 + VERSION = 4
57 + PATCHLEVEL = 4
58 +-SUBLEVEL = 140
59 ++SUBLEVEL = 141
60 + EXTRAVERSION =
61 + NAME = Blurry Fish Butt
62 +
63 +diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
64 +index 8d5008cbdc0f..a853a83f2944 100644
65 +--- a/arch/mips/mm/ioremap.c
66 ++++ b/arch/mips/mm/ioremap.c
67 +@@ -9,6 +9,7 @@
68 + #include <linux/module.h>
69 + #include <asm/addrspace.h>
70 + #include <asm/byteorder.h>
71 ++#include <linux/ioport.h>
72 + #include <linux/sched.h>
73 + #include <linux/slab.h>
74 + #include <linux/vmalloc.h>
75 +@@ -97,6 +98,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
76 + return error;
77 + }
78 +
79 ++static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
80 ++ void *arg)
81 ++{
82 ++ unsigned long i;
83 ++
84 ++ for (i = 0; i < nr_pages; i++) {
85 ++ if (pfn_valid(start_pfn + i) &&
86 ++ !PageReserved(pfn_to_page(start_pfn + i)))
87 ++ return 1;
88 ++ }
89 ++
90 ++ return 0;
91 ++}
92 ++
93 + /*
94 + * Generic mapping function (not visible outside):
95 + */
96 +@@ -115,8 +130,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
97 +
98 + void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
99 + {
100 ++ unsigned long offset, pfn, last_pfn;
101 + struct vm_struct * area;
102 +- unsigned long offset;
103 + phys_addr_t last_addr;
104 + void * addr;
105 +
106 +@@ -136,18 +151,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
107 + return (void __iomem *) CKSEG1ADDR(phys_addr);
108 +
109 + /*
110 +- * Don't allow anybody to remap normal RAM that we're using..
111 ++ * Don't allow anybody to remap RAM that may be allocated by the page
112 ++ * allocator, since that could lead to races & data clobbering.
113 + */
114 +- if (phys_addr < virt_to_phys(high_memory)) {
115 +- char *t_addr, *t_end;
116 +- struct page *page;
117 +-
118 +- t_addr = __va(phys_addr);
119 +- t_end = t_addr + (size - 1);
120 +-
121 +- for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
122 +- if(!PageReserved(page))
123 +- return NULL;
124 ++ pfn = PFN_DOWN(phys_addr);
125 ++ last_pfn = PFN_DOWN(last_addr);
126 ++ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
127 ++ __ioremap_check_ram) == 1) {
128 ++ WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
129 ++ &phys_addr, &last_addr);
130 ++ return NULL;
131 + }
132 +
133 + /*
134 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
135 +index eab1ef25eecd..d9afe6d40550 100644
136 +--- a/arch/x86/Kconfig
137 ++++ b/arch/x86/Kconfig
138 +@@ -346,6 +346,17 @@ config X86_FEATURE_NAMES
139 +
140 + If in doubt, say Y.
141 +
142 ++config X86_FAST_FEATURE_TESTS
143 ++ bool "Fast CPU feature tests" if EMBEDDED
144 ++ default y
145 ++ ---help---
146 ++ Some fast-paths in the kernel depend on the capabilities of the CPU.
147 ++ Say Y here for the kernel to patch in the appropriate code at runtime
148 ++ based on the capabilities of the CPU. The infrastructure for patching
149 ++ code at runtime takes up some additional space; space-constrained
150 ++ embedded systems may wish to say N here to produce smaller, slightly
151 ++ slower code.
152 ++
153 + config X86_X2APIC
154 + bool "Support x2apic"
155 + depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST)
156 +diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
157 +index da00fe1f48f4..2aa212fb0faf 100644
158 +--- a/arch/x86/Kconfig.debug
159 ++++ b/arch/x86/Kconfig.debug
160 +@@ -367,16 +367,6 @@ config DEBUG_IMR_SELFTEST
161 +
162 + If unsure say N here.
163 +
164 +-config X86_DEBUG_STATIC_CPU_HAS
165 +- bool "Debug alternatives"
166 +- depends on DEBUG_KERNEL
167 +- ---help---
168 +- This option causes additional code to be generated which
169 +- fails if static_cpu_has() is used before alternatives have
170 +- run.
171 +-
172 +- If unsure, say N.
173 +-
174 + config X86_DEBUG_FPU
175 + bool "Debug the x86 FPU code"
176 + depends on DEBUG_KERNEL
177 +diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h
178 +index ea97697e51e4..4cb404fd45ce 100644
179 +--- a/arch/x86/boot/cpuflags.h
180 ++++ b/arch/x86/boot/cpuflags.h
181 +@@ -1,7 +1,7 @@
182 + #ifndef BOOT_CPUFLAGS_H
183 + #define BOOT_CPUFLAGS_H
184 +
185 +-#include <asm/cpufeature.h>
186 ++#include <asm/cpufeatures.h>
187 + #include <asm/processor-flags.h>
188 +
189 + struct cpu_features {
190 +diff --git a/arch/x86/boot/mkcpustr.c b/arch/x86/boot/mkcpustr.c
191 +index 637097e66a62..f72498dc90d2 100644
192 +--- a/arch/x86/boot/mkcpustr.c
193 ++++ b/arch/x86/boot/mkcpustr.c
194 +@@ -17,7 +17,7 @@
195 +
196 + #include "../include/asm/required-features.h"
197 + #include "../include/asm/disabled-features.h"
198 +-#include "../include/asm/cpufeature.h"
199 ++#include "../include/asm/cpufeatures.h"
200 + #include "../kernel/cpu/capflags.c"
201 +
202 + int main(void)
203 +diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
204 +index 07d2c6c86a54..27226df3f7d8 100644
205 +--- a/arch/x86/crypto/crc32-pclmul_glue.c
206 ++++ b/arch/x86/crypto/crc32-pclmul_glue.c
207 +@@ -33,7 +33,7 @@
208 + #include <linux/crc32.h>
209 + #include <crypto/internal/hash.h>
210 +
211 +-#include <asm/cpufeature.h>
212 ++#include <asm/cpufeatures.h>
213 + #include <asm/cpu_device_id.h>
214 + #include <asm/fpu/api.h>
215 +
216 +diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
217 +index 15f5c7675d42..715399b14ed7 100644
218 +--- a/arch/x86/crypto/crc32c-intel_glue.c
219 ++++ b/arch/x86/crypto/crc32c-intel_glue.c
220 +@@ -30,7 +30,7 @@
221 + #include <linux/kernel.h>
222 + #include <crypto/internal/hash.h>
223 +
224 +-#include <asm/cpufeature.h>
225 ++#include <asm/cpufeatures.h>
226 + #include <asm/cpu_device_id.h>
227 + #include <asm/fpu/internal.h>
228 +
229 +diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
230 +index a3fcfc97a311..cd4df9322501 100644
231 +--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
232 ++++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
233 +@@ -30,7 +30,7 @@
234 + #include <linux/string.h>
235 + #include <linux/kernel.h>
236 + #include <asm/fpu/api.h>
237 +-#include <asm/cpufeature.h>
238 ++#include <asm/cpufeatures.h>
239 + #include <asm/cpu_device_id.h>
240 +
241 + asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
242 +diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
243 +index b5eb1cca70a0..071582a3b5c0 100644
244 +--- a/arch/x86/entry/common.c
245 ++++ b/arch/x86/entry/common.c
246 +@@ -27,6 +27,7 @@
247 + #include <asm/traps.h>
248 + #include <asm/vdso.h>
249 + #include <asm/uaccess.h>
250 ++#include <asm/cpufeature.h>
251 +
252 + #define CREATE_TRACE_POINTS
253 + #include <trace/events/syscalls.h>
254 +diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
255 +index d437f3871e53..49a8c9f7a379 100644
256 +--- a/arch/x86/entry/entry_32.S
257 ++++ b/arch/x86/entry/entry_32.S
258 +@@ -40,7 +40,7 @@
259 + #include <asm/processor-flags.h>
260 + #include <asm/ftrace.h>
261 + #include <asm/irq_vectors.h>
262 +-#include <asm/cpufeature.h>
263 ++#include <asm/cpufeatures.h>
264 + #include <asm/alternative-asm.h>
265 + #include <asm/asm.h>
266 + #include <asm/smap.h>
267 +diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
268 +index a7508d7e20b7..3f9d1a83891a 100644
269 +--- a/arch/x86/entry/vdso/vdso32-setup.c
270 ++++ b/arch/x86/entry/vdso/vdso32-setup.c
271 +@@ -11,7 +11,6 @@
272 + #include <linux/kernel.h>
273 + #include <linux/mm_types.h>
274 +
275 +-#include <asm/cpufeature.h>
276 + #include <asm/processor.h>
277 + #include <asm/vdso.h>
278 +
279 +diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
280 +index 3a1d9297074b..0109ac6cb79c 100644
281 +--- a/arch/x86/entry/vdso/vdso32/system_call.S
282 ++++ b/arch/x86/entry/vdso/vdso32/system_call.S
283 +@@ -3,7 +3,7 @@
284 + */
285 +
286 + #include <asm/dwarf2.h>
287 +-#include <asm/cpufeature.h>
288 ++#include <asm/cpufeatures.h>
289 + #include <asm/alternative-asm.h>
290 +
291 + /*
292 +diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
293 +index b8f69e264ac4..6b46648588d8 100644
294 +--- a/arch/x86/entry/vdso/vma.c
295 ++++ b/arch/x86/entry/vdso/vma.c
296 +@@ -20,6 +20,7 @@
297 + #include <asm/page.h>
298 + #include <asm/hpet.h>
299 + #include <asm/desc.h>
300 ++#include <asm/cpufeature.h>
301 +
302 + #if defined(CONFIG_X86_64)
303 + unsigned int __read_mostly vdso64_enabled = 1;
304 +@@ -254,7 +255,7 @@ static void vgetcpu_cpu_init(void *arg)
305 + #ifdef CONFIG_NUMA
306 + node = cpu_to_node(cpu);
307 + #endif
308 +- if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
309 ++ if (static_cpu_has(X86_FEATURE_RDTSCP))
310 + write_rdtscp_aux((node << 12) | cpu);
311 +
312 + /*
313 +diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
314 +index 215ea9214215..002fcd901f07 100644
315 +--- a/arch/x86/include/asm/alternative.h
316 ++++ b/arch/x86/include/asm/alternative.h
317 +@@ -153,12 +153,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
318 + ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
319 + ".popsection\n"
320 +
321 +-/*
322 +- * This must be included *after* the definition of ALTERNATIVE due to
323 +- * <asm/arch_hweight.h>
324 +- */
325 +-#include <asm/cpufeature.h>
326 +-
327 + /*
328 + * Alternative instructions for different CPU types or capabilities.
329 + *
330 +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
331 +index 163769d82475..fd810a57ab1b 100644
332 +--- a/arch/x86/include/asm/apic.h
333 ++++ b/arch/x86/include/asm/apic.h
334 +@@ -6,7 +6,6 @@
335 +
336 + #include <asm/alternative.h>
337 + #include <asm/cpufeature.h>
338 +-#include <asm/processor.h>
339 + #include <asm/apicdef.h>
340 + #include <linux/atomic.h>
341 + #include <asm/fixmap.h>
342 +diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
343 +index 44f825c80ed5..e7cd63175de4 100644
344 +--- a/arch/x86/include/asm/arch_hweight.h
345 ++++ b/arch/x86/include/asm/arch_hweight.h
346 +@@ -1,6 +1,8 @@
347 + #ifndef _ASM_X86_HWEIGHT_H
348 + #define _ASM_X86_HWEIGHT_H
349 +
350 ++#include <asm/cpufeatures.h>
351 ++
352 + #ifdef CONFIG_64BIT
353 + /* popcnt %edi, %eax */
354 + #define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7"
355 +diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
356 +index ae5fb83e6d91..3e8674288198 100644
357 +--- a/arch/x86/include/asm/atomic.h
358 ++++ b/arch/x86/include/asm/atomic.h
359 +@@ -3,7 +3,6 @@
360 +
361 + #include <linux/compiler.h>
362 + #include <linux/types.h>
363 +-#include <asm/processor.h>
364 + #include <asm/alternative.h>
365 + #include <asm/cmpxchg.h>
366 + #include <asm/rmwcc.h>
367 +diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
368 +index a11c30b77fb5..a984111135b1 100644
369 +--- a/arch/x86/include/asm/atomic64_32.h
370 ++++ b/arch/x86/include/asm/atomic64_32.h
371 +@@ -3,7 +3,6 @@
372 +
373 + #include <linux/compiler.h>
374 + #include <linux/types.h>
375 +-#include <asm/processor.h>
376 + //#include <asm/cmpxchg.h>
377 +
378 + /* An 64bit atomic type */
379 +diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
380 +index ad19841eddfe..9733361fed6f 100644
381 +--- a/arch/x86/include/asm/cmpxchg.h
382 ++++ b/arch/x86/include/asm/cmpxchg.h
383 +@@ -2,6 +2,7 @@
384 + #define ASM_X86_CMPXCHG_H
385 +
386 + #include <linux/compiler.h>
387 ++#include <asm/cpufeatures.h>
388 + #include <asm/alternative.h> /* Provides LOCK_PREFIX */
389 +
390 + /*
391 +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
392 +index 232621c5e859..dd0089841a0f 100644
393 +--- a/arch/x86/include/asm/cpufeature.h
394 ++++ b/arch/x86/include/asm/cpufeature.h
395 +@@ -1,294 +1,35 @@
396 +-/*
397 +- * Defines x86 CPU feature bits
398 +- */
399 + #ifndef _ASM_X86_CPUFEATURE_H
400 + #define _ASM_X86_CPUFEATURE_H
401 +
402 +-#ifndef _ASM_X86_REQUIRED_FEATURES_H
403 +-#include <asm/required-features.h>
404 +-#endif
405 +-
406 +-#ifndef _ASM_X86_DISABLED_FEATURES_H
407 +-#include <asm/disabled-features.h>
408 +-#endif
409 +-
410 +-#define NCAPINTS 14 /* N 32-bit words worth of info */
411 +-#define NBUGINTS 1 /* N 32-bit bug flags */
412 +-
413 +-/*
414 +- * Note: If the comment begins with a quoted string, that string is used
415 +- * in /proc/cpuinfo instead of the macro name. If the string is "",
416 +- * this feature bit is not displayed in /proc/cpuinfo at all.
417 +- */
418 +-
419 +-/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
420 +-#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
421 +-#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
422 +-#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
423 +-#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
424 +-#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
425 +-#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
426 +-#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
427 +-#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
428 +-#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
429 +-#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
430 +-#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
431 +-#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
432 +-#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
433 +-#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
434 +-#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
435 +- /* (plus FCMOVcc, FCOMI with FPU) */
436 +-#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
437 +-#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
438 +-#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
439 +-#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
440 +-#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
441 +-#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
442 +-#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
443 +-#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
444 +-#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
445 +-#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
446 +-#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
447 +-#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
448 +-#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
449 +-#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
450 +-#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
451 +-
452 +-/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
453 +-/* Don't duplicate feature flags which are redundant with Intel! */
454 +-#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
455 +-#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
456 +-#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
457 +-#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
458 +-#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
459 +-#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
460 +-#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
461 +-#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
462 +-#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
463 +-#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
464 +-
465 +-/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
466 +-#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
467 +-#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
468 +-#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
469 +-
470 +-/* Other features, Linux-defined mapping, word 3 */
471 +-/* This range is used for feature bits which conflict or are synthesized */
472 +-#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
473 +-#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
474 +-#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
475 +-#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
476 +-/* cpu types for specific tunings: */
477 +-#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
478 +-#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
479 +-#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
480 +-#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
481 +-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
482 +-#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
483 +-/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
484 +-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
485 +-#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
486 +-#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
487 +-#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
488 +-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
489 +-#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
490 +-#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
491 +-#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
492 +-/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
493 +-#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
494 +-#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
495 +-#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
496 +-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
497 +-#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
498 +-/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
499 +-#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
500 +-#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
501 +-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
502 +-/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
503 +-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
504 +-
505 +-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
506 +-#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
507 +-#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
508 +-#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
509 +-#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
510 +-#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
511 +-#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
512 +-#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
513 +-#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
514 +-#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
515 +-#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
516 +-#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
517 +-#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
518 +-#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
519 +-#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
520 +-#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
521 +-#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
522 +-#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
523 +-#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
524 +-#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
525 +-#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
526 +-#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
527 +-#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
528 +-#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
529 +-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
530 +-#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
531 +-#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
532 +-#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
533 +-#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
534 +-#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
535 +-#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
536 +-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
537 +-
538 +-/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
539 +-#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
540 +-#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
541 +-#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
542 +-#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
543 +-#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
544 +-#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
545 +-#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
546 +-#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
547 +-#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
548 +-#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
549 +-
550 +-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
551 +-#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
552 +-#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
553 +-#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
554 +-#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
555 +-#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
556 +-#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
557 +-#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
558 +-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
559 +-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
560 +-#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
561 +-#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
562 +-#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
563 +-#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
564 +-#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
565 +-#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
566 +-#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
567 +-#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
568 +-#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
569 +-#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
570 +-#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
571 +-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
572 +-#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
573 +-#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
574 +-#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
575 +-#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
576 +-
577 +-/*
578 +- * Auxiliary flags: Linux defined - For features scattered in various
579 +- * CPUID levels like 0x6, 0xA etc, word 7
580 +- */
581 +-#define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */
582 +-#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */
583 +-#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
584 +-#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
585 +-#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
586 +-#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */
587 +-#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */
588 +-#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */
589 +-#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
590 +-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
591 +-#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
592 +-#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
593 +-#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
594 +-#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
595 +-#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
596 +-#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
597 +-#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
598 +-
599 +-#define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
600 +-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
601 +-/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
602 +-#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
603 +-
604 +-/* Virtualization flags: Linux defined, word 8 */
605 +-#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
606 +-#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
607 +-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
608 +-#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
609 +-#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
610 +-#define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */
611 +-#define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */
612 +-#define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
613 +-#define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
614 +-#define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
615 +-#define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
616 +-#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */
617 +-#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
618 +-#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
619 +-#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
620 +-#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
621 +-#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
622 +-
623 +-
624 +-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
625 +-#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
626 +-#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
627 +-#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
628 +-#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
629 +-#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
630 +-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
631 +-#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
632 +-#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
633 +-#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
634 +-#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
635 +-#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
636 +-#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
637 +-#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
638 +-#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
639 +-#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
640 +-#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
641 +-#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
642 +-#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
643 +-#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
644 +-#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
645 +-#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
646 +-#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
647 +-#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
648 +-
649 +-/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
650 +-#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
651 +-#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
652 +-#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
653 +-#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
654 +-
655 +-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
656 +-#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
657 +-
658 +-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
659 +-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
660 +-
661 +-/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
662 +-#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
663 +-
664 +-/*
665 +- * BUG word(s)
666 +- */
667 +-#define X86_BUG(x) (NCAPINTS*32 + (x))
668 +-
669 +-#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
670 +-#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
671 +-#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
672 +-#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
673 +-#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
674 +-#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
675 +-#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
676 +-#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
677 +-#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
678 +-#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
679 +-#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
680 +-#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
681 ++#include <asm/processor.h>
682 +
683 + #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
684 +
685 + #include <asm/asm.h>
686 + #include <linux/bitops.h>
687 +
688 ++enum cpuid_leafs
689 ++{
690 ++ CPUID_1_EDX = 0,
691 ++ CPUID_8000_0001_EDX,
692 ++ CPUID_8086_0001_EDX,
693 ++ CPUID_LNX_1,
694 ++ CPUID_1_ECX,
695 ++ CPUID_C000_0001_EDX,
696 ++ CPUID_8000_0001_ECX,
697 ++ CPUID_LNX_2,
698 ++ CPUID_LNX_3,
699 ++ CPUID_7_0_EBX,
700 ++ CPUID_D_1_EAX,
701 ++ CPUID_F_0_EDX,
702 ++ CPUID_F_1_EDX,
703 ++ CPUID_8000_0008_EBX,
704 ++ CPUID_6_EAX,
705 ++ CPUID_8000_000A_EDX,
706 ++ CPUID_7_ECX,
707 ++ CPUID_8000_0007_EBX,
708 ++};
709 ++
710 + #ifdef CONFIG_X86_FEATURE_NAMES
711 + extern const char * const x86_cap_flags[NCAPINTS*32];
712 + extern const char * const x86_power_flags[32];
713 +@@ -308,29 +49,59 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
714 + #define test_cpu_cap(c, bit) \
715 + test_bit(bit, (unsigned long *)((c)->x86_capability))
716 +
717 +-#define REQUIRED_MASK_BIT_SET(bit) \
718 +- ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
719 +- (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
720 +- (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
721 +- (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
722 +- (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
723 +- (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
724 +- (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
725 +- (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \
726 +- (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \
727 +- (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
728 +-
729 +-#define DISABLED_MASK_BIT_SET(bit) \
730 +- ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) || \
731 +- (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) || \
732 +- (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) || \
733 +- (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) || \
734 +- (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) || \
735 +- (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) || \
736 +- (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) || \
737 +- (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) || \
738 +- (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) || \
739 +- (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) )
740 ++/*
741 ++ * There are 32 bits/features in each mask word. The high bits
742 ++ * (selected with (bit>>5) give us the word number and the low 5
743 ++ * bits give us the bit/feature number inside the word.
744 ++ * (1UL<<((bit)&31) gives us a mask for the feature_bit so we can
745 ++ * see if it is set in the mask word.
746 ++ */
747 ++#define CHECK_BIT_IN_MASK_WORD(maskname, word, bit) \
748 ++ (((bit)>>5)==(word) && (1UL<<((bit)&31) & maskname##word ))
749 ++
750 ++#define REQUIRED_MASK_BIT_SET(feature_bit) \
751 ++ ( CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 0, feature_bit) || \
752 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 1, feature_bit) || \
753 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 2, feature_bit) || \
754 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 3, feature_bit) || \
755 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 4, feature_bit) || \
756 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 5, feature_bit) || \
757 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 6, feature_bit) || \
758 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 7, feature_bit) || \
759 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 8, feature_bit) || \
760 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 9, feature_bit) || \
761 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 10, feature_bit) || \
762 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 11, feature_bit) || \
763 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 12, feature_bit) || \
764 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 13, feature_bit) || \
765 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 14, feature_bit) || \
766 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \
767 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
768 ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
769 ++ REQUIRED_MASK_CHECK || \
770 ++ BUILD_BUG_ON_ZERO(NCAPINTS != 18))
771 ++
772 ++#define DISABLED_MASK_BIT_SET(feature_bit) \
773 ++ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
774 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 1, feature_bit) || \
775 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 2, feature_bit) || \
776 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 3, feature_bit) || \
777 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 4, feature_bit) || \
778 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 5, feature_bit) || \
779 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 6, feature_bit) || \
780 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 7, feature_bit) || \
781 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 8, feature_bit) || \
782 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 9, feature_bit) || \
783 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 10, feature_bit) || \
784 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 11, feature_bit) || \
785 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 12, feature_bit) || \
786 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 13, feature_bit) || \
787 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 14, feature_bit) || \
788 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \
789 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
790 ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
791 ++ DISABLED_MASK_CHECK || \
792 ++ BUILD_BUG_ON_ZERO(NCAPINTS != 18))
793 +
794 + #define cpu_has(c, bit) \
795 + (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
796 +@@ -349,8 +120,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
797 + * is not relevant.
798 + */
799 + #define cpu_feature_enabled(bit) \
800 +- (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : \
801 +- cpu_has(&boot_cpu_data, bit))
802 ++ (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : static_cpu_has(bit))
803 +
804 + #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
805 +
806 +@@ -388,106 +158,19 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
807 + #define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
808 + #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
809 + /*
810 +- * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
811 ++ * Do not add any more of those clumsy macros - use static_cpu_has() for
812 + * fast paths and boot_cpu_has() otherwise!
813 + */
814 +
815 +-#if __GNUC__ >= 4
816 +-extern void warn_pre_alternatives(void);
817 +-extern bool __static_cpu_has_safe(u16 bit);
818 +-
819 ++#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
820 + /*
821 + * Static testing of CPU features. Used the same as boot_cpu_has().
822 +- * These are only valid after alternatives have run, but will statically
823 +- * patch the target code for additional performance.
824 ++ * These will statically patch the target code for additional
825 ++ * performance.
826 + */
827 +-static __always_inline __pure bool __static_cpu_has(u16 bit)
828 +-{
829 +-#ifdef CC_HAVE_ASM_GOTO
830 +-
831 +-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
832 +-
833 +- /*
834 +- * Catch too early usage of this before alternatives
835 +- * have run.
836 +- */
837 +- asm_volatile_goto("1: jmp %l[t_warn]\n"
838 +- "2:\n"
839 +- ".section .altinstructions,\"a\"\n"
840 +- " .long 1b - .\n"
841 +- " .long 0\n" /* no replacement */
842 +- " .word %P0\n" /* 1: do replace */
843 +- " .byte 2b - 1b\n" /* source len */
844 +- " .byte 0\n" /* replacement len */
845 +- " .byte 0\n" /* pad len */
846 +- ".previous\n"
847 +- /* skipping size check since replacement size = 0 */
848 +- : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
849 +-
850 +-#endif
851 +-
852 +- asm_volatile_goto("1: jmp %l[t_no]\n"
853 +- "2:\n"
854 +- ".section .altinstructions,\"a\"\n"
855 +- " .long 1b - .\n"
856 +- " .long 0\n" /* no replacement */
857 +- " .word %P0\n" /* feature bit */
858 +- " .byte 2b - 1b\n" /* source len */
859 +- " .byte 0\n" /* replacement len */
860 +- " .byte 0\n" /* pad len */
861 +- ".previous\n"
862 +- /* skipping size check since replacement size = 0 */
863 +- : : "i" (bit) : : t_no);
864 +- return true;
865 +- t_no:
866 +- return false;
867 +-
868 +-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
869 +- t_warn:
870 +- warn_pre_alternatives();
871 +- return false;
872 +-#endif
873 +-
874 +-#else /* CC_HAVE_ASM_GOTO */
875 +-
876 +- u8 flag;
877 +- /* Open-coded due to __stringify() in ALTERNATIVE() */
878 +- asm volatile("1: movb $0,%0\n"
879 +- "2:\n"
880 +- ".section .altinstructions,\"a\"\n"
881 +- " .long 1b - .\n"
882 +- " .long 3f - .\n"
883 +- " .word %P1\n" /* feature bit */
884 +- " .byte 2b - 1b\n" /* source len */
885 +- " .byte 4f - 3f\n" /* replacement len */
886 +- " .byte 0\n" /* pad len */
887 +- ".previous\n"
888 +- ".section .discard,\"aw\",@progbits\n"
889 +- " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
890 +- ".previous\n"
891 +- ".section .altinstr_replacement,\"ax\"\n"
892 +- "3: movb $1,%0\n"
893 +- "4:\n"
894 +- ".previous\n"
895 +- : "=qm" (flag) : "i" (bit));
896 +- return flag;
897 +-
898 +-#endif /* CC_HAVE_ASM_GOTO */
899 +-}
900 +-
901 +-#define static_cpu_has(bit) \
902 +-( \
903 +- __builtin_constant_p(boot_cpu_has(bit)) ? \
904 +- boot_cpu_has(bit) : \
905 +- __builtin_constant_p(bit) ? \
906 +- __static_cpu_has(bit) : \
907 +- boot_cpu_has(bit) \
908 +-)
909 +-
910 +-static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
911 ++static __always_inline __pure bool _static_cpu_has(u16 bit)
912 + {
913 +-#ifdef CC_HAVE_ASM_GOTO
914 +- asm_volatile_goto("1: jmp %l[t_dynamic]\n"
915 ++ asm_volatile_goto("1: jmp 6f\n"
916 + "2:\n"
917 + ".skip -(((5f-4f) - (2b-1b)) > 0) * "
918 + "((5f-4f) - (2b-1b)),0x90\n"
919 +@@ -512,66 +195,34 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
920 + " .byte 0\n" /* repl len */
921 + " .byte 0\n" /* pad len */
922 + ".previous\n"
923 +- : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
924 +- : : t_dynamic, t_no);
925 ++ ".section .altinstr_aux,\"ax\"\n"
926 ++ "6:\n"
927 ++ " testb %[bitnum],%[cap_byte]\n"
928 ++ " jnz %l[t_yes]\n"
929 ++ " jmp %l[t_no]\n"
930 ++ ".previous\n"
931 ++ : : "i" (bit), "i" (X86_FEATURE_ALWAYS),
932 ++ [bitnum] "i" (1 << (bit & 7)),
933 ++ [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
934 ++ : : t_yes, t_no);
935 ++ t_yes:
936 + return true;
937 + t_no:
938 + return false;
939 +- t_dynamic:
940 +- return __static_cpu_has_safe(bit);
941 +-#else
942 +- u8 flag;
943 +- /* Open-coded due to __stringify() in ALTERNATIVE() */
944 +- asm volatile("1: movb $2,%0\n"
945 +- "2:\n"
946 +- ".section .altinstructions,\"a\"\n"
947 +- " .long 1b - .\n" /* src offset */
948 +- " .long 3f - .\n" /* repl offset */
949 +- " .word %P2\n" /* always replace */
950 +- " .byte 2b - 1b\n" /* source len */
951 +- " .byte 4f - 3f\n" /* replacement len */
952 +- " .byte 0\n" /* pad len */
953 +- ".previous\n"
954 +- ".section .discard,\"aw\",@progbits\n"
955 +- " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
956 +- ".previous\n"
957 +- ".section .altinstr_replacement,\"ax\"\n"
958 +- "3: movb $0,%0\n"
959 +- "4:\n"
960 +- ".previous\n"
961 +- ".section .altinstructions,\"a\"\n"
962 +- " .long 1b - .\n" /* src offset */
963 +- " .long 5f - .\n" /* repl offset */
964 +- " .word %P1\n" /* feature bit */
965 +- " .byte 4b - 3b\n" /* src len */
966 +- " .byte 6f - 5f\n" /* repl len */
967 +- " .byte 0\n" /* pad len */
968 +- ".previous\n"
969 +- ".section .discard,\"aw\",@progbits\n"
970 +- " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
971 +- ".previous\n"
972 +- ".section .altinstr_replacement,\"ax\"\n"
973 +- "5: movb $1,%0\n"
974 +- "6:\n"
975 +- ".previous\n"
976 +- : "=qm" (flag)
977 +- : "i" (bit), "i" (X86_FEATURE_ALWAYS));
978 +- return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
979 +-#endif /* CC_HAVE_ASM_GOTO */
980 + }
981 +
982 +-#define static_cpu_has_safe(bit) \
983 ++#define static_cpu_has(bit) \
984 + ( \
985 + __builtin_constant_p(boot_cpu_has(bit)) ? \
986 + boot_cpu_has(bit) : \
987 +- _static_cpu_has_safe(bit) \
988 ++ _static_cpu_has(bit) \
989 + )
990 + #else
991 + /*
992 +- * gcc 3.x is too stupid to do the static test; fall back to dynamic.
993 ++ * Fall back to dynamic for gcc versions which don't support asm goto. Should be
994 ++ * a minority now anyway.
995 + */
996 + #define static_cpu_has(bit) boot_cpu_has(bit)
997 +-#define static_cpu_has_safe(bit) boot_cpu_has(bit)
998 + #endif
999 +
1000 + #define cpu_has_bug(c, bit) cpu_has(c, (bit))
1001 +@@ -579,7 +230,6 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
1002 + #define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit))
1003 +
1004 + #define static_cpu_has_bug(bit) static_cpu_has((bit))
1005 +-#define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit))
1006 + #define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
1007 +
1008 + #define MAX_CPU_FEATURES (NCAPINTS * 32)
1009 +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
1010 +new file mode 100644
1011 +index 000000000000..205ce70c1d6c
1012 +--- /dev/null
1013 ++++ b/arch/x86/include/asm/cpufeatures.h
1014 +@@ -0,0 +1,306 @@
1015 ++#ifndef _ASM_X86_CPUFEATURES_H
1016 ++#define _ASM_X86_CPUFEATURES_H
1017 ++
1018 ++#ifndef _ASM_X86_REQUIRED_FEATURES_H
1019 ++#include <asm/required-features.h>
1020 ++#endif
1021 ++
1022 ++#ifndef _ASM_X86_DISABLED_FEATURES_H
1023 ++#include <asm/disabled-features.h>
1024 ++#endif
1025 ++
1026 ++/*
1027 ++ * Defines x86 CPU feature bits
1028 ++ */
1029 ++#define NCAPINTS 18 /* N 32-bit words worth of info */
1030 ++#define NBUGINTS 1 /* N 32-bit bug flags */
1031 ++
1032 ++/*
1033 ++ * Note: If the comment begins with a quoted string, that string is used
1034 ++ * in /proc/cpuinfo instead of the macro name. If the string is "",
1035 ++ * this feature bit is not displayed in /proc/cpuinfo at all.
1036 ++ */
1037 ++
1038 ++/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
1039 ++#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
1040 ++#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
1041 ++#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
1042 ++#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
1043 ++#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
1044 ++#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
1045 ++#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
1046 ++#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
1047 ++#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
1048 ++#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
1049 ++#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
1050 ++#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
1051 ++#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
1052 ++#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
1053 ++#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
1054 ++ /* (plus FCMOVcc, FCOMI with FPU) */
1055 ++#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
1056 ++#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
1057 ++#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
1058 ++#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
1059 ++#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
1060 ++#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
1061 ++#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
1062 ++#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
1063 ++#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
1064 ++#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
1065 ++#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
1066 ++#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
1067 ++#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
1068 ++#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
1069 ++#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
1070 ++
1071 ++/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
1072 ++/* Don't duplicate feature flags which are redundant with Intel! */
1073 ++#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
1074 ++#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
1075 ++#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
1076 ++#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
1077 ++#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
1078 ++#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
1079 ++#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
1080 ++#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
1081 ++#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
1082 ++#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
1083 ++
1084 ++/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
1085 ++#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
1086 ++#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
1087 ++#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
1088 ++
1089 ++/* Other features, Linux-defined mapping, word 3 */
1090 ++/* This range is used for feature bits which conflict or are synthesized */
1091 ++#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
1092 ++#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
1093 ++#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
1094 ++#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
1095 ++/* cpu types for specific tunings: */
1096 ++#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
1097 ++#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
1098 ++#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
1099 ++#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
1100 ++#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
1101 ++#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
1102 ++/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
1103 ++#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
1104 ++#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
1105 ++#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
1106 ++#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
1107 ++#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
1108 ++#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
1109 ++#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
1110 ++#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
1111 ++/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
1112 ++#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
1113 ++#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
1114 ++#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
1115 ++#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
1116 ++#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
1117 ++/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
1118 ++#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
1119 ++#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
1120 ++#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
1121 ++/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
1122 ++#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
1123 ++
1124 ++/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
1125 ++#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
1126 ++#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
1127 ++#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
1128 ++#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
1129 ++#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
1130 ++#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
1131 ++#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
1132 ++#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
1133 ++#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
1134 ++#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
1135 ++#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
1136 ++#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
1137 ++#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
1138 ++#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
1139 ++#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
1140 ++#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
1141 ++#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
1142 ++#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
1143 ++#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
1144 ++#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
1145 ++#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
1146 ++#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
1147 ++#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
1148 ++#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
1149 ++#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
1150 ++#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
1151 ++#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
1152 ++#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
1153 ++#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
1154 ++#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
1155 ++#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
1156 ++
1157 ++/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
1158 ++#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
1159 ++#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
1160 ++#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
1161 ++#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
1162 ++#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
1163 ++#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
1164 ++#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
1165 ++#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
1166 ++#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
1167 ++#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
1168 ++
1169 ++/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
1170 ++#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
1171 ++#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
1172 ++#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
1173 ++#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
1174 ++#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
1175 ++#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
1176 ++#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
1177 ++#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
1178 ++#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
1179 ++#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
1180 ++#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
1181 ++#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
1182 ++#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
1183 ++#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
1184 ++#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
1185 ++#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
1186 ++#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
1187 ++#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
1188 ++#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
1189 ++#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
1190 ++#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
1191 ++#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
1192 ++#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
1193 ++#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
1194 ++#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
1195 ++
1196 ++/*
1197 ++ * Auxiliary flags: Linux defined - For features scattered in various
1198 ++ * CPUID levels like 0x6, 0xA etc, word 7.
1199 ++ *
1200 ++ * Reuse free bits when adding new feature flags!
1201 ++ */
1202 ++
1203 ++#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
1204 ++#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
1205 ++#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
1206 ++
1207 ++#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
1208 ++#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
1209 ++
1210 ++#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
1211 ++#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
1212 ++
1213 ++#define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
1214 ++#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
1215 ++/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
1216 ++#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
1217 ++
1218 ++/* Virtualization flags: Linux defined, word 8 */
1219 ++#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
1220 ++#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
1221 ++#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
1222 ++#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
1223 ++#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
1224 ++
1225 ++#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
1226 ++#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
1227 ++
1228 ++
1229 ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
1230 ++#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
1231 ++#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
1232 ++#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
1233 ++#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
1234 ++#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
1235 ++#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
1236 ++#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
1237 ++#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
1238 ++#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
1239 ++#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
1240 ++#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
1241 ++#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
1242 ++#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
1243 ++#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
1244 ++#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
1245 ++#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
1246 ++#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
1247 ++#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
1248 ++#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
1249 ++#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
1250 ++#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
1251 ++#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
1252 ++#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
1253 ++
1254 ++/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
1255 ++#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
1256 ++#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
1257 ++#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
1258 ++#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
1259 ++
1260 ++/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
1261 ++#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
1262 ++
1263 ++/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
1264 ++#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
1265 ++
1266 ++/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
1267 ++#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
1268 ++
1269 ++/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
1270 ++#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
1271 ++#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
1272 ++#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
1273 ++#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
1274 ++#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
1275 ++#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
1276 ++#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
1277 ++#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
1278 ++#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
1279 ++#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
1280 ++
1281 ++/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
1282 ++#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
1283 ++#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
1284 ++#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
1285 ++#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
1286 ++#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
1287 ++#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
1288 ++#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
1289 ++#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
1290 ++#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
1291 ++#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
1292 ++
1293 ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
1294 ++#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
1295 ++#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
1296 ++
1297 ++/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
1298 ++#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
1299 ++#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
1300 ++#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */
1301 ++
1302 ++/*
1303 ++ * BUG word(s)
1304 ++ */
1305 ++#define X86_BUG(x) (NCAPINTS*32 + (x))
1306 ++
1307 ++#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
1308 ++#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
1309 ++#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
1310 ++#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
1311 ++#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
1312 ++#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
1313 ++#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
1314 ++#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
1315 ++#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
1316 ++#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
1317 ++#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
1318 ++#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
1319 ++
1320 ++#endif /* _ASM_X86_CPUFEATURES_H */
1321 +diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
1322 +index 8b17c2ad1048..21c5ac15657b 100644
1323 +--- a/arch/x86/include/asm/disabled-features.h
1324 ++++ b/arch/x86/include/asm/disabled-features.h
1325 +@@ -30,6 +30,14 @@
1326 + # define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
1327 + #endif /* CONFIG_X86_64 */
1328 +
1329 ++#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
1330 ++# define DISABLE_PKU 0
1331 ++# define DISABLE_OSPKE 0
1332 ++#else
1333 ++# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31))
1334 ++# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
1335 ++#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
1336 ++
1337 + /*
1338 + * Make sure to add features to the correct mask
1339 + */
1340 +@@ -43,5 +51,14 @@
1341 + #define DISABLED_MASK7 0
1342 + #define DISABLED_MASK8 0
1343 + #define DISABLED_MASK9 (DISABLE_MPX)
1344 ++#define DISABLED_MASK10 0
1345 ++#define DISABLED_MASK11 0
1346 ++#define DISABLED_MASK12 0
1347 ++#define DISABLED_MASK13 0
1348 ++#define DISABLED_MASK14 0
1349 ++#define DISABLED_MASK15 0
1350 ++#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
1351 ++#define DISABLED_MASK17 0
1352 ++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
1353 +
1354 + #endif /* _ASM_X86_DISABLED_FEATURES_H */
1355 +diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
1356 +index 146d838e6ee7..ec2aedb6f92a 100644
1357 +--- a/arch/x86/include/asm/fpu/internal.h
1358 ++++ b/arch/x86/include/asm/fpu/internal.h
1359 +@@ -17,6 +17,7 @@
1360 + #include <asm/user.h>
1361 + #include <asm/fpu/api.h>
1362 + #include <asm/fpu/xstate.h>
1363 ++#include <asm/cpufeature.h>
1364 +
1365 + /*
1366 + * High level FPU state handling functions:
1367 +@@ -63,17 +64,17 @@ static __always_inline __pure bool use_eager_fpu(void)
1368 +
1369 + static __always_inline __pure bool use_xsaveopt(void)
1370 + {
1371 +- return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
1372 ++ return static_cpu_has(X86_FEATURE_XSAVEOPT);
1373 + }
1374 +
1375 + static __always_inline __pure bool use_xsave(void)
1376 + {
1377 +- return static_cpu_has_safe(X86_FEATURE_XSAVE);
1378 ++ return static_cpu_has(X86_FEATURE_XSAVE);
1379 + }
1380 +
1381 + static __always_inline __pure bool use_fxsr(void)
1382 + {
1383 +- return static_cpu_has_safe(X86_FEATURE_FXSR);
1384 ++ return static_cpu_has(X86_FEATURE_FXSR);
1385 + }
1386 +
1387 + /*
1388 +@@ -225,18 +226,67 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
1389 + #define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f"
1390 + #define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
1391 +
1392 +-/* xstate instruction fault handler: */
1393 +-#define xstate_fault(__err) \
1394 +- \
1395 +- ".section .fixup,\"ax\"\n" \
1396 +- \
1397 +- "3: movl $-2,%[_err]\n" \
1398 +- " jmp 2b\n" \
1399 +- \
1400 +- ".previous\n" \
1401 +- \
1402 +- _ASM_EXTABLE(1b, 3b) \
1403 +- : [_err] "=r" (__err)
1404 ++#define XSTATE_OP(op, st, lmask, hmask, err) \
1405 ++ asm volatile("1:" op "\n\t" \
1406 ++ "xor %[err], %[err]\n" \
1407 ++ "2:\n\t" \
1408 ++ ".pushsection .fixup,\"ax\"\n\t" \
1409 ++ "3: movl $-2,%[err]\n\t" \
1410 ++ "jmp 2b\n\t" \
1411 ++ ".popsection\n\t" \
1412 ++ _ASM_EXTABLE(1b, 3b) \
1413 ++ : [err] "=r" (err) \
1414 ++ : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
1415 ++ : "memory")
1416 ++
1417 ++/*
1418 ++ * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
1419 ++ * format and supervisor states in addition to modified optimization in
1420 ++ * XSAVEOPT.
1421 ++ *
1422 ++ * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
1423 ++ * supports modified optimization which is not supported by XSAVE.
1424 ++ *
1425 ++ * We use XSAVE as a fallback.
1426 ++ *
1427 ++ * The 661 label is defined in the ALTERNATIVE* macros as the address of the
1428 ++ * original instruction which gets replaced. We need to use it here as the
1429 ++ * address of the instruction where we might get an exception at.
1430 ++ */
1431 ++#define XSTATE_XSAVE(st, lmask, hmask, err) \
1432 ++ asm volatile(ALTERNATIVE_2(XSAVE, \
1433 ++ XSAVEOPT, X86_FEATURE_XSAVEOPT, \
1434 ++ XSAVES, X86_FEATURE_XSAVES) \
1435 ++ "\n" \
1436 ++ "xor %[err], %[err]\n" \
1437 ++ "3:\n" \
1438 ++ ".pushsection .fixup,\"ax\"\n" \
1439 ++ "4: movl $-2, %[err]\n" \
1440 ++ "jmp 3b\n" \
1441 ++ ".popsection\n" \
1442 ++ _ASM_EXTABLE(661b, 4b) \
1443 ++ : [err] "=r" (err) \
1444 ++ : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
1445 ++ : "memory")
1446 ++
1447 ++/*
1448 ++ * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
1449 ++ * XSAVE area format.
1450 ++ */
1451 ++#define XSTATE_XRESTORE(st, lmask, hmask, err) \
1452 ++ asm volatile(ALTERNATIVE(XRSTOR, \
1453 ++ XRSTORS, X86_FEATURE_XSAVES) \
1454 ++ "\n" \
1455 ++ "xor %[err], %[err]\n" \
1456 ++ "3:\n" \
1457 ++ ".pushsection .fixup,\"ax\"\n" \
1458 ++ "4: movl $-2, %[err]\n" \
1459 ++ "jmp 3b\n" \
1460 ++ ".popsection\n" \
1461 ++ _ASM_EXTABLE(661b, 4b) \
1462 ++ : [err] "=r" (err) \
1463 ++ : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
1464 ++ : "memory")
1465 +
1466 + /*
1467 + * This function is called only during boot time when x86 caps are not set
1468 +@@ -247,22 +297,14 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
1469 + u64 mask = -1;
1470 + u32 lmask = mask;
1471 + u32 hmask = mask >> 32;
1472 +- int err = 0;
1473 ++ int err;
1474 +
1475 + WARN_ON(system_state != SYSTEM_BOOTING);
1476 +
1477 +- if (boot_cpu_has(X86_FEATURE_XSAVES))
1478 +- asm volatile("1:"XSAVES"\n\t"
1479 +- "2:\n\t"
1480 +- xstate_fault(err)
1481 +- : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
1482 +- : "memory");
1483 ++ if (static_cpu_has(X86_FEATURE_XSAVES))
1484 ++ XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
1485 + else
1486 +- asm volatile("1:"XSAVE"\n\t"
1487 +- "2:\n\t"
1488 +- xstate_fault(err)
1489 +- : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
1490 +- : "memory");
1491 ++ XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
1492 +
1493 + /* We should never fault when copying to a kernel buffer: */
1494 + WARN_ON_FPU(err);
1495 +@@ -277,22 +319,14 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
1496 + u64 mask = -1;
1497 + u32 lmask = mask;
1498 + u32 hmask = mask >> 32;
1499 +- int err = 0;
1500 ++ int err;
1501 +
1502 + WARN_ON(system_state != SYSTEM_BOOTING);
1503 +
1504 +- if (boot_cpu_has(X86_FEATURE_XSAVES))
1505 +- asm volatile("1:"XRSTORS"\n\t"
1506 +- "2:\n\t"
1507 +- xstate_fault(err)
1508 +- : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
1509 +- : "memory");
1510 ++ if (static_cpu_has(X86_FEATURE_XSAVES))
1511 ++ XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
1512 + else
1513 +- asm volatile("1:"XRSTOR"\n\t"
1514 +- "2:\n\t"
1515 +- xstate_fault(err)
1516 +- : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
1517 +- : "memory");
1518 ++ XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
1519 +
1520 + /* We should never fault when copying from a kernel buffer: */
1521 + WARN_ON_FPU(err);
1522 +@@ -306,33 +340,11 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
1523 + u64 mask = -1;
1524 + u32 lmask = mask;
1525 + u32 hmask = mask >> 32;
1526 +- int err = 0;
1527 ++ int err;
1528 +
1529 + WARN_ON(!alternatives_patched);
1530 +
1531 +- /*
1532 +- * If xsaves is enabled, xsaves replaces xsaveopt because
1533 +- * it supports compact format and supervisor states in addition to
1534 +- * modified optimization in xsaveopt.
1535 +- *
1536 +- * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
1537 +- * because xsaveopt supports modified optimization which is not
1538 +- * supported by xsave.
1539 +- *
1540 +- * If none of xsaves and xsaveopt is enabled, use xsave.
1541 +- */
1542 +- alternative_input_2(
1543 +- "1:"XSAVE,
1544 +- XSAVEOPT,
1545 +- X86_FEATURE_XSAVEOPT,
1546 +- XSAVES,
1547 +- X86_FEATURE_XSAVES,
1548 +- [xstate] "D" (xstate), "a" (lmask), "d" (hmask) :
1549 +- "memory");
1550 +- asm volatile("2:\n\t"
1551 +- xstate_fault(err)
1552 +- : "0" (err)
1553 +- : "memory");
1554 ++ XSTATE_XSAVE(xstate, lmask, hmask, err);
1555 +
1556 + /* We should never fault when copying to a kernel buffer: */
1557 + WARN_ON_FPU(err);
1558 +@@ -345,23 +357,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
1559 + {
1560 + u32 lmask = mask;
1561 + u32 hmask = mask >> 32;
1562 +- int err = 0;
1563 ++ int err;
1564 +
1565 +- /*
1566 +- * Use xrstors to restore context if it is enabled. xrstors supports
1567 +- * compacted format of xsave area which is not supported by xrstor.
1568 +- */
1569 +- alternative_input(
1570 +- "1: " XRSTOR,
1571 +- XRSTORS,
1572 +- X86_FEATURE_XSAVES,
1573 +- "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask)
1574 +- : "memory");
1575 +-
1576 +- asm volatile("2:\n"
1577 +- xstate_fault(err)
1578 +- : "0" (err)
1579 +- : "memory");
1580 ++ XSTATE_XRESTORE(xstate, lmask, hmask, err);
1581 +
1582 + /* We should never fault when copying from a kernel buffer: */
1583 + WARN_ON_FPU(err);
1584 +@@ -389,12 +387,10 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
1585 + if (unlikely(err))
1586 + return -EFAULT;
1587 +
1588 +- __asm__ __volatile__(ASM_STAC "\n"
1589 +- "1:"XSAVE"\n"
1590 +- "2: " ASM_CLAC "\n"
1591 +- xstate_fault(err)
1592 +- : "D" (buf), "a" (-1), "d" (-1), "0" (err)
1593 +- : "memory");
1594 ++ stac();
1595 ++ XSTATE_OP(XSAVE, buf, -1, -1, err);
1596 ++ clac();
1597 ++
1598 + return err;
1599 + }
1600 +
1601 +@@ -406,14 +402,12 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
1602 + struct xregs_state *xstate = ((__force struct xregs_state *)buf);
1603 + u32 lmask = mask;
1604 + u32 hmask = mask >> 32;
1605 +- int err = 0;
1606 +-
1607 +- __asm__ __volatile__(ASM_STAC "\n"
1608 +- "1:"XRSTOR"\n"
1609 +- "2: " ASM_CLAC "\n"
1610 +- xstate_fault(err)
1611 +- : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
1612 +- : "memory"); /* memory required? */
1613 ++ int err;
1614 ++
1615 ++ stac();
1616 ++ XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
1617 ++ clac();
1618 ++
1619 + return err;
1620 + }
1621 +
1622 +@@ -467,7 +461,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
1623 + * pending. Clear the x87 state here by setting it to fixed values.
1624 + * "m" is a random variable that should be in L1.
1625 + */
1626 +- if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
1627 ++ if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
1628 + asm volatile(
1629 + "fnclex\n\t"
1630 + "emms\n\t"
1631 +diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
1632 +index 78162f8e248b..d0afb05c84fc 100644
1633 +--- a/arch/x86/include/asm/irq_work.h
1634 ++++ b/arch/x86/include/asm/irq_work.h
1635 +@@ -1,7 +1,7 @@
1636 + #ifndef _ASM_IRQ_WORK_H
1637 + #define _ASM_IRQ_WORK_H
1638 +
1639 +-#include <asm/processor.h>
1640 ++#include <asm/cpufeature.h>
1641 +
1642 + static inline bool arch_irq_work_has_interrupt(void)
1643 + {
1644 +diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
1645 +index c70689b5e5aa..0deeb2d26df7 100644
1646 +--- a/arch/x86/include/asm/mwait.h
1647 ++++ b/arch/x86/include/asm/mwait.h
1648 +@@ -3,6 +3,8 @@
1649 +
1650 + #include <linux/sched.h>
1651 +
1652 ++#include <asm/cpufeature.h>
1653 ++
1654 + #define MWAIT_SUBSTATE_MASK 0xf
1655 + #define MWAIT_CSTATE_MASK 0xf
1656 + #define MWAIT_SUBSTATE_SIZE 4
1657 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
1658 +index 249f1c769f21..8b910416243c 100644
1659 +--- a/arch/x86/include/asm/nospec-branch.h
1660 ++++ b/arch/x86/include/asm/nospec-branch.h
1661 +@@ -5,7 +5,7 @@
1662 +
1663 + #include <asm/alternative.h>
1664 + #include <asm/alternative-asm.h>
1665 +-#include <asm/cpufeature.h>
1666 ++#include <asm/cpufeatures.h>
1667 +
1668 + /*
1669 + * Fill the CPU return stack buffer.
1670 +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
1671 +index 9e77cea2a8ef..8e415cf65457 100644
1672 +--- a/arch/x86/include/asm/processor.h
1673 ++++ b/arch/x86/include/asm/processor.h
1674 +@@ -13,7 +13,7 @@ struct vm86;
1675 + #include <asm/types.h>
1676 + #include <uapi/asm/sigcontext.h>
1677 + #include <asm/current.h>
1678 +-#include <asm/cpufeature.h>
1679 ++#include <asm/cpufeatures.h>
1680 + #include <asm/page.h>
1681 + #include <asm/pgtable_types.h>
1682 + #include <asm/percpu.h>
1683 +@@ -24,7 +24,6 @@ struct vm86;
1684 + #include <asm/fpu/types.h>
1685 +
1686 + #include <linux/personality.h>
1687 +-#include <linux/cpumask.h>
1688 + #include <linux/cache.h>
1689 + #include <linux/threads.h>
1690 + #include <linux/math64.h>
1691 +diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
1692 +index 5c6e4fb370f5..fac9a5c0abe9 100644
1693 +--- a/arch/x86/include/asm/required-features.h
1694 ++++ b/arch/x86/include/asm/required-features.h
1695 +@@ -92,5 +92,14 @@
1696 + #define REQUIRED_MASK7 0
1697 + #define REQUIRED_MASK8 0
1698 + #define REQUIRED_MASK9 0
1699 ++#define REQUIRED_MASK10 0
1700 ++#define REQUIRED_MASK11 0
1701 ++#define REQUIRED_MASK12 0
1702 ++#define REQUIRED_MASK13 0
1703 ++#define REQUIRED_MASK14 0
1704 ++#define REQUIRED_MASK15 0
1705 ++#define REQUIRED_MASK16 0
1706 ++#define REQUIRED_MASK17 0
1707 ++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
1708 +
1709 + #endif /* _ASM_X86_REQUIRED_FEATURES_H */
1710 +diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
1711 +index ba665ebd17bb..db333300bd4b 100644
1712 +--- a/arch/x86/include/asm/smap.h
1713 ++++ b/arch/x86/include/asm/smap.h
1714 +@@ -15,7 +15,7 @@
1715 +
1716 + #include <linux/stringify.h>
1717 + #include <asm/nops.h>
1718 +-#include <asm/cpufeature.h>
1719 ++#include <asm/cpufeatures.h>
1720 +
1721 + /* "Raw" instruction opcodes */
1722 + #define __ASM_CLAC .byte 0x0f,0x01,0xca
1723 +diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
1724 +index a438c5598a90..04d6eef5f8a5 100644
1725 +--- a/arch/x86/include/asm/smp.h
1726 ++++ b/arch/x86/include/asm/smp.h
1727 +@@ -16,7 +16,6 @@
1728 + #endif
1729 + #include <asm/thread_info.h>
1730 + #include <asm/cpumask.h>
1731 +-#include <asm/cpufeature.h>
1732 +
1733 + extern int smp_num_siblings;
1734 + extern unsigned int num_processors;
1735 +diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
1736 +index 9b028204685d..18c9aaa8c043 100644
1737 +--- a/arch/x86/include/asm/thread_info.h
1738 ++++ b/arch/x86/include/asm/thread_info.h
1739 +@@ -49,7 +49,7 @@
1740 + */
1741 + #ifndef __ASSEMBLY__
1742 + struct task_struct;
1743 +-#include <asm/processor.h>
1744 ++#include <asm/cpufeature.h>
1745 + #include <linux/atomic.h>
1746 +
1747 + struct thread_info {
1748 +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
1749 +index a691b66cc40a..e2a89d2577fb 100644
1750 +--- a/arch/x86/include/asm/tlbflush.h
1751 ++++ b/arch/x86/include/asm/tlbflush.h
1752 +@@ -5,6 +5,7 @@
1753 + #include <linux/sched.h>
1754 +
1755 + #include <asm/processor.h>
1756 ++#include <asm/cpufeature.h>
1757 + #include <asm/special_insns.h>
1758 + #include <asm/smp.h>
1759 +
1760 +diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
1761 +index f2f9b39b274a..d83a55b95a48 100644
1762 +--- a/arch/x86/include/asm/uaccess_64.h
1763 ++++ b/arch/x86/include/asm/uaccess_64.h
1764 +@@ -8,7 +8,7 @@
1765 + #include <linux/errno.h>
1766 + #include <linux/lockdep.h>
1767 + #include <asm/alternative.h>
1768 +-#include <asm/cpufeature.h>
1769 ++#include <asm/cpufeatures.h>
1770 + #include <asm/page.h>
1771 +
1772 + /*
1773 +diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
1774 +index 2bd2292a316d..bac0805ea1d9 100644
1775 +--- a/arch/x86/kernel/apic/apic_numachip.c
1776 ++++ b/arch/x86/kernel/apic/apic_numachip.c
1777 +@@ -30,7 +30,7 @@ static unsigned int numachip1_get_apic_id(unsigned long x)
1778 + unsigned long value;
1779 + unsigned int id = (x >> 24) & 0xff;
1780 +
1781 +- if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
1782 ++ if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
1783 + rdmsrl(MSR_FAM10H_NODE_ID, value);
1784 + id |= (value << 2) & 0xff00;
1785 + }
1786 +@@ -178,7 +178,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
1787 + this_cpu_write(cpu_llc_id, node);
1788 +
1789 + /* Account for nodes per socket in multi-core-module processors */
1790 +- if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
1791 ++ if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
1792 + rdmsrl(MSR_FAM10H_NODE_ID, val);
1793 + nodes = ((val >> 3) & 7) + 1;
1794 + }
1795 +diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
1796 +index 8f184615053b..924b65794abd 100644
1797 +--- a/arch/x86/kernel/cpu/Makefile
1798 ++++ b/arch/x86/kernel/cpu/Makefile
1799 +@@ -62,7 +62,7 @@ ifdef CONFIG_X86_FEATURE_NAMES
1800 + quiet_cmd_mkcapflags = MKCAP $@
1801 + cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@
1802 +
1803 +-cpufeature = $(src)/../../include/asm/cpufeature.h
1804 ++cpufeature = $(src)/../../include/asm/cpufeatures.h
1805 +
1806 + targets += capflags.c
1807 + $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
1808 +diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
1809 +index d8fba5c15fbd..6608c03c2126 100644
1810 +--- a/arch/x86/kernel/cpu/centaur.c
1811 ++++ b/arch/x86/kernel/cpu/centaur.c
1812 +@@ -1,7 +1,7 @@
1813 + #include <linux/bitops.h>
1814 + #include <linux/kernel.h>
1815 +
1816 +-#include <asm/processor.h>
1817 ++#include <asm/cpufeature.h>
1818 + #include <asm/e820.h>
1819 + #include <asm/mtrr.h>
1820 + #include <asm/msr.h>
1821 +@@ -43,7 +43,7 @@ static void init_c3(struct cpuinfo_x86 *c)
1822 + /* store Centaur Extended Feature Flags as
1823 + * word 5 of the CPU capability bit array
1824 + */
1825 +- c->x86_capability[5] = cpuid_edx(0xC0000001);
1826 ++ c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
1827 + }
1828 + #ifdef CONFIG_X86_32
1829 + /* Cyrix III family needs CX8 & PGE explicitly enabled. */
1830 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
1831 +index 0498ad3702f5..814276d0eed1 100644
1832 +--- a/arch/x86/kernel/cpu/common.c
1833 ++++ b/arch/x86/kernel/cpu/common.c
1834 +@@ -676,50 +676,48 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
1835 +
1836 + void get_cpu_cap(struct cpuinfo_x86 *c)
1837 + {
1838 +- u32 tfms, xlvl;
1839 +- u32 ebx;
1840 ++ u32 eax, ebx, ecx, edx;
1841 +
1842 + /* Intel-defined flags: level 0x00000001 */
1843 + if (c->cpuid_level >= 0x00000001) {
1844 +- u32 capability, excap;
1845 ++ cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
1846 +
1847 +- cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
1848 +- c->x86_capability[0] = capability;
1849 +- c->x86_capability[4] = excap;
1850 ++ c->x86_capability[CPUID_1_ECX] = ecx;
1851 ++ c->x86_capability[CPUID_1_EDX] = edx;
1852 + }
1853 +
1854 + /* Additional Intel-defined flags: level 0x00000007 */
1855 + if (c->cpuid_level >= 0x00000007) {
1856 +- u32 eax, ebx, ecx, edx;
1857 +-
1858 + cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
1859 +
1860 +- c->x86_capability[9] = ebx;
1861 ++ c->x86_capability[CPUID_7_0_EBX] = ebx;
1862 ++
1863 ++ c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
1864 ++ c->x86_capability[CPUID_7_ECX] = ecx;
1865 + }
1866 +
1867 + /* Extended state features: level 0x0000000d */
1868 + if (c->cpuid_level >= 0x0000000d) {
1869 +- u32 eax, ebx, ecx, edx;
1870 +-
1871 + cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
1872 +
1873 +- c->x86_capability[10] = eax;
1874 ++ c->x86_capability[CPUID_D_1_EAX] = eax;
1875 + }
1876 +
1877 + /* Additional Intel-defined flags: level 0x0000000F */
1878 + if (c->cpuid_level >= 0x0000000F) {
1879 +- u32 eax, ebx, ecx, edx;
1880 +
1881 + /* QoS sub-leaf, EAX=0Fh, ECX=0 */
1882 + cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
1883 +- c->x86_capability[11] = edx;
1884 ++ c->x86_capability[CPUID_F_0_EDX] = edx;
1885 ++
1886 + if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
1887 + /* will be overridden if occupancy monitoring exists */
1888 + c->x86_cache_max_rmid = ebx;
1889 +
1890 + /* QoS sub-leaf, EAX=0Fh, ECX=1 */
1891 + cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
1892 +- c->x86_capability[12] = edx;
1893 ++ c->x86_capability[CPUID_F_1_EDX] = edx;
1894 ++
1895 + if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
1896 + c->x86_cache_max_rmid = ecx;
1897 + c->x86_cache_occ_scale = ebx;
1898 +@@ -731,30 +729,39 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
1899 + }
1900 +
1901 + /* AMD-defined flags: level 0x80000001 */
1902 +- xlvl = cpuid_eax(0x80000000);
1903 +- c->extended_cpuid_level = xlvl;
1904 ++ eax = cpuid_eax(0x80000000);
1905 ++ c->extended_cpuid_level = eax;
1906 ++
1907 ++ if ((eax & 0xffff0000) == 0x80000000) {
1908 ++ if (eax >= 0x80000001) {
1909 ++ cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
1910 +
1911 +- if ((xlvl & 0xffff0000) == 0x80000000) {
1912 +- if (xlvl >= 0x80000001) {
1913 +- c->x86_capability[1] = cpuid_edx(0x80000001);
1914 +- c->x86_capability[6] = cpuid_ecx(0x80000001);
1915 ++ c->x86_capability[CPUID_8000_0001_ECX] = ecx;
1916 ++ c->x86_capability[CPUID_8000_0001_EDX] = edx;
1917 + }
1918 + }
1919 +
1920 ++ if (c->extended_cpuid_level >= 0x80000007) {
1921 ++ cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
1922 ++
1923 ++ c->x86_capability[CPUID_8000_0007_EBX] = ebx;
1924 ++ c->x86_power = edx;
1925 ++ }
1926 ++
1927 + if (c->extended_cpuid_level >= 0x80000008) {
1928 +- u32 eax = cpuid_eax(0x80000008);
1929 ++ cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1930 +
1931 + c->x86_virt_bits = (eax >> 8) & 0xff;
1932 + c->x86_phys_bits = eax & 0xff;
1933 +- c->x86_capability[13] = cpuid_ebx(0x80000008);
1934 ++ c->x86_capability[CPUID_8000_0008_EBX] = ebx;
1935 + }
1936 + #ifdef CONFIG_X86_32
1937 + else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
1938 + c->x86_phys_bits = 36;
1939 + #endif
1940 +
1941 +- if (c->extended_cpuid_level >= 0x80000007)
1942 +- c->x86_power = cpuid_edx(0x80000007);
1943 ++ if (c->extended_cpuid_level >= 0x8000000a)
1944 ++ c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
1945 +
1946 + init_scattered_cpuid_features(c);
1947 + }
1948 +@@ -1574,20 +1581,6 @@ void cpu_init(void)
1949 + }
1950 + #endif
1951 +
1952 +-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
1953 +-void warn_pre_alternatives(void)
1954 +-{
1955 +- WARN(1, "You're using static_cpu_has before alternatives have run!\n");
1956 +-}
1957 +-EXPORT_SYMBOL_GPL(warn_pre_alternatives);
1958 +-#endif
1959 +-
1960 +-inline bool __static_cpu_has_safe(u16 bit)
1961 +-{
1962 +- return boot_cpu_has(bit);
1963 +-}
1964 +-EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
1965 +-
1966 + static void bsp_resume(void)
1967 + {
1968 + if (this_cpu->c_bsp_resume)
1969 +diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
1970 +index aaf152e79637..15e47c1cd412 100644
1971 +--- a/arch/x86/kernel/cpu/cyrix.c
1972 ++++ b/arch/x86/kernel/cpu/cyrix.c
1973 +@@ -8,6 +8,7 @@
1974 + #include <linux/timer.h>
1975 + #include <asm/pci-direct.h>
1976 + #include <asm/tsc.h>
1977 ++#include <asm/cpufeature.h>
1978 +
1979 + #include "cpu.h"
1980 +
1981 +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
1982 +index 565648bc1a0a..9299e3bdfad6 100644
1983 +--- a/arch/x86/kernel/cpu/intel.c
1984 ++++ b/arch/x86/kernel/cpu/intel.c
1985 +@@ -8,7 +8,7 @@
1986 + #include <linux/module.h>
1987 + #include <linux/uaccess.h>
1988 +
1989 +-#include <asm/processor.h>
1990 ++#include <asm/cpufeature.h>
1991 + #include <asm/pgtable.h>
1992 + #include <asm/msr.h>
1993 + #include <asm/bugs.h>
1994 +diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
1995 +index 3fa72317ad78..3557b3ceab14 100644
1996 +--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
1997 ++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
1998 +@@ -14,7 +14,7 @@
1999 + #include <linux/sysfs.h>
2000 + #include <linux/pci.h>
2001 +
2002 +-#include <asm/processor.h>
2003 ++#include <asm/cpufeature.h>
2004 + #include <asm/amd_nb.h>
2005 + #include <asm/smp.h>
2006 +
2007 +diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
2008 +index afa9f0d487ea..fbb5e90557a5 100644
2009 +--- a/arch/x86/kernel/cpu/match.c
2010 ++++ b/arch/x86/kernel/cpu/match.c
2011 +@@ -1,5 +1,5 @@
2012 + #include <asm/cpu_device_id.h>
2013 +-#include <asm/processor.h>
2014 ++#include <asm/cpufeature.h>
2015 + #include <linux/cpu.h>
2016 + #include <linux/module.h>
2017 + #include <linux/slab.h>
2018 +diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
2019 +index 3f20710a5b23..6988c74409a8 100644
2020 +--- a/arch/x86/kernel/cpu/mkcapflags.sh
2021 ++++ b/arch/x86/kernel/cpu/mkcapflags.sh
2022 +@@ -1,6 +1,6 @@
2023 + #!/bin/sh
2024 + #
2025 +-# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeature.h
2026 ++# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
2027 + #
2028 +
2029 + IN=$1
2030 +@@ -49,8 +49,8 @@ dump_array()
2031 + trap 'rm "$OUT"' EXIT
2032 +
2033 + (
2034 +- echo "#ifndef _ASM_X86_CPUFEATURE_H"
2035 +- echo "#include <asm/cpufeature.h>"
2036 ++ echo "#ifndef _ASM_X86_CPUFEATURES_H"
2037 ++ echo "#include <asm/cpufeatures.h>"
2038 + echo "#endif"
2039 + echo ""
2040 +
2041 +diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
2042 +index f924f41af89a..49bd700d9b7f 100644
2043 +--- a/arch/x86/kernel/cpu/mtrr/main.c
2044 ++++ b/arch/x86/kernel/cpu/mtrr/main.c
2045 +@@ -47,7 +47,7 @@
2046 + #include <linux/smp.h>
2047 + #include <linux/syscore_ops.h>
2048 +
2049 +-#include <asm/processor.h>
2050 ++#include <asm/cpufeature.h>
2051 + #include <asm/e820.h>
2052 + #include <asm/mtrr.h>
2053 + #include <asm/msr.h>
2054 +diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
2055 +index 608fb26c7254..8cb57df9398d 100644
2056 +--- a/arch/x86/kernel/cpu/scattered.c
2057 ++++ b/arch/x86/kernel/cpu/scattered.c
2058 +@@ -31,32 +31,12 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
2059 + const struct cpuid_bit *cb;
2060 +
2061 + static const struct cpuid_bit cpuid_bits[] = {
2062 +- { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
2063 +- { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
2064 +- { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
2065 +- { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
2066 +- { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 },
2067 +- { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 },
2068 +- { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 },
2069 +- { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 },
2070 +- { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 },
2071 +- { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },
2072 + { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
2073 + { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
2074 + { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
2075 + { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
2076 + { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 },
2077 + { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 },
2078 +- { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 },
2079 +- { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 },
2080 +- { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 },
2081 +- { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 },
2082 +- { X86_FEATURE_TSCRATEMSR, CR_EDX, 4, 0x8000000a, 0 },
2083 +- { X86_FEATURE_VMCBCLEAN, CR_EDX, 5, 0x8000000a, 0 },
2084 +- { X86_FEATURE_FLUSHBYASID, CR_EDX, 6, 0x8000000a, 0 },
2085 +- { X86_FEATURE_DECODEASSISTS, CR_EDX, 7, 0x8000000a, 0 },
2086 +- { X86_FEATURE_PAUSEFILTER, CR_EDX,10, 0x8000000a, 0 },
2087 +- { X86_FEATURE_PFTHRESHOLD, CR_EDX,12, 0x8000000a, 0 },
2088 + { 0, 0, 0, 0, 0 }
2089 + };
2090 +
2091 +diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
2092 +index 3fa0e5ad86b4..a19a663282b5 100644
2093 +--- a/arch/x86/kernel/cpu/transmeta.c
2094 ++++ b/arch/x86/kernel/cpu/transmeta.c
2095 +@@ -1,6 +1,6 @@
2096 + #include <linux/kernel.h>
2097 + #include <linux/mm.h>
2098 +-#include <asm/processor.h>
2099 ++#include <asm/cpufeature.h>
2100 + #include <asm/msr.h>
2101 + #include "cpu.h"
2102 +
2103 +@@ -12,7 +12,7 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
2104 + xlvl = cpuid_eax(0x80860000);
2105 + if ((xlvl & 0xffff0000) == 0x80860000) {
2106 + if (xlvl >= 0x80860001)
2107 +- c->x86_capability[2] = cpuid_edx(0x80860001);
2108 ++ c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
2109 + }
2110 + }
2111 +
2112 +@@ -82,7 +82,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
2113 + /* Unhide possibly hidden capability flags */
2114 + rdmsr(0x80860004, cap_mask, uk);
2115 + wrmsr(0x80860004, ~0, uk);
2116 +- c->x86_capability[0] = cpuid_edx(0x00000001);
2117 ++ c->x86_capability[CPUID_1_EDX] = cpuid_edx(0x00000001);
2118 + wrmsr(0x80860004, cap_mask, uk);
2119 +
2120 + /* All Transmeta CPUs have a constant TSC */
2121 +diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
2122 +index 52a2526c3fbe..19bc19d5e174 100644
2123 +--- a/arch/x86/kernel/e820.c
2124 ++++ b/arch/x86/kernel/e820.c
2125 +@@ -24,6 +24,7 @@
2126 + #include <asm/e820.h>
2127 + #include <asm/proto.h>
2128 + #include <asm/setup.h>
2129 ++#include <asm/cpufeature.h>
2130 +
2131 + /*
2132 + * The e820 map is the map that gets modified e.g. with command line parameters
2133 +diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
2134 +index 70284d38fdc2..1c0b49fd6365 100644
2135 +--- a/arch/x86/kernel/head_32.S
2136 ++++ b/arch/x86/kernel/head_32.S
2137 +@@ -19,7 +19,7 @@
2138 + #include <asm/setup.h>
2139 + #include <asm/processor-flags.h>
2140 + #include <asm/msr-index.h>
2141 +-#include <asm/cpufeature.h>
2142 ++#include <asm/cpufeatures.h>
2143 + #include <asm/percpu.h>
2144 + #include <asm/nops.h>
2145 + #include <asm/bootparam.h>
2146 +diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
2147 +index 4034e905741a..734ba1d0f686 100644
2148 +--- a/arch/x86/kernel/head_64.S
2149 ++++ b/arch/x86/kernel/head_64.S
2150 +@@ -76,9 +76,7 @@ startup_64:
2151 + subq $_text - __START_KERNEL_map, %rbp
2152 +
2153 + /* Is the address not 2M aligned? */
2154 +- movq %rbp, %rax
2155 +- andl $~PMD_PAGE_MASK, %eax
2156 +- testl %eax, %eax
2157 ++ testl $~PMD_PAGE_MASK, %ebp
2158 + jnz bad_address
2159 +
2160 + /*
2161 +diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
2162 +index f48eb8eeefe2..3fdc1e53aaac 100644
2163 +--- a/arch/x86/kernel/hpet.c
2164 ++++ b/arch/x86/kernel/hpet.c
2165 +@@ -12,6 +12,7 @@
2166 + #include <linux/pm.h>
2167 + #include <linux/io.h>
2168 +
2169 ++#include <asm/cpufeature.h>
2170 + #include <asm/irqdomain.h>
2171 + #include <asm/fixmap.h>
2172 + #include <asm/hpet.h>
2173 +diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
2174 +index 113e70784854..f95ac5d435aa 100644
2175 +--- a/arch/x86/kernel/msr.c
2176 ++++ b/arch/x86/kernel/msr.c
2177 +@@ -40,7 +40,7 @@
2178 + #include <linux/uaccess.h>
2179 + #include <linux/gfp.h>
2180 +
2181 +-#include <asm/processor.h>
2182 ++#include <asm/cpufeature.h>
2183 + #include <asm/msr.h>
2184 +
2185 + static struct class *msr_class;
2186 +diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
2187 +index c6aace2bbe08..b8105289c60b 100644
2188 +--- a/arch/x86/kernel/uprobes.c
2189 ++++ b/arch/x86/kernel/uprobes.c
2190 +@@ -290,7 +290,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
2191 + insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
2192 + /* has the side-effect of processing the entire instruction */
2193 + insn_get_length(insn);
2194 +- if (WARN_ON_ONCE(!insn_complete(insn)))
2195 ++ if (!insn_complete(insn))
2196 + return -ENOEXEC;
2197 +
2198 + if (is_prefix_bad(insn))
2199 +diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
2200 +index 4cf401f581e7..b7c9db5deebe 100644
2201 +--- a/arch/x86/kernel/verify_cpu.S
2202 ++++ b/arch/x86/kernel/verify_cpu.S
2203 +@@ -30,7 +30,7 @@
2204 + * appropriately. Either display a message or halt.
2205 + */
2206 +
2207 +-#include <asm/cpufeature.h>
2208 ++#include <asm/cpufeatures.h>
2209 + #include <asm/msr-index.h>
2210 +
2211 + verify_cpu:
2212 +diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
2213 +index d6d64a519559..7f4839ef3608 100644
2214 +--- a/arch/x86/kernel/vm86_32.c
2215 ++++ b/arch/x86/kernel/vm86_32.c
2216 +@@ -358,7 +358,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
2217 + /* make room for real-mode segments */
2218 + tsk->thread.sp0 += 16;
2219 +
2220 +- if (static_cpu_has_safe(X86_FEATURE_SEP))
2221 ++ if (static_cpu_has(X86_FEATURE_SEP))
2222 + tsk->thread.sysenter_cs = 0;
2223 +
2224 + load_sp0(tss, &tsk->thread);
2225 +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
2226 +index e065065a4dfb..a703842b54de 100644
2227 +--- a/arch/x86/kernel/vmlinux.lds.S
2228 ++++ b/arch/x86/kernel/vmlinux.lds.S
2229 +@@ -202,6 +202,17 @@ SECTIONS
2230 + :init
2231 + #endif
2232 +
2233 ++ /*
2234 ++ * Section for code used exclusively before alternatives are run. All
2235 ++ * references to such code must be patched out by alternatives, normally
2236 ++ * by using X86_FEATURE_ALWAYS CPU feature bit.
2237 ++ *
2238 ++ * See static_cpu_has() for an example.
2239 ++ */
2240 ++ .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
2241 ++ *(.altinstr_aux)
2242 ++ }
2243 ++
2244 + INIT_DATA_SECTION(16)
2245 +
2246 + .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
2247 +diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
2248 +index a2fe51b00cce..65be7cfaf947 100644
2249 +--- a/arch/x86/lib/clear_page_64.S
2250 ++++ b/arch/x86/lib/clear_page_64.S
2251 +@@ -1,5 +1,5 @@
2252 + #include <linux/linkage.h>
2253 +-#include <asm/cpufeature.h>
2254 ++#include <asm/cpufeatures.h>
2255 + #include <asm/alternative-asm.h>
2256 +
2257 + /*
2258 +diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
2259 +index 009f98216b7e..24ef1c2104d4 100644
2260 +--- a/arch/x86/lib/copy_page_64.S
2261 ++++ b/arch/x86/lib/copy_page_64.S
2262 +@@ -1,7 +1,7 @@
2263 + /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
2264 +
2265 + #include <linux/linkage.h>
2266 +-#include <asm/cpufeature.h>
2267 ++#include <asm/cpufeatures.h>
2268 + #include <asm/alternative-asm.h>
2269 +
2270 + /*
2271 +diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
2272 +index 423644c230e7..accf7f2f557f 100644
2273 +--- a/arch/x86/lib/copy_user_64.S
2274 ++++ b/arch/x86/lib/copy_user_64.S
2275 +@@ -10,7 +10,7 @@
2276 + #include <asm/current.h>
2277 + #include <asm/asm-offsets.h>
2278 + #include <asm/thread_info.h>
2279 +-#include <asm/cpufeature.h>
2280 ++#include <asm/cpufeatures.h>
2281 + #include <asm/alternative-asm.h>
2282 + #include <asm/asm.h>
2283 + #include <asm/smap.h>
2284 +diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
2285 +index 16698bba87de..a0de849435ad 100644
2286 +--- a/arch/x86/lib/memcpy_64.S
2287 ++++ b/arch/x86/lib/memcpy_64.S
2288 +@@ -1,7 +1,7 @@
2289 + /* Copyright 2002 Andi Kleen */
2290 +
2291 + #include <linux/linkage.h>
2292 +-#include <asm/cpufeature.h>
2293 ++#include <asm/cpufeatures.h>
2294 + #include <asm/alternative-asm.h>
2295 +
2296 + /*
2297 +diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
2298 +index ca2afdd6d98e..90ce01bee00c 100644
2299 +--- a/arch/x86/lib/memmove_64.S
2300 ++++ b/arch/x86/lib/memmove_64.S
2301 +@@ -6,7 +6,7 @@
2302 + * - Copyright 2011 Fenghua Yu <fenghua.yu@×××××.com>
2303 + */
2304 + #include <linux/linkage.h>
2305 +-#include <asm/cpufeature.h>
2306 ++#include <asm/cpufeatures.h>
2307 + #include <asm/alternative-asm.h>
2308 +
2309 + #undef memmove
2310 +diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
2311 +index 2661fad05827..c9c81227ea37 100644
2312 +--- a/arch/x86/lib/memset_64.S
2313 ++++ b/arch/x86/lib/memset_64.S
2314 +@@ -1,7 +1,7 @@
2315 + /* Copyright 2002 Andi Kleen, SuSE Labs */
2316 +
2317 + #include <linux/linkage.h>
2318 +-#include <asm/cpufeature.h>
2319 ++#include <asm/cpufeatures.h>
2320 + #include <asm/alternative-asm.h>
2321 +
2322 + .weak memset
2323 +diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
2324 +index 3d06b482ebc7..7bbb853e36bd 100644
2325 +--- a/arch/x86/lib/retpoline.S
2326 ++++ b/arch/x86/lib/retpoline.S
2327 +@@ -3,7 +3,7 @@
2328 + #include <linux/stringify.h>
2329 + #include <linux/linkage.h>
2330 + #include <asm/dwarf2.h>
2331 +-#include <asm/cpufeature.h>
2332 ++#include <asm/cpufeatures.h>
2333 + #include <asm/alternative-asm.h>
2334 + #include <asm-generic/export.h>
2335 + #include <asm/nospec-branch.h>
2336 +diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
2337 +index 92e2eacb3321..f65a33f505b6 100644
2338 +--- a/arch/x86/mm/setup_nx.c
2339 ++++ b/arch/x86/mm/setup_nx.c
2340 +@@ -4,6 +4,7 @@
2341 +
2342 + #include <asm/pgtable.h>
2343 + #include <asm/proto.h>
2344 ++#include <asm/cpufeature.h>
2345 +
2346 + static int disable_nx;
2347 +
2348 +diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
2349 +index 50d86c0e9ba4..660a83c8287b 100644
2350 +--- a/arch/x86/oprofile/op_model_amd.c
2351 ++++ b/arch/x86/oprofile/op_model_amd.c
2352 +@@ -24,7 +24,6 @@
2353 + #include <asm/nmi.h>
2354 + #include <asm/apic.h>
2355 + #include <asm/processor.h>
2356 +-#include <asm/cpufeature.h>
2357 +
2358 + #include "op_x86_model.h"
2359 + #include "op_counter.h"
2360 +diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
2361 +index 755481f14d90..764ac2fc53fe 100644
2362 +--- a/arch/x86/um/asm/barrier.h
2363 ++++ b/arch/x86/um/asm/barrier.h
2364 +@@ -3,7 +3,7 @@
2365 +
2366 + #include <asm/asm.h>
2367 + #include <asm/segment.h>
2368 +-#include <asm/cpufeature.h>
2369 ++#include <asm/cpufeatures.h>
2370 + #include <asm/cmpxchg.h>
2371 + #include <asm/nops.h>
2372 +
2373 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
2374 +index 5a6a01135470..34fdaa6e99ba 100644
2375 +--- a/drivers/ata/ahci.c
2376 ++++ b/drivers/ata/ahci.c
2377 +@@ -1229,6 +1229,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
2378 + return strcmp(buf, dmi->driver_data) < 0;
2379 + }
2380 +
2381 ++static bool ahci_broken_lpm(struct pci_dev *pdev)
2382 ++{
2383 ++ static const struct dmi_system_id sysids[] = {
2384 ++ /* Various Lenovo 50 series have LPM issues with older BIOSen */
2385 ++ {
2386 ++ .matches = {
2387 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2388 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
2389 ++ },
2390 ++ .driver_data = "20180406", /* 1.31 */
2391 ++ },
2392 ++ {
2393 ++ .matches = {
2394 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2395 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
2396 ++ },
2397 ++ .driver_data = "20180420", /* 1.28 */
2398 ++ },
2399 ++ {
2400 ++ .matches = {
2401 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2402 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
2403 ++ },
2404 ++ .driver_data = "20180315", /* 1.33 */
2405 ++ },
2406 ++ {
2407 ++ .matches = {
2408 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2409 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
2410 ++ },
2411 ++ /*
2412 ++ * Note date based on release notes, 2.35 has been
2413 ++ * reported to be good, but I've been unable to get
2414 ++ * a hold of the reporter to get the DMI BIOS date.
2415 ++ * TODO: fix this.
2416 ++ */
2417 ++ .driver_data = "20180310", /* 2.35 */
2418 ++ },
2419 ++ { } /* terminate list */
2420 ++ };
2421 ++ const struct dmi_system_id *dmi = dmi_first_match(sysids);
2422 ++ int year, month, date;
2423 ++ char buf[9];
2424 ++
2425 ++ if (!dmi)
2426 ++ return false;
2427 ++
2428 ++ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2429 ++ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2430 ++
2431 ++ return strcmp(buf, dmi->driver_data) < 0;
2432 ++}
2433 ++
2434 + static bool ahci_broken_online(struct pci_dev *pdev)
2435 + {
2436 + #define ENCODE_BUSDEVFN(bus, slot, func) \
2437 +@@ -1588,6 +1641,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2438 + "quirky BIOS, skipping spindown on poweroff\n");
2439 + }
2440 +
2441 ++ if (ahci_broken_lpm(pdev)) {
2442 ++ pi.flags |= ATA_FLAG_NO_LPM;
2443 ++ dev_warn(&pdev->dev,
2444 ++ "BIOS update required for Link Power Management support\n");
2445 ++ }
2446 ++
2447 + if (ahci_broken_suspend(pdev)) {
2448 + hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
2449 + dev_warn(&pdev->dev,
2450 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
2451 +index 9afd06ee5b30..ba514fa733de 100644
2452 +--- a/drivers/ata/libata-core.c
2453 ++++ b/drivers/ata/libata-core.c
2454 +@@ -2209,6 +2209,9 @@ int ata_dev_configure(struct ata_device *dev)
2455 + (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2456 + dev->horkage |= ATA_HORKAGE_NOLPM;
2457 +
2458 ++ if (ap->flags & ATA_FLAG_NO_LPM)
2459 ++ dev->horkage |= ATA_HORKAGE_NOLPM;
2460 ++
2461 + if (dev->horkage & ATA_HORKAGE_NOLPM) {
2462 + ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2463 + dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2464 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
2465 +index e8165ec55e6f..da3902ac16c8 100644
2466 +--- a/drivers/block/loop.c
2467 ++++ b/drivers/block/loop.c
2468 +@@ -651,6 +651,36 @@ static void loop_reread_partitions(struct loop_device *lo,
2469 + __func__, lo->lo_number, lo->lo_file_name, rc);
2470 + }
2471 +
2472 ++static inline int is_loop_device(struct file *file)
2473 ++{
2474 ++ struct inode *i = file->f_mapping->host;
2475 ++
2476 ++ return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
2477 ++}
2478 ++
2479 ++static int loop_validate_file(struct file *file, struct block_device *bdev)
2480 ++{
2481 ++ struct inode *inode = file->f_mapping->host;
2482 ++ struct file *f = file;
2483 ++
2484 ++ /* Avoid recursion */
2485 ++ while (is_loop_device(f)) {
2486 ++ struct loop_device *l;
2487 ++
2488 ++ if (f->f_mapping->host->i_bdev == bdev)
2489 ++ return -EBADF;
2490 ++
2491 ++ l = f->f_mapping->host->i_bdev->bd_disk->private_data;
2492 ++ if (l->lo_state == Lo_unbound) {
2493 ++ return -EINVAL;
2494 ++ }
2495 ++ f = l->lo_backing_file;
2496 ++ }
2497 ++ if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
2498 ++ return -EINVAL;
2499 ++ return 0;
2500 ++}
2501 ++
2502 + /*
2503 + * loop_change_fd switched the backing store of a loopback device to
2504 + * a new file. This is useful for operating system installers to free up
2505 +@@ -680,14 +710,15 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
2506 + if (!file)
2507 + goto out;
2508 +
2509 ++ error = loop_validate_file(file, bdev);
2510 ++ if (error)
2511 ++ goto out_putf;
2512 ++
2513 + inode = file->f_mapping->host;
2514 + old_file = lo->lo_backing_file;
2515 +
2516 + error = -EINVAL;
2517 +
2518 +- if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
2519 +- goto out_putf;
2520 +-
2521 + /* size of the new backing store needs to be the same */
2522 + if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
2523 + goto out_putf;
2524 +@@ -708,13 +739,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
2525 + return error;
2526 + }
2527 +
2528 +-static inline int is_loop_device(struct file *file)
2529 +-{
2530 +- struct inode *i = file->f_mapping->host;
2531 +-
2532 +- return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
2533 +-}
2534 +-
2535 + /* loop sysfs attributes */
2536 +
2537 + static ssize_t loop_attr_show(struct device *dev, char *page,
2538 +@@ -811,16 +835,17 @@ static struct attribute_group loop_attribute_group = {
2539 + .attrs= loop_attrs,
2540 + };
2541 +
2542 +-static int loop_sysfs_init(struct loop_device *lo)
2543 ++static void loop_sysfs_init(struct loop_device *lo)
2544 + {
2545 +- return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
2546 +- &loop_attribute_group);
2547 ++ lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
2548 ++ &loop_attribute_group);
2549 + }
2550 +
2551 + static void loop_sysfs_exit(struct loop_device *lo)
2552 + {
2553 +- sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
2554 +- &loop_attribute_group);
2555 ++ if (lo->sysfs_inited)
2556 ++ sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
2557 ++ &loop_attribute_group);
2558 + }
2559 +
2560 + static void loop_config_discard(struct loop_device *lo)
2561 +@@ -872,7 +897,7 @@ static int loop_prepare_queue(struct loop_device *lo)
2562 + static int loop_set_fd(struct loop_device *lo, fmode_t mode,
2563 + struct block_device *bdev, unsigned int arg)
2564 + {
2565 +- struct file *file, *f;
2566 ++ struct file *file;
2567 + struct inode *inode;
2568 + struct address_space *mapping;
2569 + unsigned lo_blocksize;
2570 +@@ -892,29 +917,13 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
2571 + if (lo->lo_state != Lo_unbound)
2572 + goto out_putf;
2573 +
2574 +- /* Avoid recursion */
2575 +- f = file;
2576 +- while (is_loop_device(f)) {
2577 +- struct loop_device *l;
2578 +-
2579 +- if (f->f_mapping->host->i_bdev == bdev)
2580 +- goto out_putf;
2581 +-
2582 +- l = f->f_mapping->host->i_bdev->bd_disk->private_data;
2583 +- if (l->lo_state == Lo_unbound) {
2584 +- error = -EINVAL;
2585 +- goto out_putf;
2586 +- }
2587 +- f = l->lo_backing_file;
2588 +- }
2589 ++ error = loop_validate_file(file, bdev);
2590 ++ if (error)
2591 ++ goto out_putf;
2592 +
2593 + mapping = file->f_mapping;
2594 + inode = mapping->host;
2595 +
2596 +- error = -EINVAL;
2597 +- if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
2598 +- goto out_putf;
2599 +-
2600 + if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
2601 + !file->f_op->write_iter)
2602 + lo_flags |= LO_FLAGS_READ_ONLY;
2603 +diff --git a/drivers/block/loop.h b/drivers/block/loop.h
2604 +index fb2237c73e61..60f0fd2c0c65 100644
2605 +--- a/drivers/block/loop.h
2606 ++++ b/drivers/block/loop.h
2607 +@@ -59,6 +59,7 @@ struct loop_device {
2608 + struct kthread_worker worker;
2609 + struct task_struct *worker_task;
2610 + bool use_dio;
2611 ++ bool sysfs_inited;
2612 +
2613 + struct request_queue *lo_queue;
2614 + struct blk_mq_tag_set tag_set;
2615 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2616 +index b316ab7e8996..60e2c9faa95f 100644
2617 +--- a/drivers/hid/hid-ids.h
2618 ++++ b/drivers/hid/hid-ids.h
2619 +@@ -512,6 +512,9 @@
2620 + #define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615
2621 + #define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070
2622 +
2623 ++#define USB_VENDOR_ID_INNOMEDIA 0x1292
2624 ++#define USB_DEVICE_ID_INNEX_GENESIS_ATARI 0x4745
2625 ++
2626 + #define USB_VENDOR_ID_ITE 0x048d
2627 + #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
2628 + #define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
2629 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
2630 +index ce1543d69acb..c9a11315493b 100644
2631 +--- a/drivers/hid/usbhid/hid-quirks.c
2632 ++++ b/drivers/hid/usbhid/hid-quirks.c
2633 +@@ -152,6 +152,7 @@ static const struct hid_blacklist {
2634 + { USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT },
2635 + { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
2636 + { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
2637 ++ { USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT },
2638 +
2639 + { 0, 0 }
2640 + };
2641 +diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
2642 +index aa26f3c3416b..c151bb625179 100644
2643 +--- a/drivers/infiniband/Kconfig
2644 ++++ b/drivers/infiniband/Kconfig
2645 +@@ -33,6 +33,18 @@ config INFINIBAND_USER_ACCESS
2646 + libibverbs, libibcm and a hardware driver library from
2647 + <http://www.openfabrics.org/git/>.
2648 +
2649 ++config INFINIBAND_USER_ACCESS_UCM
2650 ++ bool "Userspace CM (UCM, DEPRECATED)"
2651 ++ depends on BROKEN
2652 ++ depends on INFINIBAND_USER_ACCESS
2653 ++ help
2654 ++ The UCM module has known security flaws, which no one is
2655 ++ interested to fix. The user-space part of this code was
2656 ++ dropped from the upstream a long time ago.
2657 ++
2658 ++ This option is DEPRECATED and planned to be removed.
2659 ++
2660 ++
2661 + config INFINIBAND_USER_MEM
2662 + bool
2663 + depends on INFINIBAND_USER_ACCESS != n
2664 +diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
2665 +index d43a8994ac5c..737612a442be 100644
2666 +--- a/drivers/infiniband/core/Makefile
2667 ++++ b/drivers/infiniband/core/Makefile
2668 +@@ -5,8 +5,8 @@ obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
2669 + ib_cm.o iw_cm.o ib_addr.o \
2670 + $(infiniband-y)
2671 + obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
2672 +-obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
2673 +- $(user_access-y)
2674 ++obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
2675 ++obj-$(CONFIG_INFINIBAND_USER_ACCESS_UCM) += ib_ucm.o $(user_access-y)
2676 +
2677 + ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
2678 + device.o fmr_pool.o cache.o netlink.o \
2679 +diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
2680 +index e1629ab58db7..8218d714fa01 100644
2681 +--- a/drivers/infiniband/hw/cxgb4/mem.c
2682 ++++ b/drivers/infiniband/hw/cxgb4/mem.c
2683 +@@ -926,7 +926,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
2684 + {
2685 + struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
2686 +
2687 +- if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
2688 ++ if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
2689 + return -ENOMEM;
2690 +
2691 + mhp->mpl[mhp->mpl_len++] = addr;
2692 +diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
2693 +index e8b933111e0d..92109cadc3fc 100644
2694 +--- a/drivers/misc/ibmasm/ibmasmfs.c
2695 ++++ b/drivers/misc/ibmasm/ibmasmfs.c
2696 +@@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file)
2697 + static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
2698 + {
2699 + void __iomem *address = (void __iomem *)file->private_data;
2700 +- unsigned char *page;
2701 +- int retval;
2702 + int len = 0;
2703 + unsigned int value;
2704 +-
2705 +- if (*offset < 0)
2706 +- return -EINVAL;
2707 +- if (count == 0 || count > 1024)
2708 +- return 0;
2709 +- if (*offset != 0)
2710 +- return 0;
2711 +-
2712 +- page = (unsigned char *)__get_free_page(GFP_KERNEL);
2713 +- if (!page)
2714 +- return -ENOMEM;
2715 ++ char lbuf[20];
2716 +
2717 + value = readl(address);
2718 +- len = sprintf(page, "%d\n", value);
2719 +-
2720 +- if (copy_to_user(buf, page, len)) {
2721 +- retval = -EFAULT;
2722 +- goto exit;
2723 +- }
2724 +- *offset += len;
2725 +- retval = len;
2726 ++ len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
2727 +
2728 +-exit:
2729 +- free_page((unsigned long)page);
2730 +- return retval;
2731 ++ return simple_read_from_buffer(buf, count, offset, lbuf, len);
2732 + }
2733 +
2734 + static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
2735 +diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
2736 +index fe90b7e04427..5e047bfc0cc4 100644
2737 +--- a/drivers/misc/vmw_balloon.c
2738 ++++ b/drivers/misc/vmw_balloon.c
2739 +@@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b,
2740 + unsigned int num_pages, bool is_2m_pages, unsigned int *target)
2741 + {
2742 + unsigned long status;
2743 +- unsigned long pfn = page_to_pfn(b->page);
2744 ++ unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
2745 +
2746 + STATS_INC(b->stats.lock[is_2m_pages]);
2747 +
2748 +@@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b,
2749 + unsigned int num_pages, bool is_2m_pages, unsigned int *target)
2750 + {
2751 + unsigned long status;
2752 +- unsigned long pfn = page_to_pfn(b->page);
2753 ++ unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
2754 +
2755 + STATS_INC(b->stats.unlock[is_2m_pages]);
2756 +
2757 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
2758 +index 40ce175655e6..99f67764765f 100644
2759 +--- a/drivers/usb/core/quirks.c
2760 ++++ b/drivers/usb/core/quirks.c
2761 +@@ -231,6 +231,10 @@ static const struct usb_device_id usb_quirk_list[] = {
2762 + /* Corsair K70 RGB */
2763 + { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
2764 +
2765 ++ /* Corsair Strafe */
2766 ++ { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
2767 ++ USB_QUIRK_DELAY_CTRL_MSG },
2768 ++
2769 + /* Corsair Strafe RGB */
2770 + { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
2771 + USB_QUIRK_DELAY_CTRL_MSG },
2772 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
2773 +index e4cf3322bcb3..0ec809a35a3f 100644
2774 +--- a/drivers/usb/host/xhci-mem.c
2775 ++++ b/drivers/usb/host/xhci-mem.c
2776 +@@ -638,7 +638,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
2777 + if (!ep->stream_info)
2778 + return NULL;
2779 +
2780 +- if (stream_id > ep->stream_info->num_streams)
2781 ++ if (stream_id >= ep->stream_info->num_streams)
2782 + return NULL;
2783 + return ep->stream_info->stream_rings[stream_id];
2784 + }
2785 +diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
2786 +index 343fa6ff9f4b..512c84adcace 100644
2787 +--- a/drivers/usb/misc/yurex.c
2788 ++++ b/drivers/usb/misc/yurex.c
2789 +@@ -414,8 +414,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
2790 + loff_t *ppos)
2791 + {
2792 + struct usb_yurex *dev;
2793 +- int retval = 0;
2794 +- int bytes_read = 0;
2795 ++ int len = 0;
2796 + char in_buffer[20];
2797 + unsigned long flags;
2798 +
2799 +@@ -423,26 +422,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
2800 +
2801 + mutex_lock(&dev->io_mutex);
2802 + if (!dev->interface) { /* already disconnected */
2803 +- retval = -ENODEV;
2804 +- goto exit;
2805 ++ mutex_unlock(&dev->io_mutex);
2806 ++ return -ENODEV;
2807 + }
2808 +
2809 + spin_lock_irqsave(&dev->lock, flags);
2810 +- bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
2811 ++ len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
2812 + spin_unlock_irqrestore(&dev->lock, flags);
2813 +-
2814 +- if (*ppos < bytes_read) {
2815 +- if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
2816 +- retval = -EFAULT;
2817 +- else {
2818 +- retval = bytes_read - *ppos;
2819 +- *ppos += bytes_read;
2820 +- }
2821 +- }
2822 +-
2823 +-exit:
2824 + mutex_unlock(&dev->io_mutex);
2825 +- return retval;
2826 ++
2827 ++ return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
2828 + }
2829 +
2830 + static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
2831 +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
2832 +index 71133d96f97d..f73ea14e8173 100644
2833 +--- a/drivers/usb/serial/ch341.c
2834 ++++ b/drivers/usb/serial/ch341.c
2835 +@@ -118,7 +118,7 @@ static int ch341_control_in(struct usb_device *dev,
2836 + r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
2837 + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
2838 + value, index, buf, bufsize, DEFAULT_TIMEOUT);
2839 +- if (r < bufsize) {
2840 ++ if (r < (int)bufsize) {
2841 + if (r >= 0) {
2842 + dev_err(&dev->dev,
2843 + "short control message received (%d < %u)\n",
2844 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2845 +index 73835027a7cc..97382301c393 100644
2846 +--- a/drivers/usb/serial/cp210x.c
2847 ++++ b/drivers/usb/serial/cp210x.c
2848 +@@ -145,6 +145,7 @@ static const struct usb_device_id id_table[] = {
2849 + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
2850 + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
2851 + { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
2852 ++ { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
2853 + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
2854 + { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
2855 + { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
2856 +diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
2857 +index 6b0942428917..8a4047de43dc 100644
2858 +--- a/drivers/usb/serial/keyspan_pda.c
2859 ++++ b/drivers/usb/serial/keyspan_pda.c
2860 +@@ -373,8 +373,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
2861 + 3, /* get pins */
2862 + USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
2863 + 0, 0, data, 1, 2000);
2864 +- if (rc >= 0)
2865 ++ if (rc == 1)
2866 + *value = *data;
2867 ++ else if (rc >= 0)
2868 ++ rc = -EIO;
2869 +
2870 + kfree(data);
2871 + return rc;
2872 +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
2873 +index ed883a7ad533..58ba6904a087 100644
2874 +--- a/drivers/usb/serial/mos7840.c
2875 ++++ b/drivers/usb/serial/mos7840.c
2876 +@@ -482,6 +482,9 @@ static void mos7840_control_callback(struct urb *urb)
2877 + }
2878 +
2879 + dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
2880 ++ if (urb->actual_length < 1)
2881 ++ goto out;
2882 ++
2883 + dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
2884 + mos7840_port->MsrLsr, mos7840_port->port_num);
2885 + data = urb->transfer_buffer;
2886 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2887 +index 7efd70bfeaf7..d106b981d86f 100644
2888 +--- a/fs/btrfs/disk-io.c
2889 ++++ b/fs/btrfs/disk-io.c
2890 +@@ -923,7 +923,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
2891 + if (bio_flags & EXTENT_BIO_TREE_LOG)
2892 + return 0;
2893 + #ifdef CONFIG_X86
2894 +- if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
2895 ++ if (static_cpu_has(X86_FEATURE_XMM4_2))
2896 + return 0;
2897 + #endif
2898 + return 1;
2899 +diff --git a/fs/inode.c b/fs/inode.c
2900 +index b95615f3fc50..a39c2724d8a0 100644
2901 +--- a/fs/inode.c
2902 ++++ b/fs/inode.c
2903 +@@ -1937,8 +1937,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
2904 + inode->i_uid = current_fsuid();
2905 + if (dir && dir->i_mode & S_ISGID) {
2906 + inode->i_gid = dir->i_gid;
2907 ++
2908 ++ /* Directories are special, and always inherit S_ISGID */
2909 + if (S_ISDIR(mode))
2910 + mode |= S_ISGID;
2911 ++ else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
2912 ++ !in_group_p(inode->i_gid) &&
2913 ++ !capable_wrt_inode_uidgid(dir, CAP_FSETID))
2914 ++ mode &= ~S_ISGID;
2915 + } else
2916 + inode->i_gid = current_fsgid();
2917 + inode->i_mode = mode;
2918 +diff --git a/include/linux/libata.h b/include/linux/libata.h
2919 +index b20a2752f934..6428ac4746de 100644
2920 +--- a/include/linux/libata.h
2921 ++++ b/include/linux/libata.h
2922 +@@ -210,6 +210,7 @@ enum {
2923 + ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
2924 + /* (doesn't imply presence) */
2925 + ATA_FLAG_SATA = (1 << 1),
2926 ++ ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */
2927 + ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
2928 + ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
2929 + ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
2930 +diff --git a/kernel/power/user.c b/kernel/power/user.c
2931 +index 526e8911460a..f83c1876b39c 100644
2932 +--- a/kernel/power/user.c
2933 ++++ b/kernel/power/user.c
2934 +@@ -184,6 +184,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
2935 + res = PAGE_SIZE - pg_offp;
2936 + }
2937 +
2938 ++ if (!data_of(data->handle)) {
2939 ++ res = -EINVAL;
2940 ++ goto unlock;
2941 ++ }
2942 ++
2943 + res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
2944 + buf, count);
2945 + if (res > 0)
2946 +diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
2947 +index 83c33a5bcffb..de67fea3cf46 100644
2948 +--- a/lib/atomic64_test.c
2949 ++++ b/lib/atomic64_test.c
2950 +@@ -16,6 +16,10 @@
2951 + #include <linux/kernel.h>
2952 + #include <linux/atomic.h>
2953 +
2954 ++#ifdef CONFIG_X86
2955 ++#include <asm/cpufeature.h> /* for boot_cpu_has below */
2956 ++#endif
2957 ++
2958 + #define TEST(bit, op, c_op, val) \
2959 + do { \
2960 + atomic##bit##_set(&v, v0); \
2961 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2962 +index 9f70c267a7a5..665fd87cc105 100644
2963 +--- a/net/bridge/netfilter/ebtables.c
2964 ++++ b/net/bridge/netfilter/ebtables.c
2965 +@@ -701,6 +701,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
2966 + }
2967 + i = 0;
2968 +
2969 ++ memset(&mtpar, 0, sizeof(mtpar));
2970 ++ memset(&tgpar, 0, sizeof(tgpar));
2971 + mtpar.net = tgpar.net = net;
2972 + mtpar.table = tgpar.table = name;
2973 + mtpar.entryinfo = tgpar.entryinfo = e;
2974 +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
2975 +index dac62b5e7fe3..9363c1a70f16 100644
2976 +--- a/net/ipv4/netfilter/ip_tables.c
2977 ++++ b/net/ipv4/netfilter/ip_tables.c
2978 +@@ -663,6 +663,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
2979 + return -ENOMEM;
2980 +
2981 + j = 0;
2982 ++ memset(&mtpar, 0, sizeof(mtpar));
2983 + mtpar.net = net;
2984 + mtpar.table = name;
2985 + mtpar.entryinfo = &e->ip;
2986 +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
2987 +index 795c343347ec..6cb9e35d23ac 100644
2988 +--- a/net/ipv6/netfilter/ip6_tables.c
2989 ++++ b/net/ipv6/netfilter/ip6_tables.c
2990 +@@ -676,6 +676,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
2991 + return -ENOMEM;
2992 +
2993 + j = 0;
2994 ++ memset(&mtpar, 0, sizeof(mtpar));
2995 + mtpar.net = net;
2996 + mtpar.table = name;
2997 + mtpar.entryinfo = &e->ipv6;
2998 +diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
2999 +index 7edcfda288c4..54cde78c2718 100644
3000 +--- a/net/netfilter/nfnetlink_queue.c
3001 ++++ b/net/netfilter/nfnetlink_queue.c
3002 +@@ -1106,6 +1106,9 @@ nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
3003 + static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
3004 + [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) },
3005 + [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) },
3006 ++ [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
3007 ++ [NFQA_CFG_MASK] = { .type = NLA_U32 },
3008 ++ [NFQA_CFG_FLAGS] = { .type = NLA_U32 },
3009 + };
3010 +
3011 + static const struct nf_queue_handler nfqh = {
3012 +diff --git a/tools/build/Build.include b/tools/build/Build.include
3013 +index 4d000bc959b4..1c570528baf7 100644
3014 +--- a/tools/build/Build.include
3015 ++++ b/tools/build/Build.include
3016 +@@ -62,8 +62,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
3017 + $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \
3018 + rm -f $(depfile); \
3019 + mv -f $(dot-target).tmp $(dot-target).cmd, \
3020 +- printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
3021 +- printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \
3022 ++ printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
3023 ++ printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd; \
3024 + cat $(depfile) >> $(dot-target).cmd; \
3025 + printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
3026 +