
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Fri, 23 Nov 2018 12:45:19
Message-Id: 1542977095.68ed016b1baa8e1e440b12efbfda34987f0d6ab4.mpagano@gentoo
commit: 68ed016b1baa8e1e440b12efbfda34987f0d6ab4
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Nov 23 12:44:55 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Nov 23 12:44:55 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=68ed016b

proj/linux-patches: Linux patch 4.9.139

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1138_linux-4.9.139.patch | 2371 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2375 insertions(+)

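Most of what follows is the upstream 4.9.139 stable update, dominated by the backport of the ARM Spectre mitigations: the CSDB speculation barrier, array_index_mask_nospec() for 32-bit ARM, syscall-number clamping, and branch-predictor maintenance on context switch and in the fault handler. Purely as background — this sketch is not part of the patch, and index_mask_nospec()/load_clamped()/table are made-up names — a minimal C illustration of how such a branchless index mask is used to defeat a bounds-check bypass; the form of the mask mirrors the kernel's generic fallback:

#include <stdio.h>

/* All-ones when idx < sz, zero otherwise, computed without a branch
 * (assumes sz > 0 and an arithmetic right shift, as GCC/Clang provide).
 * The ARM asm version added by this patch derives the same mask with a
 * cmp/sbc pair and then issues a CSDB speculation barrier. */
static unsigned long index_mask_nospec(unsigned long idx, unsigned long sz)
{
	return ~(long)(idx | (sz - 1UL - idx)) >> (sizeof(long) * 8 - 1);
}

static int table[16];

static int load_clamped(unsigned long idx)
{
	idx &= index_mask_nospec(idx, 16);	/* out of range collapses to 0 */
	return table[idx];
}

int main(void)
{
	printf("%d\n", load_clamped(3));	/* in range: reads table[3] */
	printf("%d\n", load_clamped(99));	/* clamped: reads table[0] */
	return 0;
}

Because the mask is computed arithmetically rather than with a conditional branch, a CPU speculating past the bounds check still only ever dereferences index 0 instead of an attacker-chosen slot.
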
diff --git a/0000_README b/0000_README
index 75308be..56d5a98 100644
--- a/0000_README
+++ b/0000_README
@@ -595,6 +595,10 @@ Patch: 1137_linux-4.9.138.patch
From: http://www.kernel.org
Desc: Linux 4.9.138

+Patch: 1138_linux-4.9.139.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.139
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1138_linux-4.9.139.patch b/1138_linux-4.9.139.patch
new file mode 100644
index 0000000..befed26
--- /dev/null
+++ b/1138_linux-4.9.139.patch
@@ -0,0 +1,2371 @@
+diff --git a/.gitignore b/.gitignore
+index c2ed4ecb0acd..0c39aa20b6ba 100644
+--- a/.gitignore
++++ b/.gitignore
+@@ -33,6 +33,7 @@
+ *.lzo
+ *.patch
+ *.gcno
++*.ll
+ modules.builtin
+ Module.symvers
+ *.dwo
+diff --git a/Kbuild b/Kbuild
+index 3d0ae152af7c..94c752762bc2 100644
+--- a/Kbuild
++++ b/Kbuild
+@@ -7,31 +7,6 @@
+ # 4) Check for missing system calls
+ # 5) Generate constants.py (may need bounds.h)
+
+-# Default sed regexp - multiline due to syntax constraints
+-define sed-y
+- "/^->/{s:->#\(.*\):/* \1 */:; \
+- s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
+- s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
+- s:->::; p;}"
+-endef
+-
+-# Use filechk to avoid rebuilds when a header changes, but the resulting file
+-# does not
+-define filechk_offsets
+- (set -e; \
+- echo "#ifndef $2"; \
+- echo "#define $2"; \
+- echo "/*"; \
+- echo " * DO NOT MODIFY."; \
+- echo " *"; \
+- echo " * This file was generated by Kbuild"; \
+- echo " */"; \
+- echo ""; \
+- sed -ne $(sed-y); \
+- echo ""; \
+- echo "#endif" )
+-endef
+-
+ #####
+ # 1) Generate bounds.h
+
+diff --git a/Makefile b/Makefile
+index ccf2602f664d..a6959d96316d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 138
++SUBLEVEL = 139
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+@@ -303,7 +303,7 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+
+ HOSTCC = gcc
+ HOSTCXX = g++
+-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
++HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
+ HOSTCXXFLAGS = -O2
+
+ ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
+@@ -394,7 +394,7 @@ LINUXINCLUDE += $(filter-out $(LINUXINCLUDE),$(USERINCLUDE))
+
+ KBUILD_AFLAGS := -D__ASSEMBLY__
+ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+- -fno-strict-aliasing -fno-common \
++ -fno-strict-aliasing -fno-common -fshort-wchar \
+ -Werror-implicit-function-declaration \
+ -Wno-format-security \
+ -std=gnu89
+@@ -644,7 +644,8 @@ KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
+ endif
+
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+-KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
++KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
+ else
+ ifdef CONFIG_PROFILE_ALL_BRANCHES
+ KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
+@@ -704,11 +705,20 @@ endif
+ KBUILD_CFLAGS += $(stackp-flag)
+
+ ifeq ($(cc-name),clang)
++ifneq ($(CROSS_COMPILE),)
++CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%))
++GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
++endif
++ifneq ($(GCC_TOOLCHAIN),)
++CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN)
++endif
++KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
++KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+ KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+-KBUILD_CPPFLAGS += $(call cc-option,-Wno-unknown-warning-option,)
+ KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
+ KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
++KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+ # Quiet clang warning: comparison of unsigned expression < 0 is always false
+ KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+ # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
+@@ -716,6 +726,8 @@ KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+ # See modpost pattern 2
+ KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+ KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
++KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
++KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+ else
+
+ # These warnings generated too much noise in a regular build.
+@@ -1379,6 +1391,8 @@ help:
+ @echo ' (default: $$(INSTALL_MOD_PATH)/lib/firmware)'
+ @echo ' dir/ - Build all files in dir and below'
+ @echo ' dir/file.[ois] - Build specified target only'
++ @echo ' dir/file.ll - Build the LLVM assembly file'
++ @echo ' (requires compiler support for LLVM assembly generation)'
+ @echo ' dir/file.lst - Build specified mixed source/assembly target only'
+ @echo ' (requires a recent binutils and recent build (System.map))'
+ @echo ' dir/file.ko - Build module including final link'
+@@ -1563,6 +1577,7 @@ clean: $(clean-dirs)
+ -o -name '*.symtypes' -o -name 'modules.order' \
+ -o -name modules.builtin -o -name '.tmp_*.o.*' \
+ -o -name '*.c.[012]*.*' \
++ -o -name '*.ll' \
+ -o -name '*.gcno' \) -type f -print | xargs rm -f
+
+ # Generate tags for editors
+@@ -1666,6 +1681,8 @@ endif
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.symtypes: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
++%.ll: %.c prepare scripts FORCE
++ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+
+ # Modules
+ /: prepare scripts FORCE
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index 3aed4492c9a7..e616f61f859d 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -445,11 +445,23 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
+ .size \name , . - \name
+ .endm
+
++ .macro csdb
++#ifdef CONFIG_THUMB2_KERNEL
++ .inst.w 0xf3af8014
++#else
++ .inst 0xe320f014
++#endif
++ .endm
++
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+ #ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcccs \tmp, \tmp, \limit
+ bcs \bad
++#ifdef CONFIG_CPU_SPECTRE
++ movcs \addr, #0
++ csdb
++#endif
+ #endif
+ .endm
+
+diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
+index f5d698182d50..513e03d138ea 100644
+--- a/arch/arm/include/asm/barrier.h
++++ b/arch/arm/include/asm/barrier.h
+@@ -16,6 +16,12 @@
+ #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
+ #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
+ #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
++#ifdef CONFIG_THUMB2_KERNEL
++#define CSDB ".inst.w 0xf3af8014"
++#else
++#define CSDB ".inst 0xe320f014"
++#endif
++#define csdb() __asm__ __volatile__(CSDB : : : "memory")
+#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
+ #define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+ : : "r" (0) : "memory")
+@@ -36,6 +42,13 @@
+ #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
+ #endif
+
++#ifndef CSDB
++#define CSDB
++#endif
++#ifndef csdb
++#define csdb()
++#endif
++
+ #ifdef CONFIG_ARM_HEAVY_MB
+ extern void (*soc_mb)(void);
+ extern void arm_heavy_mb(void);
+@@ -62,6 +75,25 @@ extern void arm_heavy_mb(void);
+ #define __smp_rmb() __smp_mb()
+ #define __smp_wmb() dmb(ishst)
+
++#ifdef CONFIG_CPU_SPECTRE
++static inline unsigned long array_index_mask_nospec(unsigned long idx,
++ unsigned long sz)
++{
++ unsigned long mask;
++
++ asm volatile(
++ "cmp %1, %2\n"
++ " sbc %0, %1, %1\n"
++ CSDB
++ : "=r" (mask)
++ : "r" (idx), "Ir" (sz)
++ : "cc");
++
++ return mask;
++}
++#define array_index_mask_nospec array_index_mask_nospec
++#endif
++
+ #include <asm-generic/barrier.h>
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
+index a97f1ea708d1..73a99c72a930 100644
+--- a/arch/arm/include/asm/bugs.h
++++ b/arch/arm/include/asm/bugs.h
+@@ -10,12 +10,14 @@
+ #ifndef __ASM_BUGS_H
+ #define __ASM_BUGS_H
+
+-#ifdef CONFIG_MMU
+ extern void check_writebuffer_bugs(void);
+
+-#define check_bugs() check_writebuffer_bugs()
++#ifdef CONFIG_MMU
++extern void check_bugs(void);
++extern void check_other_bugs(void);
+ #else
+ #define check_bugs() do { } while (0)
++#define check_other_bugs() do { } while (0)
+ #endif
+
+ #endif
+diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
+index dbdbce1b3a72..b74b174ac9fc 100644
+--- a/arch/arm/include/asm/cp15.h
++++ b/arch/arm/include/asm/cp15.h
+@@ -64,6 +64,9 @@
+ #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
+ #define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+
++#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
++#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
++
+ extern unsigned long cr_alignment; /* defined in entry-armv.S */
+
+ static inline unsigned long get_cr(void)
+diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
+index b62eaeb147aa..c55db1e22f0c 100644
+--- a/arch/arm/include/asm/cputype.h
++++ b/arch/arm/include/asm/cputype.h
+@@ -76,8 +76,16 @@
+ #define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
+ #define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
+ #define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
++#define ARM_CPU_PART_CORTEX_A53 0x4100d030
++#define ARM_CPU_PART_CORTEX_A57 0x4100d070
++#define ARM_CPU_PART_CORTEX_A72 0x4100d080
++#define ARM_CPU_PART_CORTEX_A73 0x4100d090
++#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0
+ #define ARM_CPU_PART_MASK 0xff00fff0
+
++/* Broadcom cores */
++#define ARM_CPU_PART_BRAHMA_B15 0x420000f0
++
+ /* DEC implemented cores */
+ #define ARM_CPU_PART_SA1100 0x4400a110
+
+diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
+index 8ef05381984b..24f3ec7c9fbe 100644
+--- a/arch/arm/include/asm/kvm_asm.h
++++ b/arch/arm/include/asm/kvm_asm.h
+@@ -61,8 +61,6 @@ struct kvm_vcpu;
+ extern char __kvm_hyp_init[];
+ extern char __kvm_hyp_init_end[];
+
+-extern char __kvm_hyp_vector[];
+-
+ extern void __kvm_flush_vm_context(void);
+ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+ extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
+index 0833d8a1dbbb..2fda7e905754 100644
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -21,6 +21,7 @@
+
+ #include <linux/types.h>
+ #include <linux/kvm_types.h>
++#include <asm/cputype.h>
+ #include <asm/kvm.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_mmio.h>
+@@ -323,8 +324,17 @@ static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+
+ static inline bool kvm_arm_harden_branch_predictor(void)
+ {
+- /* No way to detect it yet, pretend it is not there. */
+- return false;
++ switch(read_cpuid_part()) {
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++ case ARM_CPU_PART_BRAHMA_B15:
++ case ARM_CPU_PART_CORTEX_A12:
++ case ARM_CPU_PART_CORTEX_A15:
++ case ARM_CPU_PART_CORTEX_A17:
++ return true;
++#endif
++ default:
++ return false;
++ }
+ }
+
+ #define KVM_SSBD_UNKNOWN -1
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index e2f05cedaf97..d26395754b56 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -248,7 +248,28 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
+
+ static inline void *kvm_get_hyp_vector(void)
+ {
+- return kvm_ksym_ref(__kvm_hyp_vector);
++ switch(read_cpuid_part()) {
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++ case ARM_CPU_PART_CORTEX_A12:
++ case ARM_CPU_PART_CORTEX_A17:
++ {
++ extern char __kvm_hyp_vector_bp_inv[];
++ return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
++ }
++
++ case ARM_CPU_PART_BRAHMA_B15:
++ case ARM_CPU_PART_CORTEX_A15:
++ {
++ extern char __kvm_hyp_vector_ic_inv[];
++ return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
++ }
++#endif
++ default:
++ {
++ extern char __kvm_hyp_vector[];
++ return kvm_ksym_ref(__kvm_hyp_vector);
++ }
++ }
+ }
+
+ static inline int kvm_map_vectors(void)
+diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
+index 8877ad5ffe10..f379f5f849a9 100644
+--- a/arch/arm/include/asm/proc-fns.h
++++ b/arch/arm/include/asm/proc-fns.h
+@@ -36,6 +36,10 @@ extern struct processor {
+ * Set up any processor specifics
+ */
+ void (*_proc_init)(void);
++ /*
++ * Check for processor bugs
++ */
++ void (*check_bugs)(void);
+ /*
+ * Disable any processor specifics
+ */
+diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
+index a3d61ad984af..1fed41440af9 100644
+--- a/arch/arm/include/asm/system_misc.h
++++ b/arch/arm/include/asm/system_misc.h
+@@ -7,6 +7,7 @@
+ #include <linux/linkage.h>
+ #include <linux/irqflags.h>
+ #include <linux/reboot.h>
++#include <linux/percpu.h>
+
+ extern void cpu_init(void);
+
+@@ -14,6 +15,20 @@ void soft_restart(unsigned long);
+ extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+ extern void (*arm_pm_idle)(void);
+
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++typedef void (*harden_branch_predictor_fn_t)(void);
++DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
++static inline void harden_branch_predictor(void)
++{
++ harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
++ smp_processor_id());
++ if (fn)
++ fn();
++}
++#else
++#define harden_branch_predictor() do { } while (0)
++#endif
++
+ #define UDBG_UNDEFINED (1 << 0)
+ #define UDBG_SYSCALL (1 << 1)
+ #define UDBG_BADABORT (1 << 2)
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 776757d1604a..57d2ad9c75ca 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -126,8 +126,8 @@ struct user_vfp_exc;
+
+ extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+ struct user_vfp_exc __user *);
+-extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+- struct user_vfp_exc __user *);
++extern int vfp_restore_user_hwstate(struct user_vfp *,
++ struct user_vfp_exc *);
+ #endif
+
+ /*
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index b7e0125c0bbf..7b17460127fd 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -114,6 +114,13 @@ static inline void set_fs(mm_segment_t fs)
+ : "cc"); \
+ flag; })
+
++/*
++ * This is a type: either unsigned long, if the argument fits into
++ * that type, or otherwise unsigned long long.
++ */
++#define __inttype(x) \
++ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
++
+ /*
+ * Single-value transfer routines. They automatically use the right
+ * size if we just have the right pointer type. Note that the functions
+@@ -183,7 +190,7 @@ extern int __get_user_64t_4(void *);
+ ({ \
+ unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ register const typeof(*(p)) __user *__p asm("r0") = (p);\
+- register typeof(x) __r2 asm("r2"); \
++ register __inttype(x) __r2 asm("r2"); \
+ register unsigned long __l asm("r1") = __limit; \
+ register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
+@@ -273,6 +280,16 @@ static inline void set_fs(mm_segment_t fs)
+ #define user_addr_max() \
+ (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
+
++#ifdef CONFIG_CPU_SPECTRE
++/*
++ * When mitigating Spectre variant 1, it is not worth fixing the non-
++ * verifying accessors, because we need to add verification of the
++ * address space there. Force these to use the standard get_user()
++ * version instead.
++ */
++#define __get_user(x, ptr) get_user(x, ptr)
++#else
++
+ /*
+ * The "__xxx" versions of the user access functions do not verify the
+ * address space - it must have been done previously with a separate
+@@ -289,12 +306,6 @@ static inline void set_fs(mm_segment_t fs)
+ __gu_err; \
+ })
+
+-#define __get_user_error(x, ptr, err) \
+-({ \
+- __get_user_err((x), (ptr), err); \
+- (void) 0; \
+-})
+-
+#define __get_user_err(x, ptr, err) \
+ do { \
+ unsigned long __gu_addr = (unsigned long)(ptr); \
+@@ -354,6 +365,7 @@ do { \
+
+ #define __get_user_asm_word(x, addr, err) \
+ __get_user_asm(x, addr, err, ldr)
++#endif
+
+
+ #define __put_user_switch(x, ptr, __err, __fn) \
+diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
+index ad325a8c7e1e..adb9add28b6f 100644
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -30,6 +30,7 @@ else
+ obj-y += entry-armv.o
+ endif
+
++obj-$(CONFIG_MMU) += bugs.o
+ obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+ obj-$(CONFIG_ISA_DMA_API) += dma.o
+ obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
+diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
+new file mode 100644
+index 000000000000..7be511310191
+--- /dev/null
++++ b/arch/arm/kernel/bugs.c
+@@ -0,0 +1,18 @@
++// SPDX-Identifier: GPL-2.0
++#include <linux/init.h>
++#include <asm/bugs.h>
++#include <asm/proc-fns.h>
++
++void check_other_bugs(void)
++{
++#ifdef MULTI_CPU
++ if (processor.check_bugs)
++ processor.check_bugs();
++#endif
++}
++
++void __init check_bugs(void)
++{
++ check_writebuffer_bugs();
++ check_other_bugs();
++}
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 10c3283d6c19..56be67ecf0fa 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -223,9 +223,7 @@ local_restart:
+ tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
+ bne __sys_trace
+
+- cmp scno, #NR_syscalls @ check upper syscall limit
+- badr lr, ret_fast_syscall @ return address
+- ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
++ invoke_syscall tbl, scno, r10, ret_fast_syscall
+
+ add r1, sp, #S_OFF
+ 2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+@@ -258,14 +256,8 @@ __sys_trace:
+ mov r1, scno
+ add r0, sp, #S_OFF
+ bl syscall_trace_enter
+-
+- badr lr, __sys_trace_return @ return address
+- mov scno, r0 @ syscall number (possibly new)
+- add r1, sp, #S_R0 + S_OFF @ pointer to regs
+- cmp scno, #NR_syscalls @ check upper syscall limit
+- ldmccia r1, {r0 - r6} @ have to reload r0 - r6
+- stmccia sp, {r4, r5} @ and update the stack args
+- ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
++ mov scno, r0
++ invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
+ cmp scno, #-1 @ skip the syscall?
+ bne 2b
+ add sp, sp, #S_OFF @ restore stack
+@@ -317,6 +309,10 @@ sys_syscall:
+ bic scno, r0, #__NR_OABI_SYSCALL_BASE
+ cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
+ cmpne scno, #NR_syscalls @ check range
++#ifdef CONFIG_CPU_SPECTRE
++ movhs scno, #0
++ csdb
++#endif
+ stmloia sp, {r5, r6} @ shuffle args
+ movlo r0, r1
+ movlo r1, r2
+diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
+index e056c9a9aa9d..fa7c6e5c17e7 100644
+--- a/arch/arm/kernel/entry-header.S
++++ b/arch/arm/kernel/entry-header.S
+@@ -377,6 +377,31 @@
+ #endif
+ .endm
+
++ .macro invoke_syscall, table, nr, tmp, ret, reload=0
++#ifdef CONFIG_CPU_SPECTRE
++ mov \tmp, \nr
++ cmp \tmp, #NR_syscalls @ check upper syscall limit
++ movcs \tmp, #0
++ csdb
++ badr lr, \ret @ return address
++ .if \reload
++ add r1, sp, #S_R0 + S_OFF @ pointer to regs
++ ldmccia r1, {r0 - r6} @ reload r0-r6
++ stmccia sp, {r4, r5} @ update stack arguments
++ .endif
++ ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine
++#else
++ cmp \nr, #NR_syscalls @ check upper syscall limit
++ badr lr, \ret @ return address
++ .if \reload
++ add r1, sp, #S_R0 + S_OFF @ pointer to regs
++ ldmccia r1, {r0 - r6} @ reload r0-r6
++ stmccia sp, {r4, r5} @ update stack arguments
++ .endif
++ ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine
++#endif
++ .endm
++
+ /*
+ * These are the registers used in the syscall handler, and allow us to
+ * have in theory up to 7 arguments to a function - r0 to r6.
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 7b8f2141427b..6bee5c9b1133 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -107,21 +107,20 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+ return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
+ }
+
+-static int restore_vfp_context(struct vfp_sigframe __user *frame)
++static int restore_vfp_context(struct vfp_sigframe __user *auxp)
+ {
+- unsigned long magic;
+- unsigned long size;
+- int err = 0;
++ struct vfp_sigframe frame;
++ int err;
+
+- __get_user_error(magic, &frame->magic, err);
+- __get_user_error(size, &frame->size, err);
++ err = __copy_from_user(&frame, (char __user *) auxp, sizeof(frame));
+
+ if (err)
+- return -EFAULT;
+- if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
++ return err;
++
++ if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
+ return -EINVAL;
+
+- return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
++ return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
+ }
+
+ #endif
+@@ -141,6 +140,7 @@ struct rt_sigframe {
+
+ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
+ {
++ struct sigcontext context;
+ struct aux_sigframe __user *aux;
+ sigset_t set;
+ int err;
+@@ -149,23 +149,26 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
+ if (err == 0)
+ set_current_blocked(&set);
+
+- __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+- __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+- __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+- __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+- __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+- __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+- __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+- __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+- __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+- __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+- __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+- __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+- __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+- __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+- __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+- __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+- __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
++ err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
++ if (err == 0) {
++ regs->ARM_r0 = context.arm_r0;
++ regs->ARM_r1 = context.arm_r1;
++ regs->ARM_r2 = context.arm_r2;
++ regs->ARM_r3 = context.arm_r3;
++ regs->ARM_r4 = context.arm_r4;
++ regs->ARM_r5 = context.arm_r5;
++ regs->ARM_r6 = context.arm_r6;
++ regs->ARM_r7 = context.arm_r7;
++ regs->ARM_r8 = context.arm_r8;
++ regs->ARM_r9 = context.arm_r9;
++ regs->ARM_r10 = context.arm_r10;
++ regs->ARM_fp = context.arm_fp;
++ regs->ARM_ip = context.arm_ip;
++ regs->ARM_sp = context.arm_sp;
++ regs->ARM_lr = context.arm_lr;
++ regs->ARM_pc = context.arm_pc;
++ regs->ARM_cpsr = context.arm_cpsr;
++ }
+
+ err |= !valid_user_regs(regs);
+
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 7dd14e8395e6..d2ce37da87d8 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -29,6 +29,7 @@
+ #include <linux/irq_work.h>
+
+ #include <linux/atomic.h>
++#include <asm/bugs.h>
+ #include <asm/smp.h>
+ #include <asm/cacheflush.h>
+ #include <asm/cpu.h>
+@@ -400,6 +401,9 @@ asmlinkage void secondary_start_kernel(void)
+ * before we continue - which happens after __cpu_up returns.
+ */
+ set_cpu_online(cpu, true);
++
++ check_other_bugs();
++
+ complete(&cpu_running);
+
+ local_irq_enable();
+diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
+index 9a2f882a0a2d..134f0d432610 100644
+--- a/arch/arm/kernel/suspend.c
++++ b/arch/arm/kernel/suspend.c
+@@ -1,6 +1,7 @@
+ #include <linux/init.h>
+ #include <linux/slab.h>
+
++#include <asm/bugs.h>
+ #include <asm/cacheflush.h>
+ #include <asm/idmap.h>
+ #include <asm/pgalloc.h>
+@@ -34,6 +35,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ cpu_switch_mm(mm->pgd, mm);
+ local_flush_bp_all();
+ local_flush_tlb_all();
++ check_other_bugs();
+ }
+
+ return ret;
+diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
+index 5f221acd21ae..640748e27035 100644
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -328,9 +328,11 @@ asmlinkage long sys_oabi_semtimedop(int semid,
+ return -ENOMEM;
+ err = 0;
+ for (i = 0; i < nsops; i++) {
+- __get_user_error(sops[i].sem_num, &tsops->sem_num, err);
+- __get_user_error(sops[i].sem_op, &tsops->sem_op, err);
+- __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
++ struct oabi_sembuf osb;
++ err |= __copy_from_user(&osb, tsops, sizeof(osb));
++ sops[i].sem_num = osb.sem_num;
++ sops[i].sem_op = osb.sem_op;
++ sops[i].sem_flg = osb.sem_flg;
+ tsops++;
+ }
+ if (timeout) {
+diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
+index 96beb53934c9..64d4a39f4b4b 100644
+--- a/arch/arm/kvm/hyp/hyp-entry.S
++++ b/arch/arm/kvm/hyp/hyp-entry.S
+@@ -16,6 +16,7 @@
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
++#include <linux/arm-smccc.h>
+ #include <linux/linkage.h>
+ #include <asm/kvm_arm.h>
+ #include <asm/kvm_asm.h>
+@@ -71,6 +72,90 @@ __kvm_hyp_vector:
+ W(b) hyp_irq
+ W(b) hyp_fiq
+
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++ .align 5
++__kvm_hyp_vector_ic_inv:
++ .global __kvm_hyp_vector_ic_inv
++
++ /*
++ * We encode the exception entry in the bottom 3 bits of
++ * SP, and we have to guarantee to be 8 bytes aligned.
++ */
++ W(add) sp, sp, #1 /* Reset 7 */
++ W(add) sp, sp, #1 /* Undef 6 */
++ W(add) sp, sp, #1 /* Syscall 5 */
++ W(add) sp, sp, #1 /* Prefetch abort 4 */
++ W(add) sp, sp, #1 /* Data abort 3 */
++ W(add) sp, sp, #1 /* HVC 2 */
++ W(add) sp, sp, #1 /* IRQ 1 */
++ W(nop) /* FIQ 0 */
++
++ mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
++ isb
++
++ b decode_vectors
++
++ .align 5
++__kvm_hyp_vector_bp_inv:
++ .global __kvm_hyp_vector_bp_inv
++
++ /*
++ * We encode the exception entry in the bottom 3 bits of
++ * SP, and we have to guarantee to be 8 bytes aligned.
++ */
++ W(add) sp, sp, #1 /* Reset 7 */
++ W(add) sp, sp, #1 /* Undef 6 */
++ W(add) sp, sp, #1 /* Syscall 5 */
++ W(add) sp, sp, #1 /* Prefetch abort 4 */
++ W(add) sp, sp, #1 /* Data abort 3 */
++ W(add) sp, sp, #1 /* HVC 2 */
++ W(add) sp, sp, #1 /* IRQ 1 */
++ W(nop) /* FIQ 0 */
++
++ mcr p15, 0, r0, c7, c5, 6 /* BPIALL */
++ isb
++
++decode_vectors:
++
++#ifdef CONFIG_THUMB2_KERNEL
++ /*
++ * Yet another silly hack: Use VPIDR as a temp register.
++ * Thumb2 is really a pain, as SP cannot be used with most
++ * of the bitwise instructions. The vect_br macro ensures
++ * things gets cleaned-up.
++ */
++ mcr p15, 4, r0, c0, c0, 0 /* VPIDR */
++ mov r0, sp
++ and r0, r0, #7
++ sub sp, sp, r0
++ push {r1, r2}
++ mov r1, r0
++ mrc p15, 4, r0, c0, c0, 0 /* VPIDR */
++ mrc p15, 0, r2, c0, c0, 0 /* MIDR */
++ mcr p15, 4, r2, c0, c0, 0 /* VPIDR */
++#endif
++
++.macro vect_br val, targ
++ARM( eor sp, sp, #\val )
++ARM( tst sp, #7 )
++ARM( eorne sp, sp, #\val )
++
++THUMB( cmp r1, #\val )
++THUMB( popeq {r1, r2} )
++
++ beq \targ
++.endm
++
++ vect_br 0, hyp_fiq
++ vect_br 1, hyp_irq
++ vect_br 2, hyp_hvc
++ vect_br 3, hyp_dabt
++ vect_br 4, hyp_pabt
++ vect_br 5, hyp_svc
++ vect_br 6, hyp_undef
++ vect_br 7, hyp_reset
++#endif
++
+ .macro invalid_vector label, cause
+ .align
+ \label: mov r0, #\cause
+@@ -118,7 +203,7 @@ hyp_hvc:
+ lsr r2, r2, #16
+ and r2, r2, #0xff
+ cmp r2, #0
+- bne guest_trap @ Guest called HVC
++ bne guest_hvc_trap @ Guest called HVC
+
+ /*
+ * Getting here means host called HVC, we shift parameters and branch
+@@ -131,7 +216,14 @@ hyp_hvc:
+ mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
+ beq 1f
+
+- push {lr}
++ /*
++ * Pushing r2 here is just a way of keeping the stack aligned to
++ * 8 bytes on any path that can trigger a HYP exception. Here,
++ * we may well be about to jump into the guest, and the guest
++ * exit would otherwise be badly decoded by our fancy
++ * "decode-exception-without-a-branch" code...
++ */
++ push {r2, lr}
+
+ mov lr, r0
+ mov r0, r1
+@@ -141,9 +233,23 @@ hyp_hvc:
+ THUMB( orr lr, #1)
+ blx lr @ Call the HYP function
+
+- pop {lr}
++ pop {r2, lr}
+ 1: eret
+
++guest_hvc_trap:
++ movw r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
++ movt r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
++ ldr r0, [sp] @ Guest's r0
++ teq r0, r2
++ bne guest_trap
++ add sp, sp, #12
++ @ Returns:
++ @ r0 = 0
++ @ r1 = HSR value (perfectly predictable)
++ @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
++ mov r0, #0
++ eret
++
+ guest_trap:
+ load_vcpu r0 @ Load VCPU pointer to r0
+
+diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
+index 7a4b06049001..a826df3d3814 100644
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -90,6 +90,15 @@
+ .text
+
+ ENTRY(arm_copy_from_user)
++#ifdef CONFIG_CPU_SPECTRE
++ get_thread_info r3
++ ldr r3, [r3, #TI_ADDR_LIMIT]
++ adds ip, r1, r2 @ ip=addr+size
++ sub r3, r3, #1 @ addr_limit - 1
++ cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
++ movcs r1, #0 @ addr = NULL
++ csdb
++#endif
+
+#include "copy_template.S"
+
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index c1799dd1d0d9..7f3760fa9c15 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -396,6 +396,7 @@ config CPU_V7
+ select CPU_CP15_MPU if !MMU
+ select CPU_HAS_ASID if MMU
+ select CPU_PABRT_V7
++ select CPU_SPECTRE if MMU
+ select CPU_TLB_V7 if MMU
+
+ # ARMv7M
+@@ -800,6 +801,28 @@ config CPU_BPREDICT_DISABLE
+ help
+ Say Y here to disable branch prediction. If unsure, say N.
+
++config CPU_SPECTRE
++ bool
++
++config HARDEN_BRANCH_PREDICTOR
++ bool "Harden the branch predictor against aliasing attacks" if EXPERT
++ depends on CPU_SPECTRE
++ default y
++ help
++ Speculation attacks against some high-performance processors rely
++ on being able to manipulate the branch predictor for a victim
++ context by executing aliasing branches in the attacker context.
++ Such attacks can be partially mitigated against by clearing
++ internal branch predictor state and limiting the prediction
++ logic in some situations.
++
++ This config option will take CPU-specific actions to harden
++ the branch predictor against aliasing attacks and may rely on
++ specific instruction sequences or control bits being set by
++ the system firmware.
++
++ If unsure, say Y.
++
+ config TLS_REG_EMUL
+ bool
+ select NEED_KUSER_HELPERS
+diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
+index e8698241ece9..92d47c8cbbc3 100644
+--- a/arch/arm/mm/Makefile
++++ b/arch/arm/mm/Makefile
+@@ -94,7 +94,7 @@ obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o
+ obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
+ obj-$(CONFIG_CPU_V6) += proc-v6.o
+ obj-$(CONFIG_CPU_V6K) += proc-v6.o
+-obj-$(CONFIG_CPU_V7) += proc-v7.o
++obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o
+ obj-$(CONFIG_CPU_V7M) += proc-v7m.o
+
+ AFLAGS_proc-v6.o :=-Wa,-march=armv6
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index f7861dc83182..5ca207ada852 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -163,6 +163,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
+ {
+ struct siginfo si;
+
++ if (addr > TASK_SIZE)
++ harden_branch_predictor();
++
+ #ifdef CONFIG_DEBUG_USER
+ if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
+ ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
+diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
+index 0d40c285bd86..7d9176c4a21d 100644
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -274,13 +274,14 @@
+ mcr p15, 0, ip, c7, c10, 4 @ data write barrier
+ .endm
+
+-.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
++.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
+ .type \name\()_processor_functions, #object
+ .align 2
+ ENTRY(\name\()_processor_functions)
+ .word \dabort
+ .word \pabort
+ .word cpu_\name\()_proc_init
++ .word \bugs
+ .word cpu_\name\()_proc_fin
+ .word cpu_\name\()_reset
+ .word cpu_\name\()_do_idle
+diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
+index c6141a5435c3..f8d45ad2a515 100644
+--- a/arch/arm/mm/proc-v7-2level.S
++++ b/arch/arm/mm/proc-v7-2level.S
+@@ -41,11 +41,6 @@
+ * even on Cortex-A8 revisions not affected by 430973.
+ * If IBE is not set, the flush BTAC/BTB won't do anything.
+ */
+-ENTRY(cpu_ca8_switch_mm)
+-#ifdef CONFIG_MMU
+- mov r2, #0
+- mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
+-#endif
+ ENTRY(cpu_v7_switch_mm)
+#ifdef CONFIG_MMU
+ mmid r1, r1 @ get mm->context.id
+@@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm)
+ #endif
+ bx lr
+ ENDPROC(cpu_v7_switch_mm)
+-ENDPROC(cpu_ca8_switch_mm)
+
+ /*
+ * cpu_v7_set_pte_ext(ptep, pte)
+diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
+new file mode 100644
+index 000000000000..5544b82a2e7a
+--- /dev/null
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -0,0 +1,174 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/arm-smccc.h>
++#include <linux/kernel.h>
++#include <linux/psci.h>
++#include <linux/smp.h>
++
++#include <asm/cp15.h>
++#include <asm/cputype.h>
++#include <asm/proc-fns.h>
++#include <asm/system_misc.h>
++
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
++
++extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
++extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
++extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
++extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
++
++static void harden_branch_predictor_bpiall(void)
++{
++ write_sysreg(0, BPIALL);
++}
++
++static void harden_branch_predictor_iciallu(void)
++{
++ write_sysreg(0, ICIALLU);
++}
++
++static void __maybe_unused call_smc_arch_workaround_1(void)
++{
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
++}
++
++static void __maybe_unused call_hvc_arch_workaround_1(void)
++{
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
++}
++
++static void cpu_v7_spectre_init(void)
++{
++ const char *spectre_v2_method = NULL;
++ int cpu = smp_processor_id();
++
++ if (per_cpu(harden_branch_predictor_fn, cpu))
++ return;
++
++ switch (read_cpuid_part()) {
++ case ARM_CPU_PART_CORTEX_A8:
++ case ARM_CPU_PART_CORTEX_A9:
++ case ARM_CPU_PART_CORTEX_A12:
++ case ARM_CPU_PART_CORTEX_A17:
++ case ARM_CPU_PART_CORTEX_A73:
++ case ARM_CPU_PART_CORTEX_A75:
++ if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
++ goto bl_error;
++ per_cpu(harden_branch_predictor_fn, cpu) =
++ harden_branch_predictor_bpiall;
++ spectre_v2_method = "BPIALL";
++ break;
++
++ case ARM_CPU_PART_CORTEX_A15:
++ case ARM_CPU_PART_BRAHMA_B15:
++ if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
++ goto bl_error;
++ per_cpu(harden_branch_predictor_fn, cpu) =
++ harden_branch_predictor_iciallu;
++ spectre_v2_method = "ICIALLU";
++ break;
++
++#ifdef CONFIG_ARM_PSCI
++ default:
++ /* Other ARM CPUs require no workaround */
++ if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
++ break;
++ /* fallthrough */
++ /* Cortex A57/A72 require firmware workaround */
++ case ARM_CPU_PART_CORTEX_A57:
++ case ARM_CPU_PART_CORTEX_A72: {
++ struct arm_smccc_res res;
++
++ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++ break;
++
++ switch (psci_ops.conduit) {
++ case PSCI_CONDUIT_HVC:
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++ if ((int)res.a0 != 0)
++ break;
++ if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
++ goto bl_error;
++ per_cpu(harden_branch_predictor_fn, cpu) =
++ call_hvc_arch_workaround_1;
++ processor.switch_mm = cpu_v7_hvc_switch_mm;
++ spectre_v2_method = "hypervisor";
++ break;
++
++ case PSCI_CONDUIT_SMC:
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++ if ((int)res.a0 != 0)
++ break;
++ if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
++ goto bl_error;
++ per_cpu(harden_branch_predictor_fn, cpu) =
++ call_smc_arch_workaround_1;
++ processor.switch_mm = cpu_v7_smc_switch_mm;
++ spectre_v2_method = "firmware";
++ break;
++
++ default:
++ break;
++ }
++ }
++#endif
++ }
++
++ if (spectre_v2_method)
++ pr_info("CPU%u: Spectre v2: using %s workaround\n",
++ smp_processor_id(), spectre_v2_method);
++ return;
++
++bl_error:
++ pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
++ cpu);
++}
++#else
++static void cpu_v7_spectre_init(void)
++{
++}
++#endif
++
++static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
++ u32 mask, const char *msg)
++{
++ u32 aux_cr;
++
++ asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
++
++ if ((aux_cr & mask) != mask) {
++ if (!*warned)
++ pr_err("CPU%u: %s", smp_processor_id(), msg);
++ *warned = true;
++ return false;
++ }
++ return true;
++}
++
++static DEFINE_PER_CPU(bool, spectre_warned);
++
++static bool check_spectre_auxcr(bool *warned, u32 bit)
++{
++ return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
++ cpu_v7_check_auxcr_set(warned, bit,
++ "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
++}
++
++void cpu_v7_ca8_ibe(void)
++{
++ if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
++ cpu_v7_spectre_init();
++}
++
++void cpu_v7_ca15_ibe(void)
++{
++ if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
++ cpu_v7_spectre_init();
++}
++
++void cpu_v7_bugs_init(void)
++{
++ cpu_v7_spectre_init();
++}
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index d00d52c9de3e..850c22bca19c 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -9,6 +9,7 @@
+ *
+ * This is the "shell" of the ARMv7 processor support.
+ */
++#include <linux/arm-smccc.h>
+ #include <linux/init.h>
+ #include <linux/linkage.h>
+ #include <asm/assembler.h>
+@@ -88,6 +89,37 @@ ENTRY(cpu_v7_dcache_clean_area)
+ ret lr
+ ENDPROC(cpu_v7_dcache_clean_area)
+
++#ifdef CONFIG_ARM_PSCI
++ .arch_extension sec
++ENTRY(cpu_v7_smc_switch_mm)
++ stmfd sp!, {r0 - r3}
++ movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
++ movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
++ smc #0
++ ldmfd sp!, {r0 - r3}
++ b cpu_v7_switch_mm
++ENDPROC(cpu_v7_smc_switch_mm)
++ .arch_extension virt
++ENTRY(cpu_v7_hvc_switch_mm)
++ stmfd sp!, {r0 - r3}
++ movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
++ movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
++ hvc #0
++ ldmfd sp!, {r0 - r3}
++ b cpu_v7_switch_mm
++ENDPROC(cpu_v7_hvc_switch_mm)
++#endif
++ENTRY(cpu_v7_iciallu_switch_mm)
++ mov r3, #0
++ mcr p15, 0, r3, c7, c5, 0 @ ICIALLU
++ b cpu_v7_switch_mm
++ENDPROC(cpu_v7_iciallu_switch_mm)
++ENTRY(cpu_v7_bpiall_switch_mm)
++ mov r3, #0
++ mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB
++ b cpu_v7_switch_mm
++ENDPROC(cpu_v7_bpiall_switch_mm)
++
+ string cpu_v7_name, "ARMv7 Processor"
+ .align
+
+@@ -153,31 +185,6 @@ ENTRY(cpu_v7_do_resume)
+ ENDPROC(cpu_v7_do_resume)
+ #endif
+
+-/*
+- * Cortex-A8
+- */
+- globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
+- globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
+- globl_equ cpu_ca8_reset, cpu_v7_reset
+- globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
+- globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
+- globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
+- globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
+-#ifdef CONFIG_ARM_CPU_SUSPEND
+- globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
+- globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
+-#endif
+-
+-/*
+- * Cortex-A9 processor functions
+- */
+- globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
+- globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
+- globl_equ cpu_ca9mp_reset, cpu_v7_reset
+- globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
+- globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
+- globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
+- globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
+ .globl cpu_ca9mp_suspend_size
+ .equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
+ #ifdef CONFIG_ARM_CPU_SUSPEND
+@@ -541,12 +548,79 @@ __v7_setup_stack:
+
+ __INITDATA
+
++ .weak cpu_v7_bugs_init
++
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+- define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
++ define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
++
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++ @ generic v7 bpiall on context switch
++ globl_equ cpu_v7_bpiall_proc_init, cpu_v7_proc_init
++ globl_equ cpu_v7_bpiall_proc_fin, cpu_v7_proc_fin
++ globl_equ cpu_v7_bpiall_reset, cpu_v7_reset
++ globl_equ cpu_v7_bpiall_do_idle, cpu_v7_do_idle
++ globl_equ cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
++ globl_equ cpu_v7_bpiall_set_pte_ext, cpu_v7_set_pte_ext
++ globl_equ cpu_v7_bpiall_suspend_size, cpu_v7_suspend_size
++#ifdef CONFIG_ARM_CPU_SUSPEND
++ globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend
++ globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume
++#endif
++ define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
++
++#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
++#else
++#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
++#endif
++
+ #ifndef CONFIG_ARM_LPAE
+- define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+- define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
++ @ Cortex-A8 - always needs bpiall switch_mm implementation
++ globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
++ globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
++ globl_equ cpu_ca8_reset, cpu_v7_reset
++ globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
++ globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
++ globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
++ globl_equ cpu_ca8_switch_mm, cpu_v7_bpiall_switch_mm
++ globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
++#ifdef CONFIG_ARM_CPU_SUSPEND
++ globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
++ globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
+ #endif
++ define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
++
++ @ Cortex-A9 - needs more registers preserved across suspend/resume
++ @ and bpiall switch_mm for hardening
++ globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
++ globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
++ globl_equ cpu_ca9mp_reset, cpu_v7_reset
++ globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
++ globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++ globl_equ cpu_ca9mp_switch_mm, cpu_v7_bpiall_switch_mm
++#else
++ globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
++#endif
++ globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
++ define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
++#endif
++
++ @ Cortex-A15 - needs iciallu switch_mm for hardening
++ globl_equ cpu_ca15_proc_init, cpu_v7_proc_init
++ globl_equ cpu_ca15_proc_fin, cpu_v7_proc_fin
++ globl_equ cpu_ca15_reset, cpu_v7_reset
++ globl_equ cpu_ca15_do_idle, cpu_v7_do_idle
++ globl_equ cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++ globl_equ cpu_ca15_switch_mm, cpu_v7_iciallu_switch_mm
++#else
++ globl_equ cpu_ca15_switch_mm, cpu_v7_switch_mm
++#endif
++ globl_equ cpu_ca15_set_pte_ext, cpu_v7_set_pte_ext
++ globl_equ cpu_ca15_suspend_size, cpu_v7_suspend_size
++ globl_equ cpu_ca15_do_suspend, cpu_v7_do_suspend
++ globl_equ cpu_ca15_do_resume, cpu_v7_do_resume
++ define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
+ #ifdef CONFIG_CPU_PJ4B
+ define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+ #endif
+@@ -653,7 +727,7 @@ __v7_ca7mp_proc_info:
+ __v7_ca12mp_proc_info:
+ .long 0x410fc0d0
+ .long 0xff0ffff0
+- __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
++ __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
+ .size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
+
+ /*
+@@ -663,7 +737,7 @@ __v7_ca12mp_proc_info:
+ __v7_ca15mp_proc_info:
+ .long 0x410fc0f0
+ .long 0xff0ffff0
+- __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
++ __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
+ .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
+
+ /*
+@@ -673,7 +747,7 @@ __v7_ca15mp_proc_info:
+ __v7_b15mp_proc_info:
+ .long 0x420f00f0
+ .long 0xff0ffff0
+- __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup
++ __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions
+ .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
+
+ /*
+@@ -683,9 +757,25 @@ __v7_b15mp_proc_info:
+ __v7_ca17mp_proc_info:
+ .long 0x410fc0e0
+ .long 0xff0ffff0
+- __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
++ __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
+ .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
+
++ /* ARM Ltd. Cortex A73 processor */
++ .type __v7_ca73_proc_info, #object
++__v7_ca73_proc_info:
++ .long 0x410fd090
++ .long 0xff0ffff0
++ __v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
++ .size __v7_ca73_proc_info, . - __v7_ca73_proc_info
++
++ /* ARM Ltd. Cortex A75 processor */
++ .type __v7_ca75_proc_info, #object
++__v7_ca75_proc_info:
++ .long 0x410fd0a0
++ .long 0xff0ffff0
++ __v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
++ .size __v7_ca75_proc_info, . - __v7_ca75_proc_info
++
+ /*
+ * Qualcomm Inc. Krait processors.
+ */
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index 5629d7580973..8e5e97989fda 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -597,13 +597,11 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+ }
+
+ /* Sanitise and restore the current VFP state from the provided structures. */
+-int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+- struct user_vfp_exc __user *ufp_exc)
++int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
+ {
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+ unsigned long fpexc;
+- int err = 0;
+
+ /* Disable VFP to avoid corrupting the new thread state. */
+ vfp_flush_hwstate(thread);
+@@ -612,17 +610,16 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+ * Copy the floating point registers. There can be unused
+ * registers see asm/hwcap.h for details.
+ */
+- err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+- sizeof(hwstate->fpregs));
++ memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
+ /*
+ * Copy the status and control register.
+ */
+- __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
++ hwstate->fpscr = ufp->fpscr;
+
+ /*
+ * Sanitise and restore the exception registers.
+ */
+- __get_user_error(fpexc, &ufp_exc->fpexc, err);
++ fpexc = ufp_exc->fpexc;
+
+ /* Ensure the VFP is enabled. */
+ fpexc |= FPEXC_EN;
+@@ -631,10 +628,10 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+ fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+ hwstate->fpexc = fpexc;
+
+- __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+- __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
++ hwstate->fpinst = ufp_exc->fpinst;
++ hwstate->fpinst2 = ufp_exc->fpinst2;
+
+- return err ? -EFAULT : 0;
++ return 0;
+ }
+
+ /*
1531 +diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
1532 +index c98e7e849f06..8550408735a0 100644
1533 +--- a/arch/arm64/crypto/sha1-ce-core.S
1534 ++++ b/arch/arm64/crypto/sha1-ce-core.S
1535 +@@ -82,7 +82,8 @@ ENTRY(sha1_ce_transform)
1536 + ldr dgb, [x0, #16]
1537 +
1538 + /* load sha1_ce_state::finalize */
1539 +- ldr w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
1540 ++ ldr_l w4, sha1_ce_offsetof_finalize, x4
1541 ++ ldr w4, [x0, x4]
1542 +
1543 + /* load input */
1544 + 0: ld1 {v8.4s-v11.4s}, [x1], #64
1545 +@@ -132,7 +133,8 @@ CPU_LE( rev32 v11.16b, v11.16b )
1546 + * the padding is handled by the C code in that case.
1547 + */
1548 + cbz x4, 3f
1549 +- ldr x4, [x0, #:lo12:sha1_ce_offsetof_count]
1550 ++ ldr_l w4, sha1_ce_offsetof_count, x4
1551 ++ ldr x4, [x0, x4]
1552 + movi v9.2d, #0
1553 + mov x8, #0x80000000
1554 + movi v10.2d, #0
1555 +diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
1556 +index aefda9868627..ea319c055f5d 100644
1557 +--- a/arch/arm64/crypto/sha1-ce-glue.c
1558 ++++ b/arch/arm64/crypto/sha1-ce-glue.c
1559 +@@ -17,9 +17,6 @@
1560 + #include <linux/crypto.h>
1561 + #include <linux/module.h>
1562 +
1563 +-#define ASM_EXPORT(sym, val) \
1564 +- asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
1565 +-
1566 + MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
1567 + MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@××××××.org>");
1568 + MODULE_LICENSE("GPL v2");
1569 +@@ -32,6 +29,9 @@ struct sha1_ce_state {
1570 + asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
1571 + int blocks);
1572 +
1573 ++const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
1574 ++const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
1575 ++
1576 + static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
1577 + unsigned int len)
1578 + {
1579 +@@ -52,11 +52,6 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
1580 + struct sha1_ce_state *sctx = shash_desc_ctx(desc);
1581 + bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
1582 +
1583 +- ASM_EXPORT(sha1_ce_offsetof_count,
1584 +- offsetof(struct sha1_ce_state, sst.count));
1585 +- ASM_EXPORT(sha1_ce_offsetof_finalize,
1586 +- offsetof(struct sha1_ce_state, finalize));
1587 +-
1588 + /*
1589 + * Allow the asm code to perform the finalization if there is no
1590 + * partial data and the input is a round multiple of the block size.
1591 +diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
1592 +index 01cfee066837..679c6c002f4f 100644
1593 +--- a/arch/arm64/crypto/sha2-ce-core.S
1594 ++++ b/arch/arm64/crypto/sha2-ce-core.S
1595 +@@ -88,7 +88,8 @@ ENTRY(sha2_ce_transform)
1596 + ld1 {dgav.4s, dgbv.4s}, [x0]
1597 +
1598 + /* load sha256_ce_state::finalize */
1599 +- ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
1600 ++ ldr_l w4, sha256_ce_offsetof_finalize, x4
1601 ++ ldr w4, [x0, x4]
1602 +
1603 + /* load input */
1604 + 0: ld1 {v16.4s-v19.4s}, [x1], #64
1605 +@@ -136,7 +137,8 @@ CPU_LE( rev32 v19.16b, v19.16b )
1606 + * the padding is handled by the C code in that case.
1607 + */
1608 + cbz x4, 3f
1609 +- ldr x4, [x0, #:lo12:sha256_ce_offsetof_count]
1610 ++ ldr_l w4, sha256_ce_offsetof_count, x4
1611 ++ ldr x4, [x0, x4]
1612 + movi v17.2d, #0
1613 + mov x8, #0x80000000
1614 + movi v18.2d, #0
1615 +diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
1616 +index 7cd587564a41..0ed9486f75dd 100644
1617 +--- a/arch/arm64/crypto/sha2-ce-glue.c
1618 ++++ b/arch/arm64/crypto/sha2-ce-glue.c
1619 +@@ -17,9 +17,6 @@
1620 + #include <linux/crypto.h>
1621 + #include <linux/module.h>
1622 +
1623 +-#define ASM_EXPORT(sym, val) \
1624 +- asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
1625 +-
1626 + MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
1627 + MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@××××××.org>");
1628 + MODULE_LICENSE("GPL v2");
1629 +@@ -32,6 +29,11 @@ struct sha256_ce_state {
1630 + asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
1631 + int blocks);
1632 +
1633 ++const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
1634 ++ sst.count);
1635 ++const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
1636 ++ finalize);
1637 ++
1638 + static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
1639 + unsigned int len)
1640 + {
1641 +@@ -52,11 +54,6 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
1642 + struct sha256_ce_state *sctx = shash_desc_ctx(desc);
1643 + bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
1644 +
1645 +- ASM_EXPORT(sha256_ce_offsetof_count,
1646 +- offsetof(struct sha256_ce_state, sst.count));
1647 +- ASM_EXPORT(sha256_ce_offsetof_finalize,
1648 +- offsetof(struct sha256_ce_state, finalize));
1649 +-
1650 + /*
1651 + * Allow the asm code to perform the finalization if there is no
1652 + * partial data and the input is a round multiple of the block size.
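
Both SHA glue files apply the same fix: the removed ASM_EXPORT macro defined absolute assembler symbols from inside a C function, which LLVM's integrated assembler cannot consume, while the replacement exports the structure offsets as ordinary const objects that the .S code loads PC-relatively via ldr_l. Reduced to a standalone sketch (struct and symbol names hypothetical):

    #include <stddef.h>

    /* stand-ins for sha1_state / sha1_ce_state */
    struct base_state {
            unsigned long long count;
            unsigned int digest[5];
    };

    struct ce_state {
            struct base_state sst;
            unsigned int finalize;
    };

    /*
     * Linker-visible constants: the assembly no longer needs absolute
     * symbols baked in at C-compile time and can use relative loads.
     */
    const unsigned int offsetof_count    = offsetof(struct ce_state, sst.count);
    const unsigned int offsetof_finalize = offsetof(struct ce_state, finalize);
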
1653 +diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
1654 +index a9e54aad15ef..65615820155e 100644
1655 +--- a/arch/arm64/include/asm/efi.h
1656 ++++ b/arch/arm64/include/asm/efi.h
1657 +@@ -54,6 +54,9 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
1658 + #define alloc_screen_info(x...) &screen_info
1659 + #define free_screen_info(x...)
1660 +
1661 ++/* redeclare as 'hidden' so the compiler will generate relative references */
1662 ++extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
1663 ++
1664 + static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
1665 + {
1666 + }
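
Redeclaring screen_info with hidden visibility tells the compiler the definition lives in the same load module, so references compile to PC-relative address computations instead of GOT loads; GOT entries are absolute addresses, which the EFI stub cannot rely on before relocations are processed. The effect in isolation (hypothetical symbol):

    /* same attribute as the patch; symbol name is illustrative only */
    extern int some_global __attribute__((__visibility__("hidden")));

    int *some_global_addr(void)
    {
            /* with default visibility this may become a GOT load; with
             * hidden visibility it is a PC-relative adrp/add sequence */
            return &some_global;
    }

The arm64-stub.c hunk further down uses the pragma form of the same idea to cover the section-marker symbols from asm/sections.h.
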
1667 +diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
1668 +index 1d047d6c421b..f5cd96c60eb9 100644
1669 +--- a/arch/arm64/include/asm/uaccess.h
1670 ++++ b/arch/arm64/include/asm/uaccess.h
1671 +@@ -198,7 +198,7 @@ do { \
1672 + (err), ARM64_HAS_UAO); \
1673 + break; \
1674 + case 8: \
1675 +- __get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr), \
1676 ++ __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
1677 + (err), ARM64_HAS_UAO); \
1678 + break; \
1679 + default: \
1680 +@@ -272,7 +272,7 @@ do { \
1681 + (err), ARM64_HAS_UAO); \
1682 + break; \
1683 + case 8: \
1684 +- __put_user_asm("str", "sttr", "%", __pu_val, (ptr), \
1685 ++ __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \
1686 + (err), ARM64_HAS_UAO); \
1687 + break; \
1688 + default: \
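
In the 8-byte cases the template used a bare "%" register prefix, producing e.g. "ldr %1, ...": gcc happens to pick the x register there, but clang rejects the ambiguous operand. "%x" selects the 64-bit register name explicitly, just as "%w" in the narrower cases selects the 32-bit one. A self-contained AArch64 illustration:

    /* AArch64-only sketch of the operand-width modifier */
    static inline unsigned long load64(const unsigned long *p)
    {
            unsigned long val;

            /* "%x0" forces the 64-bit (x) register name in the template */
            asm("ldr %x0, [%1]" : "=r" (val) : "r" (p));
            return val;
    }
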
1689 +diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
1690 +index 3686d6abafde..9edda5466020 100644
1691 +--- a/arch/ia64/kernel/Makefile
1692 ++++ b/arch/ia64/kernel/Makefile
1693 +@@ -50,32 +50,10 @@ CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
1694 + # The gate DSO image is built using a special linker script.
1695 + include $(src)/Makefile.gate
1696 +
1697 +-# Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
1698 +-define sed-y
1699 +- "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
1700 +-endef
1701 +-quiet_cmd_nr_irqs = GEN $@
1702 +-define cmd_nr_irqs
1703 +- (set -e; \
1704 +- echo "#ifndef __ASM_NR_IRQS_H__"; \
1705 +- echo "#define __ASM_NR_IRQS_H__"; \
1706 +- echo "/*"; \
1707 +- echo " * DO NOT MODIFY."; \
1708 +- echo " *"; \
1709 +- echo " * This file was generated by Kbuild"; \
1710 +- echo " *"; \
1711 +- echo " */"; \
1712 +- echo ""; \
1713 +- sed -ne $(sed-y) $<; \
1714 +- echo ""; \
1715 +- echo "#endif" ) > $@
1716 +-endef
1717 +-
1718 + # We use internal kbuild rules to avoid the "is up to date" message from make
1719 + arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
1720 + $(Q)mkdir -p $(dir $@)
1721 + $(call if_changed_dep,cc_s_c)
1722 +
1723 +-include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
1724 +- $(Q)mkdir -p $(dir $@)
1725 +- $(call cmd,nr_irqs)
1726 ++include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s FORCE
1727 ++ $(call filechk,offsets,__ASM_NR_IRQS_H__)
1728 +diff --git a/arch/x86/Makefile b/arch/x86/Makefile
1729 +index f408babdf746..b5226a009973 100644
1730 +--- a/arch/x86/Makefile
1731 ++++ b/arch/x86/Makefile
1732 +@@ -11,6 +11,16 @@ else
1733 + KBUILD_DEFCONFIG := $(ARCH)_defconfig
1734 + endif
1735 +
1736 ++# For gcc stack alignment is specified with -mpreferred-stack-boundary,
1737 ++# clang has the option -mstack-alignment for that purpose.
1738 ++ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
1739 ++ cc_stack_align4 := -mpreferred-stack-boundary=2
1740 ++ cc_stack_align8 := -mpreferred-stack-boundary=3
1741 ++else ifneq ($(call cc-option, -mstack-alignment=16),)
1742 ++ cc_stack_align4 := -mstack-alignment=4
1743 ++ cc_stack_align8 := -mstack-alignment=8
1744 ++endif
1745 ++
1746 + # How to compile the 16-bit code. Note we always compile for -march=i386;
1747 + # that way we can complain to the user if the CPU is insufficient.
1748 + #
1749 +@@ -24,10 +34,11 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \
1750 + -DDISABLE_BRANCH_PROFILING \
1751 + -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
1752 + -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
1753 +- -mno-mmx -mno-sse \
1754 +- $(call cc-option, -ffreestanding) \
1755 +- $(call cc-option, -fno-stack-protector) \
1756 +- $(call cc-option, -mpreferred-stack-boundary=2)
1757 ++ -mno-mmx -mno-sse
1758 ++
1759 ++REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
1760 ++REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
1761 ++REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
1762 + export REALMODE_CFLAGS
1763 +
1764 + # BITS is used as extension for files which are available in a 32 bit
1765 +@@ -64,8 +75,10 @@ ifeq ($(CONFIG_X86_32),y)
1766 + # with nonstandard options
1767 + KBUILD_CFLAGS += -fno-pic
1768 +
1769 +- # prevent gcc from keeping the stack 16 byte aligned
1770 +- KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
1771 ++ # Align the stack to the register width instead of using the default
1772 ++ # alignment of 16 bytes. This reduces stack usage and the number of
1773 ++ # alignment instructions.
1774 ++ KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align4))
1775 +
1776 + # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
1777 + # a lot more stack due to the lack of sharing of stacklots:
1778 +@@ -88,17 +101,23 @@ else
1779 + KBUILD_CFLAGS += -m64
1780 +
1781 + # Align jump targets to 1 byte, not the default 16 bytes:
1782 +- KBUILD_CFLAGS += -falign-jumps=1
1783 ++ KBUILD_CFLAGS += $(call cc-option,-falign-jumps=1)
1784 +
1785 + # Pack loops tightly as well:
1786 +- KBUILD_CFLAGS += -falign-loops=1
1787 ++ KBUILD_CFLAGS += $(call cc-option,-falign-loops=1)
1788 +
1789 + # Don't autogenerate traditional x87 instructions
1790 + KBUILD_CFLAGS += $(call cc-option,-mno-80387)
1791 + KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
1792 +
1793 +- # Use -mpreferred-stack-boundary=3 if supported.
1794 +- KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
1795 ++ # By default gcc and clang use a stack alignment of 16 bytes for x86.
1796 ++ # However the standard kernel entry on x86-64 leaves the stack on an
1797 ++ # 8-byte boundary. If the compiler isn't informed about the actual
1798 ++ # alignment it will generate extra alignment instructions for the
1799 ++ # default alignment which keep the stack *mis*aligned.
1800 ++ # Furthermore an alignment to the register width reduces stack usage
1801 ++ # and the number of alignment instructions.
1802 ++ KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align8))
1803 +
1804 + # Use -mskip-rax-setup if supported.
1805 + KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
1806 +diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
1807 +index 9e240fcba784..08dfce02362c 100644
1808 +--- a/arch/x86/boot/string.c
1809 ++++ b/arch/x86/boot/string.c
1810 +@@ -16,6 +16,15 @@
1811 + #include "ctype.h"
1812 + #include "string.h"
1813 +
1814 ++/*
1815 ++ * Undef these macros so that the functions that we provide
1816 ++ * here will have the correct names regardless of how string.h
1817 ++ * may have chosen to #define them.
1818 ++ */
1819 ++#undef memcpy
1820 ++#undef memset
1821 ++#undef memcmp
1822 ++
1823 + int memcmp(const void *s1, const void *s2, size_t len)
1824 + {
1825 + bool diff;
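
The #undef block guards against string.h (or headers it pulls in) having mapped memcpy/memset/memcmp onto builtins or prefixed helpers: undefining the macros immediately before the definitions guarantees the functions below provide exactly these symbol names. The general pattern, as a sketch:

    #include <stddef.h>

    /* if a header #defined memcpy to something else, drop that mapping
     * so this definition supplies the canonical symbol */
    #undef memcpy

    void *memcpy(void *dst, const void *src, size_t len)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            while (len--)
                    *d++ = *s++;
            return dst;
    }
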
1826 +diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
1827 +index a916c4a61165..5f6a5af9c489 100644
1828 +--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
1829 ++++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
1830 +@@ -65,7 +65,6 @@
1831 + #include <linux/linkage.h>
1832 + #include <asm/inst.h>
1833 +
1834 +-#define CONCAT(a,b) a##b
1835 + #define VMOVDQ vmovdqu
1836 +
1837 + #define xdata0 %xmm0
1838 +@@ -92,8 +91,6 @@
1839 + #define num_bytes %r8
1840 +
1841 + #define tmp %r10
1842 +-#define DDQ(i) CONCAT(ddq_add_,i)
1843 +-#define XMM(i) CONCAT(%xmm, i)
1844 + #define DDQ_DATA 0
1845 + #define XDATA 1
1846 + #define KEY_128 1
1847 +@@ -131,12 +128,12 @@ ddq_add_8:
1848 + /* generate a unique variable for ddq_add_x */
1849 +
1850 + .macro setddq n
1851 +- var_ddq_add = DDQ(\n)
1852 ++ var_ddq_add = ddq_add_\n
1853 + .endm
1854 +
1855 + /* generate a unique variable for xmm register */
1856 + .macro setxdata n
1857 +- var_xdata = XMM(\n)
1858 ++ var_xdata = %xmm\n
1859 + .endm
1860 +
1861 + /* club the numeric 'id' to the symbol 'name' */
1862 +diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
1863 +index 5e23e2d305e7..2cd9496eb696 100644
1864 +--- a/drivers/firmware/efi/libstub/Makefile
1865 ++++ b/drivers/firmware/efi/libstub/Makefile
1866 +@@ -10,8 +10,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
1867 + -fPIC -fno-strict-aliasing -mno-red-zone \
1868 + -mno-mmx -mno-sse
1869 +
1870 +-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
1871 +-cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \
1872 ++cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
1873 ++cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
1874 + -fno-builtin -fpic -mno-single-pic-base
1875 +
1876 + cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
1877 +@@ -60,7 +60,7 @@ CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
1878 + extra-$(CONFIG_EFI_ARMSTUB) := $(lib-y)
1879 + lib-$(CONFIG_EFI_ARMSTUB) := $(patsubst %.o,%.stub.o,$(lib-y))
1880 +
1881 +-STUBCOPY_FLAGS-y := -R .debug* -R *ksymtab* -R *kcrctab*
1882 ++STUBCOPY_RM-y := -R *ksymtab* -R *kcrctab*
1883 + STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
1884 + --prefix-symbols=__efistub_
1885 + STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
1886 +@@ -68,17 +68,25 @@ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
1887 + $(obj)/%.stub.o: $(obj)/%.o FORCE
1888 + $(call if_changed,stubcopy)
1889 +
1890 ++#
1891 ++# Strip debug sections and some other sections that may legally contain
1892 ++# absolute relocations, so that we can inspect the remaining sections for
1893 ++# such relocations. If none are found, regenerate the output object, but
1894 ++# this time, use objcopy and leave all sections in place.
1895 ++#
1896 + quiet_cmd_stubcopy = STUBCPY $@
1897 +- cmd_stubcopy = if $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; then \
1898 +- $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y) \
1899 +- && (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
1900 +- rm -f $@; /bin/false); else /bin/false; fi
1901 ++ cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
1902 ++ then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
1903 ++ then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
1904 ++ rm -f $@; /bin/false); \
1905 ++ else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi \
1906 ++ else /bin/false; fi
1907 +
1908 + #
1909 + # ARM discards the .data section because it disallows r/w data in the
1910 + # decompressor. So move our .data to .data.efistub, which is preserved
1911 + # explicitly by the decompressor linker script.
1912 + #
1913 +-STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
1914 +- -R ___ksymtab+sort -R ___kcrctab+sort
1915 ++STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
1916 ++STUBCOPY_RM-$(CONFIG_ARM) += -R ___ksymtab+sort -R ___kcrctab+sort
1917 + STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
1918 +diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
1919 +index eae693eb3e91..959d9b8d4845 100644
1920 +--- a/drivers/firmware/efi/libstub/arm64-stub.c
1921 ++++ b/drivers/firmware/efi/libstub/arm64-stub.c
1922 +@@ -9,9 +9,17 @@
1923 + * published by the Free Software Foundation.
1924 + *
1925 + */
1926 ++
1927 ++/*
1928 ++ * To prevent the compiler from emitting GOT-indirected (and thus absolute)
1929 ++ * references to the section markers, override their visibility as 'hidden'
1930 ++ */
1931 ++#pragma GCC visibility push(hidden)
1932 ++#include <asm/sections.h>
1933 ++#pragma GCC visibility pop
1934 ++
1935 + #include <linux/efi.h>
1936 + #include <asm/efi.h>
1937 +-#include <asm/sections.h>
1938 + #include <asm/sysreg.h>
1939 +
1940 + #include "efistub.h"
1941 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1942 +index 6250989c83d8..c069a04a6e7e 100644
1943 +--- a/drivers/net/ethernet/broadcom/tg3.c
1944 ++++ b/drivers/net/ethernet/broadcom/tg3.c
1945 +@@ -12389,6 +12389,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
1946 + {
1947 + struct tg3 *tp = netdev_priv(dev);
1948 + int i, irq_sync = 0, err = 0;
1949 ++ bool reset_phy = false;
1950 +
1951 + if ((ering->rx_pending > tp->rx_std_ring_mask) ||
1952 + (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
1953 +@@ -12420,7 +12421,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
1954 +
1955 + if (netif_running(dev)) {
1956 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1957 +- err = tg3_restart_hw(tp, false);
1958 ++ /* Reset PHY to avoid PHY lock up */
1959 ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
1960 ++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
1961 ++ tg3_asic_rev(tp) == ASIC_REV_5720)
1962 ++ reset_phy = true;
1963 ++
1964 ++ err = tg3_restart_hw(tp, reset_phy);
1965 + if (!err)
1966 + tg3_netif_start(tp);
1967 + }
1968 +@@ -12454,6 +12461,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
1969 + {
1970 + struct tg3 *tp = netdev_priv(dev);
1971 + int err = 0;
1972 ++ bool reset_phy = false;
1973 +
1974 + if (tp->link_config.autoneg == AUTONEG_ENABLE)
1975 + tg3_warn_mgmt_link_flap(tp);
1976 +@@ -12544,7 +12552,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
1977 +
1978 + if (netif_running(dev)) {
1979 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1980 +- err = tg3_restart_hw(tp, false);
1981 ++ /* Reset PHY to avoid PHY lock up */
1982 ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
1983 ++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
1984 ++ tg3_asic_rev(tp) == ASIC_REV_5720)
1985 ++ reset_phy = true;
1986 ++
1987 ++ err = tg3_restart_hw(tp, reset_phy);
1988 + if (!err)
1989 + tg3_netif_start(tp);
1990 + }
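
Both ethtool paths now pass reset_phy=true on the 5717/5719/5720 ASICs so that tg3_restart_hw() also resets the PHY, avoiding the lock-up noted in the comments. Since the identical three-chip test appears in both hunks, a shared predicate is one obvious shape for it (hypothetical helper, not part of this patch):

    /* hypothetical consolidation of the duplicated ASIC check */
    static bool tg3_needs_phy_reset(struct tg3 *tp)
    {
            return tg3_asic_rev(tp) == ASIC_REV_5717 ||
                   tg3_asic_rev(tp) == ASIC_REV_5719 ||
                   tg3_asic_rev(tp) == ASIC_REV_5720;
    }

    /* usage: err = tg3_restart_hw(tp, tg3_needs_phy_reset(tp)); */
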
1991 +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
1992 +index a167116ceeee..e29f4c0767eb 100644
1993 +--- a/drivers/net/usb/smsc95xx.c
1994 ++++ b/drivers/net/usb/smsc95xx.c
1995 +@@ -1590,6 +1590,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
1996 + return ret;
1997 + }
1998 +
1999 ++ cancel_delayed_work_sync(&pdata->carrier_check);
2000 ++
2001 + if (pdata->suspend_flags) {
2002 + netdev_warn(dev->net, "error during last resume\n");
2003 + pdata->suspend_flags = 0;
2004 +@@ -1832,6 +1834,11 @@ done:
2005 + */
2006 + if (ret && PMSG_IS_AUTO(message))
2007 + usbnet_resume(intf);
2008 ++
2009 ++ if (ret)
2010 ++ schedule_delayed_work(&pdata->carrier_check,
2011 ++ CARRIER_CHECK_DELAY);
2012 ++
2013 + return ret;
2014 + }
2015 +
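
The suspend path now parks the carrier-check poller before touching the hardware, and the error path (where the device remains active) restarts it. The underlying delayed-work discipline, reduced to a sketch with a hypothetical driver context:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    struct priv {
            struct delayed_work carrier_check;
    };

    static int do_suspend_hw(struct priv *p);      /* hypothetical */

    static int my_suspend(struct priv *p)
    {
            /* wait out an in-flight poll; nothing may touch the
             * hardware once we start suspending it */
            cancel_delayed_work_sync(&p->carrier_check);

            if (do_suspend_hw(p)) {
                    /* suspend failed, link monitoring must resume */
                    schedule_delayed_work(&p->carrier_check,
                                          msecs_to_jiffies(1000));
                    return -EIO;
            }
            return 0;
    }
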
2016 +diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
2017 +index 8feab810aed9..7f188b8d0c67 100644
2018 +--- a/drivers/xen/Makefile
2019 ++++ b/drivers/xen/Makefile
2020 +@@ -7,9 +7,6 @@ obj-y += xenbus/
2021 + nostackp := $(call cc-option, -fno-stack-protector)
2022 + CFLAGS_features.o := $(nostackp)
2023 +
2024 +-CFLAGS_efi.o += -fshort-wchar
2025 +-LDFLAGS += $(call ld-option, --no-wchar-size-warning)
2026 +-
2027 + dom0-$(CONFIG_ARM64) += arm-device.o
2028 + dom0-$(CONFIG_PCI) += pci.o
2029 + dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
2030 +diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h
2031 +index 22a72198c14b..4e80f3a9ad58 100644
2032 +--- a/include/linux/kbuild.h
2033 ++++ b/include/linux/kbuild.h
2034 +@@ -2,14 +2,14 @@
2035 + #define __LINUX_KBUILD_H
2036 +
2037 + #define DEFINE(sym, val) \
2038 +- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
2039 ++ asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
2040 +
2041 +-#define BLANK() asm volatile("\n->" : : )
2042 ++#define BLANK() asm volatile("\n.ascii \"->\"" : : )
2043 +
2044 + #define OFFSET(sym, str, mem) \
2045 + DEFINE(sym, offsetof(struct str, mem))
2046 +
2047 + #define COMMENT(x) \
2048 +- asm volatile("\n->#" x)
2049 ++ asm volatile("\n.ascii \"->#" x "\"")
2050 +
2051 + #endif
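
The old macros emitted a bare "->SYM val" marker, which only works because gcc passes inline asm through unparsed and the generated .s file is scraped with sed rather than assembled; clang validates inline asm with its integrated assembler even when only emitting assembly, and a bare "->" fails that validation. Wrapping the marker in a .ascii directive makes it legal assembler input while keeping it greppable. Concretely (names and value hypothetical):

    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    enum { MY_OFF = 24 };                  /* hypothetical value */

    void emit(void)
    {
            /* compiling with -S yields the line
             *     .ascii "->MY_OFF_ASM $24 MY_OFF"
             * which the sed-offsets rule (see scripts/Makefile.lib below)
             * rewrites into "#define MY_OFF_ASM 24" plus a comment */
            DEFINE(MY_OFF_ASM, MY_OFF);
    }
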
2052 +diff --git a/include/linux/module.h b/include/linux/module.h
2053 +index d2224a09b4b5..fd9e121c7b3f 100644
2054 +--- a/include/linux/module.h
2055 ++++ b/include/linux/module.h
2056 +@@ -127,13 +127,13 @@ extern void cleanup_module(void);
2057 +
2058 + /* Each module must use one module_init(). */
2059 + #define module_init(initfn) \
2060 +- static inline initcall_t __inittest(void) \
2061 ++ static inline initcall_t __maybe_unused __inittest(void) \
2062 + { return initfn; } \
2063 + int init_module(void) __attribute__((alias(#initfn)));
2064 +
2065 + /* This is only required if you want to be unloadable. */
2066 + #define module_exit(exitfn) \
2067 +- static inline exitcall_t __exittest(void) \
2068 ++ static inline exitcall_t __maybe_unused __exittest(void) \
2069 + { return exitfn; } \
2070 + void cleanup_module(void) __attribute__((alias(#exitfn)));
2071 +
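
__inittest()/__exittest() exist only so the compiler type-checks initfn/exitfn against the initcall/exitcall signatures; they are never called, and clang warns about the unused static inline where gcc stays quiet. __maybe_unused (the kernel's wrapper for __attribute__((unused))) silences the warning without changing code generation. The mechanism in isolation:

    #define __maybe_unused __attribute__((unused))

    typedef int (*initcall_t)(void);

    static int my_init(void)
    {
            return 0;
    }

    /* never called: exists purely so the return statement type-checks
     * my_init against initcall_t at compile time */
    static inline initcall_t __maybe_unused __inittest(void)
    {
            return my_init;
    }
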
2072 +diff --git a/net/core/dev.c b/net/core/dev.c
2073 +index 15e3bb94156b..071c589f7994 100644
2074 +--- a/net/core/dev.c
2075 ++++ b/net/core/dev.c
2076 +@@ -4756,6 +4756,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2077 + skb->vlan_tci = 0;
2078 + skb->dev = napi->dev;
2079 + skb->skb_iif = 0;
2080 ++
2081 ++ /* eth_type_trans() assumes pkt_type is PACKET_HOST */
2082 ++ skb->pkt_type = PACKET_HOST;
2083 ++
2084 + skb->encapsulation = 0;
2085 + skb_shinfo(skb)->gso_type = 0;
2086 + skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
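
napi_reuse_skb() recycles an skb for the next GRO cycle, after which eth_type_trans() reclassifies it; but eth_type_trans() only writes pkt_type for multicast, broadcast and foreign destination MACs, silently assuming the field already holds PACKET_HOST. A recycled skb could therefore carry a stale PACKET_OTHERHOST and have its traffic dropped. The assumption, sketched (logic simplified, constants illustrative):

    #include <string.h>

    enum { PACKET_HOST, PACKET_MULTICAST, PACKET_OTHERHOST };

    /* simplified shape of eth_type_trans()'s classification */
    static int classify(const unsigned char *dst_mac,
                        const unsigned char *dev_mac, int pkt_type_in)
    {
            if (dst_mac[0] & 1)                 /* multicast/broadcast bit */
                    return PACKET_MULTICAST;
            if (memcmp(dst_mac, dev_mac, 6))    /* addressed elsewhere */
                    return PACKET_OTHERHOST;
            return pkt_type_in;                 /* untouched: assumed PACKET_HOST */
    }
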
2087 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
2088 +index 862d63ec56e4..ab7c50026cae 100644
2089 +--- a/net/core/flow_dissector.c
2090 ++++ b/net/core/flow_dissector.c
2091 +@@ -538,8 +538,8 @@ ip_proto_again:
2092 + break;
2093 + }
2094 +
2095 +- if (dissector_uses_key(flow_dissector,
2096 +- FLOW_DISSECTOR_KEY_PORTS)) {
2097 ++ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
2098 ++ !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
2099 + key_ports = skb_flow_dissector_target(flow_dissector,
2100 + FLOW_DISSECTOR_KEY_PORTS,
2101 + target_container);
2102 +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
2103 +index 8323d33c0ce2..5a8c26c9872d 100644
2104 +--- a/net/ipv4/inet_fragment.c
2105 ++++ b/net/ipv4/inet_fragment.c
2106 +@@ -180,21 +180,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
2107 + }
2108 +
2109 + static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
2110 +- void *arg)
2111 ++ void *arg,
2112 ++ struct inet_frag_queue **prev)
2113 + {
2114 + struct inet_frags *f = nf->f;
2115 + struct inet_frag_queue *q;
2116 +- int err;
2117 +
2118 + q = inet_frag_alloc(nf, f, arg);
2119 +- if (!q)
2120 ++ if (!q) {
2121 ++ *prev = ERR_PTR(-ENOMEM);
2122 + return NULL;
2123 +-
2124 ++ }
2125 + mod_timer(&q->timer, jiffies + nf->timeout);
2126 +
2127 +- err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
2128 +- f->rhash_params);
2129 +- if (err < 0) {
2130 ++ *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
2131 ++ &q->node, f->rhash_params);
2132 ++ if (*prev) {
2133 + q->flags |= INET_FRAG_COMPLETE;
2134 + inet_frag_kill(q);
2135 + inet_frag_destroy(q);
2136 +@@ -207,17 +208,18 @@ EXPORT_SYMBOL(inet_frag_create);
2137 + /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
2138 + struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
2139 + {
2140 +- struct inet_frag_queue *fq;
2141 ++ struct inet_frag_queue *fq = NULL, *prev;
2142 +
2143 + rcu_read_lock();
2144 +- fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
2145 +- if (fq) {
2146 ++ prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
2147 ++ if (!prev)
2148 ++ fq = inet_frag_create(nf, key, &prev);
2149 ++ if (prev && !IS_ERR(prev)) {
2150 ++ fq = prev;
2151 + if (!atomic_inc_not_zero(&fq->refcnt))
2152 + fq = NULL;
2153 +- rcu_read_unlock();
2154 +- return fq;
2155 + }
2156 + rcu_read_unlock();
2157 +- return inet_frag_create(nf, key);
2158 ++ return fq;
2159 + }
2160 + EXPORT_SYMBOL(inet_frag_find);
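
Previously a racing insert was treated as a hard failure; rhashtable_lookup_get_insert_key() instead returns the entry that won the race, so the loser destroys its freshly allocated queue and adopts the winner's. The lookup-or-create shape, sketched against a hypothetical table API:

    struct entry;

    /* hypothetical API: insert returns NULL on success, or the
     * pre-existing entry when another CPU inserted first */
    struct entry *table_lookup(const void *key);
    struct entry *table_insert_unless_exists(struct entry *e);
    struct entry *entry_alloc(const void *key);
    void entry_destroy(struct entry *e);
    int entry_tryget(struct entry *e);          /* refcount_inc_not_zero */

    struct entry *lookup_or_create(const void *key)
    {
            struct entry *e = table_lookup(key);
            struct entry *prev;

            if (e)
                    return entry_tryget(e) ? e : NULL;

            e = entry_alloc(key);
            if (!e)
                    return NULL;

            prev = table_insert_unless_exists(e);
            if (prev) {
                    entry_destroy(e);           /* lost the race */
                    return entry_tryget(prev) ? prev : NULL;
            }
            return e;                           /* fresh entry, ref held */
    }
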
2161 +diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
2162 +index 0fd1976ab63b..2220a1b396af 100644
2163 +--- a/net/ipv4/ip_tunnel_core.c
2164 ++++ b/net/ipv4/ip_tunnel_core.c
2165 +@@ -80,7 +80,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
2166 +
2167 + iph->version = 4;
2168 + iph->ihl = sizeof(struct iphdr) >> 2;
2169 +- iph->frag_off = df;
2170 ++ iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
2171 + iph->protocol = proto;
2172 + iph->tos = tos;
2173 + iph->daddr = dst;
2174 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2175 +index 4cc12eeca7ab..0db120d2a4fe 100644
2176 +--- a/net/ipv6/route.c
2177 ++++ b/net/ipv6/route.c
2178 +@@ -1439,10 +1439,13 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2179 +
2180 + void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2181 + {
2182 ++ int oif = sk->sk_bound_dev_if;
2183 + struct dst_entry *dst;
2184 +
2185 +- ip6_update_pmtu(skb, sock_net(sk), mtu,
2186 +- sk->sk_bound_dev_if, sk->sk_mark);
2187 ++ if (!oif && skb->dev)
2188 ++ oif = l3mdev_master_ifindex(skb->dev);
2189 ++
2190 ++ ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark);
2191 +
2192 + dst = __sk_dst_get(sk);
2193 + if (!dst || !dst->obsolete ||
2194 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2195 +index 9827ba4b9f74..93e60068800b 100644
2196 +--- a/net/sctp/socket.c
2197 ++++ b/net/sctp/socket.c
2198 +@@ -3732,32 +3732,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
2199 + unsigned int optlen)
2200 + {
2201 + struct sctp_assoc_value params;
2202 +- struct sctp_association *asoc;
2203 +- int retval = -EINVAL;
2204 +
2205 + if (optlen != sizeof(params))
2206 +- goto out;
2207 +-
2208 +- if (copy_from_user(&params, optval, optlen)) {
2209 +- retval = -EFAULT;
2210 +- goto out;
2211 +- }
2212 +-
2213 +- asoc = sctp_id2assoc(sk, params.assoc_id);
2214 +- if (asoc) {
2215 +- asoc->prsctp_enable = !!params.assoc_value;
2216 +- } else if (!params.assoc_id) {
2217 +- struct sctp_sock *sp = sctp_sk(sk);
2218 ++ return -EINVAL;
2219 +
2220 +- sp->ep->prsctp_enable = !!params.assoc_value;
2221 +- } else {
2222 +- goto out;
2223 +- }
2224 ++ if (copy_from_user(&params, optval, optlen))
2225 ++ return -EFAULT;
2226 +
2227 +- retval = 0;
2228 ++ sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
2229 +
2230 +-out:
2231 +- return retval;
2232 ++ return 0;
2233 + }
2234 +
2235 + static int sctp_setsockopt_default_prinfo(struct sock *sk,
2236 +diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
2237 +index 8f8965608ee3..123840d827e8 100644
2238 +--- a/scripts/Kbuild.include
2239 ++++ b/scripts/Kbuild.include
2240 +@@ -109,6 +109,11 @@ as-option = $(call try-run,\
2241 + as-instr = $(call try-run,\
2242 + printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
2243 +
2244 ++# __cc-option
2245 ++# Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
2246 ++__cc-option = $(call try-run,\
2247 ++ $(1) -Werror $(2) $(3) -c -x c /dev/null -o "$$TMP",$(3),$(4))
2248 ++
2249 + # Do not attempt to build with gcc plugins during cc-option tests.
2250 + # (And this uses delayed resolution so the flags will be up to date.)
2251 + CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
2252 +@@ -116,13 +121,18 @@ CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
2253 + # cc-option
2254 + # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
2255 +
2256 +-cc-option = $(call try-run,\
2257 +- $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
2258 ++cc-option = $(call __cc-option, $(CC),\
2259 ++ $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS),$(1),$(2))
2260 ++
2261 ++# hostcc-option
2262 ++# Usage: cflags-y += $(call hostcc-option,-march=winchip-c6,-march=i586)
2263 ++hostcc-option = $(call __cc-option, $(HOSTCC),\
2264 ++ $(HOSTCFLAGS) $(HOST_EXTRACFLAGS),$(1),$(2))
2265 +
2266 + # cc-option-yn
2267 + # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
2268 + cc-option-yn = $(call try-run,\
2269 +- $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
2270 ++ $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
2271 +
2272 + # cc-option-align
2273 + # Prefix align with either -falign or -malign
2274 +@@ -132,7 +142,7 @@ cc-option-align = $(subst -functions=0,,\
2275 + # cc-disable-warning
2276 + # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
2277 + cc-disable-warning = $(call try-run,\
2278 +- $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
2279 ++ $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
2280 +
2281 + # cc-name
2282 + # Expands to either gcc or clang
2283 +diff --git a/scripts/Makefile.build b/scripts/Makefile.build
2284 +index abfd4f4b66dd..6228a83156ea 100644
2285 +--- a/scripts/Makefile.build
2286 ++++ b/scripts/Makefile.build
2287 +@@ -176,6 +176,14 @@ cmd_cc_symtypes_c = \
2288 + $(obj)/%.symtypes : $(src)/%.c FORCE
2289 + $(call cmd,cc_symtypes_c)
2290 +
2291 ++# LLVM assembly
2292 ++# Generate .ll files from .c
2293 ++quiet_cmd_cc_ll_c = CC $(quiet_modtag) $@
2294 ++ cmd_cc_ll_c = $(CC) $(c_flags) -emit-llvm -S -o $@ $<
2295 ++
2296 ++$(obj)/%.ll: $(src)/%.c FORCE
2297 ++ $(call if_changed_dep,cc_ll_c)
2298 ++
2299 + # C (.c) files
2300 + # The C file is compiled and updated dependency information is generated.
2301 + # (See cmd_cc_o_c + relevant part of rule_cc_o_c)
2302 +diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
2303 +index 7c321a603b07..fb3522fd8702 100644
2304 +--- a/scripts/Makefile.extrawarn
2305 ++++ b/scripts/Makefile.extrawarn
2306 +@@ -64,7 +64,6 @@ ifeq ($(cc-name),clang)
2307 + KBUILD_CFLAGS += $(call cc-disable-warning, initializer-overrides)
2308 + KBUILD_CFLAGS += $(call cc-disable-warning, unused-value)
2309 + KBUILD_CFLAGS += $(call cc-disable-warning, format)
2310 +-KBUILD_CFLAGS += $(call cc-disable-warning, unknown-warning-option)
2311 + KBUILD_CFLAGS += $(call cc-disable-warning, sign-compare)
2312 + KBUILD_CFLAGS += $(call cc-disable-warning, format-zero-length)
2313 + KBUILD_CFLAGS += $(call cc-disable-warning, uninitialized)
2314 +diff --git a/scripts/Makefile.host b/scripts/Makefile.host
2315 +index 45b5b1aaedbd..9cfd5c84d76f 100644
2316 +--- a/scripts/Makefile.host
2317 ++++ b/scripts/Makefile.host
2318 +@@ -20,12 +20,6 @@
2319 + # Will compile qconf as a C++ program, and menu as a C program.
2320 + # They are linked as C++ code to the executable qconf
2321 +
2322 +-# hostcc-option
2323 +-# Usage: cflags-y += $(call hostcc-option,-march=winchip-c6,-march=i586)
2324 +-
2325 +-hostcc-option = $(call try-run,\
2326 +- $(HOSTCC) $(HOSTCFLAGS) $(HOST_EXTRACFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
2327 +-
2328 + __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
2329 + host-cshlib := $(sort $(hostlibs-y) $(hostlibs-m))
2330 + host-cxxshlib := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
2331 +diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
2332 +index c954040c3cf2..4e02d51dfc62 100644
2333 +--- a/scripts/Makefile.lib
2334 ++++ b/scripts/Makefile.lib
2335 +@@ -408,3 +408,34 @@ quiet_cmd_xzmisc = XZMISC $@
2336 + cmd_xzmisc = (cat $(filter-out FORCE,$^) | \
2337 + xz --check=crc32 --lzma2=dict=1MiB) > $@ || \
2338 + (rm -f $@ ; false)
2339 ++
2340 ++# ASM offsets
2341 ++# ---------------------------------------------------------------------------
2342 ++
2343 ++# Default sed regexp - multiline due to syntax constraints
2344 ++#
2345 ++# Use [:space:] because LLVM's integrated assembler inserts <tab> around
2346 ++# the .ascii directive whereas GCC keeps the <space> as-is.
2347 ++define sed-offsets
2348 ++ 's:^[[:space:]]*\.ascii[[:space:]]*"\(.*\)".*:\1:; \
2349 ++ /^->/{s:->#\(.*\):/* \1 */:; \
2350 ++ s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
2351 ++ s:->::; p;}'
2352 ++endef
2353 ++
2354 ++# Use filechk to avoid rebuilds when a header changes, but the resulting file
2355 ++# does not
2356 ++define filechk_offsets
2357 ++ (set -e; \
2358 ++ echo "#ifndef $2"; \
2359 ++ echo "#define $2"; \
2360 ++ echo "/*"; \
2361 ++ echo " * DO NOT MODIFY."; \
2362 ++ echo " *"; \
2363 ++ echo " * This file was generated by Kbuild"; \
2364 ++ echo " */"; \
2365 ++ echo ""; \
2366 ++ sed -ne $(sed-offsets); \
2367 ++ echo ""; \
2368 ++ echo "#endif" )
2369 ++endef
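
filechk_offsets and sed-offsets move here so that the ia64 nr-irqs rule and scripts/mod can share them (see the hunks above and below); filechk also leaves the header untouched when its content is unchanged, avoiding spurious rebuilds. Given a compiler-generated line such as .ascii "->NR_IRQS $8 nr_irqs", the emitted header looks like this (values hypothetical):

    #ifndef __ASM_NR_IRQS_H__
    #define __ASM_NR_IRQS_H__
    /*
     * DO NOT MODIFY.
     *
     * This file was generated by Kbuild
     */

    #define NR_IRQS 8 /* nr_irqs */

    #endif
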
2370 +diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile
2371 +index 19d9bcadc0cc..b497d9764dcf 100644
2372 +--- a/scripts/mod/Makefile
2373 ++++ b/scripts/mod/Makefile
2374 +@@ -7,32 +7,8 @@ modpost-objs := modpost.o file2alias.o sumversion.o
2375 +
2376 + devicetable-offsets-file := devicetable-offsets.h
2377 +
2378 +-define sed-y
2379 +- "/^->/{s:->#\(.*\):/* \1 */:; \
2380 +- s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
2381 +- s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
2382 +- s:->::; p;}"
2383 +-endef
2384 +-
2385 +-quiet_cmd_offsets = GEN $@
2386 +-define cmd_offsets
2387 +- (set -e; \
2388 +- echo "#ifndef __DEVICETABLE_OFFSETS_H__"; \
2389 +- echo "#define __DEVICETABLE_OFFSETS_H__"; \
2390 +- echo "/*"; \
2391 +- echo " * DO NOT MODIFY."; \
2392 +- echo " *"; \
2393 +- echo " * This file was generated by Kbuild"; \
2394 +- echo " *"; \
2395 +- echo " */"; \
2396 +- echo ""; \
2397 +- sed -ne $(sed-y) $<; \
2398 +- echo ""; \
2399 +- echo "#endif" ) > $@
2400 +-endef
2401 +-
2402 +-$(obj)/$(devicetable-offsets-file): $(obj)/devicetable-offsets.s
2403 +- $(call if_changed,offsets)
2404 ++$(obj)/$(devicetable-offsets-file): $(obj)/devicetable-offsets.s FORCE
2405 ++ $(call filechk,offsets,__DEVICETABLE_OFFSETS_H__)
2406 +
2407 + targets += $(devicetable-offsets-file) devicetable-offsets.s
2408 +