Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Sat, 02 Apr 2022 16:32:56
Message-Id: 1648917150.5e8982d6c61e5a378861f06965c7fb3c651f1902.mpagano@gentoo
1 commit: 5e8982d6c61e5a378861f06965c7fb3c651f1902
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Apr 2 16:32:30 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat Apr 2 16:32:30 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5e8982d6c61e5a378861f06965c7fb3c651f1902
7
8 Linux patch 4.14.275
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1274_linux-4.14.275.patch | 1544 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1548 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 1469ce24..572ede3f 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -1143,6 +1143,10 @@ Patch: 1273_linux-4.14.274.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.14.274
23
24 +Patch: 1274_linux-4.14.275.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.14.275
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1274_linux-4.14.275.patch b/1274_linux-4.14.275.patch
33 new file mode 100644
34 index 00000000..32419e47
35 --- /dev/null
36 +++ b/1274_linux-4.14.275.patch
37 @@ -0,0 +1,1544 @@
38 +diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
39 +index e4fe6adc372bf..42f5672e89179 100644
40 +--- a/Documentation/arm64/silicon-errata.txt
41 ++++ b/Documentation/arm64/silicon-errata.txt
42 +@@ -56,6 +56,7 @@ stable kernels.
43 + | ARM | Cortex-A72 | #853709 | N/A |
44 + | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
45 + | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
46 ++| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
47 + | ARM | MMU-500 | #841119,#826419 | N/A |
48 + | | | | |
49 + | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
50 +diff --git a/Makefile b/Makefile
51 +index a06abc38f35db..cad522127bb90 100644
52 +--- a/Makefile
53 ++++ b/Makefile
54 +@@ -1,7 +1,7 @@
55 + # SPDX-License-Identifier: GPL-2.0
56 + VERSION = 4
57 + PATCHLEVEL = 14
58 +-SUBLEVEL = 274
59 ++SUBLEVEL = 275
60 + EXTRAVERSION =
61 + NAME = Petit Gorille
62 +
63 +diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
64 +index b602326399845..dbd9615b428c3 100644
65 +--- a/arch/arm/include/asm/kvm_host.h
66 ++++ b/arch/arm/include/asm/kvm_host.h
67 +@@ -26,6 +26,7 @@
68 + #include <asm/kvm_asm.h>
69 + #include <asm/kvm_mmio.h>
70 + #include <asm/fpstate.h>
71 ++#include <asm/spectre.h>
72 + #include <kvm/arm_arch_timer.h>
73 +
74 + #define __KVM_HAVE_ARCH_INTC_INITIALIZED
75 +@@ -324,4 +325,9 @@ static inline int kvm_arm_have_ssbd(void)
76 + return KVM_SSBD_UNKNOWN;
77 + }
78 +
79 ++static inline int kvm_arm_get_spectre_bhb_state(void)
80 ++{
81 ++ /* 32bit guests don't need firmware for this */
82 ++ return SPECTRE_VULNERABLE; /* aka SMCCC_RET_NOT_SUPPORTED */
83 ++}
84 + #endif /* __ARM_KVM_HOST_H__ */
85 +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
86 +index e76f74874a420..7605d2f00d553 100644
87 +--- a/arch/arm64/Kconfig
88 ++++ b/arch/arm64/Kconfig
89 +@@ -458,6 +458,20 @@ config ARM64_ERRATUM_1024718
90 +
91 + If unsure, say Y.
92 +
93 ++config ARM64_ERRATUM_1188873
94 ++ bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
95 ++ default y
96 ++ depends on COMPAT
97 ++ select ARM_ARCH_TIMER_OOL_WORKAROUND
98 ++ help
99 ++ This option adds work arounds for ARM Cortex-A76 erratum 1188873
100 ++
101 ++ Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could cause
102 ++ register corruption when accessing the timer registers from
103 ++ AArch32 userspace.
104 ++
105 ++ If unsure, say Y.
106 ++
107 + config CAVIUM_ERRATUM_22375
108 + bool "Cavium erratum 22375, 24313"
109 + default y
110 +@@ -858,6 +872,16 @@ config ARM64_SSBD
111 +
112 + If unsure, say Y.
113 +
114 ++config MITIGATE_SPECTRE_BRANCH_HISTORY
115 ++ bool "Mitigate Spectre style attacks against branch history" if EXPERT
116 ++ default y
117 ++ depends on HARDEN_BRANCH_PREDICTOR || !KVM
118 ++ help
119 ++ Speculation attacks against some high-performance processors can
120 ++ make use of branch history to influence future speculation.
121 ++ When taking an exception from user-space, a sequence of branches
122 ++ or a firmware call overwrites the branch history.
123 ++
124 + menuconfig ARMV8_DEPRECATED
125 + bool "Emulate deprecated/obsolete ARMv8 instructions"
126 + depends on COMPAT
127 +diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
128 +index 02d73d83f0deb..6b38f3b3095a3 100644
129 +--- a/arch/arm64/include/asm/assembler.h
130 ++++ b/arch/arm64/include/asm/assembler.h
131 +@@ -103,6 +103,13 @@
132 + hint #20
133 + .endm
134 +
135 ++/*
136 ++ * Clear Branch History instruction
137 ++ */
138 ++ .macro clearbhb
139 ++ hint #22
140 ++ .endm
141 ++
142 + /*
143 + * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
144 + * of bounds.
145 +@@ -549,4 +556,31 @@ alternative_endif
146 + .Ldone\@:
147 + .endm
148 +
149 ++ .macro __mitigate_spectre_bhb_loop tmp
150 ++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
151 ++alternative_cb spectre_bhb_patch_loop_iter
152 ++ mov \tmp, #32 // Patched to correct the immediate
153 ++alternative_cb_end
154 ++.Lspectre_bhb_loop\@:
155 ++ b . + 4
156 ++ subs \tmp, \tmp, #1
157 ++ b.ne .Lspectre_bhb_loop\@
158 ++ dsb nsh
159 ++ isb
160 ++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
161 ++ .endm
162 ++
163 ++ /* Save/restores x0-x3 to the stack */
164 ++ .macro __mitigate_spectre_bhb_fw
165 ++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
166 ++ stp x0, x1, [sp, #-16]!
167 ++ stp x2, x3, [sp, #-16]!
168 ++ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
169 ++alternative_cb arm64_update_smccc_conduit
170 ++ nop // Patched to SMC/HVC #0
171 ++alternative_cb_end
172 ++ ldp x2, x3, [sp], #16
173 ++ ldp x0, x1, [sp], #16
174 ++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
175 ++ .endm
176 + #endif /* __ASM_ASSEMBLER_H */
177 +diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
178 +index 889226b4c6e1c..c7f17e663e729 100644
179 +--- a/arch/arm64/include/asm/cpu.h
180 ++++ b/arch/arm64/include/asm/cpu.h
181 +@@ -36,6 +36,7 @@ struct cpuinfo_arm64 {
182 + u64 reg_id_aa64dfr1;
183 + u64 reg_id_aa64isar0;
184 + u64 reg_id_aa64isar1;
185 ++ u64 reg_id_aa64isar2;
186 + u64 reg_id_aa64mmfr0;
187 + u64 reg_id_aa64mmfr1;
188 + u64 reg_id_aa64mmfr2;
189 +diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
190 +index 2f8bd0388905d..20ca422eb094a 100644
191 +--- a/arch/arm64/include/asm/cpucaps.h
192 ++++ b/arch/arm64/include/asm/cpucaps.h
193 +@@ -45,7 +45,9 @@
194 + #define ARM64_SSBD 25
195 + #define ARM64_MISMATCHED_CACHE_TYPE 26
196 + #define ARM64_SSBS 27
197 ++#define ARM64_WORKAROUND_1188873 28
198 ++#define ARM64_SPECTRE_BHB 29
199 +
200 +-#define ARM64_NCAPS 28
201 ++#define ARM64_NCAPS 30
202 +
203 + #endif /* __ASM_CPUCAPS_H */
204 +diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
205 +index 166f81b7afee6..3e9d042d1b1e7 100644
206 +--- a/arch/arm64/include/asm/cpufeature.h
207 ++++ b/arch/arm64/include/asm/cpufeature.h
208 +@@ -456,6 +456,34 @@ static inline bool cpu_supports_mixed_endian_el0(void)
209 + return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
210 + }
211 +
212 ++static inline bool supports_csv2p3(int scope)
213 ++{
214 ++ u64 pfr0;
215 ++ u8 csv2_val;
216 ++
217 ++ if (scope == SCOPE_LOCAL_CPU)
218 ++ pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
219 ++ else
220 ++ pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
221 ++
222 ++ csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
223 ++ ID_AA64PFR0_CSV2_SHIFT);
224 ++ return csv2_val == 3;
225 ++}
226 ++
227 ++static inline bool supports_clearbhb(int scope)
228 ++{
229 ++ u64 isar2;
230 ++
231 ++ if (scope == SCOPE_LOCAL_CPU)
232 ++ isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
233 ++ else
234 ++ isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
235 ++
236 ++ return cpuid_feature_extract_unsigned_field(isar2,
237 ++ ID_AA64ISAR2_CLEARBHB_SHIFT);
238 ++}
239 ++
240 + static inline bool system_supports_32bit_el0(void)
241 + {
242 + return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
243 +@@ -495,6 +523,17 @@ static inline int arm64_get_ssbd_state(void)
244 +
245 + void arm64_set_ssbd_mitigation(bool state);
246 +
247 ++/* Watch out, ordering is important here. */
248 ++enum mitigation_state {
249 ++ SPECTRE_UNAFFECTED,
250 ++ SPECTRE_MITIGATED,
251 ++ SPECTRE_VULNERABLE,
252 ++};
253 ++
254 ++enum mitigation_state arm64_get_spectre_bhb_state(void);
255 ++bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
256 ++u8 spectre_bhb_loop_affected(int scope);
257 ++void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
258 + #endif /* __ASSEMBLY__ */
259 +
260 + #endif
261 +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
262 +index b23456035eac5..401088d9cd827 100644
263 +--- a/arch/arm64/include/asm/cputype.h
264 ++++ b/arch/arm64/include/asm/cputype.h
265 +@@ -87,6 +87,16 @@
266 + #define ARM_CPU_PART_CORTEX_A75 0xD0A
267 + #define ARM_CPU_PART_CORTEX_A35 0xD04
268 + #define ARM_CPU_PART_CORTEX_A55 0xD05
269 ++#define ARM_CPU_PART_CORTEX_A76 0xD0B
270 ++#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
271 ++#define ARM_CPU_PART_CORTEX_A77 0xD0D
272 ++#define ARM_CPU_PART_NEOVERSE_V1 0xD40
273 ++#define ARM_CPU_PART_CORTEX_A78 0xD41
274 ++#define ARM_CPU_PART_CORTEX_X1 0xD44
275 ++#define ARM_CPU_PART_CORTEX_A710 0xD47
276 ++#define ARM_CPU_PART_CORTEX_X2 0xD48
277 ++#define ARM_CPU_PART_NEOVERSE_N2 0xD49
278 ++#define ARM_CPU_PART_CORTEX_A78C 0xD4B
279 +
280 + #define APM_CPU_PART_POTENZA 0x000
281 +
282 +@@ -112,6 +122,16 @@
283 + #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
284 + #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
285 + #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
286 ++#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
287 ++#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
288 ++#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
289 ++#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
290 ++#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
291 ++#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
292 ++#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
293 ++#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
294 ++#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
295 ++#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
296 + #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
297 + #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
298 + #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
299 +diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
300 +index ec1e6d6fa14cc..3c962ef081f84 100644
301 +--- a/arch/arm64/include/asm/fixmap.h
302 ++++ b/arch/arm64/include/asm/fixmap.h
303 +@@ -59,9 +59,11 @@ enum fixed_addresses {
304 + #endif /* CONFIG_ACPI_APEI_GHES */
305 +
306 + #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
307 ++ FIX_ENTRY_TRAMP_TEXT3,
308 ++ FIX_ENTRY_TRAMP_TEXT2,
309 ++ FIX_ENTRY_TRAMP_TEXT1,
310 + FIX_ENTRY_TRAMP_DATA,
311 +- FIX_ENTRY_TRAMP_TEXT,
312 +-#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
313 ++#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
314 + #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
315 + __end_of_permanent_fixed_addresses,
316 +
317 +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
318 +index 8d94404829f0f..be82119ed24a9 100644
319 +--- a/arch/arm64/include/asm/kvm_host.h
320 ++++ b/arch/arm64/include/asm/kvm_host.h
321 +@@ -450,4 +450,9 @@ static inline int kvm_arm_have_ssbd(void)
322 + }
323 + }
324 +
325 ++static inline enum mitigation_state kvm_arm_get_spectre_bhb_state(void)
326 ++{
327 ++ return arm64_get_spectre_bhb_state();
328 ++}
329 ++
330 + #endif /* __ARM64_KVM_HOST_H__ */
331 +diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
332 +index 47ba6a57dc457..04c7c4596240a 100644
333 +--- a/arch/arm64/include/asm/kvm_mmu.h
334 ++++ b/arch/arm64/include/asm/kvm_mmu.h
335 +@@ -358,7 +358,7 @@ static inline void *kvm_get_hyp_vector(void)
336 + struct bp_hardening_data *data = arm64_get_bp_hardening_data();
337 + void *vect = kvm_ksym_ref(__kvm_hyp_vector);
338 +
339 +- if (data->fn) {
340 ++ if (data->template_start) {
341 + vect = __bp_harden_hyp_vecs_start +
342 + data->hyp_vectors_slot * SZ_2K;
343 +
344 +diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
345 +index 6dd83d75b82ab..5a77dc775cc3c 100644
346 +--- a/arch/arm64/include/asm/mmu.h
347 ++++ b/arch/arm64/include/asm/mmu.h
348 +@@ -35,7 +35,7 @@ typedef struct {
349 + */
350 + #define ASID(mm) ((mm)->context.id.counter & 0xffff)
351 +
352 +-static inline bool arm64_kernel_unmapped_at_el0(void)
353 ++static __always_inline bool arm64_kernel_unmapped_at_el0(void)
354 + {
355 + return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
356 + cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
357 +@@ -46,6 +46,12 @@ typedef void (*bp_hardening_cb_t)(void);
358 + struct bp_hardening_data {
359 + int hyp_vectors_slot;
360 + bp_hardening_cb_t fn;
361 ++
362 ++ /*
363 ++ * template_start is only used by the BHB mitigation to identify the
364 ++ * hyp_vectors_slot sequence.
365 ++ */
366 ++ const char *template_start;
367 + };
368 +
369 + #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
370 +diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
371 +index 941267caa39c2..8d3f1eab58e04 100644
372 +--- a/arch/arm64/include/asm/sections.h
373 ++++ b/arch/arm64/include/asm/sections.h
374 +@@ -28,5 +28,11 @@ extern char __initdata_begin[], __initdata_end[];
375 + extern char __inittext_begin[], __inittext_end[];
376 + extern char __irqentry_text_start[], __irqentry_text_end[];
377 + extern char __mmuoff_data_start[], __mmuoff_data_end[];
378 ++extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
379 ++
380 ++static inline size_t entry_tramp_text_size(void)
381 ++{
382 ++ return __entry_tramp_text_end - __entry_tramp_text_start;
383 ++}
384 +
385 + #endif /* __ASM_SECTIONS_H */
386 +diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
387 +index 2564dd429ab68..3bbf0dc5ecad0 100644
388 +--- a/arch/arm64/include/asm/sysreg.h
389 ++++ b/arch/arm64/include/asm/sysreg.h
390 +@@ -157,6 +157,7 @@
391 +
392 + #define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
393 + #define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
394 ++#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2)
395 +
396 + #define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
397 + #define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
398 +@@ -403,6 +404,9 @@
399 + #define ID_AA64ISAR1_JSCVT_SHIFT 12
400 + #define ID_AA64ISAR1_DPB_SHIFT 0
401 +
402 ++/* id_aa64isar2 */
403 ++#define ID_AA64ISAR2_CLEARBHB_SHIFT 28
404 ++
405 + /* id_aa64pfr0 */
406 + #define ID_AA64PFR0_CSV3_SHIFT 60
407 + #define ID_AA64PFR0_CSV2_SHIFT 56
408 +@@ -448,6 +452,7 @@
409 + #define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
410 +
411 + /* id_aa64mmfr1 */
412 ++#define ID_AA64MMFR1_ECBHB_SHIFT 60
413 + #define ID_AA64MMFR1_PAN_SHIFT 20
414 + #define ID_AA64MMFR1_LOR_SHIFT 16
415 + #define ID_AA64MMFR1_HPD_SHIFT 12
416 +diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h
417 +new file mode 100644
418 +index 0000000000000..695583b9a145b
419 +--- /dev/null
420 ++++ b/arch/arm64/include/asm/vectors.h
421 +@@ -0,0 +1,74 @@
422 ++/* SPDX-License-Identifier: GPL-2.0-only */
423 ++/*
424 ++ * Copyright (C) 2022 ARM Ltd.
425 ++ */
426 ++#ifndef __ASM_VECTORS_H
427 ++#define __ASM_VECTORS_H
428 ++
429 ++#include <linux/bug.h>
430 ++#include <linux/percpu.h>
431 ++
432 ++#include <asm/fixmap.h>
433 ++#include <asm/mmu.h>
434 ++
435 ++extern char vectors[];
436 ++extern char tramp_vectors[];
437 ++extern char __bp_harden_el1_vectors[];
438 ++
439 ++/*
440 ++ * Note: the order of this enum corresponds to two arrays in entry.S:
441 ++ * tramp_vecs and __bp_harden_el1_vectors. By default the canonical
442 ++ * 'full fat' vectors are used directly.
443 ++ */
444 ++enum arm64_bp_harden_el1_vectors {
445 ++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
446 ++ /*
447 ++ * Perform the BHB loop mitigation, before branching to the canonical
448 ++ * vectors.
449 ++ */
450 ++ EL1_VECTOR_BHB_LOOP,
451 ++
452 ++ /*
453 ++ * Make the SMC call for firmware mitigation, before branching to the
454 ++ * canonical vectors.
455 ++ */
456 ++ EL1_VECTOR_BHB_FW,
457 ++
458 ++ /*
459 ++ * Use the ClearBHB instruction, before branching to the canonical
460 ++ * vectors.
461 ++ */
462 ++ EL1_VECTOR_BHB_CLEAR_INSN,
463 ++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
464 ++
465 ++ /*
466 ++ * Remap the kernel before branching to the canonical vectors.
467 ++ */
468 ++ EL1_VECTOR_KPTI,
469 ++};
470 ++
471 ++#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
472 ++#define EL1_VECTOR_BHB_LOOP -1
473 ++#define EL1_VECTOR_BHB_FW -1
474 ++#define EL1_VECTOR_BHB_CLEAR_INSN -1
475 ++#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
476 ++
477 ++/* The vectors to use on return from EL0. e.g. to remap the kernel */
478 ++DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
479 ++
480 ++#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
481 ++#define TRAMP_VALIAS 0
482 ++#endif
483 ++
484 ++static inline const char *
485 ++arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
486 ++{
487 ++ if (arm64_kernel_unmapped_at_el0())
488 ++ return (char *)TRAMP_VALIAS + SZ_2K * slot;
489 ++
490 ++ WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);
491 ++
492 ++ return __bp_harden_el1_vectors + SZ_2K * slot;
493 ++}
494 ++
495 ++#endif /* __ASM_VECTORS_H */
496 +diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
497 +index 4cae34e5a24e2..bd6ef8750f440 100644
498 +--- a/arch/arm64/kernel/bpi.S
499 ++++ b/arch/arm64/kernel/bpi.S
500 +@@ -66,3 +66,58 @@ ENTRY(__smccc_workaround_1_smc_start)
501 + ldp x0, x1, [sp, #(8 * 2)]
502 + add sp, sp, #(8 * 4)
503 + ENTRY(__smccc_workaround_1_smc_end)
504 ++
505 ++ENTRY(__smccc_workaround_3_smc_start)
506 ++ sub sp, sp, #(8 * 4)
507 ++ stp x2, x3, [sp, #(8 * 0)]
508 ++ stp x0, x1, [sp, #(8 * 2)]
509 ++ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
510 ++ smc #0
511 ++ ldp x2, x3, [sp, #(8 * 0)]
512 ++ ldp x0, x1, [sp, #(8 * 2)]
513 ++ add sp, sp, #(8 * 4)
514 ++ENTRY(__smccc_workaround_3_smc_end)
515 ++
516 ++ENTRY(__spectre_bhb_loop_k8_start)
517 ++ sub sp, sp, #(8 * 2)
518 ++ stp x0, x1, [sp, #(8 * 0)]
519 ++ mov x0, #8
520 ++2: b . + 4
521 ++ subs x0, x0, #1
522 ++ b.ne 2b
523 ++ dsb nsh
524 ++ isb
525 ++ ldp x0, x1, [sp, #(8 * 0)]
526 ++ add sp, sp, #(8 * 2)
527 ++ENTRY(__spectre_bhb_loop_k8_end)
528 ++
529 ++ENTRY(__spectre_bhb_loop_k24_start)
530 ++ sub sp, sp, #(8 * 2)
531 ++ stp x0, x1, [sp, #(8 * 0)]
532 ++ mov x0, #24
533 ++2: b . + 4
534 ++ subs x0, x0, #1
535 ++ b.ne 2b
536 ++ dsb nsh
537 ++ isb
538 ++ ldp x0, x1, [sp, #(8 * 0)]
539 ++ add sp, sp, #(8 * 2)
540 ++ENTRY(__spectre_bhb_loop_k24_end)
541 ++
542 ++ENTRY(__spectre_bhb_loop_k32_start)
543 ++ sub sp, sp, #(8 * 2)
544 ++ stp x0, x1, [sp, #(8 * 0)]
545 ++ mov x0, #32
546 ++2: b . + 4
547 ++ subs x0, x0, #1
548 ++ b.ne 2b
549 ++ dsb nsh
550 ++ isb
551 ++ ldp x0, x1, [sp, #(8 * 0)]
552 ++ add sp, sp, #(8 * 2)
553 ++ENTRY(__spectre_bhb_loop_k32_end)
554 ++
555 ++ENTRY(__spectre_bhb_clearbhb_start)
556 ++ hint #22 /* aka clearbhb */
557 ++ isb
558 ++ENTRY(__spectre_bhb_clearbhb_end)
559 +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
560 +index 7d15f4cb63937..ed627d44746ad 100644
561 +--- a/arch/arm64/kernel/cpu_errata.c
562 ++++ b/arch/arm64/kernel/cpu_errata.c
563 +@@ -23,6 +23,7 @@
564 + #include <asm/cpu.h>
565 + #include <asm/cputype.h>
566 + #include <asm/cpufeature.h>
567 ++#include <asm/vectors.h>
568 +
569 + static bool __maybe_unused
570 + is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
571 +@@ -85,6 +86,16 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
572 + #ifdef CONFIG_KVM
573 + extern char __smccc_workaround_1_smc_start[];
574 + extern char __smccc_workaround_1_smc_end[];
575 ++extern char __smccc_workaround_3_smc_start[];
576 ++extern char __smccc_workaround_3_smc_end[];
577 ++extern char __spectre_bhb_loop_k8_start[];
578 ++extern char __spectre_bhb_loop_k8_end[];
579 ++extern char __spectre_bhb_loop_k24_start[];
580 ++extern char __spectre_bhb_loop_k24_end[];
581 ++extern char __spectre_bhb_loop_k32_start[];
582 ++extern char __spectre_bhb_loop_k32_end[];
583 ++extern char __spectre_bhb_clearbhb_start[];
584 ++extern char __spectre_bhb_clearbhb_end[];
585 +
586 + static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
587 + const char *hyp_vecs_end)
588 +@@ -98,12 +109,14 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
589 + flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
590 + }
591 +
592 ++static DEFINE_SPINLOCK(bp_lock);
593 ++static int last_slot = -1;
594 ++
595 + static void install_bp_hardening_cb(bp_hardening_cb_t fn,
596 + const char *hyp_vecs_start,
597 + const char *hyp_vecs_end)
598 + {
599 +- static int last_slot = -1;
600 +- static DEFINE_SPINLOCK(bp_lock);
601 ++
602 + int cpu, slot = -1;
603 +
604 + spin_lock(&bp_lock);
605 +@@ -124,6 +137,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
606 +
607 + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
608 + __this_cpu_write(bp_hardening_data.fn, fn);
609 ++ __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
610 + spin_unlock(&bp_lock);
611 + }
612 + #else
613 +@@ -712,6 +726,21 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
614 + .matches = has_ssbd_mitigation,
615 + .midr_range_list = arm64_ssb_cpus,
616 + },
617 ++#ifdef CONFIG_ARM64_ERRATUM_1188873
618 ++ {
619 ++ /* Cortex-A76 r0p0 to r2p0 */
620 ++ .desc = "ARM erratum 1188873",
621 ++ .capability = ARM64_WORKAROUND_1188873,
622 ++ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
623 ++ },
624 ++#endif
625 ++ {
626 ++ .desc = "Spectre-BHB",
627 ++ .capability = ARM64_SPECTRE_BHB,
628 ++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
629 ++ .matches = is_spectre_bhb_affected,
630 ++ .cpu_enable = spectre_bhb_enable_mitigation,
631 ++ },
632 + {
633 + }
634 + };
635 +@@ -722,14 +751,39 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
636 + return sprintf(buf, "Mitigation: __user pointer sanitization\n");
637 + }
638 +
639 ++static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
640 ++{
641 ++ switch (bhb_state) {
642 ++ case SPECTRE_UNAFFECTED:
643 ++ return "";
644 ++ default:
645 ++ case SPECTRE_VULNERABLE:
646 ++ return ", but not BHB";
647 ++ case SPECTRE_MITIGATED:
648 ++ return ", BHB";
649 ++ }
650 ++}
651 ++
652 + ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
653 + char *buf)
654 + {
655 +- if (__spectrev2_safe)
656 +- return sprintf(buf, "Not affected\n");
657 ++ enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
658 ++ const char *bhb_str = get_bhb_affected_string(bhb_state);
659 ++ const char *v2_str = "Branch predictor hardening";
660 ++
661 ++ if (__spectrev2_safe) {
662 ++ if (bhb_state == SPECTRE_UNAFFECTED)
663 ++ return sprintf(buf, "Not affected\n");
664 ++
665 ++ /*
666 ++ * Platforms affected by Spectre-BHB can't report
667 ++ * "Not affected" for Spectre-v2.
668 ++ */
669 ++ v2_str = "CSV2";
670 ++ }
671 +
672 + if (__hardenbp_enab)
673 +- return sprintf(buf, "Mitigation: Branch predictor hardening\n");
674 ++ return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
675 +
676 + return sprintf(buf, "Vulnerable\n");
677 + }
678 +@@ -750,3 +804,334 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev,
679 +
680 + return sprintf(buf, "Vulnerable\n");
681 + }
682 ++
683 ++/*
684 ++ * We try to ensure that the mitigation state can never change as the result of
685 ++ * onlining a late CPU.
686 ++ */
687 ++static void update_mitigation_state(enum mitigation_state *oldp,
688 ++ enum mitigation_state new)
689 ++{
690 ++ enum mitigation_state state;
691 ++
692 ++ do {
693 ++ state = READ_ONCE(*oldp);
694 ++ if (new <= state)
695 ++ break;
696 ++ } while (cmpxchg_relaxed(oldp, state, new) != state);
697 ++}
698 ++
699 ++/*
700 ++ * Spectre BHB.
701 ++ *
702 ++ * A CPU is either:
703 ++ * - Mitigated by a branchy loop a CPU specific number of times, and listed
704 ++ * in our "loop mitigated list".
705 ++ * - Mitigated in software by the firmware Spectre v2 call.
706 ++ * - Has the ClearBHB instruction to perform the mitigation.
707 ++ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
708 ++ * software mitigation in the vectors is needed.
709 ++ * - Has CSV2.3, so is unaffected.
710 ++ */
711 ++static enum mitigation_state spectre_bhb_state;
712 ++
713 ++enum mitigation_state arm64_get_spectre_bhb_state(void)
714 ++{
715 ++ return spectre_bhb_state;
716 ++}
717 ++
718 ++/*
719 ++ * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
720 ++ * SCOPE_SYSTEM call will give the right answer.
721 ++ */
722 ++u8 spectre_bhb_loop_affected(int scope)
723 ++{
724 ++ u8 k = 0;
725 ++ static u8 max_bhb_k;
726 ++
727 ++ if (scope == SCOPE_LOCAL_CPU) {
728 ++ static const struct midr_range spectre_bhb_k32_list[] = {
729 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
730 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
731 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
732 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
733 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
734 ++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
735 ++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
736 ++ {},
737 ++ };
738 ++ static const struct midr_range spectre_bhb_k24_list[] = {
739 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
740 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
741 ++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
742 ++ {},
743 ++ };
744 ++ static const struct midr_range spectre_bhb_k8_list[] = {
745 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
746 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
747 ++ {},
748 ++ };
749 ++
750 ++ if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
751 ++ k = 32;
752 ++ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
753 ++ k = 24;
754 ++ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
755 ++ k = 8;
756 ++
757 ++ max_bhb_k = max(max_bhb_k, k);
758 ++ } else {
759 ++ k = max_bhb_k;
760 ++ }
761 ++
762 ++ return k;
763 ++}
764 ++
765 ++static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
766 ++{
767 ++ int ret;
768 ++ struct arm_smccc_res res;
769 ++
770 ++ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
771 ++ return SPECTRE_VULNERABLE;
772 ++
773 ++ switch (psci_ops.conduit) {
774 ++ case PSCI_CONDUIT_HVC:
775 ++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
776 ++ ARM_SMCCC_ARCH_WORKAROUND_3, &res);
777 ++ break;
778 ++
779 ++ case PSCI_CONDUIT_SMC:
780 ++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
781 ++ ARM_SMCCC_ARCH_WORKAROUND_3, &res);
782 ++ break;
783 ++
784 ++ default:
785 ++ return SPECTRE_VULNERABLE;
786 ++ }
787 ++
788 ++ ret = res.a0;
789 ++ switch (ret) {
790 ++ case SMCCC_RET_SUCCESS:
791 ++ return SPECTRE_MITIGATED;
792 ++ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
793 ++ return SPECTRE_UNAFFECTED;
794 ++ default:
795 ++ case SMCCC_RET_NOT_SUPPORTED:
796 ++ return SPECTRE_VULNERABLE;
797 ++ }
798 ++}
799 ++
800 ++static bool is_spectre_bhb_fw_affected(int scope)
801 ++{
802 ++ static bool system_affected;
803 ++ enum mitigation_state fw_state;
804 ++ bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
805 ++ static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
806 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
807 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
808 ++ {},
809 ++ };
810 ++ bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
811 ++ spectre_bhb_firmware_mitigated_list);
812 ++
813 ++ if (scope != SCOPE_LOCAL_CPU)
814 ++ return system_affected;
815 ++
816 ++ fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
817 ++ if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
818 ++ system_affected = true;
819 ++ return true;
820 ++ }
821 ++
822 ++ return false;
823 ++}
824 ++
825 ++static bool supports_ecbhb(int scope)
826 ++{
827 ++ u64 mmfr1;
828 ++
829 ++ if (scope == SCOPE_LOCAL_CPU)
830 ++ mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
831 ++ else
832 ++ mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
833 ++
834 ++ return cpuid_feature_extract_unsigned_field(mmfr1,
835 ++ ID_AA64MMFR1_ECBHB_SHIFT);
836 ++}
837 ++
838 ++bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
839 ++ int scope)
840 ++{
841 ++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
842 ++
843 ++ if (supports_csv2p3(scope))
844 ++ return false;
845 ++
846 ++ if (supports_clearbhb(scope))
847 ++ return true;
848 ++
849 ++ if (spectre_bhb_loop_affected(scope))
850 ++ return true;
851 ++
852 ++ if (is_spectre_bhb_fw_affected(scope))
853 ++ return true;
854 ++
855 ++ return false;
856 ++}
857 ++
858 ++static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
859 ++{
860 ++ const char *v = arm64_get_bp_hardening_vector(slot);
861 ++
862 ++ if (slot < 0)
863 ++ return;
864 ++
865 ++ __this_cpu_write(this_cpu_vector, v);
866 ++
867 ++ /*
868 ++ * When KPTI is in use, the vectors are switched when exiting to
869 ++ * user-space.
870 ++ */
871 ++ if (arm64_kernel_unmapped_at_el0())
872 ++ return;
873 ++
874 ++ write_sysreg(v, vbar_el1);
875 ++ isb();
876 ++}
877 ++
878 ++#ifdef CONFIG_KVM
879 ++static const char *kvm_bhb_get_vecs_end(const char *start)
880 ++{
881 ++ if (start == __smccc_workaround_3_smc_start)
882 ++ return __smccc_workaround_3_smc_end;
883 ++ else if (start == __spectre_bhb_loop_k8_start)
884 ++ return __spectre_bhb_loop_k8_end;
885 ++ else if (start == __spectre_bhb_loop_k24_start)
886 ++ return __spectre_bhb_loop_k24_end;
887 ++ else if (start == __spectre_bhb_loop_k32_start)
888 ++ return __spectre_bhb_loop_k32_end;
889 ++ else if (start == __spectre_bhb_clearbhb_start)
890 ++ return __spectre_bhb_clearbhb_end;
891 ++
892 ++ return NULL;
893 ++}
894 ++
895 ++static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
896 ++{
897 ++ int cpu, slot = -1;
898 ++ const char *hyp_vecs_end;
899 ++
900 ++ if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
901 ++ return;
902 ++
903 ++ hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
904 ++ if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
905 ++ return;
906 ++
907 ++ spin_lock(&bp_lock);
908 ++ for_each_possible_cpu(cpu) {
909 ++ if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
910 ++ slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
911 ++ break;
912 ++ }
913 ++ }
914 ++
915 ++ if (slot == -1) {
916 ++ last_slot++;
917 ++ BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
918 ++ / SZ_2K) <= last_slot);
919 ++ slot = last_slot;
920 ++ __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
921 ++ }
922 ++
923 ++ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
924 ++ __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
925 ++ spin_unlock(&bp_lock);
926 ++}
927 ++#else
928 ++#define __smccc_workaround_3_smc_start NULL
929 ++#define __spectre_bhb_loop_k8_start NULL
930 ++#define __spectre_bhb_loop_k24_start NULL
931 ++#define __spectre_bhb_loop_k32_start NULL
932 ++#define __spectre_bhb_clearbhb_start NULL
933 ++
934 ++static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { };
935 ++#endif
936 ++
937 ++void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
938 ++{
939 ++ enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
940 ++
941 ++ if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
942 ++ return;
943 ++
944 ++ if (!__spectrev2_safe && !__hardenbp_enab) {
945 ++ /* No point mitigating Spectre-BHB alone. */
946 ++ } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
947 ++ pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
948 ++ } else if (cpu_mitigations_off()) {
949 ++ pr_info_once("spectre-bhb mitigation disabled by command line option\n");
950 ++ } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
951 ++ state = SPECTRE_MITIGATED;
952 ++ } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
953 ++ kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
954 ++ this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
955 ++
956 ++ state = SPECTRE_MITIGATED;
957 ++ } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
958 ++ switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
959 ++ case 8:
960 ++ kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
961 ++ break;
962 ++ case 24:
963 ++ kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
964 ++ break;
965 ++ case 32:
966 ++ kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
967 ++ break;
968 ++ default:
969 ++ WARN_ON_ONCE(1);
970 ++ }
971 ++ this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
972 ++
973 ++ state = SPECTRE_MITIGATED;
974 ++ } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
975 ++ fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
976 ++ if (fw_state == SPECTRE_MITIGATED) {
977 ++ kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
978 ++ this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
979 ++
980 ++ /*
981 ++ * With WA3 in the vectors, the WA1 calls can be
982 ++ * removed.
983 ++ */
984 ++ __this_cpu_write(bp_hardening_data.fn, NULL);
985 ++
986 ++ state = SPECTRE_MITIGATED;
987 ++ }
988 ++ }
989 ++
990 ++ update_mitigation_state(&spectre_bhb_state, state);
991 ++}
992 ++
993 ++/* Patched to correct the immediate */
994 ++void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt,
995 ++ __le32 *origptr, __le32 *updptr, int nr_inst)
996 ++{
997 ++ u8 rd;
998 ++ u32 insn;
999 ++ u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
1000 ++
1001 ++ BUG_ON(nr_inst != 1); /* MOV -> MOV */
1002 ++
1003 ++ if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
1004 ++ return;
1005 ++
1006 ++ insn = le32_to_cpu(*origptr);
1007 ++ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
1008 ++ insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
1009 ++ AARCH64_INSN_VARIANT_64BIT,
1010 ++ AARCH64_INSN_MOVEWIDE_ZERO);
1011 ++ *updptr++ = cpu_to_le32(insn);
1012 ++}
1013 +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
1014 +index 1481e18aa5ca0..b6922f33d306b 100644
1015 +--- a/arch/arm64/kernel/cpufeature.c
1016 ++++ b/arch/arm64/kernel/cpufeature.c
1017 +@@ -20,11 +20,13 @@
1018 +
1019 + #include <linux/bsearch.h>
1020 + #include <linux/cpumask.h>
1021 ++#include <linux/percpu.h>
1022 + #include <linux/sort.h>
1023 + #include <linux/stop_machine.h>
1024 + #include <linux/types.h>
1025 + #include <linux/mm.h>
1026 + #include <linux/cpu.h>
1027 ++
1028 + #include <asm/cpu.h>
1029 + #include <asm/cpufeature.h>
1030 + #include <asm/cpu_ops.h>
1031 +@@ -32,6 +34,7 @@
1032 + #include <asm/processor.h>
1033 + #include <asm/sysreg.h>
1034 + #include <asm/traps.h>
1035 ++#include <asm/vectors.h>
1036 + #include <asm/virt.h>
1037 +
1038 + unsigned long elf_hwcap __read_mostly;
1039 +@@ -50,6 +53,8 @@ unsigned int compat_elf_hwcap2 __read_mostly;
1040 + DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
1041 + EXPORT_SYMBOL(cpu_hwcaps);
1042 +
1043 ++DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
1044 ++
1045 + static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
1046 + {
1047 + /* file-wide pr_fmt adds "CPU features: " prefix */
1048 +@@ -129,6 +134,11 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
1049 + ARM64_FTR_END,
1050 + };
1051 +
1052 ++static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
1053 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
1054 ++ ARM64_FTR_END,
1055 ++};
1056 ++
1057 + static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
1058 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
1059 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
1060 +@@ -356,6 +366,7 @@ static const struct __ftr_reg_entry {
1061 + /* Op1 = 0, CRn = 0, CRm = 6 */
1062 + ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
1063 + ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
1064 ++ ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
1065 +
1066 + /* Op1 = 0, CRn = 0, CRm = 7 */
1067 + ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
1068 +@@ -501,6 +512,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
1069 + init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
1070 + init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
1071 + init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
1072 ++ init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
1073 + init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
1074 + init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
1075 + init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
1076 +@@ -612,6 +624,8 @@ void update_cpu_features(int cpu,
1077 + info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
1078 + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
1079 + info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
1080 ++ taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
1081 ++ info->reg_id_aa64isar2, boot->reg_id_aa64isar2);
1082 +
1083 + /*
1084 + * Differing PARange support is fine as long as all peripherals and
1085 +@@ -732,6 +746,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
1086 + read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
1087 + read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
1088 + read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
1089 ++ read_sysreg_case(SYS_ID_AA64ISAR2_EL1);
1090 +
1091 + read_sysreg_case(SYS_CNTFRQ_EL0);
1092 + read_sysreg_case(SYS_CTR_EL0);
1093 +@@ -892,6 +907,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
1094 + static bool kpti_applied = false;
1095 + int cpu = smp_processor_id();
1096 +
1097 ++ if (__this_cpu_read(this_cpu_vector) == vectors) {
1098 ++ const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
1099 ++
1100 ++ __this_cpu_write(this_cpu_vector, v);
1101 ++ }
1102 ++
1103 + if (kpti_applied)
1104 + return;
1105 +
1106 +diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
1107 +index 9ff64e04e63d6..6b7db546efda5 100644
1108 +--- a/arch/arm64/kernel/cpuinfo.c
1109 ++++ b/arch/arm64/kernel/cpuinfo.c
1110 +@@ -333,6 +333,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
1111 + info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
1112 + info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
1113 + info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
1114 ++ info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1);
1115 + info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
1116 + info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
1117 + info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
1118 +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
1119 +index c1ffa95c0ad24..f526148d14bd8 100644
1120 +--- a/arch/arm64/kernel/entry.S
1121 ++++ b/arch/arm64/kernel/entry.S
1122 +@@ -74,18 +74,21 @@
1123 +
1124 + .macro kernel_ventry, el, label, regsize = 64
1125 + .align 7
1126 +-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1127 +-alternative_if ARM64_UNMAP_KERNEL_AT_EL0
1128 ++.Lventry_start\@:
1129 + .if \el == 0
1130 ++ /*
1131 ++ * This must be the first instruction of the EL0 vector entries. It is
1132 ++ * skipped by the trampoline vectors, to trigger the cleanup.
1133 ++ */
1134 ++ b .Lskip_tramp_vectors_cleanup\@
1135 + .if \regsize == 64
1136 + mrs x30, tpidrro_el0
1137 + msr tpidrro_el0, xzr
1138 + .else
1139 + mov x30, xzr
1140 + .endif
1141 ++.Lskip_tramp_vectors_cleanup\@:
1142 + .endif
1143 +-alternative_else_nop_endif
1144 +-#endif
1145 +
1146 + sub sp, sp, #S_FRAME_SIZE
1147 + #ifdef CONFIG_VMAP_STACK
1148 +@@ -131,11 +134,15 @@ alternative_else_nop_endif
1149 + mrs x0, tpidrro_el0
1150 + #endif
1151 + b el\()\el\()_\label
1152 ++.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
1153 + .endm
1154 +
1155 +- .macro tramp_alias, dst, sym
1156 ++ .macro tramp_alias, dst, sym, tmp
1157 + mov_q \dst, TRAMP_VALIAS
1158 +- add \dst, \dst, #(\sym - .entry.tramp.text)
1159 ++ adr_l \tmp, \sym
1160 ++ add \dst, \dst, \tmp
1161 ++ adr_l \tmp, .entry.tramp.text
1162 ++ sub \dst, \dst, \tmp
1163 + .endm
1164 +
1165 + // This macro corrupts x0-x3. It is the caller's duty
1166 +@@ -350,21 +357,25 @@ alternative_else_nop_endif
1167 + ldp x24, x25, [sp, #16 * 12]
1168 + ldp x26, x27, [sp, #16 * 13]
1169 + ldp x28, x29, [sp, #16 * 14]
1170 +- ldr lr, [sp, #S_LR]
1171 +- add sp, sp, #S_FRAME_SIZE // restore sp
1172 +
1173 + .if \el == 0
1174 +-alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
1175 ++alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1176 ++ ldr lr, [sp, #S_LR]
1177 ++ add sp, sp, #S_FRAME_SIZE // restore sp
1178 ++ eret
1179 ++alternative_else_nop_endif
1180 + #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1181 + bne 4f
1182 +- msr far_el1, x30
1183 +- tramp_alias x30, tramp_exit_native
1184 ++ msr far_el1, x29
1185 ++ tramp_alias x30, tramp_exit_native, x29
1186 + br x30
1187 + 4:
1188 +- tramp_alias x30, tramp_exit_compat
1189 ++ tramp_alias x30, tramp_exit_compat, x29
1190 + br x30
1191 + #endif
1192 + .else
1193 ++ ldr lr, [sp, #S_LR]
1194 ++ add sp, sp, #S_FRAME_SIZE // restore sp
1195 + eret
1196 + .endif
1197 + .endm
1198 +@@ -972,12 +983,7 @@ __ni_sys_trace:
1199 +
1200 + .popsection // .entry.text
1201 +
1202 +-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1203 +-/*
1204 +- * Exception vectors trampoline.
1205 +- */
1206 +- .pushsection ".entry.tramp.text", "ax"
1207 +-
1208 ++ // Move from tramp_pg_dir to swapper_pg_dir
1209 + .macro tramp_map_kernel, tmp
1210 + mrs \tmp, ttbr1_el1
1211 + sub \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
1212 +@@ -1009,12 +1015,47 @@ alternative_else_nop_endif
1213 + */
1214 + .endm
1215 +
1216 +- .macro tramp_ventry, regsize = 64
1217 ++ .macro tramp_data_page dst
1218 ++ adr_l \dst, .entry.tramp.text
1219 ++ sub \dst, \dst, PAGE_SIZE
1220 ++ .endm
1221 ++
1222 ++ .macro tramp_data_read_var dst, var
1223 ++#ifdef CONFIG_RANDOMIZE_BASE
1224 ++ tramp_data_page \dst
1225 ++ add \dst, \dst, #:lo12:__entry_tramp_data_\var
1226 ++ ldr \dst, [\dst]
1227 ++#else
1228 ++ ldr \dst, =\var
1229 ++#endif
1230 ++ .endm
1231 ++
1232 ++#define BHB_MITIGATION_NONE 0
1233 ++#define BHB_MITIGATION_LOOP 1
1234 ++#define BHB_MITIGATION_FW 2
1235 ++#define BHB_MITIGATION_INSN 3
1236 ++
1237 ++ .macro tramp_ventry, vector_start, regsize, kpti, bhb
1238 + .align 7
1239 + 1:
1240 + .if \regsize == 64
1241 + msr tpidrro_el0, x30 // Restored in kernel_ventry
1242 + .endif
1243 ++
1244 ++ .if \bhb == BHB_MITIGATION_LOOP
1245 ++ /*
1246 ++ * This sequence must appear before the first indirect branch. i.e. the
1247 ++ * ret out of tramp_ventry. It appears here because x30 is free.
1248 ++ */
1249 ++ __mitigate_spectre_bhb_loop x30
1250 ++ .endif // \bhb == BHB_MITIGATION_LOOP
1251 ++
1252 ++ .if \bhb == BHB_MITIGATION_INSN
1253 ++ clearbhb
1254 ++ isb
1255 ++ .endif // \bhb == BHB_MITIGATION_INSN
1256 ++
1257 ++ .if \kpti == 1
1258 + /*
1259 + * Defend against branch aliasing attacks by pushing a dummy
1260 + * entry onto the return stack and using a RET instruction to
1261 +@@ -1024,43 +1065,75 @@ alternative_else_nop_endif
1262 + b .
1263 + 2:
1264 + tramp_map_kernel x30
1265 +-#ifdef CONFIG_RANDOMIZE_BASE
1266 +- adr x30, tramp_vectors + PAGE_SIZE
1267 + alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
1268 +- ldr x30, [x30]
1269 +-#else
1270 +- ldr x30, =vectors
1271 +-#endif
1272 +- prfm plil1strm, [x30, #(1b - tramp_vectors)]
1273 ++ tramp_data_read_var x30, vectors
1274 ++ prfm plil1strm, [x30, #(1b - \vector_start)]
1275 + msr vbar_el1, x30
1276 +- add x30, x30, #(1b - tramp_vectors)
1277 + isb
1278 ++ .else
1279 ++ ldr x30, =vectors
1280 ++ .endif // \kpti == 1
1281 ++
1282 ++ .if \bhb == BHB_MITIGATION_FW
1283 ++ /*
1284 ++ * The firmware sequence must appear before the first indirect branch.
1285 ++ * i.e. the ret out of tramp_ventry. But it also needs the stack to be
1286 ++ * mapped to save/restore the registers the SMC clobbers.
1287 ++ */
1288 ++ __mitigate_spectre_bhb_fw
1289 ++ .endif // \bhb == BHB_MITIGATION_FW
1290 ++
1291 ++ add x30, x30, #(1b - \vector_start + 4)
1292 + ret
1293 ++.org 1b + 128 // Did we overflow the ventry slot?
1294 + .endm
1295 +
1296 + .macro tramp_exit, regsize = 64
1297 +- adr x30, tramp_vectors
1298 ++ tramp_data_read_var x30, this_cpu_vector
1299 ++alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
1300 ++ mrs x29, tpidr_el1
1301 ++alternative_else
1302 ++ mrs x29, tpidr_el2
1303 ++alternative_endif
1304 ++ ldr x30, [x30, x29]
1305 ++
1306 + msr vbar_el1, x30
1307 +- tramp_unmap_kernel x30
1308 ++ ldr lr, [sp, #S_LR]
1309 ++ tramp_unmap_kernel x29
1310 + .if \regsize == 64
1311 +- mrs x30, far_el1
1312 ++ mrs x29, far_el1
1313 + .endif
1314 ++ add sp, sp, #S_FRAME_SIZE // restore sp
1315 + eret
1316 + .endm
1317 +
1318 +- .align 11
1319 +-ENTRY(tramp_vectors)
1320 ++ .macro generate_tramp_vector, kpti, bhb
1321 ++.Lvector_start\@:
1322 + .space 0x400
1323 +
1324 +- tramp_ventry
1325 +- tramp_ventry
1326 +- tramp_ventry
1327 +- tramp_ventry
1328 ++ .rept 4
1329 ++ tramp_ventry .Lvector_start\@, 64, \kpti, \bhb
1330 ++ .endr
1331 ++ .rept 4
1332 ++ tramp_ventry .Lvector_start\@, 32, \kpti, \bhb
1333 ++ .endr
1334 ++ .endm
1335 +
1336 +- tramp_ventry 32
1337 +- tramp_ventry 32
1338 +- tramp_ventry 32
1339 +- tramp_ventry 32
1340 ++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1341 ++/*
1342 ++ * Exception vectors trampoline.
1343 ++ * The order must match __bp_harden_el1_vectors and the
1344 ++ * arm64_bp_harden_el1_vectors enum.
1345 ++ */
1346 ++ .pushsection ".entry.tramp.text", "ax"
1347 ++ .align 11
1348 ++ENTRY(tramp_vectors)
1349 ++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
1350 ++ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
1351 ++ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
1352 ++ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN
1353 ++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
1354 ++ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
1355 + END(tramp_vectors)
1356 +
1357 + ENTRY(tramp_exit_native)
1358 +@@ -1078,11 +1151,54 @@ END(tramp_exit_compat)
1359 + .align PAGE_SHIFT
1360 + .globl __entry_tramp_data_start
1361 + __entry_tramp_data_start:
1362 ++__entry_tramp_data_vectors:
1363 + .quad vectors
1364 ++#ifdef CONFIG_ARM_SDE_INTERFACE
1365 ++__entry_tramp_data___sdei_asm_trampoline_next_handler:
1366 ++ .quad __sdei_asm_handler
1367 ++#endif /* CONFIG_ARM_SDE_INTERFACE */
1368 ++__entry_tramp_data_this_cpu_vector:
1369 ++ .quad this_cpu_vector
1370 + .popsection // .rodata
1371 + #endif /* CONFIG_RANDOMIZE_BASE */
1372 + #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1373 +
1374 ++/*
1375 ++ * Exception vectors for spectre mitigations on entry from EL1 when
1376 ++ * kpti is not in use.
1377 ++ */
1378 ++ .macro generate_el1_vector, bhb
1379 ++.Lvector_start\@:
1380 ++ kernel_ventry 1, sync_invalid // Synchronous EL1t
1381 ++ kernel_ventry 1, irq_invalid // IRQ EL1t
1382 ++ kernel_ventry 1, fiq_invalid // FIQ EL1t
1383 ++ kernel_ventry 1, error_invalid // Error EL1t
1384 ++
1385 ++ kernel_ventry 1, sync // Synchronous EL1h
1386 ++ kernel_ventry 1, irq // IRQ EL1h
1387 ++ kernel_ventry 1, fiq_invalid // FIQ EL1h
1388 ++ kernel_ventry 1, error_invalid // Error EL1h
1389 ++
1390 ++ .rept 4
1391 ++ tramp_ventry .Lvector_start\@, 64, 0, \bhb
1392 ++ .endr
1393 ++ .rept 4
1394 ++ tramp_ventry .Lvector_start\@, 32, 0, \bhb
1395 ++ .endr
1396 ++ .endm
1397 ++
1398 ++/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
1399 ++ .pushsection ".entry.text", "ax"
1400 ++ .align 11
1401 ++ENTRY(__bp_harden_el1_vectors)
1402 ++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
1403 ++ generate_el1_vector bhb=BHB_MITIGATION_LOOP
1404 ++ generate_el1_vector bhb=BHB_MITIGATION_FW
1405 ++ generate_el1_vector bhb=BHB_MITIGATION_INSN
1406 ++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
1407 ++END(__bp_harden_el1_vectors)
1408 ++ .popsection
1409 ++
1410 + /*
1411 + * Special system call wrappers.
1412 + */
1413 +diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
1414 +index 4c11d3e64aef4..6543c58f26ec5 100644
1415 +--- a/arch/arm64/kernel/vmlinux.lds.S
1416 ++++ b/arch/arm64/kernel/vmlinux.lds.S
1417 +@@ -258,7 +258,7 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
1418 + <= SZ_4K, "Hibernate exit text too big or misaligned")
1419 + #endif
1420 + #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1421 +-ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
1422 ++ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
1423 + "Entry trampoline text too big")
1424 + #endif
1425 + /*
1426 +diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
1427 +index 5e041eabdd03e..8086294aedea7 100644
1428 +--- a/arch/arm64/kvm/hyp/hyp-entry.S
1429 ++++ b/arch/arm64/kvm/hyp/hyp-entry.S
1430 +@@ -135,6 +135,10 @@ el1_hvc_guest:
1431 + /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
1432 + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
1433 + ARM_SMCCC_ARCH_WORKAROUND_2)
1434 ++ cbz w1, wa_epilogue
1435 ++
1436 ++ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
1437 ++ ARM_SMCCC_ARCH_WORKAROUND_3)
1438 + cbnz w1, el1_trap
1439 +
1440 + #ifdef CONFIG_ARM64_SSBD
1441 +diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
1442 +index 99ae75a43985c..0f05f402e04a1 100644
1443 +--- a/arch/arm64/kvm/hyp/switch.c
1444 ++++ b/arch/arm64/kvm/hyp/switch.c
1445 +@@ -27,6 +27,7 @@
1446 + #include <asm/kvm_emulate.h>
1447 + #include <asm/kvm_hyp.h>
1448 + #include <asm/fpsimd.h>
1449 ++#include <asm/vectors.h>
1450 +
1451 + extern struct exception_table_entry __start___kvm_ex_table;
1452 + extern struct exception_table_entry __stop___kvm_ex_table;
1453 +@@ -110,17 +111,21 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
1454 +
1455 + static void __hyp_text __deactivate_traps_vhe(void)
1456 + {
1457 +- extern char vectors[]; /* kernel exception vectors */
1458 ++ const char *host_vectors = vectors;
1459 + u64 mdcr_el2 = read_sysreg(mdcr_el2);
1460 +
1461 + mdcr_el2 &= MDCR_EL2_HPMN_MASK |
1462 + MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
1463 + MDCR_EL2_TPMS;
1464 +
1465 ++
1466 + write_sysreg(mdcr_el2, mdcr_el2);
1467 + write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
1468 + write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
1469 +- write_sysreg(vectors, vbar_el1);
1470 ++
1471 ++ if (!arm64_kernel_unmapped_at_el0())
1472 ++ host_vectors = __this_cpu_read(this_cpu_vector);
1473 ++ write_sysreg(host_vectors, vbar_el1);
1474 + }
1475 +
1476 + static void __hyp_text __deactivate_traps_nvhe(void)
1477 +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
1478 +index e02a6326c8003..4d472907194dd 100644
1479 +--- a/arch/arm64/mm/mmu.c
1480 ++++ b/arch/arm64/mm/mmu.c
1481 +@@ -532,6 +532,7 @@ early_param("rodata", parse_rodata);
1482 + #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1483 + static int __init map_entry_trampoline(void)
1484 + {
1485 ++ int i;
1486 + extern char __entry_tramp_text_start[];
1487 +
1488 + pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
1489 +@@ -542,11 +543,15 @@ static int __init map_entry_trampoline(void)
1490 +
1491 + /* Map only the text into the trampoline page table */
1492 + memset(tramp_pg_dir, 0, PGD_SIZE);
1493 +- __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
1494 +- prot, pgd_pgtable_alloc, 0);
1495 ++ __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
1496 ++ entry_tramp_text_size(), prot, pgd_pgtable_alloc,
1497 ++ 0);
1498 +
1499 + /* Map both the text and data into the kernel page table */
1500 +- __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
1501 ++ for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
1502 ++ __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
1503 ++ pa_start + i * PAGE_SIZE, prot);
1504 ++
1505 + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
1506 + extern char __entry_tramp_data_start[];
1507 +
1508 +diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
1509 +index 2c5913057b87b..439a4d0058129 100644
1510 +--- a/drivers/clocksource/arm_arch_timer.c
1511 ++++ b/drivers/clocksource/arm_arch_timer.c
1512 +@@ -298,6 +298,13 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
1513 + }
1514 + #endif
1515 +
1516 ++#ifdef CONFIG_ARM64_ERRATUM_1188873
1517 ++static u64 notrace arm64_1188873_read_cntvct_el0(void)
1518 ++{
1519 ++ return read_sysreg(cntvct_el0);
1520 ++}
1521 ++#endif
1522 ++
1523 + #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
1524 + DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
1525 + timer_unstable_counter_workaround);
1526 +@@ -381,6 +388,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
1527 + .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
1528 + },
1529 + #endif
1530 ++#ifdef CONFIG_ARM64_ERRATUM_1188873
1531 ++ {
1532 ++ .match_type = ate_match_local_cap_id,
1533 ++ .id = (void *)ARM64_WORKAROUND_1188873,
1534 ++ .desc = "ARM erratum 1188873",
1535 ++ .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
1536 ++ },
1537 ++#endif
1538 + };
1539 +
1540 + typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
1541 +diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
1542 +index 6366b04c7d5f4..0402668914147 100644
1543 +--- a/include/linux/arm-smccc.h
1544 ++++ b/include/linux/arm-smccc.h
1545 +@@ -85,6 +85,13 @@
1546 + ARM_SMCCC_SMC_32, \
1547 + 0, 0x7fff)
1548 +
1549 ++#define ARM_SMCCC_ARCH_WORKAROUND_3 \
1550 ++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
1551 ++ ARM_SMCCC_SMC_32, \
1552 ++ 0, 0x3fff)
1553 ++
1554 ++#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1
1555 ++
1556 + #ifndef __ASSEMBLY__
1557 +
1558 + #include <linux/linkage.h>
1559 +diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
1560 +index c95ab4c5a4751..129b755824e12 100644
1561 +--- a/virt/kvm/arm/psci.c
1562 ++++ b/virt/kvm/arm/psci.c
1563 +@@ -433,6 +433,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
1564 + break;
1565 + }
1566 + break;
1567 ++ case ARM_SMCCC_ARCH_WORKAROUND_3:
1568 ++ switch (kvm_arm_get_spectre_bhb_state()) {
1569 ++ case SPECTRE_VULNERABLE:
1570 ++ break;
1571 ++ case SPECTRE_MITIGATED:
1572 ++ val = SMCCC_RET_SUCCESS;
1573 ++ break;
1574 ++ case SPECTRE_UNAFFECTED:
1575 ++ val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
1576 ++ break;
1577 ++ }
1578 ++ break;
1579 + }
1580 + break;
1581 + default: