Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 29 Oct 2019 11:33:08
Message-Id: 1572348763.4d5061ad7be34c2308960e3d0ed85a86b3fe8cc5.mpagano@gentoo
1 commit: 4d5061ad7be34c2308960e3d0ed85a86b3fe8cc5
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Oct 29 11:32:43 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Oct 29 11:32:43 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4d5061ad
7
8 Linux patch 4.14.151
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1150_linux-4.14.151.patch | 5291 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 5295 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 5e20a18..2d77f02 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -643,6 +643,10 @@ Patch: 1149_linux-4.14.150.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.14.150
23
24 +Patch: 1150_linux-4.14.151.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.14.151
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1150_linux-4.14.151.patch b/1150_linux-4.14.151.patch
33 new file mode 100644
34 index 0000000..5baca82
35 --- /dev/null
36 +++ b/1150_linux-4.14.151.patch
37 @@ -0,0 +1,5291 @@
38 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
39 +index 188a7db8501b..b67a6cd08ca1 100644
40 +--- a/Documentation/admin-guide/kernel-parameters.txt
41 ++++ b/Documentation/admin-guide/kernel-parameters.txt
42 +@@ -2389,8 +2389,8 @@
43 + http://repo.or.cz/w/linux-2.6/mini2440.git
44 +
45 + mitigations=
46 +- [X86,PPC,S390] Control optional mitigations for CPU
47 +- vulnerabilities. This is a set of curated,
48 ++ [X86,PPC,S390,ARM64] Control optional mitigations for
49 ++ CPU vulnerabilities. This is a set of curated,
50 + arch-independent options, each of which is an
51 + aggregation of existing arch-specific options.
52 +
53 +@@ -2399,12 +2399,14 @@
54 + improves system performance, but it may also
55 + expose users to several CPU vulnerabilities.
56 + Equivalent to: nopti [X86,PPC]
57 ++ kpti=0 [ARM64]
58 + nospectre_v1 [PPC]
59 + nobp=0 [S390]
60 + nospectre_v1 [X86]
61 +- nospectre_v2 [X86,PPC,S390]
62 ++ nospectre_v2 [X86,PPC,S390,ARM64]
63 + spectre_v2_user=off [X86]
64 + spec_store_bypass_disable=off [X86,PPC]
65 ++ ssbd=force-off [ARM64]
66 + l1tf=off [X86]
67 + mds=off [X86]
68 +
69 +@@ -2745,10 +2747,10 @@
70 + (bounds check bypass). With this option data leaks
71 + are possible in the system.
72 +
73 +- nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
74 +- (indirect branch prediction) vulnerability. System may
75 +- allow data leaks with this option, which is equivalent
76 +- to spectre_v2=off.
77 ++ nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
78 ++ the Spectre variant 2 (indirect branch prediction)
79 ++ vulnerability. System may allow data leaks with this
80 ++ option.
81 +
82 + nospec_store_bypass_disable
83 + [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
84 +diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt
85 +index dad411d635d8..7964f03846b1 100644
86 +--- a/Documentation/arm64/cpu-feature-registers.txt
87 ++++ b/Documentation/arm64/cpu-feature-registers.txt
88 +@@ -110,7 +110,17 @@ infrastructure:
89 + x--------------------------------------------------x
90 + | Name | bits | visible |
91 + |--------------------------------------------------|
92 +- | RES0 | [63-32] | n |
93 ++ | TS | [55-52] | y |
94 ++ |--------------------------------------------------|
95 ++ | FHM | [51-48] | y |
96 ++ |--------------------------------------------------|
97 ++ | DP | [47-44] | y |
98 ++ |--------------------------------------------------|
99 ++ | SM4 | [43-40] | y |
100 ++ |--------------------------------------------------|
101 ++ | SM3 | [39-36] | y |
102 ++ |--------------------------------------------------|
103 ++ | SHA3 | [35-32] | y |
104 + |--------------------------------------------------|
105 + | RDM | [31-28] | y |
106 + |--------------------------------------------------|
107 +@@ -123,8 +133,6 @@ infrastructure:
108 + | SHA1 | [11-8] | y |
109 + |--------------------------------------------------|
110 + | AES | [7-4] | y |
111 +- |--------------------------------------------------|
112 +- | RES0 | [3-0] | n |
113 + x--------------------------------------------------x
114 +
115 +
116 +@@ -132,7 +140,9 @@ infrastructure:
117 + x--------------------------------------------------x
118 + | Name | bits | visible |
119 + |--------------------------------------------------|
120 +- | RES0 | [63-28] | n |
121 ++ | DIT | [51-48] | y |
122 ++ |--------------------------------------------------|
123 ++ | SVE | [35-32] | y |
124 + |--------------------------------------------------|
125 + | GIC | [27-24] | n |
126 + |--------------------------------------------------|
127 +@@ -183,6 +193,14 @@ infrastructure:
128 + | DPB | [3-0] | y |
129 + x--------------------------------------------------x
130 +
131 ++ 5) ID_AA64MMFR2_EL1 - Memory model feature register 2
132 ++
133 ++ x--------------------------------------------------x
134 ++ | Name | bits | visible |
135 ++ |--------------------------------------------------|
136 ++ | AT | [35-32] | y |
137 ++ x--------------------------------------------------x
138 ++
139 + Appendix I: Example
140 + ---------------------------
141 +
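
The documentation hunk above widens the set of ID-register fields that the kernel's MRS emulation exposes to EL0 (SHA3, SM3, SM4, DP, FHM, TS, DIT, SVE, AT). As a rough illustration only, not part of the patch, a userspace probe of one of the newly visible fields could look like the sketch below; the field position (SHA3 at bits [35:32]) is taken from the table above, and the helper names are made up.

/*
 * Illustrative sketch, not part of the patch: reading an AArch64 ID
 * register from EL0.  The MRS access traps to the kernel, which emulates
 * it and returns only the fields marked "visible" in the table above.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t read_id_aa64isar0(void)
{
        uint64_t val;

        asm volatile("mrs %0, ID_AA64ISAR0_EL1" : "=r" (val));
        return val;
}

int main(void)
{
        uint64_t isar0 = read_id_aa64isar0();

        /* SHA3 lives in bits [35:32], per the table above */
        if ((isar0 >> 32) & 0xf)
                printf("SHA3 instructions implemented\n");
        return 0;
}
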
142 +diff --git a/Makefile b/Makefile
143 +index 3d96b277ffc9..db996459d047 100644
144 +--- a/Makefile
145 ++++ b/Makefile
146 +@@ -1,7 +1,7 @@
147 + # SPDX-License-Identifier: GPL-2.0
148 + VERSION = 4
149 + PATCHLEVEL = 14
150 +-SUBLEVEL = 150
151 ++SUBLEVEL = 151
152 + EXTRAVERSION =
153 + NAME = Petit Gorille
154 +
155 +diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
156 +index 4714a59fd86d..345c117bd5ef 100644
157 +--- a/arch/arm/boot/dts/am4372.dtsi
158 ++++ b/arch/arm/boot/dts/am4372.dtsi
159 +@@ -1118,6 +1118,8 @@
160 + ti,hwmods = "dss_dispc";
161 + clocks = <&disp_clk>;
162 + clock-names = "fck";
163 ++
164 ++ max-memory-bandwidth = <230000000>;
165 + };
166 +
167 + rfbi: rfbi@4832a800 {
168 +diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
169 +index de06a1d5ffab..e61c14f59063 100644
170 +--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
171 ++++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
172 +@@ -966,7 +966,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
173 + .rev_offs = 0x0000,
174 + .sysc_offs = 0x0010,
175 + .syss_offs = 0x0014,
176 +- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
177 ++ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
178 ++ SYSC_HAS_RESET_STATUS,
179 + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
180 + SIDLE_SMART_WKUP),
181 + .sysc_fields = &omap_hwmod_sysc_type2,
182 +diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c
183 +index b4d78959cadf..bc9a37b3cecd 100644
184 +--- a/arch/arm/xen/efi.c
185 ++++ b/arch/arm/xen/efi.c
186 +@@ -31,7 +31,9 @@ void __init xen_efi_runtime_setup(void)
187 + efi.get_variable = xen_efi_get_variable;
188 + efi.get_next_variable = xen_efi_get_next_variable;
189 + efi.set_variable = xen_efi_set_variable;
190 ++ efi.set_variable_nonblocking = xen_efi_set_variable;
191 + efi.query_variable_info = xen_efi_query_variable_info;
192 ++ efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
193 + efi.update_capsule = xen_efi_update_capsule;
194 + efi.query_capsule_caps = xen_efi_query_capsule_caps;
195 + efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
196 +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
197 +index c30cd78b6918..e296ae3e20f4 100644
198 +--- a/arch/arm64/Kconfig
199 ++++ b/arch/arm64/Kconfig
200 +@@ -49,6 +49,7 @@ config ARM64
201 + select GENERIC_CLOCKEVENTS
202 + select GENERIC_CLOCKEVENTS_BROADCAST
203 + select GENERIC_CPU_AUTOPROBE
204 ++ select GENERIC_CPU_VULNERABILITIES
205 + select GENERIC_EARLY_IOREMAP
206 + select GENERIC_IDLE_POLL_SETUP
207 + select GENERIC_IRQ_PROBE
208 +diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
209 +index 7d6425d426ac..2f8bd0388905 100644
210 +--- a/arch/arm64/include/asm/cpucaps.h
211 ++++ b/arch/arm64/include/asm/cpucaps.h
212 +@@ -42,9 +42,9 @@
213 + #define ARM64_HAS_DCPOP 21
214 + #define ARM64_UNMAP_KERNEL_AT_EL0 23
215 + #define ARM64_HARDEN_BRANCH_PREDICTOR 24
216 +-#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
217 +-#define ARM64_SSBD 26
218 +-#define ARM64_MISMATCHED_CACHE_TYPE 27
219 ++#define ARM64_SSBD 25
220 ++#define ARM64_MISMATCHED_CACHE_TYPE 26
221 ++#define ARM64_SSBS 27
222 +
223 + #define ARM64_NCAPS 28
224 +
225 +diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
226 +index 5048c7a55eef..166f81b7afee 100644
227 +--- a/arch/arm64/include/asm/cpufeature.h
228 ++++ b/arch/arm64/include/asm/cpufeature.h
229 +@@ -10,6 +10,7 @@
230 + #define __ASM_CPUFEATURE_H
231 +
232 + #include <asm/cpucaps.h>
233 ++#include <asm/cputype.h>
234 + #include <asm/hwcap.h>
235 + #include <asm/sysreg.h>
236 +
237 +@@ -85,24 +86,227 @@ struct arm64_ftr_reg {
238 +
239 + extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
240 +
241 +-/* scope of capability check */
242 +-enum {
243 +- SCOPE_SYSTEM,
244 +- SCOPE_LOCAL_CPU,
245 +-};
246 ++/*
247 ++ * CPU capabilities:
248 ++ *
249 ++ * We use arm64_cpu_capabilities to represent system features, errata work
250 ++ * arounds (both used internally by kernel and tracked in cpu_hwcaps) and
251 ++ * ELF HWCAPs (which are exposed to user).
252 ++ *
253 ++ * To support systems with heterogeneous CPUs, we need to make sure that we
254 ++ * detect the capabilities correctly on the system and take appropriate
255 ++ * measures to ensure there are no incompatibilities.
256 ++ *
257 ++ * This comment tries to explain how we treat the capabilities.
258 ++ * Each capability has the following list of attributes :
259 ++ *
260 ++ * 1) Scope of Detection : The system detects a given capability by
261 ++ * performing some checks at runtime. This could be, e.g, checking the
262 ++ * value of a field in CPU ID feature register or checking the cpu
263 ++ * model. The capability provides a call back ( @matches() ) to
264 ++ * perform the check. Scope defines how the checks should be performed.
265 ++ * There are three cases:
266 ++ *
267 ++ * a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
268 ++ * matches. This implies, we have to run the check on all the
269 ++ * booting CPUs, until the system decides that state of the
270 ++ * capability is finalised. (See section 2 below)
271 ++ * Or
272 ++ * b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
273 ++ * matches. This implies, we run the check only once, when the
274 ++ * system decides to finalise the state of the capability. If the
275 ++ * capability relies on a field in one of the CPU ID feature
276 ++ * registers, we use the sanitised value of the register from the
277 ++ * CPU feature infrastructure to make the decision.
278 ++ * Or
279 ++ * c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the
280 ++ * feature. This category is for features that are "finalised"
281 ++ * (or used) by the kernel very early even before the SMP cpus
282 ++ * are brought up.
283 ++ *
284 ++ * The process of detection is usually denoted by "update" capability
285 ++ * state in the code.
286 ++ *
287 ++ * 2) Finalise the state : The kernel should finalise the state of a
288 ++ * capability at some point during its execution and take necessary
289 ++ * actions if any. Usually, this is done, after all the boot-time
290 ++ * enabled CPUs are brought up by the kernel, so that it can make
291 ++ * better decision based on the available set of CPUs. However, there
292 ++ * are some special cases, where the action is taken during the early
293 ++ * boot by the primary boot CPU. (e.g, running the kernel at EL2 with
294 ++ * Virtualisation Host Extensions). The kernel usually disallows any
295 ++ * changes to the state of a capability once it finalises the capability
296 ++ * and takes any action, as it may be impossible to execute the actions
297 ++ * safely. A CPU brought up after a capability is "finalised" is
298 ++ * referred to as "Late CPU" w.r.t the capability. e.g, all secondary
299 ++ * CPUs are treated "late CPUs" for capabilities determined by the boot
300 ++ * CPU.
301 ++ *
302 ++ * At the moment there are two passes of finalising the capabilities.
303 ++ * a) Boot CPU scope capabilities - Finalised by primary boot CPU via
304 ++ * setup_boot_cpu_capabilities().
305 ++ * b) Everything except (a) - Run via setup_system_capabilities().
306 ++ *
307 ++ * 3) Verification: When a CPU is brought online (e.g, by user or by the
308 ++ * kernel), the kernel should make sure that it is safe to use the CPU,
309 ++ * by verifying that the CPU is compliant with the state of the
310 ++ * capabilities finalised already. This happens via :
311 ++ *
312 ++ * secondary_start_kernel()-> check_local_cpu_capabilities()
313 ++ *
314 ++ * As explained in (2) above, capabilities could be finalised at
315 ++ * different points in the execution. Each newly booted CPU is verified
316 ++ * against the capabilities that have been finalised by the time it
317 ++ * boots.
318 ++ *
319 ++ * a) SCOPE_BOOT_CPU : All CPUs are verified against the capability
320 ++ * except for the primary boot CPU.
321 ++ *
322 ++ * b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
323 ++ * user after the kernel boot are verified against the capability.
324 ++ *
325 ++ * If there is a conflict, the kernel takes an action, based on the
326 ++ * severity (e.g, a CPU could be prevented from booting or cause a
327 ++ * kernel panic). The CPU is allowed to "affect" the state of the
328 ++ * capability, if it has not been finalised already. See section 5
329 ++ * for more details on conflicts.
330 ++ *
331 ++ * 4) Action: As mentioned in (2), the kernel can take an action for each
332 ++ * detected capability, on all CPUs on the system. Appropriate actions
333 ++ * include, turning on an architectural feature, modifying the control
334 ++ * registers (e.g, SCTLR, TCR etc.) or patching the kernel via
335 ++ * alternatives. The kernel patching is batched and performed at later
336 ++ * point. The actions are always initiated only after the capability
337 ++ * is finalised. This is usually denoted by "enabling" the capability.
338 ++ * The actions are initiated as follows :
339 ++ * a) Action is triggered on all online CPUs, after the capability is
340 ++ * finalised, invoked within the stop_machine() context from
341 ++ * enable_cpu_capabilities().
342 ++ *
343 ++ * b) Any late CPU, brought up after (1), the action is triggered via:
344 ++ *
345 ++ * check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
346 ++ *
347 ++ * 5) Conflicts: Based on the state of the capability on a late CPU vs.
348 ++ * the system state, we could have the following combinations :
349 ++ *
350 ++ * x-----------------------------x
351 ++ * | Type | System | Late CPU |
352 ++ * |-----------------------------|
353 ++ * | a | y | n |
354 ++ * |-----------------------------|
355 ++ * | b | n | y |
356 ++ * x-----------------------------x
357 ++ *
358 ++ * Two separate flag bits are defined to indicate whether each kind of
359 ++ * conflict can be allowed:
360 ++ * ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case(a) is allowed
361 ++ * ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case(b) is allowed
362 ++ *
363 ++ * Case (a) is not permitted for a capability that the system requires
364 ++ * all CPUs to have in order for the capability to be enabled. This is
365 ++ * typical for capabilities that represent enhanced functionality.
366 ++ *
367 ++ * Case (b) is not permitted for a capability that must be enabled
368 ++ * during boot if any CPU in the system requires it in order to run
369 ++ * safely. This is typical for erratum work arounds that cannot be
370 ++ * enabled after the corresponding capability is finalised.
371 ++ *
372 ++ * In some non-typical cases either both (a) and (b), or neither,
373 ++ * should be permitted. This can be described by including neither
374 ++ * or both flags in the capability's type field.
375 ++ */
376 ++
377 ++
378 ++/*
379 ++ * Decide how the capability is detected.
380 ++ * On any local CPU vs System wide vs the primary boot CPU
381 ++ */
382 ++#define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0))
383 ++#define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1))
384 ++/*
385 ++ * The capability is detected on the Boot CPU and is used by kernel
386 ++ * during early boot. i.e, the capability should be "detected" and
387 ++ * "enabled" as early as possible on all booting CPUs.
388 ++ */
389 ++#define ARM64_CPUCAP_SCOPE_BOOT_CPU ((u16)BIT(2))
390 ++#define ARM64_CPUCAP_SCOPE_MASK \
391 ++ (ARM64_CPUCAP_SCOPE_SYSTEM | \
392 ++ ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
393 ++ ARM64_CPUCAP_SCOPE_BOOT_CPU)
394 ++
395 ++#define SCOPE_SYSTEM ARM64_CPUCAP_SCOPE_SYSTEM
396 ++#define SCOPE_LOCAL_CPU ARM64_CPUCAP_SCOPE_LOCAL_CPU
397 ++#define SCOPE_BOOT_CPU ARM64_CPUCAP_SCOPE_BOOT_CPU
398 ++#define SCOPE_ALL ARM64_CPUCAP_SCOPE_MASK
399 ++
400 ++/*
401 ++ * Is it permitted for a late CPU to have this capability when system
402 ++ * hasn't already enabled it ?
403 ++ */
404 ++#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4))
405 ++/* Is it safe for a late CPU to miss this capability when system has it */
406 ++#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))
407 ++
408 ++/*
409 ++ * CPU errata workarounds that need to be enabled at boot time if one or
410 ++ * more CPUs in the system requires it. When one of these capabilities
411 ++ * has been enabled, it is safe to allow any CPU to boot that doesn't
412 ++ * require the workaround. However, it is not safe if a "late" CPU
413 ++ * requires a workaround and the system hasn't enabled it already.
414 ++ */
415 ++#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM \
416 ++ (ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
417 ++/*
418 ++ * CPU feature detected at boot time based on system-wide value of a
419 ++ * feature. It is safe for a late CPU to have this feature even though
420 ++ * the system hasn't enabled it, although the feature will not be used
421 ++ * by Linux in this case. If the system has enabled this feature already,
422 ++ * then every late CPU must have it.
423 ++ */
424 ++#define ARM64_CPUCAP_SYSTEM_FEATURE \
425 ++ (ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
426 ++/*
427 ++ * CPU feature detected at boot time based on feature of one or more CPUs.
428 ++ * All possible conflicts for a late CPU are ignored.
429 ++ */
430 ++#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE \
431 ++ (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
432 ++ ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU | \
433 ++ ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
434 ++
435 ++/*
436 ++ * CPU feature detected at boot time, on one or more CPUs. A late CPU
437 ++ * is not allowed to have the capability when the system doesn't have it.
438 ++ * It is Ok for a late CPU to miss the feature.
439 ++ */
440 ++#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE \
441 ++ (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
442 ++ ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
443 ++
444 ++/*
445 ++ * CPU feature used early in the boot based on the boot CPU. All secondary
446 ++ * CPUs must match the state of the capability as detected by the boot CPU.
447 ++ */
448 ++#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
449 +
450 + struct arm64_cpu_capabilities {
451 + const char *desc;
452 + u16 capability;
453 +- int def_scope; /* default scope */
454 ++ u16 type;
455 + bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
456 +- int (*enable)(void *); /* Called on all active CPUs */
457 ++ /*
458 ++ * Take the appropriate actions to enable this capability for this CPU.
459 ++ * For each successfully booted CPU, this method is called for each
460 ++ * globally detected capability.
461 ++ */
462 ++ void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
463 + union {
464 + struct { /* To be used for erratum handling only */
465 +- u32 midr_model;
466 +- u32 midr_range_min, midr_range_max;
467 ++ struct midr_range midr_range;
468 + };
469 +
470 ++ const struct midr_range *midr_range_list;
471 + struct { /* Feature register checking */
472 + u32 sys_reg;
473 + u8 field_pos;
474 +@@ -114,6 +318,23 @@ struct arm64_cpu_capabilities {
475 + };
476 + };
477 +
478 ++static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
479 ++{
480 ++ return cap->type & ARM64_CPUCAP_SCOPE_MASK;
481 ++}
482 ++
483 ++static inline bool
484 ++cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
485 ++{
486 ++ return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
487 ++}
488 ++
489 ++static inline bool
490 ++cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
491 ++{
492 ++ return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
493 ++}
494 ++
495 + extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
496 + extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
497 + extern struct static_key_false arm64_const_caps_ready;
498 +@@ -225,15 +446,8 @@ static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
499 + }
500 +
501 + void __init setup_cpu_features(void);
502 +-
503 +-void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
504 +- const char *info);
505 +-void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
506 + void check_local_cpu_capabilities(void);
507 +
508 +-void update_cpu_errata_workarounds(void);
509 +-void __init enable_errata_workarounds(void);
510 +-void verify_local_cpu_errata_workarounds(void);
511 +
512 + u64 read_sanitised_ftr_reg(u32 id);
513 +
514 +@@ -279,11 +493,7 @@ static inline int arm64_get_ssbd_state(void)
515 + #endif
516 + }
517 +
518 +-#ifdef CONFIG_ARM64_SSBD
519 + void arm64_set_ssbd_mitigation(bool state);
520 +-#else
521 +-static inline void arm64_set_ssbd_mitigation(bool state) {}
522 +-#endif
523 +
524 + #endif /* __ASSEMBLY__ */
525 +
526 +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
527 +index 04569aa267fd..b23456035eac 100644
528 +--- a/arch/arm64/include/asm/cputype.h
529 ++++ b/arch/arm64/include/asm/cputype.h
530 +@@ -85,6 +85,8 @@
531 + #define ARM_CPU_PART_CORTEX_A53 0xD03
532 + #define ARM_CPU_PART_CORTEX_A73 0xD09
533 + #define ARM_CPU_PART_CORTEX_A75 0xD0A
534 ++#define ARM_CPU_PART_CORTEX_A35 0xD04
535 ++#define ARM_CPU_PART_CORTEX_A55 0xD05
536 +
537 + #define APM_CPU_PART_POTENZA 0x000
538 +
539 +@@ -108,6 +110,8 @@
540 + #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
541 + #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
542 + #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
543 ++#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
544 ++#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
545 + #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
546 + #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
547 + #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
548 +@@ -125,6 +129,45 @@
549 +
550 + #define read_cpuid(reg) read_sysreg_s(SYS_ ## reg)
551 +
552 ++/*
553 ++ * Represent a range of MIDR values for a given CPU model and a
554 ++ * range of variant/revision values.
555 ++ *
556 ++ * @model - CPU model as defined by MIDR_CPU_MODEL
557 ++ * @rv_min - Minimum value for the revision/variant as defined by
558 ++ * MIDR_CPU_VAR_REV
559 ++ * @rv_max - Maximum value for the variant/revision for the range.
560 ++ */
561 ++struct midr_range {
562 ++ u32 model;
563 ++ u32 rv_min;
564 ++ u32 rv_max;
565 ++};
566 ++
567 ++#define MIDR_RANGE(m, v_min, r_min, v_max, r_max) \
568 ++ { \
569 ++ .model = m, \
570 ++ .rv_min = MIDR_CPU_VAR_REV(v_min, r_min), \
571 ++ .rv_max = MIDR_CPU_VAR_REV(v_max, r_max), \
572 ++ }
573 ++
574 ++#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
575 ++
576 ++static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
577 ++{
578 ++ return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
579 ++ range->rv_min, range->rv_max);
580 ++}
581 ++
582 ++static inline bool
583 ++is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
584 ++{
585 ++ while (ranges->model)
586 ++ if (is_midr_in_range(midr, ranges++))
587 ++ return true;
588 ++ return false;
589 ++}
590 ++
591 + /*
592 + * The CPU ID never changes at run time, so we might as well tell the
593 + * compiler that it's constant. Use this function to read the CPU ID
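
The cputype.h hunk above replaces the old single min/max MIDR pair with struct midr_range, MIDR_RANGE(), MIDR_ALL_VERSIONS() and the is_midr_in_range{,_list}() helpers. Below is a minimal sketch of the intended usage pattern, assuming kernel context with these helpers available; the list and function names are illustrative, but the same shape appears later in this patch as arm64_ssb_cpus and spectre_v2_safe_list in cpu_errata.c.

/*
 * Illustrative sketch, not part of the patch: a NULL-terminated
 * midr_range list and a check of the current CPU against it, using the
 * helpers added above.  Names are made up for the example.
 */
#include <linux/types.h>
#include <asm/cputype.h>

static const struct midr_range example_safe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_RANGE(MIDR_CORTEX_A57, 0, 0, 1, 2),        /* r0p0 - r1p2 */
        {},                                             /* sentinel */
};

static bool example_cpu_is_safe(void)
{
        return is_midr_in_range_list(read_cpuid_id(), example_safe_list);
}
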
594 +diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
595 +index 1a6d02350fc6..c59e81b65132 100644
596 +--- a/arch/arm64/include/asm/kvm_asm.h
597 ++++ b/arch/arm64/include/asm/kvm_asm.h
598 +@@ -70,8 +70,6 @@ extern u32 __kvm_get_mdcr_el2(void);
599 +
600 + extern u32 __init_stage2_translation(void);
601 +
602 +-extern void __qcom_hyp_sanitize_btac_predictors(void);
603 +-
604 + /* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
605 + #define __hyp_this_cpu_ptr(sym) \
606 + ({ \
607 +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
608 +index b01ad3489bd8..f982c9d1d10b 100644
609 +--- a/arch/arm64/include/asm/kvm_host.h
610 ++++ b/arch/arm64/include/asm/kvm_host.h
611 +@@ -356,6 +356,8 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
612 + void __kvm_set_tpidr_el2(u64 tpidr_el2);
613 + DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
614 +
615 ++void __kvm_enable_ssbs(void);
616 ++
617 + static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
618 + unsigned long hyp_stack_ptr,
619 + unsigned long vector_ptr)
620 +@@ -380,6 +382,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
621 + - (u64)kvm_ksym_ref(kvm_host_cpu_state);
622 +
623 + kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
624 ++
625 ++ /*
626 ++ * Disabling SSBD on a non-VHE system requires us to enable SSBS
627 ++ * at EL2.
628 ++ */
629 ++ if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
630 ++ arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
631 ++ kvm_call_hyp(__kvm_enable_ssbs);
632 ++ }
633 + }
634 +
635 + static inline void kvm_arch_hardware_unsetup(void) {}
636 +diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
637 +index 91bb97d8bdbf..9eb95ab19924 100644
638 +--- a/arch/arm64/include/asm/processor.h
639 ++++ b/arch/arm64/include/asm/processor.h
640 +@@ -37,6 +37,7 @@
641 + #include <linux/string.h>
642 +
643 + #include <asm/alternative.h>
644 ++#include <asm/cpufeature.h>
645 + #include <asm/fpsimd.h>
646 + #include <asm/hw_breakpoint.h>
647 + #include <asm/lse.h>
648 +@@ -147,11 +148,25 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
649 + regs->pc = pc;
650 + }
651 +
652 ++static inline void set_ssbs_bit(struct pt_regs *regs)
653 ++{
654 ++ regs->pstate |= PSR_SSBS_BIT;
655 ++}
656 ++
657 ++static inline void set_compat_ssbs_bit(struct pt_regs *regs)
658 ++{
659 ++ regs->pstate |= PSR_AA32_SSBS_BIT;
660 ++}
661 ++
662 + static inline void start_thread(struct pt_regs *regs, unsigned long pc,
663 + unsigned long sp)
664 + {
665 + start_thread_common(regs, pc);
666 + regs->pstate = PSR_MODE_EL0t;
667 ++
668 ++ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
669 ++ set_ssbs_bit(regs);
670 ++
671 + regs->sp = sp;
672 + }
673 +
674 +@@ -168,6 +183,9 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
675 + regs->pstate |= COMPAT_PSR_E_BIT;
676 + #endif
677 +
678 ++ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
679 ++ set_compat_ssbs_bit(regs);
680 ++
681 + regs->compat_sp = sp;
682 + }
683 + #endif
684 +@@ -222,8 +240,8 @@ static inline void spin_lock_prefetch(const void *ptr)
685 +
686 + #endif
687 +
688 +-int cpu_enable_pan(void *__unused);
689 +-int cpu_enable_cache_maint_trap(void *__unused);
690 ++void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
691 ++void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
692 +
693 + #endif /* __ASSEMBLY__ */
694 + #endif /* __ASM_PROCESSOR_H */
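
With the processor.h hunk above, new user threads start with PSTATE.SSBS set (stores may be speculated) unless the SSBD mitigation was forced on, and a task can still request the mitigation for itself through the speculation-control prctl provided by the arm64 SSBD code this patch builds on. A hedged userspace sketch, assuming the PR_SPEC_STORE_BYPASS interface is present in this kernel's uapi headers:

/*
 * Illustrative sketch, not part of the patch: a task opting in to the
 * Speculative Store Bypass mitigation for itself, whatever default SSBS
 * state start_thread() gave it.  Assumes PR_SPEC_STORE_BYPASS exists in
 * the uapi headers for this kernel.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
                perror("PR_SET_SPECULATION_CTRL");
        return 0;
}
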
695 +diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
696 +index 6069d66e0bc2..b466d763a90d 100644
697 +--- a/arch/arm64/include/asm/ptrace.h
698 ++++ b/arch/arm64/include/asm/ptrace.h
699 +@@ -35,7 +35,38 @@
700 + #define COMPAT_PTRACE_GETHBPREGS 29
701 + #define COMPAT_PTRACE_SETHBPREGS 30
702 +
703 +-/* AArch32 CPSR bits */
704 ++/* SPSR_ELx bits for exceptions taken from AArch32 */
705 ++#define PSR_AA32_MODE_MASK 0x0000001f
706 ++#define PSR_AA32_MODE_USR 0x00000010
707 ++#define PSR_AA32_MODE_FIQ 0x00000011
708 ++#define PSR_AA32_MODE_IRQ 0x00000012
709 ++#define PSR_AA32_MODE_SVC 0x00000013
710 ++#define PSR_AA32_MODE_ABT 0x00000017
711 ++#define PSR_AA32_MODE_HYP 0x0000001a
712 ++#define PSR_AA32_MODE_UND 0x0000001b
713 ++#define PSR_AA32_MODE_SYS 0x0000001f
714 ++#define PSR_AA32_T_BIT 0x00000020
715 ++#define PSR_AA32_F_BIT 0x00000040
716 ++#define PSR_AA32_I_BIT 0x00000080
717 ++#define PSR_AA32_A_BIT 0x00000100
718 ++#define PSR_AA32_E_BIT 0x00000200
719 ++#define PSR_AA32_SSBS_BIT 0x00800000
720 ++#define PSR_AA32_DIT_BIT 0x01000000
721 ++#define PSR_AA32_Q_BIT 0x08000000
722 ++#define PSR_AA32_V_BIT 0x10000000
723 ++#define PSR_AA32_C_BIT 0x20000000
724 ++#define PSR_AA32_Z_BIT 0x40000000
725 ++#define PSR_AA32_N_BIT 0x80000000
726 ++#define PSR_AA32_IT_MASK 0x0600fc00 /* If-Then execution state mask */
727 ++#define PSR_AA32_GE_MASK 0x000f0000
728 ++
729 ++#ifdef CONFIG_CPU_BIG_ENDIAN
730 ++#define PSR_AA32_ENDSTATE PSR_AA32_E_BIT
731 ++#else
732 ++#define PSR_AA32_ENDSTATE 0
733 ++#endif
734 ++
735 ++/* AArch32 CPSR bits, as seen in AArch32 */
736 + #define COMPAT_PSR_MODE_MASK 0x0000001f
737 + #define COMPAT_PSR_MODE_USR 0x00000010
738 + #define COMPAT_PSR_MODE_FIQ 0x00000011
739 +@@ -50,6 +81,7 @@
740 + #define COMPAT_PSR_I_BIT 0x00000080
741 + #define COMPAT_PSR_A_BIT 0x00000100
742 + #define COMPAT_PSR_E_BIT 0x00000200
743 ++#define COMPAT_PSR_DIT_BIT 0x00200000
744 + #define COMPAT_PSR_J_BIT 0x01000000
745 + #define COMPAT_PSR_Q_BIT 0x08000000
746 + #define COMPAT_PSR_V_BIT 0x10000000
747 +@@ -111,6 +143,30 @@
748 + #define compat_sp_fiq regs[29]
749 + #define compat_lr_fiq regs[30]
750 +
751 ++static inline unsigned long compat_psr_to_pstate(const unsigned long psr)
752 ++{
753 ++ unsigned long pstate;
754 ++
755 ++ pstate = psr & ~COMPAT_PSR_DIT_BIT;
756 ++
757 ++ if (psr & COMPAT_PSR_DIT_BIT)
758 ++ pstate |= PSR_AA32_DIT_BIT;
759 ++
760 ++ return pstate;
761 ++}
762 ++
763 ++static inline unsigned long pstate_to_compat_psr(const unsigned long pstate)
764 ++{
765 ++ unsigned long psr;
766 ++
767 ++ psr = pstate & ~PSR_AA32_DIT_BIT;
768 ++
769 ++ if (pstate & PSR_AA32_DIT_BIT)
770 ++ psr |= COMPAT_PSR_DIT_BIT;
771 ++
772 ++ return psr;
773 ++}
774 ++
775 + /*
776 + * This struct defines the way the registers are stored on the stack during an
777 + * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
778 +diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
779 +index ede80d47d0ef..50a89bcf9072 100644
780 +--- a/arch/arm64/include/asm/sysreg.h
781 ++++ b/arch/arm64/include/asm/sysreg.h
782 +@@ -20,6 +20,7 @@
783 + #ifndef __ASM_SYSREG_H
784 + #define __ASM_SYSREG_H
785 +
786 ++#include <asm/compiler.h>
787 + #include <linux/stringify.h>
788 +
789 + /*
790 +@@ -85,11 +86,14 @@
791 +
792 + #define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
793 + #define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
794 ++#define REG_PSTATE_SSBS_IMM sys_reg(0, 3, 4, 0, 1)
795 +
796 + #define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
797 + (!!x)<<8 | 0x1f)
798 + #define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
799 + (!!x)<<8 | 0x1f)
800 ++#define SET_PSTATE_SSBS(x) __emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM | \
801 ++ (!!x)<<8 | 0x1f)
802 +
803 + #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
804 + #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
805 +@@ -296,28 +300,94 @@
806 + #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
807 +
808 + /* Common SCTLR_ELx flags. */
809 ++#define SCTLR_ELx_DSSBS (1UL << 44)
810 + #define SCTLR_ELx_EE (1 << 25)
811 ++#define SCTLR_ELx_WXN (1 << 19)
812 + #define SCTLR_ELx_I (1 << 12)
813 + #define SCTLR_ELx_SA (1 << 3)
814 + #define SCTLR_ELx_C (1 << 2)
815 + #define SCTLR_ELx_A (1 << 1)
816 + #define SCTLR_ELx_M 1
817 +
818 ++#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
819 ++ SCTLR_ELx_SA | SCTLR_ELx_I)
820 ++
821 ++/* SCTLR_EL2 specific flags. */
822 + #define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
823 + (1 << 18) | (1 << 22) | (1 << 23) | (1 << 28) | \
824 + (1 << 29))
825 ++#define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \
826 ++ (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
827 ++ (1 << 17) | (1 << 20) | (1 << 21) | (1 << 24) | \
828 ++ (1 << 26) | (1 << 27) | (1 << 30) | (1 << 31) | \
829 ++ (0xffffefffUL << 32))
830 ++
831 ++#ifdef CONFIG_CPU_BIG_ENDIAN
832 ++#define ENDIAN_SET_EL2 SCTLR_ELx_EE
833 ++#define ENDIAN_CLEAR_EL2 0
834 ++#else
835 ++#define ENDIAN_SET_EL2 0
836 ++#define ENDIAN_CLEAR_EL2 SCTLR_ELx_EE
837 ++#endif
838 +
839 +-#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
840 +- SCTLR_ELx_SA | SCTLR_ELx_I)
841 ++/* SCTLR_EL2 value used for the hyp-stub */
842 ++#define SCTLR_EL2_SET (ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
843 ++#define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
844 ++ SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
845 ++ SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
846 ++
847 ++#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
848 ++#error "Inconsistent SCTLR_EL2 set/clear bits"
849 ++#endif
850 +
851 + /* SCTLR_EL1 specific flags. */
852 + #define SCTLR_EL1_UCI (1 << 26)
853 ++#define SCTLR_EL1_E0E (1 << 24)
854 + #define SCTLR_EL1_SPAN (1 << 23)
855 ++#define SCTLR_EL1_NTWE (1 << 18)
856 ++#define SCTLR_EL1_NTWI (1 << 16)
857 + #define SCTLR_EL1_UCT (1 << 15)
858 ++#define SCTLR_EL1_DZE (1 << 14)
859 ++#define SCTLR_EL1_UMA (1 << 9)
860 + #define SCTLR_EL1_SED (1 << 8)
861 ++#define SCTLR_EL1_ITD (1 << 7)
862 + #define SCTLR_EL1_CP15BEN (1 << 5)
863 ++#define SCTLR_EL1_SA0 (1 << 4)
864 ++
865 ++#define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
866 ++ (1 << 29))
867 ++#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
868 ++ (1 << 21) | (1 << 27) | (1 << 30) | (1 << 31) | \
869 ++ (0xffffefffUL << 32))
870 ++
871 ++#ifdef CONFIG_CPU_BIG_ENDIAN
872 ++#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
873 ++#define ENDIAN_CLEAR_EL1 0
874 ++#else
875 ++#define ENDIAN_SET_EL1 0
876 ++#define ENDIAN_CLEAR_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
877 ++#endif
878 ++
879 ++#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\
880 ++ SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\
881 ++ SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_NTWI |\
882 ++ SCTLR_EL1_NTWE | SCTLR_EL1_SPAN | ENDIAN_SET_EL1 |\
883 ++ SCTLR_EL1_UCI | SCTLR_EL1_RES1)
884 ++#define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\
885 ++ SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
886 ++ SCTLR_ELx_DSSBS | SCTLR_EL1_RES0)
887 ++
888 ++#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
889 ++#error "Inconsistent SCTLR_EL1 set/clear bits"
890 ++#endif
891 +
892 + /* id_aa64isar0 */
893 ++#define ID_AA64ISAR0_TS_SHIFT 52
894 ++#define ID_AA64ISAR0_FHM_SHIFT 48
895 ++#define ID_AA64ISAR0_DP_SHIFT 44
896 ++#define ID_AA64ISAR0_SM4_SHIFT 40
897 ++#define ID_AA64ISAR0_SM3_SHIFT 36
898 ++#define ID_AA64ISAR0_SHA3_SHIFT 32
899 + #define ID_AA64ISAR0_RDM_SHIFT 28
900 + #define ID_AA64ISAR0_ATOMICS_SHIFT 20
901 + #define ID_AA64ISAR0_CRC32_SHIFT 16
902 +@@ -334,6 +404,7 @@
903 + /* id_aa64pfr0 */
904 + #define ID_AA64PFR0_CSV3_SHIFT 60
905 + #define ID_AA64PFR0_CSV2_SHIFT 56
906 ++#define ID_AA64PFR0_DIT_SHIFT 48
907 + #define ID_AA64PFR0_GIC_SHIFT 24
908 + #define ID_AA64PFR0_ASIMD_SHIFT 20
909 + #define ID_AA64PFR0_FP_SHIFT 16
910 +@@ -350,6 +421,13 @@
911 + #define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
912 + #define ID_AA64PFR0_EL0_32BIT_64BIT 0x2
913 +
914 ++/* id_aa64pfr1 */
915 ++#define ID_AA64PFR1_SSBS_SHIFT 4
916 ++
917 ++#define ID_AA64PFR1_SSBS_PSTATE_NI 0
918 ++#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
919 ++#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
920 ++
921 + /* id_aa64mmfr0 */
922 + #define ID_AA64MMFR0_TGRAN4_SHIFT 28
923 + #define ID_AA64MMFR0_TGRAN64_SHIFT 24
924 +@@ -379,6 +457,7 @@
925 + #define ID_AA64MMFR1_VMIDBITS_16 2
926 +
927 + /* id_aa64mmfr2 */
928 ++#define ID_AA64MMFR2_AT_SHIFT 32
929 + #define ID_AA64MMFR2_LVA_SHIFT 16
930 + #define ID_AA64MMFR2_IESB_SHIFT 12
931 + #define ID_AA64MMFR2_LSM_SHIFT 8
932 +@@ -463,6 +542,7 @@
933 +
934 + #else
935 +
936 ++#include <linux/build_bug.h>
937 + #include <linux/types.h>
938 +
939 + asm(
940 +@@ -515,6 +595,17 @@ asm(
941 + asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
942 + } while (0)
943 +
944 ++/*
945 ++ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
946 ++ * set mask are set. Other bits are left as-is.
947 ++ */
948 ++#define sysreg_clear_set(sysreg, clear, set) do { \
949 ++ u64 __scs_val = read_sysreg(sysreg); \
950 ++ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
951 ++ if (__scs_new != __scs_val) \
952 ++ write_sysreg(__scs_new, sysreg); \
953 ++} while (0)
954 ++
955 + static inline void config_sctlr_el1(u32 clear, u32 set)
956 + {
957 + u32 val;
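
The sysreg.h hunk above adds sysreg_clear_set(), which writes the system register back only when the clear/set masks actually change its value. A minimal usage sketch, assuming kernel context; clearing SCTLR_EL1.UCT mirrors what cpu_enable_trap_ctr_access() in cpu_errata.c does via config_sctlr_el1():

/*
 * Illustrative sketch, not part of the patch: sysreg_clear_set() usage.
 * Clearing SCTLR_EL1.UCT traps userspace CTR_EL0 reads, as
 * cpu_enable_trap_ctr_access() does via config_sctlr_el1(); the register
 * write is skipped if the bit is already clear.
 */
#include <asm/sysreg.h>

static void example_trap_ctr_el0(void)
{
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
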
958 +diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
959 +index c5f89442785c..9d1e24e030b3 100644
960 +--- a/arch/arm64/include/asm/virt.h
961 ++++ b/arch/arm64/include/asm/virt.h
962 +@@ -102,12 +102,6 @@ static inline bool has_vhe(void)
963 + return false;
964 + }
965 +
966 +-#ifdef CONFIG_ARM64_VHE
967 +-extern void verify_cpu_run_el(void);
968 +-#else
969 +-static inline void verify_cpu_run_el(void) {}
970 +-#endif
971 +-
972 + #endif /* __ASSEMBLY__ */
973 +
974 + #endif /* ! __ASM__VIRT_H */
975 +diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
976 +index b3fdeee739ea..2bcd6e4f3474 100644
977 +--- a/arch/arm64/include/uapi/asm/hwcap.h
978 ++++ b/arch/arm64/include/uapi/asm/hwcap.h
979 +@@ -37,5 +37,17 @@
980 + #define HWCAP_FCMA (1 << 14)
981 + #define HWCAP_LRCPC (1 << 15)
982 + #define HWCAP_DCPOP (1 << 16)
983 ++#define HWCAP_SHA3 (1 << 17)
984 ++#define HWCAP_SM3 (1 << 18)
985 ++#define HWCAP_SM4 (1 << 19)
986 ++#define HWCAP_ASIMDDP (1 << 20)
987 ++#define HWCAP_SHA512 (1 << 21)
988 ++#define HWCAP_SVE (1 << 22)
989 ++#define HWCAP_ASIMDFHM (1 << 23)
990 ++#define HWCAP_DIT (1 << 24)
991 ++#define HWCAP_USCAT (1 << 25)
992 ++#define HWCAP_ILRCPC (1 << 26)
993 ++#define HWCAP_FLAGM (1 << 27)
994 ++#define HWCAP_SSBS (1 << 28)
995 +
996 + #endif /* _UAPI__ASM_HWCAP_H */
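
The hwcap.h hunk above adds new ELF hwcap bits up to HWCAP_SSBS, which the kernel reports to userspace through the auxiliary vector. A small userspace check, assuming <asm/hwcap.h> from these (or newer) kernel headers is on the include path:

/*
 * Illustrative sketch, not part of the patch: userspace probing the new
 * HWCAP_SSBS bit through the ELF auxiliary vector.
 */
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
        unsigned long hwcaps = getauxval(AT_HWCAP);

        printf("SSBS %s\n", (hwcaps & HWCAP_SSBS) ? "present" : "absent");
        return 0;
}
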
997 +diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
998 +index 67d4c33974e8..eea58f8ec355 100644
999 +--- a/arch/arm64/include/uapi/asm/ptrace.h
1000 ++++ b/arch/arm64/include/uapi/asm/ptrace.h
1001 +@@ -45,6 +45,7 @@
1002 + #define PSR_I_BIT 0x00000080
1003 + #define PSR_A_BIT 0x00000100
1004 + #define PSR_D_BIT 0x00000200
1005 ++#define PSR_SSBS_BIT 0x00001000
1006 + #define PSR_PAN_BIT 0x00400000
1007 + #define PSR_UAO_BIT 0x00800000
1008 + #define PSR_Q_BIT 0x08000000
1009 +diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
1010 +index e5de33513b5d..4cae34e5a24e 100644
1011 +--- a/arch/arm64/kernel/bpi.S
1012 ++++ b/arch/arm64/kernel/bpi.S
1013 +@@ -55,29 +55,14 @@ ENTRY(__bp_harden_hyp_vecs_start)
1014 + .endr
1015 + ENTRY(__bp_harden_hyp_vecs_end)
1016 +
1017 +-ENTRY(__qcom_hyp_sanitize_link_stack_start)
1018 +- stp x29, x30, [sp, #-16]!
1019 +- .rept 16
1020 +- bl . + 4
1021 +- .endr
1022 +- ldp x29, x30, [sp], #16
1023 +-ENTRY(__qcom_hyp_sanitize_link_stack_end)
1024 +
1025 +-.macro smccc_workaround_1 inst
1026 ++ENTRY(__smccc_workaround_1_smc_start)
1027 + sub sp, sp, #(8 * 4)
1028 + stp x2, x3, [sp, #(8 * 0)]
1029 + stp x0, x1, [sp, #(8 * 2)]
1030 + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
1031 +- \inst #0
1032 ++ smc #0
1033 + ldp x2, x3, [sp, #(8 * 0)]
1034 + ldp x0, x1, [sp, #(8 * 2)]
1035 + add sp, sp, #(8 * 4)
1036 +-.endm
1037 +-
1038 +-ENTRY(__smccc_workaround_1_smc_start)
1039 +- smccc_workaround_1 smc
1040 + ENTRY(__smccc_workaround_1_smc_end)
1041 +-
1042 +-ENTRY(__smccc_workaround_1_hvc_start)
1043 +- smccc_workaround_1 hvc
1044 +-ENTRY(__smccc_workaround_1_hvc_end)
1045 +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
1046 +index 3d6d7fae45de..7d15f4cb6393 100644
1047 +--- a/arch/arm64/kernel/cpu_errata.c
1048 ++++ b/arch/arm64/kernel/cpu_errata.c
1049 +@@ -19,17 +19,26 @@
1050 + #include <linux/arm-smccc.h>
1051 + #include <linux/psci.h>
1052 + #include <linux/types.h>
1053 ++#include <linux/cpu.h>
1054 + #include <asm/cpu.h>
1055 + #include <asm/cputype.h>
1056 + #include <asm/cpufeature.h>
1057 +
1058 + static bool __maybe_unused
1059 + is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
1060 ++{
1061 ++ u32 midr = read_cpuid_id();
1062 ++
1063 ++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
1064 ++ return is_midr_in_range(midr, &entry->midr_range);
1065 ++}
1066 ++
1067 ++static bool __maybe_unused
1068 ++is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
1069 ++ int scope)
1070 + {
1071 + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
1072 +- return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
1073 +- entry->midr_range_min,
1074 +- entry->midr_range_max);
1075 ++ return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
1076 + }
1077 +
1078 + static bool __maybe_unused
1079 +@@ -43,7 +52,7 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
1080 + model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
1081 + MIDR_ARCHITECTURE_MASK;
1082 +
1083 +- return model == entry->midr_model;
1084 ++ return model == entry->midr_range.model;
1085 + }
1086 +
1087 + static bool
1088 +@@ -61,26 +70,21 @@ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
1089 + (arm64_ftr_reg_ctrel0.sys_val & mask);
1090 + }
1091 +
1092 +-static int cpu_enable_trap_ctr_access(void *__unused)
1093 ++static void
1094 ++cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
1095 + {
1096 + /* Clear SCTLR_EL1.UCT */
1097 + config_sctlr_el1(SCTLR_EL1_UCT, 0);
1098 +- return 0;
1099 + }
1100 +
1101 +-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1102 + #include <asm/mmu_context.h>
1103 + #include <asm/cacheflush.h>
1104 +
1105 + DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
1106 +
1107 + #ifdef CONFIG_KVM
1108 +-extern char __qcom_hyp_sanitize_link_stack_start[];
1109 +-extern char __qcom_hyp_sanitize_link_stack_end[];
1110 + extern char __smccc_workaround_1_smc_start[];
1111 + extern char __smccc_workaround_1_smc_end[];
1112 +-extern char __smccc_workaround_1_hvc_start[];
1113 +-extern char __smccc_workaround_1_hvc_end[];
1114 +
1115 + static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
1116 + const char *hyp_vecs_end)
1117 +@@ -94,9 +98,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
1118 + flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
1119 + }
1120 +
1121 +-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
1122 +- const char *hyp_vecs_start,
1123 +- const char *hyp_vecs_end)
1124 ++static void install_bp_hardening_cb(bp_hardening_cb_t fn,
1125 ++ const char *hyp_vecs_start,
1126 ++ const char *hyp_vecs_end)
1127 + {
1128 + static int last_slot = -1;
1129 + static DEFINE_SPINLOCK(bp_lock);
1130 +@@ -123,14 +127,10 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
1131 + spin_unlock(&bp_lock);
1132 + }
1133 + #else
1134 +-#define __qcom_hyp_sanitize_link_stack_start NULL
1135 +-#define __qcom_hyp_sanitize_link_stack_end NULL
1136 + #define __smccc_workaround_1_smc_start NULL
1137 + #define __smccc_workaround_1_smc_end NULL
1138 +-#define __smccc_workaround_1_hvc_start NULL
1139 +-#define __smccc_workaround_1_hvc_end NULL
1140 +
1141 +-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
1142 ++static void install_bp_hardening_cb(bp_hardening_cb_t fn,
1143 + const char *hyp_vecs_start,
1144 + const char *hyp_vecs_end)
1145 + {
1146 +@@ -138,23 +138,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
1147 + }
1148 + #endif /* CONFIG_KVM */
1149 +
1150 +-static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
1151 +- bp_hardening_cb_t fn,
1152 +- const char *hyp_vecs_start,
1153 +- const char *hyp_vecs_end)
1154 +-{
1155 +- u64 pfr0;
1156 +-
1157 +- if (!entry->matches(entry, SCOPE_LOCAL_CPU))
1158 +- return;
1159 +-
1160 +- pfr0 = read_cpuid(ID_AA64PFR0_EL1);
1161 +- if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
1162 +- return;
1163 +-
1164 +- __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
1165 +-}
1166 +-
1167 + #include <uapi/linux/psci.h>
1168 + #include <linux/arm-smccc.h>
1169 + #include <linux/psci.h>
1170 +@@ -169,77 +152,95 @@ static void call_hvc_arch_workaround_1(void)
1171 + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
1172 + }
1173 +
1174 +-static int enable_smccc_arch_workaround_1(void *data)
1175 ++static void qcom_link_stack_sanitization(void)
1176 ++{
1177 ++ u64 tmp;
1178 ++
1179 ++ asm volatile("mov %0, x30 \n"
1180 ++ ".rept 16 \n"
1181 ++ "bl . + 4 \n"
1182 ++ ".endr \n"
1183 ++ "mov x30, %0 \n"
1184 ++ : "=&r" (tmp));
1185 ++}
1186 ++
1187 ++static bool __nospectre_v2;
1188 ++static int __init parse_nospectre_v2(char *str)
1189 ++{
1190 ++ __nospectre_v2 = true;
1191 ++ return 0;
1192 ++}
1193 ++early_param("nospectre_v2", parse_nospectre_v2);
1194 ++
1195 ++/*
1196 ++ * -1: No workaround
1197 ++ * 0: No workaround required
1198 ++ * 1: Workaround installed
1199 ++ */
1200 ++static int detect_harden_bp_fw(void)
1201 + {
1202 +- const struct arm64_cpu_capabilities *entry = data;
1203 + bp_hardening_cb_t cb;
1204 + void *smccc_start, *smccc_end;
1205 + struct arm_smccc_res res;
1206 +-
1207 +- if (!entry->matches(entry, SCOPE_LOCAL_CPU))
1208 +- return 0;
1209 ++ u32 midr = read_cpuid_id();
1210 +
1211 + if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
1212 +- return 0;
1213 ++ return -1;
1214 +
1215 + switch (psci_ops.conduit) {
1216 + case PSCI_CONDUIT_HVC:
1217 + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1218 + ARM_SMCCC_ARCH_WORKAROUND_1, &res);
1219 +- if ((int)res.a0 < 0)
1220 ++ switch ((int)res.a0) {
1221 ++ case 1:
1222 ++ /* Firmware says we're just fine */
1223 + return 0;
1224 +- cb = call_hvc_arch_workaround_1;
1225 +- smccc_start = __smccc_workaround_1_hvc_start;
1226 +- smccc_end = __smccc_workaround_1_hvc_end;
1227 ++ case 0:
1228 ++ cb = call_hvc_arch_workaround_1;
1229 ++ /* This is a guest, no need to patch KVM vectors */
1230 ++ smccc_start = NULL;
1231 ++ smccc_end = NULL;
1232 ++ break;
1233 ++ default:
1234 ++ return -1;
1235 ++ }
1236 + break;
1237 +
1238 + case PSCI_CONDUIT_SMC:
1239 + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1240 + ARM_SMCCC_ARCH_WORKAROUND_1, &res);
1241 +- if ((int)res.a0 < 0)
1242 ++ switch ((int)res.a0) {
1243 ++ case 1:
1244 ++ /* Firmware says we're just fine */
1245 + return 0;
1246 +- cb = call_smc_arch_workaround_1;
1247 +- smccc_start = __smccc_workaround_1_smc_start;
1248 +- smccc_end = __smccc_workaround_1_smc_end;
1249 ++ case 0:
1250 ++ cb = call_smc_arch_workaround_1;
1251 ++ smccc_start = __smccc_workaround_1_smc_start;
1252 ++ smccc_end = __smccc_workaround_1_smc_end;
1253 ++ break;
1254 ++ default:
1255 ++ return -1;
1256 ++ }
1257 + break;
1258 +
1259 + default:
1260 +- return 0;
1261 ++ return -1;
1262 + }
1263 +
1264 +- install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
1265 ++ if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
1266 ++ ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
1267 ++ cb = qcom_link_stack_sanitization;
1268 +
1269 +- return 0;
1270 +-}
1271 ++ if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
1272 ++ install_bp_hardening_cb(cb, smccc_start, smccc_end);
1273 +
1274 +-static void qcom_link_stack_sanitization(void)
1275 +-{
1276 +- u64 tmp;
1277 +-
1278 +- asm volatile("mov %0, x30 \n"
1279 +- ".rept 16 \n"
1280 +- "bl . + 4 \n"
1281 +- ".endr \n"
1282 +- "mov x30, %0 \n"
1283 +- : "=&r" (tmp));
1284 +-}
1285 +-
1286 +-static int qcom_enable_link_stack_sanitization(void *data)
1287 +-{
1288 +- const struct arm64_cpu_capabilities *entry = data;
1289 +-
1290 +- install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
1291 +- __qcom_hyp_sanitize_link_stack_start,
1292 +- __qcom_hyp_sanitize_link_stack_end);
1293 +-
1294 +- return 0;
1295 ++ return 1;
1296 + }
1297 +-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
1298 +
1299 +-#ifdef CONFIG_ARM64_SSBD
1300 + DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
1301 +
1302 + int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
1303 ++static bool __ssb_safe = true;
1304 +
1305 + static const struct ssbd_options {
1306 + const char *str;
1307 +@@ -309,6 +310,19 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
1308 +
1309 + void arm64_set_ssbd_mitigation(bool state)
1310 + {
1311 ++ if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
1312 ++ pr_info_once("SSBD disabled by kernel configuration\n");
1313 ++ return;
1314 ++ }
1315 ++
1316 ++ if (this_cpu_has_cap(ARM64_SSBS)) {
1317 ++ if (state)
1318 ++ asm volatile(SET_PSTATE_SSBS(0));
1319 ++ else
1320 ++ asm volatile(SET_PSTATE_SSBS(1));
1321 ++ return;
1322 ++ }
1323 ++
1324 + switch (psci_ops.conduit) {
1325 + case PSCI_CONDUIT_HVC:
1326 + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
1327 +@@ -330,11 +344,28 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
1328 + struct arm_smccc_res res;
1329 + bool required = true;
1330 + s32 val;
1331 ++ bool this_cpu_safe = false;
1332 +
1333 + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
1334 +
1335 ++ if (cpu_mitigations_off())
1336 ++ ssbd_state = ARM64_SSBD_FORCE_DISABLE;
1337 ++
1338 ++ /* delay setting __ssb_safe until we get a firmware response */
1339 ++ if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
1340 ++ this_cpu_safe = true;
1341 ++
1342 ++ if (this_cpu_has_cap(ARM64_SSBS)) {
1343 ++ if (!this_cpu_safe)
1344 ++ __ssb_safe = false;
1345 ++ required = false;
1346 ++ goto out_printmsg;
1347 ++ }
1348 ++
1349 + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
1350 + ssbd_state = ARM64_SSBD_UNKNOWN;
1351 ++ if (!this_cpu_safe)
1352 ++ __ssb_safe = false;
1353 + return false;
1354 + }
1355 +
1356 +@@ -351,6 +382,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
1357 +
1358 + default:
1359 + ssbd_state = ARM64_SSBD_UNKNOWN;
1360 ++ if (!this_cpu_safe)
1361 ++ __ssb_safe = false;
1362 + return false;
1363 + }
1364 +
1365 +@@ -359,14 +392,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
1366 + switch (val) {
1367 + case SMCCC_RET_NOT_SUPPORTED:
1368 + ssbd_state = ARM64_SSBD_UNKNOWN;
1369 ++ if (!this_cpu_safe)
1370 ++ __ssb_safe = false;
1371 + return false;
1372 +
1373 ++ /* machines with mixed mitigation requirements must not return this */
1374 + case SMCCC_RET_NOT_REQUIRED:
1375 + pr_info_once("%s mitigation not required\n", entry->desc);
1376 + ssbd_state = ARM64_SSBD_MITIGATED;
1377 + return false;
1378 +
1379 + case SMCCC_RET_SUCCESS:
1380 ++ __ssb_safe = false;
1381 + required = true;
1382 + break;
1383 +
1384 +@@ -376,12 +413,13 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
1385 +
1386 + default:
1387 + WARN_ON(1);
1388 ++ if (!this_cpu_safe)
1389 ++ __ssb_safe = false;
1390 + return false;
1391 + }
1392 +
1393 + switch (ssbd_state) {
1394 + case ARM64_SSBD_FORCE_DISABLE:
1395 +- pr_info_once("%s disabled from command-line\n", entry->desc);
1396 + arm64_set_ssbd_mitigation(false);
1397 + required = false;
1398 + break;
1399 +@@ -394,7 +432,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
1400 + break;
1401 +
1402 + case ARM64_SSBD_FORCE_ENABLE:
1403 +- pr_info_once("%s forced from command-line\n", entry->desc);
1404 + arm64_set_ssbd_mitigation(true);
1405 + required = true;
1406 + break;
1407 +@@ -404,23 +441,126 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
1408 + break;
1409 + }
1410 +
1411 ++out_printmsg:
1412 ++ switch (ssbd_state) {
1413 ++ case ARM64_SSBD_FORCE_DISABLE:
1414 ++ pr_info_once("%s disabled from command-line\n", entry->desc);
1415 ++ break;
1416 ++
1417 ++ case ARM64_SSBD_FORCE_ENABLE:
1418 ++ pr_info_once("%s forced from command-line\n", entry->desc);
1419 ++ break;
1420 ++ }
1421 ++
1422 + return required;
1423 + }
1424 +-#endif /* CONFIG_ARM64_SSBD */
1425 +-
1426 +-#define MIDR_RANGE(model, min, max) \
1427 +- .def_scope = SCOPE_LOCAL_CPU, \
1428 +- .matches = is_affected_midr_range, \
1429 +- .midr_model = model, \
1430 +- .midr_range_min = min, \
1431 +- .midr_range_max = max
1432 +-
1433 +-#define MIDR_ALL_VERSIONS(model) \
1434 +- .def_scope = SCOPE_LOCAL_CPU, \
1435 +- .matches = is_affected_midr_range, \
1436 +- .midr_model = model, \
1437 +- .midr_range_min = 0, \
1438 +- .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
1439 ++
1440 ++/* known invulnerable cores */
1441 ++static const struct midr_range arm64_ssb_cpus[] = {
1442 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
1443 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
1444 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
1445 ++ {},
1446 ++};
1447 ++
1448 ++#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
1449 ++ .matches = is_affected_midr_range, \
1450 ++ .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
1451 ++
1452 ++#define CAP_MIDR_ALL_VERSIONS(model) \
1453 ++ .matches = is_affected_midr_range, \
1454 ++ .midr_range = MIDR_ALL_VERSIONS(model)
1455 ++
1456 ++#define MIDR_FIXED(rev, revidr_mask) \
1457 ++ .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
1458 ++
1459 ++#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
1460 ++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
1461 ++ CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
1462 ++
1463 ++#define CAP_MIDR_RANGE_LIST(list) \
1464 ++ .matches = is_affected_midr_range_list, \
1465 ++ .midr_range_list = list
1466 ++
1467 ++/* Errata affecting a range of revisions of given model variant */
1468 ++#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max) \
1469 ++ ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
1470 ++
1471 ++/* Errata affecting a single variant/revision of a model */
1472 ++#define ERRATA_MIDR_REV(model, var, rev) \
1473 ++ ERRATA_MIDR_RANGE(model, var, rev, var, rev)
1474 ++
1475 ++/* Errata affecting all variants/revisions of a given model */
1476 ++#define ERRATA_MIDR_ALL_VERSIONS(model) \
1477 ++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
1478 ++ CAP_MIDR_ALL_VERSIONS(model)
1479 ++
1480 ++/* Errata affecting a list of midr ranges, with same work around */
1481 ++#define ERRATA_MIDR_RANGE_LIST(midr_list) \
1482 ++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
1483 ++ CAP_MIDR_RANGE_LIST(midr_list)
1484 ++
1485 ++/* Track overall mitigation state. We are only mitigated if all cores are ok */
1486 ++static bool __hardenbp_enab = true;
1487 ++static bool __spectrev2_safe = true;
1488 ++
1489 ++/*
1490 ++ * List of CPUs that do not need any Spectre-v2 mitigation at all.
1491 ++ */
1492 ++static const struct midr_range spectre_v2_safe_list[] = {
1493 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
1494 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
1495 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
1496 ++ { /* sentinel */ }
1497 ++};
1498 ++
1499 ++/*
1500 ++ * Track overall bp hardening for all heterogeneous cores in the machine.
1501 ++ * We are only considered "safe" if all booted cores are known safe.
1502 ++ */
1503 ++static bool __maybe_unused
1504 ++check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
1505 ++{
1506 ++ int need_wa;
1507 ++
1508 ++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
1509 ++
1510 ++ /* If the CPU has CSV2 set, we're safe */
1511 ++ if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
1512 ++ ID_AA64PFR0_CSV2_SHIFT))
1513 ++ return false;
1514 ++
1515 ++ /* Alternatively, we have a list of unaffected CPUs */
1516 ++ if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
1517 ++ return false;
1518 ++
1519 ++ /* Fallback to firmware detection */
1520 ++ need_wa = detect_harden_bp_fw();
1521 ++ if (!need_wa)
1522 ++ return false;
1523 ++
1524 ++ __spectrev2_safe = false;
1525 ++
1526 ++ if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
1527 ++ pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
1528 ++ __hardenbp_enab = false;
1529 ++ return false;
1530 ++ }
1531 ++
1532 ++ /* forced off */
1533 ++ if (__nospectre_v2 || cpu_mitigations_off()) {
1534 ++ pr_info_once("spectrev2 mitigation disabled by command line option\n");
1535 ++ __hardenbp_enab = false;
1536 ++ return false;
1537 ++ }
1538 ++
1539 ++ if (need_wa < 0) {
1540 ++ pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
1541 ++ __hardenbp_enab = false;
1542 ++ }
1543 ++
1544 ++ return (need_wa > 0);
1545 ++}
1546 +
1547 + const struct arm64_cpu_capabilities arm64_errata[] = {
1548 + #if defined(CONFIG_ARM64_ERRATUM_826319) || \
1549 +@@ -430,8 +570,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1550 + /* Cortex-A53 r0p[012] */
1551 + .desc = "ARM errata 826319, 827319, 824069",
1552 + .capability = ARM64_WORKAROUND_CLEAN_CACHE,
1553 +- MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
1554 +- .enable = cpu_enable_cache_maint_trap,
1555 ++ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
1556 ++ .cpu_enable = cpu_enable_cache_maint_trap,
1557 + },
1558 + #endif
1559 + #ifdef CONFIG_ARM64_ERRATUM_819472
1560 +@@ -439,8 +579,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1561 + /* Cortex-A53 r0p[01] */
1562 + .desc = "ARM errata 819472",
1563 + .capability = ARM64_WORKAROUND_CLEAN_CACHE,
1564 +- MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
1565 +- .enable = cpu_enable_cache_maint_trap,
1566 ++ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
1567 ++ .cpu_enable = cpu_enable_cache_maint_trap,
1568 + },
1569 + #endif
1570 + #ifdef CONFIG_ARM64_ERRATUM_832075
1571 +@@ -448,9 +588,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1572 + /* Cortex-A57 r0p0 - r1p2 */
1573 + .desc = "ARM erratum 832075",
1574 + .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
1575 +- MIDR_RANGE(MIDR_CORTEX_A57,
1576 +- MIDR_CPU_VAR_REV(0, 0),
1577 +- MIDR_CPU_VAR_REV(1, 2)),
1578 ++ ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
1579 ++ 0, 0,
1580 ++ 1, 2),
1581 + },
1582 + #endif
1583 + #ifdef CONFIG_ARM64_ERRATUM_834220
1584 +@@ -458,9 +598,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1585 + /* Cortex-A57 r0p0 - r1p2 */
1586 + .desc = "ARM erratum 834220",
1587 + .capability = ARM64_WORKAROUND_834220,
1588 +- MIDR_RANGE(MIDR_CORTEX_A57,
1589 +- MIDR_CPU_VAR_REV(0, 0),
1590 +- MIDR_CPU_VAR_REV(1, 2)),
1591 ++ ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
1592 ++ 0, 0,
1593 ++ 1, 2),
1594 + },
1595 + #endif
1596 + #ifdef CONFIG_ARM64_ERRATUM_845719
1597 +@@ -468,7 +608,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1598 + /* Cortex-A53 r0p[01234] */
1599 + .desc = "ARM erratum 845719",
1600 + .capability = ARM64_WORKAROUND_845719,
1601 +- MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
1602 ++ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
1603 + },
1604 + #endif
1605 + #ifdef CONFIG_CAVIUM_ERRATUM_23154
1606 +@@ -476,7 +616,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1607 + /* Cavium ThunderX, pass 1.x */
1608 + .desc = "Cavium erratum 23154",
1609 + .capability = ARM64_WORKAROUND_CAVIUM_23154,
1610 +- MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
1611 ++ ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
1612 + },
1613 + #endif
1614 + #ifdef CONFIG_CAVIUM_ERRATUM_27456
1615 +@@ -484,15 +624,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1616 + /* Cavium ThunderX, T88 pass 1.x - 2.1 */
1617 + .desc = "Cavium erratum 27456",
1618 + .capability = ARM64_WORKAROUND_CAVIUM_27456,
1619 +- MIDR_RANGE(MIDR_THUNDERX,
1620 +- MIDR_CPU_VAR_REV(0, 0),
1621 +- MIDR_CPU_VAR_REV(1, 1)),
1622 ++ ERRATA_MIDR_RANGE(MIDR_THUNDERX,
1623 ++ 0, 0,
1624 ++ 1, 1),
1625 + },
1626 + {
1627 + /* Cavium ThunderX, T81 pass 1.0 */
1628 + .desc = "Cavium erratum 27456",
1629 + .capability = ARM64_WORKAROUND_CAVIUM_27456,
1630 +- MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
1631 ++ ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
1632 + },
1633 + #endif
1634 + #ifdef CONFIG_CAVIUM_ERRATUM_30115
1635 +@@ -500,49 +640,48 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1636 + /* Cavium ThunderX, T88 pass 1.x - 2.2 */
1637 + .desc = "Cavium erratum 30115",
1638 + .capability = ARM64_WORKAROUND_CAVIUM_30115,
1639 +- MIDR_RANGE(MIDR_THUNDERX, 0x00,
1640 +- (1 << MIDR_VARIANT_SHIFT) | 2),
1641 ++ ERRATA_MIDR_RANGE(MIDR_THUNDERX,
1642 ++ 0, 0,
1643 ++ 1, 2),
1644 + },
1645 + {
1646 + /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
1647 + .desc = "Cavium erratum 30115",
1648 + .capability = ARM64_WORKAROUND_CAVIUM_30115,
1649 +- MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
1650 ++ ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
1651 + },
1652 + {
1653 + /* Cavium ThunderX, T83 pass 1.0 */
1654 + .desc = "Cavium erratum 30115",
1655 + .capability = ARM64_WORKAROUND_CAVIUM_30115,
1656 +- MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
1657 ++ ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
1658 + },
1659 + #endif
1660 + {
1661 + .desc = "Mismatched cache line size",
1662 + .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
1663 + .matches = has_mismatched_cache_type,
1664 +- .def_scope = SCOPE_LOCAL_CPU,
1665 +- .enable = cpu_enable_trap_ctr_access,
1666 ++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
1667 ++ .cpu_enable = cpu_enable_trap_ctr_access,
1668 + },
1669 + {
1670 + .desc = "Mismatched cache type",
1671 + .capability = ARM64_MISMATCHED_CACHE_TYPE,
1672 + .matches = has_mismatched_cache_type,
1673 +- .def_scope = SCOPE_LOCAL_CPU,
1674 +- .enable = cpu_enable_trap_ctr_access,
1675 ++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
1676 ++ .cpu_enable = cpu_enable_trap_ctr_access,
1677 + },
1678 + #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
1679 + {
1680 + .desc = "Qualcomm Technologies Falkor erratum 1003",
1681 + .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
1682 +- MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
1683 +- MIDR_CPU_VAR_REV(0, 0),
1684 +- MIDR_CPU_VAR_REV(0, 0)),
1685 ++ ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
1686 + },
1687 + {
1688 + .desc = "Qualcomm Technologies Kryo erratum 1003",
1689 + .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
1690 +- .def_scope = SCOPE_LOCAL_CPU,
1691 +- .midr_model = MIDR_QCOM_KRYO,
1692 ++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
1693 ++ .midr_range.model = MIDR_QCOM_KRYO,
1694 + .matches = is_kryo_midr,
1695 + },
1696 + #endif
1697 +@@ -550,9 +689,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1698 + {
1699 + .desc = "Qualcomm Technologies Falkor erratum 1009",
1700 + .capability = ARM64_WORKAROUND_REPEAT_TLBI,
1701 +- MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
1702 +- MIDR_CPU_VAR_REV(0, 0),
1703 +- MIDR_CPU_VAR_REV(0, 0)),
1704 ++ ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
1705 + },
1706 + #endif
1707 + #ifdef CONFIG_ARM64_ERRATUM_858921
1708 +@@ -560,100 +697,56 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1709 + /* Cortex-A73 all versions */
1710 + .desc = "ARM erratum 858921",
1711 + .capability = ARM64_WORKAROUND_858921,
1712 +- MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
1713 ++ ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
1714 + },
1715 + #endif
1716 +-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1717 +- {
1718 +- .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1719 +- MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
1720 +- .enable = enable_smccc_arch_workaround_1,
1721 +- },
1722 +- {
1723 +- .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1724 +- MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
1725 +- .enable = enable_smccc_arch_workaround_1,
1726 +- },
1727 +- {
1728 +- .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1729 +- MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
1730 +- .enable = enable_smccc_arch_workaround_1,
1731 +- },
1732 +- {
1733 +- .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1734 +- MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
1735 +- .enable = enable_smccc_arch_workaround_1,
1736 +- },
1737 +- {
1738 +- .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1739 +- MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
1740 +- .enable = qcom_enable_link_stack_sanitization,
1741 +- },
1742 +- {
1743 +- .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
1744 +- MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
1745 +- },
1746 +- {
1747 +- .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1748 +- MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
1749 +- .enable = qcom_enable_link_stack_sanitization,
1750 +- },
1751 +- {
1752 +- .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
1753 +- MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
1754 +- },
1755 +- {
1756 +- .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1757 +- MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
1758 +- .enable = enable_smccc_arch_workaround_1,
1759 +- },
1760 + {
1761 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1762 +- MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
1763 +- .enable = enable_smccc_arch_workaround_1,
1764 ++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
1765 ++ .matches = check_branch_predictor,
1766 + },
1767 +-#endif
1768 +-#ifdef CONFIG_ARM64_SSBD
1769 + {
1770 + .desc = "Speculative Store Bypass Disable",
1771 +- .def_scope = SCOPE_LOCAL_CPU,
1772 ++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
1773 + .capability = ARM64_SSBD,
1774 + .matches = has_ssbd_mitigation,
1775 ++ .midr_range_list = arm64_ssb_cpus,
1776 + },
1777 +-#endif
1778 + {
1779 + }
1780 + };
1781 +
1782 +-/*
1783 +- * The CPU Errata work arounds are detected and applied at boot time
1784 +- * and the related information is freed soon after. If the new CPU requires
1785 +- * an errata not detected at boot, fail this CPU.
1786 +- */
1787 +-void verify_local_cpu_errata_workarounds(void)
1788 ++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
1789 ++ char *buf)
1790 + {
1791 +- const struct arm64_cpu_capabilities *caps = arm64_errata;
1792 +-
1793 +- for (; caps->matches; caps++) {
1794 +- if (cpus_have_cap(caps->capability)) {
1795 +- if (caps->enable)
1796 +- caps->enable((void *)caps);
1797 +- } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
1798 +- pr_crit("CPU%d: Requires work around for %s, not detected"
1799 +- " at boot time\n",
1800 +- smp_processor_id(),
1801 +- caps->desc ? : "an erratum");
1802 +- cpu_die_early();
1803 +- }
1804 +- }
1805 ++ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1806 + }
1807 +
1808 +-void update_cpu_errata_workarounds(void)
1809 ++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
1810 ++ char *buf)
1811 + {
1812 +- update_cpu_capabilities(arm64_errata, "enabling workaround for");
1813 ++ if (__spectrev2_safe)
1814 ++ return sprintf(buf, "Not affected\n");
1815 ++
1816 ++ if (__hardenbp_enab)
1817 ++ return sprintf(buf, "Mitigation: Branch predictor hardening\n");
1818 ++
1819 ++ return sprintf(buf, "Vulnerable\n");
1820 + }
1821 +
1822 +-void __init enable_errata_workarounds(void)
1823 ++ssize_t cpu_show_spec_store_bypass(struct device *dev,
1824 ++ struct device_attribute *attr, char *buf)
1825 + {
1826 +- enable_cpu_capabilities(arm64_errata);
1827 ++ if (__ssb_safe)
1828 ++ return sprintf(buf, "Not affected\n");
1829 ++
1830 ++ switch (ssbd_state) {
1831 ++ case ARM64_SSBD_KERNEL:
1832 ++ case ARM64_SSBD_FORCE_ENABLE:
1833 ++ if (IS_ENABLED(CONFIG_ARM64_SSBD))
1834 ++ return sprintf(buf,
1835 ++ "Mitigation: Speculative Store Bypass disabled via prctl\n");
1836 ++ }
1837 ++
1838 ++ return sprintf(buf, "Vulnerable\n");
1839 + }
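The three cpu_show_*() handlers added above back the generic CPU-vulnerabilities sysfs interface. As a rough user-space sketch (not part of this patch), the reported state can be read back as follows; the exact strings depend on the running kernel, its command line and the hardware:

/* Read the sysfs files populated by cpu_show_spectre_v1/v2 and
 * cpu_show_spec_store_bypass. Files may be absent on older kernels. */
#include <stdio.h>

int main(void)
{
        const char *files[] = {
                "/sys/devices/system/cpu/vulnerabilities/spectre_v1",
                "/sys/devices/system/cpu/vulnerabilities/spectre_v2",
                "/sys/devices/system/cpu/vulnerabilities/spec_store_bypass",
        };
        char line[128];

        for (unsigned int i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
                FILE *f = fopen(files[i], "r");

                if (!f)
                        continue;
                if (fgets(line, sizeof(line), f))
                        printf("%s: %s", files[i], line);
                fclose(f);
        }
        return 0;
}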
1840 +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
1841 +index 57ec681a8f11..15ce2c8b9ee2 100644
1842 +--- a/arch/arm64/kernel/cpufeature.c
1843 ++++ b/arch/arm64/kernel/cpufeature.c
1844 +@@ -24,6 +24,7 @@
1845 + #include <linux/stop_machine.h>
1846 + #include <linux/types.h>
1847 + #include <linux/mm.h>
1848 ++#include <linux/cpu.h>
1849 + #include <asm/cpu.h>
1850 + #include <asm/cpufeature.h>
1851 + #include <asm/cpu_ops.h>
1852 +@@ -107,7 +108,13 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
1853 + * sync with the documentation of the CPU feature register ABI.
1854 + */
1855 + static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
1856 +- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
1857 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
1858 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
1859 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
1860 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
1861 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
1862 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
1863 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
1864 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
1865 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
1866 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
1867 +@@ -117,36 +124,42 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
1868 + };
1869 +
1870 + static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
1871 +- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
1872 +- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
1873 +- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
1874 +- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
1875 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
1876 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
1877 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
1878 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
1879 + ARM64_FTR_END,
1880 + };
1881 +
1882 + static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
1883 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
1884 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
1885 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
1886 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
1887 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
1888 + S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
1889 + S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
1890 + /* Linux doesn't care about the EL3 */
1891 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
1892 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
1893 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
1894 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
1895 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
1896 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
1897 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
1898 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
1899 ++ ARM64_FTR_END,
1900 ++};
1901 ++
1902 ++static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
1903 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
1904 + ARM64_FTR_END,
1905 + };
1906 +
1907 + static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
1908 +- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
1909 +- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
1910 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
1911 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
1912 ++ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
1913 ++ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
1914 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
1915 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
1916 + /* Linux shouldn't care about secure memory */
1917 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
1918 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
1919 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
1920 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
1921 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
1922 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
1923 + /*
1924 + * Differing PARange is fine as long as all peripherals and memory are mapped
1925 + * within the minimum PARange of all CPUs
1926 +@@ -157,20 +170,21 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
1927 +
1928 + static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
1929 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
1930 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
1931 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
1932 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
1933 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
1934 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
1935 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
1936 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
1937 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
1938 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
1939 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
1940 + ARM64_FTR_END,
1941 + };
1942 +
1943 + static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
1944 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
1945 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
1946 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
1947 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
1948 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
1949 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
1950 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
1951 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
1952 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
1953 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
1954 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
1955 + ARM64_FTR_END,
1956 + };
1957 +
1958 +@@ -197,14 +211,14 @@ struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
1959 + };
1960 +
1961 + static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
1962 +- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 28, 4, 0xf), /* InnerShr */
1963 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 24, 4, 0), /* FCSE */
1964 ++ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf), /* InnerShr */
1965 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), /* FCSE */
1966 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */
1967 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 16, 4, 0), /* TCM */
1968 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0), /* ShareLvl */
1969 +- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0xf), /* OuterShr */
1970 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* PMSA */
1971 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* VMSA */
1972 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), /* TCM */
1973 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* ShareLvl */
1974 ++ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf), /* OuterShr */
1975 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* PMSA */
1976 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* VMSA */
1977 + ARM64_FTR_END,
1978 + };
1979 +
1980 +@@ -225,8 +239,8 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
1981 + };
1982 +
1983 + static const struct arm64_ftr_bits ftr_mvfr2[] = {
1984 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* FPMisc */
1985 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* SIMDMisc */
1986 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */
1987 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */
1988 + ARM64_FTR_END,
1989 + };
1990 +
1991 +@@ -238,25 +252,25 @@ static const struct arm64_ftr_bits ftr_dczid[] = {
1992 +
1993 +
1994 + static const struct arm64_ftr_bits ftr_id_isar5[] = {
1995 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
1996 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
1997 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
1998 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
1999 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
2000 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
2001 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
2002 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
2003 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
2004 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
2005 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
2006 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
2007 + ARM64_FTR_END,
2008 + };
2009 +
2010 + static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
2011 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* ac2 */
2012 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* ac2 */
2013 + ARM64_FTR_END,
2014 + };
2015 +
2016 + static const struct arm64_ftr_bits ftr_id_pfr0[] = {
2017 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0), /* State3 */
2018 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0), /* State2 */
2019 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* State1 */
2020 +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* State0 */
2021 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */
2022 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */
2023 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* State1 */
2024 ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* State0 */
2025 + ARM64_FTR_END,
2026 + };
2027 +
2028 +@@ -337,7 +351,7 @@ static const struct __ftr_reg_entry {
2029 +
2030 + /* Op1 = 0, CRn = 0, CRm = 4 */
2031 + ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
2032 +- ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
2033 ++ ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
2034 +
2035 + /* Op1 = 0, CRn = 0, CRm = 5 */
2036 + ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
2037 +@@ -476,6 +490,9 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
2038 + reg->user_mask = user_mask;
2039 + }
2040 +
2041 ++extern const struct arm64_cpu_capabilities arm64_errata[];
2042 ++static void __init setup_boot_cpu_capabilities(void);
2043 ++
2044 + void __init init_cpu_features(struct cpuinfo_arm64 *info)
2045 + {
2046 + /* Before we start using the tables, make sure it is sorted */
2047 +@@ -513,6 +530,11 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
2048 + init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
2049 + }
2050 +
2051 ++ /*
2052 ++ * Detect and enable early CPU capabilities based on the boot CPU,
2053 ++ * after we have initialised the CPU feature infrastructure.
2054 ++ */
2055 ++ setup_boot_cpu_capabilities();
2056 + }
2057 +
2058 + static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
2059 +@@ -609,7 +631,6 @@ void update_cpu_features(int cpu,
2060 +
2061 + /*
2062 + * EL3 is not our concern.
2063 +- * ID_AA64PFR1 is currently RES0.
2064 + */
2065 + taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
2066 + info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
2067 +@@ -804,14 +825,34 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
2068 + ID_AA64PFR0_FP_SHIFT) < 0;
2069 + }
2070 +
2071 +-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2072 ++static bool __meltdown_safe = true;
2073 + static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
2074 +
2075 + static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
2076 +- int __unused)
2077 ++ int scope)
2078 + {
2079 +- char const *str = "command line option";
2080 +- u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2081 ++ /* List of CPUs that are not vulnerable and don't need KPTI */
2082 ++ static const struct midr_range kpti_safe_list[] = {
2083 ++ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
2084 ++ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
2085 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
2086 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
2087 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
2088 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
2089 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
2090 ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
2091 ++ };
2092 ++ char const *str = "kpti command line option";
2093 ++ bool meltdown_safe;
2094 ++
2095 ++ meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
2096 ++
2097 ++ /* Defer to CPU feature registers */
2098 ++ if (has_cpuid_feature(entry, scope))
2099 ++ meltdown_safe = true;
2100 ++
2101 ++ if (!meltdown_safe)
2102 ++ __meltdown_safe = false;
2103 +
2104 + /*
2105 + * For reasons that aren't entirely clear, enabling KPTI on Cavium
2106 +@@ -823,6 +864,24 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
2107 + __kpti_forced = -1;
2108 + }
2109 +
2110 ++ /* Useful for KASLR robustness */
2111 ++ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
2112 ++ if (!__kpti_forced) {
2113 ++ str = "KASLR";
2114 ++ __kpti_forced = 1;
2115 ++ }
2116 ++ }
2117 ++
2118 ++ if (cpu_mitigations_off() && !__kpti_forced) {
2119 ++ str = "mitigations=off";
2120 ++ __kpti_forced = -1;
2121 ++ }
2122 ++
2123 ++ if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
2124 ++ pr_info_once("kernel page table isolation disabled by kernel configuration\n");
2125 ++ return false;
2126 ++ }
2127 ++
2128 + /* Forced? */
2129 + if (__kpti_forced) {
2130 + pr_info_once("kernel page table isolation forced %s by %s\n",
2131 +@@ -830,28 +889,12 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
2132 + return __kpti_forced > 0;
2133 + }
2134 +
2135 +- /* Useful for KASLR robustness */
2136 +- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
2137 +- return true;
2138 +-
2139 +- /* Don't force KPTI for CPUs that are not vulnerable */
2140 +- switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
2141 +- case MIDR_CAVIUM_THUNDERX2:
2142 +- case MIDR_BRCM_VULCAN:
2143 +- case MIDR_CORTEX_A53:
2144 +- case MIDR_CORTEX_A55:
2145 +- case MIDR_CORTEX_A57:
2146 +- case MIDR_CORTEX_A72:
2147 +- case MIDR_CORTEX_A73:
2148 +- return false;
2149 +- }
2150 +-
2151 +- /* Defer to CPU feature registers */
2152 +- return !cpuid_feature_extract_unsigned_field(pfr0,
2153 +- ID_AA64PFR0_CSV3_SHIFT);
2154 ++ return !meltdown_safe;
2155 + }
2156 +
2157 +-static int kpti_install_ng_mappings(void *__unused)
2158 ++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2159 ++static void
2160 ++kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
2161 + {
2162 + typedef void (kpti_remap_fn)(int, int, phys_addr_t);
2163 + extern kpti_remap_fn idmap_kpti_install_ng_mappings;
2164 +@@ -861,7 +904,7 @@ static int kpti_install_ng_mappings(void *__unused)
2165 + int cpu = smp_processor_id();
2166 +
2167 + if (kpti_applied)
2168 +- return 0;
2169 ++ return;
2170 +
2171 + remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
2172 +
2173 +@@ -872,8 +915,14 @@ static int kpti_install_ng_mappings(void *__unused)
2174 + if (!cpu)
2175 + kpti_applied = true;
2176 +
2177 +- return 0;
2178 ++ return;
2179 + }
2180 ++#else
2181 ++static void
2182 ++kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
2183 ++{
2184 ++}
2185 ++#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
2186 +
2187 + static int __init parse_kpti(char *str)
2188 + {
2189 +@@ -887,9 +936,8 @@ static int __init parse_kpti(char *str)
2190 + return 0;
2191 + }
2192 + early_param("kpti", parse_kpti);
2193 +-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
2194 +
2195 +-static int cpu_copy_el2regs(void *__unused)
2196 ++static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
2197 + {
2198 + /*
2199 + * Copy register values that aren't redirected by hardware.
2200 +@@ -901,15 +949,55 @@ static int cpu_copy_el2regs(void *__unused)
2201 + */
2202 + if (!alternatives_applied)
2203 + write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
2204 ++}
2205 ++
2206 ++#ifdef CONFIG_ARM64_SSBD
2207 ++static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
2208 ++{
2209 ++ if (user_mode(regs))
2210 ++ return 1;
2211 ++
2212 ++ if (instr & BIT(CRm_shift))
2213 ++ regs->pstate |= PSR_SSBS_BIT;
2214 ++ else
2215 ++ regs->pstate &= ~PSR_SSBS_BIT;
2216 +
2217 ++ arm64_skip_faulting_instruction(regs, 4);
2218 + return 0;
2219 + }
2220 +
2221 ++static struct undef_hook ssbs_emulation_hook = {
2222 ++ .instr_mask = ~(1U << CRm_shift),
2223 ++ .instr_val = 0xd500001f | REG_PSTATE_SSBS_IMM,
2224 ++ .fn = ssbs_emulation_handler,
2225 ++};
2226 ++
2227 ++static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
2228 ++{
2229 ++ static bool undef_hook_registered = false;
2230 ++ static DEFINE_SPINLOCK(hook_lock);
2231 ++
2232 ++ spin_lock(&hook_lock);
2233 ++ if (!undef_hook_registered) {
2234 ++ register_undef_hook(&ssbs_emulation_hook);
2235 ++ undef_hook_registered = true;
2236 ++ }
2237 ++ spin_unlock(&hook_lock);
2238 ++
2239 ++ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
2240 ++ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
2241 ++ arm64_set_ssbd_mitigation(false);
2242 ++ } else {
2243 ++ arm64_set_ssbd_mitigation(true);
2244 ++ }
2245 ++}
2246 ++#endif /* CONFIG_ARM64_SSBD */
2247 ++
2248 + static const struct arm64_cpu_capabilities arm64_features[] = {
2249 + {
2250 + .desc = "GIC system register CPU interface",
2251 + .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
2252 +- .def_scope = SCOPE_SYSTEM,
2253 ++ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2254 + .matches = has_useable_gicv3_cpuif,
2255 + .sys_reg = SYS_ID_AA64PFR0_EL1,
2256 + .field_pos = ID_AA64PFR0_GIC_SHIFT,
2257 +@@ -920,20 +1008,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
2258 + {
2259 + .desc = "Privileged Access Never",
2260 + .capability = ARM64_HAS_PAN,
2261 +- .def_scope = SCOPE_SYSTEM,
2262 ++ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2263 + .matches = has_cpuid_feature,
2264 + .sys_reg = SYS_ID_AA64MMFR1_EL1,
2265 + .field_pos = ID_AA64MMFR1_PAN_SHIFT,
2266 + .sign = FTR_UNSIGNED,
2267 + .min_field_value = 1,
2268 +- .enable = cpu_enable_pan,
2269 ++ .cpu_enable = cpu_enable_pan,
2270 + },
2271 + #endif /* CONFIG_ARM64_PAN */
2272 + #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
2273 + {
2274 + .desc = "LSE atomic instructions",
2275 + .capability = ARM64_HAS_LSE_ATOMICS,
2276 +- .def_scope = SCOPE_SYSTEM,
2277 ++ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2278 + .matches = has_cpuid_feature,
2279 + .sys_reg = SYS_ID_AA64ISAR0_EL1,
2280 + .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
2281 +@@ -944,14 +1032,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
2282 + {
2283 + .desc = "Software prefetching using PRFM",
2284 + .capability = ARM64_HAS_NO_HW_PREFETCH,
2285 +- .def_scope = SCOPE_SYSTEM,
2286 ++ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
2287 + .matches = has_no_hw_prefetch,
2288 + },
2289 + #ifdef CONFIG_ARM64_UAO
2290 + {
2291 + .desc = "User Access Override",
2292 + .capability = ARM64_HAS_UAO,
2293 +- .def_scope = SCOPE_SYSTEM,
2294 ++ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2295 + .matches = has_cpuid_feature,
2296 + .sys_reg = SYS_ID_AA64MMFR2_EL1,
2297 + .field_pos = ID_AA64MMFR2_UAO_SHIFT,
2298 +@@ -965,21 +1053,23 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
2299 + #ifdef CONFIG_ARM64_PAN
2300 + {
2301 + .capability = ARM64_ALT_PAN_NOT_UAO,
2302 +- .def_scope = SCOPE_SYSTEM,
2303 ++ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2304 + .matches = cpufeature_pan_not_uao,
2305 + },
2306 + #endif /* CONFIG_ARM64_PAN */
2307 ++#ifdef CONFIG_ARM64_VHE
2308 + {
2309 + .desc = "Virtualization Host Extensions",
2310 + .capability = ARM64_HAS_VIRT_HOST_EXTN,
2311 +- .def_scope = SCOPE_SYSTEM,
2312 ++ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2313 + .matches = runs_at_el2,
2314 +- .enable = cpu_copy_el2regs,
2315 ++ .cpu_enable = cpu_copy_el2regs,
2316 + },
2317 ++#endif /* CONFIG_ARM64_VHE */
2318 + {
2319 + .desc = "32-bit EL0 Support",
2320 + .capability = ARM64_HAS_32BIT_EL0,
2321 +- .def_scope = SCOPE_SYSTEM,
2322 ++ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2323 + .matches = has_cpuid_feature,
2324 + .sys_reg = SYS_ID_AA64PFR0_EL1,
2325 + .sign = FTR_UNSIGNED,
2326 +@@ -989,22 +1079,28 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
2327 + {
2328 + .desc = "Reduced HYP mapping offset",
2329 + .capability = ARM64_HYP_OFFSET_LOW,
2330 +- .def_scope = SCOPE_SYSTEM,
2331 ++ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2332 + .matches = hyp_offset_low,
2333 + },
2334 +-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2335 + {
2336 + .desc = "Kernel page table isolation (KPTI)",
2337 + .capability = ARM64_UNMAP_KERNEL_AT_EL0,
2338 +- .def_scope = SCOPE_SYSTEM,
2339 ++ .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
2340 ++ /*
2341 ++ * The ID feature fields below are used to indicate that
2342 ++ * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
2343 ++ * more details.
2344 ++ */
2345 ++ .sys_reg = SYS_ID_AA64PFR0_EL1,
2346 ++ .field_pos = ID_AA64PFR0_CSV3_SHIFT,
2347 ++ .min_field_value = 1,
2348 + .matches = unmap_kernel_at_el0,
2349 +- .enable = kpti_install_ng_mappings,
2350 ++ .cpu_enable = kpti_install_ng_mappings,
2351 + },
2352 +-#endif
2353 + {
2354 + /* FP/SIMD is not implemented */
2355 + .capability = ARM64_HAS_NO_FPSIMD,
2356 +- .def_scope = SCOPE_SYSTEM,
2357 ++ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2358 + .min_field_value = 0,
2359 + .matches = has_no_fpsimd,
2360 + },
2361 +@@ -1012,26 +1108,39 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
2362 + {
2363 + .desc = "Data cache clean to Point of Persistence",
2364 + .capability = ARM64_HAS_DCPOP,
2365 +- .def_scope = SCOPE_SYSTEM,
2366 ++ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2367 + .matches = has_cpuid_feature,
2368 + .sys_reg = SYS_ID_AA64ISAR1_EL1,
2369 + .field_pos = ID_AA64ISAR1_DPB_SHIFT,
2370 + .min_field_value = 1,
2371 + },
2372 ++#endif
2373 ++#ifdef CONFIG_ARM64_SSBD
2374 ++ {
2375 ++ .desc = "Speculative Store Bypassing Safe (SSBS)",
2376 ++ .capability = ARM64_SSBS,
2377 ++ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
2378 ++ .matches = has_cpuid_feature,
2379 ++ .sys_reg = SYS_ID_AA64PFR1_EL1,
2380 ++ .field_pos = ID_AA64PFR1_SSBS_SHIFT,
2381 ++ .sign = FTR_UNSIGNED,
2382 ++ .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
2383 ++ .cpu_enable = cpu_enable_ssbs,
2384 ++ },
2385 + #endif
2386 + {},
2387 + };
2388 +
2389 +-#define HWCAP_CAP(reg, field, s, min_value, type, cap) \
2390 ++#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
2391 + { \
2392 + .desc = #cap, \
2393 +- .def_scope = SCOPE_SYSTEM, \
2394 ++ .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
2395 + .matches = has_cpuid_feature, \
2396 + .sys_reg = reg, \
2397 + .field_pos = field, \
2398 + .sign = s, \
2399 + .min_field_value = min_value, \
2400 +- .hwcap_type = type, \
2401 ++ .hwcap_type = cap_type, \
2402 + .hwcap = cap, \
2403 + }
2404 +
2405 +@@ -1040,17 +1149,28 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
2406 + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
2407 + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
2408 + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
2409 ++ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
2410 + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
2411 + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
2412 + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
2413 ++ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
2414 ++ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
2415 ++ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
2416 ++ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
2417 ++ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
2418 ++ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
2419 + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
2420 + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
2421 + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
2422 + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
2423 ++ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
2424 + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
2425 + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
2426 + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
2427 + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
2428 ++ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
2429 ++ HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
2430 ++ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
2431 + {},
2432 + };
2433 +
2434 +@@ -1115,7 +1235,7 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
2435 + /* We support emulation of accesses to CPU ID feature registers */
2436 + elf_hwcap |= HWCAP_CPUID;
2437 + for (; hwcaps->matches; hwcaps++)
2438 +- if (hwcaps->matches(hwcaps, hwcaps->def_scope))
2439 ++ if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
2440 + cap_set_elf_hwcap(hwcaps);
2441 + }
2442 +
2443 +@@ -1138,11 +1258,13 @@ static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
2444 + return false;
2445 + }
2446 +
2447 +-void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
2448 +- const char *info)
2449 ++static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
2450 ++ u16 scope_mask, const char *info)
2451 + {
2452 ++ scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
2453 + for (; caps->matches; caps++) {
2454 +- if (!caps->matches(caps, caps->def_scope))
2455 ++ if (!(caps->type & scope_mask) ||
2456 ++ !caps->matches(caps, cpucap_default_scope(caps)))
2457 + continue;
2458 +
2459 + if (!cpus_have_cap(caps->capability) && caps->desc)
2460 +@@ -1151,33 +1273,69 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
2461 + }
2462 + }
2463 +
2464 ++static void update_cpu_capabilities(u16 scope_mask)
2465 ++{
2466 ++ __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
2467 ++ __update_cpu_capabilities(arm64_errata, scope_mask,
2468 ++ "enabling workaround for");
2469 ++}
2470 ++
2471 ++static int __enable_cpu_capability(void *arg)
2472 ++{
2473 ++ const struct arm64_cpu_capabilities *cap = arg;
2474 ++
2475 ++ cap->cpu_enable(cap);
2476 ++ return 0;
2477 ++}
2478 ++
2479 + /*
2480 + * Run through the enabled capabilities and enable() it on all active
2481 + * CPUs
2482 + */
2483 +-void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
2484 ++static void __init
2485 ++__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
2486 ++ u16 scope_mask)
2487 + {
2488 ++ scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
2489 + for (; caps->matches; caps++) {
2490 + unsigned int num = caps->capability;
2491 +
2492 +- if (!cpus_have_cap(num))
2493 ++ if (!(caps->type & scope_mask) || !cpus_have_cap(num))
2494 + continue;
2495 +
2496 + /* Ensure cpus_have_const_cap(num) works */
2497 + static_branch_enable(&cpu_hwcap_keys[num]);
2498 +
2499 +- if (caps->enable) {
2500 ++ if (caps->cpu_enable) {
2501 + /*
2502 +- * Use stop_machine() as it schedules the work allowing
2503 +- * us to modify PSTATE, instead of on_each_cpu() which
2504 +- * uses an IPI, giving us a PSTATE that disappears when
2505 +- * we return.
2506 ++ * Capabilities with SCOPE_BOOT_CPU scope are finalised
2507 ++ * before any secondary CPU boots. Thus, each secondary
2508 ++ * will enable the capability as appropriate via
2509 ++ * check_local_cpu_capabilities(). The only exception is
2510 ++ * the boot CPU, for which the capability must be
2511 ++ * enabled here. This approach avoids costly
2512 ++ * stop_machine() calls for this case.
2513 ++ *
2514 ++ * Otherwise, use stop_machine() as it schedules the
2515 ++ * work allowing us to modify PSTATE, instead of
2516 ++ * on_each_cpu() which uses an IPI, giving us a PSTATE
2517 ++ * that disappears when we return.
2518 + */
2519 +- stop_machine(caps->enable, (void *)caps, cpu_online_mask);
2520 ++ if (scope_mask & SCOPE_BOOT_CPU)
2521 ++ caps->cpu_enable(caps);
2522 ++ else
2523 ++ stop_machine(__enable_cpu_capability,
2524 ++ (void *)caps, cpu_online_mask);
2525 + }
2526 + }
2527 + }
2528 +
2529 ++static void __init enable_cpu_capabilities(u16 scope_mask)
2530 ++{
2531 ++ __enable_cpu_capabilities(arm64_features, scope_mask);
2532 ++ __enable_cpu_capabilities(arm64_errata, scope_mask);
2533 ++}
2534 ++
2535 + /*
2536 + * Flag to indicate if we have computed the system wide
2537 + * capabilities based on the boot time active CPUs. This
2538 +@@ -1193,14 +1351,83 @@ static inline void set_sys_caps_initialised(void)
2539 + sys_caps_initialised = true;
2540 + }
2541 +
2542 ++/*
2543 ++ * Run through the list of capabilities to check for conflicts.
2544 ++ * If the system has already detected a capability, take necessary
2545 ++ * action on this CPU.
2546 ++ *
2547 ++ * Returns "false" on conflicts.
2548 ++ */
2549 ++static bool
2550 ++__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps_list,
2551 ++ u16 scope_mask)
2552 ++{
2553 ++ bool cpu_has_cap, system_has_cap;
2554 ++ const struct arm64_cpu_capabilities *caps;
2555 ++
2556 ++ scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
2557 ++
2558 ++ for (caps = caps_list; caps->matches; caps++) {
2559 ++ if (!(caps->type & scope_mask))
2560 ++ continue;
2561 ++
2562 ++ cpu_has_cap = __this_cpu_has_cap(caps_list, caps->capability);
2563 ++ system_has_cap = cpus_have_cap(caps->capability);
2564 ++
2565 ++ if (system_has_cap) {
2566 ++ /*
2567 ++ * Check if the new CPU misses an advertised feature,
2568 ++ * which is not safe to miss.
2569 ++ */
2570 ++ if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
2571 ++ break;
2572 ++ /*
2573 ++ * We have to issue cpu_enable() irrespective of
2574 ++			 * whether the CPU has it or not, as it is enabled
2575 ++			 * system wide. It is up to the callback to take
2576 ++ * appropriate action on this CPU.
2577 ++ */
2578 ++ if (caps->cpu_enable)
2579 ++ caps->cpu_enable(caps);
2580 ++ } else {
2581 ++ /*
2582 ++ * Check if the CPU has this capability if it isn't
2583 ++ * safe to have when the system doesn't.
2584 ++ */
2585 ++ if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
2586 ++ break;
2587 ++ }
2588 ++ }
2589 ++
2590 ++ if (caps->matches) {
2591 ++ pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
2592 ++ smp_processor_id(), caps->capability,
2593 ++ caps->desc, system_has_cap, cpu_has_cap);
2594 ++ return false;
2595 ++ }
2596 ++
2597 ++ return true;
2598 ++}
2599 ++
2600 ++static bool verify_local_cpu_caps(u16 scope_mask)
2601 ++{
2602 ++ return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
2603 ++ __verify_local_cpu_caps(arm64_features, scope_mask);
2604 ++}
2605 ++
2606 + /*
2607 + * Check for CPU features that are used in early boot
2608 + * based on the Boot CPU value.
2609 + */
2610 + static void check_early_cpu_features(void)
2611 + {
2612 +- verify_cpu_run_el();
2613 + verify_cpu_asid_bits();
2614 ++ /*
2615 ++ * Early features are used by the kernel already. If there
2616 ++ * is a conflict, we cannot proceed further.
2617 ++ */
2618 ++ if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
2619 ++ cpu_panic_kernel();
2620 + }
2621 +
2622 + static void
2623 +@@ -1215,26 +1442,6 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
2624 + }
2625 + }
2626 +
2627 +-static void
2628 +-verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
2629 +-{
2630 +- const struct arm64_cpu_capabilities *caps = caps_list;
2631 +- for (; caps->matches; caps++) {
2632 +- if (!cpus_have_cap(caps->capability))
2633 +- continue;
2634 +- /*
2635 +- * If the new CPU misses an advertised feature, we cannot proceed
2636 +- * further, park the cpu.
2637 +- */
2638 +- if (!__this_cpu_has_cap(caps_list, caps->capability)) {
2639 +- pr_crit("CPU%d: missing feature: %s\n",
2640 +- smp_processor_id(), caps->desc);
2641 +- cpu_die_early();
2642 +- }
2643 +- if (caps->enable)
2644 +- caps->enable((void *)caps);
2645 +- }
2646 +-}
2647 +
2648 + /*
2649 + * Run through the enabled system capabilities and enable() it on this CPU.
2650 +@@ -1246,8 +1453,14 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
2651 + */
2652 + static void verify_local_cpu_capabilities(void)
2653 + {
2654 +- verify_local_cpu_errata_workarounds();
2655 +- verify_local_cpu_features(arm64_features);
2656 ++ /*
2657 ++ * The capabilities with SCOPE_BOOT_CPU are checked from
2658 ++ * check_early_cpu_features(), as they need to be verified
2659 ++ * on all secondary CPUs.
2660 ++ */
2661 ++ if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
2662 ++ cpu_die_early();
2663 ++
2664 + verify_local_elf_hwcaps(arm64_elf_hwcaps);
2665 + if (system_supports_32bit_el0())
2666 + verify_local_elf_hwcaps(compat_elf_hwcaps);
2667 +@@ -1263,20 +1476,22 @@ void check_local_cpu_capabilities(void)
2668 +
2669 + /*
2670 + * If we haven't finalised the system capabilities, this CPU gets
2671 +- * a chance to update the errata work arounds.
2672 ++ * a chance to update the errata work arounds and local features.
2673 + * Otherwise, this CPU should verify that it has all the system
2674 + * advertised capabilities.
2675 + */
2676 + if (!sys_caps_initialised)
2677 +- update_cpu_errata_workarounds();
2678 ++ update_cpu_capabilities(SCOPE_LOCAL_CPU);
2679 + else
2680 + verify_local_cpu_capabilities();
2681 + }
2682 +
2683 +-static void __init setup_feature_capabilities(void)
2684 ++static void __init setup_boot_cpu_capabilities(void)
2685 + {
2686 +- update_cpu_capabilities(arm64_features, "detected feature:");
2687 +- enable_cpu_capabilities(arm64_features);
2688 ++ /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
2689 ++ update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
2690 ++ /* Enable the SCOPE_BOOT_CPU capabilities alone right away */
2691 ++ enable_cpu_capabilities(SCOPE_BOOT_CPU);
2692 + }
2693 +
2694 + DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
2695 +@@ -1295,14 +1510,24 @@ bool this_cpu_has_cap(unsigned int cap)
2696 + __this_cpu_has_cap(arm64_errata, cap));
2697 + }
2698 +
2699 ++static void __init setup_system_capabilities(void)
2700 ++{
2701 ++ /*
2702 ++ * We have finalised the system-wide safe feature
2703 ++	 * registers; finalise the capabilities that depend
2704 ++	 * on them. Also enable all the available capabilities
2705 ++	 * that are not enabled already.
2706 ++ */
2707 ++ update_cpu_capabilities(SCOPE_SYSTEM);
2708 ++ enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
2709 ++}
2710 ++
2711 + void __init setup_cpu_features(void)
2712 + {
2713 + u32 cwg;
2714 + int cls;
2715 +
2716 +- /* Set the CPU feature capabilies */
2717 +- setup_feature_capabilities();
2718 +- enable_errata_workarounds();
2719 ++ setup_system_capabilities();
2720 + mark_const_caps_ready();
2721 + setup_elf_hwcaps(arm64_elf_hwcaps);
2722 +
2723 +@@ -1428,3 +1653,15 @@ static int __init enable_mrs_emulation(void)
2724 + }
2725 +
2726 + core_initcall(enable_mrs_emulation);
2727 ++
2728 ++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
2729 ++ char *buf)
2730 ++{
2731 ++ if (__meltdown_safe)
2732 ++ return sprintf(buf, "Not affected\n");
2733 ++
2734 ++ if (arm64_kernel_unmapped_at_el0())
2735 ++ return sprintf(buf, "Mitigation: PTI\n");
2736 ++
2737 ++ return sprintf(buf, "Vulnerable\n");
2738 ++}
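The reworked cpufeature.c helpers above drive a single capability walk from a scope mask instead of keeping separate feature and errata walkers. The stand-alone model below sketches only that filtering idea; the SCOPE_* values and structure layout are simplified stand-ins, not the kernel's definitions:

/* Simplified model of the scope-mask filtering in __update_cpu_capabilities():
 * each capability carries a type bitmask, and a pass over the table only
 * considers entries whose type intersects the requested scope. */
#include <stdbool.h>
#include <stdio.h>

#define SCOPE_LOCAL_CPU (1 << 0)
#define SCOPE_SYSTEM    (1 << 1)
#define SCOPE_BOOT_CPU  (1 << 2)
#define SCOPE_ALL       (SCOPE_LOCAL_CPU | SCOPE_SYSTEM | SCOPE_BOOT_CPU)

struct cap {
        const char *desc;
        unsigned int type;              /* scope bits, like caps->type */
        bool (*matches)(void);
};

static bool always(void) { return true; }

static void update_caps(const struct cap *caps, unsigned int scope_mask)
{
        scope_mask &= SCOPE_ALL;
        for (; caps->desc; caps++) {
                if (!(caps->type & scope_mask) || !caps->matches())
                        continue;
                printf("detected: %s\n", caps->desc);
        }
}

int main(void)
{
        const struct cap table[] = {
                { "boot-cpu feature", SCOPE_BOOT_CPU,  always },
                { "per-cpu erratum",  SCOPE_LOCAL_CPU, always },
                { "system feature",   SCOPE_SYSTEM,    always },
                { NULL, 0, NULL },
        };

        /* boot CPU pass: boot + local scopes, as in setup_boot_cpu_capabilities() */
        update_caps(table, SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
        /* later, system-wide pass as in setup_system_capabilities() */
        update_caps(table, SCOPE_SYSTEM);
        return 0;
}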
2739 +diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
2740 +index 311885962830..9ff64e04e63d 100644
2741 +--- a/arch/arm64/kernel/cpuinfo.c
2742 ++++ b/arch/arm64/kernel/cpuinfo.c
2743 +@@ -69,6 +69,18 @@ static const char *const hwcap_str[] = {
2744 + "fcma",
2745 + "lrcpc",
2746 + "dcpop",
2747 ++ "sha3",
2748 ++ "sm3",
2749 ++ "sm4",
2750 ++ "asimddp",
2751 ++ "sha512",
2752 ++ "sve",
2753 ++ "asimdfhm",
2754 ++ "dit",
2755 ++ "uscat",
2756 ++ "ilrcpc",
2757 ++ "flagm",
2758 ++ "ssbs",
2759 + NULL
2760 + };
2761 +
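The new hwcap_str[] entries correspond to HWCAP bits exported through AT_HWCAP and /proc/cpuinfo. A rough user-space sketch for probing the SSBS bit is shown below; HWCAP_SSBS is assumed to be bit 28, matching its position in the table above, in case the libc headers predate it:

/* Check whether the kernel advertises the "ssbs" hwcap added above. */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_SSBS
#define HWCAP_SSBS      (1UL << 28)     /* assumed bit position, see hwcap_str[] */
#endif

int main(void)
{
        unsigned long hwcaps = getauxval(AT_HWCAP);

        printf("ssbs: %s\n", (hwcaps & HWCAP_SSBS) ? "present" : "absent");
        return 0;
}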
2762 +diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
2763 +index 5d547deb6996..f4fdf6420ac5 100644
2764 +--- a/arch/arm64/kernel/fpsimd.c
2765 ++++ b/arch/arm64/kernel/fpsimd.c
2766 +@@ -28,6 +28,7 @@
2767 + #include <linux/signal.h>
2768 +
2769 + #include <asm/fpsimd.h>
2770 ++#include <asm/cpufeature.h>
2771 + #include <asm/cputype.h>
2772 + #include <asm/simd.h>
2773 +
2774 +diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
2775 +index 1371542de0d3..92cc7b51f100 100644
2776 +--- a/arch/arm64/kernel/head.S
2777 ++++ b/arch/arm64/kernel/head.S
2778 +@@ -388,17 +388,13 @@ ENTRY(el2_setup)
2779 + mrs x0, CurrentEL
2780 + cmp x0, #CurrentEL_EL2
2781 + b.eq 1f
2782 +- mrs x0, sctlr_el1
2783 +-CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1
2784 +-CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
2785 ++ mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
2786 + msr sctlr_el1, x0
2787 + mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
2788 + isb
2789 + ret
2790 +
2791 +-1: mrs x0, sctlr_el2
2792 +-CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
2793 +-CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
2794 ++1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
2795 + msr sctlr_el2, x0
2796 +
2797 + #ifdef CONFIG_ARM64_VHE
2798 +@@ -505,10 +501,7 @@ install_el2_stub:
2799 + * requires no configuration, and all non-hyp-specific EL2 setup
2800 + * will be done via the _EL1 system register aliases in __cpu_setup.
2801 + */
2802 +- /* sctlr_el1 */
2803 +- mov x0, #0x0800 // Set/clear RES{1,0} bits
2804 +-CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
2805 +-CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
2806 ++ mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
2807 + msr sctlr_el1, x0
2808 +
2809 + /* Coprocessor traps. */
2810 +diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
2811 +index 9e773732520c..243fd247d04e 100644
2812 +--- a/arch/arm64/kernel/process.c
2813 ++++ b/arch/arm64/kernel/process.c
2814 +@@ -296,6 +296,10 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
2815 + if (IS_ENABLED(CONFIG_ARM64_UAO) &&
2816 + cpus_have_const_cap(ARM64_HAS_UAO))
2817 + childregs->pstate |= PSR_UAO_BIT;
2818 ++
2819 ++ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
2820 ++ set_ssbs_bit(childregs);
2821 ++
2822 + p->thread.cpu_context.x19 = stack_start;
2823 + p->thread.cpu_context.x20 = stk_sz;
2824 + }
2825 +@@ -335,6 +339,32 @@ void uao_thread_switch(struct task_struct *next)
2826 + }
2827 + }
2828 +
2829 ++/*
2830 ++ * Force SSBS state on context-switch, since it may be lost after migrating
2831 ++ * from a CPU which treats the bit as RES0 in a heterogeneous system.
2832 ++ */
2833 ++static void ssbs_thread_switch(struct task_struct *next)
2834 ++{
2835 ++ struct pt_regs *regs = task_pt_regs(next);
2836 ++
2837 ++ /*
2838 ++ * Nothing to do for kernel threads, but 'regs' may be junk
2839 ++ * (e.g. idle task) so check the flags and bail early.
2840 ++ */
2841 ++ if (unlikely(next->flags & PF_KTHREAD))
2842 ++ return;
2843 ++
2844 ++ /* If the mitigation is enabled, then we leave SSBS clear. */
2845 ++ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
2846 ++ test_tsk_thread_flag(next, TIF_SSBD))
2847 ++ return;
2848 ++
2849 ++ if (compat_user_mode(regs))
2850 ++ set_compat_ssbs_bit(regs);
2851 ++ else if (user_mode(regs))
2852 ++ set_ssbs_bit(regs);
2853 ++}
2854 ++
2855 + /*
2856 + * We store our current task in sp_el0, which is clobbered by userspace. Keep a
2857 + * shadow copy so that we can restore this upon entry from userspace.
2858 +@@ -363,6 +393,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
2859 + contextidr_thread_switch(next);
2860 + entry_task_switch(next);
2861 + uao_thread_switch(next);
2862 ++ ssbs_thread_switch(next);
2863 +
2864 + /*
2865 + * Complete any pending TLB or cache maintenance on this CPU in case
2866 +diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
2867 +index 34d915b6974b..242527f29c41 100644
2868 +--- a/arch/arm64/kernel/ptrace.c
2869 ++++ b/arch/arm64/kernel/ptrace.c
2870 +@@ -1402,15 +1402,20 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
2871 + }
2872 +
2873 + /*
2874 +- * Bits which are always architecturally RES0 per ARM DDI 0487A.h
2875 ++ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
2876 ++ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
2877 ++ * not described in ARM DDI 0487D.a.
2878 ++ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
2879 ++ * be allocated an EL0 meaning in future.
2880 + * Userspace cannot use these until they have an architectural meaning.
2881 ++ * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
2882 + * We also reserve IL for the kernel; SS is handled dynamically.
2883 + */
2884 + #define SPSR_EL1_AARCH64_RES0_BITS \
2885 +- (GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
2886 +- GENMASK_ULL(5, 5))
2887 ++ (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
2888 ++ GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
2889 + #define SPSR_EL1_AARCH32_RES0_BITS \
2890 +- (GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20))
2891 ++ (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
2892 +
2893 + static int valid_compat_regs(struct user_pt_regs *regs)
2894 + {
2895 +diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
2896 +index b7ad41d7b6ee..a683cd499515 100644
2897 +--- a/arch/arm64/kernel/smp.c
2898 ++++ b/arch/arm64/kernel/smp.c
2899 +@@ -83,43 +83,6 @@ enum ipi_msg_type {
2900 + IPI_WAKEUP
2901 + };
2902 +
2903 +-#ifdef CONFIG_ARM64_VHE
2904 +-
2905 +-/* Whether the boot CPU is running in HYP mode or not*/
2906 +-static bool boot_cpu_hyp_mode;
2907 +-
2908 +-static inline void save_boot_cpu_run_el(void)
2909 +-{
2910 +- boot_cpu_hyp_mode = is_kernel_in_hyp_mode();
2911 +-}
2912 +-
2913 +-static inline bool is_boot_cpu_in_hyp_mode(void)
2914 +-{
2915 +- return boot_cpu_hyp_mode;
2916 +-}
2917 +-
2918 +-/*
2919 +- * Verify that a secondary CPU is running the kernel at the same
2920 +- * EL as that of the boot CPU.
2921 +- */
2922 +-void verify_cpu_run_el(void)
2923 +-{
2924 +- bool in_el2 = is_kernel_in_hyp_mode();
2925 +- bool boot_cpu_el2 = is_boot_cpu_in_hyp_mode();
2926 +-
2927 +- if (in_el2 ^ boot_cpu_el2) {
2928 +- pr_crit("CPU%d: mismatched Exception Level(EL%d) with boot CPU(EL%d)\n",
2929 +- smp_processor_id(),
2930 +- in_el2 ? 2 : 1,
2931 +- boot_cpu_el2 ? 2 : 1);
2932 +- cpu_panic_kernel();
2933 +- }
2934 +-}
2935 +-
2936 +-#else
2937 +-static inline void save_boot_cpu_run_el(void) {}
2938 +-#endif
2939 +-
2940 + #ifdef CONFIG_HOTPLUG_CPU
2941 + static int op_cpu_kill(unsigned int cpu);
2942 + #else
2943 +@@ -448,13 +411,6 @@ void __init smp_prepare_boot_cpu(void)
2944 + */
2945 + jump_label_init();
2946 + cpuinfo_store_boot_cpu();
2947 +- save_boot_cpu_run_el();
2948 +- /*
2949 +- * Run the errata work around checks on the boot CPU, once we have
2950 +- * initialised the cpu feature infrastructure from
2951 +- * cpuinfo_store_boot_cpu() above.
2952 +- */
2953 +- update_cpu_errata_workarounds();
2954 + }
2955 +
2956 + static u64 __init of_get_cpu_mpidr(struct device_node *dn)
2957 +diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
2958 +index 0560738c1d5c..58de005cd756 100644
2959 +--- a/arch/arm64/kernel/ssbd.c
2960 ++++ b/arch/arm64/kernel/ssbd.c
2961 +@@ -3,13 +3,32 @@
2962 + * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
2963 + */
2964 +
2965 ++#include <linux/compat.h>
2966 + #include <linux/errno.h>
2967 + #include <linux/prctl.h>
2968 + #include <linux/sched.h>
2969 ++#include <linux/sched/task_stack.h>
2970 + #include <linux/thread_info.h>
2971 +
2972 ++#include <asm/compat.h>
2973 + #include <asm/cpufeature.h>
2974 +
2975 ++static void ssbd_ssbs_enable(struct task_struct *task)
2976 ++{
2977 ++ u64 val = is_compat_thread(task_thread_info(task)) ?
2978 ++ PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
2979 ++
2980 ++ task_pt_regs(task)->pstate |= val;
2981 ++}
2982 ++
2983 ++static void ssbd_ssbs_disable(struct task_struct *task)
2984 ++{
2985 ++ u64 val = is_compat_thread(task_thread_info(task)) ?
2986 ++ PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
2987 ++
2988 ++ task_pt_regs(task)->pstate &= ~val;
2989 ++}
2990 ++
2991 + /*
2992 + * prctl interface for SSBD
2993 + */
2994 +@@ -45,12 +64,14 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
2995 + return -EPERM;
2996 + task_clear_spec_ssb_disable(task);
2997 + clear_tsk_thread_flag(task, TIF_SSBD);
2998 ++ ssbd_ssbs_enable(task);
2999 + break;
3000 + case PR_SPEC_DISABLE:
3001 + if (state == ARM64_SSBD_FORCE_DISABLE)
3002 + return -EPERM;
3003 + task_set_spec_ssb_disable(task);
3004 + set_tsk_thread_flag(task, TIF_SSBD);
3005 ++ ssbd_ssbs_disable(task);
3006 + break;
3007 + case PR_SPEC_FORCE_DISABLE:
3008 + if (state == ARM64_SSBD_FORCE_DISABLE)
3009 +@@ -58,6 +79,7 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
3010 + task_set_spec_ssb_disable(task);
3011 + task_set_spec_ssb_force_disable(task);
3012 + set_tsk_thread_flag(task, TIF_SSBD);
3013 ++ ssbd_ssbs_disable(task);
3014 + break;
3015 + default:
3016 + return -ERANGE;
3017 +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
3018 +index 74259ae9c7f2..a4e49e947684 100644
3019 +--- a/arch/arm64/kernel/traps.c
3020 ++++ b/arch/arm64/kernel/traps.c
3021 +@@ -38,6 +38,7 @@
3022 +
3023 + #include <asm/atomic.h>
3024 + #include <asm/bug.h>
3025 ++#include <asm/cpufeature.h>
3026 + #include <asm/debug-monitors.h>
3027 + #include <asm/esr.h>
3028 + #include <asm/insn.h>
3029 +@@ -436,10 +437,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
3030 + force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
3031 + }
3032 +
3033 +-int cpu_enable_cache_maint_trap(void *__unused)
3034 ++void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
3035 + {
3036 + config_sctlr_el1(SCTLR_EL1_UCI, 0);
3037 +- return 0;
3038 + }
3039 +
3040 + #define __user_cache_maint(insn, address, res) \
3041 +diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
3042 +index a7b3c198d4de..a360ac6e89e9 100644
3043 +--- a/arch/arm64/kvm/hyp/entry.S
3044 ++++ b/arch/arm64/kvm/hyp/entry.S
3045 +@@ -196,15 +196,3 @@ alternative_endif
3046 +
3047 + eret
3048 + ENDPROC(__fpsimd_guest_restore)
3049 +-
3050 +-ENTRY(__qcom_hyp_sanitize_btac_predictors)
3051 +- /**
3052 +- * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700)
3053 +- * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls
3054 +- * b15-b0: contains SiP functionID
3055 +- */
3056 +- movz x0, #0x1700
3057 +- movk x0, #0xc200, lsl #16
3058 +- smc #0
3059 +- ret
3060 +-ENDPROC(__qcom_hyp_sanitize_btac_predictors)
3061 +diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
3062 +index 44845996b554..4a8fdbb29286 100644
3063 +--- a/arch/arm64/kvm/hyp/switch.c
3064 ++++ b/arch/arm64/kvm/hyp/switch.c
3065 +@@ -405,16 +405,6 @@ again:
3066 +
3067 + __set_host_arch_workaround_state(vcpu);
3068 +
3069 +- if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
3070 +- u32 midr = read_cpuid_id();
3071 +-
3072 +- /* Apply BTAC predictors mitigation to all Falkor chips */
3073 +- if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
3074 +- ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
3075 +- __qcom_hyp_sanitize_btac_predictors();
3076 +- }
3077 +- }
3078 +-
3079 + fp_enabled = __fpsimd_enabled();
3080 +
3081 + __sysreg_save_guest_state(guest_ctxt);
3082 +diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
3083 +index e19d89cabf2a..3773311ffcd0 100644
3084 +--- a/arch/arm64/kvm/hyp/sysreg-sr.c
3085 ++++ b/arch/arm64/kvm/hyp/sysreg-sr.c
3086 +@@ -188,3 +188,14 @@ void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
3087 + {
3088 + asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
3089 + }
3090 ++
3091 ++void __hyp_text __kvm_enable_ssbs(void)
3092 ++{
3093 ++ u64 tmp;
3094 ++
3095 ++ asm volatile(
3096 ++ "mrs %0, sctlr_el2\n"
3097 ++ "orr %0, %0, %1\n"
3098 ++ "msr sctlr_el2, %0"
3099 ++ : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
3100 ++}
3101 +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
3102 +index 465b90d7abf2..bf7c285d0c82 100644
3103 +--- a/arch/arm64/mm/fault.c
3104 ++++ b/arch/arm64/mm/fault.c
3105 +@@ -875,7 +875,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
3106 + NOKPROBE_SYMBOL(do_debug_exception);
3107 +
3108 + #ifdef CONFIG_ARM64_PAN
3109 +-int cpu_enable_pan(void *__unused)
3110 ++void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
3111 + {
3112 + /*
3113 + * We modify PSTATE. This won't work from irq context as the PSTATE
3114 +@@ -885,6 +885,5 @@ int cpu_enable_pan(void *__unused)
3115 +
3116 + config_sctlr_el1(SCTLR_EL1_SPAN, 0);
3117 + asm(SET_PSTATE_PAN(1));
3118 +- return 0;
3119 + }
3120 + #endif /* CONFIG_ARM64_PAN */
3121 +diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
3122 +index 65b040152184..ecbc060807d2 100644
3123 +--- a/arch/arm64/mm/proc.S
3124 ++++ b/arch/arm64/mm/proc.S
3125 +@@ -430,11 +430,7 @@ ENTRY(__cpu_setup)
3126 + /*
3127 + * Prepare SCTLR
3128 + */
3129 +- adr x5, crval
3130 +- ldp w5, w6, [x5]
3131 +- mrs x0, sctlr_el1
3132 +- bic x0, x0, x5 // clear bits
3133 +- orr x0, x0, x6 // set bits
3134 ++ mov_q x0, SCTLR_EL1_SET
3135 + /*
3136 + * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
3137 + * both user and kernel.
3138 +@@ -470,21 +466,3 @@ ENTRY(__cpu_setup)
3139 + msr tcr_el1, x10
3140 + ret // return to head.S
3141 + ENDPROC(__cpu_setup)
3142 +-
3143 +- /*
3144 +- * We set the desired value explicitly, including those of the
3145 +- * reserved bits. The values of bits EE & E0E were set early in
3146 +- * el2_setup, which are left untouched below.
3147 +- *
3148 +- * n n T
3149 +- * U E WT T UD US IHBS
3150 +- * CE0 XWHW CZ ME TEEA S
3151 +- * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
3152 +- * 0011 0... 1101 ..0. ..0. 10.. .0.. .... < hardware reserved
3153 +- * .... .1.. .... 01.1 11.1 ..01 0.01 1101 < software settings
3154 +- */
3155 +- .type crval, #object
3156 +-crval:
3157 +- .word 0xfcffffff // clear
3158 +- .word 0x34d5d91d // set
3159 +- .popsection
3160 +diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
3161 +index efd5f0722206..39b6269610d4 100644
3162 +--- a/arch/mips/boot/dts/qca/ar9331.dtsi
3163 ++++ b/arch/mips/boot/dts/qca/ar9331.dtsi
3164 +@@ -99,7 +99,7 @@
3165 +
3166 + miscintc: interrupt-controller@18060010 {
3167 + compatible = "qca,ar7240-misc-intc";
3168 +- reg = <0x18060010 0x4>;
3169 ++ reg = <0x18060010 0x8>;
3170 +
3171 + interrupt-parent = <&cpuintc>;
3172 + interrupts = <6>;
3173 +diff --git a/arch/mips/loongson64/common/serial.c b/arch/mips/loongson64/common/serial.c
3174 +index ffefc1cb2612..98c3a7feb10f 100644
3175 +--- a/arch/mips/loongson64/common/serial.c
3176 ++++ b/arch/mips/loongson64/common/serial.c
3177 +@@ -110,7 +110,7 @@ static int __init serial_init(void)
3178 + }
3179 + module_init(serial_init);
3180 +
3181 +-static void __init serial_exit(void)
3182 ++static void __exit serial_exit(void)
3183 + {
3184 + platform_device_unregister(&uart8250_device);
3185 + }
3186 +diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
3187 +index dc495578d44d..b55c74a7f7a4 100644
3188 +--- a/arch/mips/mm/tlbex.c
3189 ++++ b/arch/mips/mm/tlbex.c
3190 +@@ -658,6 +658,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
3191 + int restore_scratch)
3192 + {
3193 + if (restore_scratch) {
3194 ++ /*
3195 ++ * Ensure the MFC0 below observes the value written to the
3196 ++ * KScratch register by the prior MTC0.
3197 ++ */
3198 ++ if (scratch_reg >= 0)
3199 ++ uasm_i_ehb(p);
3200 ++
3201 + /* Reset default page size */
3202 + if (PM_DEFAULT_MASK >> 16) {
3203 + uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
3204 +@@ -672,12 +679,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
3205 + uasm_i_mtc0(p, 0, C0_PAGEMASK);
3206 + uasm_il_b(p, r, lid);
3207 + }
3208 +- if (scratch_reg >= 0) {
3209 +- uasm_i_ehb(p);
3210 ++ if (scratch_reg >= 0)
3211 + UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
3212 +- } else {
3213 ++ else
3214 + UASM_i_LW(p, 1, scratchpad_offset(0), 0);
3215 +- }
3216 + } else {
3217 + /* Reset default page size */
3218 + if (PM_DEFAULT_MASK >> 16) {
3219 +@@ -926,6 +931,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
3220 + }
3221 + if (mode != not_refill && check_for_high_segbits) {
3222 + uasm_l_large_segbits_fault(l, *p);
3223 ++
3224 ++ if (mode == refill_scratch && scratch_reg >= 0)
3225 ++ uasm_i_ehb(p);
3226 ++
3227 + /*
3228 + * We get here if we are an xsseg address, or if we are
3229 + * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
3230 +@@ -942,12 +951,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
3231 + uasm_i_jr(p, ptr);
3232 +
3233 + if (mode == refill_scratch) {
3234 +- if (scratch_reg >= 0) {
3235 +- uasm_i_ehb(p);
3236 ++ if (scratch_reg >= 0)
3237 + UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
3238 +- } else {
3239 ++ else
3240 + UASM_i_LW(p, 1, scratchpad_offset(0), 0);
3241 +- }
3242 + } else {
3243 + uasm_i_nop(p);
3244 + }
3245 +diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
3246 +index 92a9b5f12f98..f29f682352f0 100644
3247 +--- a/arch/parisc/mm/ioremap.c
3248 ++++ b/arch/parisc/mm/ioremap.c
3249 +@@ -3,7 +3,7 @@
3250 + * arch/parisc/mm/ioremap.c
3251 + *
3252 + * (C) Copyright 1995 1996 Linus Torvalds
3253 +- * (C) Copyright 2001-2006 Helge Deller <deller@×××.de>
3254 ++ * (C) Copyright 2001-2019 Helge Deller <deller@×××.de>
3255 + * (C) Copyright 2005 Kyle McMartin <kyle@××××××××××××.org>
3256 + */
3257 +
3258 +@@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
3259 + addr = (void __iomem *) area->addr;
3260 + if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
3261 + phys_addr, pgprot)) {
3262 +- vfree(addr);
3263 ++ vunmap(addr);
3264 + return NULL;
3265 + }
3266 +
3267 +@@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
3268 + }
3269 + EXPORT_SYMBOL(__ioremap);
3270 +
3271 +-void iounmap(const volatile void __iomem *addr)
3272 ++void iounmap(const volatile void __iomem *io_addr)
3273 + {
3274 +- if (addr > high_memory)
3275 +- return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
3276 ++ unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
3277 ++
3278 ++ if (is_vmalloc_addr((void *)addr))
3279 ++ vunmap((void *)addr);
3280 + }
3281 + EXPORT_SYMBOL(iounmap);
3282 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
3283 +index 94af073476ce..00c12158a5dc 100644
3284 +--- a/arch/x86/include/asm/kvm_host.h
3285 ++++ b/arch/x86/include/asm/kvm_host.h
3286 +@@ -973,7 +973,7 @@ struct kvm_x86_ops {
3287 + unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
3288 + void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
3289 +
3290 +- void (*tlb_flush)(struct kvm_vcpu *vcpu);
3291 ++ void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
3292 +
3293 + void (*run)(struct kvm_vcpu *vcpu);
3294 + int (*handle_exit)(struct kvm_vcpu *vcpu);
3295 +@@ -998,7 +998,7 @@ struct kvm_x86_ops {
3296 + void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
3297 + void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
3298 + void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
3299 +- void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
3300 ++ void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
3301 + void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
3302 + void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
3303 + int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
3304 +diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
3305 +index 7c67d8939f3e..e00ccbcc2913 100644
3306 +--- a/arch/x86/kernel/head64.c
3307 ++++ b/arch/x86/kernel/head64.c
3308 +@@ -145,13 +145,31 @@ unsigned long __head __startup_64(unsigned long physaddr,
3309 + * we might write invalid pmds, when the kernel is relocated
3310 + * cleanup_highmap() fixes this up along with the mappings
3311 + * beyond _end.
3312 ++ *
3313 ++ * Only the region occupied by the kernel image has so far
3314 ++ * been checked against the table of usable memory regions
3315 ++ * provided by the firmware, so invalidate pages outside that
3316 ++ * region. A page table entry that maps to a reserved area of
3317 ++ * memory would allow processor speculation into that area,
3318 ++ * and on some hardware (particularly the UV platform) even
3319 ++ * speculative access to some reserved areas is caught as an
3320 ++ * error, causing the BIOS to halt the system.
3321 + */
3322 +
3323 + pmd = fixup_pointer(level2_kernel_pgt, physaddr);
3324 +- for (i = 0; i < PTRS_PER_PMD; i++) {
3325 ++
3326 ++ /* invalidate pages before the kernel image */
3327 ++ for (i = 0; i < pmd_index((unsigned long)_text); i++)
3328 ++ pmd[i] &= ~_PAGE_PRESENT;
3329 ++
3330 ++ /* fixup pages that are part of the kernel image */
3331 ++ for (; i <= pmd_index((unsigned long)_end); i++)
3332 + if (pmd[i] & _PAGE_PRESENT)
3333 + pmd[i] += load_delta;
3334 +- }
3335 ++
3336 ++ /* invalidate pages after the kernel image */
3337 ++ for (; i < PTRS_PER_PMD; i++)
3338 ++ pmd[i] &= ~_PAGE_PRESENT;
3339 +
3340 + /*
3341 + * Fixup phys_base - remove the memory encryption mask to obtain
3342 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
3343 +index 7b9ad9de4f37..2307f63efd20 100644
3344 +--- a/arch/x86/kvm/lapic.c
3345 ++++ b/arch/x86/kvm/lapic.c
3346 +@@ -1967,13 +1967,11 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
3347 + }
3348 + }
3349 +
3350 +- if ((old_value ^ value) & X2APIC_ENABLE) {
3351 +- if (value & X2APIC_ENABLE) {
3352 +- kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
3353 +- kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
3354 +- } else
3355 +- kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
3356 +- }
3357 ++ if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
3358 ++ kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
3359 ++
3360 ++ if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
3361 ++ kvm_x86_ops->set_virtual_apic_mode(vcpu);
3362 +
3363 + apic->base_address = apic->vcpu->arch.apic_base &
3364 + MSR_IA32_APICBASE_BASE;
3365 +diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
3366 +index 4b9935a38347..bc3446d3cfdf 100644
3367 +--- a/arch/x86/kvm/lapic.h
3368 ++++ b/arch/x86/kvm/lapic.h
3369 +@@ -16,6 +16,13 @@
3370 + #define APIC_BUS_CYCLE_NS 1
3371 + #define APIC_BUS_FREQUENCY (1000000000ULL / APIC_BUS_CYCLE_NS)
3372 +
3373 ++enum lapic_mode {
3374 ++ LAPIC_MODE_DISABLED = 0,
3375 ++ LAPIC_MODE_INVALID = X2APIC_ENABLE,
3376 ++ LAPIC_MODE_XAPIC = MSR_IA32_APICBASE_ENABLE,
3377 ++ LAPIC_MODE_X2APIC = MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE,
3378 ++};
3379 ++
3380 + struct kvm_timer {
3381 + struct hrtimer timer;
3382 + s64 period; /* unit: ns */
3383 +@@ -89,6 +96,7 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
3384 + int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
3385 + int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
3386 + int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
3387 ++enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu);
3388 + int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
3389 +
3390 + u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
3391 +@@ -220,4 +228,10 @@ void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
3392 + void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
3393 + bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
3394 + void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu);
3395 ++
3396 ++static inline enum lapic_mode kvm_apic_mode(u64 apic_base)
3397 ++{
3398 ++ return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
3399 ++}
3400 ++
3401 + #endif
3402 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
3403 +index 093e7f567e69..f6adc8db0e32 100644
3404 +--- a/arch/x86/kvm/svm.c
3405 ++++ b/arch/x86/kvm/svm.c
3406 +@@ -299,7 +299,7 @@ static int vgif = true;
3407 + module_param(vgif, int, 0444);
3408 +
3409 + static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
3410 +-static void svm_flush_tlb(struct kvm_vcpu *vcpu);
3411 ++static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
3412 + static void svm_complete_interrupts(struct vcpu_svm *svm);
3413 +
3414 + static int nested_svm_exit_handled(struct vcpu_svm *svm);
3415 +@@ -2097,7 +2097,7 @@ static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3416 + return 1;
3417 +
3418 + if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
3419 +- svm_flush_tlb(vcpu);
3420 ++ svm_flush_tlb(vcpu, true);
3421 +
3422 + vcpu->arch.cr4 = cr4;
3423 + if (!npt_enabled)
3424 +@@ -2438,7 +2438,7 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
3425 +
3426 + svm->vmcb->control.nested_cr3 = __sme_set(root);
3427 + mark_dirty(svm->vmcb, VMCB_NPT);
3428 +- svm_flush_tlb(vcpu);
3429 ++ svm_flush_tlb(vcpu, true);
3430 + }
3431 +
3432 + static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
3433 +@@ -3111,7 +3111,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
3434 + svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
3435 + svm->nested.intercept = nested_vmcb->control.intercept;
3436 +
3437 +- svm_flush_tlb(&svm->vcpu);
3438 ++ svm_flush_tlb(&svm->vcpu, true);
3439 + svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
3440 + if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
3441 + svm->vcpu.arch.hflags |= HF_VINTR_MASK;
3442 +@@ -4589,7 +4589,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3443 + set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3444 + }
3445 +
3446 +-static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
3447 ++static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
3448 + {
3449 + return;
3450 + }
3451 +@@ -4947,7 +4947,7 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
3452 + return 0;
3453 + }
3454 +
3455 +-static void svm_flush_tlb(struct kvm_vcpu *vcpu)
3456 ++static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
3457 + {
3458 + struct vcpu_svm *svm = to_svm(vcpu);
3459 +
3460 +@@ -5288,7 +5288,7 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3461 +
3462 + svm->vmcb->save.cr3 = __sme_set(root);
3463 + mark_dirty(svm->vmcb, VMCB_CR);
3464 +- svm_flush_tlb(vcpu);
3465 ++ svm_flush_tlb(vcpu, true);
3466 + }
3467 +
3468 + static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3469 +@@ -5302,7 +5302,7 @@ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3470 + svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
3471 + mark_dirty(svm->vmcb, VMCB_CR);
3472 +
3473 +- svm_flush_tlb(vcpu);
3474 ++ svm_flush_tlb(vcpu, true);
3475 + }
3476 +
3477 + static int is_disabled(void)
3478 +@@ -5713,7 +5713,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
3479 + .enable_nmi_window = enable_nmi_window,
3480 + .enable_irq_window = enable_irq_window,
3481 + .update_cr8_intercept = update_cr8_intercept,
3482 +- .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
3483 ++ .set_virtual_apic_mode = svm_set_virtual_apic_mode,
3484 + .get_enable_apicv = svm_get_enable_apicv,
3485 + .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
3486 + .load_eoi_exitmap = svm_load_eoi_exitmap,
3487 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
3488 +index 7784b02312ca..02c0326dc259 100644
3489 +--- a/arch/x86/kvm/vmx.c
3490 ++++ b/arch/x86/kvm/vmx.c
3491 +@@ -591,7 +591,8 @@ struct nested_vmx {
3492 + */
3493 + bool sync_shadow_vmcs;
3494 +
3495 +- bool change_vmcs01_virtual_x2apic_mode;
3496 ++ bool change_vmcs01_virtual_apic_mode;
3497 ++
3498 + /* L2 must run next, and mustn't decide to exit to L1. */
3499 + bool nested_run_pending;
3500 +
3501 +@@ -4427,9 +4428,10 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
3502 +
3503 + #endif
3504 +
3505 +-static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
3506 ++static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
3507 ++ bool invalidate_gpa)
3508 + {
3509 +- if (enable_ept) {
3510 ++ if (enable_ept && (invalidate_gpa || !enable_vpid)) {
3511 + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3512 + return;
3513 + ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
3514 +@@ -4438,15 +4440,9 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
3515 + }
3516 + }
3517 +
3518 +-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
3519 +-{
3520 +- __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
3521 +-}
3522 +-
3523 +-static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
3524 ++static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
3525 + {
3526 +- if (enable_ept)
3527 +- vmx_flush_tlb(vcpu);
3528 ++ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
3529 + }
3530 +
3531 + static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
3532 +@@ -4644,7 +4640,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
3533 + ept_load_pdptrs(vcpu);
3534 + }
3535 +
3536 +- vmx_flush_tlb(vcpu);
3537 ++ vmx_flush_tlb(vcpu, true);
3538 + vmcs_writel(GUEST_CR3, guest_cr3);
3539 + }
3540 +
3541 +@@ -8314,7 +8310,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
3542 + return kvm_skip_emulated_instruction(vcpu);
3543 + }
3544 +
3545 +- __vmx_flush_tlb(vcpu, vmx->nested.vpid02);
3546 ++ __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
3547 + nested_vmx_succeed(vcpu);
3548 +
3549 + return kvm_skip_emulated_instruction(vcpu);
3550 +@@ -9295,31 +9291,43 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3551 + vmcs_write32(TPR_THRESHOLD, irr);
3552 + }
3553 +
3554 +-static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
3555 ++static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
3556 + {
3557 + u32 sec_exec_control;
3558 +
3559 ++ if (!lapic_in_kernel(vcpu))
3560 ++ return;
3561 ++
3562 + /* Postpone execution until vmcs01 is the current VMCS. */
3563 + if (is_guest_mode(vcpu)) {
3564 +- to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
3565 ++ to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
3566 + return;
3567 + }
3568 +
3569 +- if (!cpu_has_vmx_virtualize_x2apic_mode())
3570 +- return;
3571 +-
3572 + if (!cpu_need_tpr_shadow(vcpu))
3573 + return;
3574 +
3575 + sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
3576 ++ sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3577 ++ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
3578 +
3579 +- if (set) {
3580 +- sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
3581 +- sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
3582 +- } else {
3583 +- sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
3584 +- sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
3585 +- vmx_flush_tlb_ept_only(vcpu);
3586 ++ switch (kvm_get_apic_mode(vcpu)) {
3587 ++ case LAPIC_MODE_INVALID:
3588 ++ WARN_ONCE(true, "Invalid local APIC state");
3589 ++ case LAPIC_MODE_DISABLED:
3590 ++ break;
3591 ++ case LAPIC_MODE_XAPIC:
3592 ++ if (flexpriority_enabled) {
3593 ++ sec_exec_control |=
3594 ++ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
3595 ++ vmx_flush_tlb(vcpu, true);
3596 ++ }
3597 ++ break;
3598 ++ case LAPIC_MODE_X2APIC:
3599 ++ if (cpu_has_vmx_virtualize_x2apic_mode())
3600 ++ sec_exec_control |=
3601 ++ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
3602 ++ break;
3603 + }
3604 + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
3605 +
3606 +@@ -9347,7 +9355,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
3607 + !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
3608 + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3609 + vmcs_write64(APIC_ACCESS_ADDR, hpa);
3610 +- vmx_flush_tlb_ept_only(vcpu);
3611 ++ vmx_flush_tlb(vcpu, true);
3612 + }
3613 + }
3614 +
3615 +@@ -11214,11 +11222,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
3616 + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
3617 + if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
3618 + vmx->nested.last_vpid = vmcs12->virtual_processor_id;
3619 +- __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
3620 ++ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02, true);
3621 + }
3622 + } else {
3623 + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
3624 +- vmx_flush_tlb(vcpu);
3625 ++ vmx_flush_tlb(vcpu, true);
3626 + }
3627 +
3628 + }
3629 +@@ -11242,7 +11250,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
3630 + }
3631 + } else if (nested_cpu_has2(vmcs12,
3632 + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3633 +- vmx_flush_tlb_ept_only(vcpu);
3634 ++ vmx_flush_tlb(vcpu, true);
3635 + }
3636 +
3637 + /*
3638 +@@ -11921,7 +11929,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3639 + * L1's vpid. TODO: move to a more elaborate solution, giving
3640 + * each L2 its own vpid and exposing the vpid feature to L1.
3641 + */
3642 +- vmx_flush_tlb(vcpu);
3643 ++ vmx_flush_tlb(vcpu, true);
3644 + }
3645 + /* Restore posted intr vector. */
3646 + if (nested_cpu_has_posted_intr(vmcs12))
3647 +@@ -12190,14 +12198,13 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
3648 + if (kvm_has_tsc_control)
3649 + decache_tsc_multiplier(vmx);
3650 +
3651 +- if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
3652 +- vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
3653 +- vmx_set_virtual_x2apic_mode(vcpu,
3654 +- vcpu->arch.apic_base & X2APIC_ENABLE);
3655 ++ if (vmx->nested.change_vmcs01_virtual_apic_mode) {
3656 ++ vmx->nested.change_vmcs01_virtual_apic_mode = false;
3657 ++ vmx_set_virtual_apic_mode(vcpu);
3658 + } else if (!nested_cpu_has_ept(vmcs12) &&
3659 + nested_cpu_has2(vmcs12,
3660 + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3661 +- vmx_flush_tlb_ept_only(vcpu);
3662 ++ vmx_flush_tlb(vcpu, true);
3663 + }
3664 +
3665 + /* This is needed for same reason as it was needed in prepare_vmcs02 */
3666 +@@ -12754,7 +12761,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
3667 + .enable_nmi_window = enable_nmi_window,
3668 + .enable_irq_window = enable_irq_window,
3669 + .update_cr8_intercept = update_cr8_intercept,
3670 +- .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
3671 ++ .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
3672 + .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
3673 + .get_enable_apicv = vmx_get_enable_apicv,
3674 + .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
3675 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
3676 +index 98b990f13ae0..4927d0f5be13 100644
3677 +--- a/arch/x86/kvm/x86.c
3678 ++++ b/arch/x86/kvm/x86.c
3679 +@@ -306,23 +306,27 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
3680 + }
3681 + EXPORT_SYMBOL_GPL(kvm_get_apic_base);
3682 +
3683 ++enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
3684 ++{
3685 ++ return kvm_apic_mode(kvm_get_apic_base(vcpu));
3686 ++}
3687 ++EXPORT_SYMBOL_GPL(kvm_get_apic_mode);
3688 ++
3689 + int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3690 + {
3691 +- u64 old_state = vcpu->arch.apic_base &
3692 +- (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
3693 +- u64 new_state = msr_info->data &
3694 +- (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
3695 ++ enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
3696 ++ enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
3697 + u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
3698 + (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
3699 +
3700 +- if ((msr_info->data & reserved_bits) || new_state == X2APIC_ENABLE)
3701 +- return 1;
3702 +- if (!msr_info->host_initiated &&
3703 +- ((new_state == MSR_IA32_APICBASE_ENABLE &&
3704 +- old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
3705 +- (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
3706 +- old_state == 0)))
3707 ++ if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
3708 + return 1;
3709 ++ if (!msr_info->host_initiated) {
3710 ++ if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
3711 ++ return 1;
3712 ++ if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
3713 ++ return 1;
3714 ++ }
3715 +
3716 + kvm_lapic_set_base(vcpu, msr_info->data);
3717 + return 0;
3718 +@@ -6943,10 +6947,10 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
3719 + kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
3720 + }
3721 +
3722 +-static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
3723 ++static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
3724 + {
3725 + ++vcpu->stat.tlb_flush;
3726 +- kvm_x86_ops->tlb_flush(vcpu);
3727 ++ kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
3728 + }
3729 +
3730 + void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
3731 +@@ -7017,7 +7021,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
3732 + if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
3733 + kvm_mmu_sync_roots(vcpu);
3734 + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
3735 +- kvm_vcpu_flush_tlb(vcpu);
3736 ++ kvm_vcpu_flush_tlb(vcpu, true);
3737 + if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
3738 + vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
3739 + r = 0;
3740 +diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
3741 +index a18703be9ead..4769a069d5bd 100644
3742 +--- a/arch/x86/xen/efi.c
3743 ++++ b/arch/x86/xen/efi.c
3744 +@@ -77,7 +77,9 @@ static efi_system_table_t __init *xen_efi_probe(void)
3745 + efi.get_variable = xen_efi_get_variable;
3746 + efi.get_next_variable = xen_efi_get_next_variable;
3747 + efi.set_variable = xen_efi_set_variable;
3748 ++ efi.set_variable_nonblocking = xen_efi_set_variable;
3749 + efi.query_variable_info = xen_efi_query_variable_info;
3750 ++ efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
3751 + efi.update_capsule = xen_efi_update_capsule;
3752 + efi.query_capsule_caps = xen_efi_query_capsule_caps;
3753 + efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
3754 +diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
3755 +index 672391003e40..dc7b470a423a 100644
3756 +--- a/arch/xtensa/kernel/xtensa_ksyms.c
3757 ++++ b/arch/xtensa/kernel/xtensa_ksyms.c
3758 +@@ -114,13 +114,6 @@ EXPORT_SYMBOL(__invalidate_icache_range);
3759 + // FIXME EXPORT_SYMBOL(screen_info);
3760 + #endif
3761 +
3762 +-EXPORT_SYMBOL(outsb);
3763 +-EXPORT_SYMBOL(outsw);
3764 +-EXPORT_SYMBOL(outsl);
3765 +-EXPORT_SYMBOL(insb);
3766 +-EXPORT_SYMBOL(insw);
3767 +-EXPORT_SYMBOL(insl);
3768 +-
3769 + extern long common_exception_return;
3770 + EXPORT_SYMBOL(common_exception_return);
3771 +
3772 +diff --git a/drivers/base/core.c b/drivers/base/core.c
3773 +index 2ec9af90cd28..2b0a1054535c 100644
3774 +--- a/drivers/base/core.c
3775 ++++ b/drivers/base/core.c
3776 +@@ -10,6 +10,7 @@
3777 + *
3778 + */
3779 +
3780 ++#include <linux/cpufreq.h>
3781 + #include <linux/device.h>
3782 + #include <linux/err.h>
3783 + #include <linux/fwnode.h>
3784 +@@ -2845,6 +2846,8 @@ void device_shutdown(void)
3785 + wait_for_device_probe();
3786 + device_block_probing();
3787 +
3788 ++ cpufreq_suspend();
3789 ++
3790 + spin_lock(&devices_kset->list_lock);
3791 + /*
3792 + * Walk the devices list backward, shutting down each in turn.
3793 +diff --git a/drivers/base/memory.c b/drivers/base/memory.c
3794 +index 1d60b58a8c19..c617e00f4361 100644
3795 +--- a/drivers/base/memory.c
3796 ++++ b/drivers/base/memory.c
3797 +@@ -552,6 +552,9 @@ store_soft_offline_page(struct device *dev,
3798 + pfn >>= PAGE_SHIFT;
3799 + if (!pfn_valid(pfn))
3800 + return -ENXIO;
3801 ++ /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
3802 ++ if (!pfn_to_online_page(pfn))
3803 ++ return -EIO;
3804 + ret = soft_offline_page(pfn_to_page(pfn), 0);
3805 + return ret == 0 ? count : ret;
3806 + }
3807 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
3808 +index 87d7c42affbc..ec61dd873c93 100644
3809 +--- a/drivers/block/loop.c
3810 ++++ b/drivers/block/loop.c
3811 +@@ -1605,6 +1605,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
3812 + arg = (unsigned long) compat_ptr(arg);
3813 + case LOOP_SET_FD:
3814 + case LOOP_CHANGE_FD:
3815 ++ case LOOP_SET_DIRECT_IO:
3816 + err = lo_ioctl(bdev, mode, cmd, arg);
3817 + break;
3818 + default:
3819 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
3820 +index fceb18d26db8..4aa3c5331666 100644
3821 +--- a/drivers/cpufreq/cpufreq.c
3822 ++++ b/drivers/cpufreq/cpufreq.c
3823 +@@ -2570,14 +2570,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
3824 + }
3825 + EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
3826 +
3827 +-/*
3828 +- * Stop cpufreq at shutdown to make sure it isn't holding any locks
3829 +- * or mutexes when secondary CPUs are halted.
3830 +- */
3831 +-static struct syscore_ops cpufreq_syscore_ops = {
3832 +- .shutdown = cpufreq_suspend,
3833 +-};
3834 +-
3835 + struct kobject *cpufreq_global_kobject;
3836 + EXPORT_SYMBOL(cpufreq_global_kobject);
3837 +
3838 +@@ -2589,8 +2581,6 @@ static int __init cpufreq_core_init(void)
3839 + cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
3840 + BUG_ON(!cpufreq_global_kobject);
3841 +
3842 +- register_syscore_ops(&cpufreq_syscore_ops);
3843 +-
3844 + return 0;
3845 + }
3846 + module_param(off, int, 0444);
3847 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
3848 +index 4dd68d821353..4894d8a87c04 100644
3849 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
3850 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
3851 +@@ -572,6 +572,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
3852 + if (ret == -EPROBE_DEFER)
3853 + return ret;
3854 +
3855 ++#ifdef CONFIG_DRM_AMDGPU_SI
3856 ++ if (!amdgpu_si_support) {
3857 ++ switch (flags & AMD_ASIC_MASK) {
3858 ++ case CHIP_TAHITI:
3859 ++ case CHIP_PITCAIRN:
3860 ++ case CHIP_VERDE:
3861 ++ case CHIP_OLAND:
3862 ++ case CHIP_HAINAN:
3863 ++ dev_info(&pdev->dev,
3864 ++ "SI support provided by radeon.\n");
3865 ++ dev_info(&pdev->dev,
3866 ++ "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
3867 ++ );
3868 ++ return -ENODEV;
3869 ++ }
3870 ++ }
3871 ++#endif
3872 ++#ifdef CONFIG_DRM_AMDGPU_CIK
3873 ++ if (!amdgpu_cik_support) {
3874 ++ switch (flags & AMD_ASIC_MASK) {
3875 ++ case CHIP_KAVERI:
3876 ++ case CHIP_BONAIRE:
3877 ++ case CHIP_HAWAII:
3878 ++ case CHIP_KABINI:
3879 ++ case CHIP_MULLINS:
3880 ++ dev_info(&pdev->dev,
3881 ++ "CIK support provided by radeon.\n");
3882 ++ dev_info(&pdev->dev,
3883 ++ "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
3884 ++ );
3885 ++ return -ENODEV;
3886 ++ }
3887 ++ }
3888 ++#endif
3889 ++
3890 + /* Get rid of things like offb */
3891 + ret = amdgpu_kick_out_firmware_fb(pdev);
3892 + if (ret)
3893 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
3894 +index 884ed359f249..c93e72d8ac5f 100644
3895 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
3896 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
3897 +@@ -87,41 +87,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
3898 + struct amdgpu_device *adev;
3899 + int r, acpi_status;
3900 +
3901 +-#ifdef CONFIG_DRM_AMDGPU_SI
3902 +- if (!amdgpu_si_support) {
3903 +- switch (flags & AMD_ASIC_MASK) {
3904 +- case CHIP_TAHITI:
3905 +- case CHIP_PITCAIRN:
3906 +- case CHIP_VERDE:
3907 +- case CHIP_OLAND:
3908 +- case CHIP_HAINAN:
3909 +- dev_info(dev->dev,
3910 +- "SI support provided by radeon.\n");
3911 +- dev_info(dev->dev,
3912 +- "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
3913 +- );
3914 +- return -ENODEV;
3915 +- }
3916 +- }
3917 +-#endif
3918 +-#ifdef CONFIG_DRM_AMDGPU_CIK
3919 +- if (!amdgpu_cik_support) {
3920 +- switch (flags & AMD_ASIC_MASK) {
3921 +- case CHIP_KAVERI:
3922 +- case CHIP_BONAIRE:
3923 +- case CHIP_HAWAII:
3924 +- case CHIP_KABINI:
3925 +- case CHIP_MULLINS:
3926 +- dev_info(dev->dev,
3927 +- "CIK support provided by radeon.\n");
3928 +- dev_info(dev->dev,
3929 +- "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
3930 +- );
3931 +- return -ENODEV;
3932 +- }
3933 +- }
3934 +-#endif
3935 +-
3936 + adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
3937 + if (adev == NULL) {
3938 + return -ENOMEM;
3939 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
3940 +index ed01e3aae0e8..dfdc7d3147fb 100644
3941 +--- a/drivers/gpu/drm/drm_edid.c
3942 ++++ b/drivers/gpu/drm/drm_edid.c
3943 +@@ -164,6 +164,9 @@ static const struct edid_quirk {
3944 + /* Medion MD 30217 PG */
3945 + { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
3946 +
3947 ++ /* Lenovo G50 */
3948 ++ { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
3949 ++
3950 + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
3951 + { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
3952 +
3953 +diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
3954 +index 54d97dd5780a..f4becad0a78c 100644
3955 +--- a/drivers/gpu/drm/radeon/radeon_drv.c
3956 ++++ b/drivers/gpu/drm/radeon/radeon_drv.c
3957 +@@ -368,19 +368,11 @@ radeon_pci_remove(struct pci_dev *pdev)
3958 + static void
3959 + radeon_pci_shutdown(struct pci_dev *pdev)
3960 + {
3961 +- struct drm_device *ddev = pci_get_drvdata(pdev);
3962 +-
3963 + /* if we are running in a VM, make sure the device
3964 + * torn down properly on reboot/shutdown
3965 + */
3966 + if (radeon_device_is_virtual())
3967 + radeon_pci_remove(pdev);
3968 +-
3969 +- /* Some adapters need to be suspended before a
3970 +- * shutdown occurs in order to prevent an error
3971 +- * during kexec.
3972 +- */
3973 +- radeon_suspend_kms(ddev, true, true, false);
3974 + }
3975 +
3976 + static int radeon_pmops_suspend(struct device *dev)
3977 +diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
3978 +index b5784cb145f5..805429bbc916 100644
3979 +--- a/drivers/infiniband/hw/cxgb4/mem.c
3980 ++++ b/drivers/infiniband/hw/cxgb4/mem.c
3981 +@@ -260,13 +260,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
3982 + struct sk_buff *skb)
3983 + {
3984 + int err;
3985 +- struct fw_ri_tpte tpt;
3986 ++ struct fw_ri_tpte *tpt;
3987 + u32 stag_idx;
3988 + static atomic_t key;
3989 +
3990 + if (c4iw_fatal_error(rdev))
3991 + return -EIO;
3992 +
3993 ++ tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
3994 ++ if (!tpt)
3995 ++ return -ENOMEM;
3996 ++
3997 + stag_state = stag_state > 0;
3998 + stag_idx = (*stag) >> 8;
3999 +
4000 +@@ -276,6 +280,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
4001 + mutex_lock(&rdev->stats.lock);
4002 + rdev->stats.stag.fail++;
4003 + mutex_unlock(&rdev->stats.lock);
4004 ++ kfree(tpt);
4005 + return -ENOMEM;
4006 + }
4007 + mutex_lock(&rdev->stats.lock);
4008 +@@ -290,28 +295,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
4009 +
4010 + /* write TPT entry */
4011 + if (reset_tpt_entry)
4012 +- memset(&tpt, 0, sizeof(tpt));
4013 ++ memset(tpt, 0, sizeof(*tpt));
4014 + else {
4015 +- tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
4016 ++ tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
4017 + FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
4018 + FW_RI_TPTE_STAGSTATE_V(stag_state) |
4019 + FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
4020 +- tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
4021 ++ tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
4022 + (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
4023 + FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
4024 + FW_RI_VA_BASED_TO))|
4025 + FW_RI_TPTE_PS_V(page_size));
4026 +- tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
4027 ++ tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
4028 + FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
4029 +- tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
4030 +- tpt.va_hi = cpu_to_be32((u32)(to >> 32));
4031 +- tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
4032 +- tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
4033 +- tpt.len_hi = cpu_to_be32((u32)(len >> 32));
4034 ++ tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
4035 ++ tpt->va_hi = cpu_to_be32((u32)(to >> 32));
4036 ++ tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
4037 ++ tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
4038 ++ tpt->len_hi = cpu_to_be32((u32)(len >> 32));
4039 + }
4040 + err = write_adapter_mem(rdev, stag_idx +
4041 + (rdev->lldi.vr->stag.start >> 5),
4042 +- sizeof(tpt), &tpt, skb);
4043 ++ sizeof(*tpt), tpt, skb);
4044 +
4045 + if (reset_tpt_entry) {
4046 + c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
4047 +@@ -319,6 +324,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
4048 + rdev->stats.stag.cur -= 32;
4049 + mutex_unlock(&rdev->stats.lock);
4050 + }
4051 ++ kfree(tpt);
4052 + return err;
4053 + }
4054 +
4055 +diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
4056 +index 3e9c353d82ef..a01b25facf46 100644
4057 +--- a/drivers/input/misc/da9063_onkey.c
4058 ++++ b/drivers/input/misc/da9063_onkey.c
4059 +@@ -248,10 +248,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
4060 + onkey->input->phys = onkey->phys;
4061 + onkey->input->dev.parent = &pdev->dev;
4062 +
4063 +- if (onkey->key_power)
4064 +- input_set_capability(onkey->input, EV_KEY, KEY_POWER);
4065 +-
4066 +- input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
4067 ++ input_set_capability(onkey->input, EV_KEY, KEY_POWER);
4068 +
4069 + INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
4070 +
4071 +diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
4072 +index 997ccae7ee05..bae46816a3b3 100644
4073 +--- a/drivers/input/rmi4/rmi_driver.c
4074 ++++ b/drivers/input/rmi4/rmi_driver.c
4075 +@@ -165,7 +165,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
4076 + }
4077 +
4078 + mutex_lock(&data->irq_mutex);
4079 +- bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
4080 ++ bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
4081 + data->irq_count);
4082 + /*
4083 + * At this point, irq_status has all bits that are set in the
4084 +@@ -412,6 +412,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
4085 + bitmap_copy(data->current_irq_mask, data->new_irq_mask,
4086 + data->num_of_irq_regs);
4087 +
4088 ++ bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
4089 ++
4090 + error_unlock:
4091 + mutex_unlock(&data->irq_mutex);
4092 + return error;
4093 +@@ -425,6 +427,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
4094 + struct device *dev = &rmi_dev->dev;
4095 +
4096 + mutex_lock(&data->irq_mutex);
4097 ++ bitmap_andnot(data->fn_irq_bits,
4098 ++ data->fn_irq_bits, mask, data->irq_count);
4099 + bitmap_andnot(data->new_irq_mask,
4100 + data->current_irq_mask, mask, data->irq_count);
4101 +
4102 +diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
4103 +index 28fb71721770..449c4dd060fc 100644
4104 +--- a/drivers/md/raid0.c
4105 ++++ b/drivers/md/raid0.c
4106 +@@ -158,7 +158,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
4107 + } else {
4108 + pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
4109 + mdname(mddev));
4110 +- pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
4111 ++ pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
4112 + err = -ENOTSUPP;
4113 + goto abort;
4114 + }
4115 +diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
4116 +index 48db922075e2..08fa6400d255 100644
4117 +--- a/drivers/memstick/host/jmb38x_ms.c
4118 ++++ b/drivers/memstick/host/jmb38x_ms.c
4119 +@@ -947,7 +947,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
4120 + if (!cnt) {
4121 + rc = -ENODEV;
4122 + pci_dev_busy = 1;
4123 +- goto err_out;
4124 ++ goto err_out_int;
4125 + }
4126 +
4127 + jm = kzalloc(sizeof(struct jmb38x_ms)
4128 +diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
4129 +index c3c9d7e33bd6..8e49974ffa0e 100644
4130 +--- a/drivers/net/dsa/qca8k.c
4131 ++++ b/drivers/net/dsa/qca8k.c
4132 +@@ -551,7 +551,7 @@ qca8k_setup(struct dsa_switch *ds)
4133 + BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
4134 +
4135 + /* Setup connection between CPU port & user ports */
4136 +- for (i = 0; i < DSA_MAX_PORTS; i++) {
4137 ++ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
4138 + /* CPU port gets connected to all user ports of the switch */
4139 + if (dsa_is_cpu_port(ds, i)) {
4140 + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
4141 +@@ -900,7 +900,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
4142 + if (id != QCA8K_ID_QCA8337)
4143 + return -ENODEV;
4144 +
4145 +- priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
4146 ++ priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
4147 + if (!priv->ds)
4148 + return -ENOMEM;
4149 +
4150 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
4151 +index 9d499c5c8f8a..f176a0307f39 100644
4152 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
4153 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
4154 +@@ -368,6 +368,7 @@ struct bcmgenet_mib_counters {
4155 + #define EXT_PWR_DOWN_PHY_EN (1 << 20)
4156 +
4157 + #define EXT_RGMII_OOB_CTRL 0x0C
4158 ++#define RGMII_MODE_EN_V123 (1 << 0)
4159 + #define RGMII_LINK (1 << 4)
4160 + #define OOB_DISABLE (1 << 5)
4161 + #define RGMII_MODE_EN (1 << 6)
4162 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
4163 +index c421e2753c8c..fca9da1b1363 100644
4164 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
4165 ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
4166 +@@ -277,7 +277,11 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
4167 + */
4168 + if (priv->ext_phy) {
4169 + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
4170 +- reg |= RGMII_MODE_EN | id_mode_dis;
4171 ++ reg |= id_mode_dis;
4172 ++ if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
4173 ++ reg |= RGMII_MODE_EN_V123;
4174 ++ else
4175 ++ reg |= RGMII_MODE_EN;
4176 + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
4177 + }
4178 +
4179 +@@ -292,11 +296,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
4180 + struct bcmgenet_priv *priv = netdev_priv(dev);
4181 + struct device_node *dn = priv->pdev->dev.of_node;
4182 + struct phy_device *phydev;
4183 +- u32 phy_flags;
4184 ++ u32 phy_flags = 0;
4185 + int ret;
4186 +
4187 + /* Communicate the integrated PHY revision */
4188 +- phy_flags = priv->gphy_rev;
4189 ++ if (priv->internal_phy)
4190 ++ phy_flags = priv->gphy_rev;
4191 +
4192 + /* Initialize link state variables that bcmgenet_mii_setup() uses */
4193 + priv->old_link = -1;
4194 +diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
4195 +index baf5cc251f32..9a3bc0994a1d 100644
4196 +--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
4197 ++++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
4198 +@@ -156,11 +156,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
4199 + {
4200 + u32 time_cnt;
4201 + u32 reg_value;
4202 ++ int ret;
4203 +
4204 + regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
4205 +
4206 + for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
4207 +- regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
4208 ++ ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
4209 ++ if (ret)
4210 ++ return ret;
4211 ++
4212 + reg_value &= st_msk;
4213 + if ((!!check_st) == (!!reg_value))
4214 + break;
4215 +diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
4216 +index b69c622ba8b2..6f0e4019adef 100644
4217 +--- a/drivers/net/ethernet/i825xx/lasi_82596.c
4218 ++++ b/drivers/net/ethernet/i825xx/lasi_82596.c
4219 +@@ -96,6 +96,8 @@
4220 +
4221 + #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
4222 +
4223 ++#define LIB82596_DMA_ATTR DMA_ATTR_NON_CONSISTENT
4224 ++
4225 + #define DMA_WBACK(ndev, addr, len) \
4226 + do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
4227 +
4228 +@@ -199,7 +201,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
4229 +
4230 + unregister_netdev (dev);
4231 + dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
4232 +- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
4233 ++ lp->dma_addr, LIB82596_DMA_ATTR);
4234 + free_netdev (dev);
4235 + return 0;
4236 + }
4237 +diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
4238 +index f00a1dc2128c..da3758fdf025 100644
4239 +--- a/drivers/net/ethernet/i825xx/lib82596.c
4240 ++++ b/drivers/net/ethernet/i825xx/lib82596.c
4241 +@@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev)
4242 +
4243 + dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
4244 + &lp->dma_addr, GFP_KERNEL,
4245 +- DMA_ATTR_NON_CONSISTENT);
4246 ++ LIB82596_DMA_ATTR);
4247 + if (!dma) {
4248 + printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
4249 + return -ENOMEM;
4250 +@@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev)
4251 + i = register_netdev(dev);
4252 + if (i) {
4253 + dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
4254 +- dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
4255 ++ dma, lp->dma_addr, LIB82596_DMA_ATTR);
4256 + return i;
4257 + }
4258 +
4259 +diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
4260 +index b2c04a789744..43c1fd18670b 100644
4261 +--- a/drivers/net/ethernet/i825xx/sni_82596.c
4262 ++++ b/drivers/net/ethernet/i825xx/sni_82596.c
4263 +@@ -23,6 +23,8 @@
4264 +
4265 + static const char sni_82596_string[] = "snirm_82596";
4266 +
4267 ++#define LIB82596_DMA_ATTR 0
4268 ++
4269 + #define DMA_WBACK(priv, addr, len) do { } while (0)
4270 + #define DMA_INV(priv, addr, len) do { } while (0)
4271 + #define DMA_WBACK_INV(priv, addr, len) do { } while (0)
4272 +@@ -151,7 +153,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
4273 +
4274 + unregister_netdev(dev);
4275 + dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
4276 +- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
4277 ++ lp->dma_addr, LIB82596_DMA_ATTR);
4278 + iounmap(lp->ca);
4279 + iounmap(lp->mpu_port);
4280 + free_netdev (dev);
4281 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4282 +index f4df9ab0aed5..612773b94ae3 100644
4283 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4284 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4285 +@@ -4402,8 +4402,10 @@ int stmmac_suspend(struct device *dev)
4286 + priv->hw->mac->set_mac(priv->ioaddr, false);
4287 + pinctrl_pm_select_sleep_state(priv->device);
4288 + /* Disable clock in case of PWM is off */
4289 +- clk_disable(priv->plat->pclk);
4290 +- clk_disable(priv->plat->stmmac_clk);
4291 ++ if (priv->plat->clk_ptp_ref)
4292 ++ clk_disable_unprepare(priv->plat->clk_ptp_ref);
4293 ++ clk_disable_unprepare(priv->plat->pclk);
4294 ++ clk_disable_unprepare(priv->plat->stmmac_clk);
4295 + }
4296 + spin_unlock_irqrestore(&priv->lock, flags);
4297 +
4298 +@@ -4468,8 +4470,10 @@ int stmmac_resume(struct device *dev)
4299 + } else {
4300 + pinctrl_pm_select_default_state(priv->device);
4301 + /* enable the clk previously disabled */
4302 +- clk_enable(priv->plat->stmmac_clk);
4303 +- clk_enable(priv->plat->pclk);
4304 ++ clk_prepare_enable(priv->plat->stmmac_clk);
4305 ++ clk_prepare_enable(priv->plat->pclk);
4306 ++ if (priv->plat->clk_ptp_ref)
4307 ++ clk_prepare_enable(priv->plat->clk_ptp_ref);
4308 + /* reset the phy so that it's ready */
4309 + if (priv->mii)
4310 + stmmac_mdio_reset(priv->mii);
4311 +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
4312 +index dcd10dba08c7..3a58962babd4 100644
4313 +--- a/drivers/net/ieee802154/ca8210.c
4314 ++++ b/drivers/net/ieee802154/ca8210.c
4315 +@@ -3153,12 +3153,12 @@ static int ca8210_probe(struct spi_device *spi_device)
4316 + goto error;
4317 + }
4318 +
4319 ++ priv->spi->dev.platform_data = pdata;
4320 + ret = ca8210_get_platform_data(priv->spi, pdata);
4321 + if (ret) {
4322 + dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n");
4323 + goto error;
4324 + }
4325 +- priv->spi->dev.platform_data = pdata;
4326 +
4327 + ret = ca8210_dev_com_init(priv);
4328 + if (ret) {
4329 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
4330 +index 455eec3c4694..c0964281ab98 100644
4331 +--- a/drivers/net/usb/r8152.c
4332 ++++ b/drivers/net/usb/r8152.c
4333 +@@ -4465,10 +4465,9 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
4334 + struct r8152 *tp = usb_get_intfdata(intf);
4335 +
4336 + clear_bit(SELECTIVE_SUSPEND, &tp->flags);
4337 +- mutex_lock(&tp->control);
4338 + tp->rtl_ops.init(tp);
4339 + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
4340 +- mutex_unlock(&tp->control);
4341 ++ set_ethernet_addr(tp);
4342 + return rtl8152_resume(intf);
4343 + }
4344 +
4345 +diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
4346 +index d465071656b5..2641e76d03d9 100644
4347 +--- a/drivers/net/xen-netback/interface.c
4348 ++++ b/drivers/net/xen-netback/interface.c
4349 +@@ -718,7 +718,6 @@ err_unmap:
4350 + xenvif_unmap_frontend_data_rings(queue);
4351 + netif_napi_del(&queue->napi);
4352 + err:
4353 +- module_put(THIS_MODULE);
4354 + return err;
4355 + }
4356 +
4357 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
4358 +index 044b208f7f6a..c847b5554db6 100644
4359 +--- a/drivers/pci/pci.c
4360 ++++ b/drivers/pci/pci.c
4361 +@@ -748,19 +748,6 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
4362 + }
4363 + }
4364 +
4365 +-/**
4366 +- * pci_power_up - Put the given device into D0 forcibly
4367 +- * @dev: PCI device to power up
4368 +- */
4369 +-void pci_power_up(struct pci_dev *dev)
4370 +-{
4371 +- if (platform_pci_power_manageable(dev))
4372 +- platform_pci_set_power_state(dev, PCI_D0);
4373 +-
4374 +- pci_raw_set_power_state(dev, PCI_D0);
4375 +- pci_update_current_state(dev, PCI_D0);
4376 +-}
4377 +-
4378 + /**
4379 + * pci_platform_power_transition - Use platform to change device power state
4380 + * @dev: PCI device to handle.
4381 +@@ -939,6 +926,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
4382 + }
4383 + EXPORT_SYMBOL(pci_set_power_state);
4384 +
4385 ++/**
4386 ++ * pci_power_up - Put the given device into D0 forcibly
4387 ++ * @dev: PCI device to power up
4388 ++ */
4389 ++void pci_power_up(struct pci_dev *dev)
4390 ++{
4391 ++ __pci_start_power_transition(dev, PCI_D0);
4392 ++ pci_raw_set_power_state(dev, PCI_D0);
4393 ++ pci_update_current_state(dev, PCI_D0);
4394 ++}
4395 ++
4396 + /**
4397 + * pci_choose_state - Choose the power state of a PCI device
4398 + * @dev: PCI device to be suspended
4399 +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
4400 +index c5fe7d4a9065..262f591ad8a6 100644
4401 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
4402 ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
4403 +@@ -170,10 +170,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
4404 + PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
4405 + BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
4406 + 18, 2, "gpio", "uart"),
4407 +- PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
4408 +- PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
4409 +- PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
4410 +- PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
4411 ++ PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
4412 ++ PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
4413 ++ PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
4414 ++ PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
4415 +
4416 + };
4417 +
4418 +@@ -205,11 +205,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = {
4419 + };
4420 +
4421 + static inline void armada_37xx_update_reg(unsigned int *reg,
4422 +- unsigned int offset)
4423 ++ unsigned int *offset)
4424 + {
4425 + /* We never have more than 2 registers */
4426 +- if (offset >= GPIO_PER_REG) {
4427 +- offset -= GPIO_PER_REG;
4428 ++ if (*offset >= GPIO_PER_REG) {
4429 ++ *offset -= GPIO_PER_REG;
4430 + *reg += sizeof(u32);
4431 + }
4432 + }
4433 +@@ -373,7 +373,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg,
4434 + {
4435 + int offset = irqd_to_hwirq(d);
4436 +
4437 +- armada_37xx_update_reg(reg, offset);
4438 ++ armada_37xx_update_reg(reg, &offset);
4439 + }
4440 +
4441 + static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
4442 +@@ -383,7 +383,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
4443 + unsigned int reg = OUTPUT_EN;
4444 + unsigned int mask;
4445 +
4446 +- armada_37xx_update_reg(&reg, offset);
4447 ++ armada_37xx_update_reg(&reg, &offset);
4448 + mask = BIT(offset);
4449 +
4450 + return regmap_update_bits(info->regmap, reg, mask, 0);
4451 +@@ -396,7 +396,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
4452 + unsigned int reg = OUTPUT_EN;
4453 + unsigned int val, mask;
4454 +
4455 +- armada_37xx_update_reg(&reg, offset);
4456 ++ armada_37xx_update_reg(&reg, &offset);
4457 + mask = BIT(offset);
4458 + regmap_read(info->regmap, reg, &val);
4459 +
4460 +@@ -410,7 +410,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
4461 + unsigned int reg = OUTPUT_EN;
4462 + unsigned int mask, val, ret;
4463 +
4464 +- armada_37xx_update_reg(&reg, offset);
4465 ++ armada_37xx_update_reg(&reg, &offset);
4466 + mask = BIT(offset);
4467 +
4468 + ret = regmap_update_bits(info->regmap, reg, mask, mask);
4469 +@@ -431,7 +431,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
4470 + unsigned int reg = INPUT_VAL;
4471 + unsigned int val, mask;
4472 +
4473 +- armada_37xx_update_reg(&reg, offset);
4474 ++ armada_37xx_update_reg(&reg, &offset);
4475 + mask = BIT(offset);
4476 +
4477 + regmap_read(info->regmap, reg, &val);
4478 +@@ -446,7 +446,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
4479 + unsigned int reg = OUTPUT_VAL;
4480 + unsigned int mask, val;
4481 +
4482 +- armada_37xx_update_reg(&reg, offset);
4483 ++ armada_37xx_update_reg(&reg, &offset);
4484 + mask = BIT(offset);
4485 + val = value ? mask : 0;
4486 +
4487 +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
4488 +index 00fb98f7b2cd..94d1bcc83fa2 100644
4489 +--- a/drivers/s390/scsi/zfcp_fsf.c
4490 ++++ b/drivers/s390/scsi/zfcp_fsf.c
4491 +@@ -21,6 +21,11 @@
4492 +
4493 + struct kmem_cache *zfcp_fsf_qtcb_cache;
4494 +
4495 ++static bool ber_stop = true;
4496 ++module_param(ber_stop, bool, 0600);
4497 ++MODULE_PARM_DESC(ber_stop,
4498 ++ "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
4499 ++
4500 + static void zfcp_fsf_request_timeout_handler(unsigned long data)
4501 + {
4502 + struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
4503 +@@ -230,10 +235,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
4504 + case FSF_STATUS_READ_SENSE_DATA_AVAIL:
4505 + break;
4506 + case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
4507 +- dev_warn(&adapter->ccw_device->dev,
4508 +- "The error threshold for checksum statistics "
4509 +- "has been exceeded\n");
4510 + zfcp_dbf_hba_bit_err("fssrh_3", req);
4511 ++ if (ber_stop) {
4512 ++ dev_warn(&adapter->ccw_device->dev,
4513 ++ "All paths over this FCP device are disused because of excessive bit errors\n");
4514 ++ zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
4515 ++ } else {
4516 ++ dev_warn(&adapter->ccw_device->dev,
4517 ++ "The error threshold for checksum statistics has been exceeded\n");
4518 ++ }
4519 + break;
4520 + case FSF_STATUS_READ_LINK_DOWN:
4521 + zfcp_fsf_status_read_link_down(req);
4522 +diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
4523 +index c535c52e72e5..3f7c25d104fe 100644
4524 +--- a/drivers/scsi/ch.c
4525 ++++ b/drivers/scsi/ch.c
4526 +@@ -578,7 +578,6 @@ ch_release(struct inode *inode, struct file *file)
4527 + scsi_changer *ch = file->private_data;
4528 +
4529 + scsi_device_put(ch->device);
4530 +- ch->device = NULL;
4531 + file->private_data = NULL;
4532 + kref_put(&ch->ref, ch_destroy);
4533 + return 0;
4534 +diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
4535 +index 9b6f5d024dba..f5c09bbf9374 100644
4536 +--- a/drivers/scsi/megaraid.c
4537 ++++ b/drivers/scsi/megaraid.c
4538 +@@ -4221,11 +4221,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4539 + */
4540 + if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
4541 + pdev->subsystem_device == 0xC000)
4542 +- return -ENODEV;
4543 ++ goto out_disable_device;
4544 + /* Now check the magic signature byte */
4545 + pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
4546 + if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
4547 +- return -ENODEV;
4548 ++ goto out_disable_device;
4549 + /* Ok it is probably a megaraid */
4550 + }
4551 +
4552 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
4553 +index 87e04c4a4982..11753ed3433c 100644
4554 +--- a/drivers/scsi/qla2xxx/qla_target.c
4555 ++++ b/drivers/scsi/qla2xxx/qla_target.c
4556 +@@ -996,6 +996,7 @@ static void qlt_free_session_done(struct work_struct *work)
4557 +
4558 + if (logout_started) {
4559 + bool traced = false;
4560 ++ u16 cnt = 0;
4561 +
4562 + while (!ACCESS_ONCE(sess->logout_completed)) {
4563 + if (!traced) {
4564 +@@ -1005,6 +1006,9 @@ static void qlt_free_session_done(struct work_struct *work)
4565 + traced = true;
4566 + }
4567 + msleep(100);
4568 ++ cnt++;
4569 ++ if (cnt > 200)
4570 ++ break;
4571 + }
4572 +
4573 + ql_dbg(ql_dbg_disc, vha, 0xf087,
4574 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
4575 +index cf70f0bb8375..bdec5f429440 100644
4576 +--- a/drivers/scsi/scsi_error.c
4577 ++++ b/drivers/scsi/scsi_error.c
4578 +@@ -935,6 +935,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
4579 + ses->sdb = scmd->sdb;
4580 + ses->next_rq = scmd->request->next_rq;
4581 + ses->result = scmd->result;
4582 ++ ses->resid_len = scmd->req.resid_len;
4583 + ses->underflow = scmd->underflow;
4584 + ses->prot_op = scmd->prot_op;
4585 + ses->eh_eflags = scmd->eh_eflags;
4586 +@@ -946,6 +947,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
4587 + memset(&scmd->sdb, 0, sizeof(scmd->sdb));
4588 + scmd->request->next_rq = NULL;
4589 + scmd->result = 0;
4590 ++ scmd->req.resid_len = 0;
4591 +
4592 + if (sense_bytes) {
4593 + scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
4594 +@@ -999,6 +1001,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
4595 + scmd->sdb = ses->sdb;
4596 + scmd->request->next_rq = ses->next_rq;
4597 + scmd->result = ses->result;
4598 ++ scmd->req.resid_len = ses->resid_len;
4599 + scmd->underflow = ses->underflow;
4600 + scmd->prot_op = ses->prot_op;
4601 + scmd->eh_eflags = ses->eh_eflags;
4602 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
4603 +index 8ce12ffcbb7a..ffb44d77a01b 100644
4604 +--- a/drivers/scsi/scsi_sysfs.c
4605 ++++ b/drivers/scsi/scsi_sysfs.c
4606 +@@ -722,6 +722,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
4607 + const char *buf, size_t count)
4608 + {
4609 + struct kernfs_node *kn;
4610 ++ struct scsi_device *sdev = to_scsi_device(dev);
4611 ++
4612 ++ /*
4613 ++ * We need to try to get module, avoiding the module been removed
4614 ++ * during delete.
4615 ++ */
4616 ++ if (scsi_device_get(sdev))
4617 ++ return -ENODEV;
4618 +
4619 + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
4620 + WARN_ON_ONCE(!kn);
4621 +@@ -736,9 +744,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
4622 + * state into SDEV_DEL.
4623 + */
4624 + device_remove_file(dev, attr);
4625 +- scsi_remove_device(to_scsi_device(dev));
4626 ++ scsi_remove_device(sdev);
4627 + if (kn)
4628 + sysfs_unbreak_active_protection(kn);
4629 ++ scsi_device_put(sdev);
4630 + return count;
4631 + };
4632 + static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
4633 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
4634 +index 3b70f7bb7fe6..35cea5827a7a 100644
4635 +--- a/drivers/scsi/sd.c
4636 ++++ b/drivers/scsi/sd.c
4637 +@@ -1658,7 +1658,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
4638 + /* we need to evaluate the error return */
4639 + if (scsi_sense_valid(sshdr) &&
4640 + (sshdr->asc == 0x3a || /* medium not present */
4641 +- sshdr->asc == 0x20)) /* invalid command */
4642 ++ sshdr->asc == 0x20 || /* invalid command */
4643 ++ (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */
4644 + /* this is no error here */
4645 + return 0;
4646 +
4647 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
4648 +index 60c9184bad3b..07cae5ea608c 100644
4649 +--- a/drivers/scsi/ufs/ufshcd.c
4650 ++++ b/drivers/scsi/ufs/ufshcd.c
4651 +@@ -7755,6 +7755,9 @@ int ufshcd_shutdown(struct ufs_hba *hba)
4652 + {
4653 + int ret = 0;
4654 +
4655 ++ if (!hba->is_powered)
4656 ++ goto out;
4657 ++
4658 + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
4659 + goto out;
4660 +
4661 +diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
4662 +index 42912257e2b9..07b807ceae6d 100644
4663 +--- a/drivers/staging/wlan-ng/cfg80211.c
4664 ++++ b/drivers/staging/wlan-ng/cfg80211.c
4665 +@@ -490,10 +490,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
4666 + /* Set the encryption - we only support wep */
4667 + if (is_wep) {
4668 + if (sme->key) {
4669 +- if (sme->key_idx >= NUM_WEPKEYS) {
4670 +- err = -EINVAL;
4671 +- goto exit;
4672 +- }
4673 ++ if (sme->key_idx >= NUM_WEPKEYS)
4674 ++ return -EINVAL;
4675 +
4676 + result = prism2_domibset_uint32(wlandev,
4677 + DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
4678 +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
4679 +index 50836f79f908..5e456a83779d 100644
4680 +--- a/drivers/usb/class/usblp.c
4681 ++++ b/drivers/usb/class/usblp.c
4682 +@@ -458,6 +458,7 @@ static void usblp_cleanup(struct usblp *usblp)
4683 + kfree(usblp->readbuf);
4684 + kfree(usblp->device_id_string);
4685 + kfree(usblp->statusbuf);
4686 ++ usb_put_intf(usblp->intf);
4687 + kfree(usblp);
4688 + }
4689 +
4690 +@@ -1120,7 +1121,7 @@ static int usblp_probe(struct usb_interface *intf,
4691 + init_waitqueue_head(&usblp->wwait);
4692 + init_usb_anchor(&usblp->urbs);
4693 + usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
4694 +- usblp->intf = intf;
4695 ++ usblp->intf = usb_get_intf(intf);
4696 +
4697 + /* Malloc device ID string buffer to the largest expected length,
4698 + * since we can re-query it on an ioctl and a dynamic string
4699 +@@ -1209,6 +1210,7 @@ abort:
4700 + kfree(usblp->readbuf);
4701 + kfree(usblp->statusbuf);
4702 + kfree(usblp->device_id_string);
4703 ++ usb_put_intf(usblp->intf);
4704 + kfree(usblp);
4705 + abort_ret:
4706 + return retval;
4707 +diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
4708 +index 6df1aded4503..ac2aa04ca657 100644
4709 +--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
4710 ++++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
4711 +@@ -1178,11 +1178,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
4712 + tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
4713 +
4714 + bl = bytes - n;
4715 +- if (bl > 3)
4716 +- bl = 3;
4717 ++ if (bl > 4)
4718 ++ bl = 4;
4719 +
4720 + for (i = 0; i < bl; i++)
4721 +- data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
4722 ++ data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
4723 + }
4724 + break;
4725 +
4726 +diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
4727 +index fa5cf349ae19..cd92ae1231bc 100644
4728 +--- a/drivers/usb/misc/ldusb.c
4729 ++++ b/drivers/usb/misc/ldusb.c
4730 +@@ -383,10 +383,7 @@ static int ld_usb_release(struct inode *inode, struct file *file)
4731 + goto exit;
4732 + }
4733 +
4734 +- if (mutex_lock_interruptible(&dev->mutex)) {
4735 +- retval = -ERESTARTSYS;
4736 +- goto exit;
4737 +- }
4738 ++ mutex_lock(&dev->mutex);
4739 +
4740 + if (dev->open_count != 1) {
4741 + retval = -ENODEV;
4742 +@@ -470,7 +467,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
4743 +
4744 + /* wait for data */
4745 + spin_lock_irq(&dev->rbsl);
4746 +- if (dev->ring_head == dev->ring_tail) {
4747 ++ while (dev->ring_head == dev->ring_tail) {
4748 + dev->interrupt_in_done = 0;
4749 + spin_unlock_irq(&dev->rbsl);
4750 + if (file->f_flags & O_NONBLOCK) {
4751 +@@ -480,12 +477,17 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
4752 + retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
4753 + if (retval < 0)
4754 + goto unlock_exit;
4755 +- } else {
4756 +- spin_unlock_irq(&dev->rbsl);
4757 ++
4758 ++ spin_lock_irq(&dev->rbsl);
4759 + }
4760 ++ spin_unlock_irq(&dev->rbsl);
4761 +
4762 + /* actual_buffer contains actual_length + interrupt_in_buffer */
4763 + actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
4764 ++ if (*actual_buffer > dev->interrupt_in_endpoint_size) {
4765 ++ retval = -EIO;
4766 ++ goto unlock_exit;
4767 ++ }
4768 + bytes_to_read = min(count, *actual_buffer);
4769 + if (bytes_to_read < *actual_buffer)
4770 + dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
4771 +@@ -696,7 +698,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
4772 + dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
4773 +
4774 + dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
4775 +- dev->ring_buffer = kmalloc(ring_buffer_size*(sizeof(size_t)+dev->interrupt_in_endpoint_size), GFP_KERNEL);
4776 ++ dev->ring_buffer = kcalloc(ring_buffer_size,
4777 ++ sizeof(size_t) + dev->interrupt_in_endpoint_size,
4778 ++ GFP_KERNEL);
4779 + if (!dev->ring_buffer)
4780 + goto error;
4781 + dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
4782 +diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
4783 +index 155615aadc9c..378a565ec989 100644
4784 +--- a/drivers/usb/misc/legousbtower.c
4785 ++++ b/drivers/usb/misc/legousbtower.c
4786 +@@ -423,10 +423,7 @@ static int tower_release (struct inode *inode, struct file *file)
4787 + goto exit;
4788 + }
4789 +
4790 +- if (mutex_lock_interruptible(&dev->lock)) {
4791 +- retval = -ERESTARTSYS;
4792 +- goto exit;
4793 +- }
4794 ++ mutex_lock(&dev->lock);
4795 +
4796 + if (dev->open_count != 1) {
4797 + dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
4798 +diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
4799 +index 57e9f6617084..98c22ace784a 100644
4800 +--- a/drivers/usb/serial/ti_usb_3410_5052.c
4801 ++++ b/drivers/usb/serial/ti_usb_3410_5052.c
4802 +@@ -780,7 +780,6 @@ static void ti_close(struct usb_serial_port *port)
4803 + struct ti_port *tport;
4804 + int port_number;
4805 + int status;
4806 +- int do_unlock;
4807 + unsigned long flags;
4808 +
4809 + tdev = usb_get_serial_data(port->serial);
4810 +@@ -804,16 +803,13 @@ static void ti_close(struct usb_serial_port *port)
4811 + "%s - cannot send close port command, %d\n"
4812 + , __func__, status);
4813 +
4814 +- /* if mutex_lock is interrupted, continue anyway */
4815 +- do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
4816 ++ mutex_lock(&tdev->td_open_close_lock);
4817 + --tport->tp_tdev->td_open_port_count;
4818 +- if (tport->tp_tdev->td_open_port_count <= 0) {
4819 ++ if (tport->tp_tdev->td_open_port_count == 0) {
4820 + /* last port is closed, shut down interrupt urb */
4821 + usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
4822 +- tport->tp_tdev->td_open_port_count = 0;
4823 + }
4824 +- if (do_unlock)
4825 +- mutex_unlock(&tdev->td_open_close_lock);
4826 ++ mutex_unlock(&tdev->td_open_close_lock);
4827 + }
4828 +
4829 +
4830 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4831 +index 10dee8245558..fd15f396b3a0 100644
4832 +--- a/fs/btrfs/extent-tree.c
4833 ++++ b/fs/btrfs/extent-tree.c
4834 +@@ -10255,6 +10255,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
4835 + btrfs_err(info,
4836 + "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
4837 + cache->key.objectid);
4838 ++ btrfs_put_block_group(cache);
4839 + ret = -EINVAL;
4840 + goto error;
4841 + }
4842 +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
4843 +index f50d3d0b9b87..483458340b10 100644
4844 +--- a/fs/cifs/smb1ops.c
4845 ++++ b/fs/cifs/smb1ops.c
4846 +@@ -181,6 +181,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
4847 + /* we do not want to loop forever */
4848 + last_mid = cur_mid;
4849 + cur_mid++;
4850 ++ /* avoid 0xFFFF MID */
4851 ++ if (cur_mid == 0xffff)
4852 ++ cur_mid++;
4853 +
4854 + /*
4855 + * This nested loop looks more expensive than it is.
4856 +diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
4857 +index e5dcea6cee5f..f7fba58618ef 100644
4858 +--- a/fs/ocfs2/journal.c
4859 ++++ b/fs/ocfs2/journal.c
4860 +@@ -231,7 +231,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
4861 + /* At this point, we know that no more recovery threads can be
4862 + * launched, so wait for any recovery completion work to
4863 + * complete. */
4864 +- flush_workqueue(osb->ocfs2_wq);
4865 ++ if (osb->ocfs2_wq)
4866 ++ flush_workqueue(osb->ocfs2_wq);
4867 +
4868 + /*
4869 + * Now that recovery is shut down, and the osb is about to be
4870 +diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
4871 +index 5d53d0d63d19..ea38677daa06 100644
4872 +--- a/fs/ocfs2/localalloc.c
4873 ++++ b/fs/ocfs2/localalloc.c
4874 +@@ -391,7 +391,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
4875 + struct ocfs2_dinode *alloc = NULL;
4876 +
4877 + cancel_delayed_work(&osb->la_enable_wq);
4878 +- flush_workqueue(osb->ocfs2_wq);
4879 ++ if (osb->ocfs2_wq)
4880 ++ flush_workqueue(osb->ocfs2_wq);
4881 +
4882 + if (osb->local_alloc_state == OCFS2_LA_UNUSED)
4883 + goto out;
4884 +diff --git a/fs/proc/page.c b/fs/proc/page.c
4885 +index 1491918a33c3..0c952c217118 100644
4886 +--- a/fs/proc/page.c
4887 ++++ b/fs/proc/page.c
4888 +@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
4889 + return -EINVAL;
4890 +
4891 + while (count > 0) {
4892 +- if (pfn_valid(pfn))
4893 +- ppage = pfn_to_page(pfn);
4894 +- else
4895 +- ppage = NULL;
4896 ++ /*
4897 ++ * TODO: ZONE_DEVICE support requires to identify
4898 ++ * memmaps that were actually initialized.
4899 ++ */
4900 ++ ppage = pfn_to_online_page(pfn);
4901 ++
4902 + if (!ppage || PageSlab(ppage))
4903 + pcount = 0;
4904 + else
4905 +@@ -214,10 +216,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
4906 + return -EINVAL;
4907 +
4908 + while (count > 0) {
4909 +- if (pfn_valid(pfn))
4910 +- ppage = pfn_to_page(pfn);
4911 +- else
4912 +- ppage = NULL;
4913 ++ /*
4914 ++ * TODO: ZONE_DEVICE support requires to identify
4915 ++ * memmaps that were actually initialized.
4916 ++ */
4917 ++ ppage = pfn_to_online_page(pfn);
4918 +
4919 + if (put_user(stable_page_flags(ppage), out)) {
4920 + ret = -EFAULT;
4921 +@@ -259,10 +262,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
4922 + return -EINVAL;
4923 +
4924 + while (count > 0) {
4925 +- if (pfn_valid(pfn))
4926 +- ppage = pfn_to_page(pfn);
4927 +- else
4928 +- ppage = NULL;
4929 ++ /*
4930 ++ * TODO: ZONE_DEVICE support requires to identify
4931 ++ * memmaps that were actually initialized.
4932 ++ */
4933 ++ ppage = pfn_to_online_page(pfn);
4934 +
4935 + if (ppage)
4936 + ino = page_cgroup_ino(ppage);
4937 +diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
4938 +index 2b7e227960e1..91f403341dd7 100644
4939 +--- a/include/scsi/scsi_eh.h
4940 ++++ b/include/scsi/scsi_eh.h
4941 +@@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
4942 + struct scsi_eh_save {
4943 + /* saved state */
4944 + int result;
4945 ++ unsigned int resid_len;
4946 + int eh_eflags;
4947 + enum dma_data_direction data_direction;
4948 + unsigned underflow;
4949 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
4950 +index 8ca0075a5464..310656b4ede6 100644
4951 +--- a/mm/hugetlb.c
4952 ++++ b/mm/hugetlb.c
4953 +@@ -1081,11 +1081,10 @@ static bool pfn_range_valid_gigantic(struct zone *z,
4954 + struct page *page;
4955 +
4956 + for (i = start_pfn; i < end_pfn; i++) {
4957 +- if (!pfn_valid(i))
4958 ++ page = pfn_to_online_page(i);
4959 ++ if (!page)
4960 + return false;
4961 +
4962 +- page = pfn_to_page(i);
4963 +-
4964 + if (page_zone(page) != z)
4965 + return false;
4966 +
4967 +diff --git a/mm/page_owner.c b/mm/page_owner.c
4968 +index a71fe4c623ef..6ac05a6ff2d1 100644
4969 +--- a/mm/page_owner.c
4970 ++++ b/mm/page_owner.c
4971 +@@ -273,7 +273,8 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
4972 + * not matter as the mixed block count will still be correct
4973 + */
4974 + for (; pfn < end_pfn; ) {
4975 +- if (!pfn_valid(pfn)) {
4976 ++ page = pfn_to_online_page(pfn);
4977 ++ if (!page) {
4978 + pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
4979 + continue;
4980 + }
4981 +@@ -281,13 +282,13 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
4982 + block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
4983 + block_end_pfn = min(block_end_pfn, end_pfn);
4984 +
4985 +- page = pfn_to_page(pfn);
4986 + pageblock_mt = get_pageblock_migratetype(page);
4987 +
4988 + for (; pfn < block_end_pfn; pfn++) {
4989 + if (!pfn_valid_within(pfn))
4990 + continue;
4991 +
4992 ++ /* The pageblock is online, no need to recheck. */
4993 + page = pfn_to_page(pfn);
4994 +
4995 + if (page_zone(page) != zone)
4996 +diff --git a/mm/shmem.c b/mm/shmem.c
4997 +index 037e2ee9ccac..5b2cc9f9b1f1 100644
4998 +--- a/mm/shmem.c
4999 ++++ b/mm/shmem.c
5000 +@@ -2657,11 +2657,12 @@ static void shmem_tag_pins(struct address_space *mapping)
5001 + void **slot;
5002 + pgoff_t start;
5003 + struct page *page;
5004 ++ unsigned int tagged = 0;
5005 +
5006 + lru_add_drain();
5007 + start = 0;
5008 +- rcu_read_lock();
5009 +
5010 ++ spin_lock_irq(&mapping->tree_lock);
5011 + radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
5012 + page = radix_tree_deref_slot(slot);
5013 + if (!page || radix_tree_exception(page)) {
5014 +@@ -2670,18 +2671,19 @@ static void shmem_tag_pins(struct address_space *mapping)
5015 + continue;
5016 + }
5017 + } else if (page_count(page) - page_mapcount(page) > 1) {
5018 +- spin_lock_irq(&mapping->tree_lock);
5019 + radix_tree_tag_set(&mapping->page_tree, iter.index,
5020 + SHMEM_TAG_PINNED);
5021 +- spin_unlock_irq(&mapping->tree_lock);
5022 + }
5023 +
5024 +- if (need_resched()) {
5025 +- slot = radix_tree_iter_resume(slot, &iter);
5026 +- cond_resched_rcu();
5027 +- }
5028 ++ if (++tagged % 1024)
5029 ++ continue;
5030 ++
5031 ++ slot = radix_tree_iter_resume(slot, &iter);
5032 ++ spin_unlock_irq(&mapping->tree_lock);
5033 ++ cond_resched();
5034 ++ spin_lock_irq(&mapping->tree_lock);
5035 + }
5036 +- rcu_read_unlock();
5037 ++ spin_unlock_irq(&mapping->tree_lock);
5038 + }
5039 +
5040 + /*
5041 +diff --git a/mm/slub.c b/mm/slub.c
5042 +index 220d42e592ef..07aeb129f3f8 100644
5043 +--- a/mm/slub.c
5044 ++++ b/mm/slub.c
5045 +@@ -4790,7 +4790,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
5046 + }
5047 + }
5048 +
5049 +- get_online_mems();
5050 ++ /*
5051 ++ * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
5052 ++ * already held which will conflict with an existing lock order:
5053 ++ *
5054 ++ * mem_hotplug_lock->slab_mutex->kernfs_mutex
5055 ++ *
5056 ++ * We don't really need mem_hotplug_lock (to hold off
5057 ++ * slab_mem_going_offline_callback) here because slab's memory hot
5058 ++ * unplug code doesn't destroy the kmem_cache->node[] data.
5059 ++ */
5060 ++
5061 + #ifdef CONFIG_SLUB_DEBUG
5062 + if (flags & SO_ALL) {
5063 + struct kmem_cache_node *n;
5064 +@@ -4831,7 +4841,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
5065 + x += sprintf(buf + x, " N%d=%lu",
5066 + node, nodes[node]);
5067 + #endif
5068 +- put_online_mems();
5069 + kfree(nodes);
5070 + return x + sprintf(buf + x, "\n");
5071 + }
5072 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
5073 +index 5a1cffb769fd..de7f955ffd0a 100644
5074 +--- a/net/ipv4/route.c
5075 ++++ b/net/ipv4/route.c
5076 +@@ -2351,14 +2351,17 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
5077 + int orig_oif = fl4->flowi4_oif;
5078 + unsigned int flags = 0;
5079 + struct rtable *rth;
5080 +- int err = -ENETUNREACH;
5081 ++ int err;
5082 +
5083 + if (fl4->saddr) {
5084 +- rth = ERR_PTR(-EINVAL);
5085 + if (ipv4_is_multicast(fl4->saddr) ||
5086 + ipv4_is_lbcast(fl4->saddr) ||
5087 +- ipv4_is_zeronet(fl4->saddr))
5088 ++ ipv4_is_zeronet(fl4->saddr)) {
5089 ++ rth = ERR_PTR(-EINVAL);
5090 + goto out;
5091 ++ }
5092 ++
5093 ++ rth = ERR_PTR(-ENETUNREACH);
5094 +
5095 + /* I removed check for oif == dev_out->oif here.
5096 + It was wrong for two reasons:
5097 +diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
5098 +index c813207bb123..928b6b0464b8 100644
5099 +--- a/net/mac80211/debugfs_netdev.c
5100 ++++ b/net/mac80211/debugfs_netdev.c
5101 +@@ -490,9 +490,14 @@ static ssize_t ieee80211_if_fmt_aqm(
5102 + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
5103 + {
5104 + struct ieee80211_local *local = sdata->local;
5105 +- struct txq_info *txqi = to_txq_info(sdata->vif.txq);
5106 ++ struct txq_info *txqi;
5107 + int len;
5108 +
5109 ++ if (!sdata->vif.txq)
5110 ++ return 0;
5111 ++
5112 ++ txqi = to_txq_info(sdata->vif.txq);
5113 ++
5114 + spin_lock_bh(&local->fq.lock);
5115 + rcu_read_lock();
5116 +
5117 +@@ -659,7 +664,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
5118 + DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
5119 + DEBUGFS_ADD(hw_queues);
5120 +
5121 +- if (sdata->local->ops->wake_tx_queue)
5122 ++ if (sdata->local->ops->wake_tx_queue &&
5123 ++ sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
5124 ++ sdata->vif.type != NL80211_IFTYPE_NAN)
5125 + DEBUGFS_ADD(aqm);
5126 + }
5127 +
5128 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
5129 +index d91db72b9e9e..36bd59ff49c4 100644
5130 +--- a/net/mac80211/mlme.c
5131 ++++ b/net/mac80211/mlme.c
5132 +@@ -2430,7 +2430,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
5133 +
5134 + rcu_read_lock();
5135 + ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
5136 +- if (WARN_ON_ONCE(ssid == NULL))
5137 ++ if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
5138 ++ "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
5139 + ssid_len = 0;
5140 + else
5141 + ssid_len = ssid[1];
5142 +@@ -4756,7 +4757,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
5143 +
5144 + rcu_read_lock();
5145 + ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
5146 +- if (!ssidie) {
5147 ++ if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
5148 + rcu_read_unlock();
5149 + kfree(assoc_data);
5150 + return -EINVAL;
5151 +diff --git a/net/sched/act_api.c b/net/sched/act_api.c
5152 +index 4444d7e755e6..8ae0addb7657 100644
5153 +--- a/net/sched/act_api.c
5154 ++++ b/net/sched/act_api.c
5155 +@@ -1072,10 +1072,16 @@ tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
5156 + static int tcf_action_add(struct net *net, struct nlattr *nla,
5157 + struct nlmsghdr *n, u32 portid, int ovr)
5158 + {
5159 +- int ret = 0;
5160 ++ int loop, ret;
5161 + LIST_HEAD(actions);
5162 +
5163 +- ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions);
5164 ++ for (loop = 0; loop < 10; loop++) {
5165 ++ ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
5166 ++ &actions);
5167 ++ if (ret != -EAGAIN)
5168 ++ break;
5169 ++ }
5170 ++
5171 + if (ret)
5172 + return ret;
5173 +
5174 +@@ -1122,10 +1128,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
5175 + */
5176 + if (n->nlmsg_flags & NLM_F_REPLACE)
5177 + ovr = 1;
5178 +-replay:
5179 + ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
5180 +- if (ret == -EAGAIN)
5181 +- goto replay;
5182 + break;
5183 + case RTM_DELACTION:
5184 + ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
5185 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
5186 +index 6a2532370545..a18e9be77216 100644
5187 +--- a/net/sctp/socket.c
5188 ++++ b/net/sctp/socket.c
5189 +@@ -8313,7 +8313,7 @@ struct proto sctp_prot = {
5190 + .backlog_rcv = sctp_backlog_rcv,
5191 + .hash = sctp_hash,
5192 + .unhash = sctp_unhash,
5193 +- .get_port = sctp_get_port,
5194 ++ .no_autobind = true,
5195 + .obj_size = sizeof(struct sctp_sock),
5196 + .sysctl_mem = sysctl_sctp_mem,
5197 + .sysctl_rmem = sysctl_sctp_rmem,
5198 +@@ -8352,7 +8352,7 @@ struct proto sctpv6_prot = {
5199 + .backlog_rcv = sctp_backlog_rcv,
5200 + .hash = sctp_hash,
5201 + .unhash = sctp_unhash,
5202 +- .get_port = sctp_get_port,
5203 ++ .no_autobind = true,
5204 + .obj_size = sizeof(struct sctp6_sock),
5205 + .sysctl_mem = sysctl_sctp_mem,
5206 + .sysctl_rmem = sysctl_sctp_rmem,
5207 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5208 +index ec504c4a397b..ff31feeee8e3 100644
5209 +--- a/net/wireless/nl80211.c
5210 ++++ b/net/wireless/nl80211.c
5211 +@@ -5504,6 +5504,9 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
5212 + if (!rdev->ops->del_mpath)
5213 + return -EOPNOTSUPP;
5214 +
5215 ++ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
5216 ++ return -EOPNOTSUPP;
5217 ++
5218 + return rdev_del_mpath(rdev, dev, dst);
5219 + }
5220 +
5221 +diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
5222 +index c67d7a82ab13..73fd0eae08ca 100644
5223 +--- a/net/wireless/wext-sme.c
5224 ++++ b/net/wireless/wext-sme.c
5225 +@@ -202,6 +202,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
5226 + struct iw_point *data, char *ssid)
5227 + {
5228 + struct wireless_dev *wdev = dev->ieee80211_ptr;
5229 ++ int ret = 0;
5230 +
5231 + /* call only for station! */
5232 + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
5233 +@@ -219,7 +220,10 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
5234 + if (ie) {
5235 + data->flags = 1;
5236 + data->length = ie[1];
5237 +- memcpy(ssid, ie + 2, data->length);
5238 ++ if (data->length > IW_ESSID_MAX_SIZE)
5239 ++ ret = -EINVAL;
5240 ++ else
5241 ++ memcpy(ssid, ie + 2, data->length);
5242 + }
5243 + rcu_read_unlock();
5244 + } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
5245 +@@ -229,7 +233,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
5246 + }
5247 + wdev_unlock(wdev);
5248 +
5249 +- return 0;
5250 ++ return ret;
5251 + }
5252 +
5253 + int cfg80211_mgd_wext_siwap(struct net_device *dev,
5254 +diff --git a/scripts/namespace.pl b/scripts/namespace.pl
5255 +index 729c547fc9e1..30c43e639db8 100755
5256 +--- a/scripts/namespace.pl
5257 ++++ b/scripts/namespace.pl
5258 +@@ -65,13 +65,14 @@
5259 + use warnings;
5260 + use strict;
5261 + use File::Find;
5262 ++use File::Spec;
5263 +
5264 + my $nm = ($ENV{'NM'} || "nm") . " -p";
5265 + my $objdump = ($ENV{'OBJDUMP'} || "objdump") . " -s -j .comment";
5266 +-my $srctree = "";
5267 +-my $objtree = "";
5268 +-$srctree = "$ENV{'srctree'}/" if (exists($ENV{'srctree'}));
5269 +-$objtree = "$ENV{'objtree'}/" if (exists($ENV{'objtree'}));
5270 ++my $srctree = File::Spec->curdir();
5271 ++my $objtree = File::Spec->curdir();
5272 ++$srctree = File::Spec->rel2abs($ENV{'srctree'}) if (exists($ENV{'srctree'}));
5273 ++$objtree = File::Spec->rel2abs($ENV{'objtree'}) if (exists($ENV{'objtree'}));
5274 +
5275 + if ($#ARGV != -1) {
5276 + print STDERR "usage: $0 takes no parameters\n";
5277 +@@ -231,9 +232,9 @@ sub do_nm
5278 + }
5279 + ($source = $basename) =~ s/\.o$//;
5280 + if (-e "$source.c" || -e "$source.S") {
5281 +- $source = "$objtree$File::Find::dir/$source";
5282 ++ $source = File::Spec->catfile($objtree, $File::Find::dir, $source)
5283 + } else {
5284 +- $source = "$srctree$File::Find::dir/$source";
5285 ++ $source = File::Spec->catfile($srctree, $File::Find::dir, $source)
5286 + }
5287 + if (! -e "$source.c" && ! -e "$source.S") {
5288 + # No obvious source, exclude the object if it is conglomerate
5289 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5290 +index ab7bc7ebb721..5412952557f7 100644
5291 +--- a/sound/pci/hda/patch_realtek.c
5292 ++++ b/sound/pci/hda/patch_realtek.c
5293 +@@ -359,6 +359,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
5294 + case 0x10ec0700:
5295 + case 0x10ec0701:
5296 + case 0x10ec0703:
5297 ++ case 0x10ec0711:
5298 + alc_update_coef_idx(codec, 0x10, 1<<15, 0);
5299 + break;
5300 + case 0x10ec0662:
5301 +@@ -7272,6 +7273,7 @@ static int patch_alc269(struct hda_codec *codec)
5302 + case 0x10ec0700:
5303 + case 0x10ec0701:
5304 + case 0x10ec0703:
5305 ++ case 0x10ec0711:
5306 + spec->codec_variant = ALC269_TYPE_ALC700;
5307 + spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
5308 + alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
5309 +@@ -8365,6 +8367,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
5310 + HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
5311 + HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
5312 + HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
5313 ++ HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
5314 + HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
5315 + HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
5316 + HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
5317 +diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
5318 +index 710c01cd2ad2..ab0bbef7eb48 100644
5319 +--- a/sound/soc/sh/rcar/core.c
5320 ++++ b/sound/soc/sh/rcar/core.c
5321 +@@ -676,6 +676,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
5322 + }
5323 +
5324 + /* set format */
5325 ++ rdai->bit_clk_inv = 0;
5326 + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
5327 + case SND_SOC_DAIFMT_I2S:
5328 + rdai->sys_delay = 0;