From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Fri, 11 Oct 2019 17:04:59
Message-Id: 1570813476.18c77bab22d8887efd5b3439655f1b258b7aafce.mpagano@gentoo
commit: 18c77bab22d8887efd5b3439655f1b258b7aafce
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Oct 11 17:04:36 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Oct 11 17:04:36 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=18c77bab

Linux patch 4.19.79

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1078_linux-4.19.79.patch | 4369 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4373 insertions(+)

diff --git a/0000_README b/0000_README
index 021985c..e1f1ada 100644
--- a/0000_README
+++ b/0000_README
@@ -351,6 +351,10 @@ Patch: 1077_linux-4.19.78.patch
From: https://www.kernel.org
Desc: Linux 4.19.78

+Patch: 1078_linux-4.19.79.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.79
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1078_linux-4.19.79.patch b/1078_linux-4.19.79.patch
new file mode 100644
index 0000000..46d1d23
--- /dev/null
+++ b/1078_linux-4.19.79.patch
@@ -0,0 +1,4369 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index e8ddf0ef232e..16607b178b47 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2503,8 +2503,8 @@
+ http://repo.or.cz/w/linux-2.6/mini2440.git
+
+ mitigations=
+- [X86,PPC,S390] Control optional mitigations for CPU
+- vulnerabilities. This is a set of curated,
++ [X86,PPC,S390,ARM64] Control optional mitigations for
++ CPU vulnerabilities. This is a set of curated,
+ arch-independent options, each of which is an
+ aggregation of existing arch-specific options.
+
+@@ -2513,12 +2513,14 @@
+ improves system performance, but it may also
+ expose users to several CPU vulnerabilities.
+ Equivalent to: nopti [X86,PPC]
++ kpti=0 [ARM64]
+ nospectre_v1 [PPC]
+ nobp=0 [S390]
+ nospectre_v1 [X86]
+- nospectre_v2 [X86,PPC,S390]
++ nospectre_v2 [X86,PPC,S390,ARM64]
+ spectre_v2_user=off [X86]
+ spec_store_bypass_disable=off [X86,PPC]
++ ssbd=force-off [ARM64]
+ l1tf=off [X86]
+ mds=off [X86]
+
+@@ -2866,10 +2868,10 @@
+ (bounds check bypass). With this option data leaks
+ are possible in the system.
+
+- nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
+- (indirect branch prediction) vulnerability. System may
+- allow data leaks with this option, which is equivalent
+- to spectre_v2=off.
++ nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
++ the Spectre variant 2 (indirect branch prediction)
++ vulnerability. System may allow data leaks with this
++ option.
+
+ nospec_store_bypass_disable
+ [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
+index d6aff2c5e9e2..6feaffe90e22 100644
+--- a/Documentation/arm64/elf_hwcaps.txt
++++ b/Documentation/arm64/elf_hwcaps.txt
+@@ -178,3 +178,7 @@ HWCAP_ILRCPC
+ HWCAP_FLAGM
+
+ Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001.
++
++HWCAP_SSBS
++
++ Functionality implied by ID_AA64PFR1_EL1.SSBS == 0b0010.
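
[The HWCAP_SSBS entry documented above reaches userspace through the ELF
auxiliary vector, so a program can probe for it with getauxval(3). A minimal
sketch, not part of the patch; the fallback #define mirrors the uapi value
(bit 28) added further down in this series:

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_SSBS
    #define HWCAP_SSBS (1 << 28) /* same value the uapi hwcap.h hunk below adds */
    #endif

    int main(void)
    {
        unsigned long hwcaps = getauxval(AT_HWCAP);

        if (hwcaps & HWCAP_SSBS)
            puts("SSBS: PSTATE.SSBS is controllable from EL0");
        else
            puts("SSBS not reported on this kernel/CPU");
        return 0;
    }
]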
+diff --git a/Makefile b/Makefile
+index 440c5b5c4f4b..4d29c7370b46 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 78
++SUBLEVEL = 79
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index e3ebece79617..51fe21f5d078 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -84,6 +84,7 @@ config ARM64
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST
+ select GENERIC_CPU_AUTOPROBE
++ select GENERIC_CPU_VULNERABILITIES
+ select GENERIC_EARLY_IOREMAP
+ select GENERIC_IDLE_POLL_SETUP
+ select GENERIC_IRQ_MULTI_HANDLER
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index 25ce9056cf64..c3de0bbf0e9a 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -52,7 +52,8 @@
+ #define ARM64_MISMATCHED_CACHE_TYPE 31
+ #define ARM64_HAS_STAGE2_FWB 32
+ #define ARM64_WORKAROUND_1463225 33
++#define ARM64_SSBS 34
+
+-#define ARM64_NCAPS 34
++#define ARM64_NCAPS 35
+
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index 510f687d269a..dda6e5056810 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -525,11 +525,7 @@ static inline int arm64_get_ssbd_state(void)
+ #endif
+ }
+
+-#ifdef CONFIG_ARM64_SSBD
+ void arm64_set_ssbd_mitigation(bool state);
+-#else
+-static inline void arm64_set_ssbd_mitigation(bool state) {}
+-#endif
+
+ #endif /* __ASSEMBLY__ */
+
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 6abe4002945f..367b2e0b6d76 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -398,6 +398,8 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+
+ DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+
++void __kvm_enable_ssbs(void);
++
+ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
+ unsigned long hyp_stack_ptr,
+ unsigned long vector_ptr)
+@@ -418,6 +420,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
+ */
+ BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+ __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
++
++ /*
++ * Disabling SSBD on a non-VHE system requires us to enable SSBS
++ * at EL2.
++ */
++ if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
++ arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
++ kvm_call_hyp(__kvm_enable_ssbs);
++ }
+ }
+
+ static inline bool kvm_arch_check_sve_has_vhe(void)
+diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
+index def5a5e807f0..773ea8e0e442 100644
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -177,11 +177,25 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
+ regs->pc = pc;
+ }
+
++static inline void set_ssbs_bit(struct pt_regs *regs)
++{
++ regs->pstate |= PSR_SSBS_BIT;
++}
++
++static inline void set_compat_ssbs_bit(struct pt_regs *regs)
++{
++ regs->pstate |= PSR_AA32_SSBS_BIT;
++}
++
+ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
+ unsigned long sp)
+ {
+ start_thread_common(regs, pc);
+ regs->pstate = PSR_MODE_EL0t;
++
++ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
++ set_ssbs_bit(regs);
++
+ regs->sp = sp;
+ }
+
+@@ -198,6 +212,9 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
+ regs->pstate |= PSR_AA32_E_BIT;
+ #endif
+
++ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
++ set_compat_ssbs_bit(regs);
++
+ regs->compat_sp = sp;
+ }
+ #endif
+diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
+index 177b851ca6d9..6bc43889d11e 100644
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -50,6 +50,7 @@
+ #define PSR_AA32_I_BIT 0x00000080
+ #define PSR_AA32_A_BIT 0x00000100
+ #define PSR_AA32_E_BIT 0x00000200
++#define PSR_AA32_SSBS_BIT 0x00800000
+ #define PSR_AA32_DIT_BIT 0x01000000
+ #define PSR_AA32_Q_BIT 0x08000000
+ #define PSR_AA32_V_BIT 0x10000000
+diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
+index c1470931b897..3091ae5975a3 100644
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -86,11 +86,14 @@
+
+ #define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
+ #define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
++#define REG_PSTATE_SSBS_IMM sys_reg(0, 3, 4, 0, 1)
+
+ #define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
+ (!!x)<<8 | 0x1f)
+ #define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
+ (!!x)<<8 | 0x1f)
++#define SET_PSTATE_SSBS(x) __emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM | \
++ (!!x)<<8 | 0x1f)
+
+ #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
+ #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
+@@ -419,6 +422,7 @@
+ #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
+
+ /* Common SCTLR_ELx flags. */
++#define SCTLR_ELx_DSSBS (1UL << 44)
+ #define SCTLR_ELx_EE (1 << 25)
+ #define SCTLR_ELx_IESB (1 << 21)
+ #define SCTLR_ELx_WXN (1 << 19)
+@@ -439,7 +443,7 @@
+ (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
+ (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
+ (1 << 27) | (1 << 30) | (1 << 31) | \
+- (0xffffffffUL << 32))
++ (0xffffefffUL << 32))
+
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ #define ENDIAN_SET_EL2 SCTLR_ELx_EE
+@@ -453,7 +457,7 @@
+ #define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
+ #define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+ SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
+- ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
++ SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
+
+ #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
+ #error "Inconsistent SCTLR_EL2 set/clear bits"
+@@ -477,7 +481,7 @@
+ (1 << 29))
+ #define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
+ (1 << 27) | (1 << 30) | (1 << 31) | \
+- (0xffffffffUL << 32))
++ (0xffffefffUL << 32))
+
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
+@@ -494,7 +498,7 @@
+ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
+ #define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\
+ SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
+- SCTLR_EL1_RES0)
++ SCTLR_ELx_DSSBS | SCTLR_EL1_RES0)
+
+ #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
+ #error "Inconsistent SCTLR_EL1 set/clear bits"
+@@ -544,6 +548,13 @@
+ #define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
+ #define ID_AA64PFR0_EL0_32BIT_64BIT 0x2
+
++/* id_aa64pfr1 */
++#define ID_AA64PFR1_SSBS_SHIFT 4
++
++#define ID_AA64PFR1_SSBS_PSTATE_NI 0
++#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
++#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
++
+ /* id_aa64mmfr0 */
+ #define ID_AA64MMFR0_TGRAN4_SHIFT 28
+ #define ID_AA64MMFR0_TGRAN64_SHIFT 24
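
[The SSBS field layout defined above (shift 4, 4-bit unsigned field) can be
read back from EL0 on Linux through the kernel's MRS emulation of the ID
registers. A sketch under that assumption only; older assemblers may need the
raw S3_0_C0_C4_1 encoding in place of the symbolic ID_AA64PFR1_EL1 name:

    #include <stdio.h>

    static unsigned long read_id_aa64pfr1(void)
    {
        unsigned long val;

        /* Trapped and emulated by the kernel for EL0 readers */
        asm volatile("mrs %0, ID_AA64PFR1_EL1" : "=r" (val));
        return val;
    }

    int main(void)
    {
        /* ID_AA64PFR1_SSBS_SHIFT == 4 per the sysreg.h hunk above */
        unsigned long ssbs = (read_id_aa64pfr1() >> 4) & 0xf;

        /* 0: not implemented, 1: PSTATE.SSBS only, 2: plus MSR/MRS immediates */
        printf("ID_AA64PFR1_EL1.SSBS = %lu\n", ssbs);
        return 0;
    }
]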
+diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
+index 17c65c8f33cb..2bcd6e4f3474 100644
+--- a/arch/arm64/include/uapi/asm/hwcap.h
++++ b/arch/arm64/include/uapi/asm/hwcap.h
+@@ -48,5 +48,6 @@
+ #define HWCAP_USCAT (1 << 25)
+ #define HWCAP_ILRCPC (1 << 26)
+ #define HWCAP_FLAGM (1 << 27)
++#define HWCAP_SSBS (1 << 28)
+
+ #endif /* _UAPI__ASM_HWCAP_H */
+diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
+index 5dff8eccd17d..b0fd1d300154 100644
+--- a/arch/arm64/include/uapi/asm/ptrace.h
++++ b/arch/arm64/include/uapi/asm/ptrace.h
+@@ -46,6 +46,7 @@
+ #define PSR_I_BIT 0x00000080
+ #define PSR_A_BIT 0x00000100
+ #define PSR_D_BIT 0x00000200
++#define PSR_SSBS_BIT 0x00001000
+ #define PSR_PAN_BIT 0x00400000
+ #define PSR_UAO_BIT 0x00800000
+ #define PSR_V_BIT 0x10000000
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index dc6c535cbd13..9ccf16939d13 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -19,6 +19,7 @@
+ #include <linux/arm-smccc.h>
+ #include <linux/psci.h>
+ #include <linux/types.h>
++#include <linux/cpu.h>
+ #include <asm/cpu.h>
+ #include <asm/cputype.h>
+ #include <asm/cpufeature.h>
+@@ -87,7 +88,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
+
+ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
+
+-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ #include <asm/mmu_context.h>
+ #include <asm/cacheflush.h>
+
+@@ -109,9 +109,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+ __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+ }
+
+-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+- const char *hyp_vecs_start,
+- const char *hyp_vecs_end)
++static void install_bp_hardening_cb(bp_hardening_cb_t fn,
++ const char *hyp_vecs_start,
++ const char *hyp_vecs_end)
+ {
+ static DEFINE_SPINLOCK(bp_lock);
+ int cpu, slot = -1;
+@@ -138,7 +138,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ #define __smccc_workaround_1_smc_start NULL
+ #define __smccc_workaround_1_smc_end NULL
+
+-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
++static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+ {
+@@ -146,23 +146,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ }
+ #endif /* CONFIG_KVM_INDIRECT_VECTORS */
+
+-static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
+- bp_hardening_cb_t fn,
+- const char *hyp_vecs_start,
+- const char *hyp_vecs_end)
+-{
+- u64 pfr0;
+-
+- if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+- return;
+-
+- pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+- if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+- return;
+-
+- __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+-}
+-
+ #include <uapi/linux/psci.h>
+ #include <linux/arm-smccc.h>
+ #include <linux/psci.h>
+@@ -189,60 +172,83 @@ static void qcom_link_stack_sanitization(void)
+ : "=&r" (tmp));
+ }
+
+-static void
+-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
++static bool __nospectre_v2;
++static int __init parse_nospectre_v2(char *str)
++{
++ __nospectre_v2 = true;
++ return 0;
++}
++early_param("nospectre_v2", parse_nospectre_v2);
++
++/*
++ * -1: No workaround
++ * 0: No workaround required
++ * 1: Workaround installed
++ */
++static int detect_harden_bp_fw(void)
+ {
+ bp_hardening_cb_t cb;
+ void *smccc_start, *smccc_end;
+ struct arm_smccc_res res;
+ u32 midr = read_cpuid_id();
+
+- if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+- return;
+-
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+- return;
++ return -1;
+
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+- if ((int)res.a0 < 0)
+- return;
+- cb = call_hvc_arch_workaround_1;
+- /* This is a guest, no need to patch KVM vectors */
+- smccc_start = NULL;
+- smccc_end = NULL;
++ switch ((int)res.a0) {
++ case 1:
++ /* Firmware says we're just fine */
++ return 0;
++ case 0:
++ cb = call_hvc_arch_workaround_1;
++ /* This is a guest, no need to patch KVM vectors */
++ smccc_start = NULL;
++ smccc_end = NULL;
++ break;
++ default:
++ return -1;
++ }
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+- if ((int)res.a0 < 0)
+- return;
+- cb = call_smc_arch_workaround_1;
+- smccc_start = __smccc_workaround_1_smc_start;
+- smccc_end = __smccc_workaround_1_smc_end;
++ switch ((int)res.a0) {
++ case 1:
++ /* Firmware says we're just fine */
++ return 0;
++ case 0:
++ cb = call_smc_arch_workaround_1;
++ smccc_start = __smccc_workaround_1_smc_start;
++ smccc_end = __smccc_workaround_1_smc_end;
++ break;
++ default:
++ return -1;
++ }
+ break;
+
+ default:
+- return;
++ return -1;
+ }
+
+ if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+ ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
+ cb = qcom_link_stack_sanitization;
+
+- install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
++ if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
++ install_bp_hardening_cb(cb, smccc_start, smccc_end);
+
+- return;
++ return 1;
+ }
+-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
+-#ifdef CONFIG_ARM64_SSBD
+ DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+ int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
++static bool __ssb_safe = true;
+
+ static const struct ssbd_options {
+ const char *str;
+@@ -312,6 +318,19 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
+
+ void arm64_set_ssbd_mitigation(bool state)
+ {
++ if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
++ pr_info_once("SSBD disabled by kernel configuration\n");
++ return;
++ }
++
++ if (this_cpu_has_cap(ARM64_SSBS)) {
++ if (state)
++ asm volatile(SET_PSTATE_SSBS(0));
++ else
++ asm volatile(SET_PSTATE_SSBS(1));
++ return;
++ }
++
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
+@@ -333,11 +352,28 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
+ struct arm_smccc_res res;
+ bool required = true;
+ s32 val;
++ bool this_cpu_safe = false;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
++ if (cpu_mitigations_off())
++ ssbd_state = ARM64_SSBD_FORCE_DISABLE;
++
++ /* delay setting __ssb_safe until we get a firmware response */
++ if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
++ this_cpu_safe = true;
++
++ if (this_cpu_has_cap(ARM64_SSBS)) {
++ if (!this_cpu_safe)
++ __ssb_safe = false;
++ required = false;
++ goto out_printmsg;
++ }
++
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
+ ssbd_state = ARM64_SSBD_UNKNOWN;
++ if (!this_cpu_safe)
++ __ssb_safe = false;
+ return false;
+ }
+
+@@ -354,6 +390,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
+
+ default:
+ ssbd_state = ARM64_SSBD_UNKNOWN;
++ if (!this_cpu_safe)
++ __ssb_safe = false;
+ return false;
+ }
+
+@@ -362,14 +400,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
+ switch (val) {
+ case SMCCC_RET_NOT_SUPPORTED:
+ ssbd_state = ARM64_SSBD_UNKNOWN;
++ if (!this_cpu_safe)
++ __ssb_safe = false;
+ return false;
+
++ /* machines with mixed mitigation requirements must not return this */
+ case SMCCC_RET_NOT_REQUIRED:
+ pr_info_once("%s mitigation not required\n", entry->desc);
+ ssbd_state = ARM64_SSBD_MITIGATED;
+ return false;
+
+ case SMCCC_RET_SUCCESS:
++ __ssb_safe = false;
+ required = true;
+ break;
+
+@@ -379,12 +421,13 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
+
+ default:
+ WARN_ON(1);
++ if (!this_cpu_safe)
++ __ssb_safe = false;
+ return false;
+ }
+
+ switch (ssbd_state) {
+ case ARM64_SSBD_FORCE_DISABLE:
+- pr_info_once("%s disabled from command-line\n", entry->desc);
+ arm64_set_ssbd_mitigation(false);
+ required = false;
+ break;
+@@ -397,7 +440,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
+ break;
+
+ case ARM64_SSBD_FORCE_ENABLE:
+- pr_info_once("%s forced from command-line\n", entry->desc);
+ arm64_set_ssbd_mitigation(true);
+ required = true;
+ break;
+@@ -407,9 +449,27 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
+ break;
+ }
+
++out_printmsg:
++ switch (ssbd_state) {
++ case ARM64_SSBD_FORCE_DISABLE:
++ pr_info_once("%s disabled from command-line\n", entry->desc);
++ break;
++
++ case ARM64_SSBD_FORCE_ENABLE:
++ pr_info_once("%s forced from command-line\n", entry->desc);
++ break;
++ }
++
+ return required;
+ }
+-#endif /* CONFIG_ARM64_SSBD */
++
++/* known invulnerable cores */
++static const struct midr_range arm64_ssb_cpus[] = {
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
++ {},
++};
+
+ #ifdef CONFIG_ARM64_ERRATUM_1463225
+ DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+@@ -464,6 +524,10 @@ has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
+ CAP_MIDR_RANGE_LIST(midr_list)
+
++/* Track overall mitigation state. We are only mitigated if all cores are ok */
++static bool __hardenbp_enab = true;
++static bool __spectrev2_safe = true;
++
+ /*
+ * Generic helper for handling capabilties with multiple (match,enable) pairs
+ * of call backs, sharing the same capability bit.
+@@ -496,26 +560,63 @@ multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
+ caps->cpu_enable(caps);
+ }
+
+-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+-
+ /*
+- * List of CPUs where we need to issue a psci call to
+- * harden the branch predictor.
++ * List of CPUs that do not need any Spectre-v2 mitigation at all.
+ */
+-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
+- MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+- MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+- MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+- MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+- MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+- MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+- MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+- MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+- MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
+- {},
++static const struct midr_range spectre_v2_safe_list[] = {
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
++ { /* sentinel */ }
+ };
+
+-#endif
++/*
++ * Track overall bp hardening for all heterogeneous cores in the machine.
++ * We are only considered "safe" if all booted cores are known safe.
++ */
++static bool __maybe_unused
++check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
++{
++ int need_wa;
++
++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++
++ /* If the CPU has CSV2 set, we're safe */
++ if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
++ ID_AA64PFR0_CSV2_SHIFT))
++ return false;
++
++ /* Alternatively, we have a list of unaffected CPUs */
++ if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
++ return false;
++
++ /* Fallback to firmware detection */
++ need_wa = detect_harden_bp_fw();
++ if (!need_wa)
++ return false;
++
++ __spectrev2_safe = false;
++
++ if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
++ pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
++ __hardenbp_enab = false;
++ return false;
++ }
++
++ /* forced off */
++ if (__nospectre_v2 || cpu_mitigations_off()) {
++ pr_info_once("spectrev2 mitigation disabled by command line option\n");
++ __hardenbp_enab = false;
++ return false;
++ }
++
++ if (need_wa < 0) {
++ pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
++ __hardenbp_enab = false;
++ }
++
++ return (need_wa > 0);
++}
+
+ #ifdef CONFIG_HARDEN_EL2_VECTORS
+
+@@ -674,13 +775,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ },
+ #endif
+-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+- .cpu_enable = enable_smccc_arch_workaround_1,
+- ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++ .matches = check_branch_predictor,
+ },
+-#endif
+ #ifdef CONFIG_HARDEN_EL2_VECTORS
+ {
+ .desc = "EL2 vector hardening",
+@@ -688,14 +787,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
+ },
+ #endif
+-#ifdef CONFIG_ARM64_SSBD
+ {
+ .desc = "Speculative Store Bypass Disable",
+ .capability = ARM64_SSBD,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_ssbd_mitigation,
++ .midr_range_list = arm64_ssb_cpus,
+ },
+-#endif
+ #ifdef CONFIG_ARM64_ERRATUM_1463225
+ {
+ .desc = "ARM erratum 1463225",
+@@ -707,3 +805,38 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ {
+ }
+ };
++
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ if (__spectrev2_safe)
++ return sprintf(buf, "Not affected\n");
++
++ if (__hardenbp_enab)
++ return sprintf(buf, "Mitigation: Branch predictor hardening\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
++
++ssize_t cpu_show_spec_store_bypass(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (__ssb_safe)
++ return sprintf(buf, "Not affected\n");
++
++ switch (ssbd_state) {
++ case ARM64_SSBD_KERNEL:
++ case ARM64_SSBD_FORCE_ENABLE:
++ if (IS_ENABLED(CONFIG_ARM64_SSBD))
++ return sprintf(buf,
++ "Mitigation: Speculative Store Bypass disabled via prctl\n");
++ }
++
++ return sprintf(buf, "Vulnerable\n");
++}
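
[The cpu_show_*() handlers just added, together with cpu_show_meltdown() in
the cpufeature.c hunk below and the GENERIC_CPU_VULNERABILITIES select in the
Kconfig hunk, surface the mitigation state under
/sys/devices/system/cpu/vulnerabilities/. A rough reader, assuming only those
standard sysfs paths:

    #include <stdio.h>

    int main(void)
    {
        /* Entries backed by the cpu_show_*() handlers in this patch */
        static const char *const vulns[] = {
            "meltdown", "spectre_v1", "spectre_v2", "spec_store_bypass",
        };
        char path[128], line[256];
        unsigned int i;

        for (i = 0; i < sizeof(vulns) / sizeof(vulns[0]); i++) {
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/vulnerabilities/%s", vulns[i]);
            f = fopen(path, "r");
            if (!f)
                continue; /* entry absent on this kernel/arch */
            if (fgets(line, sizeof(line), f))
                printf("%-18s %s", vulns[i], line);
            fclose(f);
        }
        return 0;
    }
]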
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index a897efdb3ddd..ff5beb59b3dc 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -24,6 +24,7 @@
+ #include <linux/stop_machine.h>
+ #include <linux/types.h>
+ #include <linux/mm.h>
++#include <linux/cpu.h>
+ #include <asm/cpu.h>
+ #include <asm/cpufeature.h>
+ #include <asm/cpu_ops.h>
+@@ -164,6 +165,11 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+ ARM64_FTR_END,
+ };
+
++static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
++ ARM64_FTR_END,
++};
++
+ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+ /*
+ * We already refuse to boot CPUs that don't support our configured
+@@ -379,7 +385,7 @@ static const struct __ftr_reg_entry {
+
+ /* Op1 = 0, CRn = 0, CRm = 4 */
+ ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
+- ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
++ ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
+ ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
+
+ /* Op1 = 0, CRn = 0, CRm = 5 */
+@@ -669,7 +675,6 @@ void update_cpu_features(int cpu,
+
+ /*
+ * EL3 is not our concern.
+- * ID_AA64PFR1 is currently RES0.
+ */
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
+ info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
+@@ -885,7 +890,7 @@ static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
+ return ctr & BIT(CTR_DIC_SHIFT);
+ }
+
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++static bool __meltdown_safe = true;
+ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+@@ -903,7 +908,17 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ { /* sentinel */ }
+ };
+- char const *str = "command line option";
++ char const *str = "kpti command line option";
++ bool meltdown_safe;
++
++ meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
++
++ /* Defer to CPU feature registers */
++ if (has_cpuid_feature(entry, scope))
++ meltdown_safe = true;
++
++ if (!meltdown_safe)
++ __meltdown_safe = false;
+
+ /*
+ * For reasons that aren't entirely clear, enabling KPTI on Cavium
+@@ -915,6 +930,24 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ __kpti_forced = -1;
+ }
+
++ /* Useful for KASLR robustness */
++ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
++ if (!__kpti_forced) {
++ str = "KASLR";
++ __kpti_forced = 1;
++ }
++ }
++
++ if (cpu_mitigations_off() && !__kpti_forced) {
++ str = "mitigations=off";
++ __kpti_forced = -1;
++ }
++
++ if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
++ pr_info_once("kernel page table isolation disabled by kernel configuration\n");
++ return false;
++ }
++
+ /* Forced? */
+ if (__kpti_forced) {
+ pr_info_once("kernel page table isolation forced %s by %s\n",
+@@ -922,18 +955,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ return __kpti_forced > 0;
+ }
+
+- /* Useful for KASLR robustness */
+- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+- return true;
+-
+- /* Don't force KPTI for CPUs that are not vulnerable */
+- if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+- return false;
+-
+- /* Defer to CPU feature registers */
+- return !has_cpuid_feature(entry, scope);
++ return !meltdown_safe;
+ }
+
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ static void
+ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+ {
+@@ -958,6 +983,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+
+ return;
+ }
++#else
++static void
++kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
++{
++}
++#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
+ static int __init parse_kpti(char *str)
+ {
+@@ -971,7 +1002,6 @@ static int __init parse_kpti(char *str)
+ return 0;
+ }
+ early_param("kpti", parse_kpti);
+-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
+ #ifdef CONFIG_ARM64_HW_AFDBM
+ static inline void __cpu_enable_hw_dbm(void)
+@@ -1067,6 +1097,48 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
+ WARN_ON(val & (7 << 27 | 7 << 21));
+ }
+
++#ifdef CONFIG_ARM64_SSBD
++static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
++{
++ if (user_mode(regs))
++ return 1;
++
++ if (instr & BIT(CRm_shift))
++ regs->pstate |= PSR_SSBS_BIT;
++ else
++ regs->pstate &= ~PSR_SSBS_BIT;
++
++ arm64_skip_faulting_instruction(regs, 4);
++ return 0;
++}
++
++static struct undef_hook ssbs_emulation_hook = {
++ .instr_mask = ~(1U << CRm_shift),
++ .instr_val = 0xd500001f | REG_PSTATE_SSBS_IMM,
++ .fn = ssbs_emulation_handler,
++};
++
++static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
++{
++ static bool undef_hook_registered = false;
++ static DEFINE_SPINLOCK(hook_lock);
++
++ spin_lock(&hook_lock);
++ if (!undef_hook_registered) {
++ register_undef_hook(&ssbs_emulation_hook);
++ undef_hook_registered = true;
++ }
++ spin_unlock(&hook_lock);
++
++ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
++ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
++ arm64_set_ssbd_mitigation(false);
++ } else {
++ arm64_set_ssbd_mitigation(true);
++ }
++}
++#endif /* CONFIG_ARM64_SSBD */
++
+ static const struct arm64_cpu_capabilities arm64_features[] = {
+ {
+ .desc = "GIC system register CPU interface",
+@@ -1150,7 +1222,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
+ .field_pos = ID_AA64PFR0_EL0_SHIFT,
+ .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
+ },
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ {
+ .desc = "Kernel page table isolation (KPTI)",
+ .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+@@ -1166,7 +1237,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
+ .matches = unmap_kernel_at_el0,
+ .cpu_enable = kpti_install_ng_mappings,
+ },
+-#endif
+ {
+ /* FP/SIMD is not implemented */
+ .capability = ARM64_HAS_NO_FPSIMD,
+@@ -1253,6 +1323,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
+ .matches = has_hw_dbm,
+ .cpu_enable = cpu_enable_hw_dbm,
+ },
++#endif
++#ifdef CONFIG_ARM64_SSBD
++ {
++ .desc = "Speculative Store Bypassing Safe (SSBS)",
++ .capability = ARM64_SSBS,
++ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
++ .matches = has_cpuid_feature,
++ .sys_reg = SYS_ID_AA64PFR1_EL1,
++ .field_pos = ID_AA64PFR1_SSBS_SHIFT,
++ .sign = FTR_UNSIGNED,
++ .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
++ .cpu_enable = cpu_enable_ssbs,
++ },
+ #endif
+ {},
+ };
+@@ -1299,6 +1382,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ #ifdef CONFIG_ARM64_SVE
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
+ #endif
++ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
+ {},
+ };
+
+@@ -1793,3 +1877,15 @@ void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
+ /* Firmware may have left a deferred SError in this register. */
+ write_sysreg_s(0, SYS_DISR_EL1);
+ }
++
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ if (__meltdown_safe)
++ return sprintf(buf, "Not affected\n");
++
++ if (arm64_kernel_unmapped_at_el0())
++ return sprintf(buf, "Mitigation: PTI\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
+diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
+index e9ab7b3ed317..dce971f2c167 100644
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -81,6 +81,7 @@ static const char *const hwcap_str[] = {
+ "uscat",
+ "ilrcpc",
+ "flagm",
++ "ssbs",
+ NULL
+ };
+
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 7f1628effe6d..bc2226608e13 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -358,6 +358,10 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
+ if (IS_ENABLED(CONFIG_ARM64_UAO) &&
+ cpus_have_const_cap(ARM64_HAS_UAO))
+ childregs->pstate |= PSR_UAO_BIT;
++
++ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
++ set_ssbs_bit(childregs);
++
+ p->thread.cpu_context.x19 = stack_start;
+ p->thread.cpu_context.x20 = stk_sz;
+ }
+@@ -397,6 +401,32 @@ void uao_thread_switch(struct task_struct *next)
+ }
+ }
+
++/*
++ * Force SSBS state on context-switch, since it may be lost after migrating
++ * from a CPU which treats the bit as RES0 in a heterogeneous system.
++ */
++static void ssbs_thread_switch(struct task_struct *next)
++{
++ struct pt_regs *regs = task_pt_regs(next);
++
++ /*
++ * Nothing to do for kernel threads, but 'regs' may be junk
++ * (e.g. idle task) so check the flags and bail early.
++ */
++ if (unlikely(next->flags & PF_KTHREAD))
++ return;
++
++ /* If the mitigation is enabled, then we leave SSBS clear. */
++ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
++ test_tsk_thread_flag(next, TIF_SSBD))
++ return;
++
++ if (compat_user_mode(regs))
++ set_compat_ssbs_bit(regs);
++ else if (user_mode(regs))
++ set_ssbs_bit(regs);
++}
++
+ /*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+@@ -425,6 +455,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
+ contextidr_thread_switch(next);
+ entry_task_switch(next);
+ uao_thread_switch(next);
++ ssbs_thread_switch(next);
+
+ /*
+ * Complete any pending TLB or cache maintenance on this CPU in case
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 6219486fa25f..0211c3c7533b 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -1666,19 +1666,20 @@ void syscall_trace_exit(struct pt_regs *regs)
+ }
+
+ /*
+- * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
+- * We also take into account DIT (bit 24), which is not yet documented, and
+- * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
+- * allocated an EL0 meaning in future.
++ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
++ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
++ * not described in ARM DDI 0487D.a.
++ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
++ * be allocated an EL0 meaning in future.
+ * Userspace cannot use these until they have an architectural meaning.
+ * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
+ * We also reserve IL for the kernel; SS is handled dynamically.
+ */
+ #define SPSR_EL1_AARCH64_RES0_BITS \
+- (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+- GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
++ (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
++ GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
+ #define SPSR_EL1_AARCH32_RES0_BITS \
+- (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
++ (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
+
+ static int valid_compat_regs(struct user_pt_regs *regs)
+ {
+diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
+index 388f8fc13080..f496fb2f7122 100644
+--- a/arch/arm64/kernel/ssbd.c
++++ b/arch/arm64/kernel/ssbd.c
+@@ -3,13 +3,31 @@
+ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
+ */
+
++#include <linux/compat.h>
+ #include <linux/errno.h>
+ #include <linux/prctl.h>
+ #include <linux/sched.h>
++#include <linux/sched/task_stack.h>
+ #include <linux/thread_info.h>
+
+ #include <asm/cpufeature.h>
+
++static void ssbd_ssbs_enable(struct task_struct *task)
++{
++ u64 val = is_compat_thread(task_thread_info(task)) ?
++ PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
++
++ task_pt_regs(task)->pstate |= val;
++}
++
++static void ssbd_ssbs_disable(struct task_struct *task)
++{
++ u64 val = is_compat_thread(task_thread_info(task)) ?
++ PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
++
++ task_pt_regs(task)->pstate &= ~val;
++}
++
+ /*
+ * prctl interface for SSBD
+ * FIXME: Drop the below ifdefery once merged in 4.18.
+@@ -47,12 +65,14 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
+ return -EPERM;
+ task_clear_spec_ssb_disable(task);
+ clear_tsk_thread_flag(task, TIF_SSBD);
++ ssbd_ssbs_enable(task);
+ break;
+ case PR_SPEC_DISABLE:
+ if (state == ARM64_SSBD_FORCE_DISABLE)
+ return -EPERM;
+ task_set_spec_ssb_disable(task);
+ set_tsk_thread_flag(task, TIF_SSBD);
++ ssbd_ssbs_disable(task);
+ break;
+ case PR_SPEC_FORCE_DISABLE:
+ if (state == ARM64_SSBD_FORCE_DISABLE)
+@@ -60,6 +80,7 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
+ task_set_spec_ssb_disable(task);
+ task_set_spec_ssb_force_disable(task);
+ set_tsk_thread_flag(task, TIF_SSBD);
++ ssbd_ssbs_disable(task);
+ break;
+ default:
+ return -ERANGE;
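
[ssbd_prctl_set() above is reached through the generic speculation-control
prctl. A minimal sketch of a task opting into the mitigation; the PR_SPEC_*
constants are the upstream <linux/prctl.h> values, restated here as a
fallback rather than taken from this patch:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SPECULATION_CTRL
    #define PR_GET_SPECULATION_CTRL 52
    #define PR_SET_SPECULATION_CTRL 53
    #define PR_SPEC_STORE_BYPASS 0
    #define PR_SPEC_DISABLE (1UL << 2)
    #endif

    int main(void)
    {
        /* Disable speculative store bypass for this task; on arm64 this
         * sets TIF_SSBD and, with this patch, clears PSTATE.SSBS via
         * ssbd_ssbs_disable() */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
            perror("PR_SET_SPECULATION_CTRL");

        printf("speculation ctrl state: 0x%x\n",
               (int)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                          0, 0, 0));
        return 0;
    }
]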
+diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
+index 963d669ae3a2..7414b76191c2 100644
+--- a/arch/arm64/kvm/hyp/sysreg-sr.c
++++ b/arch/arm64/kvm/hyp/sysreg-sr.c
+@@ -293,3 +293,14 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
+
+ vcpu->arch.sysregs_loaded_on_cpu = false;
+ }
++
++void __hyp_text __kvm_enable_ssbs(void)
++{
++ u64 tmp;
++
++ asm volatile(
++ "mrs %0, sctlr_el2\n"
++ "orr %0, %0, %1\n"
++ "msr sctlr_el2, %0"
++ : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
++}
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 0edba3e75747..4e2ee743088f 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -387,6 +387,22 @@
+ #define cpu_has_dsp3 __ase(MIPS_ASE_DSP3)
+ #endif
+
++#ifndef cpu_has_loongson_mmi
++#define cpu_has_loongson_mmi __ase(MIPS_ASE_LOONGSON_MMI)
++#endif
++
++#ifndef cpu_has_loongson_cam
++#define cpu_has_loongson_cam __ase(MIPS_ASE_LOONGSON_CAM)
++#endif
++
++#ifndef cpu_has_loongson_ext
++#define cpu_has_loongson_ext __ase(MIPS_ASE_LOONGSON_EXT)
++#endif
++
++#ifndef cpu_has_loongson_ext2
++#define cpu_has_loongson_ext2 __ase(MIPS_ASE_LOONGSON_EXT2)
++#endif
++
+ #ifndef cpu_has_mipsmt
+ #define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
+ #endif
+diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
+index dacbdb84516a..2b4b14a56575 100644
+--- a/arch/mips/include/asm/cpu.h
++++ b/arch/mips/include/asm/cpu.h
+@@ -436,5 +436,9 @@ enum cpu_type_enum {
+ #define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
+ #define MIPS_ASE_DSP3 0x00000200 /* Signal Processing ASE Rev 3*/
+ #define MIPS_ASE_MIPS16E2 0x00000400 /* MIPS16e2 */
++#define MIPS_ASE_LOONGSON_MMI 0x00000800 /* Loongson MultiMedia extensions Instructions */
++#define MIPS_ASE_LOONGSON_CAM 0x00001000 /* Loongson CAM */
++#define MIPS_ASE_LOONGSON_EXT 0x00002000 /* Loongson EXTensions */
++#define MIPS_ASE_LOONGSON_EXT2 0x00004000 /* Loongson EXTensions R2 */
+
+ #endif /* _ASM_CPU_H */
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 25cd8737e7fe..958b627592c3 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1489,6 +1489,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R1);
++ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
++ MIPS_ASE_LOONGSON_EXT);
+ break;
+ case PRID_REV_LOONGSON3B_R1:
+ case PRID_REV_LOONGSON3B_R2:
+@@ -1496,6 +1498,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3b");
+ set_isa(c, MIPS_CPU_ISA_M64R1);
++ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
++ MIPS_ASE_LOONGSON_EXT);
+ break;
+ }
+
+@@ -1861,6 +1865,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ decode_configs(c);
+ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
++ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
++ MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
+ break;
+ default:
+ panic("Unknown Loongson Processor ID!");
+diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
+index b2de408a259e..f8d36710cd58 100644
+--- a/arch/mips/kernel/proc.c
++++ b/arch/mips/kernel/proc.c
+@@ -124,6 +124,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ if (cpu_has_eva) seq_printf(m, "%s", " eva");
+ if (cpu_has_htw) seq_printf(m, "%s", " htw");
+ if (cpu_has_xpa) seq_printf(m, "%s", " xpa");
++ if (cpu_has_loongson_mmi) seq_printf(m, "%s", " loongson-mmi");
++ if (cpu_has_loongson_cam) seq_printf(m, "%s", " loongson-cam");
++ if (cpu_has_loongson_ext) seq_printf(m, "%s", " loongson-ext");
++ if (cpu_has_loongson_ext2) seq_printf(m, "%s", " loongson-ext2");
+ seq_printf(m, "\n");
+
+ if (cpu_has_mmips) {
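
[With the proc.c hunk above, the new Loongson ASE names appear in the MIPS
/proc/cpuinfo ASE list. A rough userspace check; the "ASEs implemented" label
is the one show_cpuinfo() prints upstream, but treat the exact spelling as an
assumption:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[1024];
        FILE *f = fopen("/proc/cpuinfo", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f)) {
            if (strncmp(line, "ASEs implemented", 16))
                continue;
            printf("loongson-mmi: %s\n",
                   strstr(line, "loongson-mmi") ? "yes" : "no");
            break;
        }
        fclose(f);
        return 0;
    }
]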
+diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
+index 29f49a35d6ee..6a6804c2e1b0 100644
+--- a/arch/powerpc/include/asm/cputable.h
++++ b/arch/powerpc/include/asm/cputable.h
+@@ -212,7 +212,7 @@ static inline void cpu_feature_keys_init(void) { }
+ #define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000)
+ #define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000)
+ #define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000)
+-#define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x0000400000000000)
++#define CPU_FTR_P9_TLBIE_STQ_BUG LONG_ASM_CONST(0x0000400000000000)
+ #define CPU_FTR_P9_TIDR LONG_ASM_CONST(0x0000800000000000)
+
+ #ifndef __ASSEMBLY__
+@@ -460,7 +460,7 @@ static inline void cpu_feature_keys_init(void) { }
+ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
+ CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
+- CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
++ CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TIDR)
+ #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
+ #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
+ #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
+diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
+index f432054234a4..f3b8e04eca9c 100644
+--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
+@@ -694,9 +694,35 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
+ return true;
+ }
+
++/*
++ * Handle POWER9 broadcast tlbie invalidation issue using
++ * cpu feature flag.
++ */
++static __init void update_tlbie_feature_flag(unsigned long pvr)
++{
++ if (PVR_VER(pvr) == PVR_POWER9) {
++ /*
++ * Set the tlbie feature flag for anything below
++ * Nimbus DD 2.3 and Cumulus DD 1.3
++ */
++ if ((pvr & 0xe000) == 0) {
++ /* Nimbus */
++ if ((pvr & 0xfff) < 0x203)
++ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
++ } else if ((pvr & 0xc000) == 0) {
++ /* Cumulus */
++ if ((pvr & 0xfff) < 0x103)
++ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
++ } else {
++ WARN_ONCE(1, "Unknown PVR");
++ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
++ }
++ }
++}
++
+ static __init void cpufeatures_cpu_quirks(void)
+ {
+- int version = mfspr(SPRN_PVR);
++ unsigned long version = mfspr(SPRN_PVR);
+
+ /*
+ * Not all quirks can be derived from the cpufeatures device tree.
+@@ -715,10 +741,10 @@ static __init void cpufeatures_cpu_quirks(void)
+
+ if ((version & 0xffff0000) == 0x004e0000) {
+ cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
+- cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
+ }
+
++ update_tlbie_feature_flag(version);
+ /*
+ * PKEY was not in the initial base or feature node
+ * specification, but it should become optional in the next
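
[update_tlbie_feature_flag() above keys the workaround off the POWER9 PVR:
chip variant in the high bits of the low halfword, DD revision in the low 12
bits. Restated as a standalone predicate for checking a given PVR value;
thresholds are copied from the hunk, and it assumes PVR_VER() already matched
POWER9:

    #include <stdio.h>

    /* 1 if the revision needs CPU_FTR_P9_TLBIE_STQ_BUG:
     * Nimbus below DD2.3, Cumulus below DD1.3, unknown variants assumed bad */
    static int p9_needs_tlbie_stq_workaround(unsigned long pvr)
    {
        if ((pvr & 0xe000) == 0) /* Nimbus */
            return (pvr & 0xfff) < 0x203;
        if ((pvr & 0xc000) == 0) /* Cumulus */
            return (pvr & 0xfff) < 0x103;
        return 1;
    }

    int main(void)
    {
        printf("Nimbus DD2.2 (0x0202): %d\n", p9_needs_tlbie_stq_workaround(0x0202));
        printf("Nimbus DD2.3 (0x0203): %d\n", p9_needs_tlbie_stq_workaround(0x0203));
        return 0;
    }
]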
+diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
+index efdd16a79075..93e06778b136 100644
+--- a/arch/powerpc/kernel/mce.c
++++ b/arch/powerpc/kernel/mce.c
+@@ -45,6 +45,7 @@ static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
+ mce_ue_event_queue);
+
+ static void machine_check_process_queued_event(struct irq_work *work);
++static void machine_check_ue_irq_work(struct irq_work *work);
+ void machine_check_ue_event(struct machine_check_event *evt);
+ static void machine_process_ue_event(struct work_struct *work);
+
+@@ -52,6 +53,10 @@ static struct irq_work mce_event_process_work = {
+ .func = machine_check_process_queued_event,
+ };
+
++static struct irq_work mce_ue_event_irq_work = {
++ .func = machine_check_ue_irq_work,
++};
++
+ DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
+
+ static void mce_set_error_info(struct machine_check_event *mce,
+@@ -208,6 +213,10 @@ void release_mce_event(void)
+ get_mce_event(NULL, true);
+ }
+
++static void machine_check_ue_irq_work(struct irq_work *work)
++{
++ schedule_work(&mce_ue_event_work);
++}
+
+ /*
+ * Queue up the MCE event which then can be handled later.
+@@ -225,7 +234,7 @@ void machine_check_ue_event(struct machine_check_event *evt)
+ memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
+
+ /* Queue work to process this event later. */
+- schedule_work(&mce_ue_event_work);
++ irq_work_queue(&mce_ue_event_irq_work);
+ }
+
+ /*
+diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
+index 3022d67f0c48..37a110b8e7e1 100644
+--- a/arch/powerpc/kernel/mce_power.c
++++ b/arch/powerpc/kernel/mce_power.c
+@@ -39,6 +39,7 @@
+ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
+ {
+ pte_t *ptep;
++ unsigned int shift;
+ unsigned long flags;
+ struct mm_struct *mm;
+
+@@ -48,13 +49,18 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
+ mm = &init_mm;
+
+ local_irq_save(flags);
+- if (mm == current->mm)
+- ptep = find_current_mm_pte(mm->pgd, addr, NULL, NULL);
+- else
+- ptep = find_init_mm_pte(addr, NULL);
++ ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
+ local_irq_restore(flags);
++
+ if (!ptep || pte_special(*ptep))
+ return ULONG_MAX;
++
++ if (shift > PAGE_SHIFT) {
++ unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
++
++ return pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
++ }
++
+ return pte_pfn(*ptep);
+ }
+
+@@ -339,7 +345,7 @@ static const struct mce_derror_table mce_p9_derror_table[] = {
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ { 0, false, 0, 0, 0, 0 } };
+
+-static int mce_find_instr_ea_and_pfn(struct pt_regs *regs, uint64_t *addr,
++static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
+ uint64_t *phys_addr)
+ {
+ /*
+@@ -530,7 +536,8 @@ static int mce_handle_derror(struct pt_regs *regs,
+ * kernel/exception-64s.h
+ */
+ if (get_paca()->in_mce < MAX_MCE_DEPTH)
+- mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
++ mce_find_instr_ea_and_phys(regs, addr,
++ phys_addr);
+ }
+ found = 1;
+ }
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 05b32cc12e41..3ae3e8d141e3 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -1407,7 +1407,14 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ *val = get_reg_val(id, vcpu->arch.pspb);
+ break;
+ case KVM_REG_PPC_DPDES:
+- *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
++ /*
++ * On POWER9, where we are emulating msgsndp etc.,
++ * we return 1 bit for each vcpu, which can come from
++ * either vcore->dpdes or doorbell_request.
++ * On POWER8, doorbell_request is 0.
++ */
++ *val = get_reg_val(id, vcpu->arch.vcore->dpdes |
++ vcpu->arch.doorbell_request);
+ break;
+ case KVM_REG_PPC_VTB:
+ *val = get_reg_val(id, vcpu->arch.vcore->vtb);
+@@ -2550,7 +2557,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
+ if (!spin_trylock(&pvc->lock))
+ continue;
+ prepare_threads(pvc);
+- if (!pvc->n_runnable) {
++ if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
+ list_del_init(&pvc->preempt_list);
+ if (pvc->runner == NULL) {
+ pvc->vcore_state = VCORE_INACTIVE;
+@@ -2571,15 +2578,20 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
+ spin_unlock(&lp->lock);
+ }
+
+-static bool recheck_signals(struct core_info *cip)
++static bool recheck_signals_and_mmu(struct core_info *cip)
+ {
+ int sub, i;
+ struct kvm_vcpu *vcpu;
++ struct kvmppc_vcore *vc;
+
+- for (sub = 0; sub < cip->n_subcores; ++sub)
+- for_each_runnable_thread(i, vcpu, cip->vc[sub])
++ for (sub = 0; sub < cip->n_subcores; ++sub) {
++ vc = cip->vc[sub];
++ if (!vc->kvm->arch.mmu_ready)
++ return true;
++ for_each_runnable_thread(i, vcpu, vc)
+ if (signal_pending(vcpu->arch.run_task))
+ return true;
++ }
+ return false;
+ }
+
+@@ -2800,7 +2812,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
+ local_irq_disable();
+ hard_irq_disable();
+ if (lazy_irq_pending() || need_resched() ||
+- recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
++ recheck_signals_and_mmu(&core_info)) {
+ local_irq_enable();
+ vc->vcore_state = VCORE_INACTIVE;
+ /* Unlock all except the primary vcore */
+diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+index a67cf1cdeda4..7c68d834c94a 100644
+--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+@@ -452,7 +452,7 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
+ "r" (rbvalues[i]), "r" (kvm->arch.lpid));
+ }
+
+- if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
++ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+ /*
+ * Need the extra ptesync to make sure we don't
+ * re-order the tlbie
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 68c7591f2b5f..f1878e13dd56 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -2903,29 +2903,39 @@ kvm_cede_prodded:
+ kvm_cede_exit:
+ ld r9, HSTATE_KVM_VCPU(r13)
+ #ifdef CONFIG_KVM_XICS
+- /* Abort if we still have a pending escalation */
++ /* are we using XIVE with single escalation? */
++ ld r10, VCPU_XIVE_ESC_VADDR(r9)
++ cmpdi r10, 0
++ beq 3f
++ li r6, XIVE_ESB_SET_PQ_00
++ /*
++ * If we still have a pending escalation, abort the cede,
++ * and we must set PQ to 10 rather than 00 so that we don't
++ * potentially end up with two entries for the escalation
++ * interrupt in the XIVE interrupt queue. In that case
++ * we also don't want to set xive_esc_on to 1 here in
++ * case we race with xive_esc_irq().
++ */
+ lbz r5, VCPU_XIVE_ESC_ON(r9)
+ cmpwi r5, 0
+- beq 1f
++ beq 4f
+ li r0, 0
+ stb r0, VCPU_CEDED(r9)
+-1: /* Enable XIVE escalation */
+- li r5, XIVE_ESB_SET_PQ_00
++ li r6, XIVE_ESB_SET_PQ_10
++ b 5f
++4: li r0, 1
++ stb r0, VCPU_XIVE_ESC_ON(r9)
++ /* make sure store to xive_esc_on is seen before xive_esc_irq runs */
++ sync
++5: /* Enable XIVE escalation */
+ mfmsr r0
+ andi. r0, r0, MSR_DR /* in real mode? */
+ beq 1f
+- ld r10, VCPU_XIVE_ESC_VADDR(r9)
+- cmpdi r10, 0
+- beq 3f
+- ldx r0, r10, r5
++ ldx r0, r10, r6
+ b 2f
+ 1: ld r10, VCPU_XIVE_ESC_RADDR(r9)
+- cmpdi r10, 0
+- beq 3f
+- ldcix r0, r10, r5
++ ldcix r0, r10, r6
+ 2: sync
+- li r0, 1
+- stb r0, VCPU_XIVE_ESC_ON(r9)
+ #endif /* CONFIG_KVM_XICS */
+ 3: b guest_exit_cont
+
+diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
+index aae34f218ab4..031f07f048af 100644
+--- a/arch/powerpc/kvm/book3s_xive.c
++++ b/arch/powerpc/kvm/book3s_xive.c
+@@ -1037,20 +1037,22 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
+ /* Mask the VP IPI */
+ xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
+
+- /* Disable the VP */
+- xive_native_disable_vp(xc->vp_id);
+-
+- /* Free the queues & associated interrupts */
++ /* Free escalations */
+ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+- struct xive_q *q = &xc->queues[i];
+-
+- /* Free the escalation irq */
+ if (xc->esc_virq[i]) {
+ free_irq(xc->esc_virq[i], vcpu);
+ irq_dispose_mapping(xc->esc_virq[i]);
+ kfree(xc->esc_virq_names[i]);
+ }
+- /* Free the queue */
++ }
++
++ /* Disable the VP */
++ xive_native_disable_vp(xc->vp_id);
++
++ /* Free the queues */
++ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
++ struct xive_q *q = &xc->queues[i];
++
+ xive_native_disable_queue(xc->vp_id, q, i);
+ if (q->qpage) {
+ free_pages((unsigned long)q->qpage,
+diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
+index aaa28fd918fe..0c13561d8b80 100644
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -203,7 +203,7 @@ static inline unsigned long ___tlbie(unsigned long vpn, int psize,
+
+ static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+ {
+- if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+- if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
1654 + /* Need the extra ptesync to ensure we don't reorder tlbie */
1655 + asm volatile("ptesync": : :"memory");
1656 + ___tlbie(vpn, psize, apsize, ssize);
1657 +diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
1658 +index 29fd8940867e..b1007e9a31ba 100644
1659 +--- a/arch/powerpc/mm/hash_utils_64.c
1660 ++++ b/arch/powerpc/mm/hash_utils_64.c
1661 +@@ -37,6 +37,7 @@
1662 + #include <linux/context_tracking.h>
1663 + #include <linux/libfdt.h>
1664 + #include <linux/pkeys.h>
1665 ++#include <linux/cpu.h>
1666 +
1667 + #include <asm/debugfs.h>
1668 + #include <asm/processor.h>
1669 +@@ -1891,10 +1892,16 @@ static int hpt_order_get(void *data, u64 *val)
1670 +
1671 + static int hpt_order_set(void *data, u64 val)
1672 + {
1673 ++ int ret;
1674 ++
1675 + if (!mmu_hash_ops.resize_hpt)
1676 + return -ENODEV;
1677 +
1678 +- return mmu_hash_ops.resize_hpt(val);
1679 ++ cpus_read_lock();
1680 ++ ret = mmu_hash_ops.resize_hpt(val);
1681 ++ cpus_read_unlock();
1682 ++
1683 ++ return ret;
1684 + }
1685 +
1686 + DEFINE_SIMPLE_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n");
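
hpt_order_set() now brackets the resize in cpus_read_lock()/cpus_read_unlock() because the pseries implementation (changed later in this patch) switches to stop_machine_cpuslocked() and expects its caller to already hold the CPU-hotplug read lock. A rough userspace analogy, modelling the hotplug lock as a rwlock, all names invented:

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-in for the CPU-hotplug lock: resizers take the read side,
     * "hotplug" events would take the write side. */
    static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Stand-in for mmu_hash_ops.resize_hpt(); must not race with
     * hotplug. */
    static int resize_hpt(unsigned long shift)
    {
        printf("resizing HPT to order %lu\n", shift);
        return 0;
    }

    static int hpt_order_set(unsigned long val)
    {
        int ret;

        pthread_rwlock_rdlock(&hotplug_lock);  /* cpus_read_lock()   */
        ret = resize_hpt(val);
        pthread_rwlock_unlock(&hotplug_lock);  /* cpus_read_unlock() */

        return ret;
    }

    int main(void)
    {
        return hpt_order_set(18);
    }
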
1687 +diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
1688 +index fef3e1eb3a19..0cddae4263f9 100644
1689 +--- a/arch/powerpc/mm/tlb-radix.c
1690 ++++ b/arch/powerpc/mm/tlb-radix.c
1691 +@@ -220,7 +220,7 @@ static inline void fixup_tlbie(void)
1692 + unsigned long pid = 0;
1693 + unsigned long va = ((1UL << 52) - 1);
1694 +
1695 +- if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
1696 ++ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
1697 + asm volatile("ptesync": : :"memory");
1698 + __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
1699 + }
1700 +@@ -230,7 +230,7 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
1701 + {
1702 + unsigned long va = ((1UL << 52) - 1);
1703 +
1704 +- if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
1705 ++ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
1706 + asm volatile("ptesync": : :"memory");
1707 + __tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
1708 + }
1709 +diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
1710 +index 38fe4087484a..edf9032e2e5c 100644
1711 +--- a/arch/powerpc/platforms/powernv/opal.c
1712 ++++ b/arch/powerpc/platforms/powernv/opal.c
1713 +@@ -680,7 +680,10 @@ static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
1714 + bin_attr->size);
1715 + }
1716 +
1717 +-static BIN_ATTR_RO(symbol_map, 0);
1718 ++static struct bin_attribute symbol_map_attr = {
1719 ++ .attr = {.name = "symbol_map", .mode = 0400},
1720 ++ .read = symbol_map_read
1721 ++};
1722 +
1723 + static void opal_export_symmap(void)
1724 + {
1725 +@@ -697,10 +700,10 @@ static void opal_export_symmap(void)
1726 + return;
1727 +
1728 + /* Setup attributes */
1729 +- bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
1730 +- bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
1731 ++ symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
1732 ++ symbol_map_attr.size = be64_to_cpu(syms[1]);
1733 +
1734 +- rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
1735 ++ rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr);
1736 + if (rc)
1737 + pr_warn("Error %d creating OPAL symbols file\n", rc);
1738 + }
1739 +diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
1740 +index 29e66d6e5763..15a567128c0f 100644
1741 +--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
1742 ++++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
1743 +@@ -49,6 +49,9 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
1744 + return addr;
1745 + }
1746 +
1747 ++static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
1748 ++ unsigned long size, unsigned int levels);
1749 ++
1750 + static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
1751 + {
1752 + __be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
1753 +@@ -58,9 +61,9 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
1754 +
1755 + while (level) {
1756 + int n = (idx & mask) >> (level * shift);
1757 +- unsigned long tce;
1758 ++ unsigned long oldtce, tce = be64_to_cpu(READ_ONCE(tmp[n]));
1759 +
1760 +- if (tmp[n] == 0) {
1761 ++ if (!tce) {
1762 + __be64 *tmp2;
1763 +
1764 + if (!alloc)
1765 +@@ -71,10 +74,15 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
1766 + if (!tmp2)
1767 + return NULL;
1768 +
1769 +- tmp[n] = cpu_to_be64(__pa(tmp2) |
1770 +- TCE_PCI_READ | TCE_PCI_WRITE);
1771 ++ tce = __pa(tmp2) | TCE_PCI_READ | TCE_PCI_WRITE;
1772 ++ oldtce = be64_to_cpu(cmpxchg(&tmp[n], 0,
1773 ++ cpu_to_be64(tce)));
1774 ++ if (oldtce) {
1775 ++ pnv_pci_ioda2_table_do_free_pages(tmp2,
1776 ++ ilog2(tbl->it_level_size) + 3, 1);
1777 ++ tce = oldtce;
1778 ++ }
1779 + }
1780 +- tce = be64_to_cpu(tmp[n]);
1781 +
1782 + tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
1783 + idx &= ~mask;
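
The pnv_tce() change swaps a plain store for cmpxchg() so that two CPUs racing to populate the same empty table slot both make progress: the loser frees its freshly allocated level and adopts the winner's. A self-contained sketch of that lose-and-free pattern using C11 atomics (all names invented):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static _Atomic uintptr_t slot;  /* 0 = empty, else a table level */

    static uintptr_t get_or_install_level(void)
    {
        uintptr_t cur = atomic_load(&slot);

        if (cur)
            return cur;  /* fast path: already populated */

        void *mine = calloc(1, 4096);
        if (!mine)
            return 0;

        uintptr_t expected = 0;
        if (!atomic_compare_exchange_strong(&slot, &expected,
                                            (uintptr_t)mine)) {
            free(mine);       /* lost the race; use the winner's */
            return expected;  /* CAS left the current value here */
        }
        return (uintptr_t)mine;
    }

    int main(void)
    {
        printf("slot -> %p\n", (void *)get_or_install_level());
        printf("slot -> %p (unchanged)\n", (void *)get_or_install_level());
        return 0;
    }
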
1784 +diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
1785 +index 9e52b686a8fa..ea602f7f97ce 100644
1786 +--- a/arch/powerpc/platforms/pseries/lpar.c
1787 ++++ b/arch/powerpc/platforms/pseries/lpar.c
1788 +@@ -647,7 +647,10 @@ static int pseries_lpar_resize_hpt_commit(void *data)
1789 + return 0;
1790 + }
1791 +
1792 +-/* Must be called in user context */
1793 ++/*
1794 ++ * Must be called in process context. The caller must hold the
1795 ++ * cpus_lock.
1796 ++ */
1797 + static int pseries_lpar_resize_hpt(unsigned long shift)
1798 + {
1799 + struct hpt_resize_state state = {
1800 +@@ -699,7 +702,8 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
1801 +
1802 + t1 = ktime_get();
1803 +
1804 +- rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL);
1805 ++ rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
1806 ++ &state, NULL);
1807 +
1808 + t2 = ktime_get();
1809 +
1810 +diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
1811 +index fa2c08e3c05e..a03821b2656a 100644
1812 +--- a/arch/riscv/kernel/entry.S
1813 ++++ b/arch/riscv/kernel/entry.S
1814 +@@ -171,9 +171,13 @@ ENTRY(handle_exception)
1815 + move a1, s4 /* scause */
1816 + tail do_IRQ
1817 + 1:
1818 +- /* Exceptions run with interrupts enabled */
1819 ++ /* Exceptions run with interrupts enabled or disabled
1820 ++ depending on the state of sstatus.SR_SPIE */
1821 ++ * depending on the state of sstatus.SR_SPIE */
1822 ++ beqz t0, 1f
1823 + csrs sstatus, SR_SIE
1824 +
1825 ++1:
1826 + /* Handle syscalls */
1827 + li t0, EXC_SYSCALL
1828 + beq s4, t0, handle_syscall
1829 +diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
1830 +index 6e758bb6cd29..99ef537e548a 100644
1831 +--- a/arch/s390/kernel/process.c
1832 ++++ b/arch/s390/kernel/process.c
1833 +@@ -183,20 +183,30 @@ unsigned long get_wchan(struct task_struct *p)
1834 +
1835 + if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
1836 + return 0;
1837 ++
1838 ++ if (!try_get_task_stack(p))
1839 ++ return 0;
1840 ++
1841 + low = task_stack_page(p);
1842 + high = (struct stack_frame *) task_pt_regs(p);
1843 + sf = (struct stack_frame *) p->thread.ksp;
1844 +- if (sf <= low || sf > high)
1845 +- return 0;
1846 ++ if (sf <= low || sf > high) {
1847 ++ return_address = 0;
1848 ++ goto out;
1849 ++ }
1850 + for (count = 0; count < 16; count++) {
1851 + sf = (struct stack_frame *) sf->back_chain;
1852 +- if (sf <= low || sf > high)
1853 +- return 0;
1854 ++ if (sf <= low || sf > high) {
1855 ++ return_address = 0;
1856 ++ goto out;
1857 ++ }
1858 + return_address = sf->gprs[8];
1859 + if (!in_sched_functions(return_address))
1860 +- return return_address;
1861 ++ goto out;
1862 + }
1863 +- return 0;
1864 ++out:
1865 ++ put_task_stack(p);
1866 ++ return return_address;
1867 + }
1868 +
1869 + unsigned long arch_align_stack(unsigned long sp)
1870 +diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
1871 +index e8184a15578a..7b96888974db 100644
1872 +--- a/arch/s390/kernel/topology.c
1873 ++++ b/arch/s390/kernel/topology.c
1874 +@@ -311,7 +311,8 @@ int arch_update_cpu_topology(void)
1875 + on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
1876 + for_each_online_cpu(cpu) {
1877 + dev = get_cpu_device(cpu);
1878 +- kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1879 ++ if (dev)
1880 ++ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1881 + }
1882 + return rc;
1883 + }
1884 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
1885 +index e0551c948c59..fac1d4eaa426 100644
1886 +--- a/arch/s390/kvm/kvm-s390.c
1887 ++++ b/arch/s390/kvm/kvm-s390.c
1888 +@@ -3890,7 +3890,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
1889 + const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
1890 + | KVM_S390_MEMOP_F_CHECK_ONLY;
1891 +
1892 +- if (mop->flags & ~supported_flags)
1893 ++ if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
1894 + return -EINVAL;
1895 +
1896 + if (mop->size > MEM_OP_MAX_SIZE)
1897 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1898 +index e83f4f6bfdac..6f7b3acdab26 100644
1899 +--- a/arch/x86/kvm/vmx.c
1900 ++++ b/arch/x86/kvm/vmx.c
1901 +@@ -8801,7 +8801,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
1902 + /* _system ok, nested_vmx_check_permission has verified cpl=0 */
1903 + if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
1904 + (is_long_mode(vcpu) ? 8 : 4),
1905 +- NULL))
1906 ++ &e))
1907 + kvm_inject_page_fault(vcpu, &e);
1908 + }
1909 +
1910 +@@ -12574,7 +12574,7 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
1911 +
1912 + /* VM-entry exception error code */
1913 + if (has_error_code &&
1914 +- vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
1915 ++ vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))
1916 + return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
1917 +
1918 + /* VM-entry interruption-info field: reserved bits */
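
The narrowed mask reflects that an x86 exception error code is a 16-bit value: only bits 31:16 are reserved, and GENMASK(31, 15) wrongly rejected the architecturally legal bit 15. A standalone check, with GENMASK reimplemented so the demo compiles on its own:

    #include <stdint.h>
    #include <stdio.h>

    /* GENMASK(h, l): bits h..l set, inclusive, as in the kernel. */
    #define GENMASK(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

    int main(void)
    {
        uint32_t err = 0x8000;  /* bit 15 set: a legal 16-bit error code */

        printf("GENMASK(31, 16) = 0x%08x\n", GENMASK(31, 16));
        printf("GENMASK(31, 15) = 0x%08x\n", GENMASK(31, 15));
        printf("old mask rejects 0x%04x: %s\n", err,
               (err & GENMASK(31, 15)) ? "yes (bug)" : "no");
        printf("new mask rejects 0x%04x: %s\n", err,
               (err & GENMASK(31, 16)) ? "yes" : "no");
        return 0;
    }
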
1919 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1920 +index 05cb5855255e..6ae8a013af31 100644
1921 +--- a/arch/x86/kvm/x86.c
1922 ++++ b/arch/x86/kvm/x86.c
1923 +@@ -791,34 +791,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
1924 + }
1925 + EXPORT_SYMBOL_GPL(kvm_set_xcr);
1926 +
1927 +-int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1928 ++static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1929 + {
1930 +- unsigned long old_cr4 = kvm_read_cr4(vcpu);
1931 +- unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
1932 +- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
1933 +-
1934 + if (cr4 & CR4_RESERVED_BITS)
1935 +- return 1;
1936 ++ return -EINVAL;
1937 +
1938 + if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
1939 +- return 1;
1940 ++ return -EINVAL;
1941 +
1942 + if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
1943 +- return 1;
1944 ++ return -EINVAL;
1945 +
1946 + if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
1947 +- return 1;
1948 ++ return -EINVAL;
1949 +
1950 + if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
1951 +- return 1;
1952 ++ return -EINVAL;
1953 +
1954 + if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
1955 +- return 1;
1956 ++ return -EINVAL;
1957 +
1958 + if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
1959 +- return 1;
1960 ++ return -EINVAL;
1961 +
1962 + if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
1963 ++ return -EINVAL;
1964 ++
1965 ++ return 0;
1966 ++}
1967 ++
1968 ++int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1969 ++{
1970 ++ unsigned long old_cr4 = kvm_read_cr4(vcpu);
1971 ++ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
1972 ++ X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
1973 ++
1974 ++ if (kvm_valid_cr4(vcpu, cr4))
1975 + return 1;
1976 +
1977 + if (is_long_mode(vcpu)) {
1978 +@@ -8237,10 +8245,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
1979 +
1980 + static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1981 + {
1982 +- if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
1983 +- (sregs->cr4 & X86_CR4_OSXSAVE))
1984 +- return -EINVAL;
1985 +-
1986 + if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
1987 + /*
1988 + * When EFER.LME and CR0.PG are set, the processor is in
1989 +@@ -8259,7 +8263,7 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1990 + return -EINVAL;
1991 + }
1992 +
1993 +- return 0;
1994 ++ return kvm_valid_cr4(vcpu, sregs->cr4);
1995 + }
1996 +
1997 + static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
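
With kvm_valid_cr4() factored out, the userspace KVM_SET_SREGS path (via kvm_valid_sregs()) applies exactly the same CR4 feature checks as the guest-triggered kvm_set_cr4() path instead of duplicating only one of them. The shape of the refactor, reduced to a toy with invented masks and names:

    #include <errno.h>
    #include <stdio.h>

    /* One validation helper, two callers: the checks cannot drift. */
    static int valid_cr4(unsigned long cr4, unsigned long supported)
    {
        return (cr4 & ~supported) ? -EINVAL : 0;
    }

    static int set_cr4(unsigned long cr4)
    {
        if (valid_cr4(cr4, 0xff))
            return 1;  /* guest-visible failure */
        /* ... commit the value ... */
        return 0;
    }

    static int set_sregs(unsigned long cr4)
    {
        return valid_cr4(cr4, 0xff);  /* userspace gets -EINVAL */
    }

    int main(void)
    {
        printf("set_cr4(0x100)   -> %d\n", set_cr4(0x100));
        printf("set_sregs(0x100) -> %d\n", set_sregs(0x100));
        return 0;
    }
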
1998 +diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
1999 +index 10fb42da0007..b81b5172cf99 100644
2000 +--- a/arch/x86/purgatory/Makefile
2001 ++++ b/arch/x86/purgatory/Makefile
2002 +@@ -23,6 +23,7 @@ KCOV_INSTRUMENT := n
2003 +
2004 + PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
2005 + PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
2006 ++PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
2007 +
2008 + # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
2009 + # in turn leaves some undefined symbols like __fentry__ in purgatory and not
2010 +diff --git a/crypto/skcipher.c b/crypto/skcipher.c
2011 +index b664cf867f5f..a8750b4ebf26 100644
2012 +--- a/crypto/skcipher.c
2013 ++++ b/crypto/skcipher.c
2014 +@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
2015 + return max(start, end_page);
2016 + }
2017 +
2018 +-static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
2019 ++static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
2020 + {
2021 + u8 *addr;
2022 +
2023 +@@ -103,19 +103,21 @@ static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
2024 + addr = skcipher_get_spot(addr, bsize);
2025 + scatterwalk_copychunks(addr, &walk->out, bsize,
2026 + (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
2027 ++ return 0;
2028 + }
2029 +
2030 + int skcipher_walk_done(struct skcipher_walk *walk, int err)
2031 + {
2032 +- unsigned int n; /* bytes processed */
2033 +- bool more;
2034 ++ unsigned int n = walk->nbytes;
2035 ++ unsigned int nbytes = 0;
2036 +
2037 +- if (unlikely(err < 0))
2038 ++ if (!n)
2039 + goto finish;
2040 +
2041 +- n = walk->nbytes - err;
2042 +- walk->total -= n;
2043 +- more = (walk->total != 0);
2044 ++ if (likely(err >= 0)) {
2045 ++ n -= err;
2046 ++ nbytes = walk->total - n;
2047 ++ }
2048 +
2049 + if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
2050 + SKCIPHER_WALK_SLOW |
2051 +@@ -131,7 +133,7 @@ unmap_src:
2052 + memcpy(walk->dst.virt.addr, walk->page, n);
2053 + skcipher_unmap_dst(walk);
2054 + } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
2055 +- if (err) {
2056 ++ if (err > 0) {
2057 + /*
2058 + * Didn't process all bytes. Either the algorithm is
2059 + * broken, or this was the last step and it turned out
2060 +@@ -139,27 +141,29 @@ unmap_src:
2061 + * the algorithm requires it.
2062 + */
2063 + err = -EINVAL;
2064 +- goto finish;
2065 +- }
2066 +- skcipher_done_slow(walk, n);
2067 +- goto already_advanced;
2068 ++ nbytes = 0;
2069 ++ } else
2070 ++ n = skcipher_done_slow(walk, n);
2071 + }
2072 +
2073 ++ if (err > 0)
2074 ++ err = 0;
2075 ++
2076 ++ walk->total = nbytes;
2077 ++ walk->nbytes = 0;
2078 ++
2079 + scatterwalk_advance(&walk->in, n);
2080 + scatterwalk_advance(&walk->out, n);
2081 +-already_advanced:
2082 +- scatterwalk_done(&walk->in, 0, more);
2083 +- scatterwalk_done(&walk->out, 1, more);
2084 ++ scatterwalk_done(&walk->in, 0, nbytes);
2085 ++ scatterwalk_done(&walk->out, 1, nbytes);
2086 +
2087 +- if (more) {
2088 ++ if (nbytes) {
2089 + crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
2090 + CRYPTO_TFM_REQ_MAY_SLEEP : 0);
2091 + return skcipher_walk_next(walk);
2092 + }
2093 +- err = 0;
2094 +-finish:
2095 +- walk->nbytes = 0;
2096 +
2097 ++finish:
2098 + /* Short-circuit for the common/fast path. */
2099 + if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
2100 + goto out;
2101 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
2102 +index b1c7009de1f4..bc2fa4e85f0c 100644
2103 +--- a/drivers/block/nbd.c
2104 ++++ b/drivers/block/nbd.c
2105 +@@ -106,6 +106,7 @@ struct nbd_device {
2106 + struct nbd_config *config;
2107 + struct mutex config_lock;
2108 + struct gendisk *disk;
2109 ++ struct workqueue_struct *recv_workq;
2110 +
2111 + struct list_head list;
2112 + struct task_struct *task_recv;
2113 +@@ -132,9 +133,10 @@ static struct dentry *nbd_dbg_dir;
2114 +
2115 + #define NBD_MAGIC 0x68797548
2116 +
2117 ++#define NBD_DEF_BLKSIZE 1024
2118 ++
2119 + static unsigned int nbds_max = 16;
2120 + static int max_part = 16;
2121 +-static struct workqueue_struct *recv_workqueue;
2122 + static int part_shift;
2123 +
2124 + static int nbd_dev_dbg_init(struct nbd_device *nbd);
2125 +@@ -1025,7 +1027,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
2126 + /* We take the tx_mutex in an error path in the recv_work, so we
2127 + * need to queue_work outside of the tx_mutex.
2128 + */
2129 +- queue_work(recv_workqueue, &args->work);
2130 ++ queue_work(nbd->recv_workq, &args->work);
2131 +
2132 + atomic_inc(&config->live_connections);
2133 + wake_up(&config->conn_wait);
2134 +@@ -1126,6 +1128,10 @@ static void nbd_config_put(struct nbd_device *nbd)
2135 + kfree(nbd->config);
2136 + nbd->config = NULL;
2137 +
2138 ++ if (nbd->recv_workq)
2139 ++ destroy_workqueue(nbd->recv_workq);
2140 ++ nbd->recv_workq = NULL;
2141 ++
2142 + nbd->tag_set.timeout = 0;
2143 + nbd->disk->queue->limits.discard_granularity = 0;
2144 + nbd->disk->queue->limits.discard_alignment = 0;
2145 +@@ -1154,6 +1160,14 @@ static int nbd_start_device(struct nbd_device *nbd)
2146 + return -EINVAL;
2147 + }
2148 +
2149 ++ nbd->recv_workq = alloc_workqueue("knbd%d-recv",
2150 ++ WQ_MEM_RECLAIM | WQ_HIGHPRI |
2151 ++ WQ_UNBOUND, 0, nbd->index);
2152 ++ if (!nbd->recv_workq) {
2153 ++ dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
2154 ++ return -ENOMEM;
2155 ++ }
2156 ++
2157 + blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
2158 + nbd->task_recv = current;
2159 +
2160 +@@ -1184,7 +1198,7 @@ static int nbd_start_device(struct nbd_device *nbd)
2161 + INIT_WORK(&args->work, recv_work);
2162 + args->nbd = nbd;
2163 + args->index = i;
2164 +- queue_work(recv_workqueue, &args->work);
2165 ++ queue_work(nbd->recv_workq, &args->work);
2166 + }
2167 + nbd_size_update(nbd);
2168 + return error;
2169 +@@ -1204,8 +1218,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
2170 + mutex_unlock(&nbd->config_lock);
2171 + ret = wait_event_interruptible(config->recv_wq,
2172 + atomic_read(&config->recv_threads) == 0);
2173 +- if (ret)
2174 ++ if (ret) {
2175 + sock_shutdown(nbd);
2176 ++ flush_workqueue(nbd->recv_workq);
2177 ++ }
2178 + mutex_lock(&nbd->config_lock);
2179 + nbd_bdev_reset(bdev);
2180 + /* user requested, ignore socket errors */
2181 +@@ -1227,6 +1243,14 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
2182 + nbd_config_put(nbd);
2183 + }
2184 +
2185 ++static bool nbd_is_valid_blksize(unsigned long blksize)
2186 ++{
2187 ++ if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
2188 ++ blksize > PAGE_SIZE)
2189 ++ return false;
2190 ++ return true;
2191 ++}
2192 ++
2193 + /* Must be called with config_lock held */
2194 + static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
2195 + unsigned int cmd, unsigned long arg)
2196 +@@ -1242,8 +1266,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
2197 + case NBD_SET_SOCK:
2198 + return nbd_add_socket(nbd, arg, false);
2199 + case NBD_SET_BLKSIZE:
2200 +- if (!arg || !is_power_of_2(arg) || arg < 512 ||
2201 +- arg > PAGE_SIZE)
2202 ++ if (!arg)
2203 ++ arg = NBD_DEF_BLKSIZE;
2204 ++ if (!nbd_is_valid_blksize(arg))
2205 + return -EINVAL;
2206 + nbd_size_set(nbd, arg,
2207 + div_s64(config->bytesize, arg));
2208 +@@ -1323,7 +1348,7 @@ static struct nbd_config *nbd_alloc_config(void)
2209 + atomic_set(&config->recv_threads, 0);
2210 + init_waitqueue_head(&config->recv_wq);
2211 + init_waitqueue_head(&config->conn_wait);
2212 +- config->blksize = 1024;
2213 ++ config->blksize = NBD_DEF_BLKSIZE;
2214 + atomic_set(&config->live_connections, 0);
2215 + try_module_get(THIS_MODULE);
2216 + return config;
2217 +@@ -1759,6 +1784,12 @@ again:
2218 + if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
2219 + u64 bsize =
2220 + nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
2221 ++ if (!bsize)
2222 ++ bsize = NBD_DEF_BLKSIZE;
2223 ++ if (!nbd_is_valid_blksize(bsize)) {
2224 ++ ret = -EINVAL;
2225 ++ goto out;
2226 ++ }
2227 + nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
2228 + }
2229 + if (info->attrs[NBD_ATTR_TIMEOUT]) {
2230 +@@ -1835,6 +1866,12 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
2231 + nbd_disconnect(nbd);
2232 + nbd_clear_sock(nbd);
2233 + mutex_unlock(&nbd->config_lock);
2234 ++ /*
2235 ++ * Make sure recv thread has finished, so it does not drop the last
2236 ++ * config ref and try to destroy the workqueue from inside the work
2237 ++ * queue.
2238 ++ */
2239 ++ flush_workqueue(nbd->recv_workq);
2240 + if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
2241 + &nbd->config->runtime_flags))
2242 + nbd_config_put(nbd);
2243 +@@ -2215,20 +2252,12 @@ static int __init nbd_init(void)
2244 +
2245 + if (nbds_max > 1UL << (MINORBITS - part_shift))
2246 + return -EINVAL;
2247 +- recv_workqueue = alloc_workqueue("knbd-recv",
2248 +- WQ_MEM_RECLAIM | WQ_HIGHPRI |
2249 +- WQ_UNBOUND, 0);
2250 +- if (!recv_workqueue)
2251 +- return -ENOMEM;
2252 +
2253 +- if (register_blkdev(NBD_MAJOR, "nbd")) {
2254 +- destroy_workqueue(recv_workqueue);
2255 ++ if (register_blkdev(NBD_MAJOR, "nbd"))
2256 + return -EIO;
2257 +- }
2258 +
2259 + if (genl_register_family(&nbd_genl_family)) {
2260 + unregister_blkdev(NBD_MAJOR, "nbd");
2261 +- destroy_workqueue(recv_workqueue);
2262 + return -EINVAL;
2263 + }
2264 + nbd_dbg_init();
2265 +@@ -2270,7 +2299,6 @@ static void __exit nbd_cleanup(void)
2266 +
2267 + idr_destroy(&nbd_index_idr);
2268 + genl_unregister_family(&nbd_genl_family);
2269 +- destroy_workqueue(recv_workqueue);
2270 + unregister_blkdev(NBD_MAJOR, "nbd");
2271 + }
2272 +
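
Both the ioctl and the netlink paths now map a zero block size to NBD_DEF_BLKSIZE and funnel it through the shared nbd_is_valid_blksize() helper, so the two entry points cannot drift apart again. The validation itself, as a standalone demo (PAGE_SIZE fixed at 4096 by assumption):

    #include <stdbool.h>
    #include <stdio.h>

    #define NBD_DEF_BLKSIZE 1024
    #define PAGE_SIZE       4096  /* demo assumption */

    static bool nbd_is_valid_blksize(unsigned long blksize)
    {
        /* power of two, and within [512, PAGE_SIZE] */
        return blksize && !(blksize & (blksize - 1)) &&
               blksize >= 512 && blksize <= PAGE_SIZE;
    }

    int main(void)
    {
        unsigned long args[] = { 0, 256, 512, 1000, 1024, 4096, 8192 };

        for (unsigned int i = 0; i < sizeof(args) / sizeof(args[0]); i++) {
            unsigned long arg = args[i] ? args[i] : NBD_DEF_BLKSIZE;
            printf("%5lu -> %s\n", args[i],
                   nbd_is_valid_blksize(arg) ? "ok" : "-EINVAL");
        }
        return 0;
    }
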
2273 +diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
2274 +index a408edd84f34..edacf9b39b63 100644
2275 +--- a/drivers/crypto/caam/caamalg_desc.c
2276 ++++ b/drivers/crypto/caam/caamalg_desc.c
2277 +@@ -509,6 +509,7 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
2278 + const bool is_qi, int era)
2279 + {
2280 + u32 geniv, moveiv;
2281 ++ u32 *wait_cmd;
2282 +
2283 + /* Note: Context registers are saved. */
2284 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2285 +@@ -604,6 +605,14 @@ copy_iv:
2286 +
2287 + /* Will read cryptlen */
2288 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
2289 ++
2290 ++ /*
2291 ++ * Wait for IV transfer (ofifo -> class2) to finish before starting
2292 ++ * ciphertext transfer (ofifo -> external memory).
2293 ++ */
2294 ++ wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
2295 ++ set_jump_tgt_here(desc, wait_cmd);
2296 ++
2297 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
2298 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
2299 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
2300 +diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
2301 +index a917af5776ce..05516b0a4240 100644
2302 +--- a/drivers/crypto/caam/caamalg_desc.h
2303 ++++ b/drivers/crypto/caam/caamalg_desc.h
2304 +@@ -12,7 +12,7 @@
2305 + #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
2306 + #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
2307 + #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
2308 +-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
2309 ++#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ)
2310 + #define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
2311 + #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
2312 + #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
2313 +diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
2314 +index 6183f9128a8a..ea901bc5733c 100644
2315 +--- a/drivers/crypto/cavium/zip/zip_main.c
2316 ++++ b/drivers/crypto/cavium/zip/zip_main.c
2317 +@@ -593,6 +593,7 @@ static const struct file_operations zip_stats_fops = {
2318 + .owner = THIS_MODULE,
2319 + .open = zip_stats_open,
2320 + .read = seq_read,
2321 ++ .release = single_release,
2322 + };
2323 +
2324 + static int zip_clear_open(struct inode *inode, struct file *file)
2325 +@@ -604,6 +605,7 @@ static const struct file_operations zip_clear_fops = {
2326 + .owner = THIS_MODULE,
2327 + .open = zip_clear_open,
2328 + .read = seq_read,
2329 ++ .release = single_release,
2330 + };
2331 +
2332 + static int zip_regs_open(struct inode *inode, struct file *file)
2333 +@@ -615,6 +617,7 @@ static const struct file_operations zip_regs_fops = {
2334 + .owner = THIS_MODULE,
2335 + .open = zip_regs_open,
2336 + .read = seq_read,
2337 ++ .release = single_release,
2338 + };
2339 +
2340 + /* Root directory for thunderx_zip debugfs entry */
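
Each of the three debugfs files gains a .release handler because single_open() allocates per-open seq_file state that only single_release() frees; without it, every open of the file leaks that allocation. The canonical pairing, as a kernel-style sketch with an invented show routine (not the driver's code):

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *s, void *unused)
    {
        seq_puts(s, "demo\n");
        return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
        return single_open(file, demo_show, inode->i_private);
    }

    static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,       /* allocates seq_file state */
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,  /* frees it again */
    };
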
2341 +diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
2342 +index 0669033f5be5..aa6b45bc13b9 100644
2343 +--- a/drivers/crypto/ccree/cc_aead.c
2344 ++++ b/drivers/crypto/ccree/cc_aead.c
2345 +@@ -227,7 +227,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
2346 + /* In case of payload authentication failure, MUST NOT
2347 + * reveal the decrypted message --> zero its memory.
2348 + */
2349 +- cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
2350 ++ cc_zero_sgl(areq->dst, areq->cryptlen);
2351 + err = -EBADMSG;
2352 + }
2353 + } else { /*ENCRYPT*/
2354 +diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
2355 +index 09f708f6418e..bac278d274b0 100644
2356 +--- a/drivers/crypto/ccree/cc_fips.c
2357 ++++ b/drivers/crypto/ccree/cc_fips.c
2358 +@@ -21,7 +21,13 @@ static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
2359 + u32 reg;
2360 +
2361 + reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
2362 +- return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
2363 ++ /* Did the TEE report status? */
2364 ++ if (reg & CC_FIPS_SYNC_TEE_STATUS)
2365 ++ /* Yes. Is it OK? */
2366 ++ return (reg & CC_FIPS_SYNC_MODULE_OK);
2367 ++
2368 ++ /* No. It's either not in use or will be reported later */
2369 ++ return true;
2370 + }
2371 +
2372 + /*
2373 +diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
2374 +index 5c4c0a253129..d78f8d5c89c3 100644
2375 +--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
2376 ++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
2377 +@@ -95,7 +95,7 @@ struct service_hndl {
2378 +
2379 + static inline int get_current_node(void)
2380 + {
2381 +- return topology_physical_package_id(smp_processor_id());
2382 ++ return topology_physical_package_id(raw_smp_processor_id());
2383 + }
2384 +
2385 + int adf_service_register(struct service_hndl *service);
2386 +diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
2387 +index c59d2eee5d30..06768074d2d8 100644
2388 +--- a/drivers/devfreq/tegra-devfreq.c
2389 ++++ b/drivers/devfreq/tegra-devfreq.c
2390 +@@ -486,11 +486,11 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
2391 + {
2392 + struct tegra_devfreq *tegra = dev_get_drvdata(dev);
2393 + struct dev_pm_opp *opp;
2394 +- unsigned long rate = *freq * KHZ;
2395 ++ unsigned long rate;
2396 +
2397 +- opp = devfreq_recommended_opp(dev, &rate, flags);
2398 ++ opp = devfreq_recommended_opp(dev, freq, flags);
2399 + if (IS_ERR(opp)) {
2400 +- dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
2401 ++ dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
2402 + return PTR_ERR(opp);
2403 + }
2404 + rate = dev_pm_opp_get_freq(opp);
2405 +@@ -499,8 +499,6 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
2406 + clk_set_min_rate(tegra->emc_clock, rate);
2407 + clk_set_rate(tegra->emc_clock, 0);
2408 +
2409 +- *freq = rate;
2410 +-
2411 + return 0;
2412 + }
2413 +
2414 +@@ -510,7 +508,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
2415 + struct tegra_devfreq *tegra = dev_get_drvdata(dev);
2416 + struct tegra_devfreq_device *actmon_dev;
2417 +
2418 +- stat->current_frequency = tegra->cur_freq;
2419 ++ stat->current_frequency = tegra->cur_freq * KHZ;
2420 +
2421 + /* To be used by the tegra governor */
2422 + stat->private_data = tegra;
2423 +@@ -565,7 +563,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
2424 + target_freq = max(target_freq, dev->target_freq);
2425 + }
2426 +
2427 +- *freq = target_freq;
2428 ++ *freq = target_freq * KHZ;
2429 +
2430 + return 0;
2431 + }
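
All three tegra-devfreq hunks fix the same class of bug: the devfreq core speaks plain Hz while the driver tracks EMC rates in kHz, so the scaling must happen exactly once, at the interface boundary. A toy illustration with invented names:

    #include <stdio.h>

    #define KHZ 1000UL

    struct tegra { unsigned long cur_freq_khz; };  /* internal unit: kHz */

    /* The devfreq interface reports Hz: convert once, at the boundary. */
    static unsigned long current_frequency_hz(const struct tegra *t)
    {
        return t->cur_freq_khz * KHZ;
    }

    int main(void)
    {
        struct tegra t = { .cur_freq_khz = 204000 };  /* 204 MHz */

        printf("reported to devfreq: %lu Hz\n", current_frequency_hz(&t));
        return 0;
    }
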
2432 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
2433 +index 51b5e977ca88..f4e9d1b10e3e 100644
2434 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
2435 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
2436 +@@ -139,7 +139,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
2437 + /* ring tests don't use a job */
2438 + if (job) {
2439 + vm = job->vm;
2440 +- fence_ctx = job->base.s_fence->scheduled.context;
2441 ++ fence_ctx = job->base.s_fence ?
2442 ++ job->base.s_fence->scheduled.context : 0;
2443 + } else {
2444 + vm = NULL;
2445 + fence_ctx = 0;
2446 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
2447 +index c0396e83f352..fc93b103f777 100644
2448 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
2449 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
2450 +@@ -562,6 +562,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
2451 + if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
2452 + sh_num = 0xffffffff;
2453 +
2454 ++ if (info->read_mmr_reg.count > 128)
2455 ++ return -EINVAL;
2456 ++
2457 + regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
2458 + if (!regs)
2459 + return -ENOMEM;
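
The added bound matters because read_mmr_reg.count comes straight from userspace and sizes a kernel allocation; unchecked, a caller could demand an arbitrarily large buffer. The pattern in miniature (the cap of 128 mirrors the one the patch picks):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int read_regs(unsigned int count)
    {
        if (count > 128)  /* bound the userspace-supplied count first */
            return -EINVAL;

        unsigned int *regs = calloc(count, sizeof(*regs));
        if (!regs)
            return -ENOMEM;
        /* ... fill and copy back ... */
        free(regs);
        return 0;
    }

    int main(void)
    {
        printf("count=4      -> %d\n", read_regs(4));
        printf("count=100000 -> %d\n", read_regs(100000));
        return 0;
    }
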
2460 +diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
2461 +index 663a7c9ca3d3..d0e216d85a22 100644
2462 +--- a/drivers/gpu/drm/i915/gvt/scheduler.c
2463 ++++ b/drivers/gpu/drm/i915/gvt/scheduler.c
2464 +@@ -1276,9 +1276,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
2465 + #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
2466 + ((a)->lrca == (b)->lrca))
2467 +
2468 +-#define get_last_workload(q) \
2469 +- (list_empty(q) ? NULL : container_of(q->prev, \
2470 +- struct intel_vgpu_workload, list))
2471 + /**
2472 + * intel_vgpu_create_workload - create a vGPU workload
2473 + * @vgpu: a vGPU
2474 +@@ -1297,7 +1294,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
2475 + {
2476 + struct intel_vgpu_submission *s = &vgpu->submission;
2477 + struct list_head *q = workload_q_head(vgpu, ring_id);
2478 +- struct intel_vgpu_workload *last_workload = get_last_workload(q);
2479 ++ struct intel_vgpu_workload *last_workload = NULL;
2480 + struct intel_vgpu_workload *workload = NULL;
2481 + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
2482 + u64 ring_context_gpa;
2483 +@@ -1320,15 +1317,20 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
2484 + head &= RB_HEAD_OFF_MASK;
2485 + tail &= RB_TAIL_OFF_MASK;
2486 +
2487 +- if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
2488 +- gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
2489 +- gvt_dbg_el("ctx head %x real head %lx\n", head,
2490 +- last_workload->rb_tail);
2491 +- /*
2492 +- * cannot use guest context head pointer here,
2493 +- * as it might not be updated at this time
2494 +- */
2495 +- head = last_workload->rb_tail;
2496 ++ list_for_each_entry_reverse(last_workload, q, list) {
2497 ++
2498 ++ if (same_context(&last_workload->ctx_desc, desc)) {
2499 ++ gvt_dbg_el("ring id %d cur workload == last\n",
2500 ++ ring_id);
2501 ++ gvt_dbg_el("ctx head %x real head %lx\n", head,
2502 ++ last_workload->rb_tail);
2503 ++ /*
2504 ++ * cannot use guest context head pointer here,
2505 ++ * as it might not be updated at this time
2506 ++ */
2507 ++ head = last_workload->rb_tail;
2508 ++ break;
2509 ++ }
2510 + }
2511 +
2512 + gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
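
Instead of only comparing against the single most recently queued workload, the code now walks the queue from the tail for the newest workload with a matching context, since workloads of other contexts may sit in between. The same reverse search, modelled on a plain array:

    #include <stdio.h>

    struct workload { int context_id; unsigned long rb_tail; };

    static const struct workload *
    find_last_same_ctx(const struct workload *q, int n, int ctx)
    {
        for (int i = n - 1; i >= 0; i--)  /* reverse scan, newest first */
            if (q[i].context_id == ctx)
                return &q[i];
        return NULL;
    }

    int main(void)
    {
        struct workload q[] = {
            { .context_id = 1, .rb_tail = 0x100 },
            { .context_id = 2, .rb_tail = 0x200 },  /* other ctx queued last */
        };
        const struct workload *w = find_last_same_ctx(q, 2, 1);

        printf("head inherited from rb_tail %#lx\n", w ? w->rb_tail : 0UL);
        return 0;
    }
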
2513 +diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
2514 +index 96fb5f635314..cc4ea5502d6c 100644
2515 +--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
2516 ++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
2517 +@@ -429,15 +429,15 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
2518 + }
2519 +
2520 + msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
2521 +- if (!msm_host->byte_clk_src) {
2522 +- ret = -ENODEV;
2523 ++ if (IS_ERR(msm_host->byte_clk_src)) {
2524 ++ ret = PTR_ERR(msm_host->byte_clk_src);
2525 + pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
2526 + goto exit;
2527 + }
2528 +
2529 + msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
2530 +- if (!msm_host->pixel_clk_src) {
2531 +- ret = -ENODEV;
2532 ++ if (IS_ERR(msm_host->pixel_clk_src)) {
2533 ++ ret = PTR_ERR(msm_host->pixel_clk_src);
2534 + pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
2535 + goto exit;
2536 + }
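
clk_get_parent() reports failure through an error-encoded pointer rather than NULL, so the old '!ptr' tests could never fire. A minimal userspace model of the kernel's ERR_PTR convention showing the correct IS_ERR()/PTR_ERR() checks (macros re-derived here for the demo; MAX_ERRNO is 4095 in the kernel):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO  4095
    #define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(p) ((long)(p))
    #define ERR_PTR(e) ((void *)(long)(e))

    /* Stub standing in for clk_get_parent(): never returns NULL. */
    static void *clk_get_parent_stub(int fail)
    {
        return fail ? ERR_PTR(-ENOENT) : (void *)0x1000;
    }

    int main(void)
    {
        void *clk = clk_get_parent_stub(1);

        if (IS_ERR(clk))
            printf("can't find clock, ret=%ld\n", PTR_ERR(clk));
        else
            printf("clock at %p\n", clk);
        return 0;
    }
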
2537 +diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
2538 +index 5e01bfb69d7a..10107e551fac 100644
2539 +--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
2540 ++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
2541 +@@ -1517,7 +1517,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
2542 + nv_encoder->aux = aux;
2543 + }
2544 +
2545 +- if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
2546 ++ if (nv_connector->type != DCB_CONNECTOR_eDP &&
2547 ++ (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
2548 + ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
2549 + ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
2550 + nv_connector->base.base.id,
2551 +diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
2552 +index cb80ddaa19d2..7e9e2f064454 100644
2553 +--- a/drivers/gpu/drm/omapdrm/dss/dss.c
2554 ++++ b/drivers/gpu/drm/omapdrm/dss/dss.c
2555 +@@ -1110,7 +1110,7 @@ static const struct dss_features omap34xx_dss_feats = {
2556 +
2557 + static const struct dss_features omap3630_dss_feats = {
2558 + .model = DSS_MODEL_OMAP3,
2559 +- .fck_div_max = 32,
2560 ++ .fck_div_max = 31,
2561 + .fck_freq_max = 173000000,
2562 + .dss_fck_multiplier = 1,
2563 + .parent_clk_name = "dpll4_ck",
2564 +diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
2565 +index 25b5407c74b5..d83310751a8e 100644
2566 +--- a/drivers/gpu/drm/radeon/radeon_drv.c
2567 ++++ b/drivers/gpu/drm/radeon/radeon_drv.c
2568 +@@ -340,8 +340,39 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
2569 + static int radeon_pci_probe(struct pci_dev *pdev,
2570 + const struct pci_device_id *ent)
2571 + {
2572 ++ unsigned long flags = 0;
2573 + int ret;
2574 +
2575 ++ if (!ent)
2576 ++ return -ENODEV; /* Avoid NULL-ptr deref in drm_get_pci_dev */
2577 ++
2578 ++ flags = ent->driver_data;
2579 ++
2580 ++ if (!radeon_si_support) {
2581 ++ switch (flags & RADEON_FAMILY_MASK) {
2582 ++ case CHIP_TAHITI:
2583 ++ case CHIP_PITCAIRN:
2584 ++ case CHIP_VERDE:
2585 ++ case CHIP_OLAND:
2586 ++ case CHIP_HAINAN:
2587 ++ dev_info(&pdev->dev,
2588 ++ "SI support disabled by module param\n");
2589 ++ return -ENODEV;
2590 ++ }
2591 ++ }
2592 ++ if (!radeon_cik_support) {
2593 ++ switch (flags & RADEON_FAMILY_MASK) {
2594 ++ case CHIP_KAVERI:
2595 ++ case CHIP_BONAIRE:
2596 ++ case CHIP_HAWAII:
2597 ++ case CHIP_KABINI:
2598 ++ case CHIP_MULLINS:
2599 ++ dev_info(&pdev->dev,
2600 ++ "CIK support disabled by module param\n");
2601 ++ return -ENODEV;
2602 ++ }
2603 ++ }
2604 ++
2605 + if (vga_switcheroo_client_probe_defer(pdev))
2606 + return -EPROBE_DEFER;
2607 +
2608 +diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
2609 +index 6a8fb6fd183c..3ff835767ac5 100644
2610 +--- a/drivers/gpu/drm/radeon/radeon_kms.c
2611 ++++ b/drivers/gpu/drm/radeon/radeon_kms.c
2612 +@@ -95,31 +95,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
2613 + struct radeon_device *rdev;
2614 + int r, acpi_status;
2615 +
2616 +- if (!radeon_si_support) {
2617 +- switch (flags & RADEON_FAMILY_MASK) {
2618 +- case CHIP_TAHITI:
2619 +- case CHIP_PITCAIRN:
2620 +- case CHIP_VERDE:
2621 +- case CHIP_OLAND:
2622 +- case CHIP_HAINAN:
2623 +- dev_info(dev->dev,
2624 +- "SI support disabled by module param\n");
2625 +- return -ENODEV;
2626 +- }
2627 +- }
2628 +- if (!radeon_cik_support) {
2629 +- switch (flags & RADEON_FAMILY_MASK) {
2630 +- case CHIP_KAVERI:
2631 +- case CHIP_BONAIRE:
2632 +- case CHIP_HAWAII:
2633 +- case CHIP_KABINI:
2634 +- case CHIP_MULLINS:
2635 +- dev_info(dev->dev,
2636 +- "CIK support disabled by module param\n");
2637 +- return -ENODEV;
2638 +- }
2639 +- }
2640 +-
2641 + rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
2642 + if (rdev == NULL) {
2643 + return -ENOMEM;
2644 +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
2645 +index 2bce7cf0b0af..e45b5ec2f451 100644
2646 +--- a/drivers/hwtracing/coresight/coresight-etm4x.c
2647 ++++ b/drivers/hwtracing/coresight/coresight-etm4x.c
2648 +@@ -174,6 +174,12 @@ static void etm4_enable_hw(void *info)
2649 + if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
2650 + dev_err(drvdata->dev,
2651 + "timeout while waiting for Idle Trace Status\n");
2652 ++ /*
2653 ++ * As recommended by section 4.3.7 ("Synchronization when using the
2654 ++ * memory-mapped interface") of ARM IHI 0064D
2655 ++ */
2656 ++ dsb(sy);
2657 ++ isb();
2658 +
2659 + CS_LOCK(drvdata->base);
2660 +
2661 +@@ -324,8 +330,12 @@ static void etm4_disable_hw(void *info)
2662 + /* EN, bit[0] Trace unit enable bit */
2663 + control &= ~0x1;
2664 +
2665 +- /* make sure everything completes before disabling */
2666 +- mb();
2667 ++ /*
2668 ++ * Make sure everything completes before disabling, as recommended
2669 ++ * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
2670 ++ * SSTATUS") of ARM IHI 0064D
2671 ++ */
2672 ++ dsb(sy);
2673 + isb();
2674 + writel_relaxed(control, drvdata->base + TRCPRGCTLR);
2675 +
2676 +diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
2677 +index e5c598ae5f24..6627523e728b 100644
2678 +--- a/drivers/mmc/host/sdhci-of-esdhc.c
2679 ++++ b/drivers/mmc/host/sdhci-of-esdhc.c
2680 +@@ -480,7 +480,12 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
2681 + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
2682 +
2683 + value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
2684 +- value |= ESDHC_DMA_SNOOP;
2685 ++
2686 ++ if (of_dma_is_coherent(dev->of_node))
2687 ++ value |= ESDHC_DMA_SNOOP;
2688 ++ else
2689 ++ value &= ~ESDHC_DMA_SNOOP;
2690 ++
2691 + sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
2692 + return 0;
2693 + }
2694 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2695 +index eb33b892b484..e99d5632d8fa 100644
2696 +--- a/drivers/mmc/host/sdhci.c
2697 ++++ b/drivers/mmc/host/sdhci.c
2698 +@@ -2720,6 +2720,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
2699 + static void sdhci_adma_show_error(struct sdhci_host *host)
2700 + {
2701 + void *desc = host->adma_table;
2702 ++ dma_addr_t dma = host->adma_addr;
2703 +
2704 + sdhci_dumpregs(host);
2705 +
2706 +@@ -2727,18 +2728,21 @@ static void sdhci_adma_show_error(struct sdhci_host *host)
2707 + struct sdhci_adma2_64_desc *dma_desc = desc;
2708 +
2709 + if (host->flags & SDHCI_USE_64_BIT_DMA)
2710 +- DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2711 +- desc, le32_to_cpu(dma_desc->addr_hi),
2712 ++ SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2713 ++ (unsigned long long)dma,
2714 ++ le32_to_cpu(dma_desc->addr_hi),
2715 + le32_to_cpu(dma_desc->addr_lo),
2716 + le16_to_cpu(dma_desc->len),
2717 + le16_to_cpu(dma_desc->cmd));
2718 + else
2719 +- DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2720 +- desc, le32_to_cpu(dma_desc->addr_lo),
2721 ++ SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2722 ++ (unsigned long long)dma,
2723 ++ le32_to_cpu(dma_desc->addr_lo),
2724 + le16_to_cpu(dma_desc->len),
2725 + le16_to_cpu(dma_desc->cmd));
2726 +
2727 + desc += host->desc_sz;
2728 ++ dma += host->desc_sz;
2729 +
2730 + if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2731 + break;
2732 +@@ -2814,7 +2818,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2733 + != MMC_BUS_TEST_R)
2734 + host->data->error = -EILSEQ;
2735 + else if (intmask & SDHCI_INT_ADMA_ERROR) {
2736 +- pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2737 ++ pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
2738 ++ intmask);
2739 + sdhci_adma_show_error(host);
2740 + host->data->error = -EIO;
2741 + if (host->ops->adma_workaround)
2742 +diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
2743 +index fccb6bf21fad..de8d9dceb123 100644
2744 +--- a/drivers/net/can/spi/mcp251x.c
2745 ++++ b/drivers/net/can/spi/mcp251x.c
2746 +@@ -626,7 +626,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
2747 + static int mcp251x_hw_reset(struct spi_device *spi)
2748 + {
2749 + struct mcp251x_priv *priv = spi_get_drvdata(spi);
2750 +- u8 reg;
2751 ++ unsigned long timeout;
2752 + int ret;
2753 +
2754 + /* Wait for oscillator startup timer after power up */
2755 +@@ -640,10 +640,19 @@ static int mcp251x_hw_reset(struct spi_device *spi)
2756 + /* Wait for oscillator startup timer after reset */
2757 + mdelay(MCP251X_OST_DELAY_MS);
2758 +
2759 +- reg = mcp251x_read_reg(spi, CANSTAT);
2760 +- if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
2761 +- return -ENODEV;
2762 +-
2763 ++ /* Wait for reset to finish */
2764 ++ timeout = jiffies + HZ;
2765 ++ while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
2766 ++ CANCTRL_REQOP_CONF) {
2767 ++ usleep_range(MCP251X_OST_DELAY_MS * 1000,
2768 ++ MCP251X_OST_DELAY_MS * 1000 * 2);
2769 ++
2770 ++ if (time_after(jiffies, timeout)) {
2771 ++ dev_err(&spi->dev,
2772 ++ "MCP251x didn't enter conf mode after reset\n");
2773 ++ return -EBUSY;
2774 ++ }
2775 ++ }
2776 + return 0;
2777 + }
2778 +
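
The reset fix replaces a single post-delay register read, which can sample the chip mid-reset, with a poll loop bounded by a deadline (jiffies + HZ in the driver). The same shape in portable C, with the status read stubbed out:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Stub for "CANSTAT reports config mode"; succeeds on the 4th poll. */
    static bool in_conf_mode(int attempt) { return attempt >= 3; }

    static int wait_for_conf_mode(void)
    {
        time_t deadline = time(NULL) + 1;  /* ~1 s, like jiffies + HZ */
        int attempt = 0;

        while (!in_conf_mode(attempt++)) {
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 5 * 1000 * 1000 };

            nanosleep(&ts, NULL);          /* usleep_range() analogue */
            if (time(NULL) > deadline)
                return -EBUSY;
        }
        return 0;
    }

    int main(void)
    {
        printf("wait_for_conf_mode() -> %d\n", wait_for_conf_mode());
        return 0;
    }
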
2779 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
2780 +index 22c572a09b32..c19e88efe958 100644
2781 +--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
2782 ++++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
2783 +@@ -272,6 +272,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
2784 + port = nfp_port_alloc(app, port_type, repr);
2785 + if (IS_ERR(port)) {
2786 + err = PTR_ERR(port);
2787 ++ kfree(repr_priv);
2788 + nfp_repr_free(repr);
2789 + goto err_reprs_clean;
2790 + }
2791 +diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
2792 +index 4f684cbcdc57..078027bbe002 100644
2793 +--- a/drivers/net/ieee802154/atusb.c
2794 ++++ b/drivers/net/ieee802154/atusb.c
2795 +@@ -1140,10 +1140,11 @@ static void atusb_disconnect(struct usb_interface *interface)
2796 +
2797 + ieee802154_unregister_hw(atusb->hw);
2798 +
2799 ++ usb_put_dev(atusb->usb_dev);
2800 ++
2801 + ieee802154_free_hw(atusb->hw);
2802 +
2803 + usb_set_intfdata(interface, NULL);
2804 +- usb_put_dev(atusb->usb_dev);
2805 +
2806 + pr_debug("%s done\n", __func__);
2807 + }
2808 +diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
2809 +index 2a9d6b0d1f19..80508da3c8b5 100644
2810 +--- a/drivers/ntb/test/ntb_perf.c
2811 ++++ b/drivers/ntb/test/ntb_perf.c
2812 +@@ -1373,7 +1373,7 @@ static int perf_setup_peer_mw(struct perf_peer *peer)
2813 + int ret;
2814 +
2815 + /* Get outbound MW parameters and map it */
2816 +- ret = ntb_peer_mw_get_addr(perf->ntb, peer->gidx, &phys_addr,
2817 ++ ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
2818 + &peer->outbuf_size);
2819 + if (ret)
2820 + return ret;
2821 +diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
2822 +index 2ba22cd1331b..54a633e8cb5d 100644
2823 +--- a/drivers/nvdimm/bus.c
2824 ++++ b/drivers/nvdimm/bus.c
2825 +@@ -189,7 +189,7 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
2826 + sector_t sector;
2827 +
2828 + /* make sure device is a region */
2829 +- if (!is_nd_pmem(dev))
2830 ++ if (!is_memory(dev))
2831 + return 0;
2832 +
2833 + nd_region = to_nd_region(dev);
2834 +diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
2835 +index f9130cc157e8..22224b21c34d 100644
2836 +--- a/drivers/nvdimm/region.c
2837 ++++ b/drivers/nvdimm/region.c
2838 +@@ -42,7 +42,7 @@ static int nd_region_probe(struct device *dev)
2839 + if (rc)
2840 + return rc;
2841 +
2842 +- if (is_nd_pmem(&nd_region->dev)) {
2843 ++ if (is_memory(&nd_region->dev)) {
2844 + struct resource ndr_res;
2845 +
2846 + if (devm_init_badblocks(dev, &nd_region->bb))
2847 +@@ -131,7 +131,7 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
2848 + struct nd_region *nd_region = to_nd_region(dev);
2849 + struct resource res;
2850 +
2851 +- if (is_nd_pmem(&nd_region->dev)) {
2852 ++ if (is_memory(&nd_region->dev)) {
2853 + res.start = nd_region->ndr_start;
2854 + res.end = nd_region->ndr_start +
2855 + nd_region->ndr_size - 1;
2856 +diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
2857 +index 0303296e6d5b..609fc450522a 100644
2858 +--- a/drivers/nvdimm/region_devs.c
2859 ++++ b/drivers/nvdimm/region_devs.c
2860 +@@ -633,11 +633,11 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
2861 + if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
2862 + return 0;
2863 +
2864 +- if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
2865 ++ if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
2866 + return 0;
2867 +
2868 + if (a == &dev_attr_resource.attr) {
2869 +- if (is_nd_pmem(dev))
2870 ++ if (is_memory(dev))
2871 + return 0400;
2872 + else
2873 + return 0;
2874 +diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
2875 +index fd2dbd7eed7b..52d4fa4161dc 100644
2876 +--- a/drivers/pci/controller/vmd.c
2877 ++++ b/drivers/pci/controller/vmd.c
2878 +@@ -31,6 +31,9 @@
2879 + #define PCI_REG_VMLOCK 0x70
2880 + #define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
2881 +
2882 ++#define MB2_SHADOW_OFFSET 0x2000
2883 ++#define MB2_SHADOW_SIZE 16
2884 ++
2885 + enum vmd_features {
2886 + /*
2887 + * Device may contain registers which hint the physical location of the
2888 +@@ -600,7 +603,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
2889 + u32 vmlock;
2890 + int ret;
2891 +
2892 +- membar2_offset = 0x2018;
2893 ++ membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
2894 + ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
2895 + if (ret || vmlock == ~0)
2896 + return -ENODEV;
2897 +@@ -612,9 +615,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
2898 + if (!membar2)
2899 + return -ENOMEM;
2900 + offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
2901 +- readq(membar2 + 0x2008);
2902 ++ readq(membar2 + MB2_SHADOW_OFFSET);
2903 + offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
2904 +- readq(membar2 + 0x2010);
2905 ++ readq(membar2 + MB2_SHADOW_OFFSET + 8);
2906 + pci_iounmap(vmd->dev, membar2);
2907 + }
2908 + }
2909 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2910 +index c65465385d8c..6384930a6749 100644
2911 +--- a/drivers/pci/pci.c
2912 ++++ b/drivers/pci/pci.c
2913 +@@ -1366,7 +1366,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
2914 + pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
2915 + bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
2916 + res = pdev->resource + bar_idx;
2917 +- size = order_base_2((resource_size(res) >> 20) | 1) - 1;
2918 ++ size = ilog2(resource_size(res)) - 20;
2919 + ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
2920 + ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
2921 + pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
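
The ReBAR control field encodes a BAR's size as log2(bytes) - 20, so 0 means 1 MiB. The old expression, order_base_2((size >> 20) | 1) - 1, underflows to -1 for a 1 MiB BAR, while ilog2() of the byte size is exact for every supported power-of-two size. Worked out in a standalone program (ilog2 re-derived for the demo):

    #include <stdio.h>

    static unsigned int ilog2(unsigned long long v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned long long sizes[] = { 1ULL << 20, 1ULL << 21, 1ULL << 28 };

        for (unsigned int i = 0; i < 3; i++)
            printf("%llu bytes -> size field %u\n",
                   sizes[i], ilog2(sizes[i]) - 20);
        return 0;
    }
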
2922 +diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
2923 +index 8ba6abf584de..3958ee03eec1 100644
2924 +--- a/drivers/power/supply/sbs-battery.c
2925 ++++ b/drivers/power/supply/sbs-battery.c
2926 +@@ -323,17 +323,22 @@ static int sbs_get_battery_presence_and_health(
2927 + {
2928 + int ret;
2929 +
2930 +- if (psp == POWER_SUPPLY_PROP_PRESENT) {
2931 +- /* Dummy command; if it succeeds, battery is present. */
2932 +- ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
2933 +- if (ret < 0)
2934 +- val->intval = 0; /* battery disconnected */
2935 +- else
2936 +- val->intval = 1; /* battery present */
2937 +- } else { /* POWER_SUPPLY_PROP_HEALTH */
2938 ++ /* Dummy command; if it succeeds, battery is present. */
2939 ++ ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
2940 ++
2941 ++ if (ret < 0) { /* battery not present */
2942 ++ if (psp == POWER_SUPPLY_PROP_PRESENT) {
2943 ++ val->intval = 0;
2944 ++ return 0;
2945 ++ }
2946 ++ return ret;
2947 ++ }
2948 ++
2949 ++ if (psp == POWER_SUPPLY_PROP_PRESENT)
2950 ++ val->intval = 1; /* battery present */
2951 ++ else /* POWER_SUPPLY_PROP_HEALTH */
2952 + /* SBS spec doesn't have a general health command. */
2953 + val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
2954 +- }
2955 +
2956 + return 0;
2957 + }
2958 +@@ -629,12 +634,14 @@ static int sbs_get_property(struct power_supply *psy,
2959 + switch (psp) {
2960 + case POWER_SUPPLY_PROP_PRESENT:
2961 + case POWER_SUPPLY_PROP_HEALTH:
2962 +- if (client->flags & SBS_FLAGS_TI_BQ20Z75)
2963 ++ if (chip->flags & SBS_FLAGS_TI_BQ20Z75)
2964 + ret = sbs_get_ti_battery_presence_and_health(client,
2965 + psp, val);
2966 + else
2967 + ret = sbs_get_battery_presence_and_health(client, psp,
2968 + val);
2969 ++
2970 ++ /* this can only be true if no gpio is used */
2971 + if (psp == POWER_SUPPLY_PROP_PRESENT)
2972 + return 0;
2973 + break;
2974 +diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
2975 +index 0059b24cfdc3..28e1f6413476 100644
2976 +--- a/drivers/pwm/pwm-stm32-lp.c
2977 ++++ b/drivers/pwm/pwm-stm32-lp.c
2978 +@@ -58,6 +58,12 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
2979 + /* Calculate the period and prescaler value */
2980 + div = (unsigned long long)clk_get_rate(priv->clk) * state->period;
2981 + do_div(div, NSEC_PER_SEC);
2982 ++ if (!div) {
2983 ++ /* Clock is too slow to achieve requested period. */
2984 ++ dev_dbg(priv->chip.dev, "Can't reach %u ns\n", state->period);
2985 ++ return -EINVAL;
2986 ++ }
2987 ++
2988 + prd = div;
2989 + while (div > STM32_LPTIM_MAX_ARR) {
2990 + presc++;
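
The zero-div guard matters because prd is produced by integer division; once div truncates to zero, the prescaler loop and the eventual register writes would program a meaningless period. The arithmetic as a runnable check:

    #include <errno.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Ticks per period = clk_rate * period_ns / NSEC_PER_SEC. On a slow
     * clock this truncates to 0 ticks, which the timer cannot represent,
     * so the request must be rejected. */
    static long long period_to_ticks(unsigned long rate_hz,
                                     unsigned long long period_ns)
    {
        unsigned long long div = rate_hz * period_ns / NSEC_PER_SEC;

        return div ? (long long)div : -EINVAL;
    }

    int main(void)
    {
        printf("32768 Hz clock, 100 ms -> %lld ticks\n",
               period_to_ticks(32768, 100000000ULL));
        printf("32768 Hz clock, 10 us  -> %lld (too short)\n",
               period_to_ticks(32768, 10000ULL));
        return 0;
    }
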
2991 +diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
2992 +index 93b2862bd3fa..674d848e377c 100644
2993 +--- a/drivers/s390/cio/ccwgroup.c
2994 ++++ b/drivers/s390/cio/ccwgroup.c
2995 +@@ -372,7 +372,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
2996 + goto error;
2997 + }
2998 + /* Check for trailing stuff. */
2999 +- if (i == num_devices && strlen(buf) > 0) {
3000 ++ if (i == num_devices && buf && strlen(buf) > 0) {
3001 + rc = -EINVAL;
3002 + goto error;
3003 + }
3004 +diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
3005 +index aea502922646..df09ed53ab45 100644
3006 +--- a/drivers/s390/cio/css.c
3007 ++++ b/drivers/s390/cio/css.c
3008 +@@ -1213,6 +1213,8 @@ device_initcall(cio_settle_init);
3009 +
3010 + int sch_is_pseudo_sch(struct subchannel *sch)
3011 + {
3012 ++ if (!sch->dev.parent)
3013 ++ return 0;
3014 + return sch == to_css(sch->dev.parent)->pseudo_subchannel;
3015 + }
3016 +
3017 +diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
3018 +index 0a089cf5c78f..fe6683effd05 100644
3019 +--- a/drivers/staging/erofs/dir.c
3020 ++++ b/drivers/staging/erofs/dir.c
3021 +@@ -100,8 +100,15 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
3022 + unsigned nameoff, maxsize;
3023 +
3024 + dentry_page = read_mapping_page(mapping, i, NULL);
3025 +- if (IS_ERR(dentry_page))
3026 +- continue;
3027 ++ if (dentry_page == ERR_PTR(-ENOMEM)) {
3028 ++ err = -ENOMEM;
3029 ++ break;
3030 ++ } else if (IS_ERR(dentry_page)) {
3031 ++ errln("failed to readdir logical block %u of nid %llu",
3032 ++ i, EROFS_V(dir)->nid);
3033 ++ err = PTR_ERR(dentry_page);
3034 ++ break;
3035 ++ }
3036 +
3037 + lock_page(dentry_page);
3038 + de = (struct erofs_dirent *)kmap(dentry_page);
3039 +diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
3040 +index ad6fe6d9d00a..0f1558c6747e 100644
3041 +--- a/drivers/staging/erofs/unzip_vle.c
3042 ++++ b/drivers/staging/erofs/unzip_vle.c
3043 +@@ -311,7 +311,11 @@ z_erofs_vle_work_lookup(struct super_block *sb,
3044 + /* if multiref is disabled, `primary' is always true */
3045 + primary = true;
3046 +
3047 +- DBG_BUGON(work->pageofs != pageofs);
3048 ++ if (work->pageofs != pageofs) {
3049 ++ DBG_BUGON(1);
3050 ++ erofs_workgroup_put(egrp);
3051 ++ return ERR_PTR(-EIO);
3052 ++ }
3053 +
3054 + /*
3055 + * lock must be taken first to avoid grp->next == NIL between
3056 +@@ -853,6 +857,7 @@ repeat:
3057 + for (i = 0; i < nr_pages; ++i)
3058 + pages[i] = NULL;
3059 +
3060 ++ err = 0;
3061 + z_erofs_pagevec_ctor_init(&ctor,
3062 + Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
3063 +
3064 +@@ -874,8 +879,17 @@ repeat:
3065 + pagenr = z_erofs_onlinepage_index(page);
3066 +
3067 + DBG_BUGON(pagenr >= nr_pages);
3068 +- DBG_BUGON(pages[pagenr]);
3069 +
3070 ++ /*
3071 ++ * currently EROFS doesn't support multiref (deduplication),
3072 ++ * so error out here if one page is mapped multiple times.
3073 ++ */
3074 ++ if (pages[pagenr]) {
3075 ++ DBG_BUGON(1);
3076 ++ SetPageError(pages[pagenr]);
3077 ++ z_erofs_onlinepage_endio(pages[pagenr]);
3078 ++ err = -EIO;
3079 ++ }
3080 + pages[pagenr] = page;
3081 + }
3082 + sparsemem_pages = i;
3083 +@@ -885,7 +899,6 @@ repeat:
3084 + overlapped = false;
3085 + compressed_pages = grp->compressed_pages;
3086 +
3087 +- err = 0;
3088 + for (i = 0; i < clusterpages; ++i) {
3089 + unsigned pagenr;
3090 +
3091 +@@ -911,7 +924,12 @@ repeat:
3092 + pagenr = z_erofs_onlinepage_index(page);
3093 +
3094 + DBG_BUGON(pagenr >= nr_pages);
3095 +- DBG_BUGON(pages[pagenr]);
3096 ++ if (pages[pagenr]) {
3097 ++ DBG_BUGON(1);
3098 ++ SetPageError(pages[pagenr]);
3099 ++ z_erofs_onlinepage_endio(pages[pagenr]);
3100 ++ err = -EIO;
3101 ++ }
3102 + ++sparsemem_pages;
3103 + pages[pagenr] = page;
3104 +
3105 +@@ -1335,19 +1353,18 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
3106 + err = z_erofs_do_read_page(&f, page, &pagepool);
3107 + (void)z_erofs_vle_work_iter_end(&f.builder);
3108 +
3109 +- if (err) {
3110 ++ /* if some compressed clusters are ready, submit them anyway */
3111 ++ z_erofs_submit_and_unzip(&f, &pagepool, true);
3112 ++
3113 ++ if (err)
3114 + errln("%s, failed to read, err [%d]", __func__, err);
3115 +- goto out;
3116 +- }
3117 +
3118 +- z_erofs_submit_and_unzip(&f, &pagepool, true);
3119 +-out:
3120 + if (f.m_iter.mpage != NULL)
3121 + put_page(f.m_iter.mpage);
3122 +
3123 + /* clean up the remaining free pages */
3124 + put_pages_list(&pagepool);
3125 +- return 0;
3126 ++ return err;
3127 + }
3128 +
3129 + static inline int __z_erofs_vle_normalaccess_readpages(
3130 +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
3131 +index bf9721fc2824..be3eafc7682b 100644
3132 +--- a/drivers/thermal/thermal_core.c
3133 ++++ b/drivers/thermal/thermal_core.c
3134 +@@ -296,7 +296,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
3135 + mod_delayed_work(system_freezable_wq, &tz->poll_queue,
3136 + msecs_to_jiffies(delay));
3137 + else
3138 +- cancel_delayed_work(&tz->poll_queue);
3139 ++ cancel_delayed_work_sync(&tz->poll_queue);
3140 + }
3141 +
3142 + static void monitor_thermal_zone(struct thermal_zone_device *tz)
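The one-word thermal_core change is a lifetime fix: cancel_delayed_work() only removes a work item that has not started yet and can return while the callback is still running on another CPU, whereas cancel_delayed_work_sync() additionally waits for a running callback to finish. That is exactly what a teardown path needs before freeing data the callback dereferences; a kernel-style sketch with a hypothetical struct:

	/* Teardown must use the _sync variant so the poll callback cannot
	 * still be executing when the zone is freed. */
	static void my_zone_destroy(struct my_zone *zone)
	{
		cancel_delayed_work_sync(&zone->poll_queue);
		kfree(zone);
	}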
3143 +diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
3144 +index 40c69a533b24..dd5d8ee37928 100644
3145 +--- a/drivers/thermal/thermal_hwmon.c
3146 ++++ b/drivers/thermal/thermal_hwmon.c
3147 +@@ -87,13 +87,17 @@ static struct thermal_hwmon_device *
3148 + thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
3149 + {
3150 + struct thermal_hwmon_device *hwmon;
3151 ++ char type[THERMAL_NAME_LENGTH];
3152 +
3153 + mutex_lock(&thermal_hwmon_list_lock);
3154 +- list_for_each_entry(hwmon, &thermal_hwmon_list, node)
3155 +- if (!strcmp(hwmon->type, tz->type)) {
3156 ++ list_for_each_entry(hwmon, &thermal_hwmon_list, node) {
3157 ++ strcpy(type, tz->type);
3158 ++ strreplace(type, '-', '_');
3159 ++ if (!strcmp(hwmon->type, type)) {
3160 + mutex_unlock(&thermal_hwmon_list_lock);
3161 + return hwmon;
3162 + }
3163 ++ }
3164 + mutex_unlock(&thermal_hwmon_list_lock);
3165 +
3166 + return NULL;
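Background for the lookup fix above: hwmon rejects device names containing '-', so the registration side stores the zone type with dashes rewritten to underscores, and the lookup must normalize the same way before strcmp() or a type such as "cpu-thermal" never matches its own entry. The normalization is just strreplace() from lib/string.c; roughly:

	/* Sketch: normalize a zone type the way the hwmon registration
	 * path does, so lookups compare like with like. */
	static void normalize_type(char *dst, const char *tz_type, size_t len)
	{
		strscpy(dst, tz_type, len);	/* bounded copy */
		strreplace(dst, '-', '_');	/* hwmon names reject '-' */
	}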
3167 +diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
3168 +index 1abe4d021fd2..ffde179a9bb2 100644
3169 +--- a/drivers/watchdog/aspeed_wdt.c
3170 ++++ b/drivers/watchdog/aspeed_wdt.c
3171 +@@ -38,6 +38,7 @@ static const struct aspeed_wdt_config ast2500_config = {
3172 + static const struct of_device_id aspeed_wdt_of_table[] = {
3173 + { .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
3174 + { .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
3175 ++ { .compatible = "aspeed,ast2600-wdt", .data = &ast2500_config },
3176 + { },
3177 + };
3178 + MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
3179 +@@ -264,7 +265,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
3180 + set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
3181 + }
3182 +
3183 +- if (of_device_is_compatible(np, "aspeed,ast2500-wdt")) {
3184 ++ if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
3185 ++ (of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
3186 + u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
3187 +
3188 + reg &= config->ext_pulse_width_mask;
3189 +diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
3190 +index 7e7bdcbbc741..9f3123b04536 100644
3191 +--- a/drivers/watchdog/imx2_wdt.c
3192 ++++ b/drivers/watchdog/imx2_wdt.c
3193 +@@ -55,7 +55,7 @@
3194 +
3195 + #define IMX2_WDT_WMCR 0x08 /* Misc Register */
3196 +
3197 +-#define IMX2_WDT_MAX_TIME 128
3198 ++#define IMX2_WDT_MAX_TIME 128U
3199 + #define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */
3200 +
3201 + #define WDOG_SEC_TO_COUNT(s) ((s * 2 - 1) << 8)
3202 +@@ -180,7 +180,7 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
3203 + {
3204 + unsigned int actual;
3205 +
3206 +- actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
3207 ++ actual = min(new_timeout, IMX2_WDT_MAX_TIME);
3208 + __imx2_wdt_set_timeout(wdog, actual);
3209 + wdog->timeout = new_timeout;
3210 + return 0;
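Two separate problems are fixed in that one line: max_hw_heartbeat_ms is in milliseconds, so multiplying (rather than dividing) by 1000 produced a bound about a million times too large, and the replacement constant needs the U suffix because the kernel's min() macro is type-checked and warns when its operands differ in signedness (new_timeout is unsigned int, a bare 128 is int). The resulting clamp, written out in portable C:

	#define IMX2_WDT_MAX_TIME 128U	/* seconds; unsigned to match the timeout */

	static unsigned int clamp_hw_timeout(unsigned int new_timeout)
	{
		return new_timeout < IMX2_WDT_MAX_TIME ?
		       new_timeout : IMX2_WDT_MAX_TIME;
	}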
3211 +diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
3212 +index 7494dbeb4409..db58aaa4dc59 100644
3213 +--- a/drivers/xen/pci.c
3214 ++++ b/drivers/xen/pci.c
3215 +@@ -29,6 +29,8 @@
3216 + #include "../pci/pci.h"
3217 + #ifdef CONFIG_PCI_MMCONFIG
3218 + #include <asm/pci_x86.h>
3219 ++
3220 ++static int xen_mcfg_late(void);
3221 + #endif
3222 +
3223 + static bool __read_mostly pci_seg_supported = true;
3224 +@@ -40,7 +42,18 @@ static int xen_add_device(struct device *dev)
3225 + #ifdef CONFIG_PCI_IOV
3226 + struct pci_dev *physfn = pci_dev->physfn;
3227 + #endif
3228 +-
3229 ++#ifdef CONFIG_PCI_MMCONFIG
3230 ++ static bool pci_mcfg_reserved = false;
3231 ++ /*
3232 ++ * Reserve MCFG areas in Xen on first invocation, since this may be
3233 ++ * called from inside acpi_init immediately after the MCFG table
3234 ++ * has been parsed.
3235 ++ */
3236 ++ if (!pci_mcfg_reserved) {
3237 ++ xen_mcfg_late();
3238 ++ pci_mcfg_reserved = true;
3239 ++ }
3240 ++#endif
3241 + if (pci_seg_supported) {
3242 + struct {
3243 + struct physdev_pci_device_add add;
3244 +@@ -213,7 +226,7 @@ static int __init register_xen_pci_notifier(void)
3245 + arch_initcall(register_xen_pci_notifier);
3246 +
3247 + #ifdef CONFIG_PCI_MMCONFIG
3248 +-static int __init xen_mcfg_late(void)
3249 ++static int xen_mcfg_late(void)
3250 + {
3251 + struct pci_mmcfg_region *cfg;
3252 + int rc;
3253 +@@ -252,8 +265,4 @@ static int __init xen_mcfg_late(void)
3254 + }
3255 + return 0;
3256 + }
3257 +-/*
3258 +- * Needs to be done after acpi_init which are subsys_initcall.
3259 +- */
3260 +-subsys_initcall_sync(xen_mcfg_late);
3261 + #endif
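Structurally, this hunk demotes xen_mcfg_late() from a subsys_initcall to a lazily invoked helper: initcall ordering could run the MCFG reservation before acpi_init had parsed the table, so the first xen_add_device() call now triggers it, guarded by a function-local static. The guard in isolation (do_expensive_setup() is a stand-in name; this only works because xen_add_device() is not reentered concurrently, and a racy caller would want DO_ONCE() or a lock instead):

	static void ensure_setup(void)
	{
		static bool done;

		if (!done) {
			do_expensive_setup();	/* hypothetical */
			done = true;
		}
	}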
3262 +diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
3263 +index 39c63152a358..454c6826abdb 100644
3264 +--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
3265 ++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
3266 +@@ -55,6 +55,7 @@
3267 + #include <linux/string.h>
3268 + #include <linux/slab.h>
3269 + #include <linux/miscdevice.h>
3270 ++#include <linux/workqueue.h>
3271 +
3272 + #include <xen/xenbus.h>
3273 + #include <xen/xen.h>
3274 +@@ -116,6 +117,8 @@ struct xenbus_file_priv {
3275 + wait_queue_head_t read_waitq;
3276 +
3277 + struct kref kref;
3278 ++
3279 ++ struct work_struct wq;
3280 + };
3281 +
3282 + /* Read out any raw xenbus messages queued up. */
3283 +@@ -300,14 +303,14 @@ static void watch_fired(struct xenbus_watch *watch,
3284 + mutex_unlock(&adap->dev_data->reply_mutex);
3285 + }
3286 +
3287 +-static void xenbus_file_free(struct kref *kref)
3288 ++static void xenbus_worker(struct work_struct *wq)
3289 + {
3290 + struct xenbus_file_priv *u;
3291 + struct xenbus_transaction_holder *trans, *tmp;
3292 + struct watch_adapter *watch, *tmp_watch;
3293 + struct read_buffer *rb, *tmp_rb;
3294 +
3295 +- u = container_of(kref, struct xenbus_file_priv, kref);
3296 ++ u = container_of(wq, struct xenbus_file_priv, wq);
3297 +
3298 + /*
3299 + * No need for locking here because there are no other users,
3300 +@@ -333,6 +336,18 @@ static void xenbus_file_free(struct kref *kref)
3301 + kfree(u);
3302 + }
3303 +
3304 ++static void xenbus_file_free(struct kref *kref)
3305 ++{
3306 ++ struct xenbus_file_priv *u;
3307 ++
3308 ++ /*
3309 ++ * We might be called from within xenbus_thread();
3310 ++ * use a workqueue to avoid deadlocking against it.
3311 ++ */
3312 ++ u = container_of(kref, struct xenbus_file_priv, kref);
3313 ++ schedule_work(&u->wq);
3314 ++}
3315 ++
3316 + static struct xenbus_transaction_holder *xenbus_get_transaction(
3317 + struct xenbus_file_priv *u, uint32_t tx_id)
3318 + {
3319 +@@ -652,6 +667,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
3320 + INIT_LIST_HEAD(&u->watches);
3321 + INIT_LIST_HEAD(&u->read_buffers);
3322 + init_waitqueue_head(&u->read_waitq);
3323 ++ INIT_WORK(&u->wq, xenbus_worker);
3324 +
3325 + mutex_init(&u->reply_mutex);
3326 + mutex_init(&u->msgbuffer_mutex);
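The shape of this xenbus fix deserves a note: a kref release callback runs in whatever context dropped the last reference, which here can be xenbus_thread() itself, while the teardown unregisters watches and so waits on that same thread. Freeing inline would deadlock, so the release handler only schedules a work item and the real cleanup runs later in process context (the INIT_WORK() wiring happens at open time, as in the hunk above). Condensed to the bare pattern with a hypothetical struct:

	struct obj {
		struct kref		kref;
		struct work_struct	work;
	};

	static void obj_worker(struct work_struct *work)
	{
		struct obj *o = container_of(work, struct obj, work);

		/* ...the actual teardown, free to sleep and wait... */
		kfree(o);
	}

	static void obj_release(struct kref *kref)
	{
		struct obj *o = container_of(kref, struct obj, kref);

		schedule_work(&o->work);   /* never tear down in caller's context */
	}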
3327 +diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
3328 +index 05454a7e22dc..550d0b169d7c 100644
3329 +--- a/fs/9p/vfs_file.c
3330 ++++ b/fs/9p/vfs_file.c
3331 +@@ -528,6 +528,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
3332 + v9inode = V9FS_I(inode);
3333 + mutex_lock(&v9inode->v_mutex);
3334 + if (!v9inode->writeback_fid &&
3335 ++ (vma->vm_flags & VM_SHARED) &&
3336 + (vma->vm_flags & VM_WRITE)) {
3337 + /*
3338 + * clone a fid and add it to writeback_fid
3339 +@@ -629,6 +630,8 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
3340 + (vma->vm_end - vma->vm_start - 1),
3341 + };
3342 +
3343 ++ if (!(vma->vm_flags & VM_SHARED))
3344 ++ return;
3345 +
3346 + p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
3347 +
3348 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
3349 +index c06845237cba..8196c21d8623 100644
3350 +--- a/fs/ceph/inode.c
3351 ++++ b/fs/ceph/inode.c
3352 +@@ -807,7 +807,12 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
3353 +
3354 + /* update inode */
3355 + inode->i_rdev = le32_to_cpu(info->rdev);
3356 +- inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
3357 ++ /* directories have fl_stripe_unit set to zero */
3358 ++ if (le32_to_cpu(info->layout.fl_stripe_unit))
3359 ++ inode->i_blkbits =
3360 ++ fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
3361 ++ else
3362 ++ inode->i_blkbits = CEPH_BLOCK_SHIFT;
3363 +
3364 + __ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
3365 +
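The ceph guard exists because i_blkbits is the log2 of the block size and fls(x) - 1 only equals log2(x) for a nonzero power of two; directories report fl_stripe_unit == 0, and fls(0) - 1 underflows to -1. A quick check of the identity (userspace stand-in for the kernel's fls()):

	#include <stdio.h>

	/* fls(): 1-based index of the most significant set bit; fls(0) == 0. */
	static int fls(unsigned int x)
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	int main(void)
	{
		printf("%d\n", fls(4194304) - 1);  /* 4 MiB stripe unit -> 22 */
		printf("%d\n", fls(0) - 1);        /* the directory case: -1 */
		return 0;
	}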
3366 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
3367 +index bfcf11c70bfa..09db6d08614d 100644
3368 +--- a/fs/ceph/mds_client.c
3369 ++++ b/fs/ceph/mds_client.c
3370 +@@ -3640,7 +3640,9 @@ static void delayed_work(struct work_struct *work)
3371 + pr_info("mds%d hung\n", s->s_mds);
3372 + }
3373 + }
3374 +- if (s->s_state < CEPH_MDS_SESSION_OPEN) {
3375 ++ if (s->s_state == CEPH_MDS_SESSION_NEW ||
3376 ++ s->s_state == CEPH_MDS_SESSION_RESTARTING ||
3377 ++ s->s_state == CEPH_MDS_SESSION_REJECTED) {
3378 + /* this mds is failed or recovering, just wait */
3379 + ceph_put_mds_session(s);
3380 + continue;
3381 +diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
3382 +index 8f68181256c0..f057c213c453 100644
3383 +--- a/fs/fuse/cuse.c
3384 ++++ b/fs/fuse/cuse.c
3385 +@@ -518,6 +518,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
3386 + rc = cuse_send_init(cc);
3387 + if (rc) {
3388 + fuse_dev_free(fud);
3389 ++ fuse_conn_put(&cc->fc);
3390 + return rc;
3391 + }
3392 + file->private_data = fud;
3393 +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
3394 +index b7bde12d8cd5..1c0227c78a7b 100644
3395 +--- a/fs/nfs/nfs4xdr.c
3396 ++++ b/fs/nfs/nfs4xdr.c
3397 +@@ -1171,7 +1171,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
3398 + } else
3399 + *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
3400 + }
3401 +- if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
3402 ++ if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) {
3403 + *p++ = cpu_to_be32(label->lfs);
3404 + *p++ = cpu_to_be32(label->pi);
3405 + *p++ = cpu_to_be32(label->len);
3406 +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
3407 +index 4931c3a75f03..c818f9886f61 100644
3408 +--- a/fs/nfs/pnfs.c
3409 ++++ b/fs/nfs/pnfs.c
3410 +@@ -1426,10 +1426,15 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
3411 + const nfs4_stateid *res_stateid = NULL;
3412 + struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
3413 +
3414 +- if (ret == 0) {
3415 +- arg_stateid = &args->stateid;
3416 ++ switch (ret) {
3417 ++ case -NFS4ERR_NOMATCHING_LAYOUT:
3418 ++ break;
3419 ++ case 0:
3420 + if (res->lrs_present)
3421 + res_stateid = &res->stateid;
3422 ++ /* Fallthrough */
3423 ++ default:
3424 ++ arg_stateid = &args->stateid;
3425 + }
3426 + pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
3427 + res_stateid);
3428 +diff --git a/fs/statfs.c b/fs/statfs.c
3429 +index f0216629621d..56f655f757ff 100644
3430 +--- a/fs/statfs.c
3431 ++++ b/fs/statfs.c
3432 +@@ -304,19 +304,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
3433 + static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
3434 + {
3435 + struct compat_statfs64 buf;
3436 +- if (sizeof(ubuf->f_bsize) == 4) {
3437 +- if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
3438 +- kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
3439 +- return -EOVERFLOW;
3440 +- /* f_files and f_ffree may be -1; it's okay
3441 +- * to stuff that into 32 bits */
3442 +- if (kbuf->f_files != 0xffffffffffffffffULL
3443 +- && (kbuf->f_files & 0xffffffff00000000ULL))
3444 +- return -EOVERFLOW;
3445 +- if (kbuf->f_ffree != 0xffffffffffffffffULL
3446 +- && (kbuf->f_ffree & 0xffffffff00000000ULL))
3447 +- return -EOVERFLOW;
3448 +- }
3449 ++
3450 ++ if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
3451 ++ return -EOVERFLOW;
3452 ++
3453 + memset(&buf, 0, sizeof(struct compat_statfs64));
3454 + buf.f_type = kbuf->f_type;
3455 + buf.f_bsize = kbuf->f_bsize;
3456 +diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
3457 +index 9c03a7d5e400..c83478271c2e 100644
3458 +--- a/include/linux/ieee80211.h
3459 ++++ b/include/linux/ieee80211.h
3460 +@@ -3185,4 +3185,57 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
3461 + return true;
3462 + }
3463 +
3464 ++struct element {
3465 ++ u8 id;
3466 ++ u8 datalen;
3467 ++ u8 data[];
3468 ++} __packed;
3469 ++
3470 ++/* element iteration helpers */
3471 ++#define for_each_element(_elem, _data, _datalen) \
3472 ++ for (_elem = (const struct element *)(_data); \
3473 ++ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
3474 ++ (int)sizeof(*_elem) && \
3475 ++ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
3476 ++ (int)sizeof(*_elem) + _elem->datalen; \
3477 ++ _elem = (const struct element *)(_elem->data + _elem->datalen))
3478 ++
3479 ++#define for_each_element_id(element, _id, data, datalen) \
3480 ++ for_each_element(element, data, datalen) \
3481 ++ if (element->id == (_id))
3482 ++
3483 ++#define for_each_element_extid(element, extid, data, datalen) \
3484 ++ for_each_element(element, data, datalen) \
3485 ++ if (element->id == WLAN_EID_EXTENSION && \
3486 ++ element->datalen > 0 && \
3487 ++ element->data[0] == (extid))
3488 ++
3489 ++#define for_each_subelement(sub, element) \
3490 ++ for_each_element(sub, (element)->data, (element)->datalen)
3491 ++
3492 ++#define for_each_subelement_id(sub, id, element) \
3493 ++ for_each_element_id(sub, id, (element)->data, (element)->datalen)
3494 ++
3495 ++#define for_each_subelement_extid(sub, extid, element) \
3496 ++ for_each_element_extid(sub, extid, (element)->data, (element)->datalen)
3497 ++
3498 ++/**
3499 ++ * for_each_element_completed - determine if element parsing consumed all data
3500 ++ * @element: element pointer after for_each_element() or friends
3501 ++ * @data: same data pointer as passed to for_each_element() or friends
3502 ++ * @datalen: same data length as passed to for_each_element() or friends
3503 ++ *
3504 ++ * This function returns %true if all the data was parsed or considered
3505 ++ * while walking the elements. Only use this if your for_each_element()
3506 ++ * loop cannot be broken out of, otherwise it always returns %false.
3507 ++ *
3508 ++ * If some data was malformed, this returns %false since the last parsed
3509 ++ * element will not fill the whole remaining data.
3510 ++ */
3511 ++static inline bool for_each_element_completed(const struct element *element,
3512 ++ const void *data, size_t datalen)
3513 ++{
3514 ++ return (const u8 *)element == (const u8 *)data + datalen;
3515 ++}
3516 ++
3517 + #endif /* LINUX_IEEE80211_H */
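Usage note for the helpers just added: the two length comparisons inside for_each_element() verify, before every iteration, that both the two-byte element header and its claimed datalen still fit in the buffer, so the loop body may touch elem->data without further checks. A typical caller, which is exactly the shape validate_beacon_head() takes later in this patch:

	const struct element *elem;

	for_each_element(elem, data, len) {
		/* elem->id, elem->datalen and elem->data are in bounds here */
	}

	/* True only if the walk consumed the buffer exactly; a truncated or
	 * oversized element leaves a tail behind and must be rejected. */
	if (!for_each_element_completed(elem, data, len))
		return -EINVAL;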
3518 +diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
3519 +index 0d10b7ce0da7..e9d4e389aed9 100644
3520 +--- a/include/linux/sched/mm.h
3521 ++++ b/include/linux/sched/mm.h
3522 +@@ -330,6 +330,8 @@ enum {
3523 +
3524 + static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
3525 + {
3526 ++ if (current->mm != mm)
3527 ++ return;
3528 + if (likely(!(atomic_read(&mm->membarrier_state) &
3529 + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
3530 + return;
3531 +diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
3532 +index fdaaafdc7a00..5165e3b30899 100644
3533 +--- a/include/sound/soc-dapm.h
3534 ++++ b/include/sound/soc-dapm.h
3535 +@@ -353,6 +353,8 @@ struct device;
3536 + #define SND_SOC_DAPM_WILL_PMD 0x80 /* called at start of sequence */
3537 + #define SND_SOC_DAPM_PRE_POST_PMD \
3538 + (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
3539 ++#define SND_SOC_DAPM_PRE_POST_PMU \
3540 ++ (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
3541 +
3542 + /* convenience event type detection */
3543 + #define SND_SOC_DAPM_EVENT_ON(e) \
3544 +diff --git a/kernel/elfcore.c b/kernel/elfcore.c
3545 +index fc482c8e0bd8..57fb4dcff434 100644
3546 +--- a/kernel/elfcore.c
3547 ++++ b/kernel/elfcore.c
3548 +@@ -3,6 +3,7 @@
3549 + #include <linux/fs.h>
3550 + #include <linux/mm.h>
3551 + #include <linux/binfmts.h>
3552 ++#include <linux/elfcore.h>
3553 +
3554 + Elf_Half __weak elf_core_extra_phdrs(void)
3555 + {
3556 +diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
3557 +index 5a0cf5f9008c..82104d3dd18e 100644
3558 +--- a/kernel/locking/qspinlock_paravirt.h
3559 ++++ b/kernel/locking/qspinlock_paravirt.h
3560 +@@ -271,7 +271,7 @@ pv_wait_early(struct pv_node *prev, int loop)
3561 + if ((loop & PV_PREV_CHECK_MASK) != 0)
3562 + return false;
3563 +
3564 +- return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);
3565 ++ return READ_ONCE(prev->state) != vcpu_running;
3566 + }
3567 +
3568 + /*
3569 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
3570 +index f4e050681ba1..78ecdfae25b6 100644
3571 +--- a/kernel/sched/core.c
3572 ++++ b/kernel/sched/core.c
3573 +@@ -1077,7 +1077,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
3574 + if (cpumask_equal(&p->cpus_allowed, new_mask))
3575 + goto out;
3576 +
3577 +- if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
3578 ++ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
3579 ++ if (dest_cpu >= nr_cpu_ids) {
3580 + ret = -EINVAL;
3581 + goto out;
3582 + }
3583 +@@ -1098,7 +1099,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
3584 + if (cpumask_test_cpu(task_cpu(p), new_mask))
3585 + goto out;
3586 +
3587 +- dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
3588 + if (task_running(rq, p) || p->state == TASK_WAKING) {
3589 + struct migration_arg arg = { p, dest_cpu };
3590 + /* Need help from migration thread: drop lock and wait. */
3591 +diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
3592 +index 76e0eaf4654e..dd27e632b1ba 100644
3593 +--- a/kernel/sched/membarrier.c
3594 ++++ b/kernel/sched/membarrier.c
3595 +@@ -235,7 +235,7 @@ static int membarrier_register_private_expedited(int flags)
3596 + * groups, which use the same mm. (CLONE_VM but not
3597 + * CLONE_THREAD).
3598 + */
3599 +- if (atomic_read(&mm->membarrier_state) & state)
3600 ++ if ((atomic_read(&mm->membarrier_state) & state) == state)
3601 + return 0;
3602 + atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED, &mm->membarrier_state);
3603 + if (flags & MEMBARRIER_FLAG_SYNC_CORE)
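The one-character membarrier fix turns an "any bit set" test into an "all bits set" test: with MEMBARRIER_FLAG_SYNC_CORE, state carries two bits, and the old check returned early as soon as the plain private-expedited bit was present, silently skipping the SYNC_CORE half of the registration. The two idioms side by side:

	#include <assert.h>

	int main(void)
	{
		unsigned int cur  = 0x1;	/* only the first bit registered */
		unsigned int want = 0x1 | 0x2;	/* caller asks for both */

		assert(cur & want);		/* any-bit test: wrongly "done" */
		assert((cur & want) != want);	/* all-bits test: keep going */
		return 0;
	}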
3604 +diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
3605 +index a59641fb88b6..a836efd34589 100644
3606 +--- a/kernel/time/tick-broadcast-hrtimer.c
3607 ++++ b/kernel/time/tick-broadcast-hrtimer.c
3608 +@@ -44,34 +44,39 @@ static int bc_shutdown(struct clock_event_device *evt)
3609 + */
3610 + static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
3611 + {
3612 +- int bc_moved;
3613 + /*
3614 +- * We try to cancel the timer first. If the callback is on
3615 +- * flight on some other cpu then we let it handle it. If we
3616 +- * were able to cancel the timer nothing can rearm it as we
3617 +- * own broadcast_lock.
3618 ++ * This is called either from enter/exit idle code or from the
3619 ++ * broadcast handler. In all cases tick_broadcast_lock is held.
3620 + *
3621 +- * However we can also be called from the event handler of
3622 +- * ce_broadcast_hrtimer itself when it expires. We cannot
3623 +- * restart the timer because we are in the callback, but we
3624 +- * can set the expiry time and let the callback return
3625 +- * HRTIMER_RESTART.
3626 ++ * hrtimer_cancel() can be called here neither from the
3627 ++ * broadcast handler nor from the enter/exit idle code. The idle
3628 ++ * code can run into the problem described in bc_shutdown() and the
3629 ++ * broadcast handler cannot wait for itself to complete for obvious
3630 ++ * reasons.
3631 + *
3632 +- * Since we are in the idle loop at this point and because
3633 +- * hrtimer_{start/cancel} functions call into tracing,
3634 +- * calls to these functions must be bound within RCU_NONIDLE.
3635 ++ * Each caller tries to arm the hrtimer on its own CPU, but if the
3636 ++ * hrtimer callback function is currently running, then
3637 ++ * hrtimer_start() cannot move it and the timer stays on the CPU on
3638 ++ * which it is assigned at the moment.
3639 ++ *
3640 ++ * As this can be called from idle code, the hrtimer_start()
3641 ++ * invocation has to be wrapped with RCU_NONIDLE() as
3642 ++ * hrtimer_start() can call into tracing.
3643 + */
3644 +- RCU_NONIDLE({
3645 +- bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
3646 +- if (bc_moved)
3647 +- hrtimer_start(&bctimer, expires,
3648 +- HRTIMER_MODE_ABS_PINNED);});
3649 +- if (bc_moved) {
3650 +- /* Bind the "device" to the cpu */
3651 +- bc->bound_on = smp_processor_id();
3652 +- } else if (bc->bound_on == smp_processor_id()) {
3653 +- hrtimer_set_expires(&bctimer, expires);
3654 +- }
3655 ++ RCU_NONIDLE( {
3656 ++ hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
3657 ++ /*
3658 ++ * The core tick broadcast mode expects bc->bound_on to be set
3659 ++ * correctly to prevent a CPU which has the broadcast hrtimer
3660 ++ * armed from going deep idle.
3661 ++ *
3662 ++ * As tick_broadcast_lock is held, nothing can change the cpu
3663 ++ * base which was just established in hrtimer_start() above. So
3664 ++ * the below access is safe even without holding the hrtimer
3665 ++ * base lock.
3666 ++ */
3667 ++ bc->bound_on = bctimer.base->cpu_base->cpu;
3668 ++ } );
3669 + return 0;
3670 + }
3671 +
3672 +@@ -97,10 +102,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
3673 + {
3674 + ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
3675 +
3676 +- if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
3677 +- if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
3678 +- return HRTIMER_RESTART;
3679 +-
3680 + return HRTIMER_NORESTART;
3681 + }
3682 +
3683 +diff --git a/kernel/time/timer.c b/kernel/time/timer.c
3684 +index fa49cd753dea..ae64cb819a9a 100644
3685 +--- a/kernel/time/timer.c
3686 ++++ b/kernel/time/timer.c
3687 +@@ -1590,24 +1590,26 @@ void timer_clear_idle(void)
3688 + static int collect_expired_timers(struct timer_base *base,
3689 + struct hlist_head *heads)
3690 + {
3691 ++ unsigned long now = READ_ONCE(jiffies);
3692 ++
3693 + /*
3694 + * NOHZ optimization. After a long idle sleep we need to forward the
3695 + * base to current jiffies. Avoid a loop by searching the bitfield for
3696 + * the next expiring timer.
3697 + */
3698 +- if ((long)(jiffies - base->clk) > 2) {
3699 ++ if ((long)(now - base->clk) > 2) {
3700 + unsigned long next = __next_timer_interrupt(base);
3701 +
3702 + /*
3703 + * If the next timer is ahead of time forward to current
3704 + * jiffies, otherwise forward to the next expiry time:
3705 + */
3706 +- if (time_after(next, jiffies)) {
3707 ++ if (time_after(next, now)) {
3708 + /*
3709 + * The call site will increment base->clk and then
3710 + * terminate the expiry loop immediately.
3711 + */
3712 +- base->clk = jiffies;
3713 ++ base->clk = now;
3714 + return 0;
3715 + }
3716 + base->clk = next;
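The timer.c hunk is purely a consistency fix: jiffies advances from the tick interrupt, so two reads within one forwarding decision can observe different values, letting base->clk be assigned from a different instant than the one time_after() tested. One READ_ONCE() snapshot makes every comparison in the block agree; schematically:

	/* Racy: 'jiffies' may tick between the reads. */
	if ((long)(jiffies - base->clk) > 2 && time_after(next, jiffies))
		base->clk = jiffies;		/* a third, different read */

	/* Coherent: one snapshot serves every comparison. */
	unsigned long now = READ_ONCE(jiffies);

	if ((long)(now - base->clk) > 2 && time_after(next, now))
		base->clk = now;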
3717 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
3718 +index 3f34cfb66a85..bdf104596d12 100644
3719 +--- a/kernel/trace/trace_events_hist.c
3720 ++++ b/kernel/trace/trace_events_hist.c
3721 +@@ -2526,6 +2526,8 @@ static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
3722 + return NULL;
3723 + }
3724 +
3725 ++ alias->var_ref_idx = var_ref->var_ref_idx;
3726 ++
3727 + return alias;
3728 + }
3729 +
3730 +diff --git a/mm/usercopy.c b/mm/usercopy.c
3731 +index 51411f9c4068..e81d11715d95 100644
3732 +--- a/mm/usercopy.c
3733 ++++ b/mm/usercopy.c
3734 +@@ -15,6 +15,7 @@
3735 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3736 +
3737 + #include <linux/mm.h>
3738 ++#include <linux/highmem.h>
3739 + #include <linux/slab.h>
3740 + #include <linux/sched.h>
3741 + #include <linux/sched/task.h>
3742 +@@ -231,7 +232,12 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
3743 + if (!virt_addr_valid(ptr))
3744 + return;
3745 +
3746 +- page = virt_to_head_page(ptr);
3747 ++ /*
3748 ++ * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
3749 ++ * highmem page or fallback to virt_to_page(). The following
3750 ++ * is effectively a highmem-aware virt_to_head_page().
3751 ++ */
3752 ++ page = compound_head(kmap_to_page((void *)ptr));
3753 +
3754 + if (PageSlab(page)) {
3755 + /* Check slab allocator for flags and size. */
3756 +diff --git a/net/9p/client.c b/net/9p/client.c
3757 +index b615aae5a0f8..d62f83f93d7b 100644
3758 +--- a/net/9p/client.c
3759 ++++ b/net/9p/client.c
3760 +@@ -296,6 +296,7 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size)
3761 +
3762 + p9pdu_reset(&req->tc);
3763 + p9pdu_reset(&req->rc);
3764 ++ req->t_err = 0;
3765 + req->status = REQ_STATUS_ALLOC;
3766 + init_waitqueue_head(&req->wq);
3767 + INIT_LIST_HEAD(&req->req_list);
3768 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
3769 +index 2145581d7b3d..24fddf032279 100644
3770 +--- a/net/netfilter/nf_tables_api.c
3771 ++++ b/net/netfilter/nf_tables_api.c
3772 +@@ -3429,8 +3429,11 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
3773 + NFT_SET_OBJECT))
3774 + return -EINVAL;
3775 + /* Only one of these operations is supported */
3776 +- if ((flags & (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT)) ==
3777 +- (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT))
3778 ++ if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
3779 ++ (NFT_SET_MAP | NFT_SET_OBJECT))
3780 ++ return -EOPNOTSUPP;
3781 ++ if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
3782 ++ (NFT_SET_EVAL | NFT_SET_OBJECT))
3783 + return -EOPNOTSUPP;
3784 + }
3785 +
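That rewritten check is not cosmetic: the old condition fired only when all three of NFT_SET_MAP, NFT_SET_EVAL and NFT_SET_OBJECT were set at once, so the unsupported pairs slipped through, and MAP together with EVAL is now deliberately legal (which is why the nft_lookup.c hunk below drops its NFT_SET_EVAL rejection). Testing exclusive pairs looks like this (bit values are illustrative, not the real uapi ones):

	#include <stdbool.h>

	#define SET_MAP    0x1
	#define SET_EVAL   0x2
	#define SET_OBJECT 0x4

	static bool flags_supported(unsigned int f)
	{
		if ((f & (SET_MAP | SET_OBJECT)) == (SET_MAP | SET_OBJECT))
			return false;	/* map + object: unsupported pair */
		if ((f & (SET_EVAL | SET_OBJECT)) == (SET_EVAL | SET_OBJECT))
			return false;	/* eval + object: unsupported pair */
		return true;		/* map + eval is now fine */
	}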
3786 +diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
3787 +index 161c3451a747..55754d9939b5 100644
3788 +--- a/net/netfilter/nft_lookup.c
3789 ++++ b/net/netfilter/nft_lookup.c
3790 +@@ -76,9 +76,6 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
3791 + if (IS_ERR(set))
3792 + return PTR_ERR(set);
3793 +
3794 +- if (set->flags & NFT_SET_EVAL)
3795 +- return -EOPNOTSUPP;
3796 +-
3797 + priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
3798 + err = nft_validate_register_load(priv->sreg, set->klen);
3799 + if (err < 0)
3800 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3801 +index 6168db3c35e4..334e3181f1c5 100644
3802 +--- a/net/wireless/nl80211.c
3803 ++++ b/net/wireless/nl80211.c
3804 +@@ -200,6 +200,38 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
3805 + return __cfg80211_rdev_from_attrs(netns, info->attrs);
3806 + }
3807 +
3808 ++static int validate_beacon_head(const struct nlattr *attr,
3809 ++ struct netlink_ext_ack *extack)
3810 ++{
3811 ++ const u8 *data = nla_data(attr);
3812 ++ unsigned int len = nla_len(attr);
3813 ++ const struct element *elem;
3814 ++ const struct ieee80211_mgmt *mgmt = (void *)data;
3815 ++ unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
3816 ++ u.beacon.variable);
3817 ++
3818 ++ if (len < fixedlen)
3819 ++ goto err;
3820 ++
3821 ++ if (ieee80211_hdrlen(mgmt->frame_control) !=
3822 ++ offsetof(struct ieee80211_mgmt, u.beacon))
3823 ++ goto err;
3824 ++
3825 ++ data += fixedlen;
3826 ++ len -= fixedlen;
3827 ++
3828 ++ for_each_element(elem, data, len) {
3829 ++ /* nothing */
3830 ++ }
3831 ++
3832 ++ if (for_each_element_completed(elem, data, len))
3833 ++ return 0;
3834 ++
3835 ++err:
3836 ++ NL_SET_ERR_MSG_ATTR(extack, attr, "malformed beacon head");
3837 ++ return -EINVAL;
3838 ++}
3839 ++
3840 + /* policy for the attributes */
3841 + static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
3842 + [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
3843 +@@ -2299,6 +2331,8 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
3844 +
3845 + control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
3846 +
3847 ++ memset(chandef, 0, sizeof(*chandef));
3848 ++
3849 + chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
3850 + chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
3851 + chandef->center_freq1 = control_freq;
3852 +@@ -2819,7 +2853,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
3853 +
3854 + if (rdev->ops->get_channel) {
3855 + int ret;
3856 +- struct cfg80211_chan_def chandef;
3857 ++ struct cfg80211_chan_def chandef = {};
3858 +
3859 + ret = rdev_get_channel(rdev, wdev, &chandef);
3860 + if (ret == 0) {
3861 +@@ -4014,6 +4048,12 @@ static int nl80211_parse_beacon(struct nlattr *attrs[],
3862 + memset(bcn, 0, sizeof(*bcn));
3863 +
3864 + if (attrs[NL80211_ATTR_BEACON_HEAD]) {
3865 ++ int ret = validate_beacon_head(attrs[NL80211_ATTR_BEACON_HEAD],
3866 ++ NULL);
3867 ++
3868 ++ if (ret)
3869 ++ return ret;
3870 ++
3871 + bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]);
3872 + bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]);
3873 + if (!bcn->head_len)
3874 +diff --git a/net/wireless/reg.c b/net/wireless/reg.c
3875 +index d8ebf4f0ef6e..cccbf845079c 100644
3876 +--- a/net/wireless/reg.c
3877 ++++ b/net/wireless/reg.c
3878 +@@ -2095,7 +2095,7 @@ static void reg_call_notifier(struct wiphy *wiphy,
3879 +
3880 + static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
3881 + {
3882 +- struct cfg80211_chan_def chandef;
3883 ++ struct cfg80211_chan_def chandef = {};
3884 + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
3885 + enum nl80211_iftype iftype;
3886 +
3887 +diff --git a/net/wireless/scan.c b/net/wireless/scan.c
3888 +index d0e7472dd9fd..e5d61ba837ad 100644
3889 +--- a/net/wireless/scan.c
3890 ++++ b/net/wireless/scan.c
3891 +@@ -484,6 +484,8 @@ const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
3892 + const u8 *match, int match_len,
3893 + int match_offset)
3894 + {
3895 ++ const struct element *elem;
3896 ++
3897 + /* match_offset can't be smaller than 2, unless match_len is
3898 + * zero, in which case match_offset must be zero as well.
3899 + */
3900 +@@ -491,14 +493,10 @@ const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
3901 + (!match_len && match_offset)))
3902 + return NULL;
3903 +
3904 +- while (len >= 2 && len >= ies[1] + 2) {
3905 +- if ((ies[0] == eid) &&
3906 +- (ies[1] + 2 >= match_offset + match_len) &&
3907 +- !memcmp(ies + match_offset, match, match_len))
3908 +- return ies;
3909 +-
3910 +- len -= ies[1] + 2;
3911 +- ies += ies[1] + 2;
3912 ++ for_each_element_id(elem, eid, ies, len) {
3913 ++ if (elem->datalen >= match_offset - 2 + match_len &&
3914 ++ !memcmp(elem->data + match_offset - 2, match, match_len))
3915 ++ return (void *)elem;
3916 + }
3917 +
3918 + return NULL;
3919 +diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
3920 +index 06943d9c9835..4f0cfb8cc682 100644
3921 +--- a/net/wireless/wext-compat.c
3922 ++++ b/net/wireless/wext-compat.c
3923 +@@ -800,7 +800,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
3924 + {
3925 + struct wireless_dev *wdev = dev->ieee80211_ptr;
3926 + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
3927 +- struct cfg80211_chan_def chandef;
3928 ++ struct cfg80211_chan_def chandef = {};
3929 + int ret;
3930 +
3931 + switch (wdev->iftype) {
3932 +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
3933 +index d9e7728027c6..f63b4bd45d60 100644
3934 +--- a/security/integrity/ima/ima_crypto.c
3935 ++++ b/security/integrity/ima/ima_crypto.c
3936 +@@ -271,8 +271,16 @@ static int ima_calc_file_hash_atfm(struct file *file,
3937 + rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
3938 + rc = integrity_kernel_read(file, offset, rbuf[active],
3939 + rbuf_len);
3940 +- if (rc != rbuf_len)
3941 ++ if (rc != rbuf_len) {
3942 ++ if (rc >= 0)
3943 ++ rc = -EINVAL;
3944 ++ /*
3945 ++ * Forward current rc, do not overwrite with return value
3946 ++ * from ahash_wait()
3947 ++ */
3948 ++ ahash_wait(ahash_rc, &wait);
3949 + goto out3;
3950 ++ }
3951 +
3952 + if (rbuf[1] && offset) {
3953 + /* Using two buffers, and it is not the first
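The short-read path above is a "preserve the first error" pattern: the in-flight async hash request still has to be reaped so it is not abandoned mid-flight, but the reaping's return value must not overwrite the original failure, and a short-but-nonnegative read is normalized to -EINVAL. Generically, with hypothetical helper names:

	/* Sketch: reap pending async work on error, report the first rc. */
	static int hash_one_chunk(struct file *file, loff_t offset,
				  void *buf, int want, struct req *req)
	{
		int rc = read_chunk(file, offset, buf, want);

		if (rc != want) {
			if (rc >= 0)
				rc = -EINVAL;	   /* short read: still an error */
			(void)wait_for_async(req); /* reap, ignore its status */
			return rc;		   /* preserve the original rc */
		}
		return 0;
	}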
3954 +diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
3955 +index 18cddf1729a6..64a52d495b1f 100644
3956 +--- a/sound/soc/codecs/sgtl5000.c
3957 ++++ b/sound/soc/codecs/sgtl5000.c
3958 +@@ -31,6 +31,13 @@
3959 + #define SGTL5000_DAP_REG_OFFSET 0x0100
3960 + #define SGTL5000_MAX_REG_OFFSET 0x013A
3961 +
3962 ++/* Delay for the VAG ramp up */
3963 ++#define SGTL5000_VAG_POWERUP_DELAY 500 /* ms */
3964 ++/* Delay for the VAG ramp down */
3965 ++#define SGTL5000_VAG_POWERDOWN_DELAY 500 /* ms */
3966 ++
3967 ++#define SGTL5000_OUTPUTS_MUTE (SGTL5000_HP_MUTE | SGTL5000_LINE_OUT_MUTE)
3968 ++
3969 + /* default value of sgtl5000 registers */
3970 + static const struct reg_default sgtl5000_reg_defaults[] = {
3971 + { SGTL5000_CHIP_DIG_POWER, 0x0000 },
3972 +@@ -116,6 +123,13 @@ enum {
3973 + I2S_LRCLK_STRENGTH_HIGH,
3974 + };
3975 +
3976 ++enum {
3977 ++ HP_POWER_EVENT,
3978 ++ DAC_POWER_EVENT,
3979 ++ ADC_POWER_EVENT,
3980 ++ LAST_POWER_EVENT = ADC_POWER_EVENT
3981 ++};
3982 ++
3983 + /* sgtl5000 private structure in codec */
3984 + struct sgtl5000_priv {
3985 + int sysclk; /* sysclk rate */
3986 +@@ -129,8 +143,109 @@ struct sgtl5000_priv {
3987 + u8 micbias_resistor;
3988 + u8 micbias_voltage;
3989 + u8 lrclk_strength;
3990 ++ u16 mute_state[LAST_POWER_EVENT + 1];
3991 + };
3992 +
3993 ++static inline int hp_sel_input(struct snd_soc_component *component)
3994 ++{
3995 ++ return (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_CTRL) &
3996 ++ SGTL5000_HP_SEL_MASK) >> SGTL5000_HP_SEL_SHIFT;
3997 ++}
3998 ++
3999 ++static inline u16 mute_output(struct snd_soc_component *component,
4000 ++ u16 mute_mask)
4001 ++{
4002 ++ u16 mute_reg = snd_soc_component_read32(component,
4003 ++ SGTL5000_CHIP_ANA_CTRL);
4004 ++
4005 ++ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
4006 ++ mute_mask, mute_mask);
4007 ++ return mute_reg;
4008 ++}
4009 ++
4010 ++static inline void restore_output(struct snd_soc_component *component,
4011 ++ u16 mute_mask, u16 mute_reg)
4012 ++{
4013 ++ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
4014 ++ mute_mask, mute_reg);
4015 ++}
4016 ++
4017 ++static void vag_power_on(struct snd_soc_component *component, u32 source)
4018 ++{
4019 ++ if (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
4020 ++ SGTL5000_VAG_POWERUP)
4021 ++ return;
4022 ++
4023 ++ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
4024 ++ SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
4025 ++
4026 ++ /* When the VAG powers up to form a local loop from Line-In,
4027 ++ * a delay is required to avoid a loud pop.
4028 ++ */
4029 ++ if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN &&
4030 ++ source == HP_POWER_EVENT)
4031 ++ msleep(SGTL5000_VAG_POWERUP_DELAY);
4032 ++}
4033 ++
4034 ++static int vag_power_consumers(struct snd_soc_component *component,
4035 ++ u16 ana_pwr_reg, u32 source)
4036 ++{
4037 ++ int consumers = 0;
4038 ++
4039 ++ /* count DAC/ADC consumers unconditionally */
4040 ++ if (ana_pwr_reg & SGTL5000_DAC_POWERUP)
4041 ++ consumers++;
4042 ++ if (ana_pwr_reg & SGTL5000_ADC_POWERUP)
4043 ++ consumers++;
4044 ++
4045 ++ /*
4046 ++ * If the event comes from HP and Line-In is selected,
4047 ++ * current action is 'DAC to be powered down'.
4048 ++ * As HP_POWERUP is not set when HP muxed to line-in,
4049 ++ * we need to keep VAG power ON.
4050 ++ */
4051 ++ if (source == HP_POWER_EVENT) {
4052 ++ if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN)
4053 ++ consumers++;
4054 ++ } else {
4055 ++ if (ana_pwr_reg & SGTL5000_HP_POWERUP)
4056 ++ consumers++;
4057 ++ }
4058 ++
4059 ++ return consumers;
4060 ++}
4061 ++
4062 ++static void vag_power_off(struct snd_soc_component *component, u32 source)
4063 ++{
4064 ++ u16 ana_pwr = snd_soc_component_read32(component,
4065 ++ SGTL5000_CHIP_ANA_POWER);
4066 ++
4067 ++ if (!(ana_pwr & SGTL5000_VAG_POWERUP))
4068 ++ return;
4069 ++
4070 ++ /*
4071 ++ * This function is called when one of the VAG power consumers is
4072 ++ * going away. Thus, if there is more than one consumer at the
4073 ++ * moment, at least one consumer will definitely remain after the
4074 ++ * current event completes.
4075 ++ * Don't clear VAG_POWERUP if two or more VAG consumers are present:
4076 ++ * - LINE_IN (for HP events) / HP (for DAC/ADC events)
4077 ++ * - DAC
4078 ++ * - ADC
4079 ++ * (the current consumer is disappearing right now)
4080 ++ */
4081 ++ if (vag_power_consumers(component, ana_pwr, source) >= 2)
4082 ++ return;
4083 ++
4084 ++ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
4085 ++ SGTL5000_VAG_POWERUP, 0);
4086 ++ /* In the power-down case, we need to wait 400-1000 ms
4087 ++ * for the VAG to ramp down fully.
4088 ++ * The longer we wait, the smaller the pop.
4089 ++ */
4090 ++ msleep(SGTL5000_VAG_POWERDOWN_DELAY);
4091 ++}
4092 ++
4093 + /*
4094 + * mic_bias power on/off share the same register bits with
4095 + * output impedance of mic bias, when power on mic bias, we
4096 +@@ -162,36 +277,46 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
4097 + return 0;
4098 + }
4099 +
4100 +-/*
4101 +- * As manual described, ADC/DAC only works when VAG powerup,
4102 +- * So enabled VAG before ADC/DAC up.
4103 +- * In power down case, we need wait 400ms when vag fully ramped down.
4104 +- */
4105 +-static int power_vag_event(struct snd_soc_dapm_widget *w,
4106 +- struct snd_kcontrol *kcontrol, int event)
4107 ++static int vag_and_mute_control(struct snd_soc_component *component,
4108 ++ int event, int event_source)
4109 + {
4110 +- struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
4111 +- const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
4112 ++ static const u16 mute_mask[] = {
4113 ++ /*
4114 ++ * Mask for HP_POWER_EVENT.
4115 ++ * Muxing the headphones has to be wrapped with mute/unmute
4116 ++ * of the headphones only.
4117 ++ */
4118 ++ SGTL5000_HP_MUTE,
4119 ++ /*
4120 ++ * Masks for DAC_POWER_EVENT/ADC_POWER_EVENT.
4121 ++ * Muxing the DAC or ADC block has to be wrapped with mute/unmute
4122 ++ * of both headphones and line-out.
4123 ++ */
4124 ++ SGTL5000_OUTPUTS_MUTE,
4125 ++ SGTL5000_OUTPUTS_MUTE
4126 ++ };
4127 ++
4128 ++ struct sgtl5000_priv *sgtl5000 =
4129 ++ snd_soc_component_get_drvdata(component);
4130 +
4131 + switch (event) {
4132 ++ case SND_SOC_DAPM_PRE_PMU:
4133 ++ sgtl5000->mute_state[event_source] =
4134 ++ mute_output(component, mute_mask[event_source]);
4135 ++ break;
4136 + case SND_SOC_DAPM_POST_PMU:
4137 +- snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
4138 +- SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
4139 +- msleep(400);
4140 ++ vag_power_on(component, event_source);
4141 ++ restore_output(component, mute_mask[event_source],
4142 ++ sgtl5000->mute_state[event_source]);
4143 + break;
4144 +-
4145 + case SND_SOC_DAPM_PRE_PMD:
4146 +- /*
4147 +- * Don't clear VAG_POWERUP, when both DAC and ADC are
4148 +- * operational to prevent inadvertently starving the
4149 +- * other one of them.
4150 +- */
4151 +- if ((snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
4152 +- mask) != mask) {
4153 +- snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
4154 +- SGTL5000_VAG_POWERUP, 0);
4155 +- msleep(400);
4156 +- }
4157 ++ sgtl5000->mute_state[event_source] =
4158 ++ mute_output(component, mute_mask[event_source]);
4159 ++ vag_power_off(component, event_source);
4160 ++ break;
4161 ++ case SND_SOC_DAPM_POST_PMD:
4162 ++ restore_output(component, mute_mask[event_source],
4163 ++ sgtl5000->mute_state[event_source]);
4164 + break;
4165 + default:
4166 + break;
4167 +@@ -200,6 +325,41 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
4168 + return 0;
4169 + }
4170 +
4171 ++/*
4172 ++ * Mute the headphone when powering it up/down.
4173 ++ * Control VAG power on HP power path.
4174 ++ */
4175 ++static int headphone_pga_event(struct snd_soc_dapm_widget *w,
4176 ++ struct snd_kcontrol *kcontrol, int event)
4177 ++{
4178 ++ struct snd_soc_component *component =
4179 ++ snd_soc_dapm_to_component(w->dapm);
4180 ++
4181 ++ return vag_and_mute_control(component, event, HP_POWER_EVENT);
4182 ++}
4183 ++
4184 ++/* As the manual describes, the outputs must be muted while the
4185 ++ * ADC/DAC powers up/down to avoid pops.
4186 ++ * Control VAG power on ADC/DAC power path.
4187 ++ */
4188 ++static int adc_updown_depop(struct snd_soc_dapm_widget *w,
4189 ++ struct snd_kcontrol *kcontrol, int event)
4190 ++{
4191 ++ struct snd_soc_component *component =
4192 ++ snd_soc_dapm_to_component(w->dapm);
4193 ++
4194 ++ return vag_and_mute_control(component, event, ADC_POWER_EVENT);
4195 ++}
4196 ++
4197 ++static int dac_updown_depop(struct snd_soc_dapm_widget *w,
4198 ++ struct snd_kcontrol *kcontrol, int event)
4199 ++{
4200 ++ struct snd_soc_component *component =
4201 ++ snd_soc_dapm_to_component(w->dapm);
4202 ++
4203 ++ return vag_and_mute_control(component, event, DAC_POWER_EVENT);
4204 ++}
4205 ++
4206 + /* input sources for ADC */
4207 + static const char *adc_mux_text[] = {
4208 + "MIC_IN", "LINE_IN"
4209 +@@ -272,7 +432,10 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
4210 + mic_bias_event,
4211 + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
4212 +
4213 +- SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
4214 ++ SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
4215 ++ headphone_pga_event,
4216 ++ SND_SOC_DAPM_PRE_POST_PMU |
4217 ++ SND_SOC_DAPM_PRE_POST_PMD),
4218 + SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
4219 +
4220 + SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
4221 +@@ -293,11 +456,12 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
4222 + 0, SGTL5000_CHIP_DIG_POWER,
4223 + 1, 0),
4224 +
4225 +- SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
4226 +- SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
4227 +-
4228 +- SND_SOC_DAPM_PRE("VAG_POWER_PRE", power_vag_event),
4229 +- SND_SOC_DAPM_POST("VAG_POWER_POST", power_vag_event),
4230 ++ SND_SOC_DAPM_ADC_E("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0,
4231 ++ adc_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
4232 ++ SND_SOC_DAPM_PRE_POST_PMD),
4233 ++ SND_SOC_DAPM_DAC_E("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0,
4234 ++ dac_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
4235 ++ SND_SOC_DAPM_PRE_POST_PMD),
4236 + };
4237 +
4238 + /* routes for sgtl5000 */
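The whole sgtl5000 rework above applies one pattern three times (HP, ADC, DAC): every power transition is bracketed by muting the affected outputs in the PRE_ event, remembering the previous mute bits, and restoring them in the matching POST_ event, with the shared VAG rail reference-counted in between. Skeleton of the bracket, eliding the VAG handling shown above:

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
	case SND_SOC_DAPM_PRE_PMD:
		saved = mute_output(component, mask);	/* save + mute */
		break;
	case SND_SOC_DAPM_POST_PMU:
	case SND_SOC_DAPM_POST_PMD:
		restore_output(component, mask, saved);	/* undo the mute */
		break;
	}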
4239 +diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
4240 +index 95a43ccb6dd0..bca0c9e5452c 100644
4241 +--- a/tools/lib/traceevent/Makefile
4242 ++++ b/tools/lib/traceevent/Makefile
4243 +@@ -259,8 +259,8 @@ endef
4244 +
4245 + define do_generate_dynamic_list_file
4246 + symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
4247 +- xargs echo "U W w" | tr ' ' '\n' | sort -u | xargs echo`;\
4248 +- if [ "$$symbol_type" = "U W w" ];then \
4249 ++ xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
4250 ++ if [ "$$symbol_type" = "U W" ];then \
4251 + (echo '{'; \
4252 + $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
4253 + echo '};'; \
4254 +diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
4255 +index 6ccfd13d5cf9..382e476629fb 100644
4256 +--- a/tools/lib/traceevent/event-parse.c
4257 ++++ b/tools/lib/traceevent/event-parse.c
4258 +@@ -254,10 +254,10 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
4259 + errno = ENOMEM;
4260 + return -1;
4261 + }
4262 ++ pevent->cmdlines = cmdlines;
4263 +
4264 + cmdlines[pevent->cmdline_count].comm = strdup(comm);
4265 + if (!cmdlines[pevent->cmdline_count].comm) {
4266 +- free(cmdlines);
4267 + errno = ENOMEM;
4268 + return -1;
4269 + }
4270 +@@ -268,7 +268,6 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
4271 + pevent->cmdline_count++;
4272 +
4273 + qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
4274 +- pevent->cmdlines = cmdlines;
4275 +
4276 + return 0;
4277 + }
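The reordering above fixes a classic realloc() bug: the old code stored the grown array into pevent->cmdlines only after strdup() and qsort(), so the strdup()-failure path freed memory the struct still pointed at, and if realloc() had moved the block the struct pointer was already stale. The safe shape publishes the new pointer immediately and never frees it on a later failure; a self-contained version:

	#include <stdlib.h>
	#include <string.h>

	struct table { char **items; size_t count; };

	static int table_add(struct table *t, const char *s)
	{
		char **grown = realloc(t->items, (t->count + 1) * sizeof(*grown));

		if (!grown)
			return -1;	/* t->items is still valid */
		t->items = grown;	/* publish before anything else can fail */

		grown[t->count] = strdup(s);
		if (!grown[t->count])
			return -1;	/* do NOT free grown: t->items owns it */

		t->count++;
		return 0;
	}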
4278 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
4279 +index 849b3be15bd8..510caedd7319 100644
4280 +--- a/tools/perf/Makefile.config
4281 ++++ b/tools/perf/Makefile.config
4282 +@@ -837,7 +837,7 @@ ifndef NO_JVMTI
4283 + JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
4284 + else
4285 + ifneq (,$(wildcard /usr/sbin/alternatives))
4286 +- JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
4287 ++ JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed -e 's%/jre/bin/java.%%g' -e 's%/bin/java.%%g')
4288 + endif
4289 + endif
4290 + ifndef JDIR
4291 +diff --git a/tools/perf/arch/x86/util/unwind-libunwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
4292 +index 05920e3edf7a..47357973b55b 100644
4293 +--- a/tools/perf/arch/x86/util/unwind-libunwind.c
4294 ++++ b/tools/perf/arch/x86/util/unwind-libunwind.c
4295 +@@ -1,11 +1,11 @@
4296 + // SPDX-License-Identifier: GPL-2.0
4297 +
4298 + #include <errno.h>
4299 ++#include "../../util/debug.h"
4300 + #ifndef REMOTE_UNWIND_LIBUNWIND
4301 + #include <libunwind.h>
4302 + #include "perf_regs.h"
4303 + #include "../../util/unwind.h"
4304 +-#include "../../util/debug.h"
4305 + #endif
4306 +
4307 + #ifdef HAVE_ARCH_X86_64_SUPPORT
4308 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
4309 +index 789962565c9c..6aae10ff954c 100644
4310 +--- a/tools/perf/builtin-stat.c
4311 ++++ b/tools/perf/builtin-stat.c
4312 +@@ -3090,8 +3090,11 @@ int cmd_stat(int argc, const char **argv)
4313 + fprintf(output, "[ perf stat: executing run #%d ... ]\n",
4314 + run_idx + 1);
4315 +
4316 ++ if (run_idx != 0)
4317 ++ perf_evlist__reset_prev_raw_counts(evsel_list);
4318 ++
4319 + status = run_perf_stat(argc, argv, run_idx);
4320 +- if (forever && status != -1) {
4321 ++ if (forever && status != -1 && !interval) {
4322 + print_counters(NULL, argc, argv);
4323 + perf_stat__reset_stats();
4324 + }
4325 +diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
4326 +index 0c70788593c8..3c0d74fc1ff2 100644
4327 +--- a/tools/perf/util/header.c
4328 ++++ b/tools/perf/util/header.c
4329 +@@ -1114,7 +1114,7 @@ static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 lev
4330 +
4331 + scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
4332 + if (sysfs__read_str(file, &cache->map, &len)) {
4333 +- free(cache->map);
4334 ++ free(cache->size);
4335 + free(cache->type);
4336 + return -1;
4337 + }
4338 +diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
4339 +index a0061e0b0fad..6917ba8a0024 100644
4340 +--- a/tools/perf/util/stat.c
4341 ++++ b/tools/perf/util/stat.c
4342 +@@ -154,6 +154,15 @@ static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
4343 + evsel->prev_raw_counts = NULL;
4344 + }
4345 +
4346 ++static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
4347 ++{
4348 ++ if (evsel->prev_raw_counts) {
4349 ++ evsel->prev_raw_counts->aggr.val = 0;
4350 ++ evsel->prev_raw_counts->aggr.ena = 0;
4351 ++ evsel->prev_raw_counts->aggr.run = 0;
4352 ++ }
4353 ++}
4354 ++
4355 + static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
4356 + {
4357 + int ncpus = perf_evsel__nr_cpus(evsel);
4358 +@@ -204,6 +213,14 @@ void perf_evlist__reset_stats(struct perf_evlist *evlist)
4359 + }
4360 + }
4361 +
4362 ++void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
4363 ++{
4364 ++ struct perf_evsel *evsel;
4365 ++
4366 ++ evlist__for_each_entry(evlist, evsel)
4367 ++ perf_evsel__reset_prev_raw_counts(evsel);
4368 ++}
4369 ++
4370 + static void zero_per_pkg(struct perf_evsel *counter)
4371 + {
4372 + if (counter->per_pkg_mask)
4373 +diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
4374 +index 36efb986f7fc..e19abb1635c4 100644
4375 +--- a/tools/perf/util/stat.h
4376 ++++ b/tools/perf/util/stat.h
4377 +@@ -158,6 +158,7 @@ void perf_stat__collect_metric_expr(struct perf_evlist *);
4378 + int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
4379 + void perf_evlist__free_stats(struct perf_evlist *evlist);
4380 + void perf_evlist__reset_stats(struct perf_evlist *evlist);
4381 ++void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist);
4382 +
4383 + int perf_stat_process_counter(struct perf_stat_config *config,
4384 + struct perf_evsel *counter);
4385 +diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
4386 +index 33752e06ff8d..3de57cc8716b 100644
4387 +--- a/tools/testing/nvdimm/test/nfit_test.h
4388 ++++ b/tools/testing/nvdimm/test/nfit_test.h
4389 +@@ -12,6 +12,7 @@
4390 + */
4391 + #ifndef __NFIT_TEST_H__
4392 + #define __NFIT_TEST_H__
4393 ++#include <linux/acpi.h>
4394 + #include <linux/list.h>
4395 + #include <linux/uuid.h>
4396 + #include <linux/ioport.h>
4397 +@@ -234,9 +235,6 @@ struct nd_intel_lss {
4398 + __u32 status;
4399 + } __packed;
4400 +
4401 +-union acpi_object;
4402 +-typedef void *acpi_handle;
4403 +-
4404 + typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
4405 + typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
4406 + const guid_t *guid, u64 rev, u64 func,