Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.1 commit in: /
Date: Fri, 31 May 2019 14:04:26
Message-Id: 1559311442.7b28b2e87d40d965b55c189c5dceb90e6b9d31d2.mpagano@gentoo
1 commit: 7b28b2e87d40d965b55c189c5dceb90e6b9d31d2
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Fri May 31 14:04:02 2019 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Fri May 31 14:04:02 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7b28b2e8
7
8 Linux patch 5.1.6
9
10 Signed-off-by: Mike Pagano <mpagano@gentoo.org>
11
12 0000_README | 4 +
13 1005_linux-5.1.6.patch | 14203 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 14207 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 2431699..7713f53 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -63,6 +63,10 @@ Patch: 1004_linux-5.1.5.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.1.5
23
24 +Patch: 1005_linux-5.1.6.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.1.6
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1005_linux-5.1.6.patch b/1005_linux-5.1.6.patch
33 new file mode 100644
34 index 0000000..897ab6d
35 --- /dev/null
36 +++ b/1005_linux-5.1.6.patch
37 @@ -0,0 +1,14203 @@
38 +diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
39 +index d1e2bb801e1b..6e97a3f771ef 100644
40 +--- a/Documentation/arm64/silicon-errata.txt
41 ++++ b/Documentation/arm64/silicon-errata.txt
42 +@@ -61,6 +61,7 @@ stable kernels.
43 + | ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
44 + | ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 |
45 + | ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
46 ++| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
47 + | ARM | MMU-500 | #841119,#826419 | N/A |
48 + | | | | |
49 + | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
50 +diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
51 +index 5d181fc3cc18..4a78ba8b85bc 100644
52 +--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
53 ++++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
54 +@@ -59,7 +59,8 @@ Required properties:
55 + one for each entry in reset-names.
56 + - reset-names: "phy" for reset of phy block,
57 + "common" for phy common block reset,
58 +- "cfg" for phy's ahb cfg block reset.
59 ++ "cfg" for phy's ahb cfg block reset,
60 ++ "ufsphy" for the PHY reset in the UFS controller.
61 +
62 + For "qcom,ipq8074-qmp-pcie-phy" must contain:
63 + "phy", "common".
64 +@@ -74,7 +75,8 @@ Required properties:
65 + "phy", "common".
66 + For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
67 + "phy", "common".
68 +- For "qcom,sdm845-qmp-ufs-phy": no resets are listed.
69 ++ For "qcom,sdm845-qmp-ufs-phy": must contain:
70 ++ "ufsphy".
71 +
72 + - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
73 + - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
74 +diff --git a/Makefile b/Makefile
75 +index 24a16a544ffd..d8bdd2bb55dc 100644
76 +--- a/Makefile
77 ++++ b/Makefile
78 +@@ -1,7 +1,7 @@
79 + # SPDX-License-Identifier: GPL-2.0
80 + VERSION = 5
81 + PATCHLEVEL = 1
82 +-SUBLEVEL = 5
83 ++SUBLEVEL = 6
84 + EXTRAVERSION =
85 + NAME = Shy Crocodile
86 +
87 +diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
88 +index 07e27f212dc7..d2453e2d3f1f 100644
89 +--- a/arch/arm/include/asm/cp15.h
90 ++++ b/arch/arm/include/asm/cp15.h
91 +@@ -68,6 +68,8 @@
92 + #define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
93 + #define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
94 +
95 ++#define CNTVCT __ACCESS_CP15_64(1, c14)
96 ++
97 + extern unsigned long cr_alignment; /* defined in entry-armv.S */
98 +
99 + static inline unsigned long get_cr(void)
100 +diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
101 +index a9dd619c6c29..7bdbf5d5c47d 100644
102 +--- a/arch/arm/vdso/vgettimeofday.c
103 ++++ b/arch/arm/vdso/vgettimeofday.c
104 +@@ -18,9 +18,9 @@
105 + #include <linux/compiler.h>
106 + #include <linux/hrtimer.h>
107 + #include <linux/time.h>
108 +-#include <asm/arch_timer.h>
109 + #include <asm/barrier.h>
110 + #include <asm/bug.h>
111 ++#include <asm/cp15.h>
112 + #include <asm/page.h>
113 + #include <asm/unistd.h>
114 + #include <asm/vdso_datapage.h>
115 +@@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata)
116 + u64 cycle_now;
117 + u64 nsec;
118 +
119 +- cycle_now = arch_counter_get_cntvct();
120 ++ isb();
121 ++ cycle_now = read_sysreg(CNTVCT);
122 +
123 + cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
124 +
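
The vgettimeofday.c hunk above stops calling arch_counter_get_cntvct() and instead issues an ISB followed by a direct CP15 read of the virtual counter, using the CNTVCT accessor that the cp15.h hunk adds. A minimal standalone sketch of that access pattern, assuming an ARMv7 core whose virtual counter is readable at the calling exception level and GCC/Clang-style inline asm:

static inline unsigned long long read_cntvct(void)
{
	unsigned long long cval;

	/* The ISB keeps the counter read from being speculated ahead of
	 * the surrounding sequence-lock checks. */
	asm volatile("isb" : : : "memory");
	/* MRRC p15, opc1=1, CRm=c14 is the 64-bit CNTVCT encoding, i.e.
	 * what __ACCESS_CP15_64(1, c14) expands to. */
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}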
125 +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
126 +index 7e34b9eba5de..d218729ec852 100644
127 +--- a/arch/arm64/Kconfig
128 ++++ b/arch/arm64/Kconfig
129 +@@ -517,6 +517,24 @@ config ARM64_ERRATUM_1286807
130 +
131 + If unsure, say Y.
132 +
133 ++config ARM64_ERRATUM_1463225
134 ++ bool "Cortex-A76: Software Step might prevent interrupt recognition"
135 ++ default y
136 ++ help
137 ++ This option adds a workaround for Arm Cortex-A76 erratum 1463225.
138 ++
139 ++ On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
140 ++ of a system call instruction (SVC) can prevent recognition of
141 ++ subsequent interrupts when software stepping is disabled in the
142 ++ exception handler of the system call and either kernel debugging
143 ++ is enabled or VHE is in use.
144 ++
145 ++ Work around the erratum by triggering a dummy step exception
146 ++ when handling a system call from a task that is being stepped
147 ++ in a VHE configuration of the kernel.
148 ++
149 ++ If unsure, say Y.
150 ++
151 + config CAVIUM_ERRATUM_22375
152 + bool "Cavium erratum 22375, 24313"
153 + default y
154 +@@ -1347,6 +1365,7 @@ config ARM64_MODULE_PLTS
155 +
156 + config ARM64_PSEUDO_NMI
157 + bool "Support for NMI-like interrupts"
158 + depends on BROKEN # 1556553607-46531-1-git-send-email-julien.thierry@arm.com
159 + select CONFIG_ARM_GIC_V3
160 + help
161 + Adds support for mimicking Non-Maskable Interrupts through the use of
162 +diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
163 +index f6a76e43f39e..4389d5d0ca0f 100644
164 +--- a/arch/arm64/include/asm/cpucaps.h
165 ++++ b/arch/arm64/include/asm/cpucaps.h
166 +@@ -61,7 +61,8 @@
167 + #define ARM64_HAS_GENERIC_AUTH_ARCH 40
168 + #define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
169 + #define ARM64_HAS_IRQ_PRIO_MASKING 42
170 ++#define ARM64_WORKAROUND_1463225 43
171 +
172 +-#define ARM64_NCAPS 43
173 ++#define ARM64_NCAPS 44
174 +
175 + #endif /* __ASM_CPUCAPS_H */
176 +diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
177 +index 6fb2214333a2..2d78ea6932b7 100644
178 +--- a/arch/arm64/include/asm/futex.h
179 ++++ b/arch/arm64/include/asm/futex.h
180 +@@ -58,7 +58,7 @@ do { \
181 + static inline int
182 + arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
183 + {
184 +- int oldval = 0, ret, tmp;
185 ++ int oldval, ret, tmp;
186 + u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
187 +
188 + pagefault_disable();
189 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
190 +index de70c1eabf33..74ebe9693714 100644
191 +--- a/arch/arm64/include/asm/pgtable.h
192 ++++ b/arch/arm64/include/asm/pgtable.h
193 +@@ -478,6 +478,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
194 + return __pmd_to_phys(pmd);
195 + }
196 +
197 ++static inline void pte_unmap(pte_t *pte) { }
198 ++
199 + /* Find an entry in the third-level page table. */
200 + #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
201 +
202 +@@ -486,7 +488,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
203 +
204 + #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
205 + #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
206 +-#define pte_unmap(pte) do { } while (0)
207 + #define pte_unmap_nested(pte) do { } while (0)
208 +
209 + #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
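
Turning pte_unmap() from an empty do { } while (0) macro into an empty static inline looks cosmetic, but the argument is now a real, type-checked function parameter, so a caller whose pte pointer is otherwise only assigned no longer trips unused-variable style warnings. A small user-space illustration of the difference (the pte_t type here is a stand-in, not the kernel's):

#include <stdio.h>

typedef struct { unsigned long val; } pte_t;

/* Old shape: the argument is never evaluated or type-checked. */
#define pte_unmap_macro(pte) do { } while (0)

/* New shape: the argument is consumed and must really be a pte_t *. */
static inline void pte_unmap_inline(pte_t *pte) { (void)pte; }

int main(void)
{
	pte_t pte = { .val = 0 };

	pte_unmap_inline(&pte); /* pte counts as used; wrong types won't compile */
	pte_unmap_macro(12345); /* silently accepted: nothing is checked */
	printf("both forms compiled\n");
	return 0;
}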
210 +diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
211 +index 2b9a63771eda..f89263c8e11a 100644
212 +--- a/arch/arm64/include/asm/vdso_datapage.h
213 ++++ b/arch/arm64/include/asm/vdso_datapage.h
214 +@@ -38,6 +38,7 @@ struct vdso_data {
215 + __u32 tz_minuteswest; /* Whacky timezone stuff */
216 + __u32 tz_dsttime;
217 + __u32 use_syscall;
218 ++ __u32 hrtimer_res;
219 + };
220 +
221 + #endif /* !__ASSEMBLY__ */
222 +diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
223 +index 7f40dcbdd51d..e10e2a5d9ddc 100644
224 +--- a/arch/arm64/kernel/asm-offsets.c
225 ++++ b/arch/arm64/kernel/asm-offsets.c
226 +@@ -94,7 +94,7 @@ int main(void)
227 + DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
228 + DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
229 + DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
230 +- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
231 ++ DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res));
232 + DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
233 + DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
234 + DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
235 +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
236 +index 9950bb0cbd52..87019cd73f22 100644
237 +--- a/arch/arm64/kernel/cpu_errata.c
238 ++++ b/arch/arm64/kernel/cpu_errata.c
239 +@@ -464,6 +464,22 @@ out_printmsg:
240 + }
241 + #endif /* CONFIG_ARM64_SSBD */
242 +
243 ++#ifdef CONFIG_ARM64_ERRATUM_1463225
244 ++DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
245 ++
246 ++static bool
247 ++has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
248 ++ int scope)
249 ++{
250 ++ u32 midr = read_cpuid_id();
251 ++ /* Cortex-A76 r0p0 - r3p1 */
252 ++ struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
253 ++
254 ++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
255 ++ return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
256 ++}
257 ++#endif
258 ++
259 + static void __maybe_unused
260 + cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
261 + {
262 +@@ -738,6 +754,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
263 + .capability = ARM64_WORKAROUND_1165522,
264 + ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
265 + },
266 ++#endif
267 ++#ifdef CONFIG_ARM64_ERRATUM_1463225
268 ++ {
269 ++ .desc = "ARM erratum 1463225",
270 ++ .capability = ARM64_WORKAROUND_1463225,
271 ++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
272 ++ .matches = has_cortex_a76_erratum_1463225,
273 ++ },
274 + #endif
275 + {
276 + }
277 +diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
278 +index ea001241bdd4..00f8b8612b69 100644
279 +--- a/arch/arm64/kernel/cpu_ops.c
280 ++++ b/arch/arm64/kernel/cpu_ops.c
281 +@@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu)
282 + pr_err("%pOF: missing enable-method property\n",
283 + dn);
284 + }
285 ++ of_node_put(dn);
286 + } else {
287 + enable_method = acpi_get_enable_method(cpu);
288 + if (!enable_method) {
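
The one-line cpu_ops.c change plugs a device-tree node leak: of_get_cpu_node() returns its node with an elevated refcount, which the caller must drop when done. The general pattern, sketched in kernel-style C (not a drop-in; the property lookup is illustrative):

struct device_node *dn = of_get_cpu_node(cpu, NULL); /* takes a reference */
if (dn) {
	enable_method = of_get_property(dn, "enable-method", NULL);
	/* ... */
	of_node_put(dn); /* balance the reference, as the hunk now adds */
}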
289 +diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
290 +index b09b6f75f759..06941c1fe418 100644
291 +--- a/arch/arm64/kernel/kaslr.c
292 ++++ b/arch/arm64/kernel/kaslr.c
293 +@@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
294 +
295 + if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
296 + /*
297 +- * Randomize the module region over a 4 GB window covering the
298 ++ * Randomize the module region over a 2 GB window covering the
299 + * kernel. This reduces the risk of modules leaking information
300 + * about the address of the kernel itself, but results in
301 + * branches between modules and the core kernel that are
302 + * resolved via PLTs. (Branches between modules will be
303 + * resolved normally.)
304 + */
305 +- module_range = SZ_4G - (u64)(_end - _stext);
306 +- module_alloc_base = max((u64)_end + offset - SZ_4G,
307 ++ module_range = SZ_2G - (u64)(_end - _stext);
308 ++ module_alloc_base = max((u64)_end + offset - SZ_2G,
309 + (u64)MODULES_VADDR);
310 + } else {
311 + /*
312 +diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
313 +index f713e2fc4d75..1e418e69b58c 100644
314 +--- a/arch/arm64/kernel/module.c
315 ++++ b/arch/arm64/kernel/module.c
316 +@@ -56,7 +56,7 @@ void *module_alloc(unsigned long size)
317 + * can simply omit this fallback in that case.
318 + */
319 + p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
320 +- module_alloc_base + SZ_4G, GFP_KERNEL,
321 ++ module_alloc_base + SZ_2G, GFP_KERNEL,
322 + PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
323 + __builtin_return_address(0));
324 +
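
Both the KASLR module window and the module_alloc() upper bound shrink from 4 GB to 2 GB here. The stable patch does not carry the rationale, but 2 GiB is exactly the reach of a signed 32-bit place-relative reference (the kind CONFIG_HAVE_ARCH_PREL32_RELOCATIONS emits), so one consistent reading is that a 4 GB window could place a module beyond what such a reference can span. A trivial check of that reach:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A signed 32-bit offset relative to its own location spans
	 * [-2 GiB, +2 GiB): anything farther away is unreachable. */
	printf("forward reach: %u bytes (%.2f GiB)\n",
	       (uint32_t)INT32_MAX, INT32_MAX / (double)(1UL << 30));
	return 0;
}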
325 +diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
326 +index 5610ac01c1ec..871c739f060a 100644
327 +--- a/arch/arm64/kernel/syscall.c
328 ++++ b/arch/arm64/kernel/syscall.c
329 +@@ -8,6 +8,7 @@
330 + #include <linux/syscalls.h>
331 +
332 + #include <asm/daifflags.h>
333 ++#include <asm/debug-monitors.h>
334 + #include <asm/fpsimd.h>
335 + #include <asm/syscall.h>
336 + #include <asm/thread_info.h>
337 +@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
338 + int syscall_trace_enter(struct pt_regs *regs);
339 + void syscall_trace_exit(struct pt_regs *regs);
340 +
341 ++#ifdef CONFIG_ARM64_ERRATUM_1463225
342 ++DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
343 ++
344 ++static void cortex_a76_erratum_1463225_svc_handler(void)
345 ++{
346 ++ u32 reg, val;
347 ++
348 ++ if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
349 ++ return;
350 ++
351 ++ if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
352 ++ return;
353 ++
354 ++ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
355 ++ reg = read_sysreg(mdscr_el1);
356 ++ val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
357 ++ write_sysreg(val, mdscr_el1);
358 ++ asm volatile("msr daifclr, #8");
359 ++ isb();
360 ++
361 ++ /* We will have taken a single-step exception by this point */
362 ++
363 ++ write_sysreg(reg, mdscr_el1);
364 ++ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
365 ++}
366 ++#else
367 ++static void cortex_a76_erratum_1463225_svc_handler(void) { }
368 ++#endif /* CONFIG_ARM64_ERRATUM_1463225 */
369 ++
370 + static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
371 + const syscall_fn_t syscall_table[])
372 + {
373 +@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
374 + regs->orig_x0 = regs->regs[0];
375 + regs->syscallno = scno;
376 +
377 ++ cortex_a76_erratum_1463225_svc_handler();
378 + local_daif_restore(DAIF_PROCCTX);
379 + user_exit();
380 +
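
This hunk is one half of the erratum 1463225 workaround; the other half lands in arch/arm64/mm/fault.c further down. Read together, the control flow is a small round trip through the debug exception path, summarized below with names taken from the two hunks:

/*
 * el0_svc_common()
 *   cortex_a76_erratum_1463225_svc_handler()
 *     per-CPU flag <- 1
 *     MDSCR_EL1 |= SS | KDE          (arm kernel single-step)
 *     msr daifclr, #8; isb           (unmask debug -> dummy step exception)
 *       do_debug_exception()
 *         cortex_a76_erratum_1463225_debug_handler()
 *           sees the per-CPU flag, sets regs->pstate |= PSR_D_BIT
 *           (returns with debug exceptions masked again)
 *     MDSCR_EL1 restored, per-CPU flag <- 0
 *   ... normal syscall handling continues, interrupts recognizable
 */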
381 +diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
382 +index 2d419006ad43..ec0bb588d755 100644
383 +--- a/arch/arm64/kernel/vdso.c
384 ++++ b/arch/arm64/kernel/vdso.c
385 +@@ -232,6 +232,9 @@ void update_vsyscall(struct timekeeper *tk)
386 + vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
387 + vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
388 +
389 ++ /* Read without the seqlock held by clock_getres() */
390 ++ WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);
391 ++
392 + if (!use_syscall) {
393 + /* tkr_mono.cycle_last == tkr_raw.cycle_last */
394 + vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
395 +diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
396 +index e8f60112818f..856fee6d3512 100644
397 +--- a/arch/arm64/kernel/vdso/gettimeofday.S
398 ++++ b/arch/arm64/kernel/vdso/gettimeofday.S
399 +@@ -308,13 +308,14 @@ ENTRY(__kernel_clock_getres)
400 + ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
401 + b.ne 1f
402 +
403 +- ldr x2, 5f
404 ++ adr vdso_data, _vdso_data
405 ++ ldr w2, [vdso_data, #CLOCK_REALTIME_RES]
406 + b 2f
407 + 1:
408 + cmp w0, #CLOCK_REALTIME_COARSE
409 + ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
410 + b.ne 4f
411 +- ldr x2, 6f
412 ++ ldr x2, 5f
413 + 2:
414 + cbz x1, 3f
415 + stp xzr, x2, [x1]
416 +@@ -328,8 +329,6 @@ ENTRY(__kernel_clock_getres)
417 + svc #0
418 + ret
419 + 5:
420 +- .quad CLOCK_REALTIME_RES
421 +-6:
422 + .quad CLOCK_COARSE_RES
423 + .cfi_endproc
424 + ENDPROC(__kernel_clock_getres)
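
Previously __kernel_clock_getres() returned a resolution baked in at build time (MONOTONIC_RES_NSEC); with these hunks it loads the live hrtimer_resolution that update_vsyscall() publishes into the vDSO data page. In C, the new fast path is roughly the following (struct and field names follow the hunks; this is a sketch, not the kernel code):

#include <time.h>

struct vdso_data_sketch {
	unsigned int hrtimer_res; /* ns; published by update_vsyscall() */
};

/* One racy-but-atomic 32-bit load. This is why the writer side uses
 * WRITE_ONCE() instead of taking the vDSO sequence lock. */
int clock_getres_fast(const volatile struct vdso_data_sketch *vd,
		      struct timespec *res)
{
	if (res) {
		res->tv_sec = 0;
		res->tv_nsec = vd->hrtimer_res;
	}
	return 0;
}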
425 +diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
426 +index 78c0a72f822c..674860e3e478 100644
427 +--- a/arch/arm64/mm/dma-mapping.c
428 ++++ b/arch/arm64/mm/dma-mapping.c
429 +@@ -249,6 +249,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
430 + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
431 + return ret;
432 +
433 ++ if (!is_vmalloc_addr(cpu_addr)) {
434 ++ unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
435 ++ return __swiotlb_mmap_pfn(vma, pfn, size);
436 ++ }
437 ++
438 + if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
439 + /*
440 + * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
441 +@@ -272,6 +277,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
442 + unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
443 + struct vm_struct *area = find_vm_area(cpu_addr);
444 +
445 ++ if (!is_vmalloc_addr(cpu_addr)) {
446 ++ struct page *page = virt_to_page(cpu_addr);
447 ++ return __swiotlb_get_sgtable_page(sgt, page, size);
448 ++ }
449 ++
450 + if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
451 + /*
452 + * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
453 +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
454 +index 1a7e92ab69eb..9a6099a2c633 100644
455 +--- a/arch/arm64/mm/fault.c
456 ++++ b/arch/arm64/mm/fault.c
457 +@@ -810,14 +810,47 @@ void __init hook_debug_fault_code(int nr,
458 + debug_fault_info[nr].name = name;
459 + }
460 +
461 ++#ifdef CONFIG_ARM64_ERRATUM_1463225
462 ++DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
463 ++
464 ++static int __exception
465 ++cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
466 ++{
467 ++ if (user_mode(regs))
468 ++ return 0;
469 ++
470 ++ if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
471 ++ return 0;
472 ++
473 ++ /*
474 ++ * We've taken a dummy step exception from the kernel to ensure
475 ++ * that interrupts are re-enabled on the syscall path. Return back
476 ++ * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
477 ++ * masked so that we can safely restore the mdscr and get on with
478 ++ * handling the syscall.
479 ++ */
480 ++ regs->pstate |= PSR_D_BIT;
481 ++ return 1;
482 ++}
483 ++#else
484 ++static int __exception
485 ++cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
486 ++{
487 ++ return 0;
488 ++}
489 ++#endif /* CONFIG_ARM64_ERRATUM_1463225 */
490 ++
491 + asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
492 +- unsigned int esr,
493 +- struct pt_regs *regs)
494 ++ unsigned int esr,
495 ++ struct pt_regs *regs)
496 + {
497 + const struct fault_info *inf = esr_to_debug_fault_info(esr);
498 + unsigned long pc = instruction_pointer(regs);
499 + int rv;
500 +
501 ++ if (cortex_a76_erratum_1463225_debug_handler(regs))
502 ++ return 0;
503 ++
504 + /*
505 + * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
506 + * already disabled to preserve the last enabled/disabled addresses.
507 +diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
508 +index 9d9f6f334d3c..3da3e2b1b51b 100644
509 +--- a/arch/powerpc/boot/addnote.c
510 ++++ b/arch/powerpc/boot/addnote.c
511 +@@ -223,7 +223,11 @@ main(int ac, char **av)
512 + PUT_16(E_PHNUM, np + 2);
513 +
514 + /* write back */
515 +- lseek(fd, (long) 0, SEEK_SET);
516 ++ i = lseek(fd, (long) 0, SEEK_SET);
517 ++ if (i < 0) {
518 ++ perror("lseek");
519 ++ exit(1);
520 ++ }
521 + i = write(fd, buf, n);
522 + if (i < 0) {
523 + perror("write");
524 +diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
525 +index 3fad8d499767..5321a11c2835 100644
526 +--- a/arch/powerpc/kernel/head_64.S
527 ++++ b/arch/powerpc/kernel/head_64.S
528 +@@ -968,7 +968,9 @@ start_here_multiplatform:
529 +
530 + /* Restore parameters passed from prom_init/kexec */
531 + mr r3,r31
532 +- bl early_setup /* also sets r13 and SPRG_PACA */
533 ++ LOAD_REG_ADDR(r12, DOTSYM(early_setup))
534 ++ mtctr r12
535 ++ bctrl /* also sets r13 and SPRG_PACA */
536 +
537 + LOAD_REG_ADDR(r3, start_here_common)
538 + ld r4,PACAKMSR(r13)
539 +diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
540 +index 3c6ab22a0c4e..af3c15a1d41e 100644
541 +--- a/arch/powerpc/kernel/watchdog.c
542 ++++ b/arch/powerpc/kernel/watchdog.c
543 +@@ -77,7 +77,7 @@ static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */
544 +
545 + static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */
546 +
547 +-static DEFINE_PER_CPU(struct timer_list, wd_timer);
548 ++static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer);
549 + static DEFINE_PER_CPU(u64, wd_timer_tb);
550 +
551 + /* SMP checker bits */
552 +@@ -293,21 +293,21 @@ out:
553 + nmi_exit();
554 + }
555 +
556 +-static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
557 +-{
558 +- t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
559 +- if (wd_timer_period_ms > 1000)
560 +- t->expires = __round_jiffies_up(t->expires, cpu);
561 +- add_timer_on(t, cpu);
562 +-}
563 +-
564 +-static void wd_timer_fn(struct timer_list *t)
565 ++static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
566 + {
567 + int cpu = smp_processor_id();
568 +
569 ++ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
570 ++ return HRTIMER_NORESTART;
571 ++
572 ++ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
573 ++ return HRTIMER_NORESTART;
574 ++
575 + watchdog_timer_interrupt(cpu);
576 +
577 +- wd_timer_reset(cpu, t);
578 ++ hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms));
579 ++
580 ++ return HRTIMER_RESTART;
581 + }
582 +
583 + void arch_touch_nmi_watchdog(void)
584 +@@ -323,37 +323,22 @@ void arch_touch_nmi_watchdog(void)
585 + }
586 + EXPORT_SYMBOL(arch_touch_nmi_watchdog);
587 +
588 +-static void start_watchdog_timer_on(unsigned int cpu)
589 +-{
590 +- struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
591 +-
592 +- per_cpu(wd_timer_tb, cpu) = get_tb();
593 +-
594 +- timer_setup(t, wd_timer_fn, TIMER_PINNED);
595 +- wd_timer_reset(cpu, t);
596 +-}
597 +-
598 +-static void stop_watchdog_timer_on(unsigned int cpu)
599 +-{
600 +- struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
601 +-
602 +- del_timer_sync(t);
603 +-}
604 +-
605 +-static int start_wd_on_cpu(unsigned int cpu)
606 ++static void start_watchdog(void *arg)
607 + {
608 ++ struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
609 ++ int cpu = smp_processor_id();
610 + unsigned long flags;
611 +
612 + if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
613 + WARN_ON(1);
614 +- return 0;
615 ++ return;
616 + }
617 +
618 + if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
619 +- return 0;
620 ++ return;
621 +
622 + if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
623 +- return 0;
624 ++ return;
625 +
626 + wd_smp_lock(&flags);
627 + cpumask_set_cpu(cpu, &wd_cpus_enabled);
628 +@@ -363,27 +348,40 @@ static int start_wd_on_cpu(unsigned int cpu)
629 + }
630 + wd_smp_unlock(&flags);
631 +
632 +- start_watchdog_timer_on(cpu);
633 ++ *this_cpu_ptr(&wd_timer_tb) = get_tb();
634 +
635 +- return 0;
636 ++ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
637 ++ hrtimer->function = watchdog_timer_fn;
638 ++ hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
639 ++ HRTIMER_MODE_REL_PINNED);
640 + }
641 +
642 +-static int stop_wd_on_cpu(unsigned int cpu)
643 ++static int start_watchdog_on_cpu(unsigned int cpu)
644 + {
645 ++ return smp_call_function_single(cpu, start_watchdog, NULL, true);
646 ++}
647 ++
648 ++static void stop_watchdog(void *arg)
649 ++{
650 ++ struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
651 ++ int cpu = smp_processor_id();
652 + unsigned long flags;
653 +
654 + if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
655 +- return 0; /* Can happen in CPU unplug case */
656 ++ return; /* Can happen in CPU unplug case */
657 +
658 +- stop_watchdog_timer_on(cpu);
659 ++ hrtimer_cancel(hrtimer);
660 +
661 + wd_smp_lock(&flags);
662 + cpumask_clear_cpu(cpu, &wd_cpus_enabled);
663 + wd_smp_unlock(&flags);
664 +
665 + wd_smp_clear_cpu_pending(cpu, get_tb());
666 ++}
667 +
668 +- return 0;
669 ++static int stop_watchdog_on_cpu(unsigned int cpu)
670 ++{
671 ++ return smp_call_function_single(cpu, stop_watchdog, NULL, true);
672 + }
673 +
674 + static void watchdog_calc_timeouts(void)
675 +@@ -402,7 +400,7 @@ void watchdog_nmi_stop(void)
676 + int cpu;
677 +
678 + for_each_cpu(cpu, &wd_cpus_enabled)
679 +- stop_wd_on_cpu(cpu);
680 ++ stop_watchdog_on_cpu(cpu);
681 + }
682 +
683 + void watchdog_nmi_start(void)
684 +@@ -411,7 +409,7 @@ void watchdog_nmi_start(void)
685 +
686 + watchdog_calc_timeouts();
687 + for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
688 +- start_wd_on_cpu(cpu);
689 ++ start_watchdog_on_cpu(cpu);
690 + }
691 +
692 + /*
693 +@@ -423,7 +421,8 @@ int __init watchdog_nmi_probe(void)
694 +
695 + err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
696 + "powerpc/watchdog:online",
697 +- start_wd_on_cpu, stop_wd_on_cpu);
698 ++ start_watchdog_on_cpu,
699 ++ stop_watchdog_on_cpu);
700 + if (err < 0) {
701 + pr_warn("could not be initialized");
702 + return err;
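
The watchdog conversion replaces a pinned timer_list with a per-CPU hrtimer. Two details are worth noting: an hrtimer callback re-arms itself by returning HRTIMER_RESTART after hrtimer_forward_now(), and, unlike add_timer_on(), there is no interface to start an hrtimer on a remote CPU, which is why start/stop are now funneled through smp_call_function_single() so the timer is always armed on the CPU it is pinned to. The skeleton of the pattern (kernel context assumed; do_heartbeat() and period_ms are illustrative):

static enum hrtimer_restart heartbeat_fn(struct hrtimer *t)
{
	do_heartbeat();                                 /* illustrative work */
	hrtimer_forward_now(t, ms_to_ktime(period_ms)); /* schedule the next one */
	return HRTIMER_RESTART;
}

/* Runs on the target CPU via smp_call_function_single(): */
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer->function = heartbeat_fn;
hrtimer_start(timer, ms_to_ktime(period_ms), HRTIMER_MODE_REL_PINNED);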
703 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
704 +index f976676004ad..48c9a97eb2c3 100644
705 +--- a/arch/powerpc/mm/numa.c
706 ++++ b/arch/powerpc/mm/numa.c
707 +@@ -1498,6 +1498,9 @@ int start_topology_update(void)
708 + {
709 + int rc = 0;
710 +
711 ++ if (!topology_updates_enabled)
712 ++ return 0;
713 ++
714 + if (firmware_has_feature(FW_FEATURE_PRRN)) {
715 + if (!prrn_enabled) {
716 + prrn_enabled = 1;
717 +@@ -1531,6 +1534,9 @@ int stop_topology_update(void)
718 + {
719 + int rc = 0;
720 +
721 ++ if (!topology_updates_enabled)
722 ++ return 0;
723 ++
724 + if (prrn_enabled) {
725 + prrn_enabled = 0;
726 + #ifdef CONFIG_SMP
727 +@@ -1588,11 +1594,13 @@ static ssize_t topology_write(struct file *file, const char __user *buf,
728 +
729 + kbuf[read_len] = '\0';
730 +
731 +- if (!strncmp(kbuf, "on", 2))
732 ++ if (!strncmp(kbuf, "on", 2)) {
733 ++ topology_updates_enabled = true;
734 + start_topology_update();
735 +- else if (!strncmp(kbuf, "off", 3))
736 ++ } else if (!strncmp(kbuf, "off", 3)) {
737 + stop_topology_update();
738 +- else
739 ++ topology_updates_enabled = false;
740 ++ } else
741 + return -EINVAL;
742 +
743 + return count;
744 +@@ -1607,9 +1615,7 @@ static const struct file_operations topology_ops = {
745 +
746 + static int topology_update_init(void)
747 + {
748 +- /* Do not poll for changes if disabled at boot */
749 +- if (topology_updates_enabled)
750 +- start_topology_update();
751 ++ start_topology_update();
752 +
753 + if (vphn_enabled)
754 + topology_schedule_update();
755 +diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
756 +index b1c37cc3fa98..2d12f0037e3a 100644
757 +--- a/arch/powerpc/perf/imc-pmu.c
758 ++++ b/arch/powerpc/perf/imc-pmu.c
759 +@@ -487,6 +487,11 @@ static int nest_imc_event_init(struct perf_event *event)
760 + * Get the base memory addresss for this cpu.
761 + */
762 + chip_id = cpu_to_chip_id(event->cpu);
763 ++
764 ++ /* Return, if chip_id is not valid */
765 ++ if (chip_id < 0)
766 ++ return -ENODEV;
767 ++
768 + pcni = pmu->mem_info;
769 + do {
770 + if (pcni->id == chip_id) {
771 +@@ -494,7 +499,7 @@ static int nest_imc_event_init(struct perf_event *event)
772 + break;
773 + }
774 + pcni++;
775 +- } while (pcni);
776 ++ } while (pcni->vbase != 0);
777 +
778 + if (!flag)
779 + return -ENODEV;
780 +diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
781 +index 58a07948c76e..3d27f02695e4 100644
782 +--- a/arch/powerpc/platforms/powernv/opal-imc.c
783 ++++ b/arch/powerpc/platforms/powernv/opal-imc.c
784 +@@ -127,7 +127,7 @@ static int imc_get_mem_addr_nest(struct device_node *node,
785 + nr_chips))
786 + goto error;
787 +
788 +- pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(*pmu_ptr->mem_info),
789 ++ pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
790 + GFP_KERNEL);
791 + if (!pmu_ptr->mem_info)
792 + goto error;
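
These two hunks work as a pair: the old loop condition while (pcni) in imc-pmu.c never became false (pcni is an incremented array pointer, not something that turns NULL), so nest_imc_event_init() could walk off the end of mem_info for an unknown chip id. kcalloc(nr_chips + 1, ...) now leaves one zeroed element at the end, and the loop stops on pcni->vbase != 0, i.e. a sentinel-terminated array. A self-contained illustration:

#include <stdio.h>
#include <stdlib.h>

struct mem_info { int id; unsigned long vbase; };

int main(void)
{
	int nr_chips = 2;
	/* One spare zeroed element acts as the terminator, which is what
	 * the kcalloc(nr_chips + 1, ...) hunk provides. */
	struct mem_info *pcni = calloc(nr_chips + 1, sizeof(*pcni));

	pcni[0] = (struct mem_info){ .id = 0, .vbase = 0x1000 };
	pcni[1] = (struct mem_info){ .id = 8, .vbase = 0x2000 };

	for (struct mem_info *p = pcni; p->vbase != 0; p++)
		printf("chip %d at %#lx\n", p->id, p->vbase);
	free(pcni);
	return 0;
}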
793 +diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
794 +index 5a286b012043..602e7cc26d11 100644
795 +--- a/arch/s390/kernel/kexec_elf.c
796 ++++ b/arch/s390/kernel/kexec_elf.c
797 +@@ -19,10 +19,15 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
798 + struct kexec_buf buf;
799 + const Elf_Ehdr *ehdr;
800 + const Elf_Phdr *phdr;
801 ++ Elf_Addr entry;
802 + int i, ret;
803 +
804 + ehdr = (Elf_Ehdr *)kernel;
805 + buf.image = image;
806 ++ if (image->type == KEXEC_TYPE_CRASH)
807 ++ entry = STARTUP_KDUMP_OFFSET;
808 ++ else
809 ++ entry = ehdr->e_entry;
810 +
811 + phdr = (void *)ehdr + ehdr->e_phoff;
812 + for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
813 +@@ -35,7 +40,7 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
814 + buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
815 + buf.memsz = phdr->p_memsz;
816 +
817 +- if (phdr->p_paddr == 0) {
818 ++ if (entry - phdr->p_paddr < phdr->p_memsz) {
819 + data->kernel_buf = buf.buffer;
820 + data->memsz += STARTUP_NORMAL_OFFSET;
821 +
822 +diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
823 +index 8485d6dc2754..9ebd01219812 100644
824 +--- a/arch/s390/mm/pgtable.c
825 ++++ b/arch/s390/mm/pgtable.c
826 +@@ -410,6 +410,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
827 + return old;
828 + }
829 +
830 ++#ifdef CONFIG_PGSTE
831 + static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
832 + {
833 + pgd_t *pgd;
834 +@@ -427,6 +428,7 @@ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
835 + pmd = pmd_alloc(mm, pud, addr);
836 + return pmd;
837 + }
838 ++#endif
839 +
840 + pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
841 + pmd_t *pmdp, pmd_t new)
842 +diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h
843 +index 8f9bfbf3cdb1..d6cce65b4871 100644
844 +--- a/arch/sh/include/cpu-sh4/cpu/sh7786.h
845 ++++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h
846 +@@ -132,7 +132,7 @@ enum {
847 +
848 + static inline u32 sh7786_mm_sel(void)
849 + {
850 +- return __raw_readl(0xFC400020) & 0x7;
851 ++ return __raw_readl((const volatile void __iomem *)0xFC400020) & 0x7;
852 + }
853 +
854 + #endif /* __CPU_SH7786_H__ */
855 +diff --git a/arch/x86/Makefile b/arch/x86/Makefile
856 +index a587805c6687..56e748a7679f 100644
857 +--- a/arch/x86/Makefile
858 ++++ b/arch/x86/Makefile
859 +@@ -47,7 +47,7 @@ export REALMODE_CFLAGS
860 + export BITS
861 +
862 + ifdef CONFIG_X86_NEED_RELOCS
863 +- LDFLAGS_vmlinux := --emit-relocs
864 ++ LDFLAGS_vmlinux := --emit-relocs --discard-none
865 + endif
866 +
867 + #
868 +diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
869 +index d41de9af7a39..6072f92cb8ea 100644
870 +--- a/arch/x86/events/intel/cstate.c
871 ++++ b/arch/x86/events/intel/cstate.c
872 +@@ -578,6 +578,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
873 + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
874 +
875 + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
876 ++
877 ++ X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
878 + { },
879 + };
880 + MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
881 +diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
882 +index 94dc564146ca..37ebf6fc5415 100644
883 +--- a/arch/x86/events/intel/rapl.c
884 ++++ b/arch/x86/events/intel/rapl.c
885 +@@ -775,6 +775,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
886 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
887 +
888 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
889 ++
890 ++ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init),
891 + {},
892 + };
893 +
894 +diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
895 +index a878e6286e4a..f3f4c2263501 100644
896 +--- a/arch/x86/events/msr.c
897 ++++ b/arch/x86/events/msr.c
898 +@@ -89,6 +89,7 @@ static bool test_intel(int idx)
899 + case INTEL_FAM6_SKYLAKE_X:
900 + case INTEL_FAM6_KABYLAKE_MOBILE:
901 + case INTEL_FAM6_KABYLAKE_DESKTOP:
902 ++ case INTEL_FAM6_ICELAKE_MOBILE:
903 + if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
904 + return true;
905 + break;
906 +diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
907 +index 321fe5f5d0e9..4d5fcd47ab75 100644
908 +--- a/arch/x86/ia32/ia32_signal.c
909 ++++ b/arch/x86/ia32/ia32_signal.c
910 +@@ -61,9 +61,8 @@
911 + } while (0)
912 +
913 + #define RELOAD_SEG(seg) { \
914 +- unsigned int pre = GET_SEG(seg); \
915 ++ unsigned int pre = (seg) | 3; \
916 + unsigned int cur = get_user_seg(seg); \
917 +- pre |= 3; \
918 + if (pre != cur) \
919 + set_user_seg(seg, pre); \
920 + }
921 +@@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
922 + struct sigcontext_32 __user *sc)
923 + {
924 + unsigned int tmpflags, err = 0;
925 ++ u16 gs, fs, es, ds;
926 + void __user *buf;
927 + u32 tmp;
928 +
929 +@@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
930 + current->restart_block.fn = do_no_restart_syscall;
931 +
932 + get_user_try {
933 +- /*
934 +- * Reload fs and gs if they have changed in the signal
935 +- * handler. This does not handle long fs/gs base changes in
936 +- * the handler, but does not clobber them at least in the
937 +- * normal case.
938 +- */
939 +- RELOAD_SEG(gs);
940 +- RELOAD_SEG(fs);
941 +- RELOAD_SEG(ds);
942 +- RELOAD_SEG(es);
943 ++ gs = GET_SEG(gs);
944 ++ fs = GET_SEG(fs);
945 ++ ds = GET_SEG(ds);
946 ++ es = GET_SEG(es);
947 +
948 + COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
949 + COPY(dx); COPY(cx); COPY(ip); COPY(ax);
950 +@@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
951 + buf = compat_ptr(tmp);
952 + } get_user_catch(err);
953 +
954 ++ /*
955 ++ * Reload fs and gs if they have changed in the signal
956 ++ * handler. This does not handle long fs/gs base changes in
957 ++ * the handler, but does not clobber them at least in the
958 ++ * normal case.
959 ++ */
960 ++ RELOAD_SEG(gs);
961 ++ RELOAD_SEG(fs);
962 ++ RELOAD_SEG(ds);
963 ++ RELOAD_SEG(es);
964 ++
965 + err |= fpu__restore_sig(buf, 1);
966 +
967 + force_iret();
968 +diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
969 +index 05861cc08787..0bbb07eaed6b 100644
970 +--- a/arch/x86/include/asm/text-patching.h
971 ++++ b/arch/x86/include/asm/text-patching.h
972 +@@ -39,6 +39,7 @@ extern int poke_int3_handler(struct pt_regs *regs);
973 + extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
974 + extern int after_bootmem;
975 +
976 ++#ifndef CONFIG_UML_X86
977 + static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
978 + {
979 + regs->ip = ip;
980 +@@ -65,6 +66,7 @@ static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
981 + int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
982 + int3_emulate_jmp(regs, func);
983 + }
984 +-#endif
985 ++#endif /* CONFIG_X86_64 */
986 ++#endif /* !CONFIG_UML_X86 */
987 +
988 + #endif /* _ASM_X86_TEXT_PATCHING_H */
989 +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
990 +index 1954dd5552a2..3822cc8ac9d6 100644
991 +--- a/arch/x86/include/asm/uaccess.h
992 ++++ b/arch/x86/include/asm/uaccess.h
993 +@@ -427,10 +427,11 @@ do { \
994 + ({ \
995 + __label__ __pu_label; \
996 + int __pu_err = -EFAULT; \
997 +- __typeof__(*(ptr)) __pu_val; \
998 +- __pu_val = x; \
999 ++ __typeof__(*(ptr)) __pu_val = (x); \
1000 ++ __typeof__(ptr) __pu_ptr = (ptr); \
1001 ++ __typeof__(size) __pu_size = (size); \
1002 + __uaccess_begin(); \
1003 +- __put_user_size(__pu_val, (ptr), (size), __pu_label); \
1004 ++ __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \
1005 + __pu_err = 0; \
1006 + __pu_label: \
1007 + __uaccess_end(); \
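
The uaccess.h change makes __put_user() capture ptr and size into local temporaries so each macro argument is evaluated exactly once, which matters when a caller passes an expression with side effects. A user-space demonstration of the hazard and the fix (using the GCC/Clang __typeof__ extension, as the kernel does):

#include <stdio.h>

/* Naive macro: its ptr argument is evaluated twice. */
#define PUT_TWICE(x, ptr) \
	do { *(ptr) = (x); printf("wrote via %p\n", (void *)(ptr)); } while (0)

/* Hygienic macro: captures the argument once, as the __put_user() hunk
 * now does with __pu_ptr and __pu_size. */
#define PUT_ONCE(x, ptr) \
	do { __typeof__(ptr) _p = (ptr); *_p = (x); \
	     printf("wrote via %p\n", (void *)_p); } while (0)

int main(void)
{
	int buf[4] = { 0 };
	int *p = buf;

	PUT_ONCE(1, p++);  /* p advances exactly once */
	PUT_TWICE(2, p++); /* p advances twice: once per expansion of ptr */
	printf("p ended at index %td\n", p - buf);
	return 0;
}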
1008 +diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
1009 +index 9a79c7808f9c..d7df79fc448c 100644
1010 +--- a/arch/x86/kernel/alternative.c
1011 ++++ b/arch/x86/kernel/alternative.c
1012 +@@ -667,15 +667,29 @@ void __init alternative_instructions(void)
1013 + * handlers seeing an inconsistent instruction while you patch.
1014 + */
1015 + void *__init_or_module text_poke_early(void *addr, const void *opcode,
1016 +- size_t len)
1017 ++ size_t len)
1018 + {
1019 + unsigned long flags;
1020 +- local_irq_save(flags);
1021 +- memcpy(addr, opcode, len);
1022 +- local_irq_restore(flags);
1023 +- sync_core();
1024 +- /* Could also do a CLFLUSH here to speed up CPU recovery; but
1025 +- that causes hangs on some VIA CPUs. */
1026 ++
1027 ++ if (boot_cpu_has(X86_FEATURE_NX) &&
1028 ++ is_module_text_address((unsigned long)addr)) {
1029 ++ /*
1030 ++ * Modules text is marked initially as non-executable, so the
1031 ++ * code cannot be running and speculative code-fetches are
1032 ++ * prevented. Just change the code.
1033 ++ */
1034 ++ memcpy(addr, opcode, len);
1035 ++ } else {
1036 ++ local_irq_save(flags);
1037 ++ memcpy(addr, opcode, len);
1038 ++ local_irq_restore(flags);
1039 ++ sync_core();
1040 ++
1041 ++ /*
1042 ++ * Could also do a CLFLUSH here to speed up CPU recovery; but
1043 ++ * that causes hangs on some VIA CPUs.
1044 ++ */
1045 ++ }
1046 + return addr;
1047 + }
1048 +
1049 +diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
1050 +index cf25405444ab..415621ddb8a2 100644
1051 +--- a/arch/x86/kernel/cpu/hygon.c
1052 ++++ b/arch/x86/kernel/cpu/hygon.c
1053 +@@ -19,6 +19,8 @@
1054 +
1055 + #include "cpu.h"
1056 +
1057 ++#define APICID_SOCKET_ID_BIT 6
1058 ++
1059 + /*
1060 + * nodes_per_socket: Stores the number of nodes per socket.
1061 + * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
1062 +@@ -87,6 +89,9 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
1063 + if (!err)
1064 + c->x86_coreid_bits = get_count_order(c->x86_max_cores);
1065 +
1066 ++ /* Socket ID is ApicId[6] for these processors. */
1067 ++ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
1068 ++
1069 + cacheinfo_hygon_init_llc_id(c, cpu, node_id);
1070 + } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
1071 + u64 value;
1072 +diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
1073 +index 1a7084ba9a3b..9e6a94c208e0 100644
1074 +--- a/arch/x86/kernel/cpu/mce/core.c
1075 ++++ b/arch/x86/kernel/cpu/mce/core.c
1076 +@@ -712,19 +712,49 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
1077 +
1078 + barrier();
1079 + m.status = mce_rdmsrl(msr_ops.status(i));
1080 ++
1081 ++ /* If this entry is not valid, ignore it */
1082 + if (!(m.status & MCI_STATUS_VAL))
1083 + continue;
1084 +
1085 + /*
1086 +- * Uncorrected or signalled events are handled by the exception
1087 +- * handler when it is enabled, so don't process those here.
1088 +- *
1089 +- * TBD do the same check for MCI_STATUS_EN here?
1090 ++ * If we are logging everything (at CPU online) or this
1091 ++ * is a corrected error, then we must log it.
1092 + */
1093 +- if (!(flags & MCP_UC) &&
1094 +- (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
1095 +- continue;
1096 ++ if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
1097 ++ goto log_it;
1098 ++
1099 ++ /*
1100 ++ * Newer Intel systems that support software error
1101 ++ * recovery need to make additional checks. Other
1102 ++ * CPUs should skip over uncorrected errors, but log
1103 ++ * everything else.
1104 ++ */
1105 ++ if (!mca_cfg.ser) {
1106 ++ if (m.status & MCI_STATUS_UC)
1107 ++ continue;
1108 ++ goto log_it;
1109 ++ }
1110 ++
1111 ++ /* Log "not enabled" (speculative) errors */
1112 ++ if (!(m.status & MCI_STATUS_EN))
1113 ++ goto log_it;
1114 ++
1115 ++ /*
1116 ++ * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
1117 ++ * UC == 1 && PCC == 0 && S == 0
1118 ++ */
1119 ++ if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
1120 ++ goto log_it;
1121 ++
1122 ++ /*
1123 ++ * Skip anything else. Presumption is that our read of this
1124 ++ * bank is racing with a machine check. Leave the log alone
1125 ++ * for do_machine_check() to deal with it.
1126 ++ */
1127 ++ continue;
1128 +
1129 ++log_it:
1130 + error_seen = true;
1131 +
1132 + mce_read_aux(&m, i);
1133 +@@ -1451,13 +1481,12 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
1134 + static int __mcheck_cpu_mce_banks_init(void)
1135 + {
1136 + int i;
1137 +- u8 num_banks = mca_cfg.banks;
1138 +
1139 +- mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL);
1140 ++ mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL);
1141 + if (!mce_banks)
1142 + return -ENOMEM;
1143 +
1144 +- for (i = 0; i < num_banks; i++) {
1145 ++ for (i = 0; i < MAX_NR_BANKS; i++) {
1146 + struct mce_bank *b = &mce_banks[i];
1147 +
1148 + b->ctl = -1ULL;
1149 +@@ -1471,28 +1500,19 @@ static int __mcheck_cpu_mce_banks_init(void)
1150 + */
1151 + static int __mcheck_cpu_cap_init(void)
1152 + {
1153 +- unsigned b;
1154 + u64 cap;
1155 ++ u8 b;
1156 +
1157 + rdmsrl(MSR_IA32_MCG_CAP, cap);
1158 +
1159 + b = cap & MCG_BANKCNT_MASK;
1160 +- if (!mca_cfg.banks)
1161 +- pr_info("CPU supports %d MCE banks\n", b);
1162 +-
1163 +- if (b > MAX_NR_BANKS) {
1164 +- pr_warn("Using only %u machine check banks out of %u\n",
1165 +- MAX_NR_BANKS, b);
1166 ++ if (WARN_ON_ONCE(b > MAX_NR_BANKS))
1167 + b = MAX_NR_BANKS;
1168 +- }
1169 +
1170 +- /* Don't support asymmetric configurations today */
1171 +- WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
1172 +- mca_cfg.banks = b;
1173 ++ mca_cfg.banks = max(mca_cfg.banks, b);
1174 +
1175 + if (!mce_banks) {
1176 + int err = __mcheck_cpu_mce_banks_init();
1177 +-
1178 + if (err)
1179 + return err;
1180 + }
1181 +@@ -2459,6 +2479,8 @@ EXPORT_SYMBOL_GPL(mcsafe_key);
1182 +
1183 + static int __init mcheck_late_init(void)
1184 + {
1185 ++ pr_info("Using %d MCE banks\n", mca_cfg.banks);
1186 ++
1187 + if (mca_cfg.recovery)
1188 + static_branch_inc(&mcsafe_key);
1189 +
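
The rewritten machine_check_poll() logic above is easier to follow as a single predicate. A self-contained restatement of the per-bank policy (bit values as in the SDM, matching the kernel's MCI_STATUS_* flags):

#include <stdbool.h>

#define MCI_STATUS_VAL (1ULL << 63)
#define MCI_STATUS_UC  (1ULL << 61)
#define MCI_STATUS_EN  (1ULL << 60)
#define MCI_STATUS_PCC (1ULL << 57)
#define MCI_STATUS_S   (1ULL << 56)

static bool should_log(unsigned long long status, bool forced, bool ser)
{
	if (!(status & MCI_STATUS_VAL))
		return false;                 /* nothing latched in this bank */
	if (forced || !(status & MCI_STATUS_UC))
		return true;                  /* MCP_UC set, or corrected error */
	if (!ser)
		return false;                 /* UC without recovery support: skip */
	if (!(status & MCI_STATUS_EN))
		return true;                  /* "not enabled" (speculative) log */
	if (!(status & (MCI_STATUS_PCC | MCI_STATUS_S)))
		return true;                  /* UCNA: UC=1, PCC=0, S=0 */
	return false;                         /* likely racing with the #MC handler */
}

int main(void)
{
	/* A corrected error (UC=0) is always logged: */
	return should_log(MCI_STATUS_VAL, false, true) ? 0 : 1;
}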
1190 +diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
1191 +index 8492ef7d9015..3f82afd0f46f 100644
1192 +--- a/arch/x86/kernel/cpu/mce/inject.c
1193 ++++ b/arch/x86/kernel/cpu/mce/inject.c
1194 +@@ -46,8 +46,6 @@
1195 + static struct mce i_mce;
1196 + static struct dentry *dfs_inj;
1197 +
1198 +-static u8 n_banks;
1199 +-
1200 + #define MAX_FLAG_OPT_SIZE 4
1201 + #define NBCFG 0x44
1202 +
1203 +@@ -570,9 +568,15 @@ err:
1204 + static int inj_bank_set(void *data, u64 val)
1205 + {
1206 + struct mce *m = (struct mce *)data;
1207 ++ u8 n_banks;
1208 ++ u64 cap;
1209 ++
1210 ++ /* Get bank count on target CPU so we can handle non-uniform values. */
1211 ++ rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
1212 ++ n_banks = cap & MCG_BANKCNT_MASK;
1213 +
1214 + if (val >= n_banks) {
1215 +- pr_err("Non-existent MCE bank: %llu\n", val);
1216 ++ pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu);
1217 + return -EINVAL;
1218 + }
1219 +
1220 +@@ -665,10 +669,6 @@ static struct dfs_node {
1221 + static int __init debugfs_init(void)
1222 + {
1223 + unsigned int i;
1224 +- u64 cap;
1225 +-
1226 +- rdmsrl(MSR_IA32_MCG_CAP, cap);
1227 +- n_banks = cap & MCG_BANKCNT_MASK;
1228 +
1229 + dfs_inj = debugfs_create_dir("mce-inject", NULL);
1230 + if (!dfs_inj)
1231 +diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
1232 +index 5260185cbf7b..8a4a7823451a 100644
1233 +--- a/arch/x86/kernel/cpu/microcode/core.c
1234 ++++ b/arch/x86/kernel/cpu/microcode/core.c
1235 +@@ -418,8 +418,9 @@ static int do_microcode_update(const void __user *buf, size_t size)
1236 + if (ustate == UCODE_ERROR) {
1237 + error = -1;
1238 + break;
1239 +- } else if (ustate == UCODE_OK)
1240 ++ } else if (ustate == UCODE_NEW) {
1241 + apply_microcode_on_target(cpu);
1242 ++ }
1243 + }
1244 +
1245 + return error;
1246 +diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
1247 +index bd553b3af22e..6e0c0ed8e4bf 100644
1248 +--- a/arch/x86/kernel/ftrace.c
1249 ++++ b/arch/x86/kernel/ftrace.c
1250 +@@ -749,6 +749,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
1251 + unsigned long end_offset;
1252 + unsigned long op_offset;
1253 + unsigned long offset;
1254 ++ unsigned long npages;
1255 + unsigned long size;
1256 + unsigned long retq;
1257 + unsigned long *ptr;
1258 +@@ -781,6 +782,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
1259 + return 0;
1260 +
1261 + *tramp_size = size + RET_SIZE + sizeof(void *);
1262 ++ npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
1263 +
1264 + /* Copy ftrace_caller onto the trampoline memory */
1265 + ret = probe_kernel_read(trampoline, (void *)start_offset, size);
1266 +@@ -825,6 +827,12 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
1267 + /* ALLOC_TRAMP flags lets us know we created it */
1268 + ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
1269 +
1270 ++ /*
1271 ++ * Module allocation needs to be completed by making the page
1272 ++ * executable. The page is still writable, which is a security hazard,
1273 ++ * but anyhow ftrace breaks W^X completely.
1274 ++ */
1275 ++ set_memory_x((unsigned long)trampoline, npages);
1276 + return (unsigned long)trampoline;
1277 + fail:
1278 + tramp_free(trampoline, *tramp_size);
1279 +diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
1280 +index 0469cd078db1..b50ac9c7397b 100644
1281 +--- a/arch/x86/kernel/irq_64.c
1282 ++++ b/arch/x86/kernel/irq_64.c
1283 +@@ -26,9 +26,18 @@ int sysctl_panic_on_stackoverflow;
1284 + /*
1285 + * Probabilistic stack overflow check:
1286 + *
1287 +- * Only check the stack in process context, because everything else
1288 +- * runs on the big interrupt stacks. Checking reliably is too expensive,
1289 +- * so we just check from interrupts.
1290 ++ * Regular device interrupts can enter on the following stacks:
1291 ++ *
1292 ++ * - User stack
1293 ++ *
1294 ++ * - Kernel task stack
1295 ++ *
1296 ++ * - Interrupt stack if a device driver reenables interrupts
1297 ++ * which should only happen in really old drivers.
1298 ++ *
1299 ++ * - Debug IST stack
1300 ++ *
1301 ++ * All other contexts are invalid.
1302 + */
1303 + static inline void stack_overflow_check(struct pt_regs *regs)
1304 + {
1305 +@@ -53,8 +62,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
1306 + return;
1307 +
1308 + oist = this_cpu_ptr(&orig_ist);
1309 +- estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
1310 +- estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
1311 ++ estack_bottom = (u64)oist->ist[DEBUG_STACK];
1312 ++ estack_top = estack_bottom - DEBUG_STKSZ + STACK_TOP_MARGIN;
1313 + if (regs->sp >= estack_top && regs->sp <= estack_bottom)
1314 + return;
1315 +
1316 +diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
1317 +index b052e883dd8c..cfa3106faee4 100644
1318 +--- a/arch/x86/kernel/module.c
1319 ++++ b/arch/x86/kernel/module.c
1320 +@@ -87,7 +87,7 @@ void *module_alloc(unsigned long size)
1321 + p = __vmalloc_node_range(size, MODULE_ALIGN,
1322 + MODULES_VADDR + get_module_load_offset(),
1323 + MODULES_END, GFP_KERNEL,
1324 +- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
1325 ++ PAGE_KERNEL, 0, NUMA_NO_NODE,
1326 + __builtin_return_address(0));
1327 + if (p && (kasan_module_alloc(p, size) < 0)) {
1328 + vfree(p);
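
The ftrace.c and module.c hunks are coupled: module_alloc() now returns PAGE_KERNEL (writable, non-executable) memory, so anything that emits code into such an allocation has to flip the pages executable itself, as create_trampoline() now does with set_memory_x(). The resulting contract, sketched (kernel context; size and trampoline as in the ftrace hunk):

void *trampoline = module_alloc(size);           /* now RW + NX */
/* ... copy and patch the trampoline while it is still writable ... */
unsigned long npages = DIV_ROUND_UP(size, PAGE_SIZE);
set_memory_x((unsigned long)trampoline, npages); /* only now make it executable */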
1329 +diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
1330 +index 08dfd4c1a4f9..c8aa58a2bab9 100644
1331 +--- a/arch/x86/kernel/signal.c
1332 ++++ b/arch/x86/kernel/signal.c
1333 +@@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs,
1334 + COPY_SEG_CPL3(cs);
1335 + COPY_SEG_CPL3(ss);
1336 +
1337 +-#ifdef CONFIG_X86_64
1338 +- /*
1339 +- * Fix up SS if needed for the benefit of old DOSEMU and
1340 +- * CRIU.
1341 +- */
1342 +- if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
1343 +- user_64bit_mode(regs)))
1344 +- force_valid_ss(regs);
1345 +-#endif
1346 +-
1347 + get_user_ex(tmpflags, &sc->flags);
1348 + regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
1349 + regs->orig_ax = -1; /* disable syscall checks */
1350 +@@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs,
1351 + buf = (void __user *)buf_val;
1352 + } get_user_catch(err);
1353 +
1354 ++#ifdef CONFIG_X86_64
1355 ++ /*
1356 ++ * Fix up SS if needed for the benefit of old DOSEMU and
1357 ++ * CRIU.
1358 ++ */
1359 ++ if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
1360 ++ force_valid_ss(regs);
1361 ++#endif
1362 ++
1363 + err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
1364 +
1365 + force_iret();
1366 +@@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
1367 + {
1368 + struct rt_sigframe __user *frame;
1369 + void __user *fp = NULL;
1370 ++ unsigned long uc_flags;
1371 + int err = 0;
1372 +
1373 + frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
1374 +@@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
1375 + return -EFAULT;
1376 + }
1377 +
1378 ++ uc_flags = frame_uc_flags(regs);
1379 ++
1380 + put_user_try {
1381 + /* Create the ucontext. */
1382 +- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
1383 ++ put_user_ex(uc_flags, &frame->uc.uc_flags);
1384 + put_user_ex(0, &frame->uc.uc_link);
1385 + save_altstack_ex(&frame->uc.uc_stack, regs->sp);
1386 +
1387 +@@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
1388 + {
1389 + #ifdef CONFIG_X86_X32_ABI
1390 + struct rt_sigframe_x32 __user *frame;
1391 ++ unsigned long uc_flags;
1392 + void __user *restorer;
1393 + int err = 0;
1394 + void __user *fpstate = NULL;
1395 +@@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
1396 + return -EFAULT;
1397 + }
1398 +
1399 ++ uc_flags = frame_uc_flags(regs);
1400 ++
1401 + put_user_try {
1402 + /* Create the ucontext. */
1403 +- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
1404 ++ put_user_ex(uc_flags, &frame->uc.uc_flags);
1405 + put_user_ex(0, &frame->uc.uc_link);
1406 + compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
1407 + put_user_ex(0, &frame->uc.uc__pad0);
1408 +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
1409 +index a5127b2c195f..834659288ba9 100644
1410 +--- a/arch/x86/kernel/vmlinux.lds.S
1411 ++++ b/arch/x86/kernel/vmlinux.lds.S
1412 +@@ -141,11 +141,11 @@ SECTIONS
1413 + *(.text.__x86.indirect_thunk)
1414 + __indirect_thunk_end = .;
1415 + #endif
1416 +-
1417 +- /* End of text section */
1418 +- _etext = .;
1419 + } :text = 0x9090
1420 +
1421 ++ /* End of text section */
1422 ++ _etext = .;
1423 ++
1424 + NOTES :text :note
1425 +
1426 + EXCEPTION_TABLE(16) :text = 0x9090
1427 +diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
1428 +index faa264822cee..007bc654f928 100644
1429 +--- a/arch/x86/kvm/irq.c
1430 ++++ b/arch/x86/kvm/irq.c
1431 +@@ -172,3 +172,10 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
1432 + __kvm_migrate_apic_timer(vcpu);
1433 + __kvm_migrate_pit_timer(vcpu);
1434 + }
1435 ++
1436 ++bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
1437 ++{
1438 ++ bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;
1439 ++
1440 ++ return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm);
1441 ++}
1442 +diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
1443 +index d5005cc26521..fd210cdd4983 100644
1444 +--- a/arch/x86/kvm/irq.h
1445 ++++ b/arch/x86/kvm/irq.h
1446 +@@ -114,6 +114,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
1447 + return mode != KVM_IRQCHIP_NONE;
1448 + }
1449 +
1450 ++bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
1451 + void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
1452 + void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
1453 + void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
1454 +diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
1455 +index 1495a735b38e..50fa9450fcf1 100644
1456 +--- a/arch/x86/kvm/pmu_amd.c
1457 ++++ b/arch/x86/kvm/pmu_amd.c
1458 +@@ -269,10 +269,10 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
1459 +
1460 + pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
1461 + pmu->reserved_bits = 0xffffffff00200000ull;
1462 ++ pmu->version = 1;
1463 + /* not applicable to AMD; but clean them to prevent any fall out */
1464 + pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
1465 + pmu->nr_arch_fixed_counters = 0;
1466 +- pmu->version = 0;
1467 + pmu->global_status = 0;
1468 + }
1469 +
1470 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1471 +index 406b558abfef..ae6e51828a54 100644
1472 +--- a/arch/x86/kvm/svm.c
1473 ++++ b/arch/x86/kvm/svm.c
1474 +@@ -2024,7 +2024,11 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1475 + if (!kvm_vcpu_apicv_active(vcpu))
1476 + return;
1477 +
1478 +- if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
1479 ++ /*
1480 ++ * Since the host physical APIC id is 8 bits,
1481 ++ * we can support host APIC ID upto 255.
1482 ++ */
1483 ++ if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
1484 + return;
1485 +
1486 + entry = READ_ONCE(*(svm->avic_physical_id_cache));
1487 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
1488 +index 0c601d079cd2..8f6f69c26c35 100644
1489 +--- a/arch/x86/kvm/vmx/nested.c
1490 ++++ b/arch/x86/kvm/vmx/nested.c
1491 +@@ -2792,14 +2792,13 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
1492 + : "cc", "memory"
1493 + );
1494 +
1495 +- preempt_enable();
1496 +-
1497 + if (vmx->msr_autoload.host.nr)
1498 + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
1499 + if (vmx->msr_autoload.guest.nr)
1500 + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
1501 +
1502 + if (vm_fail) {
1503 ++ preempt_enable();
1504 + WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
1505 + VMXERR_ENTRY_INVALID_CONTROL_FIELD);
1506 + return 1;
1507 +@@ -2811,6 +2810,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
1508 + local_irq_enable();
1509 + if (hw_breakpoint_active())
1510 + set_debugreg(__this_cpu_read(cpu_dr7), 7);
1511 ++ preempt_enable();
1512 +
1513 + /*
1514 + * A non-failing VMEntry means we somehow entered guest mode with
1515 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1516 +index fed1ab6a825c..6b8575c547ee 100644
1517 +--- a/arch/x86/kvm/x86.c
1518 ++++ b/arch/x86/kvm/x86.c
1519 +@@ -1288,7 +1288,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1520 + u64 efer = msr_info->data;
1521 +
1522 + if (efer & efer_reserved_bits)
1523 +- return false;
1524 ++ return 1;
1525 +
1526 + if (!msr_info->host_initiated) {
1527 + if (!__kvm_valid_efer(vcpu, efer))
1528 +diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
1529 +index 3b24dc05251c..9d05572370ed 100644
1530 +--- a/arch/x86/lib/memcpy_64.S
1531 ++++ b/arch/x86/lib/memcpy_64.S
1532 +@@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe)
1533 + /* Copy successful. Return zero */
1534 + .L_done_memcpy_trap:
1535 + xorl %eax, %eax
1536 ++.L_done:
1537 + ret
1538 + ENDPROC(__memcpy_mcsafe)
1539 + EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
1540 +@@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
1541 + addl %edx, %ecx
1542 + .E_trailing_bytes:
1543 + mov %ecx, %eax
1544 +- ret
1545 ++ jmp .L_done
1546 +
1547 + /*
1548 + * For write fault handling, given the destination is unaligned,
1549 +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
1550 +index 667f1da36208..5eaf67e8314f 100644
1551 +--- a/arch/x86/mm/fault.c
1552 ++++ b/arch/x86/mm/fault.c
1553 +@@ -359,8 +359,6 @@ static noinline int vmalloc_fault(unsigned long address)
1554 + if (!(address >= VMALLOC_START && address < VMALLOC_END))
1555 + return -1;
1556 +
1557 +- WARN_ON_ONCE(in_nmi());
1558 +-
1559 + /*
1560 + * Copy kernel mappings over when needed. This can also
1561 + * happen within a race in page table update. In the later
1562 +diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
1563 +index 2c53b0f19329..1297e185b8c8 100644
1564 +--- a/arch/x86/platform/uv/tlb_uv.c
1565 ++++ b/arch/x86/platform/uv/tlb_uv.c
1566 +@@ -2133,14 +2133,19 @@ static int __init summarize_uvhub_sockets(int nuvhubs,
1567 + */
1568 + static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
1569 + {
1570 +- unsigned char *uvhub_mask;
1571 + struct uvhub_desc *uvhub_descs;
1572 ++ unsigned char *uvhub_mask = NULL;
1573 +
1574 + if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
1575 + timeout_us = calculate_destination_timeout();
1576 +
1577 + uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
1578 ++ if (!uvhub_descs)
1579 ++ goto fail;
1580 ++
1581 + uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
1582 ++ if (!uvhub_mask)
1583 ++ goto fail;
1584 +
1585 + if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
1586 + goto fail;
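
The tlb_uv fix works because uvhub_mask starts out NULL and both allocations are checked, letting a single fail label free whatever exists. The same convention in plain C (illustrative names; free(NULL), like kfree(NULL), is a defined no-op):

    #include <stdlib.h>

    struct desc { int dummy; };

    static int init_per_node(int n)
    {
        struct desc *descs;
        unsigned char *mask = NULL;   /* NULL so fail may free it blindly */

        descs = calloc(n, sizeof(*descs));
        if (!descs)
            goto fail;                /* descs is NULL here, free is safe */

        mask = calloc((n + 7) / 8, 1);
        if (!mask)
            goto fail;

        /* ... topology setup would go here; it may also "goto fail" ... */

        free(mask);
        free(descs);
        return 0;

    fail:
        free(mask);                   /* free(NULL) is a no-op */
        free(descs);
        return -1;
    }

    int main(void)
    {
        return init_per_node(16) ? 1 : 0;
    }
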
1587 +diff --git a/block/bio.c b/block/bio.c
1588 +index 716510ecd7ff..a3c80a6c1fe5 100644
1589 +--- a/block/bio.c
1590 ++++ b/block/bio.c
1591 +@@ -776,6 +776,8 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
1592 +
1593 + if (vec_end_addr + 1 != page_addr + off)
1594 + return false;
1595 ++ if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
1596 ++ return false;
1597 + if (same_page && (vec_end_addr & PAGE_MASK) != page_addr)
1598 + return false;
1599 +
1600 +diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
1601 +index aa6bc5c02643..c59babca6857 100644
1602 +--- a/block/blk-mq-sched.c
1603 ++++ b/block/blk-mq-sched.c
1604 +@@ -413,6 +413,14 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
1605 + struct list_head *list, bool run_queue_async)
1606 + {
1607 + struct elevator_queue *e;
1608 ++ struct request_queue *q = hctx->queue;
1609 ++
1610 ++ /*
1611 ++ * blk_mq_sched_insert_requests() is called from flush plug
1612 ++ * context only, and holds one usage counter to prevent the queue
1613 ++ * from being released.
1614 ++ */
1615 ++ percpu_ref_get(&q->q_usage_counter);
1616 +
1617 + e = hctx->queue->elevator;
1618 + if (e && e->type->ops.insert_requests)
1619 +@@ -426,12 +434,14 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
1620 + if (!hctx->dispatch_busy && !e && !run_queue_async) {
1621 + blk_mq_try_issue_list_directly(hctx, list);
1622 + if (list_empty(list))
1623 +- return;
1624 ++ goto out;
1625 + }
1626 + blk_mq_insert_requests(hctx, ctx, list);
1627 + }
1628 +
1629 + blk_mq_run_hw_queue(hctx, run_queue_async);
1630 ++ out:
1631 ++ percpu_ref_put(&q->q_usage_counter);
1632 + }
1633 +
1634 + static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
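
The blk-mq-sched hunk above pins the queue with percpu_ref_get() for the whole insert and drops the reference on every exit path, so a concurrent queue release cannot free it mid-flight. A plain-atomics sketch of the pin/unpin pairing (hypothetical queue type, not the kernel percpu_ref API):

    #include <stdatomic.h>

    struct queue {
        atomic_int usage;        /* stand-in for q->q_usage_counter */
    };

    static void insert_requests(struct queue *q)
    {
        /* Pin the queue before doing anything with it; teardown must
         * wait for the counter to drain, so q stays valid throughout. */
        atomic_fetch_add(&q->usage, 1);

        /* ... insert/dispatch work; early returns become "goto out"
         * so the matching put below runs on every exit path ... */

        atomic_fetch_sub(&q->usage, 1);
    }

    int main(void)
    {
        struct queue q = { .usage = 0 };

        insert_requests(&q);
        return 0;
    }
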
1635 +diff --git a/block/blk-mq.c b/block/blk-mq.c
1636 +index b0e5e67e20a2..8a41cc5974fe 100644
1637 +--- a/block/blk-mq.c
1638 ++++ b/block/blk-mq.c
1639 +@@ -2284,15 +2284,65 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
1640 + }
1641 + }
1642 +
1643 ++static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
1644 ++{
1645 ++ int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
1646 ++
1647 ++ BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
1648 ++ __alignof__(struct blk_mq_hw_ctx)) !=
1649 ++ sizeof(struct blk_mq_hw_ctx));
1650 ++
1651 ++ if (tag_set->flags & BLK_MQ_F_BLOCKING)
1652 ++ hw_ctx_size += sizeof(struct srcu_struct);
1653 ++
1654 ++ return hw_ctx_size;
1655 ++}
1656 ++
1657 + static int blk_mq_init_hctx(struct request_queue *q,
1658 + struct blk_mq_tag_set *set,
1659 + struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1660 + {
1661 +- int node;
1662 ++ hctx->queue_num = hctx_idx;
1663 ++
1664 ++ cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1665 ++
1666 ++ hctx->tags = set->tags[hctx_idx];
1667 ++
1668 ++ if (set->ops->init_hctx &&
1669 ++ set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1670 ++ goto unregister_cpu_notifier;
1671 +
1672 +- node = hctx->numa_node;
1673 ++ if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
1674 ++ hctx->numa_node))
1675 ++ goto exit_hctx;
1676 ++ return 0;
1677 ++
1678 ++ exit_hctx:
1679 ++ if (set->ops->exit_hctx)
1680 ++ set->ops->exit_hctx(hctx, hctx_idx);
1681 ++ unregister_cpu_notifier:
1682 ++ blk_mq_remove_cpuhp(hctx);
1683 ++ return -1;
1684 ++}
1685 ++
1686 ++static struct blk_mq_hw_ctx *
1687 ++blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
1688 ++ int node)
1689 ++{
1690 ++ struct blk_mq_hw_ctx *hctx;
1691 ++ gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
1692 ++
1693 ++ hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
1694 ++ if (!hctx)
1695 ++ goto fail_alloc_hctx;
1696 ++
1697 ++ if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
1698 ++ goto free_hctx;
1699 ++
1700 ++ atomic_set(&hctx->nr_active, 0);
1701 + if (node == NUMA_NO_NODE)
1702 +- node = hctx->numa_node = set->numa_node;
1703 ++ node = set->numa_node;
1704 ++ hctx->numa_node = node;
1705 +
1706 + INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1707 + spin_lock_init(&hctx->lock);
1708 +@@ -2300,58 +2350,45 @@ static int blk_mq_init_hctx(struct request_queue *q,
1709 + hctx->queue = q;
1710 + hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1711 +
1712 +- cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1713 +-
1714 +- hctx->tags = set->tags[hctx_idx];
1715 +-
1716 + /*
1717 + * Allocate space for all possible cpus to avoid allocation at
1718 + * runtime
1719 + */
1720 + hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
1721 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
1722 ++ gfp, node);
1723 + if (!hctx->ctxs)
1724 +- goto unregister_cpu_notifier;
1725 ++ goto free_cpumask;
1726 +
1727 + if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
1728 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
1729 ++ gfp, node))
1730 + goto free_ctxs;
1731 +-
1732 + hctx->nr_ctx = 0;
1733 +
1734 + spin_lock_init(&hctx->dispatch_wait_lock);
1735 + init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
1736 + INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
1737 +
1738 +- if (set->ops->init_hctx &&
1739 +- set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1740 +- goto free_bitmap;
1741 +-
1742 + hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
1743 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
1744 ++ gfp);
1745 + if (!hctx->fq)
1746 +- goto exit_hctx;
1747 +-
1748 +- if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
1749 +- goto free_fq;
1750 ++ goto free_bitmap;
1751 +
1752 + if (hctx->flags & BLK_MQ_F_BLOCKING)
1753 + init_srcu_struct(hctx->srcu);
1754 ++ blk_mq_hctx_kobj_init(hctx);
1755 +
1756 +- return 0;
1757 ++ return hctx;
1758 +
1759 +- free_fq:
1760 +- blk_free_flush_queue(hctx->fq);
1761 +- exit_hctx:
1762 +- if (set->ops->exit_hctx)
1763 +- set->ops->exit_hctx(hctx, hctx_idx);
1764 + free_bitmap:
1765 + sbitmap_free(&hctx->ctx_map);
1766 + free_ctxs:
1767 + kfree(hctx->ctxs);
1768 +- unregister_cpu_notifier:
1769 +- blk_mq_remove_cpuhp(hctx);
1770 +- return -1;
1771 ++ free_cpumask:
1772 ++ free_cpumask_var(hctx->cpumask);
1773 ++ free_hctx:
1774 ++ kfree(hctx);
1775 ++ fail_alloc_hctx:
1776 ++ return NULL;
1777 + }
1778 +
1779 + static void blk_mq_init_cpu_queues(struct request_queue *q,
1780 +@@ -2695,51 +2732,25 @@ struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
1781 + }
1782 + EXPORT_SYMBOL(blk_mq_init_sq_queue);
1783 +
1784 +-static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
1785 +-{
1786 +- int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
1787 +-
1788 +- BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
1789 +- __alignof__(struct blk_mq_hw_ctx)) !=
1790 +- sizeof(struct blk_mq_hw_ctx));
1791 +-
1792 +- if (tag_set->flags & BLK_MQ_F_BLOCKING)
1793 +- hw_ctx_size += sizeof(struct srcu_struct);
1794 +-
1795 +- return hw_ctx_size;
1796 +-}
1797 +-
1798 + static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
1799 + struct blk_mq_tag_set *set, struct request_queue *q,
1800 + int hctx_idx, int node)
1801 + {
1802 + struct blk_mq_hw_ctx *hctx;
1803 +
1804 +- hctx = kzalloc_node(blk_mq_hw_ctx_size(set),
1805 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1806 +- node);
1807 ++ hctx = blk_mq_alloc_hctx(q, set, node);
1808 + if (!hctx)
1809 +- return NULL;
1810 +-
1811 +- if (!zalloc_cpumask_var_node(&hctx->cpumask,
1812 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1813 +- node)) {
1814 +- kfree(hctx);
1815 +- return NULL;
1816 +- }
1817 +-
1818 +- atomic_set(&hctx->nr_active, 0);
1819 +- hctx->numa_node = node;
1820 +- hctx->queue_num = hctx_idx;
1821 ++ goto fail;
1822 +
1823 +- if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) {
1824 +- free_cpumask_var(hctx->cpumask);
1825 +- kfree(hctx);
1826 +- return NULL;
1827 +- }
1828 +- blk_mq_hctx_kobj_init(hctx);
1829 ++ if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
1830 ++ goto free_hctx;
1831 +
1832 + return hctx;
1833 ++
1834 ++ free_hctx:
1835 ++ kobject_put(&hctx->kobj);
1836 ++ fail:
1837 ++ return NULL;
1838 + }
1839 +
1840 + static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
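
blk_mq_hw_ctx_size() above depends on a compile-time check that srcu is the alignment-padded last member, so the optional srcu_struct can ride on the end of a single allocation. The same trick in standard C, with stand-in types:

    #include <stdalign.h>
    #include <stddef.h>
    #include <stdlib.h>

    struct tail { long state[4]; };          /* optional trailing blob */

    struct ctx {
        int flags;
        struct tail srcu[];                  /* must stay the last member */
    };

    /* Compile-time proof that the trailing member starts exactly at the
     * padded end of struct ctx, so a single allocation of sizeof(struct
     * ctx) + sizeof(struct tail) lays both out correctly. */
    _Static_assert(((offsetof(struct ctx, srcu) + alignof(struct ctx) - 1)
                    & ~(alignof(struct ctx) - 1)) == sizeof(struct ctx),
                   "srcu must be the alignment-padded last member");

    static struct ctx *alloc_ctx(int blocking)
    {
        size_t sz = sizeof(struct ctx);

        if (blocking)                        /* like BLK_MQ_F_BLOCKING */
            sz += sizeof(struct tail);
        return calloc(1, sz);
    }

    int main(void)
    {
        struct ctx *c = alloc_ctx(1);

        free(c);
        return 0;
    }
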
1841 +diff --git a/block/blk.h b/block/blk.h
1842 +index 5d636ee41663..e27fd1512e4b 100644
1843 +--- a/block/blk.h
1844 ++++ b/block/blk.h
1845 +@@ -75,7 +75,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
1846 +
1847 + if (addr1 + vec1->bv_len != addr2)
1848 + return false;
1849 +- if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
1850 ++ if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
1851 + return false;
1852 + if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
1853 + return false;
1854 +diff --git a/block/genhd.c b/block/genhd.c
1855 +index 703267865f14..d8dff0b21f7d 100644
1856 +--- a/block/genhd.c
1857 ++++ b/block/genhd.c
1858 +@@ -531,6 +531,18 @@ void blk_free_devt(dev_t devt)
1859 + }
1860 + }
1861 +
1862 ++/**
1863 ++ * We invalidate a devt by storing a NULL pointer for it in the idr.
1864 ++ */
1865 ++void blk_invalidate_devt(dev_t devt)
1866 ++{
1867 ++ if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
1868 ++ spin_lock_bh(&ext_devt_lock);
1869 ++ idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt)));
1870 ++ spin_unlock_bh(&ext_devt_lock);
1871 ++ }
1872 ++}
1873 ++
1874 + static char *bdevt_str(dev_t devt, char *buf)
1875 + {
1876 + if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
1877 +@@ -793,6 +805,13 @@ void del_gendisk(struct gendisk *disk)
1878 +
1879 + if (!(disk->flags & GENHD_FL_HIDDEN))
1880 + blk_unregister_region(disk_devt(disk), disk->minors);
1881 ++ /*
1882 ++ * Remove the gendisk pointer from the idr so that it cannot be looked
1883 ++ * up during the RCU grace period that precedes freeing the gendisk,
1884 ++ * preventing use-after-free. Note that the device number stays
1885 ++ * "in-use" until we really free the gendisk.
1886 ++ */
1887 ++ blk_invalidate_devt(disk_devt(disk));
1888 +
1889 + kobject_put(disk->part0.holder_dir);
1890 + kobject_put(disk->slave_dir);
1891 +diff --git a/block/partition-generic.c b/block/partition-generic.c
1892 +index 8e596a8dff32..aee643ce13d1 100644
1893 +--- a/block/partition-generic.c
1894 ++++ b/block/partition-generic.c
1895 +@@ -285,6 +285,13 @@ void delete_partition(struct gendisk *disk, int partno)
1896 + kobject_put(part->holder_dir);
1897 + device_del(part_to_dev(part));
1898 +
1899 ++ /*
1900 ++ * Remove the gendisk pointer from the idr so that it cannot be looked
1901 ++ * up during the RCU grace period that precedes freeing the gendisk,
1902 ++ * preventing use-after-free. Note that the device number stays
1903 ++ * "in-use" until we really free the gendisk.
1904 ++ */
1905 ++ blk_invalidate_devt(part_devt(part));
1906 + hd_struct_kill(part);
1907 + }
1908 +
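
Both hunks above NULL out the idr entry at deletion time so lookups during the RCU grace period see no device rather than a dying one, while the number itself stays reserved until the final free. A toy single-threaded table showing the two-step retire (all names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_DEVT 8

    struct gendisk_sim { int id; };

    static struct gendisk_sim *table[MAX_DEVT];   /* stand-in for the idr */

    /* Step 1: make the devt unresolvable, but keep the slot reserved. */
    static void invalidate_devt(int devt)
    {
        table[devt] = NULL;
    }

    /* Step 2 (after the simulated grace period): release the object;
     * only now may the slot number be handed out again. */
    static void free_devt(int devt, struct gendisk_sim *disk)
    {
        (void)devt;
        free(disk);
    }

    int main(void)
    {
        struct gendisk_sim *d = malloc(sizeof(*d));

        table[3] = d;
        invalidate_devt(3);            /* lookups now return NULL... */
        printf("%p\n", (void *)table[3]);
        free_devt(3, d);               /* ...and only then is d freed */
        return 0;
    }
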
1909 +diff --git a/block/sed-opal.c b/block/sed-opal.c
1910 +index e0de4dd448b3..119640897293 100644
1911 +--- a/block/sed-opal.c
1912 ++++ b/block/sed-opal.c
1913 +@@ -2095,13 +2095,16 @@ static int opal_erase_locking_range(struct opal_dev *dev,
1914 + static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
1915 + struct opal_mbr_data *opal_mbr)
1916 + {
1917 ++ u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ?
1918 ++ OPAL_TRUE : OPAL_FALSE;
1919 ++
1920 + const struct opal_step mbr_steps[] = {
1921 + { opal_discovery0, },
1922 + { start_admin1LSP_opal_session, &opal_mbr->key },
1923 +- { set_mbr_done, &opal_mbr->enable_disable },
1924 ++ { set_mbr_done, &enable_disable },
1925 + { end_opal_session, },
1926 + { start_admin1LSP_opal_session, &opal_mbr->key },
1927 +- { set_mbr_enable_disable, &opal_mbr->enable_disable },
1928 ++ { set_mbr_enable_disable, &enable_disable },
1929 + { end_opal_session, },
1930 + { NULL, }
1931 + };
1932 +@@ -2221,7 +2224,7 @@ static int __opal_lock_unlock(struct opal_dev *dev,
1933 +
1934 + static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key)
1935 + {
1936 +- u8 mbr_done_tf = 1;
1937 ++ u8 mbr_done_tf = OPAL_TRUE;
1938 + const struct opal_step mbrdone_step [] = {
1939 + { opal_discovery0, },
1940 + { start_admin1LSP_opal_session, key },
1941 +diff --git a/crypto/hmac.c b/crypto/hmac.c
1942 +index e74730224f0a..4b8c8ee8f15c 100644
1943 +--- a/crypto/hmac.c
1944 ++++ b/crypto/hmac.c
1945 +@@ -168,6 +168,8 @@ static int hmac_init_tfm(struct crypto_tfm *tfm)
1946 +
1947 + parent->descsize = sizeof(struct shash_desc) +
1948 + crypto_shash_descsize(hash);
1949 ++ if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE))
1950 ++ return -EINVAL;
1951 +
1952 + ctx->hash = hash;
1953 + return 0;
1954 +diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
1955 +index e48894e002ba..a46c2c162c03 100644
1956 +--- a/drivers/acpi/arm64/iort.c
1957 ++++ b/drivers/acpi/arm64/iort.c
1958 +@@ -1232,18 +1232,24 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
1959 + /*
1960 + * set numa proximity domain for smmuv3 device
1961 + */
1962 +-static void __init arm_smmu_v3_set_proximity(struct device *dev,
1963 ++static int __init arm_smmu_v3_set_proximity(struct device *dev,
1964 + struct acpi_iort_node *node)
1965 + {
1966 + struct acpi_iort_smmu_v3 *smmu;
1967 +
1968 + smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1969 + if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
1970 +- set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
1971 ++ int node = acpi_map_pxm_to_node(smmu->pxm);
1972 ++
1973 ++ if (node != NUMA_NO_NODE && !node_online(node))
1974 ++ return -EINVAL;
1975 ++
1976 ++ set_dev_node(dev, node);
1977 + pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
1978 + smmu->base_address,
1979 + smmu->pxm);
1980 + }
1981 ++ return 0;
1982 + }
1983 + #else
1984 + #define arm_smmu_v3_set_proximity NULL
1985 +@@ -1318,7 +1324,7 @@ struct iort_dev_config {
1986 + int (*dev_count_resources)(struct acpi_iort_node *node);
1987 + void (*dev_init_resources)(struct resource *res,
1988 + struct acpi_iort_node *node);
1989 +- void (*dev_set_proximity)(struct device *dev,
1990 ++ int (*dev_set_proximity)(struct device *dev,
1991 + struct acpi_iort_node *node);
1992 + };
1993 +
1994 +@@ -1369,8 +1375,11 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
1995 + if (!pdev)
1996 + return -ENOMEM;
1997 +
1998 +- if (ops->dev_set_proximity)
1999 +- ops->dev_set_proximity(&pdev->dev, node);
2000 ++ if (ops->dev_set_proximity) {
2001 ++ ret = ops->dev_set_proximity(&pdev->dev, node);
2002 ++ if (ret)
2003 ++ goto dev_put;
2004 ++ }
2005 +
2006 + count = ops->dev_count_resources(node);
2007 +
2008 +diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
2009 +index 77abe0ec4043..bd533f68b1de 100644
2010 +--- a/drivers/acpi/property.c
2011 ++++ b/drivers/acpi/property.c
2012 +@@ -1031,6 +1031,14 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
2013 + const struct acpi_data_node *data = to_acpi_data_node(fwnode);
2014 + struct acpi_data_node *dn;
2015 +
2016 ++ /*
2017 ++ * We can have a combination of device and data nodes, e.g. with
2018 ++ * hierarchical _DSD properties. Make sure the adev pointer is
2019 ++ * restored before going through data nodes, otherwise we will
2020 ++ * be looking for data_nodes below the last device found instead
2021 ++ * of the common fwnode shared by device_nodes and data_nodes.
2022 ++ */
2023 ++ adev = to_acpi_device_node(fwnode);
2024 + if (adev)
2025 + head = &adev->data.subnodes;
2026 + else if (data)
2027 +diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
2028 +index f80d298de3fa..8ad20ed0cb7c 100644
2029 +--- a/drivers/base/power/main.c
2030 ++++ b/drivers/base/power/main.c
2031 +@@ -1747,6 +1747,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
2032 + if (dev->power.syscore)
2033 + goto Complete;
2034 +
2035 ++ /* Avoid direct_complete to let wakeup_path propagate. */
2036 ++ if (device_may_wakeup(dev) || dev->power.wakeup_path)
2037 ++ dev->power.direct_complete = false;
2038 ++
2039 + if (dev->power.direct_complete) {
2040 + if (pm_runtime_status_suspended(dev)) {
2041 + pm_runtime_disable(dev);
2042 +diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
2043 +index d5d6e6e5da3b..62d3aa2b26f6 100644
2044 +--- a/drivers/bluetooth/btbcm.c
2045 ++++ b/drivers/bluetooth/btbcm.c
2046 +@@ -37,6 +37,7 @@
2047 + #define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}})
2048 + #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
2049 + #define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
2050 ++#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}})
2051 +
2052 + int btbcm_check_bdaddr(struct hci_dev *hdev)
2053 + {
2054 +@@ -82,7 +83,8 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
2055 + !bacmp(&bda->bdaddr, BDADDR_BCM20702A1) ||
2056 + !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
2057 + !bacmp(&bda->bdaddr, BDADDR_BCM4330B1) ||
2058 +- !bacmp(&bda->bdaddr, BDADDR_BCM43430A0)) {
2059 ++ !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) ||
2060 ++ !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) {
2061 + bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
2062 + &bda->bdaddr);
2063 + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
2064 +diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
2065 +index b0b680dd69f4..f5dbeec8e274 100644
2066 +--- a/drivers/bluetooth/btmtkuart.c
2067 ++++ b/drivers/bluetooth/btmtkuart.c
2068 +@@ -661,7 +661,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev)
2069 + {
2070 + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
2071 + struct btmtk_hci_wmt_params wmt_params;
2072 +- u32 baudrate;
2073 ++ __le32 baudrate;
2074 + u8 param;
2075 + int err;
2076 +
2077 +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
2078 +index 237aea34b69f..d3b467792eb3 100644
2079 +--- a/drivers/bluetooth/hci_qca.c
2080 ++++ b/drivers/bluetooth/hci_qca.c
2081 +@@ -508,6 +508,8 @@ static int qca_open(struct hci_uart *hu)
2082 + qcadev = serdev_device_get_drvdata(hu->serdev);
2083 + if (qcadev->btsoc_type != QCA_WCN3990) {
2084 + gpiod_set_value_cansleep(qcadev->bt_en, 1);
2085 ++ /* Controller needs time to boot up. */
2086 ++ msleep(150);
2087 + } else {
2088 + hu->init_speed = qcadev->init_speed;
2089 + hu->oper_speed = qcadev->oper_speed;
2090 +@@ -992,7 +994,8 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
2091 + while (!skb_queue_empty(&qca->txq))
2092 + usleep_range(100, 200);
2093 +
2094 +- serdev_device_wait_until_sent(hu->serdev,
2095 ++ if (hu->serdev)
2096 ++ serdev_device_wait_until_sent(hu->serdev,
2097 + msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
2098 +
2099 + /* Give the controller time to process the request */
2100 +diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
2101 +index b65ff6962899..e9b6ac61fb7f 100644
2102 +--- a/drivers/char/hw_random/omap-rng.c
2103 ++++ b/drivers/char/hw_random/omap-rng.c
2104 +@@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev)
2105 + priv->rng.read = omap_rng_do_read;
2106 + priv->rng.init = omap_rng_init;
2107 + priv->rng.cleanup = omap_rng_cleanup;
2108 ++ priv->rng.quality = 900;
2109 +
2110 + priv->rng.priv = (unsigned long)priv;
2111 + platform_set_drvdata(pdev, priv);
2112 +diff --git a/drivers/char/random.c b/drivers/char/random.c
2113 +index 38c6d1af6d1c..af6e240f98ff 100644
2114 +--- a/drivers/char/random.c
2115 ++++ b/drivers/char/random.c
2116 +@@ -777,6 +777,7 @@ static struct crng_state **crng_node_pool __read_mostly;
2117 + #endif
2118 +
2119 + static void invalidate_batched_entropy(void);
2120 ++static void numa_crng_init(void);
2121 +
2122 + static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
2123 + static int __init parse_trust_cpu(char *arg)
2124 +@@ -805,7 +806,9 @@ static void crng_initialize(struct crng_state *crng)
2125 + }
2126 + crng->state[i] ^= rv;
2127 + }
2128 +- if (trust_cpu && arch_init) {
2129 ++ if (trust_cpu && arch_init && crng == &primary_crng) {
2130 ++ invalidate_batched_entropy();
2131 ++ numa_crng_init();
2132 + crng_init = 2;
2133 + pr_notice("random: crng done (trusting CPU's manufacturer)\n");
2134 + }
2135 +@@ -2211,8 +2214,8 @@ struct batched_entropy {
2136 + u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
2137 + };
2138 + unsigned int position;
2139 ++ spinlock_t batch_lock;
2140 + };
2141 +-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
2142 +
2143 + /*
2144 + * Get a random word for internal kernel use only. The quality of the random
2145 +@@ -2222,12 +2225,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
2146 + * wait_for_random_bytes() should be called and return 0 at least once
2147 + * at any point prior.
2148 + */
2149 +-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
2150 ++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
2151 ++ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
2152 ++};
2153 ++
2154 + u64 get_random_u64(void)
2155 + {
2156 + u64 ret;
2157 +- bool use_lock;
2158 +- unsigned long flags = 0;
2159 ++ unsigned long flags;
2160 + struct batched_entropy *batch;
2161 + static void *previous;
2162 +
2163 +@@ -2242,28 +2247,25 @@ u64 get_random_u64(void)
2164 +
2165 + warn_unseeded_randomness(&previous);
2166 +
2167 +- use_lock = READ_ONCE(crng_init) < 2;
2168 +- batch = &get_cpu_var(batched_entropy_u64);
2169 +- if (use_lock)
2170 +- read_lock_irqsave(&batched_entropy_reset_lock, flags);
2171 ++ batch = raw_cpu_ptr(&batched_entropy_u64);
2172 ++ spin_lock_irqsave(&batch->batch_lock, flags);
2173 + if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
2174 + extract_crng((u8 *)batch->entropy_u64);
2175 + batch->position = 0;
2176 + }
2177 + ret = batch->entropy_u64[batch->position++];
2178 +- if (use_lock)
2179 +- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2180 +- put_cpu_var(batched_entropy_u64);
2181 ++ spin_unlock_irqrestore(&batch->batch_lock, flags);
2182 + return ret;
2183 + }
2184 + EXPORT_SYMBOL(get_random_u64);
2185 +
2186 +-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
2187 ++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
2188 ++ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
2189 ++};
2190 + u32 get_random_u32(void)
2191 + {
2192 + u32 ret;
2193 +- bool use_lock;
2194 +- unsigned long flags = 0;
2195 ++ unsigned long flags;
2196 + struct batched_entropy *batch;
2197 + static void *previous;
2198 +
2199 +@@ -2272,18 +2274,14 @@ u32 get_random_u32(void)
2200 +
2201 + warn_unseeded_randomness(&previous);
2202 +
2203 +- use_lock = READ_ONCE(crng_init) < 2;
2204 +- batch = &get_cpu_var(batched_entropy_u32);
2205 +- if (use_lock)
2206 +- read_lock_irqsave(&batched_entropy_reset_lock, flags);
2207 ++ batch = raw_cpu_ptr(&batched_entropy_u32);
2208 ++ spin_lock_irqsave(&batch->batch_lock, flags);
2209 + if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
2210 + extract_crng((u8 *)batch->entropy_u32);
2211 + batch->position = 0;
2212 + }
2213 + ret = batch->entropy_u32[batch->position++];
2214 +- if (use_lock)
2215 +- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2216 +- put_cpu_var(batched_entropy_u32);
2217 ++ spin_unlock_irqrestore(&batch->batch_lock, flags);
2218 + return ret;
2219 + }
2220 + EXPORT_SYMBOL(get_random_u32);
2221 +@@ -2297,12 +2295,19 @@ static void invalidate_batched_entropy(void)
2222 + int cpu;
2223 + unsigned long flags;
2224 +
2225 +- write_lock_irqsave(&batched_entropy_reset_lock, flags);
2226 + for_each_possible_cpu (cpu) {
2227 +- per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
2228 +- per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
2229 ++ struct batched_entropy *batched_entropy;
2230 ++
2231 ++ batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
2232 ++ spin_lock_irqsave(&batched_entropy->batch_lock, flags);
2233 ++ batched_entropy->position = 0;
2234 ++ spin_unlock(&batched_entropy->batch_lock);
2235 ++
2236 ++ batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
2237 ++ spin_lock(&batched_entropy->batch_lock);
2238 ++ batched_entropy->position = 0;
2239 ++ spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
2240 + }
2241 +- write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2242 + }
2243 +
2244 + /**
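
The random.c rework above replaces the global reset rwlock with a spinlock embedded in each per-CPU batch: the fast path takes only its own lock, and the invalidation path walks every batch taking each lock in turn. A pthread analog over a fixed slot array (sizes and names are illustrative):

    #include <pthread.h>
    #include <stdint.h>

    #define NSLOTS 4                  /* stand-in for "per possible CPU" */
    #define BATCH  8

    struct batched_entropy {
        uint64_t entropy[BATCH];
        unsigned int position;
        pthread_mutex_t batch_lock;   /* one lock per batch, no global */
    };

    static struct batched_entropy batches[NSLOTS];

    static void refill(struct batched_entropy *b)
    {
        (void)b;                      /* extract_crng() would refill here */
    }

    static uint64_t get_random_u64_sim(int slot)
    {
        struct batched_entropy *b = &batches[slot];
        uint64_t ret;

        pthread_mutex_lock(&b->batch_lock);   /* fast path: own lock only */
        if (b->position % BATCH == 0) {
            refill(b);
            b->position = 0;
        }
        ret = b->entropy[b->position++];
        pthread_mutex_unlock(&b->batch_lock);
        return ret;
    }

    /* Reseed path: walk every batch, taking each batch's own lock. */
    static void invalidate_batched_entropy_sim(void)
    {
        for (int i = 0; i < NSLOTS; i++) {
            pthread_mutex_lock(&batches[i].batch_lock);
            batches[i].position = 0;
            pthread_mutex_unlock(&batches[i].batch_lock);
        }
    }

    int main(void)
    {
        for (int i = 0; i < NSLOTS; i++)
            pthread_mutex_init(&batches[i].batch_lock, NULL);
        (void)get_random_u64_sim(0);
        invalidate_batched_entropy_sim();
        return 0;
    }
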
2245 +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
2246 +index fbeb71953526..05dbfdb9f4af 100644
2247 +--- a/drivers/char/virtio_console.c
2248 ++++ b/drivers/char/virtio_console.c
2249 +@@ -75,7 +75,7 @@ struct ports_driver_data {
2250 + /* All the console devices handled by this driver */
2251 + struct list_head consoles;
2252 + };
2253 +-static struct ports_driver_data pdrvdata;
2254 ++static struct ports_driver_data pdrvdata = { .next_vtermno = 1};
2255 +
2256 + static DEFINE_SPINLOCK(pdrvdata_lock);
2257 + static DECLARE_COMPLETION(early_console_added);
2258 +@@ -1394,6 +1394,7 @@ static int add_port(struct ports_device *portdev, u32 id)
2259 + port->async_queue = NULL;
2260 +
2261 + port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
2262 ++ port->cons.vtermno = 0;
2263 +
2264 + port->host_connected = port->guest_connected = false;
2265 + port->stats = (struct port_stats) { 0 };
2266 +diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
2267 +index 4d92b27a6153..7a4c5957939a 100644
2268 +--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
2269 ++++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
2270 +@@ -123,8 +123,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
2271 + DEF_MOD("msiof2", 209, R8A774A1_CLK_MSO),
2272 + DEF_MOD("msiof1", 210, R8A774A1_CLK_MSO),
2273 + DEF_MOD("msiof0", 211, R8A774A1_CLK_MSO),
2274 +- DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S0D3),
2275 +- DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S0D3),
2276 ++ DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S3D1),
2277 ++ DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S3D1),
2278 + DEF_MOD("sys-dmac0", 219, R8A774A1_CLK_S0D3),
2279 + DEF_MOD("cmt3", 300, R8A774A1_CLK_R),
2280 + DEF_MOD("cmt2", 301, R8A774A1_CLK_R),
2281 +@@ -143,8 +143,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
2282 + DEF_MOD("rwdt", 402, R8A774A1_CLK_R),
2283 + DEF_MOD("intc-ex", 407, R8A774A1_CLK_CP),
2284 + DEF_MOD("intc-ap", 408, R8A774A1_CLK_S0D3),
2285 +- DEF_MOD("audmac1", 501, R8A774A1_CLK_S0D3),
2286 +- DEF_MOD("audmac0", 502, R8A774A1_CLK_S0D3),
2287 ++ DEF_MOD("audmac1", 501, R8A774A1_CLK_S1D2),
2288 ++ DEF_MOD("audmac0", 502, R8A774A1_CLK_S1D2),
2289 + DEF_MOD("hscif4", 516, R8A774A1_CLK_S3D1),
2290 + DEF_MOD("hscif3", 517, R8A774A1_CLK_S3D1),
2291 + DEF_MOD("hscif2", 518, R8A774A1_CLK_S3D1),
2292 +diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
2293 +index 34e274f2a273..93dacd826fd0 100644
2294 +--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
2295 ++++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
2296 +@@ -157,7 +157,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
2297 + DEF_MOD("intc-ex", 407, R8A774C0_CLK_CP),
2298 + DEF_MOD("intc-ap", 408, R8A774C0_CLK_S0D3),
2299 +
2300 +- DEF_MOD("audmac0", 502, R8A774C0_CLK_S3D4),
2301 ++ DEF_MOD("audmac0", 502, R8A774C0_CLK_S1D2),
2302 + DEF_MOD("hscif4", 516, R8A774C0_CLK_S3D1C),
2303 + DEF_MOD("hscif3", 517, R8A774C0_CLK_S3D1C),
2304 + DEF_MOD("hscif2", 518, R8A774C0_CLK_S3D1C),
2305 +diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
2306 +index 86842c9fd314..0825cd0ff286 100644
2307 +--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
2308 ++++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
2309 +@@ -129,8 +129,8 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
2310 + DEF_MOD("msiof2", 209, R8A7795_CLK_MSO),
2311 + DEF_MOD("msiof1", 210, R8A7795_CLK_MSO),
2312 + DEF_MOD("msiof0", 211, R8A7795_CLK_MSO),
2313 +- DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S0D3),
2314 +- DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S0D3),
2315 ++ DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S3D1),
2316 ++ DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S3D1),
2317 + DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S0D3),
2318 + DEF_MOD("sceg-pub", 229, R8A7795_CLK_CR),
2319 + DEF_MOD("cmt3", 300, R8A7795_CLK_R),
2320 +@@ -153,8 +153,8 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
2321 + DEF_MOD("rwdt", 402, R8A7795_CLK_R),
2322 + DEF_MOD("intc-ex", 407, R8A7795_CLK_CP),
2323 + DEF_MOD("intc-ap", 408, R8A7795_CLK_S0D3),
2324 +- DEF_MOD("audmac1", 501, R8A7795_CLK_S0D3),
2325 +- DEF_MOD("audmac0", 502, R8A7795_CLK_S0D3),
2326 ++ DEF_MOD("audmac1", 501, R8A7795_CLK_S1D2),
2327 ++ DEF_MOD("audmac0", 502, R8A7795_CLK_S1D2),
2328 + DEF_MOD("drif7", 508, R8A7795_CLK_S3D2),
2329 + DEF_MOD("drif6", 509, R8A7795_CLK_S3D2),
2330 + DEF_MOD("drif5", 510, R8A7795_CLK_S3D2),
2331 +diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
2332 +index 12c455859f2c..997cd956f12b 100644
2333 +--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
2334 ++++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
2335 +@@ -126,8 +126,8 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
2336 + DEF_MOD("msiof2", 209, R8A7796_CLK_MSO),
2337 + DEF_MOD("msiof1", 210, R8A7796_CLK_MSO),
2338 + DEF_MOD("msiof0", 211, R8A7796_CLK_MSO),
2339 +- DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S0D3),
2340 +- DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S0D3),
2341 ++ DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S3D1),
2342 ++ DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S3D1),
2343 + DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3),
2344 + DEF_MOD("cmt3", 300, R8A7796_CLK_R),
2345 + DEF_MOD("cmt2", 301, R8A7796_CLK_R),
2346 +@@ -146,8 +146,8 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
2347 + DEF_MOD("rwdt", 402, R8A7796_CLK_R),
2348 + DEF_MOD("intc-ex", 407, R8A7796_CLK_CP),
2349 + DEF_MOD("intc-ap", 408, R8A7796_CLK_S0D3),
2350 +- DEF_MOD("audmac1", 501, R8A7796_CLK_S0D3),
2351 +- DEF_MOD("audmac0", 502, R8A7796_CLK_S0D3),
2352 ++ DEF_MOD("audmac1", 501, R8A7796_CLK_S1D2),
2353 ++ DEF_MOD("audmac0", 502, R8A7796_CLK_S1D2),
2354 + DEF_MOD("drif7", 508, R8A7796_CLK_S3D2),
2355 + DEF_MOD("drif6", 509, R8A7796_CLK_S3D2),
2356 + DEF_MOD("drif5", 510, R8A7796_CLK_S3D2),
2357 +diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
2358 +index eb1cca58a1e1..afc9c72fa094 100644
2359 +--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
2360 ++++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
2361 +@@ -123,8 +123,8 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
2362 + DEF_MOD("msiof2", 209, R8A77965_CLK_MSO),
2363 + DEF_MOD("msiof1", 210, R8A77965_CLK_MSO),
2364 + DEF_MOD("msiof0", 211, R8A77965_CLK_MSO),
2365 +- DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S0D3),
2366 +- DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S0D3),
2367 ++ DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S3D1),
2368 ++ DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S3D1),
2369 + DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3),
2370 +
2371 + DEF_MOD("cmt3", 300, R8A77965_CLK_R),
2372 +@@ -146,8 +146,8 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
2373 + DEF_MOD("intc-ex", 407, R8A77965_CLK_CP),
2374 + DEF_MOD("intc-ap", 408, R8A77965_CLK_S0D3),
2375 +
2376 +- DEF_MOD("audmac1", 501, R8A77965_CLK_S0D3),
2377 +- DEF_MOD("audmac0", 502, R8A77965_CLK_S0D3),
2378 ++ DEF_MOD("audmac1", 501, R8A77965_CLK_S1D2),
2379 ++ DEF_MOD("audmac0", 502, R8A77965_CLK_S1D2),
2380 + DEF_MOD("drif7", 508, R8A77965_CLK_S3D2),
2381 + DEF_MOD("drif6", 509, R8A77965_CLK_S3D2),
2382 + DEF_MOD("drif5", 510, R8A77965_CLK_S3D2),
2383 +diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
2384 +index 9a278c75c918..03f445d47ef6 100644
2385 +--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
2386 ++++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
2387 +@@ -152,7 +152,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
2388 + DEF_MOD("intc-ex", 407, R8A77990_CLK_CP),
2389 + DEF_MOD("intc-ap", 408, R8A77990_CLK_S0D3),
2390 +
2391 +- DEF_MOD("audmac0", 502, R8A77990_CLK_S3D4),
2392 ++ DEF_MOD("audmac0", 502, R8A77990_CLK_S1D2),
2393 + DEF_MOD("drif7", 508, R8A77990_CLK_S3D2),
2394 + DEF_MOD("drif6", 509, R8A77990_CLK_S3D2),
2395 + DEF_MOD("drif5", 510, R8A77990_CLK_S3D2),
2396 +diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
2397 +index eee3874865a9..68707277b17b 100644
2398 +--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
2399 ++++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
2400 +@@ -133,7 +133,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
2401 + DEF_MOD("rwdt", 402, R8A77995_CLK_R),
2402 + DEF_MOD("intc-ex", 407, R8A77995_CLK_CP),
2403 + DEF_MOD("intc-ap", 408, R8A77995_CLK_S1D2),
2404 +- DEF_MOD("audmac0", 502, R8A77995_CLK_S3D1),
2405 ++ DEF_MOD("audmac0", 502, R8A77995_CLK_S1D2),
2406 + DEF_MOD("hscif3", 517, R8A77995_CLK_S3D1C),
2407 + DEF_MOD("hscif0", 520, R8A77995_CLK_S3D1C),
2408 + DEF_MOD("thermal", 522, R8A77995_CLK_CP),
2409 +diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
2410 +index 5a67b7869960..355d6a3611db 100644
2411 +--- a/drivers/clk/rockchip/clk-rk3288.c
2412 ++++ b/drivers/clk/rockchip/clk-rk3288.c
2413 +@@ -219,7 +219,7 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
2414 + PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
2415 + PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
2416 +
2417 +-PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vepu", "aclk_vdpu" };
2418 ++PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vdpu", "aclk_vepu" };
2419 + PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
2420 + "sclk_otgphy0_480m" };
2421 + PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
2422 +@@ -313,13 +313,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2423 + COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
2424 + RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
2425 + RK3288_CLKGATE_CON(12), 6, GFLAGS),
2426 +- COMPOSITE_NOMUX(0, "atclk", "armclk", CLK_IGNORE_UNUSED,
2427 ++ COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
2428 + RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
2429 + RK3288_CLKGATE_CON(12), 7, GFLAGS),
2430 + COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
2431 + RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
2432 + RK3288_CLKGATE_CON(12), 8, GFLAGS),
2433 +- GATE(0, "pclk_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
2434 ++ GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
2435 + RK3288_CLKGATE_CON(12), 9, GFLAGS),
2436 + GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
2437 + RK3288_CLKGATE_CON(12), 10, GFLAGS),
2438 +@@ -420,7 +420,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2439 + COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
2440 + RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
2441 + RK3288_CLKGATE_CON(3), 11, GFLAGS),
2442 +- MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, 0,
2443 ++ MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
2444 + RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
2445 + GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
2446 + RK3288_CLKGATE_CON(9), 0, GFLAGS),
2447 +@@ -647,7 +647,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2448 + INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
2449 + RK3288_CLKSEL_CON(22), 7, IFLAGS),
2450 +
2451 +- GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
2452 ++ GATE(0, "jtag", "ext_jtag", 0,
2453 + RK3288_CLKGATE_CON(4), 14, GFLAGS),
2454 +
2455 + COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
2456 +@@ -656,7 +656,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2457 + COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
2458 + RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
2459 + RK3288_CLKGATE_CON(3), 6, GFLAGS),
2460 +- GATE(0, "hsicphy12m_xin12m", "xin12m", CLK_IGNORE_UNUSED,
2461 ++ GATE(0, "hsicphy12m_xin12m", "xin12m", 0,
2462 + RK3288_CLKGATE_CON(13), 9, GFLAGS),
2463 + DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0,
2464 + RK3288_CLKSEL_CON(11), 8, 6, DFLAGS),
2465 +@@ -697,7 +697,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2466 + GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
2467 + GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
2468 + GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
2469 +- GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
2470 ++ GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
2471 +
2472 + /* ddrctrl [DDR Controller PHY clock] gates */
2473 + GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
2474 +@@ -837,12 +837,9 @@ static const char *const rk3288_critical_clocks[] __initconst = {
2475 + "pclk_alive_niu",
2476 + "pclk_pd_pmu",
2477 + "pclk_pmu_niu",
2478 +- "pclk_core_niu",
2479 +- "pclk_ddrupctl0",
2480 +- "pclk_publ0",
2481 +- "pclk_ddrupctl1",
2482 +- "pclk_publ1",
2483 + "pmu_hclk_otg0",
2484 ++ /* pwm-regulators on some boards, so keep this critical until driver handoff */
2485 ++ "pclk_rkpwm",
2486 + };
2487 +
2488 + static void __iomem *rk3288_cru_base;
2489 +diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
2490 +index a371c66e72ef..bd9b5fbc443b 100644
2491 +--- a/drivers/clk/zynqmp/divider.c
2492 ++++ b/drivers/clk/zynqmp/divider.c
2493 +@@ -31,12 +31,14 @@
2494 + * struct zynqmp_clk_divider - adjustable divider clock
2495 + * @hw: handle between common and hardware-specific interfaces
2496 + * @flags: Hardware specific flags
2497 ++ * @is_frac: The divider is a fractional divider
2498 + * @clk_id: Id of clock
2499 + * @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2)
2500 + */
2501 + struct zynqmp_clk_divider {
2502 + struct clk_hw hw;
2503 + u8 flags;
2504 ++ bool is_frac;
2505 + u32 clk_id;
2506 + u32 div_type;
2507 + };
2508 +@@ -116,8 +118,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
2509 +
2510 + bestdiv = zynqmp_divider_get_val(*prate, rate);
2511 +
2512 +- if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) &&
2513 +- (divider->flags & CLK_FRAC))
2514 ++ if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
2515 + bestdiv = rate % *prate ? 1 : bestdiv;
2516 + *prate = rate * bestdiv;
2517 +
2518 +@@ -195,11 +196,13 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
2519 +
2520 + init.name = name;
2521 + init.ops = &zynqmp_clk_divider_ops;
2522 +- init.flags = nodes->flag;
2523 ++ /* CLK_FRAC is not defined in the common clk framework */
2524 ++ init.flags = nodes->flag & ~CLK_FRAC;
2525 + init.parent_names = parents;
2526 + init.num_parents = 1;
2527 +
2528 + /* struct clk_divider assignments */
2529 ++ div->is_frac = !!(nodes->flag & CLK_FRAC);
2530 + div->flags = nodes->type_flag;
2531 + div->hw.init = &init;
2532 + div->clk_id = clk_id;
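
The zynqmp divider fix latches the firmware-private CLK_FRAC bit into is_frac and masks it out of init.flags, since the common framework would misinterpret a foreign bit in its own flag space. A compact sketch of stripping private bits before handing flags upward (all names and bit values illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define FRAMEWORK_FLAG_A 0x1UL   /* bit the framework owns */
    #define PRIVATE_FRAC     0x8UL   /* our bit, overlapping that space */

    struct divider {
        bool is_frac;                /* private copy of the private bit */
        unsigned long fw_flags;      /* only framework-owned bits remain */
    };

    static void divider_setup(struct divider *d, unsigned long node_flags)
    {
        d->is_frac = !!(node_flags & PRIVATE_FRAC);
        d->fw_flags = node_flags & ~PRIVATE_FRAC;   /* never leak our bit */
    }

    int main(void)
    {
        struct divider d;

        divider_setup(&d, FRAMEWORK_FLAG_A | PRIVATE_FRAC);
        printf("is_frac=%d fw_flags=%#lx\n", d.is_frac, d.fw_flags);
        return 0;
    }
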
2533 +diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
2534 +index b3f4bd647e9b..988ebc326bdb 100644
2535 +--- a/drivers/cpufreq/armada-8k-cpufreq.c
2536 ++++ b/drivers/cpufreq/armada-8k-cpufreq.c
2537 +@@ -132,6 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
2538 + of_node_put(node);
2539 + return -ENODEV;
2540 + }
2541 ++ of_node_put(node);
2542 +
2543 + nb_cpus = num_possible_cpus();
2544 + freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
2545 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
2546 +index e10922709d13..bbf79544d0ad 100644
2547 +--- a/drivers/cpufreq/cpufreq.c
2548 ++++ b/drivers/cpufreq/cpufreq.c
2549 +@@ -1098,6 +1098,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
2550 + cpufreq_global_kobject