
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.0 commit in: /
Date: Fri, 31 May 2019 14:03:31
Message-Id: 1559311387.e62379d875e51b478db75a6675ecb6180f41ebbc.mpagano@gentoo
commit: e62379d875e51b478db75a6675ecb6180f41ebbc
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri May 31 14:03:07 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri May 31 14:03:07 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e62379d8

Linux patch 5.0.20

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1019_linux-5.0.20.patch | 11613 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 11617 insertions(+)

diff --git a/0000_README b/0000_README
index 599546c..cf5191b 100644
--- a/0000_README
+++ b/0000_README
@@ -119,6 +119,10 @@ Patch: 1018_linux-5.0.19.patch
From: http://www.kernel.org
Desc: Linux 5.0.19

+Patch: 1019_linux-5.0.20.patch
+From: http://www.kernel.org
+Desc: Linux 5.0.20
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1019_linux-5.0.20.patch b/1019_linux-5.0.20.patch
new file mode 100644
index 0000000..d10f5ca
--- /dev/null
+++ b/1019_linux-5.0.20.patch
@@ -0,0 +1,11613 @@
+diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
+index ddb8ce5333ba..7a7e271be3f1 100644
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -61,6 +61,7 @@ stable kernels.
+ | ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
+ | ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 |
+ | ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
++| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
+ | ARM | MMU-500 | #841119,#826419 | N/A |
+ | | | | |
+ | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
+diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+index 41a1074228ba..6b6ca4456dc7 100644
+--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
++++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+@@ -53,7 +53,8 @@ Required properties:
+ one for each entry in reset-names.
+ - reset-names: "phy" for reset of phy block,
+ "common" for phy common block reset,
+- "cfg" for phy's ahb cfg block reset.
++ "cfg" for phy's ahb cfg block reset,
++ "ufsphy" for the PHY reset in the UFS controller.
+
+ For "qcom,ipq8074-qmp-pcie-phy" must contain:
+ "phy", "common".
+@@ -65,7 +66,8 @@ Required properties:
+ "phy", "common".
+ For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
+ "phy", "common".
+- For "qcom,sdm845-qmp-ufs-phy": no resets are listed.
++ For "qcom,sdm845-qmp-ufs-phy": must contain:
++ "ufsphy".
+
+ - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
+ - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
+diff --git a/Makefile b/Makefile
+index 66efffc3fb41..25390977536b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 0
+-SUBLEVEL = 19
++SUBLEVEL = 20
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
+index 07e27f212dc7..d2453e2d3f1f 100644
+--- a/arch/arm/include/asm/cp15.h
++++ b/arch/arm/include/asm/cp15.h
+@@ -68,6 +68,8 @@
+ #define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
+ #define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+
++#define CNTVCT __ACCESS_CP15_64(1, c14)
++
+ extern unsigned long cr_alignment; /* defined in entry-armv.S */
+
+ static inline unsigned long get_cr(void)
+diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
+index a9dd619c6c29..7bdbf5d5c47d 100644
+--- a/arch/arm/vdso/vgettimeofday.c
++++ b/arch/arm/vdso/vgettimeofday.c
+@@ -18,9 +18,9 @@
+ #include <linux/compiler.h>
+ #include <linux/hrtimer.h>
+ #include <linux/time.h>
+-#include <asm/arch_timer.h>
+ #include <asm/barrier.h>
+ #include <asm/bug.h>
++#include <asm/cp15.h>
+ #include <asm/page.h>
+ #include <asm/unistd.h>
+ #include <asm/vdso_datapage.h>
+@@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata)
+ u64 cycle_now;
+ u64 nsec;
+
+- cycle_now = arch_counter_get_cntvct();
++ isb();
++ cycle_now = read_sysreg(CNTVCT);
+
+ cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
+
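Note: the two ARM vDSO hunks above drop the dependency on arch_timer driver internals and read the virtual counter directly through the new CNTVCT CP15 accessor. A minimal sketch of what that read expands to on 32-bit ARM — the inline asm is an assumption based on the __ACCESS_CP15_64(1, c14) definition above, not code from this patch:

	/* hedged sketch: 64-bit MRRC read of the virtual counter */
	static inline u64 read_cntvct(void)
	{
		u64 cval;
		isb();	/* order the counter read against prior accesses */
		asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
		return cval;
	}
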
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index a4168d366127..4535b2b48fd9 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -518,6 +518,24 @@ config ARM64_ERRATUM_1286807
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_1463225
++ bool "Cortex-A76: Software Step might prevent interrupt recognition"
++ default y
++ help
++ This option adds a workaround for Arm Cortex-A76 erratum 1463225.
++
++ On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
++ of a system call instruction (SVC) can prevent recognition of
++ subsequent interrupts when software stepping is disabled in the
++ exception handler of the system call and either kernel debugging
++ is enabled or VHE is in use.
++
++ Work around the erratum by triggering a dummy step exception
++ when handling a system call from a task that is being stepped
++ in a VHE configuration of the kernel.
++
++ If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ bool "Cavium erratum 22375, 24313"
+ default y
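Note: the moving parts of this workaround are spread across several hunks below — a per-CPU cpucap match (cpu_errata.c), a hook on the syscall entry path (syscall.c), and a debug-fault handler that consumes the dummy step exception (fault.c). Condensed from those hunks (a paraphrase, not literal kernel code), the handshake looks like:

	/* syscall entry, on an affected CPU with TIF_SINGLESTEP set */
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	write_sysreg(mdscr | DBG_MDSCR_SS | DBG_MDSCR_KDE, mdscr_el1);
	asm volatile("msr daifclr, #8");	/* unmask debug (PSTATE.D) */
	isb();				/* dummy step exception fires here;
					 * the handler sets PSR_D_BIT in regs
					 * and returns with debug masked */
	write_sysreg(mdscr, mdscr_el1);	/* restore, workaround complete */
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
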
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index 82e9099834ae..99db8de83734 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -60,7 +60,8 @@
+ #define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39
+ #define ARM64_HAS_GENERIC_AUTH_ARCH 40
+ #define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
++#define ARM64_WORKAROUND_1463225 42
+
+-#define ARM64_NCAPS 42
++#define ARM64_NCAPS 43
+
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index de70c1eabf33..74ebe9693714 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -478,6 +478,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
+ return __pmd_to_phys(pmd);
+ }
+
++static inline void pte_unmap(pte_t *pte) { }
++
+ /* Find an entry in the third-level page table. */
+ #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+@@ -486,7 +488,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
+
+ #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
+ #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
+-#define pte_unmap(pte) do { } while (0)
+ #define pte_unmap_nested(pte) do { } while (0)
+
+ #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
+diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
+index 2b9a63771eda..f89263c8e11a 100644
+--- a/arch/arm64/include/asm/vdso_datapage.h
++++ b/arch/arm64/include/asm/vdso_datapage.h
+@@ -38,6 +38,7 @@ struct vdso_data {
+ __u32 tz_minuteswest; /* Whacky timezone stuff */
+ __u32 tz_dsttime;
+ __u32 use_syscall;
++ __u32 hrtimer_res;
+ };
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index 65b8afc84466..ddcd3ea87b81 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -102,7 +102,7 @@ int main(void)
+ DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
+ DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+ DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
+- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
++ DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res));
+ DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
+ DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
+ DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 9950bb0cbd52..87019cd73f22 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -464,6 +464,22 @@ out_printmsg:
+ }
+ #endif /* CONFIG_ARM64_SSBD */
+
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
++
++static bool
++has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
++ int scope)
++{
++ u32 midr = read_cpuid_id();
++ /* Cortex-A76 r0p0 - r3p1 */
++ struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
++
++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++ return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
++}
++#endif
++
+ static void __maybe_unused
+ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
+ {
+@@ -738,6 +754,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ .capability = ARM64_WORKAROUND_1165522,
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ },
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++ {
++ .desc = "ARM erratum 1463225",
++ .capability = ARM64_WORKAROUND_1463225,
++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++ .matches = has_cortex_a76_erratum_1463225,
++ },
+ #endif
+ {
+ }
+diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
+index ea001241bdd4..00f8b8612b69 100644
+--- a/arch/arm64/kernel/cpu_ops.c
++++ b/arch/arm64/kernel/cpu_ops.c
+@@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu)
+ pr_err("%pOF: missing enable-method property\n",
+ dn);
+ }
++ of_node_put(dn);
+ } else {
+ enable_method = acpi_get_enable_method(cpu);
+ if (!enable_method) {
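Note: the one-line cpu_ops.c change above closes a device-node refcount leak. A minimal sketch of the pattern being enforced, assuming of_get_cpu_node() as the lookup (illustrative, not this exact call site):

	struct device_node *dn = of_get_cpu_node(cpu, NULL); /* takes a ref */
	if (dn) {
		/* ... read the enable-method property ... */
		of_node_put(dn);	/* must balance the get on every path */
	}
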
+diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
+index b09b6f75f759..06941c1fe418 100644
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+ /*
+- * Randomize the module region over a 4 GB window covering the
++ * Randomize the module region over a 2 GB window covering the
+ * kernel. This reduces the risk of modules leaking information
+ * about the address of the kernel itself, but results in
+ * branches between modules and the core kernel that are
+ * resolved via PLTs. (Branches between modules will be
+ * resolved normally.)
+ */
+- module_range = SZ_4G - (u64)(_end - _stext);
+- module_alloc_base = max((u64)_end + offset - SZ_4G,
++ module_range = SZ_2G - (u64)(_end - _stext);
++ module_alloc_base = max((u64)_end + offset - SZ_2G,
+ (u64)MODULES_VADDR);
+ } else {
+ /*
+diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
+index f713e2fc4d75..1e418e69b58c 100644
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -56,7 +56,7 @@ void *module_alloc(unsigned long size)
+ * can simply omit this fallback in that case.
+ */
+ p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
+- module_alloc_base + SZ_4G, GFP_KERNEL,
++ module_alloc_base + SZ_2G, GFP_KERNEL,
+ PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+
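Note: the two hunks above shrink the module randomization window and the module_alloc() range from 4 GB to 2 GB in lockstep. The likely motivation (hedged — it is not stated in the hunks) is that 32-bit place-relative references, such as relative __ksymtab entries, only span a signed 32-bit offset: 2^31 bytes = 2 GiB in either direction. An illustrative helper for how such a reference resolves:

	/* a 32-bit offset is relative to its own address, so its reach is
	 * +/- 2 GiB -- the module region must fit inside that window */
	static inline void *offset_to_ptr(const int *off)
	{
		return (void *)((unsigned long)off + *off);
	}
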
+diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
+index 5610ac01c1ec..871c739f060a 100644
+--- a/arch/arm64/kernel/syscall.c
++++ b/arch/arm64/kernel/syscall.c
+@@ -8,6 +8,7 @@
+ #include <linux/syscalls.h>
+
+ #include <asm/daifflags.h>
++#include <asm/debug-monitors.h>
+ #include <asm/fpsimd.h>
+ #include <asm/syscall.h>
+ #include <asm/thread_info.h>
+@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
+ int syscall_trace_enter(struct pt_regs *regs);
+ void syscall_trace_exit(struct pt_regs *regs);
+
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
++
++static void cortex_a76_erratum_1463225_svc_handler(void)
++{
++ u32 reg, val;
++
++ if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
++ return;
++
++ if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
++ return;
++
++ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
++ reg = read_sysreg(mdscr_el1);
++ val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
++ write_sysreg(val, mdscr_el1);
++ asm volatile("msr daifclr, #8");
++ isb();
++
++ /* We will have taken a single-step exception by this point */
++
++ write_sysreg(reg, mdscr_el1);
++ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
++}
++#else
++static void cortex_a76_erratum_1463225_svc_handler(void) { }
++#endif /* CONFIG_ARM64_ERRATUM_1463225 */
++
+ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ const syscall_fn_t syscall_table[])
+ {
+@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ regs->orig_x0 = regs->regs[0];
+ regs->syscallno = scno;
+
++ cortex_a76_erratum_1463225_svc_handler();
+ local_daif_restore(DAIF_PROCCTX);
+ user_exit();
+
+diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
+index 2d419006ad43..ec0bb588d755 100644
+--- a/arch/arm64/kernel/vdso.c
++++ b/arch/arm64/kernel/vdso.c
+@@ -232,6 +232,9 @@ void update_vsyscall(struct timekeeper *tk)
+ vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+
++ /* Read without the seqlock held by clock_getres() */
++ WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);
++
+ if (!use_syscall) {
+ /* tkr_mono.cycle_last == tkr_raw.cycle_last */
+ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
+diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
+index e8f60112818f..856fee6d3512 100644
+--- a/arch/arm64/kernel/vdso/gettimeofday.S
++++ b/arch/arm64/kernel/vdso/gettimeofday.S
+@@ -308,13 +308,14 @@ ENTRY(__kernel_clock_getres)
+ ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
+ b.ne 1f
+
+- ldr x2, 5f
++ adr vdso_data, _vdso_data
++ ldr w2, [vdso_data, #CLOCK_REALTIME_RES]
+ b 2f
+ 1:
+ cmp w0, #CLOCK_REALTIME_COARSE
+ ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
+ b.ne 4f
+- ldr x2, 6f
++ ldr x2, 5f
+ 2:
+ cbz x1, 3f
+ stp xzr, x2, [x1]
+@@ -328,8 +329,6 @@ ENTRY(__kernel_clock_getres)
+ svc #0
+ ret
+ 5:
+- .quad CLOCK_REALTIME_RES
+-6:
+ .quad CLOCK_COARSE_RES
+ .cfi_endproc
+ ENDPROC(__kernel_clock_getres)
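Note: together with the vdso_data->hrtimer_res plumbing above, __kernel_clock_getres() now returns the live hrtimer resolution instead of a constant baked in at build time. A hedged C rendering of the new fast path (names are illustrative):

	int clock_getres(clockid_t id, struct timespec *res)
	{
		if (res) {
			res->tv_sec = 0;
			/* loaded from the vDSO data page, which
			 * update_vsyscall() refreshes via WRITE_ONCE() */
			res->tv_nsec = READ_ONCE(vdata->hrtimer_res);
		}
		return 0;
	}
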
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index 78c0a72f822c..674860e3e478 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -249,6 +249,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
++ if (!is_vmalloc_addr(cpu_addr)) {
++ unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
++ return __swiotlb_mmap_pfn(vma, pfn, size);
++ }
++
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ /*
+ * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+@@ -272,6 +277,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
++ if (!is_vmalloc_addr(cpu_addr)) {
++ struct page *page = virt_to_page(cpu_addr);
++ return __swiotlb_get_sgtable_page(sgt, page, size);
++ }
++
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ /*
+ * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index ef46925096f0..d3bdef0b2f60 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -824,14 +824,47 @@ void __init hook_debug_fault_code(int nr,
+ debug_fault_info[nr].name = name;
+ }
+
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
++
++static int __exception
++cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
++{
++ if (user_mode(regs))
++ return 0;
++
++ if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
++ return 0;
++
++ /*
++ * We've taken a dummy step exception from the kernel to ensure
++ * that interrupts are re-enabled on the syscall path. Return back
++ * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
++ * masked so that we can safely restore the mdscr and get on with
++ * handling the syscall.
++ */
++ regs->pstate |= PSR_D_BIT;
++ return 1;
++}
++#else
++static int __exception
++cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
++{
++ return 0;
++}
++#endif /* CONFIG_ARM64_ERRATUM_1463225 */
++
+ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
+- unsigned int esr,
+- struct pt_regs *regs)
++ unsigned int esr,
++ struct pt_regs *regs)
+ {
+ const struct fault_info *inf = esr_to_debug_fault_info(esr);
+ unsigned long pc = instruction_pointer(regs);
+ int rv;
+
++ if (cortex_a76_erratum_1463225_debug_handler(regs))
++ return 0;
++
+ /*
+ * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
+ * already disabled to preserve the last enabled/disabled addresses.
+diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
+index 9d9f6f334d3c..3da3e2b1b51b 100644
+--- a/arch/powerpc/boot/addnote.c
++++ b/arch/powerpc/boot/addnote.c
+@@ -223,7 +223,11 @@ main(int ac, char **av)
+ PUT_16(E_PHNUM, np + 2);
+
+ /* write back */
+- lseek(fd, (long) 0, SEEK_SET);
++ i = lseek(fd, (long) 0, SEEK_SET);
++ if (i < 0) {
++ perror("lseek");
++ exit(1);
++ }
+ i = write(fd, buf, n);
+ if (i < 0) {
+ perror("write");
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index 4898e9491a1c..9168a247e24f 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -970,7 +970,9 @@ start_here_multiplatform:
+
+ /* Restore parameters passed from prom_init/kexec */
+ mr r3,r31
+- bl early_setup /* also sets r13 and SPRG_PACA */
++ LOAD_REG_ADDR(r12, DOTSYM(early_setup))
++ mtctr r12
++ bctrl /* also sets r13 and SPRG_PACA */
+
+ LOAD_REG_ADDR(r3, start_here_common)
+ ld r4,PACAKMSR(r13)
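Note: replacing the relative `bl early_setup` with a load-address/mtctr/bctrl sequence makes the call distance-independent. Hedged rationale (not stated in the hunk): a PowerPC I-form branch encodes a 24-bit signed word displacement, so its reach is

	24-bit immediate << 2 = +/- 2^25 bytes = +/- 32 MiB,

whereas an indirect branch through the CTR register can reach any address, which matters when the target may land outside that window.
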
+diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
+index 3c6ab22a0c4e..af3c15a1d41e 100644
+--- a/arch/powerpc/kernel/watchdog.c
++++ b/arch/powerpc/kernel/watchdog.c
+@@ -77,7 +77,7 @@ static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */
+
+ static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */
+
+-static DEFINE_PER_CPU(struct timer_list, wd_timer);
++static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer);
+ static DEFINE_PER_CPU(u64, wd_timer_tb);
+
+ /* SMP checker bits */
+@@ -293,21 +293,21 @@ out:
+ nmi_exit();
+ }
+
+-static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
+-{
+- t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
+- if (wd_timer_period_ms > 1000)
+- t->expires = __round_jiffies_up(t->expires, cpu);
+- add_timer_on(t, cpu);
+-}
+-
+-static void wd_timer_fn(struct timer_list *t)
++static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ {
+ int cpu = smp_processor_id();
+
++ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
++ return HRTIMER_NORESTART;
++
++ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
++ return HRTIMER_NORESTART;
++
+ watchdog_timer_interrupt(cpu);
+
+- wd_timer_reset(cpu, t);
++ hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms));
++
++ return HRTIMER_RESTART;
+ }
+
+ void arch_touch_nmi_watchdog(void)
+@@ -323,37 +323,22 @@ void arch_touch_nmi_watchdog(void)
+ }
+ EXPORT_SYMBOL(arch_touch_nmi_watchdog);
+
+-static void start_watchdog_timer_on(unsigned int cpu)
+-{
+- struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
+-
+- per_cpu(wd_timer_tb, cpu) = get_tb();
+-
+- timer_setup(t, wd_timer_fn, TIMER_PINNED);
+- wd_timer_reset(cpu, t);
+-}
+-
+-static void stop_watchdog_timer_on(unsigned int cpu)
+-{
+- struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
+-
+- del_timer_sync(t);
+-}
+-
+-static int start_wd_on_cpu(unsigned int cpu)
++static void start_watchdog(void *arg)
+ {
++ struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
++ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
+ WARN_ON(1);
+- return 0;
++ return;
+ }
+
+ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+- return 0;
++ return;
+
+ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
+- return 0;
++ return;
+
+ wd_smp_lock(&flags);
+ cpumask_set_cpu(cpu, &wd_cpus_enabled);
+@@ -363,27 +348,40 @@ static int start_wd_on_cpu(unsigned int cpu)
+ }
+ wd_smp_unlock(&flags);
+
+- start_watchdog_timer_on(cpu);
++ *this_cpu_ptr(&wd_timer_tb) = get_tb();
+
+- return 0;
++ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer->function = watchdog_timer_fn;
++ hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
++ HRTIMER_MODE_REL_PINNED);
+ }
+
+-static int stop_wd_on_cpu(unsigned int cpu)
++static int start_watchdog_on_cpu(unsigned int cpu)
+ {
++ return smp_call_function_single(cpu, start_watchdog, NULL, true);
++}
++
++static void stop_watchdog(void *arg)
++{
++ struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
++ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
+- return 0; /* Can happen in CPU unplug case */
++ return; /* Can happen in CPU unplug case */
+
+- stop_watchdog_timer_on(cpu);
++ hrtimer_cancel(hrtimer);
+
+ wd_smp_lock(&flags);
+ cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+ wd_smp_unlock(&flags);
+
+ wd_smp_clear_cpu_pending(cpu, get_tb());
++}
+
+- return 0;
++static int stop_watchdog_on_cpu(unsigned int cpu)
++{
++ return smp_call_function_single(cpu, stop_watchdog, NULL, true);
+ }
+
+ static void watchdog_calc_timeouts(void)
+@@ -402,7 +400,7 @@ void watchdog_nmi_stop(void)
+ int cpu;
+
+ for_each_cpu(cpu, &wd_cpus_enabled)
+- stop_wd_on_cpu(cpu);
++ stop_watchdog_on_cpu(cpu);
+ }
+
+ void watchdog_nmi_start(void)
+@@ -411,7 +409,7 @@ void watchdog_nmi_start(void)
+
+ watchdog_calc_timeouts();
+ for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
+- start_wd_on_cpu(cpu);
++ start_watchdog_on_cpu(cpu);
+ }
+
+ /*
+@@ -423,7 +421,8 @@ int __init watchdog_nmi_probe(void)
+
+ err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "powerpc/watchdog:online",
+- start_wd_on_cpu, stop_wd_on_cpu);
++ start_watchdog_on_cpu,
++ stop_watchdog_on_cpu);
+ if (err < 0) {
+ pr_warn("could not be initialized");
+ return err;
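Note: the conversion above swaps a jiffies-based timer_list for a pinned hrtimer, and — since hrtimer_start() arms the timer on the calling CPU — start/stop are now funnelled through smp_call_function_single() so each per-CPU hrtimer is always armed from its own CPU. The core self-rearming pattern, condensed from the hunks (a sketch, not literal kernel code):

	static enum hrtimer_restart cb(struct hrtimer *t)
	{
		/* ... per-CPU heartbeat work ... */
		hrtimer_forward_now(t, ms_to_ktime(period_ms));
		return HRTIMER_RESTART;	/* keep firing every period_ms */
	}

	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->function = cb;
	hrtimer_start(t, ms_to_ktime(period_ms), HRTIMER_MODE_REL_PINNED);
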
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index b5d1c45c1475..2a85a4bcc277 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -1494,6 +1494,9 @@ int start_topology_update(void)
+ {
+ int rc = 0;
+
++ if (!topology_updates_enabled)
++ return 0;
++
+ if (firmware_has_feature(FW_FEATURE_PRRN)) {
+ if (!prrn_enabled) {
+ prrn_enabled = 1;
+@@ -1527,6 +1530,9 @@ int stop_topology_update(void)
+ {
+ int rc = 0;
+
++ if (!topology_updates_enabled)
++ return 0;
++
+ if (prrn_enabled) {
+ prrn_enabled = 0;
+ #ifdef CONFIG_SMP
+@@ -1584,11 +1590,13 @@ static ssize_t topology_write(struct file *file, const char __user *buf,
+
+ kbuf[read_len] = '\0';
+
+- if (!strncmp(kbuf, "on", 2))
++ if (!strncmp(kbuf, "on", 2)) {
++ topology_updates_enabled = true;
+ start_topology_update();
+- else if (!strncmp(kbuf, "off", 3))
++ } else if (!strncmp(kbuf, "off", 3)) {
+ stop_topology_update();
+- else
++ topology_updates_enabled = false;
++ } else
+ return -EINVAL;
+
+ return count;
+@@ -1603,9 +1611,7 @@ static const struct file_operations topology_ops = {
+
+ static int topology_update_init(void)
+ {
+- /* Do not poll for changes if disabled at boot */
+- if (topology_updates_enabled)
+- start_topology_update();
++ start_topology_update();
+
+ if (vphn_enabled)
+ topology_schedule_update();
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index f292a3f284f1..d1009fe3130b 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -496,6 +496,11 @@ static int nest_imc_event_init(struct perf_event *event)
+ * Get the base memory addresss for this cpu.
+ */
+ chip_id = cpu_to_chip_id(event->cpu);
++
++ /* Return, if chip_id is not valid */
++ if (chip_id < 0)
++ return -ENODEV;
++
+ pcni = pmu->mem_info;
+ do {
+ if (pcni->id == chip_id) {
+@@ -503,7 +508,7 @@ static int nest_imc_event_init(struct perf_event *event)
+ break;
+ }
+ pcni++;
+- } while (pcni);
++ } while (pcni->vbase != 0);
+
+ if (!flag)
+ return -ENODEV;
+diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
+index 58a07948c76e..3d27f02695e4 100644
+--- a/arch/powerpc/platforms/powernv/opal-imc.c
++++ b/arch/powerpc/platforms/powernv/opal-imc.c
+@@ -127,7 +127,7 @@ static int imc_get_mem_addr_nest(struct device_node *node,
+ nr_chips))
+ goto error;
+
+- pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(*pmu_ptr->mem_info),
++ pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
+ GFP_KERNEL);
+ if (!pmu_ptr->mem_info)
+ goto error;
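Note: the imc-pmu.c and opal-imc.c hunks work as a pair: `} while (pcni)` never terminated at the array end (the pointer itself stays non-NULL past the last element), so the allocation now reserves one extra zeroed entry and the loop stops at the first entry whose vbase is 0. The resulting sentinel scan, as an illustrative sketch:

	/* kcalloc(nr_chips + 1, ...) guarantees a zeroed terminator */
	for (pcni = pmu->mem_info; pcni->vbase; pcni++) {
		if (pcni->id == chip_id) {
			flag = true;	/* found this chip's counter memory */
			break;
		}
	}
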
+diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
+index 5a286b012043..602e7cc26d11 100644
+--- a/arch/s390/kernel/kexec_elf.c
++++ b/arch/s390/kernel/kexec_elf.c
+@@ -19,10 +19,15 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
+ struct kexec_buf buf;
+ const Elf_Ehdr *ehdr;
+ const Elf_Phdr *phdr;
++ Elf_Addr entry;
+ int i, ret;
+
+ ehdr = (Elf_Ehdr *)kernel;
+ buf.image = image;
++ if (image->type == KEXEC_TYPE_CRASH)
++ entry = STARTUP_KDUMP_OFFSET;
++ else
++ entry = ehdr->e_entry;
+
+ phdr = (void *)ehdr + ehdr->e_phoff;
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+@@ -35,7 +40,7 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
+ buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
+ buf.memsz = phdr->p_memsz;
+
+- if (phdr->p_paddr == 0) {
++ if (entry - phdr->p_paddr < phdr->p_memsz) {
+ data->kernel_buf = buf.buffer;
+ data->memsz += STARTUP_NORMAL_OFFSET;
+
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index f2cc7da473e4..ae894ac83fd6 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -410,6 +410,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
+ return old;
+ }
+
++#ifdef CONFIG_PGSTE
+ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+ {
+ pgd_t *pgd;
+@@ -427,6 +428,7 @@ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+ pmd = pmd_alloc(mm, pud, addr);
+ return pmd;
+ }
++#endif
+
+ pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t new)
+diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h
+index 8f9bfbf3cdb1..d6cce65b4871 100644
+--- a/arch/sh/include/cpu-sh4/cpu/sh7786.h
++++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h
+@@ -132,7 +132,7 @@ enum {
+
+ static inline u32 sh7786_mm_sel(void)
+ {
+- return __raw_readl(0xFC400020) & 0x7;
++ return __raw_readl((const volatile void __iomem *)0xFC400020) & 0x7;
+ }
+
+ #endif /* __CPU_SH7786_H__ */
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index c0c7291d4ccf..2cf52617a1e7 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -47,7 +47,7 @@ export REALMODE_CFLAGS
+ export BITS
+
+ ifdef CONFIG_X86_NEED_RELOCS
+- LDFLAGS_vmlinux := --emit-relocs
++ LDFLAGS_vmlinux := --emit-relocs --discard-none
+ endif
+
+ #
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index 56194c571299..4a650eb3d94a 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -584,6 +584,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
++
++ X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
+ { },
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
+diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
+index 91039ffed633..2413169ce362 100644
+--- a/arch/x86/events/intel/rapl.c
++++ b/arch/x86/events/intel/rapl.c
+@@ -780,6 +780,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
+
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
++
++ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init),
+ {},
+ };
+
+diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
+index 1b9f85abf9bc..ace6c1e752fb 100644
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -89,6 +89,7 @@ static bool test_intel(int idx)
+ case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
++ case INTEL_FAM6_ICELAKE_MOBILE:
+ if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
+ return true;
+ break;
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index 321fe5f5d0e9..4d5fcd47ab75 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -61,9 +61,8 @@
+ } while (0)
+
+ #define RELOAD_SEG(seg) { \
+- unsigned int pre = GET_SEG(seg); \
++ unsigned int pre = (seg) | 3; \
+ unsigned int cur = get_user_seg(seg); \
+- pre |= 3; \
+ if (pre != cur) \
+ set_user_seg(seg, pre); \
+ }
+@@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext_32 __user *sc)
+ {
+ unsigned int tmpflags, err = 0;
++ u16 gs, fs, es, ds;
+ void __user *buf;
+ u32 tmp;
+
+@@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ current->restart_block.fn = do_no_restart_syscall;
+
+ get_user_try {
+- /*
+- * Reload fs and gs if they have changed in the signal
+- * handler. This does not handle long fs/gs base changes in
+- * the handler, but does not clobber them at least in the
+- * normal case.
+- */
+- RELOAD_SEG(gs);
+- RELOAD_SEG(fs);
+- RELOAD_SEG(ds);
+- RELOAD_SEG(es);
++ gs = GET_SEG(gs);
++ fs = GET_SEG(fs);
++ ds = GET_SEG(ds);
++ es = GET_SEG(es);
+
+ COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
+ COPY(dx); COPY(cx); COPY(ip); COPY(ax);
+@@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ buf = compat_ptr(tmp);
+ } get_user_catch(err);
+
++ /*
++ * Reload fs and gs if they have changed in the signal
++ * handler. This does not handle long fs/gs base changes in
++ * the handler, but does not clobber them at least in the
++ * normal case.
++ */
++ RELOAD_SEG(gs);
++ RELOAD_SEG(fs);
++ RELOAD_SEG(ds);
++ RELOAD_SEG(es);
++
+ err |= fpu__restore_sig(buf, 1);
+
+ force_iret();
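Note: the selectors are now fetched into locals inside get_user_try, and the actual segment-register writes are deferred until after get_user_catch. The likely reason (hedged; the same hoisting is applied to frame_uc_flags() in arch/x86/kernel/signal.c further down) is that everything between the uaccess begin/end markers runs with SMAP temporarily disabled (EFLAGS.AC = 1), so only user-memory accessors belong inside that window. Illustrative shape of the rule:

	__uaccess_begin();		/* STAC: user-access window opens */
	/* only get_user-style accessors in here */
	__uaccess_end();		/* CLAC: window closes */
	set_user_seg(gs, gs | 3);	/* arbitrary work happens outside */
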
+diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
+index 05861cc08787..0bbb07eaed6b 100644
+--- a/arch/x86/include/asm/text-patching.h
++++ b/arch/x86/include/asm/text-patching.h
+@@ -39,6 +39,7 @@ extern int poke_int3_handler(struct pt_regs *regs);
+ extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+ extern int after_bootmem;
+
++#ifndef CONFIG_UML_X86
+ static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+ {
+ regs->ip = ip;
+@@ -65,6 +66,7 @@ static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
+ int3_emulate_jmp(regs, func);
+ }
+-#endif
++#endif /* CONFIG_X86_64 */
++#endif /* !CONFIG_UML_X86 */
+
+ #endif /* _ASM_X86_TEXT_PATCHING_H */
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index f3aed639dccd..2b0dd1b9c208 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -431,10 +431,11 @@ do { \
+ ({ \
+ __label__ __pu_label; \
+ int __pu_err = -EFAULT; \
+- __typeof__(*(ptr)) __pu_val; \
+- __pu_val = x; \
++ __typeof__(*(ptr)) __pu_val = (x); \
++ __typeof__(ptr) __pu_ptr = (ptr); \
++ __typeof__(size) __pu_size = (size); \
+ __uaccess_begin(); \
+- __put_user_size(__pu_val, (ptr), (size), __pu_label); \
++ __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \
+ __pu_err = 0; \
+ __pu_label: \
+ __uaccess_end(); \
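Note: the uaccess.h hunk is classic macro hygiene — ptr and size are now captured exactly once, so argument expressions with side effects cannot be evaluated multiple times by the macro body. A small illustration of the hazard being closed (hypothetical caller, not from the patch):

	u32 __user *p = uarray;
	put_user(v, p++);	/* with multiple expansion, p could be
				 * incremented more than once, and the
				 * store could target the wrong slot */
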
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index ebeac487a20c..2db985513917 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -666,15 +666,29 @@ void __init alternative_instructions(void)
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+ void *__init_or_module text_poke_early(void *addr, const void *opcode,
+- size_t len)
++ size_t len)
+ {
+ unsigned long flags;
+- local_irq_save(flags);
+- memcpy(addr, opcode, len);
+- local_irq_restore(flags);
+- sync_core();
+- /* Could also do a CLFLUSH here to speed up CPU recovery; but
+- that causes hangs on some VIA CPUs. */
++
++ if (boot_cpu_has(X86_FEATURE_NX) &&
++ is_module_text_address((unsigned long)addr)) {
++ /*
++ * Modules text is marked initially as non-executable, so the
++ * code cannot be running and speculative code-fetches are
++ * prevented. Just change the code.
++ */
++ memcpy(addr, opcode, len);
++ } else {
++ local_irq_save(flags);
++ memcpy(addr, opcode, len);
++ local_irq_restore(flags);
++ sync_core();
++
++ /*
++ * Could also do a CLFLUSH here to speed up CPU recovery; but
++ * that causes hangs on some VIA CPUs.
++ */
++ }
+ return addr;
+ }
+
+diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
+index cf25405444ab..415621ddb8a2 100644
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -19,6 +19,8 @@
+
+ #include "cpu.h"
+
++#define APICID_SOCKET_ID_BIT 6
++
+ /*
+ * nodes_per_socket: Stores the number of nodes per socket.
+ * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
+@@ -87,6 +89,9 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
+ if (!err)
+ c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+
++ /* Socket ID is ApicId[6] for these processors. */
++ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
++
+ cacheinfo_hygon_init_llc_id(c, cpu, node_id);
+ } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+ u64 value;
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 1a7084ba9a3b..9e6a94c208e0 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -712,19 +712,49 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
+
+ barrier();
+ m.status = mce_rdmsrl(msr_ops.status(i));
++
++ /* If this entry is not valid, ignore it */
+ if (!(m.status & MCI_STATUS_VAL))
+ continue;
+
+ /*
+- * Uncorrected or signalled events are handled by the exception
+- * handler when it is enabled, so don't process those here.
+- *
+- * TBD do the same check for MCI_STATUS_EN here?
++ * If we are logging everything (at CPU online) or this
++ * is a corrected error, then we must log it.
+ */
+- if (!(flags & MCP_UC) &&
+- (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
+- continue;
++ if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
++ goto log_it;
++
++ /*
++ * Newer Intel systems that support software error
++ * recovery need to make additional checks. Other
++ * CPUs should skip over uncorrected errors, but log
++ * everything else.
++ */
++ if (!mca_cfg.ser) {
++ if (m.status & MCI_STATUS_UC)
++ continue;
++ goto log_it;
++ }
++
++ /* Log "not enabled" (speculative) errors */
++ if (!(m.status & MCI_STATUS_EN))
++ goto log_it;
++
++ /*
++ * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
++ * UC == 1 && PCC == 0 && S == 0
++ */
++ if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
++ goto log_it;
++
++ /*
++ * Skip anything else. Presumption is that our read of this
++ * bank is racing with a machine check. Leave the log alone
++ * for do_machine_check() to deal with it.
++ */
++ continue;
+
++log_it:
+ error_seen = true;
+
+ mce_read_aux(&m, i);
+@@ -1451,13 +1481,12 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
+ static int __mcheck_cpu_mce_banks_init(void)
+ {
+ int i;
+- u8 num_banks = mca_cfg.banks;
+
+- mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL);
++ mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL);
+ if (!mce_banks)
+ return -ENOMEM;
+
+- for (i = 0; i < num_banks; i++) {
++ for (i = 0; i < MAX_NR_BANKS; i++) {
+ struct mce_bank *b = &mce_banks[i];
+
+ b->ctl = -1ULL;
+@@ -1471,28 +1500,19 @@ static int __mcheck_cpu_mce_banks_init(void)
+ */
+ static int __mcheck_cpu_cap_init(void)
+ {
+- unsigned b;
+ u64 cap;
++ u8 b;
+
+ rdmsrl(MSR_IA32_MCG_CAP, cap);
+
+ b = cap & MCG_BANKCNT_MASK;
+- if (!mca_cfg.banks)
+- pr_info("CPU supports %d MCE banks\n", b);
+-
+- if (b > MAX_NR_BANKS) {
+- pr_warn("Using only %u machine check banks out of %u\n",
+- MAX_NR_BANKS, b);
++ if (WARN_ON_ONCE(b > MAX_NR_BANKS))
+ b = MAX_NR_BANKS;
+- }
+
+- /* Don't support asymmetric configurations today */
+- WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
+- mca_cfg.banks = b;
++ mca_cfg.banks = max(mca_cfg.banks, b);
+
+ if (!mce_banks) {
+ int err = __mcheck_cpu_mce_banks_init();
+-
+ if (err)
+ return err;
+ }
+@@ -2459,6 +2479,8 @@ EXPORT_SYMBOL_GPL(mcsafe_key);
+
+ static int __init mcheck_late_init(void)
+ {
++ pr_info("Using %d MCE banks\n", mca_cfg.banks);
++
+ if (mca_cfg.recovery)
+ static_branch_inc(&mcsafe_key);
+
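Note: the machine_check_poll() rewrite above turns one opaque compound condition into an explicit classification. Summarized as a decision table (derived from the hunk, per-bank status bits):

	VAL == 0                      -> skip (entry invalid)
	MCP_UC set, or UC == 0        -> log (poll-everything mode / corrected)
	!mca_cfg.ser and UC == 1      -> skip (no software error recovery)
	EN == 0                       -> log ("not enabled"/speculative error)
	UC == 1, PCC == 0, S == 0     -> log (UCNA, SDM 15.6.3)
	anything else                 -> skip; leave for do_machine_check()

The companion change sizes mce_banks[] at MAX_NR_BANKS up front and makes mca_cfg.banks a running maximum, so CPUs with differing bank counts can no longer index past the allocation.
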
+diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
+index 8492ef7d9015..3f82afd0f46f 100644
+--- a/arch/x86/kernel/cpu/mce/inject.c
++++ b/arch/x86/kernel/cpu/mce/inject.c
+@@ -46,8 +46,6 @@
+ static struct mce i_mce;
+ static struct dentry *dfs_inj;
+
+-static u8 n_banks;
+-
+ #define MAX_FLAG_OPT_SIZE 4
+ #define NBCFG 0x44
+
+@@ -570,9 +568,15 @@ err:
+ static int inj_bank_set(void *data, u64 val)
+ {
+ struct mce *m = (struct mce *)data;
++ u8 n_banks;
++ u64 cap;
++
++ /* Get bank count on target CPU so we can handle non-uniform values. */
++ rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
++ n_banks = cap & MCG_BANKCNT_MASK;
+
+ if (val >= n_banks) {
+- pr_err("Non-existent MCE bank: %llu\n", val);
++ pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu);
+ return -EINVAL;
+ }
+
+@@ -665,10 +669,6 @@ static struct dfs_node {
+ static int __init debugfs_init(void)
+ {
+ unsigned int i;
+- u64 cap;
+-
+- rdmsrl(MSR_IA32_MCG_CAP, cap);
+- n_banks = cap & MCG_BANKCNT_MASK;
+
+ dfs_inj = debugfs_create_dir("mce-inject", NULL);
+ if (!dfs_inj)
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 97f9ada9ceda..fc70d39b804f 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -418,8 +418,9 @@ static int do_microcode_update(const void __user *buf, size_t size)
+ if (ustate == UCODE_ERROR) {
+ error = -1;
+ break;
+- } else if (ustate == UCODE_OK)
++ } else if (ustate == UCODE_NEW) {
+ apply_microcode_on_target(cpu);
++ }
+ }
+
+ return error;
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 2ee4b12a70e8..becb075954aa 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -748,6 +748,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ unsigned long end_offset;
+ unsigned long op_offset;
+ unsigned long offset;
++ unsigned long npages;
+ unsigned long size;
+ unsigned long retq;
+ unsigned long *ptr;
+@@ -780,6 +781,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ return 0;
+
+ *tramp_size = size + RET_SIZE + sizeof(void *);
++ npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
+
+ /* Copy ftrace_caller onto the trampoline memory */
+ ret = probe_kernel_read(trampoline, (void *)start_offset, size);
+@@ -824,6 +826,12 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ /* ALLOC_TRAMP flags lets us know we created it */
+ ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+
++ /*
++ * Module allocation needs to be completed by making the page
++ * executable. The page is still writable, which is a security hazard,
++ * but anyhow ftrace breaks W^X completely.
++ */
++ set_memory_x((unsigned long)trampoline, npages);
+ return (unsigned long)trampoline;
+ fail:
+ tramp_free(trampoline, *tramp_size);
+diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
+index 0469cd078db1..b50ac9c7397b 100644
+--- a/arch/x86/kernel/irq_64.c
++++ b/arch/x86/kernel/irq_64.c
+@@ -26,9 +26,18 @@ int sysctl_panic_on_stackoverflow;
+ /*
+ * Probabilistic stack overflow check:
+ *
+- * Only check the stack in process context, because everything else
+- * runs on the big interrupt stacks. Checking reliably is too expensive,
+- * so we just check from interrupts.
++ * Regular device interrupts can enter on the following stacks:
++ *
++ * - User stack
++ *
++ * - Kernel task stack
++ *
++ * - Interrupt stack if a device driver reenables interrupts
++ * which should only happen in really old drivers.
++ *
++ * - Debug IST stack
++ *
++ * All other contexts are invalid.
+ */
+ static inline void stack_overflow_check(struct pt_regs *regs)
+ {
+@@ -53,8 +62,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
+ return;
+
+ oist = this_cpu_ptr(&orig_ist);
+- estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
+- estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
++ estack_bottom = (u64)oist->ist[DEBUG_STACK];
++ estack_top = estack_bottom - DEBUG_STKSZ + STACK_TOP_MARGIN;
+ if (regs->sp >= estack_top && regs->sp <= estack_bottom)
+ return;
+
+diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
+index b052e883dd8c..cfa3106faee4 100644
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -87,7 +87,7 @@ void *module_alloc(unsigned long size)
+ p = __vmalloc_node_range(size, MODULE_ALIGN,
+ MODULES_VADDR + get_module_load_offset(),
+ MODULES_END, GFP_KERNEL,
+- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++ PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ if (p && (kasan_module_alloc(p, size) < 0)) {
+ vfree(p);
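Note: the module.c, alternative.c and ftrace.c hunks in this patch implement one W^X scheme for x86 module memory. A hedged summary of the lifecycle they establish (condensed from the hunks, not a literal call sequence):

	/* 1. module memory now starts RW but non-executable */
	p = __vmalloc_node_range(size, MODULE_ALIGN, start, end, GFP_KERNEL,
				 PAGE_KERNEL /* was PAGE_KERNEL_EXEC */,
				 0, NUMA_NO_NODE, __builtin_return_address(0));
	/* 2. while NX, text_poke_early() may patch with a plain memcpy:
	 *    nothing can execute or speculatively fetch from the mapping */
	memcpy(addr, opcode, len);
	/* 3. only after patching is the region flipped executable */
	set_memory_x((unsigned long)p, npages);
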
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 08dfd4c1a4f9..c8aa58a2bab9 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs,
+ COPY_SEG_CPL3(cs);
+ COPY_SEG_CPL3(ss);
+
+-#ifdef CONFIG_X86_64
+- /*
+- * Fix up SS if needed for the benefit of old DOSEMU and
+- * CRIU.
+- */
+- if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
+- user_64bit_mode(regs)))
+- force_valid_ss(regs);
+-#endif
+-
+ get_user_ex(tmpflags, &sc->flags);
+ regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+ regs->orig_ax = -1; /* disable syscall checks */
+@@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs,
+ buf = (void __user *)buf_val;
+ } get_user_catch(err);
+
++#ifdef CONFIG_X86_64
++ /*
++ * Fix up SS if needed for the benefit of old DOSEMU and
++ * CRIU.
++ */
++ if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
++ force_valid_ss(regs);
++#endif
++
+ err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
+
+ force_iret();
+@@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+ {
+ struct rt_sigframe __user *frame;
+ void __user *fp = NULL;
++ unsigned long uc_flags;
+ int err = 0;
+
+ frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
+@@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+ return -EFAULT;
+ }
+
++ uc_flags = frame_uc_flags(regs);
++
+ put_user_try {
+ /* Create the ucontext. */
+- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
++ put_user_ex(uc_flags, &frame->uc.uc_flags);
+ put_user_ex(0, &frame->uc.uc_link);
+ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
+
+@@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
+ {
+ #ifdef CONFIG_X86_X32_ABI
+ struct rt_sigframe_x32 __user *frame;
++ unsigned long uc_flags;
+ void __user *restorer;
+ int err = 0;
+ void __user *fpstate = NULL;
+@@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
+ return -EFAULT;
+ }
+
++ uc_flags = frame_uc_flags(regs);
++
+ put_user_try {
+ /* Create the ucontext. */
+- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
++ put_user_ex(uc_flags, &frame->uc.uc_flags);
+ put_user_ex(0, &frame->uc.uc_link);
+ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
+ put_user_ex(0, &frame->uc.uc__pad0);
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index c45214c44e61..5cbce783d4d1 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -141,11 +141,11 @@ SECTIONS
+ *(.text.__x86.indirect_thunk)
+ __indirect_thunk_end = .;
+ #endif
+-
+- /* End of text section */
+- _etext = .;
+ } :text = 0x9090
+
++ /* End of text section */
++ _etext = .;
++
+ NOTES :text :note
+
+ EXCEPTION_TABLE(16) :text = 0x9090
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2a07e43ee666..847db4bd1dc5 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2020,7 +2020,11 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ if (!kvm_vcpu_apicv_active(vcpu))
+ return;
+
+- if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
++ /*
++ * Since the host physical APIC id is 8 bits,
++ * we can support host APIC ID upto 255.
++ */
++ if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
+ return;
+
+ entry = READ_ONCE(*(svm->avic_physical_id_cache));
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 0bbb21a49082..03b5c5803b5c 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1288,7 +1288,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ u64 efer = msr_info->data;
+
+ if (efer & efer_reserved_bits)
+- return false;
++ return 1;
+
+ if (!msr_info->host_initiated) {
+ if (!__kvm_valid_efer(vcpu, efer))
+diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
+index 3b24dc05251c..9d05572370ed 100644
+--- a/arch/x86/lib/memcpy_64.S
++++ b/arch/x86/lib/memcpy_64.S
+@@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe)
+ /* Copy successful. Return zero */
+ .L_done_memcpy_trap:
+ xorl %eax, %eax
++.L_done:
+ ret
+ ENDPROC(__memcpy_mcsafe)
+ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
+@@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
+ addl %edx, %ecx
+ .E_trailing_bytes:
+ mov %ecx, %eax
+- ret
++ jmp .L_done
+
+ /*
+ * For write fault handling, given the destination is unaligned,
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 9d5c75f02295..55233dec5ff4 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -359,8 +359,6 @@ static noinline int vmalloc_fault(unsigned long address)
+ if (!(address >= VMALLOC_START && address < VMALLOC_END))
+ return -1;
+
+- WARN_ON_ONCE(in_nmi());
+-
+ /*
+ * Copy kernel mappings over when needed. This can also
+ * happen within a race in page table update. In the later
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index 0c98b6c1ca49..1213556a20da 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -413,6 +413,14 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
+ struct list_head *list, bool run_queue_async)
+ {
+ struct elevator_queue *e;
++ struct request_queue *q = hctx->queue;
++
++ /*
++ * blk_mq_sched_insert_requests() is called from flush plug
++ * context only, and hold one usage counter to prevent queue
++ * from being released.
++ */
++ percpu_ref_get(&q->q_usage_counter);
+
+ e = hctx->queue->elevator;
+ if (e && e->type->ops.insert_requests)
+@@ -426,12 +434,14 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
+ if (!hctx->dispatch_busy && !e && !run_queue_async) {
+ blk_mq_try_issue_list_directly(hctx, list);
+ if (list_empty(list))
+- return;
++ goto out;
+ }
+ blk_mq_insert_requests(hctx, ctx, list);
+ }
+
+ blk_mq_run_hw_queue(hctx, run_queue_async);
++ out:
++ percpu_ref_put(&q->q_usage_counter);
+ }
+
+ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
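Note: the blk-mq-sched.c hunk pins the request queue's usage counter for the whole plug flush, so dispatching the last request can no longer race with queue teardown and release the queue under the dispatcher. The guard shape, reduced to its essentials:

	percpu_ref_get(&q->q_usage_counter);	/* queue cannot be released */
	/* ... insert and dispatch the plugged requests ... */
	percpu_ref_put(&q->q_usage_counter);	/* every early return now
						 * funnels through 'goto out' */
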
1505 +diff --git a/block/blk-mq.c b/block/blk-mq.c
1506 +index 9957e0fc17fc..27526095319c 100644
1507 +--- a/block/blk-mq.c
1508 ++++ b/block/blk-mq.c
1509 +@@ -2287,15 +2287,65 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
1510 + }
1511 + }
1512 +
1513 ++static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
1514 ++{
1515 ++ int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
1516 ++
1517 ++ BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
1518 ++ __alignof__(struct blk_mq_hw_ctx)) !=
1519 ++ sizeof(struct blk_mq_hw_ctx));
1520 ++
1521 ++ if (tag_set->flags & BLK_MQ_F_BLOCKING)
1522 ++ hw_ctx_size += sizeof(struct srcu_struct);
1523 ++
1524 ++ return hw_ctx_size;
1525 ++}
1526 ++
1527 + static int blk_mq_init_hctx(struct request_queue *q,
1528 + struct blk_mq_tag_set *set,
1529 + struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1530 + {
1531 +- int node;
1532 ++ hctx->queue_num = hctx_idx;
1533 ++
1534 ++ cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1535 ++
1536 ++ hctx->tags = set->tags[hctx_idx];
1537 ++
1538 ++ if (set->ops->init_hctx &&
1539 ++ set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1540 ++ goto unregister_cpu_notifier;
1541 +
1542 +- node = hctx->numa_node;
1543 ++ if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
1544 ++ hctx->numa_node))
1545 ++ goto exit_hctx;
1546 ++ return 0;
1547 ++
1548 ++ exit_hctx:
1549 ++ if (set->ops->exit_hctx)
1550 ++ set->ops->exit_hctx(hctx, hctx_idx);
1551 ++ unregister_cpu_notifier:
1552 ++ blk_mq_remove_cpuhp(hctx);
1553 ++ return -1;
1554 ++}
1555 ++
1556 ++static struct blk_mq_hw_ctx *
1557 ++blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
1558 ++ int node)
1559 ++{
1560 ++ struct blk_mq_hw_ctx *hctx;
1561 ++ gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
1562 ++
1563 ++ hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
1564 ++ if (!hctx)
1565 ++ goto fail_alloc_hctx;
1566 ++
1567 ++ if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
1568 ++ goto free_hctx;
1569 ++
1570 ++ atomic_set(&hctx->nr_active, 0);
1571 + if (node == NUMA_NO_NODE)
1572 +- node = hctx->numa_node = set->numa_node;
1573 ++ node = set->numa_node;
1574 ++ hctx->numa_node = node;
1575 +
1576 + INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1577 + spin_lock_init(&hctx->lock);
1578 +@@ -2303,58 +2353,45 @@ static int blk_mq_init_hctx(struct request_queue *q,
1579 + hctx->queue = q;
1580 + hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1581 +
1582 +- cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1583 +-
1584 +- hctx->tags = set->tags[hctx_idx];
1585 +-
1586 + /*
1587 + * Allocate space for all possible cpus to avoid allocation at
1588 + * runtime
1589 + */
1590 + hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
1591 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
1592 ++ gfp, node);
1593 + if (!hctx->ctxs)
1594 +- goto unregister_cpu_notifier;
1595 ++ goto free_cpumask;
1596 +
1597 + if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
1598 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
1599 ++ gfp, node))
1600 + goto free_ctxs;
1601 +-
1602 + hctx->nr_ctx = 0;
1603 +
1604 + spin_lock_init(&hctx->dispatch_wait_lock);
1605 + init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
1606 + INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
1607 +
1608 +- if (set->ops->init_hctx &&
1609 +- set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1610 +- goto free_bitmap;
1611 +-
1612 + hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
1613 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
1614 ++ gfp);
1615 + if (!hctx->fq)
1616 +- goto exit_hctx;
1617 +-
1618 +- if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
1619 +- goto free_fq;
1620 ++ goto free_bitmap;
1621 +
1622 + if (hctx->flags & BLK_MQ_F_BLOCKING)
1623 + init_srcu_struct(hctx->srcu);
1624 ++ blk_mq_hctx_kobj_init(hctx);
1625 +
1626 +- return 0;
1627 ++ return hctx;
1628 +
1629 +- free_fq:
1630 +- blk_free_flush_queue(hctx->fq);
1631 +- exit_hctx:
1632 +- if (set->ops->exit_hctx)
1633 +- set->ops->exit_hctx(hctx, hctx_idx);
1634 + free_bitmap:
1635 + sbitmap_free(&hctx->ctx_map);
1636 + free_ctxs:
1637 + kfree(hctx->ctxs);
1638 +- unregister_cpu_notifier:
1639 +- blk_mq_remove_cpuhp(hctx);
1640 +- return -1;
1641 ++ free_cpumask:
1642 ++ free_cpumask_var(hctx->cpumask);
1643 ++ free_hctx:
1644 ++ kfree(hctx);
1645 ++ fail_alloc_hctx:
1646 ++ return NULL;
1647 + }
1648 +
1649 + static void blk_mq_init_cpu_queues(struct request_queue *q,
1650 +@@ -2691,51 +2728,25 @@ struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
1651 + }
1652 + EXPORT_SYMBOL(blk_mq_init_sq_queue);
1653 +
1654 +-static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
1655 +-{
1656 +- int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
1657 +-
1658 +- BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
1659 +- __alignof__(struct blk_mq_hw_ctx)) !=
1660 +- sizeof(struct blk_mq_hw_ctx));
1661 +-
1662 +- if (tag_set->flags & BLK_MQ_F_BLOCKING)
1663 +- hw_ctx_size += sizeof(struct srcu_struct);
1664 +-
1665 +- return hw_ctx_size;
1666 +-}
1667 +-
1668 + static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
1669 + struct blk_mq_tag_set *set, struct request_queue *q,
1670 + int hctx_idx, int node)
1671 + {
1672 + struct blk_mq_hw_ctx *hctx;
1673 +
1674 +- hctx = kzalloc_node(blk_mq_hw_ctx_size(set),
1675 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1676 +- node);
1677 ++ hctx = blk_mq_alloc_hctx(q, set, node);
1678 + if (!hctx)
1679 +- return NULL;
1680 +-
1681 +- if (!zalloc_cpumask_var_node(&hctx->cpumask,
1682 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1683 +- node)) {
1684 +- kfree(hctx);
1685 +- return NULL;
1686 +- }
1687 +-
1688 +- atomic_set(&hctx->nr_active, 0);
1689 +- hctx->numa_node = node;
1690 +- hctx->queue_num = hctx_idx;
1691 ++ goto fail;
1692 +
1693 +- if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) {
1694 +- free_cpumask_var(hctx->cpumask);
1695 +- kfree(hctx);
1696 +- return NULL;
1697 +- }
1698 +- blk_mq_hctx_kobj_init(hctx);
1699 ++ if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
1700 ++ goto free_hctx;
1701 +
1702 + return hctx;
1703 ++
1704 ++ free_hctx:
1705 ++ kobject_put(&hctx->kobj);
1706 ++ fail:
1707 ++ return NULL;
1708 + }
1709 +
1710 + static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
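
The blk-mq.c rework above splits allocation (the new blk_mq_alloc_hctx()) from driver-visible initialization (the slimmed-down blk_mq_init_hctx()) so each failure path unwinds only what was actually set up, and moves blk_mq_hw_ctx_size() ahead of its new caller. The BUILD_BUG_ON asserts that srcu is the aligned tail of struct blk_mq_hw_ctx, because the srcu_struct is only allocated for BLK_MQ_F_BLOCKING queues. A self-contained sketch of that optional-trailing-member sizing trick, with hypothetical types:

    #include <assert.h>
    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    #define F_BLOCKING 0x1

    struct tail { long state[8]; };    /* stands in for srcu_struct */

    struct hctx {
        int queue_num;
        unsigned int flags;
        struct tail srcu[];            /* present only for blocking queues */
    };

    /*
     * The tail must start exactly where the base struct ends, or the
     * "base size + tail size" computation below would be wrong.
     */
    static_assert(offsetof(struct hctx, srcu) == sizeof(struct hctx),
                  "srcu must be the aligned tail of struct hctx");

    static size_t hctx_size(unsigned int flags)
    {
        size_t sz = sizeof(struct hctx);

        if (flags & F_BLOCKING)
            sz += sizeof(struct tail); /* room for the optional tail */
        return sz;
    }

    int main(void)
    {
        struct hctx *h = calloc(1, hctx_size(F_BLOCKING));

        if (!h)
            return 1;
        h->flags = F_BLOCKING;
        memset(h->srcu, 0, sizeof(struct tail)); /* safe: tail was allocated */
        free(h);
        return 0;
    }
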
1711 +diff --git a/block/blk.h b/block/blk.h
1712 +index 848278c52030..a57bc90e44bb 100644
1713 +--- a/block/blk.h
1714 ++++ b/block/blk.h
1715 +@@ -75,7 +75,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
1716 +
1717 + if (addr1 + vec1->bv_len != addr2)
1718 + return false;
1719 +- if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
1720 ++ if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
1721 + return false;
1722 + if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
1723 + return false;
1724 +diff --git a/block/genhd.c b/block/genhd.c
1725 +index 1dd8fd6613b8..ef28a5126d21 100644
1726 +--- a/block/genhd.c
1727 ++++ b/block/genhd.c
1728 +@@ -531,6 +531,18 @@ void blk_free_devt(dev_t devt)
1729 + }
1730 + }
1731 +
1732 ++/*
1733 ++ * Invalidate a devt by replacing its idr entry with a NULL pointer.
1734 ++ */
1735 ++void blk_invalidate_devt(dev_t devt)
1736 ++{
1737 ++ if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
1738 ++ spin_lock_bh(&ext_devt_lock);
1739 ++ idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt)));
1740 ++ spin_unlock_bh(&ext_devt_lock);
1741 ++ }
1742 ++}
1743 ++
1744 + static char *bdevt_str(dev_t devt, char *buf)
1745 + {
1746 + if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
1747 +@@ -791,6 +803,13 @@ void del_gendisk(struct gendisk *disk)
1748 +
1749 + if (!(disk->flags & GENHD_FL_HIDDEN))
1750 + blk_unregister_region(disk_devt(disk), disk->minors);
1751 ++ /*
1752 ++ * Remove the gendisk pointer from the idr so that it cannot be
1753 ++ * looked up during the RCU grace period that runs before the gendisk
1754 ++ * is freed, preventing use-after-free. Note that the device number
1755 ++ * stays "in-use" until the gendisk is really freed.
1756 ++ */
1757 ++ blk_invalidate_devt(disk_devt(disk));
1758 +
1759 + kobject_put(disk->part0.holder_dir);
1760 + kobject_put(disk->slave_dir);
1761 +diff --git a/block/partition-generic.c b/block/partition-generic.c
1762 +index 8e596a8dff32..aee643ce13d1 100644
1763 +--- a/block/partition-generic.c
1764 ++++ b/block/partition-generic.c
1765 +@@ -285,6 +285,13 @@ void delete_partition(struct gendisk *disk, int partno)
1766 + kobject_put(part->holder_dir);
1767 + device_del(part_to_dev(part));
1768 +
1769 ++ /*
1770 ++ * Remove the gendisk pointer from the idr so that it cannot be
1771 ++ * looked up during the RCU grace period that runs before the gendisk
1772 ++ * is freed, preventing use-after-free. Note that the device number
1773 ++ * stays "in-use" until the gendisk is really freed.
1774 ++ */
1775 ++ blk_invalidate_devt(part_devt(part));
1776 + hd_struct_kill(part);
1777 + }
1778 +
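
Both hunks above (del_gendisk() and delete_partition()) call the new blk_invalidate_devt(), which idr_replace()s the stored pointer with NULL before the object is freed: a lookup racing with teardown sees NULL during the RCU grace period instead of a soon-to-be-freed pointer, while the device number itself stays reserved. A compact userspace sketch of that two-phase teardown, with a mutex-protected table standing in for the idr:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NSLOT 8

    static void *slots[NSLOT];
    static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Phase 1: make the object unreachable; the slot id stays "in use". */
    static void invalidate(int id)
    {
        pthread_mutex_lock(&slots_lock);
        slots[id] = NULL;              /* like idr_replace(..., NULL, id) */
        pthread_mutex_unlock(&slots_lock);
    }

    /*
     * Phase 2, later (where the kernel waits out an RCU grace period):
     * free the object; only now may the id itself be recycled.
     */
    static void release(int id, void *obj)
    {
        (void)id;
        free(obj);
    }

    static void *lookup(int id)
    {
        void *obj;

        pthread_mutex_lock(&slots_lock);
        obj = slots[id];               /* NULL once invalidated: no UAF */
        pthread_mutex_unlock(&slots_lock);
        return obj;
    }

    int main(void)
    {
        void *disk = malloc(32);

        slots[3] = disk;
        invalidate(3);
        printf("lookup(3) after invalidate: %p\n", lookup(3));
        release(3, disk);
        return 0;
    }
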
1779 +diff --git a/block/sed-opal.c b/block/sed-opal.c
1780 +index e0de4dd448b3..119640897293 100644
1781 +--- a/block/sed-opal.c
1782 ++++ b/block/sed-opal.c
1783 +@@ -2095,13 +2095,16 @@ static int opal_erase_locking_range(struct opal_dev *dev,
1784 + static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
1785 + struct opal_mbr_data *opal_mbr)
1786 + {
1787 ++ u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ?
1788 ++ OPAL_TRUE : OPAL_FALSE;
1789 ++
1790 + const struct opal_step mbr_steps[] = {
1791 + { opal_discovery0, },
1792 + { start_admin1LSP_opal_session, &opal_mbr->key },
1793 +- { set_mbr_done, &opal_mbr->enable_disable },
1794 ++ { set_mbr_done, &enable_disable },
1795 + { end_opal_session, },
1796 + { start_admin1LSP_opal_session, &opal_mbr->key },
1797 +- { set_mbr_enable_disable, &opal_mbr->enable_disable },
1798 ++ { set_mbr_enable_disable, &enable_disable },
1799 + { end_opal_session, },
1800 + { NULL, }
1801 + };
1802 +@@ -2221,7 +2224,7 @@ static int __opal_lock_unlock(struct opal_dev *dev,
1803 +
1804 + static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key)
1805 + {
1806 +- u8 mbr_done_tf = 1;
1807 ++ u8 mbr_done_tf = OPAL_TRUE;
1808 + const struct opal_step mbrdone_step [] = {
1809 + { opal_discovery0, },
1810 + { start_admin1LSP_opal_session, key },
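
The sed-opal hunk above converts the ioctl's opal_mbr->enable_disable value into the on-the-wire OPAL_TRUE/OPAL_FALSE token once, in a local, instead of feeding the raw value to both command steps; __opal_set_mbr_done likewise spells its bare 1 as OPAL_TRUE. (The uapi appears to encode OPAL_MBR_ENABLE as 0 while the token for "true" is 1, so passing the raw value through inverted the request.) A small sketch of normalizing a user-facing enum to protocol tokens before building commands; the constant values here are assumptions:

    #include <stdint.h>
    #include <stdio.h>

    /* user-facing ioctl values (assumed) */
    enum { MBR_ENABLE = 0, MBR_DISABLE = 1 };
    /* on-the-wire protocol tokens (assumed) */
    enum { TOK_FALSE = 0x00, TOK_TRUE = 0x01 };

    static void build_step(const char *name, uint8_t token)
    {
        printf("step %-22s token=0x%02x\n", name, token);
    }

    int main(void)
    {
        int user_val = MBR_ENABLE;
        /* convert once, then hand the token to every step */
        uint8_t enable_disable = (user_val == MBR_ENABLE) ? TOK_TRUE
                                                          : TOK_FALSE;

        build_step("set_mbr_done", enable_disable);
        build_step("set_mbr_enable_disable", enable_disable);
        return 0;
    }
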
1811 +diff --git a/crypto/hmac.c b/crypto/hmac.c
1812 +index e74730224f0a..4b8c8ee8f15c 100644
1813 +--- a/crypto/hmac.c
1814 ++++ b/crypto/hmac.c
1815 +@@ -168,6 +168,8 @@ static int hmac_init_tfm(struct crypto_tfm *tfm)
1816 +
1817 + parent->descsize = sizeof(struct shash_desc) +
1818 + crypto_shash_descsize(hash);
1819 ++ if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE))
1820 ++ return -EINVAL;
1821 +
1822 + ctx->hash = hash;
1823 + return 0;
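
The hmac change above rejects, at transform-init time, any underlying hash whose computed descsize would overflow the HASH_MAX_DESCSIZE-sized descriptor buffers that callers place on the stack, turning a potential stack overwrite into a clean -EINVAL. A tiny sketch of the same bound check, with a hypothetical limit:

    #include <stdio.h>

    #define MAX_DESCSIZE 64    /* stands in for HASH_MAX_DESCSIZE */

    /* Callers keep descriptors in fixed buffers: char buf[MAX_DESCSIZE]. */

    /* Refuse at setup time if the runtime-computed state won't fit. */
    static int init_tfm(unsigned long state_size, unsigned long *descsize)
    {
        unsigned long need = sizeof(unsigned long) + state_size;

        if (need > MAX_DESCSIZE)
            return -1;         /* would overflow on-stack descriptors */
        *descsize = need;
        return 0;
    }

    int main(void)
    {
        unsigned long d;

        printf("small state: %d\n", init_tfm(16, &d));   /* fits */
        printf("huge state:  %d\n", init_tfm(4096, &d)); /* rejected */
        return 0;
    }
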
1824 +diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
1825 +index e48894e002ba..a46c2c162c03 100644
1826 +--- a/drivers/acpi/arm64/iort.c
1827 ++++ b/drivers/acpi/arm64/iort.c
1828 +@@ -1232,18 +1232,24 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
1829 + /*
1830 + * set numa proximity domain for smmuv3 device
1831 + */
1832 +-static void __init arm_smmu_v3_set_proximity(struct device *dev,
1833 ++static int __init arm_smmu_v3_set_proximity(struct device *dev,
1834 + struct acpi_iort_node *node)
1835 + {
1836 + struct acpi_iort_smmu_v3 *smmu;
1837 +
1838 + smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1839 + if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
1840 +- set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
1841 ++ int node = acpi_map_pxm_to_node(smmu->pxm);
1842 ++
1843 ++ if (node != NUMA_NO_NODE && !node_online(node))
1844 ++ return -EINVAL;
1845 ++
1846 ++ set_dev_node(dev, node);
1847 + pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
1848 + smmu->base_address,
1849 + smmu->pxm);
1850 + }
1851 ++ return 0;
1852 + }
1853 + #else
1854 + #define arm_smmu_v3_set_proximity NULL
1855 +@@ -1318,7 +1324,7 @@ struct iort_dev_config {
1856 + int (*dev_count_resources)(struct acpi_iort_node *node);
1857 + void (*dev_init_resources)(struct resource *res,
1858 + struct acpi_iort_node *node);
1859 +- void (*dev_set_proximity)(struct device *dev,
1860 ++ int (*dev_set_proximity)(struct device *dev,
1861 + struct acpi_iort_node *node);
1862 + };
1863 +
1864 +@@ -1369,8 +1375,11 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
1865 + if (!pdev)
1866 + return -ENOMEM;
1867 +
1868 +- if (ops->dev_set_proximity)
1869 +- ops->dev_set_proximity(&pdev->dev, node);
1870 ++ if (ops->dev_set_proximity) {
1871 ++ ret = ops->dev_set_proximity(&pdev->dev, node);
1872 ++ if (ret)
1873 ++ goto dev_put;
1874 ++ }
1875 +
1876 + count = ops->dev_count_resources(node);
1877 +
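
The iort.c change above makes dev_set_proximity a fallible callback: when firmware reports a proximity domain that maps to a NUMA node that is not online, arm_smmu_v3_set_proximity() now returns -EINVAL and iort_add_platform_device() unwinds through its dev_put path instead of registering the device against a bogus node. A generic sketch of promoting an optional hook into one whose failure aborts device creation (hypothetical types):

    #include <stdio.h>
    #include <stdlib.h>

    struct dev { int node; };

    struct dev_config {
        /* optional: may be NULL -- and may now fail */
        int (*set_proximity)(struct dev *d, int fw_node);
    };

    static int set_proximity_checked(struct dev *d, int fw_node)
    {
        int max_online = 3;            /* pretend nodes 0..3 are online */

        if (fw_node > max_online)
            return -1;                 /* firmware handed us a bad node */
        d->node = fw_node;
        return 0;
    }

    static int add_device(const struct dev_config *ops, int fw_node)
    {
        struct dev *d = calloc(1, sizeof(*d));
        int ret = 0;

        if (!d)
            return -1;
        if (ops->set_proximity) {
            ret = ops->set_proximity(d, fw_node);
            if (ret)
                goto dev_put;          /* unwind instead of ignoring it */
        }
        printf("device added on node %d\n", d->node);
        free(d);
        return 0;

    dev_put:
        free(d);
        return ret;
    }

    int main(void)
    {
        struct dev_config ops = { .set_proximity = set_proximity_checked };

        add_device(&ops, 1);           /* succeeds */
        add_device(&ops, 9);           /* fails cleanly */
        return 0;
    }
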
1878 +diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
1879 +index 77abe0ec4043..bd533f68b1de 100644
1880 +--- a/drivers/acpi/property.c
1881 ++++ b/drivers/acpi/property.c
1882 +@@ -1031,6 +1031,14 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
1883 + const struct acpi_data_node *data = to_acpi_data_node(fwnode);
1884 + struct acpi_data_node *dn;
1885 +
1886 ++ /*
1887 ++ * We can have a combination of device and data nodes, e.g. with
1888 ++ * hierarchical _DSD properties. Make sure the adev pointer is
1889 ++ * restored before going through data nodes, otherwise we will
1890 ++ * be looking for data_nodes below the last device found instead
1891 ++ * of the common fwnode shared by device_nodes and data_nodes.
1892 ++ */
1893 ++ adev = to_acpi_device_node(fwnode);
1894 + if (adev)
1895 + head = &adev->data.subnodes;
1896 + else if (data)
1897 +diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
1898 +index 0992e67e862b..7900debc5ce4 100644
1899 +--- a/drivers/base/power/main.c
1900 ++++ b/drivers/base/power/main.c
1901 +@@ -1738,6 +1738,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1902 + if (dev->power.syscore)
1903 + goto Complete;
1904 +
1905 ++ /* Avoid direct_complete to let wakeup_path propagate. */
1906 ++ if (device_may_wakeup(dev) || dev->power.wakeup_path)
1907 ++ dev->power.direct_complete = false;
1908 ++
1909 + if (dev->power.direct_complete) {
1910 + if (pm_runtime_status_suspended(dev)) {
1911 + pm_runtime_disable(dev);
1912 +diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
1913 +index d5d6e6e5da3b..62d3aa2b26f6 100644
1914 +--- a/drivers/bluetooth/btbcm.c
1915 ++++ b/drivers/bluetooth/btbcm.c
1916 +@@ -37,6 +37,7 @@
1917 + #define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}})
1918 + #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
1919 + #define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
1920 ++#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}})
1921 +
1922 + int btbcm_check_bdaddr(struct hci_dev *hdev)
1923 + {
1924 +@@ -82,7 +83,8 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
1925 + !bacmp(&bda->bdaddr, BDADDR_BCM20702A1) ||
1926 + !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
1927 + !bacmp(&bda->bdaddr, BDADDR_BCM4330B1) ||
1928 +- !bacmp(&bda->bdaddr, BDADDR_BCM43430A0)) {
1929 ++ !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) ||
1930 ++ !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) {
1931 + bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
1932 + &bda->bdaddr);
1933 + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
1934 +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
1935 +index f036c8f98ea3..97bc17670b7a 100644
1936 +--- a/drivers/bluetooth/hci_qca.c
1937 ++++ b/drivers/bluetooth/hci_qca.c
1938 +@@ -508,6 +508,8 @@ static int qca_open(struct hci_uart *hu)
1939 + qcadev = serdev_device_get_drvdata(hu->serdev);
1940 + if (qcadev->btsoc_type != QCA_WCN3990) {
1941 + gpiod_set_value_cansleep(qcadev->bt_en, 1);
1942 ++ /* The controller needs time to boot up. */
1943 ++ msleep(150);
1944 + } else {
1945 + hu->init_speed = qcadev->init_speed;
1946 + hu->oper_speed = qcadev->oper_speed;
1947 +diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
1948 +index b65ff6962899..e9b6ac61fb7f 100644
1949 +--- a/drivers/char/hw_random/omap-rng.c
1950 ++++ b/drivers/char/hw_random/omap-rng.c
1951 +@@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev)
1952 + priv->rng.read = omap_rng_do_read;
1953 + priv->rng.init = omap_rng_init;
1954 + priv->rng.cleanup = omap_rng_cleanup;
1955 ++ priv->rng.quality = 900;
1956 +
1957 + priv->rng.priv = (unsigned long)priv;
1958 + platform_set_drvdata(pdev, priv);
1959 +diff --git a/drivers/char/random.c b/drivers/char/random.c
1960 +index 38c6d1af6d1c..af6e240f98ff 100644
1961 +--- a/drivers/char/random.c
1962 ++++ b/drivers/char/random.c
1963 +@@ -777,6 +777,7 @@ static struct crng_state **crng_node_pool __read_mostly;
1964 + #endif
1965 +
1966 + static void invalidate_batched_entropy(void);
1967 ++static void numa_crng_init(void);
1968 +
1969 + static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
1970 + static int __init parse_trust_cpu(char *arg)
1971 +@@ -805,7 +806,9 @@ static void crng_initialize(struct crng_state *crng)
1972 + }
1973 + crng->state[i] ^= rv;
1974 + }
1975 +- if (trust_cpu && arch_init) {
1976 ++ if (trust_cpu && arch_init && crng == &primary_crng) {
1977 ++ invalidate_batched_entropy();
1978 ++ numa_crng_init();
1979 + crng_init = 2;
1980 + pr_notice("random: crng done (trusting CPU's manufacturer)\n");
1981 + }
1982 +@@ -2211,8 +2214,8 @@ struct batched_entropy {
1983 + u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
1984 + };
1985 + unsigned int position;
1986 ++ spinlock_t batch_lock;
1987 + };
1988 +-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
1989 +
1990 + /*
1991 + * Get a random word for internal kernel use only. The quality of the random
1992 +@@ -2222,12 +2225,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
1993 + * wait_for_random_bytes() should be called and return 0 at least once
1994 + * at any point prior.
1995 + */
1996 +-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
1997 ++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
1998 ++ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
1999 ++};
2000 ++
2001 + u64 get_random_u64(void)
2002 + {
2003 + u64 ret;
2004 +- bool use_lock;
2005 +- unsigned long flags = 0;
2006 ++ unsigned long flags;
2007 + struct batched_entropy *batch;
2008 + static void *previous;
2009 +
2010 +@@ -2242,28 +2247,25 @@ u64 get_random_u64(void)
2011 +
2012 + warn_unseeded_randomness(&previous);
2013 +
2014 +- use_lock = READ_ONCE(crng_init) < 2;
2015 +- batch = &get_cpu_var(batched_entropy_u64);
2016 +- if (use_lock)
2017 +- read_lock_irqsave(&batched_entropy_reset_lock, flags);
2018 ++ batch = raw_cpu_ptr(&batched_entropy_u64);
2019 ++ spin_lock_irqsave(&batch->batch_lock, flags);
2020 + if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
2021 + extract_crng((u8 *)batch->entropy_u64);
2022 + batch->position = 0;
2023 + }
2024 + ret = batch->entropy_u64[batch->position++];
2025 +- if (use_lock)
2026 +- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2027 +- put_cpu_var(batched_entropy_u64);
2028 ++ spin_unlock_irqrestore(&batch->batch_lock, flags);
2029 + return ret;
2030 + }
2031 + EXPORT_SYMBOL(get_random_u64);
2032 +
2033 +-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
2034 ++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
2035 ++ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
2036 ++};
2037 + u32 get_random_u32(void)
2038 + {
2039 + u32 ret;
2040 +- bool use_lock;
2041 +- unsigned long flags = 0;
2042 ++ unsigned long flags;
2043 + struct batched_entropy *batch;
2044 + static void *previous;
2045 +
2046 +@@ -2272,18 +2274,14 @@ u32 get_random_u32(void)
2047 +
2048 + warn_unseeded_randomness(&previous);
2049 +
2050 +- use_lock = READ_ONCE(crng_init) < 2;
2051 +- batch = &get_cpu_var(batched_entropy_u32);
2052 +- if (use_lock)
2053 +- read_lock_irqsave(&batched_entropy_reset_lock, flags);
2054 ++ batch = raw_cpu_ptr(&batched_entropy_u32);
2055 ++ spin_lock_irqsave(&batch->batch_lock, flags);
2056 + if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
2057 + extract_crng((u8 *)batch->entropy_u32);
2058 + batch->position = 0;
2059 + }
2060 + ret = batch->entropy_u32[batch->position++];
2061 +- if (use_lock)
2062 +- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2063 +- put_cpu_var(batched_entropy_u32);
2064 ++ spin_unlock_irqrestore(&batch->batch_lock, flags);
2065 + return ret;
2066 + }
2067 + EXPORT_SYMBOL(get_random_u32);
2068 +@@ -2297,12 +2295,19 @@ static void invalidate_batched_entropy(void)
2069 + int cpu;
2070 + unsigned long flags;
2071 +
2072 +- write_lock_irqsave(&batched_entropy_reset_lock, flags);
2073 + for_each_possible_cpu (cpu) {
2074 +- per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
2075 +- per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
2076 ++ struct batched_entropy *batched_entropy;
2077 ++
2078 ++ batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
2079 ++ spin_lock_irqsave(&batched_entropy->batch_lock, flags);
2080 ++ batched_entropy->position = 0;
2081 ++ spin_unlock(&batched_entropy->batch_lock);
2082 ++
2083 ++ batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
2084 ++ spin_lock(&batched_entropy->batch_lock);
2085 ++ batched_entropy->position = 0;
2086 ++ spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
2087 + }
2088 +- write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2089 + }
2090 +
2091 + /**
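
The random.c rework above retires the global batched_entropy_reset_lock, an rwlock every get_random_u32()/get_random_u64() caller had to take while crng_init < 2, and embeds a spinlock in each per-CPU batch instead: refill, consumption, and invalidate_batched_entropy() now all serialize on the small per-batch lock. A userspace sketch of the per-batch locking scheme, with a pthread mutex standing in for the per-CPU spinlock and rand() as a non-cryptographic stand-in for extract_crng():

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BATCH 16

    struct batched_entropy {
        uint64_t entropy[BATCH];
        unsigned int position;
        pthread_mutex_t batch_lock;    /* one lock per batch, not global */
    };

    static void refill(struct batched_entropy *b)
    {
        for (int i = 0; i < BATCH; i++)  /* NOT cryptographic: demo only */
            b->entropy[i] = ((uint64_t)rand() << 32) | (unsigned int)rand();
    }

    static uint64_t get_u64(struct batched_entropy *b)
    {
        uint64_t ret;

        pthread_mutex_lock(&b->batch_lock);
        if (b->position % BATCH == 0) {  /* empty or just invalidated */
            refill(b);
            b->position = 0;
        }
        ret = b->entropy[b->position++];
        pthread_mutex_unlock(&b->batch_lock);
        return ret;
    }

    /* Reseed path: force every batch to refill on its next use. */
    static void invalidate(struct batched_entropy *batches, int n)
    {
        for (int i = 0; i < n; i++) {
            pthread_mutex_lock(&batches[i].batch_lock);
            batches[i].position = 0;
            pthread_mutex_unlock(&batches[i].batch_lock);
        }
    }

    int main(void)
    {
        struct batched_entropy b = {
            .batch_lock = PTHREAD_MUTEX_INITIALIZER,
        };

        printf("%016llx\n", (unsigned long long)get_u64(&b));
        invalidate(&b, 1);
        printf("%016llx\n", (unsigned long long)get_u64(&b));
        return 0;
    }
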
2092 +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
2093 +index fbeb71953526..05dbfdb9f4af 100644
2094 +--- a/drivers/char/virtio_console.c
2095 ++++ b/drivers/char/virtio_console.c
2096 +@@ -75,7 +75,7 @@ struct ports_driver_data {
2097 + /* All the console devices handled by this driver */
2098 + struct list_head consoles;
2099 + };
2100 +-static struct ports_driver_data pdrvdata;
2101 ++static struct ports_driver_data pdrvdata = { .next_vtermno = 1};
2102 +
2103 + static DEFINE_SPINLOCK(pdrvdata_lock);
2104 + static DECLARE_COMPLETION(early_console_added);
2105 +@@ -1394,6 +1394,7 @@ static int add_port(struct ports_device *portdev, u32 id)
2106 + port->async_queue = NULL;
2107 +
2108 + port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
2109 ++ port->cons.vtermno = 0;
2110 +
2111 + port->host_connected = port->guest_connected = false;
2112 + port->stats = (struct port_stats) { 0 };
2113 +diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
2114 +index 10e852518870..904d4d4ebcad 100644
2115 +--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
2116 ++++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
2117 +@@ -122,8 +122,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
2118 + DEF_MOD("msiof2", 209, R8A774A1_CLK_MSO),
2119 + DEF_MOD("msiof1", 210, R8A774A1_CLK_MSO),
2120 + DEF_MOD("msiof0", 211, R8A774A1_CLK_MSO),
2121 +- DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S0D3),
2122 +- DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S0D3),
2123 ++ DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S3D1),
2124 ++ DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S3D1),
2125 + DEF_MOD("sys-dmac0", 219, R8A774A1_CLK_S0D3),
2126 + DEF_MOD("cmt3", 300, R8A774A1_CLK_R),
2127 + DEF_MOD("cmt2", 301, R8A774A1_CLK_R),
2128 +@@ -142,8 +142,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
2129 + DEF_MOD("rwdt", 402, R8A774A1_CLK_R),
2130 + DEF_MOD("intc-ex", 407, R8A774A1_CLK_CP),
2131 + DEF_MOD("intc-ap", 408, R8A774A1_CLK_S0D3),
2132 +- DEF_MOD("audmac1", 501, R8A774A1_CLK_S0D3),
2133 +- DEF_MOD("audmac0", 502, R8A774A1_CLK_S0D3),
2134 ++ DEF_MOD("audmac1", 501, R8A774A1_CLK_S1D2),
2135 ++ DEF_MOD("audmac0", 502, R8A774A1_CLK_S1D2),
2136 + DEF_MOD("hscif4", 516, R8A774A1_CLK_S3D1),
2137 + DEF_MOD("hscif3", 517, R8A774A1_CLK_S3D1),
2138 + DEF_MOD("hscif2", 518, R8A774A1_CLK_S3D1),
2139 +diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
2140 +index 10b96895d452..4a0525425c16 100644
2141 +--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
2142 ++++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
2143 +@@ -149,7 +149,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
2144 + DEF_MOD("intc-ex", 407, R8A774C0_CLK_CP),
2145 + DEF_MOD("intc-ap", 408, R8A774C0_CLK_S0D3),
2146 +
2147 +- DEF_MOD("audmac0", 502, R8A774C0_CLK_S3D4),
2148 ++ DEF_MOD("audmac0", 502, R8A774C0_CLK_S1D2),
2149 + DEF_MOD("hscif4", 516, R8A774C0_CLK_S3D1C),
2150 + DEF_MOD("hscif3", 517, R8A774C0_CLK_S3D1C),
2151 + DEF_MOD("hscif2", 518, R8A774C0_CLK_S3D1C),
2152 +diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
2153 +index 86842c9fd314..0825cd0ff286 100644
2154 +--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
2155 ++++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
2156 +@@ -129,8 +129,8 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
2157 + DEF_MOD("msiof2", 209, R8A7795_CLK_MSO),
2158 + DEF_MOD("msiof1", 210, R8A7795_CLK_MSO),
2159 + DEF_MOD("msiof0", 211, R8A7795_CLK_MSO),
2160 +- DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S0D3),
2161 +- DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S0D3),
2162 ++ DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S3D1),
2163 ++ DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S3D1),
2164 + DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S0D3),
2165 + DEF_MOD("sceg-pub", 229, R8A7795_CLK_CR),
2166 + DEF_MOD("cmt3", 300, R8A7795_CLK_R),
2167 +@@ -153,8 +153,8 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
2168 + DEF_MOD("rwdt", 402, R8A7795_CLK_R),
2169 + DEF_MOD("intc-ex", 407, R8A7795_CLK_CP),
2170 + DEF_MOD("intc-ap", 408, R8A7795_CLK_S0D3),
2171 +- DEF_MOD("audmac1", 501, R8A7795_CLK_S0D3),
2172 +- DEF_MOD("audmac0", 502, R8A7795_CLK_S0D3),
2173 ++ DEF_MOD("audmac1", 501, R8A7795_CLK_S1D2),
2174 ++ DEF_MOD("audmac0", 502, R8A7795_CLK_S1D2),
2175 + DEF_MOD("drif7", 508, R8A7795_CLK_S3D2),
2176 + DEF_MOD("drif6", 509, R8A7795_CLK_S3D2),
2177 + DEF_MOD("drif5", 510, R8A7795_CLK_S3D2),
2178 +diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
2179 +index 12c455859f2c..997cd956f12b 100644
2180 +--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
2181 ++++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
2182 +@@ -126,8 +126,8 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
2183 + DEF_MOD("msiof2", 209, R8A7796_CLK_MSO),
2184 + DEF_MOD("msiof1", 210, R8A7796_CLK_MSO),
2185 + DEF_MOD("msiof0", 211, R8A7796_CLK_MSO),
2186 +- DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S0D3),
2187 +- DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S0D3),
2188 ++ DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S3D1),
2189 ++ DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S3D1),
2190 + DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3),
2191 + DEF_MOD("cmt3", 300, R8A7796_CLK_R),
2192 + DEF_MOD("cmt2", 301, R8A7796_CLK_R),
2193 +@@ -146,8 +146,8 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
2194 + DEF_MOD("rwdt", 402, R8A7796_CLK_R),
2195 + DEF_MOD("intc-ex", 407, R8A7796_CLK_CP),
2196 + DEF_MOD("intc-ap", 408, R8A7796_CLK_S0D3),
2197 +- DEF_MOD("audmac1", 501, R8A7796_CLK_S0D3),
2198 +- DEF_MOD("audmac0", 502, R8A7796_CLK_S0D3),
2199 ++ DEF_MOD("audmac1", 501, R8A7796_CLK_S1D2),
2200 ++ DEF_MOD("audmac0", 502, R8A7796_CLK_S1D2),
2201 + DEF_MOD("drif7", 508, R8A7796_CLK_S3D2),
2202 + DEF_MOD("drif6", 509, R8A7796_CLK_S3D2),
2203 + DEF_MOD("drif5", 510, R8A7796_CLK_S3D2),
2204 +diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
2205 +index eb1cca58a1e1..afc9c72fa094 100644
2206 +--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
2207 ++++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
2208 +@@ -123,8 +123,8 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
2209 + DEF_MOD("msiof2", 209, R8A77965_CLK_MSO),
2210 + DEF_MOD("msiof1", 210, R8A77965_CLK_MSO),
2211 + DEF_MOD("msiof0", 211, R8A77965_CLK_MSO),
2212 +- DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S0D3),
2213 +- DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S0D3),
2214 ++ DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S3D1),
2215 ++ DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S3D1),
2216 + DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3),
2217 +
2218 + DEF_MOD("cmt3", 300, R8A77965_CLK_R),
2219 +@@ -146,8 +146,8 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
2220 + DEF_MOD("intc-ex", 407, R8A77965_CLK_CP),
2221 + DEF_MOD("intc-ap", 408, R8A77965_CLK_S0D3),
2222 +
2223 +- DEF_MOD("audmac1", 501, R8A77965_CLK_S0D3),
2224 +- DEF_MOD("audmac0", 502, R8A77965_CLK_S0D3),
2225 ++ DEF_MOD("audmac1", 501, R8A77965_CLK_S1D2),
2226 ++ DEF_MOD("audmac0", 502, R8A77965_CLK_S1D2),
2227 + DEF_MOD("drif7", 508, R8A77965_CLK_S3D2),
2228 + DEF_MOD("drif6", 509, R8A77965_CLK_S3D2),
2229 + DEF_MOD("drif5", 510, R8A77965_CLK_S3D2),
2230 +diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
2231 +index 9a278c75c918..03f445d47ef6 100644
2232 +--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
2233 ++++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
2234 +@@ -152,7 +152,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
2235 + DEF_MOD("intc-ex", 407, R8A77990_CLK_CP),
2236 + DEF_MOD("intc-ap", 408, R8A77990_CLK_S0D3),
2237 +
2238 +- DEF_MOD("audmac0", 502, R8A77990_CLK_S3D4),
2239 ++ DEF_MOD("audmac0", 502, R8A77990_CLK_S1D2),
2240 + DEF_MOD("drif7", 508, R8A77990_CLK_S3D2),
2241 + DEF_MOD("drif6", 509, R8A77990_CLK_S3D2),
2242 + DEF_MOD("drif5", 510, R8A77990_CLK_S3D2),
2243 +diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
2244 +index eee3874865a9..68707277b17b 100644
2245 +--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
2246 ++++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
2247 +@@ -133,7 +133,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
2248 + DEF_MOD("rwdt", 402, R8A77995_CLK_R),
2249 + DEF_MOD("intc-ex", 407, R8A77995_CLK_CP),
2250 + DEF_MOD("intc-ap", 408, R8A77995_CLK_S1D2),
2251 +- DEF_MOD("audmac0", 502, R8A77995_CLK_S3D1),
2252 ++ DEF_MOD("audmac0", 502, R8A77995_CLK_S1D2),
2253 + DEF_MOD("hscif3", 517, R8A77995_CLK_S3D1C),
2254 + DEF_MOD("hscif0", 520, R8A77995_CLK_S3D1C),
2255 + DEF_MOD("thermal", 522, R8A77995_CLK_CP),
2256 +diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
2257 +index 5a67b7869960..355d6a3611db 100644
2258 +--- a/drivers/clk/rockchip/clk-rk3288.c
2259 ++++ b/drivers/clk/rockchip/clk-rk3288.c
2260 +@@ -219,7 +219,7 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
2261 + PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
2262 + PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
2263 +
2264 +-PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vepu", "aclk_vdpu" };
2265 ++PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vdpu", "aclk_vepu" };
2266 + PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
2267 + "sclk_otgphy0_480m" };
2268 + PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
2269 +@@ -313,13 +313,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2270 + COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
2271 + RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
2272 + RK3288_CLKGATE_CON(12), 6, GFLAGS),
2273 +- COMPOSITE_NOMUX(0, "atclk", "armclk", CLK_IGNORE_UNUSED,
2274 ++ COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
2275 + RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
2276 + RK3288_CLKGATE_CON(12), 7, GFLAGS),
2277 + COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
2278 + RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
2279 + RK3288_CLKGATE_CON(12), 8, GFLAGS),
2280 +- GATE(0, "pclk_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
2281 ++ GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
2282 + RK3288_CLKGATE_CON(12), 9, GFLAGS),
2283 + GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
2284 + RK3288_CLKGATE_CON(12), 10, GFLAGS),
2285 +@@ -420,7 +420,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2286 + COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
2287 + RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
2288 + RK3288_CLKGATE_CON(3), 11, GFLAGS),
2289 +- MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, 0,
2290 ++ MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
2291 + RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
2292 + GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
2293 + RK3288_CLKGATE_CON(9), 0, GFLAGS),
2294 +@@ -647,7 +647,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2295 + INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
2296 + RK3288_CLKSEL_CON(22), 7, IFLAGS),
2297 +
2298 +- GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
2299 ++ GATE(0, "jtag", "ext_jtag", 0,
2300 + RK3288_CLKGATE_CON(4), 14, GFLAGS),
2301 +
2302 + COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
2303 +@@ -656,7 +656,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2304 + COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
2305 + RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
2306 + RK3288_CLKGATE_CON(3), 6, GFLAGS),
2307 +- GATE(0, "hsicphy12m_xin12m", "xin12m", CLK_IGNORE_UNUSED,
2308 ++ GATE(0, "hsicphy12m_xin12m", "xin12m", 0,
2309 + RK3288_CLKGATE_CON(13), 9, GFLAGS),
2310 + DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0,
2311 + RK3288_CLKSEL_CON(11), 8, 6, DFLAGS),
2312 +@@ -697,7 +697,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2313 + GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
2314 + GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
2315 + GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
2316 +- GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
2317 ++ GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
2318 +
2319 + /* ddrctrl [DDR Controller PHY clock] gates */
2320 + GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
2321 +@@ -837,12 +837,9 @@ static const char *const rk3288_critical_clocks[] __initconst = {
2322 + "pclk_alive_niu",
2323 + "pclk_pd_pmu",
2324 + "pclk_pmu_niu",
2325 +- "pclk_core_niu",
2326 +- "pclk_ddrupctl0",
2327 +- "pclk_publ0",
2328 +- "pclk_ddrupctl1",
2329 +- "pclk_publ1",
2330 + "pmu_hclk_otg0",
2331 ++ /* pwm-regulators on some boards, so keep critical until the pwm driver takes over */
2332 ++ "pclk_rkpwm",
2333 + };
2334 +
2335 + static void __iomem *rk3288_cru_base;
2336 +diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
2337 +index a371c66e72ef..bd9b5fbc443b 100644
2338 +--- a/drivers/clk/zynqmp/divider.c
2339 ++++ b/drivers/clk/zynqmp/divider.c
2340 +@@ -31,12 +31,14 @@
2341 + * struct zynqmp_clk_divider - adjustable divider clock
2342 + * @hw: handle between common and hardware-specific interfaces
2343 + * @flags: Hardware specific flags
2344 ++ * @is_frac: The divider is a fractional divider
2345 + * @clk_id: Id of clock
2346 + * @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2)
2347 + */
2348 + struct zynqmp_clk_divider {
2349 + struct clk_hw hw;
2350 + u8 flags;
2351 ++ bool is_frac;
2352 + u32 clk_id;
2353 + u32 div_type;
2354 + };
2355 +@@ -116,8 +118,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
2356 +
2357 + bestdiv = zynqmp_divider_get_val(*prate, rate);
2358 +
2359 +- if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) &&
2360 +- (divider->flags & CLK_FRAC))
2361 ++ if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
2362 + bestdiv = rate % *prate ? 1 : bestdiv;
2363 + *prate = rate * bestdiv;
2364 +
2365 +@@ -195,11 +196,13 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
2366 +
2367 + init.name = name;
2368 + init.ops = &zynqmp_clk_divider_ops;
2369 +- init.flags = nodes->flag;
2370 ++ /* CLK_FRAC is not defined in the common clk framework */
2371 ++ init.flags = nodes->flag & ~CLK_FRAC;
2372 + init.parent_names = parents;
2373 + init.num_parents = 1;
2374 +
2375 + /* struct clk_divider assignments */
2376 ++ div->is_frac = !!(nodes->flag & CLK_FRAC);
2377 + div->flags = nodes->type_flag;
2378 + div->hw.init = &init;
2379 + div->clk_id = clk_id;
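
The zynqmp divider fix above treats CLK_FRAC as what it is, a Xilinx-private bit that is not defined by the common clock framework: the bit is latched into the new is_frac field and masked out of init.flags before registration, so the core never interprets it as one of its own flags. A short sketch of keeping private flag bits out of a framework's flag word (flag values assumed):

    #include <stdbool.h>
    #include <stdio.h>

    #define CORE_FLAG_A      0x1     /* understood by the framework */
    #define PRIV_FLAG_FRAC 0x100     /* ours only; the core must not see it */

    struct divider {
        unsigned int core_flags;
        bool is_frac;                /* the private bit, kept on the side */
    };

    static void divider_register(struct divider *d, unsigned int node_flags)
    {
        d->is_frac = !!(node_flags & PRIV_FLAG_FRAC);
        d->core_flags = node_flags & ~PRIV_FLAG_FRAC;  /* strip it */
    }

    int main(void)
    {
        struct divider d;

        divider_register(&d, CORE_FLAG_A | PRIV_FLAG_FRAC);
        printf("core_flags=0x%x is_frac=%d\n", d.core_flags, d.is_frac);
        return 0;
    }
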
2380 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
2381 +index ef0e33e21b98..97b094963253 100644
2382 +--- a/drivers/cpufreq/cpufreq.c
2383 ++++ b/drivers/cpufreq/cpufreq.c
2384 +@@ -1103,6 +1103,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
2385 + cpufreq_global_kobject, "policy%u", cpu);
2386 + if (ret) {
2387 + pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
2388 ++ kobject_put(&policy->kobj);
2389 + goto err_free_real_cpus;
2390 + }
2391 +
2392 +diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
2393 +index ffa9adeaba31..9d1d9bf02710 100644
2394 +--- a/drivers/cpufreq/cpufreq_governor.c
2395 ++++ b/drivers/cpufreq/cpufreq_governor.c
2396 +@@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
2397 + /* Failure, so roll back. */
2398 + pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
2399 +
2400 ++ kobject_put(&dbs_data->attr_set.kobj);
2401 ++
2402 + policy->governor_data = NULL;
2403 +
2404 + if (!have_governor_per_policy())
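
This hunk and the cpufreq.c one before it fix the same leak: kobject_init_and_add() takes a reference even when it fails, so the error path must call kobject_put() to run the kobject's release callback rather than bailing out with the reference still held. A userspace analogue with a minimal refcounted object (hypothetical API; C11 atomics):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct kobj {
        atomic_int ref;
        void (*release)(struct kobj *k);
    };

    static void kobj_put(struct kobj *k)
    {
        if (atomic_fetch_sub(&k->ref, 1) == 1)
            k->release(k);             /* last reference: release runs */
    }

    /* Like kobject_init_and_add(): the reference is taken even on failure. */
    static int kobj_init_and_add(struct kobj *k, void (*rel)(struct kobj *),
                                 int fail)
    {
        atomic_store(&k->ref, 1);
        k->release = rel;
        return fail ? -1 : 0;
    }

    static void my_release(struct kobj *k)
    {
        printf("release ran\n");
        free(k);
    }

    int main(void)
    {
        struct kobj *k = malloc(sizeof(*k));

        if (!k)
            return 1;
        if (kobj_init_and_add(k, my_release, /* fail = */ 1)) {
            kobj_put(k);   /* the fix: drop the ref, don't just bail out */
            return 0;
        }
        kobj_put(k);
        return 0;
    }
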
2405 +diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
2406 +index 9fedf627e000..3ee55aee5d71 100644
2407 +--- a/drivers/cpufreq/imx6q-cpufreq.c
2408 ++++ b/drivers/cpufreq/imx6q-cpufreq.c
2409 +@@ -407,11 +407,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
2410 + ret = imx6ul_opp_check_speed_grading(cpu_dev);
2411 + if (ret) {
2412 + if (ret == -EPROBE_DEFER)
2413 +- return ret;
2414 ++ goto put_node;
2415 +
2416 + dev_err(cpu_dev, "failed to read ocotp: %d\n",
2417 + ret);
2418 +- return ret;
2419 ++ goto put_node;
2420 + }
2421 + } else {
2422 + imx6q_opp_check_speed_grading(cpu_dev);
2423 +diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
2424 +index c2dd43f3f5d8..8d63a6dc8383 100644
2425 +--- a/drivers/cpufreq/kirkwood-cpufreq.c
2426 ++++ b/drivers/cpufreq/kirkwood-cpufreq.c
2427 +@@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
2428 + priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
2429 + if (IS_ERR(priv.cpu_clk)) {
2430 + dev_err(priv.dev, "Unable to get cpuclk\n");
2431 +- return PTR_ERR(priv.cpu_clk);
2432 ++ err = PTR_ERR(priv.cpu_clk);
2433 ++ goto out_node;
2434 + }
2435 +
2436 + err = clk_prepare_enable(priv.cpu_clk);
2437 + if (err) {
2438 + dev_err(priv.dev, "Unable to prepare cpuclk\n");
2439 +- return err;
2440 ++ goto out_node;
2441 + }
2442 +
2443 + kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
2444 +@@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
2445 + goto out_ddr;
2446 + }
2447 +
2448 +- of_node_put(np);
2449 +- np = NULL;
2450 +-
2451 + err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
2452 +- if (!err)
2453 +- return 0;
2454 ++ if (err) {
2455 ++ dev_err(priv.dev, "Failed to register cpufreq driver\n");
2456 ++ goto out_powersave;
2457 ++ }
2458 +
2459 +- dev_err(priv.dev, "Failed to register cpufreq driver\n");
2460 ++ of_node_put(np);
2461 ++ return 0;
2462 +
2463 ++out_powersave:
2464 + clk_disable_unprepare(priv.powersave_clk);
2465 + out_ddr:
2466 + clk_disable_unprepare(priv.ddr_clk);
2467 + out_cpu:
2468 + clk_disable_unprepare(priv.cpu_clk);
2469 ++out_node:
2470 + of_node_put(np);
2471 +
2472 + return err;
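
The kirkwood rework above is a textbook goto-ladder cleanup: each acquired resource (the device node, the clocks, the registered driver) gets a label that releases it, a failure jumps to the label matching the last successful step, and of_node_put(np) moves behind the new out_node label so every path drops the node reference exactly once. A condensed sketch of the pattern, with hypothetical resources:

    #include <stdio.h>

    static int get_res(const char *name, int ok)
    {
        printf("%s %s\n", ok ? "acquired" : "FAILED", name);
        return ok ? 0 : -1;
    }

    static void put_res(const char *name) { printf("released %s\n", name); }

    static int probe(int fail_at)
    {
        int err;

        err = get_res("node", fail_at != 1);
        if (err)
            return err;
        err = get_res("cpu_clk", fail_at != 2);
        if (err)
            goto out_node;
        err = get_res("driver", fail_at != 3);
        if (err)
            goto out_clk;

        put_res("node");     /* only needed during probe itself */
        return 0;

    out_clk:
        put_res("cpu_clk");
    out_node:
        put_res("node");
        return err;
    }

    int main(void)
    {
        probe(0);            /* success path */
        probe(3);            /* unwinds the clock, then the node */
        return 0;
    }
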
2473 +diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
2474 +index 75dfbd2a58ea..c7710c149de8 100644
2475 +--- a/drivers/cpufreq/pasemi-cpufreq.c
2476 ++++ b/drivers/cpufreq/pasemi-cpufreq.c
2477 +@@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
2478 +
2479 + cpu = of_get_cpu_node(policy->cpu, NULL);
2480 +
2481 ++ of_node_put(cpu);
2482 + if (!cpu)
2483 + goto out;
2484 +
2485 +diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
2486 +index 52f0d91d30c1..9b4ce2eb8222 100644
2487 +--- a/drivers/cpufreq/pmac32-cpufreq.c
2488 ++++ b/drivers/cpufreq/pmac32-cpufreq.c
2489 +@@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
2490 + volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
2491 + if (volt_gpio_np)
2492 + voltage_gpio = read_gpio(volt_gpio_np);
2493 ++ of_node_put(volt_gpio_np);
2494 + if (!voltage_gpio){
2495 + pr_err("missing cpu-vcore-select gpio\n");
2496 + return 1;
2497 +@@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
2498 + if (volt_gpio_np)
2499 + voltage_gpio = read_gpio(volt_gpio_np);
2500 +
2501 ++ of_node_put(volt_gpio_np);
2502 + pvr = mfspr(SPRN_PVR);
2503 + has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
2504 +
2505 +diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
2506 +index 41a0f0be3f9f..8414c3a4ea08 100644
2507 +--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
2508 ++++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
2509 +@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
2510 + if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
2511 + !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
2512 + pr_info("invalid CBE regs pointers for cpufreq\n");
2513 ++ of_node_put(cpu);
2514 + return -EINVAL;
2515 + }
2516 +
2517 +diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
2518 +index a4b5ff2b72f8..f6936bb3b7be 100644
2519 +--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
2520 ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
2521 +@@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq)
2522 + }
2523 + } else {
2524 + /* Since we have the flag final, we can go up to modulo 4 */
2525 +- end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
2526 ++ if (areq->nbytes < 4)
2527 ++ end = 0;
2528 ++ else
2529 ++ end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
2530 + }
2531 +
2532 + /* TODO if SGlen % 4 and !op->len then DMA */
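
The sun4i-ss guard above covers the short-input case: with unsigned arithmetic, ((areq->nbytes + op->len) / 4) * 4 - op->len can round below op->len and wrap to a huge value when fewer than four new bytes arrive, so the fix pins end to 0 for nbytes < 4 (with at least four new bytes, the rounded sum always stays above op->len). A tiny demonstration of the wrap and the guard:

    #include <stdio.h>

    /*
     * Round (nbytes + len) down to a multiple of 4, minus what is already
     * buffered -- the pre-fix formula, in unsigned arithmetic.
     */
    static unsigned int hash_end(unsigned int nbytes, unsigned int len)
    {
        return ((nbytes + len) / 4) * 4 - len;
    }

    int main(void)
    {
        unsigned int nbytes = 1, len = 6;

        /* 6 bytes buffered, 1 new byte: (7/4)*4 - 6 wraps around */
        printf("unguarded: %u\n", hash_end(nbytes, len));

        /* the fix: under 4 new bytes cannot complete a 4-byte word */
        unsigned int end = (nbytes < 4) ? 0 : hash_end(nbytes, len);
        printf("guarded:   %u\n", end);
        return 0;
    }
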
2533 +diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
2534 +index de78282b8f44..9c6b5c1d6a1a 100644
2535 +--- a/drivers/crypto/vmx/aesp8-ppc.pl
2536 ++++ b/drivers/crypto/vmx/aesp8-ppc.pl
2537 +@@ -1357,7 +1357,7 @@ Loop_ctr32_enc:
2538 + addi $idx,$idx,16
2539 + bdnz Loop_ctr32_enc
2540 +
2541 +- vadduwm $ivec,$ivec,$one
2542 ++ vadduqm $ivec,$ivec,$one
2543 + vmr $dat,$inptail
2544 + lvx $inptail,0,$inp
2545 + addi $inp,$inp,16
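
The one-line aesp8-ppc.pl fix above replaces vadduwm, which adds the vectors as four independent 32-bit words, with vadduqm, which adds them as a single 128-bit quantity: the CTR-mode counter in $ivec must carry across word boundaries, and with the per-word add the carry out of the low 32-bit word was silently lost, repeating counter blocks once that word wrapped. A portable C sketch of what the 128-bit increment has to do, byte-wise on a big-endian counter:

    #include <stdint.h>
    #include <stdio.h>

    /* Add 1 to a 128-bit big-endian counter, propagating the carry. */
    static void ctr128_inc(uint8_t ctr[16])
    {
        for (int i = 15; i >= 0; i--)
            if (++ctr[i] != 0)      /* stop once a byte doesn't wrap */
                break;
    }

    int main(void)
    {
        /* low 32-bit word all-ones: the next increment must carry out */
        uint8_t ctr[16] = { [12] = 0xff, [13] = 0xff,
                            [14] = 0xff, [15] = 0xff };

        ctr128_inc(ctr);            /* byte 11 becomes 0x01 */
        for (int i = 0; i < 16; i++)
            printf("%02x", ctr[i]);
        printf("\n");
        return 0;
    }
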
2546 +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
2547 +index 0ae3de76833b..839621b044f4 100644
2548 +--- a/drivers/devfreq/devfreq.c
2549 ++++ b/drivers/devfreq/devfreq.c
2550 +@@ -228,7 +228,7 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)