Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Fri, 31 May 2019 15:02:44
Message-Id: 1559314932.598ad5bdcb5346e952e480a99a53f2b9157112d0.mpagano@gentoo
commit:     598ad5bdcb5346e952e480a99a53f2b9157112d0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri May 31 15:02:12 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri May 31 15:02:12 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=598ad5bd

Linux patch 4.19.47

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1046_linux-4.19.47.patch | 9410 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9414 insertions(+)

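For context, a minimal sketch of how a patch listed in 0000_README is applied to a plain kernel tree; the tarball layout and working directory here are assumptions for illustration, not part of this commit:

  # Assumed layout: linux-4.19 sources unpacked in the current directory,
  # with the linux-patches checkout available at ../linux-patches.
  cd linux-4.19
  # Apply the incremental 4.19.46 -> 4.19.47 patch with the -p1 strip
  # level implied by the a/ and b/ path prefixes in this patch set.
  patch -p1 < ../linux-patches/1046_linux-4.19.47.patch
  # The Makefile's SUBLEVEL should now read 47 (see the Makefile hunk below).
  grep '^SUBLEVEL' Makefile
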
diff --git a/0000_README b/0000_README
index 87c4eaf..0c0cd1a 100644
--- a/0000_README
+++ b/0000_README
@@ -227,6 +227,10 @@ Patch: 1045_linux-4.19.46.patch
From: http://www.kernel.org
Desc: Linux 4.19.46

+Patch: 1046_linux-4.19.47.patch
+From: http://www.kernel.org
+Desc: Linux 4.19.47
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1046_linux-4.19.47.patch b/1046_linux-4.19.47.patch
new file mode 100644
index 0000000..645af41
--- /dev/null
+++ b/1046_linux-4.19.47.patch
@@ -0,0 +1,9410 @@
+diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
+index 3c6fc2e08d04..eeb3fc9d777b 100644
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -58,6 +58,7 @@ stable kernels.
+ | ARM | Cortex-A72 | #853709 | N/A |
+ | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
+ | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
++| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
+ | ARM | MMU-500 | #841119,#826419 | N/A |
+ | | | | |
+ | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
+diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
+index 9ecde517728c..2793d4eac55f 100644
+--- a/Documentation/sysctl/net.txt
++++ b/Documentation/sysctl/net.txt
+@@ -92,6 +92,14 @@ Values :
+ 0 - disable JIT kallsyms export (default value)
+ 1 - enable JIT kallsyms export for privileged users only
+
++bpf_jit_limit
++-------------
++
++This enforces a global limit for memory allocations to the BPF JIT
++compiler in order to reject unprivileged JIT requests once it has
++been surpassed. bpf_jit_limit contains the value of the global limit
++in bytes.
++
+ dev_weight
+ --------------
+
+diff --git a/Makefile b/Makefile
+index 5383dd317d59..b3ba28ff73d5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 46
++SUBLEVEL = 47
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
+index 07e27f212dc7..d2453e2d3f1f 100644
+--- a/arch/arm/include/asm/cp15.h
++++ b/arch/arm/include/asm/cp15.h
+@@ -68,6 +68,8 @@
+ #define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
+ #define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+
++#define CNTVCT __ACCESS_CP15_64(1, c14)
++
+ extern unsigned long cr_alignment; /* defined in entry-armv.S */
+
+ static inline unsigned long get_cr(void)
+diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
+index a9dd619c6c29..7bdbf5d5c47d 100644
+--- a/arch/arm/vdso/vgettimeofday.c
++++ b/arch/arm/vdso/vgettimeofday.c
+@@ -18,9 +18,9 @@
+ #include <linux/compiler.h>
+ #include <linux/hrtimer.h>
+ #include <linux/time.h>
+-#include <asm/arch_timer.h>
+ #include <asm/barrier.h>
+ #include <asm/bug.h>
++#include <asm/cp15.h>
+ #include <asm/page.h>
+ #include <asm/unistd.h>
+ #include <asm/vdso_datapage.h>
+@@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata)
+ u64 cycle_now;
+ u64 nsec;
+
+- cycle_now = arch_counter_get_cntvct();
++ isb();
++ cycle_now = read_sysreg(CNTVCT);
+
+ cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 1b1a0e95c751..8790a29d0af4 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -479,6 +479,24 @@ config ARM64_ERRATUM_1024718
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_1463225
++ bool "Cortex-A76: Software Step might prevent interrupt recognition"
++ default y
++ help
++ This option adds a workaround for Arm Cortex-A76 erratum 1463225.
++
++ On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
++ of a system call instruction (SVC) can prevent recognition of
++ subsequent interrupts when software stepping is disabled in the
++ exception handler of the system call and either kernel debugging
++ is enabled or VHE is in use.
++
++ Work around the erratum by triggering a dummy step exception
++ when handling a system call from a task that is being stepped
++ in a VHE configuration of the kernel.
++
++ If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ bool "Cavium erratum 22375, 24313"
+ default y
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index ae1f70450fb2..25ce9056cf64 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -51,7 +51,8 @@
+ #define ARM64_SSBD 30
+ #define ARM64_MISMATCHED_CACHE_TYPE 31
+ #define ARM64_HAS_STAGE2_FWB 32
++#define ARM64_WORKAROUND_1463225 33
+
+-#define ARM64_NCAPS 33
++#define ARM64_NCAPS 34
+
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index ea690b3562af..b4a48419769f 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -86,6 +86,7 @@
+ #define ARM_CPU_PART_CORTEX_A75 0xD0A
+ #define ARM_CPU_PART_CORTEX_A35 0xD04
+ #define ARM_CPU_PART_CORTEX_A55 0xD05
++#define ARM_CPU_PART_CORTEX_A76 0xD0B
+
+ #define APM_CPU_PART_POTENZA 0x000
+
+@@ -110,6 +111,7 @@
+ #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
+ #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
+ #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
++#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 1bdeca8918a6..ea423db39364 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -444,6 +444,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
+ return __pmd_to_phys(pmd);
+ }
+
++static inline void pte_unmap(pte_t *pte) { }
++
+ /* Find an entry in the third-level page table. */
+ #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+@@ -452,7 +454,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
+
+ #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
+ #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
+-#define pte_unmap(pte) do { } while (0)
+ #define pte_unmap_nested(pte) do { } while (0)
+
+ #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
+diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
+index 2b9a63771eda..f89263c8e11a 100644
+--- a/arch/arm64/include/asm/vdso_datapage.h
++++ b/arch/arm64/include/asm/vdso_datapage.h
+@@ -38,6 +38,7 @@ struct vdso_data {
+ __u32 tz_minuteswest; /* Whacky timezone stuff */
+ __u32 tz_dsttime;
+ __u32 use_syscall;
++ __u32 hrtimer_res;
+ };
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index 323aeb5f2fe6..92fba851ce53 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -99,7 +99,7 @@ int main(void)
+ DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
+ DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+ DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
+- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
++ DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res));
+ DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
+ DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
+ DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index dec10898d688..dc6c535cbd13 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -411,6 +411,22 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
+ }
+ #endif /* CONFIG_ARM64_SSBD */
+
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
++
++static bool
++has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
++ int scope)
++{
++ u32 midr = read_cpuid_id();
++ /* Cortex-A76 r0p0 - r3p1 */
++ struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
++
++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++ return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
++}
++#endif
++
+ #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
+ .matches = is_affected_midr_range, \
+ .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
+@@ -679,6 +695,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_ssbd_mitigation,
+ },
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++ {
++ .desc = "ARM erratum 1463225",
++ .capability = ARM64_WORKAROUND_1463225,
++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++ .matches = has_cortex_a76_erratum_1463225,
++ },
+ #endif
+ {
+ }
+diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
+index ea001241bdd4..00f8b8612b69 100644
+--- a/arch/arm64/kernel/cpu_ops.c
++++ b/arch/arm64/kernel/cpu_ops.c
+@@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu)
+ pr_err("%pOF: missing enable-method property\n",
+ dn);
+ }
++ of_node_put(dn);
+ } else {
+ enable_method = acpi_get_enable_method(cpu);
+ if (!enable_method) {
+diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
+index b09b6f75f759..06941c1fe418 100644
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+ /*
+- * Randomize the module region over a 4 GB window covering the
++ * Randomize the module region over a 2 GB window covering the
+ * kernel. This reduces the risk of modules leaking information
+ * about the address of the kernel itself, but results in
+ * branches between modules and the core kernel that are
+ * resolved via PLTs. (Branches between modules will be
+ * resolved normally.)
+ */
+- module_range = SZ_4G - (u64)(_end - _stext);
+- module_alloc_base = max((u64)_end + offset - SZ_4G,
++ module_range = SZ_2G - (u64)(_end - _stext);
++ module_alloc_base = max((u64)_end + offset - SZ_2G,
+ (u64)MODULES_VADDR);
+ } else {
+ /*
+diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
+index f0f27aeefb73..0b368ceccee4 100644
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -56,7 +56,7 @@ void *module_alloc(unsigned long size)
+ * can simply omit this fallback in that case.
+ */
+ p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
+- module_alloc_base + SZ_4G, GFP_KERNEL,
++ module_alloc_base + SZ_2G, GFP_KERNEL,
+ PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+
+diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
+index 5610ac01c1ec..871c739f060a 100644
+--- a/arch/arm64/kernel/syscall.c
++++ b/arch/arm64/kernel/syscall.c
+@@ -8,6 +8,7 @@
+ #include <linux/syscalls.h>
+
+ #include <asm/daifflags.h>
++#include <asm/debug-monitors.h>
+ #include <asm/fpsimd.h>
+ #include <asm/syscall.h>
+ #include <asm/thread_info.h>
+@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
+ int syscall_trace_enter(struct pt_regs *regs);
+ void syscall_trace_exit(struct pt_regs *regs);
+
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
++
++static void cortex_a76_erratum_1463225_svc_handler(void)
++{
++ u32 reg, val;
++
++ if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
++ return;
++
++ if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
++ return;
++
++ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
++ reg = read_sysreg(mdscr_el1);
++ val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
++ write_sysreg(val, mdscr_el1);
++ asm volatile("msr daifclr, #8");
++ isb();
++
++ /* We will have taken a single-step exception by this point */
++
++ write_sysreg(reg, mdscr_el1);
++ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
++}
++#else
++static void cortex_a76_erratum_1463225_svc_handler(void) { }
++#endif /* CONFIG_ARM64_ERRATUM_1463225 */
++
+ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ const syscall_fn_t syscall_table[])
+ {
+@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ regs->orig_x0 = regs->regs[0];
+ regs->syscallno = scno;
+
++ cortex_a76_erratum_1463225_svc_handler();
+ local_daif_restore(DAIF_PROCCTX);
+ user_exit();
+
+diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
+index 2d419006ad43..ec0bb588d755 100644
+--- a/arch/arm64/kernel/vdso.c
++++ b/arch/arm64/kernel/vdso.c
+@@ -232,6 +232,9 @@ void update_vsyscall(struct timekeeper *tk)
+ vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+
++ /* Read without the seqlock held by clock_getres() */
++ WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);
++
+ if (!use_syscall) {
+ /* tkr_mono.cycle_last == tkr_raw.cycle_last */
+ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
+diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
+index e8f60112818f..856fee6d3512 100644
+--- a/arch/arm64/kernel/vdso/gettimeofday.S
++++ b/arch/arm64/kernel/vdso/gettimeofday.S
+@@ -308,13 +308,14 @@ ENTRY(__kernel_clock_getres)
+ ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
+ b.ne 1f
+
+- ldr x2, 5f
++ adr vdso_data, _vdso_data
++ ldr w2, [vdso_data, #CLOCK_REALTIME_RES]
+ b 2f
+ 1:
+ cmp w0, #CLOCK_REALTIME_COARSE
+ ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
+ b.ne 4f
+- ldr x2, 6f
++ ldr x2, 5f
+ 2:
+ cbz x1, 3f
+ stp xzr, x2, [x1]
+@@ -328,8 +329,6 @@ ENTRY(__kernel_clock_getres)
+ svc #0
+ ret
+ 5:
+- .quad CLOCK_REALTIME_RES
+-6:
+ .quad CLOCK_COARSE_RES
+ .cfi_endproc
+ ENDPROC(__kernel_clock_getres)
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index c389f2bef938..d3a5bb16f0b2 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -664,6 +664,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
++ if (!is_vmalloc_addr(cpu_addr)) {
++ unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
++ return __swiotlb_mmap_pfn(vma, pfn, size);
++ }
++
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ /*
+ * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+@@ -687,6 +692,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
++ if (!is_vmalloc_addr(cpu_addr)) {
++ struct page *page = virt_to_page(cpu_addr);
++ return __swiotlb_get_sgtable_page(sgt, page, size);
++ }
++
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ /*
+ * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index a4c134677285..88cf0a0cb616 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -827,14 +827,47 @@ void __init hook_debug_fault_code(int nr,
+ debug_fault_info[nr].name = name;
+ }
+
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
++
++static int __exception
++cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
++{
++ if (user_mode(regs))
++ return 0;
++
++ if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
++ return 0;
++
++ /*
++ * We've taken a dummy step exception from the kernel to ensure
++ * that interrupts are re-enabled on the syscall path. Return back
++ * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
++ * masked so that we can safely restore the mdscr and get on with
++ * handling the syscall.
++ */
++ regs->pstate |= PSR_D_BIT;
++ return 1;
++}
++#else
++static int __exception
++cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
++{
++ return 0;
++}
++#endif /* CONFIG_ARM64_ERRATUM_1463225 */
++
+ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
+- unsigned int esr,
+- struct pt_regs *regs)
++ unsigned int esr,
++ struct pt_regs *regs)
+ {
+ const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
+ unsigned long pc = instruction_pointer(regs);
+ int rv;
+
++ if (cortex_a76_erratum_1463225_debug_handler(regs))
++ return 0;
++
+ /*
+ * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
+ * already disabled to preserve the last enabled/disabled addresses.
+diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
+index 9d9f6f334d3c..3da3e2b1b51b 100644
+--- a/arch/powerpc/boot/addnote.c
++++ b/arch/powerpc/boot/addnote.c
+@@ -223,7 +223,11 @@ main(int ac, char **av)
+ PUT_16(E_PHNUM, np + 2);
+
+ /* write back */
+- lseek(fd, (long) 0, SEEK_SET);
++ i = lseek(fd, (long) 0, SEEK_SET);
++ if (i < 0) {
++ perror("lseek");
++ exit(1);
++ }
+ i = write(fd, buf, n);
+ if (i < 0) {
+ perror("write");
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index 4898e9491a1c..9168a247e24f 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -970,7 +970,9 @@ start_here_multiplatform:
+
+ /* Restore parameters passed from prom_init/kexec */
+ mr r3,r31
+- bl early_setup /* also sets r13 and SPRG_PACA */
++ LOAD_REG_ADDR(r12, DOTSYM(early_setup))
++ mtctr r12
++ bctrl /* also sets r13 and SPRG_PACA */
+
+ LOAD_REG_ADDR(r3, start_here_common)
+ ld r4,PACAKMSR(r13)
+diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
+index 3c6ab22a0c4e..af3c15a1d41e 100644
+--- a/arch/powerpc/kernel/watchdog.c
++++ b/arch/powerpc/kernel/watchdog.c
+@@ -77,7 +77,7 @@ static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */
+
+ static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */
+
+-static DEFINE_PER_CPU(struct timer_list, wd_timer);
++static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer);
+ static DEFINE_PER_CPU(u64, wd_timer_tb);
+
+ /* SMP checker bits */
+@@ -293,21 +293,21 @@ out:
+ nmi_exit();
+ }
+
+-static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
+-{
+- t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
+- if (wd_timer_period_ms > 1000)
+- t->expires = __round_jiffies_up(t->expires, cpu);
+- add_timer_on(t, cpu);
+-}
+-
+-static void wd_timer_fn(struct timer_list *t)
++static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ {
+ int cpu = smp_processor_id();
+
++ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
++ return HRTIMER_NORESTART;
++
++ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
++ return HRTIMER_NORESTART;
++
+ watchdog_timer_interrupt(cpu);
+
+- wd_timer_reset(cpu, t);
++ hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms));
++
++ return HRTIMER_RESTART;
+ }
+
+ void arch_touch_nmi_watchdog(void)
+@@ -323,37 +323,22 @@ void arch_touch_nmi_watchdog(void)
+ }
+ EXPORT_SYMBOL(arch_touch_nmi_watchdog);
+
+-static void start_watchdog_timer_on(unsigned int cpu)
+-{
+- struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
+-
+- per_cpu(wd_timer_tb, cpu) = get_tb();
+-
+- timer_setup(t, wd_timer_fn, TIMER_PINNED);
+- wd_timer_reset(cpu, t);
+-}
+-
+-static void stop_watchdog_timer_on(unsigned int cpu)
+-{
+- struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
+-
+- del_timer_sync(t);
+-}
+-
+-static int start_wd_on_cpu(unsigned int cpu)
++static void start_watchdog(void *arg)
+ {
++ struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
++ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
+ WARN_ON(1);
+- return 0;
++ return;
+ }
+
+ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+- return 0;
++ return;
+
+ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
+- return 0;
++ return;
+
+ wd_smp_lock(&flags);
+ cpumask_set_cpu(cpu, &wd_cpus_enabled);
+@@ -363,27 +348,40 @@ static int start_wd_on_cpu(unsigned int cpu)
+ }
+ wd_smp_unlock(&flags);
+
+- start_watchdog_timer_on(cpu);
++ *this_cpu_ptr(&wd_timer_tb) = get_tb();
+
+- return 0;
++ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer->function = watchdog_timer_fn;
++ hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
++ HRTIMER_MODE_REL_PINNED);
+ }
+
+-static int stop_wd_on_cpu(unsigned int cpu)
++static int start_watchdog_on_cpu(unsigned int cpu)
+ {
++ return smp_call_function_single(cpu, start_watchdog, NULL, true);
++}
++
++static void stop_watchdog(void *arg)
++{
++ struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
++ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
+- return 0; /* Can happen in CPU unplug case */
++ return; /* Can happen in CPU unplug case */
+
+- stop_watchdog_timer_on(cpu);
++ hrtimer_cancel(hrtimer);
+
+ wd_smp_lock(&flags);
+ cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+ wd_smp_unlock(&flags);
+
+ wd_smp_clear_cpu_pending(cpu, get_tb());
++}
+
+- return 0;
++static int stop_watchdog_on_cpu(unsigned int cpu)
++{
++ return smp_call_function_single(cpu, stop_watchdog, NULL, true);
+ }
+
+ static void watchdog_calc_timeouts(void)
+@@ -402,7 +400,7 @@ void watchdog_nmi_stop(void)
+ int cpu;
+
+ for_each_cpu(cpu, &wd_cpus_enabled)
+- stop_wd_on_cpu(cpu);
++ stop_watchdog_on_cpu(cpu);
+ }
+
+ void watchdog_nmi_start(void)
+@@ -411,7 +409,7 @@ void watchdog_nmi_start(void)
+
+ watchdog_calc_timeouts();
+ for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
+- start_wd_on_cpu(cpu);
++ start_watchdog_on_cpu(cpu);
+ }
+
+ /*
+@@ -423,7 +421,8 @@ int __init watchdog_nmi_probe(void)
+
+ err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "powerpc/watchdog:online",
+- start_wd_on_cpu, stop_wd_on_cpu);
++ start_watchdog_on_cpu,
++ stop_watchdog_on_cpu);
+ if (err < 0) {
+ pr_warn("could not be initialized");
+ return err;
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index 10fb43efef50..f473c05e9649 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -1495,6 +1495,9 @@ int start_topology_update(void)
+ {
+ int rc = 0;
+
++ if (!topology_updates_enabled)
++ return 0;
++
+ if (firmware_has_feature(FW_FEATURE_PRRN)) {
+ if (!prrn_enabled) {
+ prrn_enabled = 1;
+@@ -1524,6 +1527,9 @@ int stop_topology_update(void)
+ {
+ int rc = 0;
+
++ if (!topology_updates_enabled)
++ return 0;
++
+ if (prrn_enabled) {
+ prrn_enabled = 0;
+ #ifdef CONFIG_SMP
+@@ -1579,11 +1585,13 @@ static ssize_t topology_write(struct file *file, const char __user *buf,
+
+ kbuf[read_len] = '\0';
+
+- if (!strncmp(kbuf, "on", 2))
++ if (!strncmp(kbuf, "on", 2)) {
++ topology_updates_enabled = true;
+ start_topology_update();
+- else if (!strncmp(kbuf, "off", 3))
++ } else if (!strncmp(kbuf, "off", 3)) {
+ stop_topology_update();
+- else
++ topology_updates_enabled = false;
++ } else
+ return -EINVAL;
+
+ return count;
+@@ -1598,9 +1606,7 @@ static const struct file_operations topology_ops = {
+
+ static int topology_update_init(void)
+ {
+- /* Do not poll for changes if disabled at boot */
+- if (topology_updates_enabled)
+- start_topology_update();
++ start_topology_update();
+
+ if (vphn_enabled)
+ topology_schedule_update();
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index 1fafc32b12a0..555322677074 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -496,6 +496,11 @@ static int nest_imc_event_init(struct perf_event *event)
+ * Get the base memory addresss for this cpu.
+ */
+ chip_id = cpu_to_chip_id(event->cpu);
++
++ /* Return, if chip_id is not valid */
++ if (chip_id < 0)
++ return -ENODEV;
++
+ pcni = pmu->mem_info;
+ do {
+ if (pcni->id == chip_id) {
+@@ -503,7 +508,7 @@ static int nest_imc_event_init(struct perf_event *event)
+ break;
+ }
+ pcni++;
+- } while (pcni);
++ } while (pcni->vbase != 0);
+
+ if (!flag)
+ return -ENODEV;
+diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
+index 58a07948c76e..3d27f02695e4 100644
+--- a/arch/powerpc/platforms/powernv/opal-imc.c
++++ b/arch/powerpc/platforms/powernv/opal-imc.c
+@@ -127,7 +127,7 @@ static int imc_get_mem_addr_nest(struct device_node *node,
+ nr_chips))
+ goto error;
+
+- pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(*pmu_ptr->mem_info),
++ pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
+ GFP_KERNEL);
+ if (!pmu_ptr->mem_info)
+ goto error;
+diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
+index 5a286b012043..602e7cc26d11 100644
+--- a/arch/s390/kernel/kexec_elf.c
++++ b/arch/s390/kernel/kexec_elf.c
+@@ -19,10 +19,15 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
+ struct kexec_buf buf;
+ const Elf_Ehdr *ehdr;
+ const Elf_Phdr *phdr;
++ Elf_Addr entry;
+ int i, ret;
+
+ ehdr = (Elf_Ehdr *)kernel;
+ buf.image = image;
++ if (image->type == KEXEC_TYPE_CRASH)
++ entry = STARTUP_KDUMP_OFFSET;
++ else
++ entry = ehdr->e_entry;
+
+ phdr = (void *)ehdr + ehdr->e_phoff;
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+@@ -35,7 +40,7 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
+ buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
+ buf.memsz = phdr->p_memsz;
+
+- if (phdr->p_paddr == 0) {
++ if (entry - phdr->p_paddr < phdr->p_memsz) {
+ data->kernel_buf = buf.buffer;
+ data->memsz += STARTUP_NORMAL_OFFSET;
+
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index f2cc7da473e4..ae894ac83fd6 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -410,6 +410,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
+ return old;
+ }
+
++#ifdef CONFIG_PGSTE
+ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+ {
+ pgd_t *pgd;
+@@ -427,6 +428,7 @@ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+ pmd = pmd_alloc(mm, pud, addr);
+ return pmd;
+ }
++#endif
+
+ pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t new)
+diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h
+index 96b8cb1f754a..029bbadaf7ab 100644
+--- a/arch/sh/include/cpu-sh4/cpu/sh7786.h
++++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h
+@@ -135,7 +135,7 @@ enum {
+
+ static inline u32 sh7786_mm_sel(void)
+ {
+- return __raw_readl(0xFC400020) & 0x7;
++ return __raw_readl((const volatile void __iomem *)0xFC400020) & 0x7;
+ }
+
+ #endif /* __CPU_SH7786_H__ */
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index ffc823a8312f..ab2071e40efe 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -47,7 +47,7 @@ export REALMODE_CFLAGS
+ export BITS
+
+ ifdef CONFIG_X86_NEED_RELOCS
+- LDFLAGS_vmlinux := --emit-relocs
++ LDFLAGS_vmlinux := --emit-relocs --discard-none
+ endif
+
+ #
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index 56194c571299..4a650eb3d94a 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -584,6 +584,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
++
++ X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
+ { },
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
+diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
+index 91039ffed633..2413169ce362 100644
+--- a/arch/x86/events/intel/rapl.c
++++ b/arch/x86/events/intel/rapl.c
+@@ -780,6 +780,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
+
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
++
++ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init),
+ {},
+ };
+
+diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
+index 1b9f85abf9bc..ace6c1e752fb 100644
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -89,6 +89,7 @@ static bool test_intel(int idx)
+ case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
++ case INTEL_FAM6_ICELAKE_MOBILE:
+ if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
+ return true;
+ break;
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index 86b1341cba9a..513ba49c204f 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -61,9 +61,8 @@
+ } while (0)
+
+ #define RELOAD_SEG(seg) { \
+- unsigned int pre = GET_SEG(seg); \
++ unsigned int pre = (seg) | 3; \
+ unsigned int cur = get_user_seg(seg); \
+- pre |= 3; \
+ if (pre != cur) \
+ set_user_seg(seg, pre); \
+ }
+@@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext_32 __user *sc)
+ {
+ unsigned int tmpflags, err = 0;
++ u16 gs, fs, es, ds;
+ void __user *buf;
+ u32 tmp;
+
+@@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ current->restart_block.fn = do_no_restart_syscall;
+
+ get_user_try {
+- /*
+- * Reload fs and gs if they have changed in the signal
+- * handler. This does not handle long fs/gs base changes in
+- * the handler, but does not clobber them at least in the
+- * normal case.
+- */
+- RELOAD_SEG(gs);
+- RELOAD_SEG(fs);
+- RELOAD_SEG(ds);
+- RELOAD_SEG(es);
++ gs = GET_SEG(gs);
++ fs = GET_SEG(fs);
++ ds = GET_SEG(ds);
++ es = GET_SEG(es);
+
+ COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
+ COPY(dx); COPY(cx); COPY(ip); COPY(ax);
+@@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ buf = compat_ptr(tmp);
+ } get_user_catch(err);
+
++ /*
++ * Reload fs and gs if they have changed in the signal
++ * handler. This does not handle long fs/gs base changes in
++ * the handler, but does not clobber them at least in the
++ * normal case.
++ */
++ RELOAD_SEG(gs);
++ RELOAD_SEG(fs);
++ RELOAD_SEG(ds);
++ RELOAD_SEG(es);
++
+ err |= fpu__restore_sig(buf, 1);
+
+ force_iret();
+diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
+index 05861cc08787..0bbb07eaed6b 100644
+--- a/arch/x86/include/asm/text-patching.h
++++ b/arch/x86/include/asm/text-patching.h
+@@ -39,6 +39,7 @@ extern int poke_int3_handler(struct pt_regs *regs);
+ extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+ extern int after_bootmem;
+
++#ifndef CONFIG_UML_X86
+ static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+ {
+ regs->ip = ip;
+@@ -65,6 +66,7 @@ static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
+ int3_emulate_jmp(regs, func);
+ }
+-#endif
++#endif /* CONFIG_X86_64 */
++#endif /* !CONFIG_UML_X86 */
+
+ #endif /* _ASM_X86_TEXT_PATCHING_H */
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index b9d5e7c9ef43..918a23704c0c 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -662,15 +662,29 @@ void __init alternative_instructions(void)
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+ void *__init_or_module text_poke_early(void *addr, const void *opcode,
+- size_t len)
++ size_t len)
+ {
+ unsigned long flags;
+- local_irq_save(flags);
+- memcpy(addr, opcode, len);
+- local_irq_restore(flags);
+- sync_core();
+- /* Could also do a CLFLUSH here to speed up CPU recovery; but
+- that causes hangs on some VIA CPUs. */
++
++ if (boot_cpu_has(X86_FEATURE_NX) &&
++ is_module_text_address((unsigned long)addr)) {
++ /*
++ * Modules text is marked initially as non-executable, so the
++ * code cannot be running and speculative code-fetches are
++ * prevented. Just change the code.
++ */
++ memcpy(addr, opcode, len);
++ } else {
++ local_irq_save(flags);
++ memcpy(addr, opcode, len);
++ local_irq_restore(flags);
++ sync_core();
++
++ /*
++ * Could also do a CLFLUSH here to speed up CPU recovery; but
++ * that causes hangs on some VIA CPUs.
++ */
++ }
+ return addr;
+ }
+
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+index c805a06e14c3..ff1c00b695ae 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+@@ -46,8 +46,6 @@
+ static struct mce i_mce;
+ static struct dentry *dfs_inj;
+
+-static u8 n_banks;
+-
+ #define MAX_FLAG_OPT_SIZE 4
+ #define NBCFG 0x44
+
+@@ -567,9 +565,15 @@ err:
+ static int inj_bank_set(void *data, u64 val)
+ {
+ struct mce *m = (struct mce *)data;
++ u8 n_banks;
++ u64 cap;
++
++ /* Get bank count on target CPU so we can handle non-uniform values. */
++ rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
++ n_banks = cap & MCG_BANKCNT_MASK;
+
+ if (val >= n_banks) {
+- pr_err("Non-existent MCE bank: %llu\n", val);
++ pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu);
+ return -EINVAL;
+ }
+
+@@ -659,10 +663,6 @@ static struct dfs_node {
+ static int __init debugfs_init(void)
+ {
+ unsigned int i;
+- u64 cap;
+-
+- rdmsrl(MSR_IA32_MCG_CAP, cap);
+- n_banks = cap & MCG_BANKCNT_MASK;
+
+ dfs_inj = debugfs_create_dir("mce-inject", NULL);
+ if (!dfs_inj)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index f9e7096b1804..fee118b3b69f 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -711,19 +711,49 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
+
+ barrier();
+ m.status = mce_rdmsrl(msr_ops.status(i));
++
++ /* If this entry is not valid, ignore it */
+ if (!(m.status & MCI_STATUS_VAL))
+ continue;
+
+ /*
+- * Uncorrected or signalled events are handled by the exception
+- * handler when it is enabled, so don't process those here.
+- *
+- * TBD do the same check for MCI_STATUS_EN here?
++ * If we are logging everything (at CPU online) or this
++ * is a corrected error, then we must log it.
+ */
+- if (!(flags & MCP_UC) &&
+- (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
+- continue;
++ if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
++ goto log_it;
++
++ /*
++ * Newer Intel systems that support software error
++ * recovery need to make additional checks. Other
++ * CPUs should skip over uncorrected errors, but log
++ * everything else.
++ */
++ if (!mca_cfg.ser) {
++ if (m.status & MCI_STATUS_UC)
++ continue;
++ goto log_it;
++ }
++
++ /* Log "not enabled" (speculative) errors */
++ if (!(m.status & MCI_STATUS_EN))
++ goto log_it;
++
++ /*
++ * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
++ * UC == 1 && PCC == 0 && S == 0
++ */
++ if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
++ goto log_it;
++
++ /*
++ * Skip anything else. Presumption is that our read of this
++ * bank is racing with a machine check. Leave the log alone
++ * for do_machine_check() to deal with it.
++ */
++ continue;
+
++log_it:
+ error_seen = true;
+
+ mce_read_aux(&m, i);
+@@ -1450,13 +1480,12 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
+ static int __mcheck_cpu_mce_banks_init(void)
+ {
+ int i;
+- u8 num_banks = mca_cfg.banks;
+
+- mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL);
++ mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL);
+ if (!mce_banks)
+ return -ENOMEM;
+
+- for (i = 0; i < num_banks; i++) {
++ for (i = 0; i < MAX_NR_BANKS; i++) {
+ struct mce_bank *b = &mce_banks[i];
+
+ b->ctl = -1ULL;
+@@ -1470,28 +1499,19 @@ static int __mcheck_cpu_mce_banks_init(void)
+ */
+ static int __mcheck_cpu_cap_init(void)
+ {
+- unsigned b;
+ u64 cap;
++ u8 b;
+
+ rdmsrl(MSR_IA32_MCG_CAP, cap);
+
+ b = cap & MCG_BANKCNT_MASK;
+- if (!mca_cfg.banks)
+- pr_info("CPU supports %d MCE banks\n", b);
+-
+- if (b > MAX_NR_BANKS) {
+- pr_warn("Using only %u machine check banks out of %u\n",
+- MAX_NR_BANKS, b);
++ if (WARN_ON_ONCE(b > MAX_NR_BANKS))
+ b = MAX_NR_BANKS;
+- }
+
+- /* Don't support asymmetric configurations today */
+- WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
+- mca_cfg.banks = b;
++ mca_cfg.banks = max(mca_cfg.banks, b);
+
+ if (!mce_banks) {
+ int err = __mcheck_cpu_mce_banks_init();
+-
+ if (err)
+ return err;
+ }
+@@ -2473,6 +2493,8 @@ EXPORT_SYMBOL_GPL(mcsafe_key);
+
+ static int __init mcheck_late_init(void)
+ {
++ pr_info("Using %d MCE banks\n", mca_cfg.banks);
++
+ if (mca_cfg.recovery)
+ static_branch_inc(&mcsafe_key);
+
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index b9bc8a1a584e..b43ddefd77f4 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -418,8 +418,9 @@ static int do_microcode_update(const void __user *buf, size_t size)
+ if (ustate == UCODE_ERROR) {
+ error = -1;
+ break;
+- } else if (ustate == UCODE_OK)
++ } else if (ustate == UCODE_NEW) {
+ apply_microcode_on_target(cpu);
++ }
+ }
+
+ return error;
+diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
+index 0469cd078db1..b50ac9c7397b 100644
+--- a/arch/x86/kernel/irq_64.c
++++ b/arch/x86/kernel/irq_64.c
+@@ -26,9 +26,18 @@ int sysctl_panic_on_stackoverflow;
+ /*
+ * Probabilistic stack overflow check:
+ *
+- * Only check the stack in process context, because everything else
+- * runs on the big interrupt stacks. Checking reliably is too expensive,
+- * so we just check from interrupts.
++ * Regular device interrupts can enter on the following stacks:
++ *
++ * - User stack
++ *
++ * - Kernel task stack
++ *
++ * - Interrupt stack if a device driver reenables interrupts
++ * which should only happen in really old drivers.
++ *
++ * - Debug IST stack
++ *
++ * All other contexts are invalid.
+ */
+ static inline void stack_overflow_check(struct pt_regs *regs)
+ {
+@@ -53,8 +62,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
+ return;
+
+ oist = this_cpu_ptr(&orig_ist);
+- estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
+- estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
++ estack_bottom = (u64)oist->ist[DEBUG_STACK];
++ estack_top = estack_bottom - DEBUG_STKSZ + STACK_TOP_MARGIN;
+ if (regs->sp >= estack_top && regs->sp <= estack_bottom)
+ return;
+
+diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
+index f58336af095c..6645f123419c 100644
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -87,7 +87,7 @@ void *module_alloc(unsigned long size)
+ p = __vmalloc_node_range(size, MODULE_ALIGN,
+ MODULES_VADDR + get_module_load_offset(),
+ MODULES_END, GFP_KERNEL,
+- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++ PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ if (p && (kasan_module_alloc(p, size) < 0)) {
+ vfree(p);
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 92a3b312a53c..44e647a65de8 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs,
+ COPY_SEG_CPL3(cs);
+ COPY_SEG_CPL3(ss);
+
+-#ifdef CONFIG_X86_64
+- /*
+- * Fix up SS if needed for the benefit of old DOSEMU and
+- * CRIU.
+- */
+- if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
+- user_64bit_mode(regs)))
+- force_valid_ss(regs);
+-#endif
+-
+ get_user_ex(tmpflags, &sc->flags);
+ regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+ regs->orig_ax = -1; /* disable syscall checks */
+@@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs,
+ buf = (void __user *)buf_val;
+ } get_user_catch(err);
+
++#ifdef CONFIG_X86_64
++ /*
++ * Fix up SS if needed for the benefit of old DOSEMU and
++ * CRIU.
++ */
++ if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
++ force_valid_ss(regs);
++#endif
++
+ err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
+
+ force_iret();
+@@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+ {
+ struct rt_sigframe __user *frame;
+ void __user *fp = NULL;
++ unsigned long uc_flags;
+ int err = 0;
+
+ frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
+@@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+ return -EFAULT;
+ }
+
++ uc_flags = frame_uc_flags(regs);
++
+ put_user_try {
+ /* Create the ucontext. */
+- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
++ put_user_ex(uc_flags, &frame->uc.uc_flags);
+ put_user_ex(0, &frame->uc.uc_link);
+ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
+
+@@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
+ {
+ #ifdef CONFIG_X86_X32_ABI
+ struct rt_sigframe_x32 __user *frame;
++ unsigned long uc_flags;
+ void __user *restorer;
+ int err = 0;
+ void __user *fpstate = NULL;
+@@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
+ return -EFAULT;
+ }
+
++ uc_flags = frame_uc_flags(regs);
++
+ put_user_try {
+ /* Create the ucontext. */
+- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
++ put_user_ex(uc_flags, &frame->uc.uc_flags);
+ put_user_ex(0, &frame->uc.uc_link);
+ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
+ put_user_ex(0, &frame->uc.uc__pad0);
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index 85e6d5620188..2fb152d813c1 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -151,11 +151,11 @@ SECTIONS
+ *(.text.__x86.indirect_thunk)
+ __indirect_thunk_end = .;
+ #endif
+-
+- /* End of text section */
+- _etext = .;
+ } :text = 0x9090
+
++ /* End of text section */
++ _etext = .;
++
+ NOTES :text :note
+
+ EXCEPTION_TABLE(16) :text = 0x9090
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 8dd9208ae4de..ea454d3f7763 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2022,7 +2022,11 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ if (!kvm_vcpu_apicv_active(vcpu))
+ return;
+
+- if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
++ /*
++ * Since the host physical APIC id is 8 bits,
++ * we can support host APIC ID upto 255.
++ */
++ if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
+ return;
+
+ entry = READ_ONCE(*(svm->avic_physical_id_cache));
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index d0eb37c069b8..be4ba0975a0f 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1188,7 +1188,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ u64 efer = msr_info->data;
+
+ if (efer & efer_reserved_bits)
+- return false;
++ return 1;
+
+ if (!msr_info->host_initiated) {
+ if (!__kvm_valid_efer(vcpu, efer))
+diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
+index 3b24dc05251c..9d05572370ed 100644
+--- a/arch/x86/lib/memcpy_64.S
++++ b/arch/x86/lib/memcpy_64.S
+@@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe)
+ /* Copy successful. Return zero */
+ .L_done_memcpy_trap:
+ xorl %eax, %eax
++.L_done:
+ ret
+ ENDPROC(__memcpy_mcsafe)
+ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
+@@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
+ addl %edx, %ecx
+ .E_trailing_bytes:
+ mov %ecx, %eax
+- ret
++ jmp .L_done
+
+ /*
+ * For write fault handling, given the destination is unaligned,
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 47bebfe6efa7..9d9765e4d1ef 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -427,8 +427,6 @@ static noinline int vmalloc_fault(unsigned long address)
+ if (!(address >= VMALLOC_START && address < VMALLOC_END))
+ return -1;
+
+- WARN_ON_ONCE(in_nmi());
+-
+ /*
+ * Copy kernel mappings over when needed. This can also
+ * happen within a race in page table update. In the later
+diff --git a/block/genhd.c b/block/genhd.c
+index be5bab20b2ab..2b2a936cf848 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -518,6 +518,18 @@ void blk_free_devt(dev_t devt)
+ }
+ }
+
++/**
++ * We invalidate devt by assigning NULL pointer for devt in idr.
++ */
++void blk_invalidate_devt(dev_t devt)
++{
++ if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
++ spin_lock_bh(&ext_devt_lock);
++ idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt)));
++ spin_unlock_bh(&ext_devt_lock);
++ }
++}
++
+ static char *bdevt_str(dev_t devt, char *buf)
+ {
+ if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
+@@ -769,6 +781,13 @@ void del_gendisk(struct gendisk *disk)
+
+ if (!(disk->flags & GENHD_FL_HIDDEN))
+ blk_unregister_region(disk_devt(disk), disk->minors);
++ /*
++ * Remove gendisk pointer from idr so that it cannot be looked up
++ * while RCU period before freeing gendisk is running to prevent
++ * use-after-free issues. Note that the device number stays
++ * "in-use" until we really free the gendisk.
++ */
++ blk_invalidate_devt(disk_devt(disk));
+
+ kobject_put(disk->part0.holder_dir);
+ kobject_put(disk->slave_dir);
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index 5f8db5c5140f..98d60a59b843 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -289,6 +289,13 @@ void delete_partition(struct gendisk *disk, int partno)
+ kobject_put(part->holder_dir);
+ device_del(part_to_dev(part));
+
++ /*
++ * Remove gendisk pointer from idr so that it cannot be looked up
++ * while RCU period before freeing gendisk is running to prevent
++ * use-after-free issues. Note that the device number stays
++ * "in-use" until we really free the gendisk.
++ */
++ blk_invalidate_devt(part_devt(part));
+ hd_struct_kill(part);
+ }
+
+diff --git a/block/sed-opal.c b/block/sed-opal.c
+index e0de4dd448b3..119640897293 100644
+--- a/block/sed-opal.c
++++ b/block/sed-opal.c
+@@ -2095,13 +2095,16 @@ static int opal_erase_locking_range(struct opal_dev *dev,
+ static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
+ struct opal_mbr_data *opal_mbr)
+ {
++ u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ?
++ OPAL_TRUE : OPAL_FALSE;
++
+ const struct opal_step mbr_steps[] = {
+ { opal_discovery0, },
+ { start_admin1LSP_opal_session, &opal_mbr->key },
+- { set_mbr_done, &opal_mbr->enable_disable },
++ { set_mbr_done, &enable_disable },
+ { end_opal_session, },
+ { start_admin1LSP_opal_session, &opal_mbr->key },
+- { set_mbr_enable_disable, &opal_mbr->enable_disable },
++ { set_mbr_enable_disable, &enable_disable },
+ { end_opal_session, },
+ { NULL, }
+ };
+@@ -2221,7 +2224,7 @@ static int __opal_lock_unlock(struct opal_dev *dev,
+
+ static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key)
+ {
+- u8 mbr_done_tf = 1;
++ u8 mbr_done_tf = OPAL_TRUE;
+ const struct opal_step mbrdone_step [] = {
+ { opal_discovery0, },
+ { start_admin1LSP_opal_session, key },
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index e48eebc27b81..43c2615434b4 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -1231,18 +1231,24 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
+ /*
+ * set numa proximity domain for smmuv3 device
+ */
+-static void __init arm_smmu_v3_set_proximity(struct device *dev,
++static int __init arm_smmu_v3_set_proximity(struct device *dev,
+ struct acpi_iort_node *node)
+ {
+ struct acpi_iort_smmu_v3 *smmu;
+
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+ if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
+- set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
++ int node = acpi_map_pxm_to_node(smmu->pxm);
++
++ if (node != NUMA_NO_NODE && !node_online(node))
++ return -EINVAL;
++
++ set_dev_node(dev, node);
+ pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
+ smmu->base_address,
+ smmu->pxm);
+ }
++ return 0;
+ }
+ #else
+ #define arm_smmu_v3_set_proximity NULL
+@@ -1317,7 +1323,7 @@ struct iort_dev_config {
+ int (*dev_count_resources)(struct acpi_iort_node *node);
+ void (*dev_init_resources)(struct resource *res,
+ struct acpi_iort_node *node);
+- void (*dev_set_proximity)(struct device *dev,
++ int (*dev_set_proximity)(struct device *dev,
+ struct acpi_iort_node *node);
+ };
+
+@@ -1368,8 +1374,11 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
+ if (!pdev)
+ return -ENOMEM;
+
+- if (ops->dev_set_proximity)
+- ops->dev_set_proximity(&pdev->dev, node);
++ if (ops->dev_set_proximity) {
++ ret = ops->dev_set_proximity(&pdev->dev, node);
++ if (ret)
++ goto dev_put;
++ }
+
+ count = ops->dev_count_resources(node);
+
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 693cf05b0cc4..288673cff85e 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -975,6 +975,14 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
+ const struct acpi_data_node *data = to_acpi_data_node(fwnode);
+ struct acpi_data_node *dn;
+
++ /*
++ * We can have a combination of device and data nodes, e.g. with
++ * hierarchical _DSD properties. Make sure the adev pointer is
++ * restored before going through data nodes, otherwise we will
++ * be looking for data_nodes below the last device found instead
++ * of the common fwnode shared by device_nodes and data_nodes.
++ */
++ adev = to_acpi_device_node(fwnode);
+ if (adev)
+ head = &adev->data.subnodes;
+ else if (data)
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index a690fd400260..4abd7c6531d9 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1736,6 +1736,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+ if (dev->power.syscore)
+ goto Complete;
+
++ /* Avoid direct_complete to let wakeup_path propagate. */
++ if (device_may_wakeup(dev) || dev->power.wakeup_path)
++ dev->power.direct_complete = false;
++
+ if (dev->power.direct_complete) {
+ if (pm_runtime_status_suspended(dev)) {
+ pm_runtime_disable(dev);
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index f0d593c3fa72..77004c29da08 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -504,6 +504,8 @@ static int qca_open(struct hci_uart *hu)
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->btsoc_type != QCA_WCN3990) {
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
++ /* Controller needs time to bootup. */
++ msleep(150);
+ } else {
+ hu->init_speed = qcadev->init_speed;
+ hu->oper_speed = qcadev->oper_speed;
+diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
+index b65ff6962899..e9b6ac61fb7f 100644
+--- a/drivers/char/hw_random/omap-rng.c
++++ b/drivers/char/hw_random/omap-rng.c
+@@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev)
+ priv->rng.read = omap_rng_do_read;
+ priv->rng.init = omap_rng_init;
+ priv->rng.cleanup = omap_rng_cleanup;
++ priv->rng.quality = 900;
+
+ priv->rng.priv = (unsigned long)priv;
+ platform_set_drvdata(pdev, priv);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index c75b6cdf0053..0a84b7f468ad 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -778,6 +778,7 @@ static struct crng_state **crng_node_pool __read_mostly;
+ #endif
+
+ static void invalidate_batched_entropy(void);
++static void numa_crng_init(void);
+
+ static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+ static int __init parse_trust_cpu(char *arg)
+@@ -806,7 +807,9 @@ static void crng_initialize(struct crng_state *crng)
+ }
+ crng->state[i] ^= rv;
+ }
+- if (trust_cpu && arch_init) {
++ if (trust_cpu && arch_init && crng == &primary_crng) {
++ invalidate_batched_entropy();
++ numa_crng_init();
+ crng_init = 2;
+ pr_notice("random: crng done (trusting CPU's manufacturer)\n");
+ }
+@@ -2212,8 +2215,8 @@ struct batched_entropy {
+ u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
+ };
+ unsigned int position;
++ spinlock_t batch_lock;
+ };
+-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
+
+ /*
+ * Get a random word for internal kernel use only. The quality of the random
+@@ -2223,12 +2226,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
+ */
+-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
++ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
++};
++
+ u64 get_random_u64(void)
+ {
+ u64 ret;
+- bool use_lock;
1656 +- unsigned long flags = 0;
1657 ++ unsigned long flags;
1658 + struct batched_entropy *batch;
1659 + static void *previous;
1660 +
1661 +@@ -2243,28 +2248,25 @@ u64 get_random_u64(void)
1662 +
1663 + warn_unseeded_randomness(&previous);
1664 +
1665 +- use_lock = READ_ONCE(crng_init) < 2;
1666 +- batch = &get_cpu_var(batched_entropy_u64);
1667 +- if (use_lock)
1668 +- read_lock_irqsave(&batched_entropy_reset_lock, flags);
1669 ++ batch = raw_cpu_ptr(&batched_entropy_u64);
1670 ++ spin_lock_irqsave(&batch->batch_lock, flags);
1671 + if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
1672 + extract_crng((__u32 *)batch->entropy_u64);
1673 + batch->position = 0;
1674 + }
1675 + ret = batch->entropy_u64[batch->position++];
1676 +- if (use_lock)
1677 +- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
1678 +- put_cpu_var(batched_entropy_u64);
1679 ++ spin_unlock_irqrestore(&batch->batch_lock, flags);
1680 + return ret;
1681 + }
1682 + EXPORT_SYMBOL(get_random_u64);
1683 +
1684 +-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
1685 ++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
1686 ++ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
1687 ++};
1688 + u32 get_random_u32(void)
1689 + {
1690 + u32 ret;
1691 +- bool use_lock;
1692 +- unsigned long flags = 0;
1693 ++ unsigned long flags;
1694 + struct batched_entropy *batch;
1695 + static void *previous;
1696 +
1697 +@@ -2273,18 +2275,14 @@ u32 get_random_u32(void)
1698 +
1699 + warn_unseeded_randomness(&previous);
1700 +
1701 +- use_lock = READ_ONCE(crng_init) < 2;
1702 +- batch = &get_cpu_var(batched_entropy_u32);
1703 +- if (use_lock)
1704 +- read_lock_irqsave(&batched_entropy_reset_lock, flags);
1705 ++ batch = raw_cpu_ptr(&batched_entropy_u32);
1706 ++ spin_lock_irqsave(&batch->batch_lock, flags);
1707 + if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
1708 + extract_crng(batch->entropy_u32);
1709 + batch->position = 0;
1710 + }
1711 + ret = batch->entropy_u32[batch->position++];
1712 +- if (use_lock)
1713 +- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
1714 +- put_cpu_var(batched_entropy_u32);
1715 ++ spin_unlock_irqrestore(&batch->batch_lock, flags);
1716 + return ret;
1717 + }
1718 + EXPORT_SYMBOL(get_random_u32);
1719 +@@ -2298,12 +2296,19 @@ static void invalidate_batched_entropy(void)
1720 + int cpu;
1721 + unsigned long flags;
1722 +
1723 +- write_lock_irqsave(&batched_entropy_reset_lock, flags);
1724 + for_each_possible_cpu (cpu) {
1725 +- per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
1726 +- per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
1727 ++ struct batched_entropy *batched_entropy;
1728 ++
1729 ++ batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
1730 ++ spin_lock_irqsave(&batched_entropy->batch_lock, flags);
1731 ++ batched_entropy->position = 0;
1732 ++ spin_unlock(&batched_entropy->batch_lock);
1733 ++
1734 ++ batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
1735 ++ spin_lock(&batched_entropy->batch_lock);
1736 ++ batched_entropy->position = 0;
1737 ++ spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
1738 + }
1739 +- write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
1740 + }
1741 +
1742 + /**
1743 +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
1744 +index 5b5b5d72eab7..c55f6aeb4227 100644
1745 +--- a/drivers/char/virtio_console.c
1746 ++++ b/drivers/char/virtio_console.c
1747 +@@ -75,7 +75,7 @@ struct ports_driver_data {
1748 + /* All the console devices handled by this driver */
1749 + struct list_head consoles;
1750 + };
1751 +-static struct ports_driver_data pdrvdata;
1752 ++static struct ports_driver_data pdrvdata = { .next_vtermno = 1};
1753 +
1754 + static DEFINE_SPINLOCK(pdrvdata_lock);
1755 + static DECLARE_COMPLETION(early_console_added);
1756 +@@ -1405,6 +1405,7 @@ static int add_port(struct ports_device *portdev, u32 id)
1757 + port->async_queue = NULL;
1758 +
1759 + port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
1760 ++ port->cons.vtermno = 0;
1761 +
1762 + port->host_connected = port->guest_connected = false;
1763 + port->stats = (struct port_stats) { 0 };
1764 +diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
1765 +index 450de24a1b42..64191694ff6e 100644
1766 +--- a/drivers/clk/rockchip/clk-rk3288.c
1767 ++++ b/drivers/clk/rockchip/clk-rk3288.c
1768 +@@ -198,7 +198,7 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
1769 + PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
1770 + PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
1771 +
1772 +-PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vepu", "aclk_vdpu" };
1773 ++PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vdpu", "aclk_vepu" };
1774 + PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
1775 + "sclk_otgphy0_480m" };
1776 + PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
1777 +@@ -292,13 +292,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
1778 + COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
1779 + RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
1780 + RK3288_CLKGATE_CON(12), 6, GFLAGS),
1781 +- COMPOSITE_NOMUX(0, "atclk", "armclk", CLK_IGNORE_UNUSED,
1782 ++ COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
1783 + RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
1784 + RK3288_CLKGATE_CON(12), 7, GFLAGS),
1785 + COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
1786 + RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
1787 + RK3288_CLKGATE_CON(12), 8, GFLAGS),
1788 +- GATE(0, "pclk_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
1789 ++ GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
1790 + RK3288_CLKGATE_CON(12), 9, GFLAGS),
1791 + GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
1792 + RK3288_CLKGATE_CON(12), 10, GFLAGS),
1793 +@@ -399,7 +399,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
1794 + COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
1795 + RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
1796 + RK3288_CLKGATE_CON(3), 11, GFLAGS),
1797 +- MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, 0,
1798 ++ MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
1799 + RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
1800 + GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
1801 + RK3288_CLKGATE_CON(9), 0, GFLAGS),
1802 +@@ -626,7 +626,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
1803 + INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
1804 + RK3288_CLKSEL_CON(22), 7, IFLAGS),
1805 +
1806 +- GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
1807 ++ GATE(0, "jtag", "ext_jtag", 0,
1808 + RK3288_CLKGATE_CON(4), 14, GFLAGS),
1809 +
1810 + COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
1811 +@@ -635,7 +635,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
1812 + COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
1813 + RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
1814 + RK3288_CLKGATE_CON(3), 6, GFLAGS),
1815 +- GATE(0, "hsicphy12m_xin12m", "xin12m", CLK_IGNORE_UNUSED,
1816 ++ GATE(0, "hsicphy12m_xin12m", "xin12m", 0,
1817 + RK3288_CLKGATE_CON(13), 9, GFLAGS),
1818 + DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0,
1819 + RK3288_CLKSEL_CON(11), 8, 6, DFLAGS),
1820 +@@ -676,7 +676,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
1821 + GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
1822 + GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
1823 + GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
1824 +- GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
1825 ++ GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
1826 +
1827 + /* ddrctrl [DDR Controller PHY clock] gates */
1828 + GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
1829 +@@ -816,12 +816,9 @@ static const char *const rk3288_critical_clocks[] __initconst = {
1830 + "pclk_alive_niu",
1831 + "pclk_pd_pmu",
1832 + "pclk_pmu_niu",
1833 +- "pclk_core_niu",
1834 +- "pclk_ddrupctl0",
1835 +- "pclk_publ0",
1836 +- "pclk_ddrupctl1",
1837 +- "pclk_publ1",
1838 + "pmu_hclk_otg0",
1839 ++ /* pwm-regulators on some boards, so handoff-critical later */
1840 ++ "pclk_rkpwm",
1841 + };
1842 +
1843 + static void __iomem *rk3288_cru_base;
1844 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1845 +index 505c9a55d555..d3213594d1a7 100644
1846 +--- a/drivers/cpufreq/cpufreq.c
1847 ++++ b/drivers/cpufreq/cpufreq.c
1848 +@@ -1103,6 +1103,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1849 + cpufreq_global_kobject, "policy%u", cpu);
1850 + if (ret) {
1851 + pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1852 ++ kobject_put(&policy->kobj);
1853 + goto err_free_real_cpus;
1854 + }
1855 +
1856 +diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
1857 +index 6d53f7d9fc7a..69fc5cf4782f 100644
1858 +--- a/drivers/cpufreq/cpufreq_governor.c
1859 ++++ b/drivers/cpufreq/cpufreq_governor.c
1860 +@@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
1861 + /* Failure, so roll back. */
1862 + pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
1863 +
1864 ++ kobject_put(&dbs_data->attr_set.kobj);
1865 ++
1866 + policy->governor_data = NULL;
1867 +
1868 + if (!have_governor_per_policy())
1869 +diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
1870 +index c2dd43f3f5d8..8d63a6dc8383 100644
1871 +--- a/drivers/cpufreq/kirkwood-cpufreq.c
1872 ++++ b/drivers/cpufreq/kirkwood-cpufreq.c
1873 +@@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
1874 + priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
1875 + if (IS_ERR(priv.cpu_clk)) {
1876 + dev_err(priv.dev, "Unable to get cpuclk\n");
1877 +- return PTR_ERR(priv.cpu_clk);
1878 ++ err = PTR_ERR(priv.cpu_clk);
1879 ++ goto out_node;
1880 + }
1881 +
1882 + err = clk_prepare_enable(priv.cpu_clk);
1883 + if (err) {
1884 + dev_err(priv.dev, "Unable to prepare cpuclk\n");
1885 +- return err;
1886 ++ goto out_node;
1887 + }
1888 +
1889 + kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
1890 +@@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
1891 + goto out_ddr;
1892 + }
1893 +
1894 +- of_node_put(np);
1895 +- np = NULL;
1896 +-
1897 + err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
1898 +- if (!err)
1899 +- return 0;
1900 ++ if (err) {
1901 ++ dev_err(priv.dev, "Failed to register cpufreq driver\n");
1902 ++ goto out_powersave;
1903 ++ }
1904 +
1905 +- dev_err(priv.dev, "Failed to register cpufreq driver\n");
1906 ++ of_node_put(np);
1907 ++ return 0;
1908 +
1909 ++out_powersave:
1910 + clk_disable_unprepare(priv.powersave_clk);
1911 + out_ddr:
1912 + clk_disable_unprepare(priv.ddr_clk);
1913 + out_cpu:
1914 + clk_disable_unprepare(priv.cpu_clk);
1915 ++out_node:
1916 + of_node_put(np);
1917 +
1918 + return err;
1919 +diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
1920 +index 75dfbd2a58ea..c7710c149de8 100644
1921 +--- a/drivers/cpufreq/pasemi-cpufreq.c
1922 ++++ b/drivers/cpufreq/pasemi-cpufreq.c
1923 +@@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
1924 +
1925 + cpu = of_get_cpu_node(policy->cpu, NULL);
1926 +
1927 ++ of_node_put(cpu);
1928 + if (!cpu)
1929 + goto out;
1930 +
1931 +diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
1932 +index 61ae06ca008e..e225edb5c359 100644
1933 +--- a/drivers/cpufreq/pmac32-cpufreq.c
1934 ++++ b/drivers/cpufreq/pmac32-cpufreq.c
1935 +@@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
1936 + volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
1937 + if (volt_gpio_np)
1938 + voltage_gpio = read_gpio(volt_gpio_np);
1939 ++ of_node_put(volt_gpio_np);
1940 + if (!voltage_gpio){
1941 + pr_err("missing cpu-vcore-select gpio\n");
1942 + return 1;
1943 +@@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
1944 + if (volt_gpio_np)
1945 + voltage_gpio = read_gpio(volt_gpio_np);
1946 +
1947 ++ of_node_put(volt_gpio_np);
1948 + pvr = mfspr(SPRN_PVR);
1949 + has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
1950 +
1951 +diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
1952 +index 41a0f0be3f9f..8414c3a4ea08 100644
1953 +--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
1954 ++++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
1955 +@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
1956 + if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
1957 + !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
1958 + pr_info("invalid CBE regs pointers for cpufreq\n");
1959 ++ of_node_put(cpu);
1960 + return -EINVAL;
1961 + }
1962 +
1963 +diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
1964 +index a4b5ff2b72f8..f6936bb3b7be 100644
1965 +--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
1966 ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
1967 +@@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq)
1968 + }
1969 + } else {
1970 + /* Since we have the flag final, we can go up to modulo 4 */
1971 +- end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
1972 ++ if (areq->nbytes < 4)
1973 ++ end = 0;
1974 ++ else
1975 ++ end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
1976 + }
1977 +
1978 + /* TODO if SGlen % 4 and !op->len then DMA */
1979 +diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
1980 +index de78282b8f44..9c6b5c1d6a1a 100644
1981 +--- a/drivers/crypto/vmx/aesp8-ppc.pl
1982 ++++ b/drivers/crypto/vmx/aesp8-ppc.pl
1983 +@@ -1357,7 +1357,7 @@ Loop_ctr32_enc:
1984 + addi $idx,$idx,16
1985 + bdnz Loop_ctr32_enc
1986 +
1987 +- vadduwm $ivec,$ivec,$one
1988 ++ vadduqm $ivec,$ivec,$one
1989 + vmr $dat,$inptail
1990 + lvx $inptail,0,$inp
1991 + addi $inp,$inp,16
1992 +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
1993 +index a75b95fac3bd..db5b8fe1dd4a 100644
1994 +--- a/drivers/dma/at_xdmac.c
1995 ++++ b/drivers/dma/at_xdmac.c
1996 +@@ -1606,7 +1606,11 @@ static void at_xdmac_tasklet(unsigned long data)
1997 + struct at_xdmac_desc,
1998 + xfer_node);
1999 + dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
2000 +- BUG_ON(!desc->active_xfer);
2001 ++ if (!desc->active_xfer) {
2002 ++ dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
2003 ++ spin_unlock_bh(&atchan->lock);
2004 ++ return;
2005 ++ }
2006 +
2007 + txd = &desc->tx_dma_desc;
2008 +
2009 +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
2010 +index 88750a34e859..bc8050c025b7 100644
2011 +--- a/drivers/dma/pl330.c
2012 ++++ b/drivers/dma/pl330.c
2013 +@@ -961,6 +961,7 @@ static void _stop(struct pl330_thread *thrd)
2014 + {
2015 + void __iomem *regs = thrd->dmac->base;
2016 + u8 insn[6] = {0, 0, 0, 0, 0, 0};
2017 ++ u32 inten = readl(regs + INTEN);
2018 +
2019 + if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
2020 + UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
2021 +@@ -973,10 +974,13 @@ static void _stop(struct pl330_thread *thrd)
2022 +
2023 + _emit_KILL(0, insn);
2024 +
2025 +- /* Stop generating interrupts for SEV */
2026 +- writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
2027 +-
2028 + _execute_DBGINSN(thrd, insn, is_manager(thrd));
2029 ++
2030 ++ /* clear the event */
2031 ++ if (inten & (1 << thrd->ev))
2032 ++ writel(1 << thrd->ev, regs + INTCLR);
2033 ++ /* Stop generating interrupts for SEV */
2034 ++ writel(inten & ~(1 << thrd->ev), regs + INTEN);
2035 + }
2036 +
2037 + /* Start doing req 'idx' of thread 'thrd' */
2038 +diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
2039 +index b26256f23d67..09b6756366c3 100644
2040 +--- a/drivers/dma/tegra210-adma.c
2041 ++++ b/drivers/dma/tegra210-adma.c
2042 +@@ -22,7 +22,6 @@
2043 + #include <linux/of_device.h>
2044 + #include <linux/of_dma.h>
2045 + #include <linux/of_irq.h>
2046 +-#include <linux/pm_clock.h>
2047 + #include <linux/pm_runtime.h>
2048 + #include <linux/slab.h>
2049 +
2050 +@@ -141,6 +140,7 @@ struct tegra_adma {
2051 + struct dma_device dma_dev;
2052 + struct device *dev;
2053 + void __iomem *base_addr;
2054 ++ struct clk *ahub_clk;
2055 + unsigned int nr_channels;
2056 + unsigned long rx_requests_reserved;
2057 + unsigned long tx_requests_reserved;
2058 +@@ -637,8 +637,9 @@ static int tegra_adma_runtime_suspend(struct device *dev)
2059 + struct tegra_adma *tdma = dev_get_drvdata(dev);
2060 +
2061 + tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
2062 ++ clk_disable_unprepare(tdma->ahub_clk);
2063 +
2064 +- return pm_clk_suspend(dev);
2065 ++ return 0;
2066 + }
2067 +
2068 + static int tegra_adma_runtime_resume(struct device *dev)
2069 +@@ -646,10 +647,11 @@ static int tegra_adma_runtime_resume(struct device *dev)
2070 + struct tegra_adma *tdma = dev_get_drvdata(dev);
2071 + int ret;
2072 +
2073 +- ret = pm_clk_resume(dev);
2074 +- if (ret)
2075 ++ ret = clk_prepare_enable(tdma->ahub_clk);
2076 ++ if (ret) {
2077 ++ dev_err(dev, "ahub clk_enable failed: %d\n", ret);
2078 + return ret;
2079 +-
2080 ++ }
2081 + tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
2082 +
2083 + return 0;
2084 +@@ -692,13 +694,11 @@ static int tegra_adma_probe(struct platform_device *pdev)
2085 + if (IS_ERR(tdma->base_addr))
2086 + return PTR_ERR(tdma->base_addr);
2087 +
2088 +- ret = pm_clk_create(&pdev->dev);
2089 +- if (ret)
2090 +- return ret;
2091 +-
2092 +- ret = of_pm_clk_add_clk(&pdev->dev, "d_audio");
2093 +- if (ret)
2094 +- goto clk_destroy;
2095 ++ tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
2096 ++ if (IS_ERR(tdma->ahub_clk)) {
2097 ++ dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
2098 ++ return PTR_ERR(tdma->ahub_clk);
2099 ++ }
2100 +
2101 + pm_runtime_enable(&pdev->dev);
2102 +
2103 +@@ -775,8 +775,6 @@ rpm_put:
2104 + pm_runtime_put_sync(&pdev->dev);
2105 + rpm_disable:
2106 + pm_runtime_disable(&pdev->dev);
2107 +-clk_destroy:
2108 +- pm_clk_destroy(&pdev->dev);
2109 +
2110 + return ret;
2111 + }
2112 +@@ -786,6 +784,7 @@ static int tegra_adma_remove(struct platform_device *pdev)
2113 + struct tegra_adma *tdma = platform_get_drvdata(pdev);
2114 + int i;
2115 +
2116 ++ of_dma_controller_free(pdev->dev.of_node);
2117 + dma_async_device_unregister(&tdma->dma_dev);
2118 +
2119 + for (i = 0; i < tdma->nr_channels; ++i)
2120 +@@ -793,7 +792,6 @@ static int tegra_adma_remove(struct platform_device *pdev)
2121 +
2122 + pm_runtime_put_sync(&pdev->dev);
2123 + pm_runtime_disable(&pdev->dev);
2124 +- pm_clk_destroy(&pdev->dev);
2125 +
2126 + return 0;
2127 + }
2128 +diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
2129 +index da0e9bc4262f..9327479c719c 100644
2130 +--- a/drivers/extcon/extcon-arizona.c
2131 ++++ b/drivers/extcon/extcon-arizona.c
2132 +@@ -1726,6 +1726,16 @@ static int arizona_extcon_remove(struct platform_device *pdev)
2133 + struct arizona_extcon_info *info = platform_get_drvdata(pdev);
2134 + struct arizona *arizona = info->arizona;
2135 + int jack_irq_rise, jack_irq_fall;
2136 ++ bool change;
2137 ++
2138 ++ regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
2139 ++ ARIZONA_MICD_ENA, 0,
2140 ++ &change);
2141 ++
2142 ++ if (change) {
2143 ++ regulator_disable(info->micvdd);
2144 ++ pm_runtime_put(info->dev);
2145 ++ }
2146 +
2147 + gpiod_put(info->micd_pol_gpio);
2148 +
2149 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
2150 +index 7056925eb386..869ff624b108 100644
2151 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
2152 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
2153 +@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
2154 + {
2155 + struct amdgpu_device *adev = ring->adev;
2156 + struct amdgpu_fence *fence;
2157 +- struct dma_fence *old, **ptr;
2158 ++ struct dma_fence __rcu **ptr;
2159 + uint32_t seq;
2160 ++ int r;
2161 +
2162 + fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
2163 + if (fence == NULL)
2164 +@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
2165 + seq, flags | AMDGPU_FENCE_FLAG_INT);
2166 +
2167 + ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
2168 ++ if (unlikely(rcu_dereference_protected(*ptr, 1))) {
2169 ++ struct dma_fence *old;
2170 ++
2171 ++ rcu_read_lock();
2172 ++ old = dma_fence_get_rcu_safe(ptr);
2173 ++ rcu_read_unlock();
2174 ++
2175 ++ if (old) {
2176 ++ r = dma_fence_wait(old, false);
2177 ++ dma_fence_put(old);
2178 ++ if (r)
2179 ++ return r;
2180 ++ }
2181 ++ }
2182 ++
2183 + /* This function can't be called concurrently anyway, otherwise
2184 + * emitting the fence would mess up the hardware ring buffer.
2185 + */
2186 +- old = rcu_dereference_protected(*ptr, 1);
2187 +- if (old && !dma_fence_is_signaled(old)) {
2188 +- DRM_INFO("rcu slot is busy\n");
2189 +- dma_fence_wait(old, false);
2190 +- }
2191 +-
2192 + rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
2193 +
2194 + *f = &fence->base;
2195 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2196 +index 76ee2de43ea6..dac7978f5ee1 100644
2197 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2198 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2199 +@@ -4369,8 +4369,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
2200 + static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
2201 + struct dc_stream_state *stream_state)
2202 + {
2203 +- stream_state->mode_changed =
2204 +- crtc_state->mode_changed || crtc_state->active_changed;
2205 ++ stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
2206 + }
2207 +
2208 + static int amdgpu_dm_atomic_commit(struct drm_device *dev,
2209 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
2210 +index 87bf422f16be..e0a96abb3c46 100644
2211 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
2212 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
2213 +@@ -1401,10 +1401,12 @@ bool dc_remove_plane_from_context(
2214 + * For head pipe detach surfaces from pipe for tail
2215 + * pipe just zero it out
2216 + */
2217 +- if (!pipe_ctx->top_pipe) {
2218 ++ if (!pipe_ctx->top_pipe ||
2219 ++ (!pipe_ctx->top_pipe->top_pipe &&
2220 ++ pipe_ctx->top_pipe->stream_res.opp != pipe_ctx->stream_res.opp)) {
2221 + pipe_ctx->plane_state = NULL;
2222 + pipe_ctx->bottom_pipe = NULL;
2223 +- } else {
2224 ++ } else {
2225 + memset(pipe_ctx, 0, sizeof(*pipe_ctx));
2226 + }
2227 + }
2228 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
2229 +index 4a863a5dab41..321af9af95e8 100644
2230 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
2231 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
2232 +@@ -406,15 +406,25 @@ void dpp1_dscl_calc_lb_num_partitions(
2233 + int *num_part_y,
2234 + int *num_part_c)
2235 + {
2236 ++ int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a,
2237 ++ lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a;
2238 ++
2239 + int line_size = scl_data->viewport.width < scl_data->recout.width ?
2240 + scl_data->viewport.width : scl_data->recout.width;
2241 + int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
2242 + scl_data->viewport_c.width : scl_data->recout.width;
2243 +- int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
2244 +- int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
2245 +- int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
2246 +- int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
2247 +- int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
2248 ++
2249 ++ if (line_size == 0)
2250 ++ line_size = 1;
2251 ++
2252 ++ if (line_size_c == 0)
2253 ++ line_size_c = 1;
2254 ++
2255 ++
2256 ++ lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
2257 ++ memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
2258 ++ memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
2259 ++ memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
2260 +
2261 + if (lb_config == LB_MEMORY_CONFIG_1) {
2262 + lb_memory_size = 816;
2263 +diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
2264 +index 0201ccb22f4c..d8ae4ca129c7 100644
2265 +--- a/drivers/gpu/drm/drm_drv.c
2266 ++++ b/drivers/gpu/drm/drm_drv.c
2267 +@@ -499,7 +499,7 @@ int drm_dev_init(struct drm_device *dev,
2268 + }
2269 +
2270 + kref_init(&dev->ref);
2271 +- dev->dev = parent;
2272 ++ dev->dev = get_device(parent);
2273 + dev->driver = driver;
2274 +
2275 + INIT_LIST_HEAD(&dev->filelist);
2276 +@@ -568,6 +568,7 @@ err_minors:
2277 + drm_minor_free(dev, DRM_MINOR_RENDER);
2278 + drm_fs_inode_free(dev->anon_inode);
2279 + err_free:
2280 ++ put_device(dev->dev);
2281 + mutex_destroy(&dev->master_mutex);
2282 + mutex_destroy(&dev->ctxlist_mutex);
2283 + mutex_destroy(&dev->clientlist_mutex);
2284 +@@ -603,6 +604,8 @@ void drm_dev_fini(struct drm_device *dev)
2285 + drm_minor_free(dev, DRM_MINOR_PRIMARY);
2286 + drm_minor_free(dev, DRM_MINOR_RENDER);
2287 +
2288 ++ put_device(dev->dev);
2289 ++
2290 + mutex_destroy(&dev->master_mutex);
2291 + mutex_destroy(&dev->ctxlist_mutex);
2292 + mutex_destroy(&dev->clientlist_mutex);
2293 +diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
2294 +index e4ccb52c67ea..334addaca9c5 100644
2295 +--- a/drivers/gpu/drm/drm_file.c
2296 ++++ b/drivers/gpu/drm/drm_file.c
2297 +@@ -567,6 +567,7 @@ put_back_event:
2298 + file_priv->event_space -= length;
2299 + list_add(&e->link, &file_priv->event_list);
2300 + spin_unlock_irq(&dev->event_lock);
2301 ++ wake_up_interruptible(&file_priv->event_wait);
2302 + break;
2303 + }
2304 +
2305 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
2306 +index 83c1f46670bf..00675fcbffa2 100644
2307 +--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
2308 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
2309 +@@ -527,6 +527,9 @@ static int etnaviv_bind(struct device *dev)
2310 + }
2311 + drm->dev_private = priv;
2312 +
2313 ++ dev->dma_parms = &priv->dma_parms;
2314 ++ dma_set_max_seg_size(dev, SZ_2G);
2315 ++
2316 + mutex_init(&priv->gem_lock);
2317 + INIT_LIST_HEAD(&priv->gem_list);
2318 + priv->num_gpus = 0;
2319 +@@ -564,6 +567,8 @@ static void etnaviv_unbind(struct device *dev)
2320 +
2321 + component_unbind_all(dev, drm);
2322 +
2323 ++ dev->dma_parms = NULL;
2324 ++
2325 + drm->dev_private = NULL;
2326 + kfree(priv);
2327 +
2328 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
2329 +index 8d02d1b7dcf5..b2930d1fe97c 100644
2330 +--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
2331 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
2332 +@@ -43,6 +43,7 @@ struct etnaviv_file_private {
2333 +
2334 + struct etnaviv_drm_private {
2335 + int num_gpus;
2336 ++ struct device_dma_parameters dma_parms;
2337 + struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
2338 +
2339 + /* list of GEM objects: */
2340 +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
2341 +index ab1d9308c311..ba6f3c14495c 100644
2342 +--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
2343 ++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
2344 +@@ -35,7 +35,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
2345 + {
2346 + struct device *dev = &gpu->pdev->dev;
2347 + const struct firmware *fw;
2348 +- struct device_node *np;
2349 ++ struct device_node *np, *mem_np;
2350 + struct resource r;
2351 + phys_addr_t mem_phys;
2352 + ssize_t mem_size;
2353 +@@ -49,11 +49,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
2354 + if (!np)
2355 + return -ENODEV;
2356 +
2357 +- np = of_parse_phandle(np, "memory-region", 0);
2358 +- if (!np)
2359 ++ mem_np = of_parse_phandle(np, "memory-region", 0);
2360 ++ of_node_put(np);
2361 ++ if (!mem_np)
2362 + return -EINVAL;
2363 +
2364 +- ret = of_address_to_resource(np, 0, &r);
2365 ++ ret = of_address_to_resource(mem_np, 0, &r);
2366 ++ of_node_put(mem_np);
2367 + if (ret)
2368 + return ret;
2369 +
2370 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
2371 +index 157b076a1272..38c9c086754b 100644
2372 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
2373 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
2374 +@@ -109,7 +109,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
2375 + struct nvkm_device *device = bar->base.subdev.device;
2376 + static struct lock_class_key bar1_lock;
2377 + static struct lock_class_key bar2_lock;
2378 +- u64 start, limit;
2379 ++ u64 start, limit, size;
2380 + int ret;
2381 +
2382 + ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
2383 +@@ -127,7 +127,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
2384 +
2385 + /* BAR2 */
2386 + start = 0x0100000000ULL;
2387 +- limit = start + device->func->resource_size(device, 3);
2388 ++ size = device->func->resource_size(device, 3);
2389 ++ if (!size)
2390 ++ return -ENOMEM;
2391 ++ limit = start + size;
2392 +
2393 + ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
2394 + &bar2_lock, "bar2", &bar->bar2_vmm);
2395 +@@ -164,7 +167,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
2396 +
2397 + /* BAR1 */
2398 + start = 0x0000000000ULL;
2399 +- limit = start + device->func->resource_size(device, 1);
2400 ++ size = device->func->resource_size(device, 1);
2401 ++ if (!size)
2402 ++ return -ENOMEM;
2403 ++ limit = start + size;
2404 +
2405 + ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
2406 + &bar1_lock, "bar1", &bar->bar1_vmm);
2407 +diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
2408 +index 74467b308721..8160954ebc25 100644
2409 +--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
2410 ++++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
2411 +@@ -1386,12 +1386,9 @@ static int dsi_pll_enable(struct dss_pll *pll)
2412 + */
2413 + dsi_enable_scp_clk(dsi);
2414 +
2415 +- if (!dsi->vdds_dsi_enabled) {
2416 +- r = regulator_enable(dsi->vdds_dsi_reg);
2417 +- if (r)
2418 +- goto err0;
2419 +- dsi->vdds_dsi_enabled = true;
2420 +- }
2421 ++ r = regulator_enable(dsi->vdds_dsi_reg);
2422 ++ if (r)
2423 ++ goto err0;
2424 +
2425 + /* XXX PLL does not come out of reset without this... */
2426 + dispc_pck_free_enable(dsi->dss->dispc, 1);
2427 +@@ -1416,36 +1413,25 @@ static int dsi_pll_enable(struct dss_pll *pll)
2428 +
2429 + return 0;
2430 + err1:
2431 +- if (dsi->vdds_dsi_enabled) {
2432 +- regulator_disable(dsi->vdds_dsi_reg);
2433 +- dsi->vdds_dsi_enabled = false;
2434 +- }
2435 ++ regulator_disable(dsi->vdds_dsi_reg);
2436 + err0:
2437 + dsi_disable_scp_clk(dsi);
2438 + dsi_runtime_put(dsi);
2439 + return r;
2440 + }
2441 +
2442 +-static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes)
2443 ++static void dsi_pll_disable(struct dss_pll *pll)
2444 + {
2445 ++ struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
2446 ++
2447 + dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
2448 +- if (disconnect_lanes) {
2449 +- WARN_ON(!dsi->vdds_dsi_enabled);
2450 +- regulator_disable(dsi->vdds_dsi_reg);
2451 +- dsi->vdds_dsi_enabled = false;
2452 +- }
2453 ++
2454 ++ regulator_disable(dsi->vdds_dsi_reg);
2455 +
2456 + dsi_disable_scp_clk(dsi);
2457 + dsi_runtime_put(dsi);
2458 +
2459 +- DSSDBG("PLL uninit done\n");
2460 +-}
2461 +-
2462 +-static void dsi_pll_disable(struct dss_pll *pll)
2463 +-{
2464 +- struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
2465 +-
2466 +- dsi_pll_uninit(dsi, true);
2467 ++ DSSDBG("PLL disable done\n");
2468 + }
2469 +
2470 + static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s)
2471 +@@ -4195,11 +4181,11 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
2472 +
2473 + r = dss_pll_enable(&dsi->pll);
2474 + if (r)
2475 +- goto err0;
2476 ++ return r;
2477 +
2478 + r = dsi_configure_dsi_clocks(dsi);
2479 + if (r)
2480 +- goto err1;
2481 ++ goto err0;
2482 +
2483 + dss_select_dsi_clk_source(dsi->dss, dsi->module_id,
2484 + dsi->module_id == 0 ?
2485 +@@ -4207,6 +4193,14 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
2486 +
2487 + DSSDBG("PLL OK\n");
2488 +
2489 ++ if (!dsi->vdds_dsi_enabled) {
2490 ++ r = regulator_enable(dsi->vdds_dsi_reg);
2491 ++ if (r)
2492 ++ goto err1;
2493 ++
2494 ++ dsi->vdds_dsi_enabled = true;
2495 ++ }
2496 ++
2497 + r = dsi_cio_init(dsi);
2498 + if (r)
2499 + goto err2;
2500 +@@ -4235,10 +4229,13 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
2501 + err3:
2502 + dsi_cio_uninit(dsi);
2503 + err2:
2504 +- dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
2505 ++ regulator_disable(dsi->vdds_dsi_reg);
2506 ++ dsi->vdds_dsi_enabled = false;
2507 + err1:
2508 +- dss_pll_disable(&dsi->pll);
2509 ++ dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
2510 + err0:
2511 ++ dss_pll_disable(&dsi->pll);
2512 ++
2513 + return r;
2514 + }
2515 +
2516 +@@ -4257,7 +4254,12 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
2517 +
2518 + dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
2519 + dsi_cio_uninit(dsi);
2520 +- dsi_pll_uninit(dsi, disconnect_lanes);
2521 ++ dss_pll_disable(&dsi->pll);
2522 ++
2523 ++ if (disconnect_lanes) {
2524 ++ regulator_disable(dsi->vdds_dsi_reg);
2525 ++ dsi->vdds_dsi_enabled = false;
2526 ++ }
2527 + }
2528 +
2529 + static int dsi_display_enable(struct omap_dss_device *dssdev)
2530 +diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
2531 +index 87fa316e1d7b..58ccf648b70f 100644
2532 +--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
2533 ++++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
2534 +@@ -248,6 +248,9 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
2535 + /* Send Command GRAM memory write (no parameters) */
2536 + dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START);
2537 +
2538 ++ /* Wait a short while to let the panel be ready before the 1st frame */
2539 ++ mdelay(10);
2540 ++
2541 + return 0;
2542 + }
2543 +
2544 +diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
2545 +index b9baefdba38a..1c318ad32a8c 100644
2546 +--- a/drivers/gpu/drm/pl111/pl111_versatile.c
2547 ++++ b/drivers/gpu/drm/pl111/pl111_versatile.c
2548 +@@ -330,6 +330,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
2549 + ret = vexpress_muxfpga_init();
2550 + if (ret) {
2551 + dev_err(dev, "unable to initialize muxfpga driver\n");
2552 ++ of_node_put(np);
2553 + return ret;
2554 + }
2555 +
2556 +@@ -337,17 +338,20 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
2557 + pdev = of_find_device_b