Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 05 Dec 2017 11:37:51 +0000
Message-Id: 1512473859.1bb602ad6adb38293ba3d45967d7d636fdd71698.mpagano@gentoo
1 commit: 1bb602ad6adb38293ba3d45967d7d636fdd71698
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Dec 5 11:37:39 2017 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Dec 5 11:37:39 2017 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1bb602ad
7
8 Linux patch 4.14.4
9
10 0000_README | 4 +
11 1003_linux-4.14.4.patch | 3494 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 3498 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 9aaf65a..3b5d05b 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -55,6 +55,10 @@ Patch: 1002_linux-4.14.3.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.14.3
21
22 +Patch: 1003_linux-4.14.4.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.14.4
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1003_linux-4.14.4.patch b/1003_linux-4.14.4.patch
31 new file mode 100644
32 index 0000000..c43b800
33 --- /dev/null
34 +++ b/1003_linux-4.14.4.patch
35 @@ -0,0 +1,3494 @@
36 +diff --git a/Documentation/devicetree/bindings/hwmon/jc42.txt b/Documentation/devicetree/bindings/hwmon/jc42.txt
37 +index 07a250498fbb..f569db58f64a 100644
38 +--- a/Documentation/devicetree/bindings/hwmon/jc42.txt
39 ++++ b/Documentation/devicetree/bindings/hwmon/jc42.txt
40 +@@ -34,6 +34,10 @@ Required properties:
41 +
42 + - reg: I2C address
43 +
44 ++Optional properties:
45 ++- smbus-timeout-disable: When set, the smbus timeout function will be disabled.
46 ++ This is not supported on all chips.
47 ++
48 + Example:
49 +
50 + temp-sensor@1a {
51 +diff --git a/Makefile b/Makefile
52 +index ede4de0d8634..ba1648c093fe 100644
53 +--- a/Makefile
54 ++++ b/Makefile
55 +@@ -1,7 +1,7 @@
56 + # SPDX-License-Identifier: GPL-2.0
57 + VERSION = 4
58 + PATCHLEVEL = 14
59 +-SUBLEVEL = 3
60 ++SUBLEVEL = 4
61 + EXTRAVERSION =
62 + NAME = Petit Gorille
63 +
64 +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
65 +index 939b310913cf..3eb4397150df 100644
66 +--- a/arch/arm64/Makefile
67 ++++ b/arch/arm64/Makefile
68 +@@ -77,9 +77,6 @@ endif
69 +
70 + ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
71 + KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
72 +-ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
73 +-KBUILD_LDFLAGS_MODULE += $(objtree)/arch/arm64/kernel/ftrace-mod.o
74 +-endif
75 + endif
76 +
77 + # Default value
78 +diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
79 +index 19bd97671bb8..4f766178fa6f 100644
80 +--- a/arch/arm64/include/asm/module.h
81 ++++ b/arch/arm64/include/asm/module.h
82 +@@ -32,7 +32,7 @@ struct mod_arch_specific {
83 + struct mod_plt_sec init;
84 +
85 + /* for CONFIG_DYNAMIC_FTRACE */
86 +- void *ftrace_trampoline;
87 ++ struct plt_entry *ftrace_trampoline;
88 + };
89 + #endif
90 +
91 +@@ -45,4 +45,48 @@ extern u64 module_alloc_base;
92 + #define module_alloc_base ((u64)_etext - MODULES_VSIZE)
93 + #endif
94 +
95 ++struct plt_entry {
96 ++ /*
97 ++ * A program that conforms to the AArch64 Procedure Call Standard
98 ++ * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
99 ++ * IP1 (x17) may be inserted at any branch instruction that is
100 ++ * exposed to a relocation that supports long branches. Since that
101 ++ * is exactly what we are dealing with here, we are free to use x16
102 ++ * as a scratch register in the PLT veneers.
103 ++ */
104 ++ __le32 mov0; /* movn x16, #0x.... */
105 ++ __le32 mov1; /* movk x16, #0x...., lsl #16 */
106 ++ __le32 mov2; /* movk x16, #0x...., lsl #32 */
107 ++ __le32 br; /* br x16 */
108 ++};
109 ++
110 ++static inline struct plt_entry get_plt_entry(u64 val)
111 ++{
112 ++ /*
113 ++ * MOVK/MOVN/MOVZ opcode:
114 ++ * +--------+------------+--------+-----------+-------------+---------+
115 ++ * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
116 ++ * +--------+------------+--------+-----------+-------------+---------+
117 ++ *
118 ++ * Rd := 0x10 (x16)
119 ++ * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
120 ++ * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
121 ++ * sf := 1 (64-bit variant)
122 ++ */
123 ++ return (struct plt_entry){
124 ++ cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5),
125 ++ cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
126 ++ cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
127 ++ cpu_to_le32(0xd61f0200)
128 ++ };
129 ++}
130 ++
131 ++static inline bool plt_entries_equal(const struct plt_entry *a,
132 ++ const struct plt_entry *b)
133 ++{
134 ++ return a->mov0 == b->mov0 &&
135 ++ a->mov1 == b->mov1 &&
136 ++ a->mov2 == b->mov2;
137 ++}
138 ++
139 + #endif /* __ASM_MODULE_H */
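
As a sanity check on the encoding above, here is a minimal userspace sketch (not kernel code; the branch target is a hypothetical kernel-style address whose top 16 bits are all ones, which is exactly what the leading MOVN supplies for free):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t val = 0xffff000008123456ULL;   /* hypothetical branch target */

    /* same packing as get_plt_entry() above */
    uint32_t mov0 = 0x92800010u | (uint32_t)(((~val) & 0xffff) << 5);      /* movn x16, #lo16(~val) */
    uint32_t mov1 = 0xf2a00010u | (uint32_t)(((val >> 16) & 0xffff) << 5); /* movk x16, ..., lsl #16 */
    uint32_t mov2 = 0xf2c00010u | (uint32_t)(((val >> 32) & 0xffff) << 5); /* movk x16, ..., lsl #32 */
    uint32_t br   = 0xd61f0200u;                                           /* br x16 */

    /* emulate what x16 holds after the three moves */
    uint64_t x16 = ~((~val) & 0xffff);                                 /* movn inverts its immediate */
    x16 = (x16 & ~0xffff0000ULL)     | (((val >> 16) & 0xffff) << 16);
    x16 = (x16 & ~0xffff00000000ULL) | (((val >> 32) & 0xffff) << 32);

    printf("%08x %08x %08x %08x  x16=%#llx\n",
           (unsigned)mov0, (unsigned)mov1, (unsigned)mov2, (unsigned)br,
           (unsigned long long)x16);
    return 0;
}

The printed x16 equals val, confirming the three-instruction sequence reconstructs the full 48-bit address.
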
140 +diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
141 +index 0029e13adb59..2f5ff2a65db3 100644
142 +--- a/arch/arm64/kernel/Makefile
143 ++++ b/arch/arm64/kernel/Makefile
144 +@@ -63,6 +63,3 @@ extra-y += $(head-y) vmlinux.lds
145 + ifeq ($(CONFIG_DEBUG_EFI),y)
146 + AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
147 + endif
148 +-
149 +-# will be included by each individual module but not by the core kernel itself
150 +-extra-$(CONFIG_DYNAMIC_FTRACE) += ftrace-mod.o
151 +diff --git a/arch/arm64/kernel/ftrace-mod.S b/arch/arm64/kernel/ftrace-mod.S
152 +deleted file mode 100644
153 +index 00c4025be4ff..000000000000
154 +--- a/arch/arm64/kernel/ftrace-mod.S
155 ++++ /dev/null
156 +@@ -1,18 +0,0 @@
157 +-/*
158 +- * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@××××××.org>
159 +- *
160 +- * This program is free software; you can redistribute it and/or modify
161 +- * it under the terms of the GNU General Public License version 2 as
162 +- * published by the Free Software Foundation.
163 +- */
164 +-
165 +-#include <linux/linkage.h>
166 +-#include <asm/assembler.h>
167 +-
168 +- .section ".text.ftrace_trampoline", "ax"
169 +- .align 3
170 +-0: .quad 0
171 +-__ftrace_trampoline:
172 +- ldr x16, 0b
173 +- br x16
174 +-ENDPROC(__ftrace_trampoline)
175 +diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
176 +index c13b1fca0e5b..50986e388d2b 100644
177 +--- a/arch/arm64/kernel/ftrace.c
178 ++++ b/arch/arm64/kernel/ftrace.c
179 +@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
180 +
181 + if (offset < -SZ_128M || offset >= SZ_128M) {
182 + #ifdef CONFIG_ARM64_MODULE_PLTS
183 +- unsigned long *trampoline;
184 ++ struct plt_entry trampoline;
185 + struct module *mod;
186 +
187 + /*
188 +@@ -104,22 +104,24 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
189 + * is added in the future, but for now, the pr_err() below
190 + * deals with a theoretical issue only.
191 + */
192 +- trampoline = (unsigned long *)mod->arch.ftrace_trampoline;
193 +- if (trampoline[0] != addr) {
194 +- if (trampoline[0] != 0) {
195 ++ trampoline = get_plt_entry(addr);
196 ++ if (!plt_entries_equal(mod->arch.ftrace_trampoline,
197 ++ &trampoline)) {
198 ++ if (!plt_entries_equal(mod->arch.ftrace_trampoline,
199 ++ &(struct plt_entry){})) {
200 + pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
201 + return -EINVAL;
202 + }
203 +
204 + /* point the trampoline to our ftrace entry point */
205 + module_disable_ro(mod);
206 +- trampoline[0] = addr;
207 ++ *mod->arch.ftrace_trampoline = trampoline;
208 + module_enable_ro(mod, true);
209 +
210 + /* update trampoline before patching in the branch */
211 + smp_wmb();
212 + }
213 +- addr = (unsigned long)&trampoline[1];
214 ++ addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
215 + #else /* CONFIG_ARM64_MODULE_PLTS */
216 + return -EINVAL;
217 + #endif /* CONFIG_ARM64_MODULE_PLTS */
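
The smp_wmb() in this hunk enforces the usual publish order: fill in the trampoline first, then expose the branch that jumps to it. A rough C11 analogue of the pattern (illustrative names, not kernel API):

#include <stdatomic.h>
#include <stdint.h>

static uint64_t trampoline_target;     /* the PLT entry's payload */
static _Atomic uint32_t branch_insn;   /* the instruction being patched */

static void publish_trampoline(uint64_t addr, uint32_t new_branch)
{
    trampoline_target = addr;                       /* 1: fill in the trampoline */
    atomic_thread_fence(memory_order_release);      /* 2: roughly smp_wmb() */
    atomic_store_explicit(&branch_insn, new_branch, /* 3: only now expose the branch */
                          memory_order_relaxed);
}

int main(void)
{
    publish_trampoline(0xffff000008123456ULL, 0x14000000u);
    return 0;
}
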
218 +diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
219 +index d05dbe658409..ea640f92fe5a 100644
220 +--- a/arch/arm64/kernel/module-plts.c
221 ++++ b/arch/arm64/kernel/module-plts.c
222 +@@ -11,21 +11,6 @@
223 + #include <linux/module.h>
224 + #include <linux/sort.h>
225 +
226 +-struct plt_entry {
227 +- /*
228 +- * A program that conforms to the AArch64 Procedure Call Standard
229 +- * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
230 +- * IP1 (x17) may be inserted at any branch instruction that is
231 +- * exposed to a relocation that supports long branches. Since that
232 +- * is exactly what we are dealing with here, we are free to use x16
233 +- * as a scratch register in the PLT veneers.
234 +- */
235 +- __le32 mov0; /* movn x16, #0x.... */
236 +- __le32 mov1; /* movk x16, #0x...., lsl #16 */
237 +- __le32 mov2; /* movk x16, #0x...., lsl #32 */
238 +- __le32 br; /* br x16 */
239 +-};
240 +-
241 + static bool in_init(const struct module *mod, void *loc)
242 + {
243 + return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
244 +@@ -40,33 +25,14 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
245 + int i = pltsec->plt_num_entries;
246 + u64 val = sym->st_value + rela->r_addend;
247 +
248 +- /*
249 +- * MOVK/MOVN/MOVZ opcode:
250 +- * +--------+------------+--------+-----------+-------------+---------+
251 +- * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
252 +- * +--------+------------+--------+-----------+-------------+---------+
253 +- *
254 +- * Rd := 0x10 (x16)
255 +- * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
256 +- * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
257 +- * sf := 1 (64-bit variant)
258 +- */
259 +- plt[i] = (struct plt_entry){
260 +- cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5),
261 +- cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
262 +- cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
263 +- cpu_to_le32(0xd61f0200)
264 +- };
265 ++ plt[i] = get_plt_entry(val);
266 +
267 + /*
268 + * Check if the entry we just created is a duplicate. Given that the
269 + * relocations are sorted, this will be the last entry we allocated.
270 + * (if one exists).
271 + */
272 +- if (i > 0 &&
273 +- plt[i].mov0 == plt[i - 1].mov0 &&
274 +- plt[i].mov1 == plt[i - 1].mov1 &&
275 +- plt[i].mov2 == plt[i - 1].mov2)
276 ++ if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
277 + return (u64)&plt[i - 1];
278 +
279 + pltsec->plt_num_entries++;
280 +@@ -154,6 +120,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
281 + unsigned long core_plts = 0;
282 + unsigned long init_plts = 0;
283 + Elf64_Sym *syms = NULL;
284 ++ Elf_Shdr *tramp = NULL;
285 + int i;
286 +
287 + /*
288 +@@ -165,6 +132,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
289 + mod->arch.core.plt = sechdrs + i;
290 + else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
291 + mod->arch.init.plt = sechdrs + i;
292 ++ else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
293 ++ !strcmp(secstrings + sechdrs[i].sh_name,
294 ++ ".text.ftrace_trampoline"))
295 ++ tramp = sechdrs + i;
296 + else if (sechdrs[i].sh_type == SHT_SYMTAB)
297 + syms = (Elf64_Sym *)sechdrs[i].sh_addr;
298 + }
299 +@@ -215,5 +186,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
300 + mod->arch.init.plt_num_entries = 0;
301 + mod->arch.init.plt_max_entries = init_plts;
302 +
303 ++ if (tramp) {
304 ++ tramp->sh_type = SHT_NOBITS;
305 ++ tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
306 ++ tramp->sh_addralign = __alignof__(struct plt_entry);
307 ++ tramp->sh_size = sizeof(struct plt_entry);
308 ++ }
309 ++
310 + return 0;
311 + }
312 +diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds
313 +index f7c9781a9d48..22e36a21c113 100644
314 +--- a/arch/arm64/kernel/module.lds
315 ++++ b/arch/arm64/kernel/module.lds
316 +@@ -1,4 +1,5 @@
317 + SECTIONS {
318 + .plt (NOLOAD) : { BYTE(0) }
319 + .init.plt (NOLOAD) : { BYTE(0) }
320 ++ .text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
321 + }
322 +diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
323 +index 8ac0bd2bddb0..3280953a82cf 100644
324 +--- a/arch/powerpc/kernel/misc_64.S
325 ++++ b/arch/powerpc/kernel/misc_64.S
326 +@@ -623,7 +623,9 @@ BEGIN_FTR_SECTION
327 + * NOTE, we rely on r0 being 0 from above.
328 + */
329 + mtspr SPRN_IAMR,r0
330 ++BEGIN_FTR_SECTION_NESTED(42)
331 + mtspr SPRN_AMOR,r0
332 ++END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
333 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
334 +
335 + /* save regs for local vars on new stack.
336 +diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
337 +index 3848af167df9..640cf566e986 100644
338 +--- a/arch/powerpc/mm/hash_native_64.c
339 ++++ b/arch/powerpc/mm/hash_native_64.c
340 +@@ -47,7 +47,8 @@
341 +
342 + DEFINE_RAW_SPINLOCK(native_tlbie_lock);
343 +
344 +-static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
345 ++static inline unsigned long ___tlbie(unsigned long vpn, int psize,
346 ++ int apsize, int ssize)
347 + {
348 + unsigned long va;
349 + unsigned int penc;
350 +@@ -100,7 +101,15 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
351 + : "memory");
352 + break;
353 + }
354 +- trace_tlbie(0, 0, va, 0, 0, 0, 0);
355 ++ return va;
356 ++}
357 ++
358 ++static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
359 ++{
360 ++ unsigned long rb;
361 ++
362 ++ rb = ___tlbie(vpn, psize, apsize, ssize);
363 ++ trace_tlbie(0, 0, rb, 0, 0, 0, 0);
364 + }
365 +
366 + static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
367 +@@ -652,7 +661,7 @@ static void native_hpte_clear(void)
368 + if (hpte_v & HPTE_V_VALID) {
369 + hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
370 + hptep->v = 0;
371 +- __tlbie(vpn, psize, apsize, ssize);
372 ++ ___tlbie(vpn, psize, apsize, ssize);
373 + }
374 + }
375 +
376 +diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
377 +index 9a3cb3983c01..1a61b1b997f2 100644
378 +--- a/arch/s390/include/asm/elf.h
379 ++++ b/arch/s390/include/asm/elf.h
380 +@@ -194,13 +194,14 @@ struct arch_elf_state {
381 + #define CORE_DUMP_USE_REGSET
382 + #define ELF_EXEC_PAGESIZE PAGE_SIZE
383 +
384 +-/*
385 +- * This is the base location for PIE (ET_DYN with INTERP) loads. On
386 +- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
387 +- * space open for things that want to use the area for 32-bit pointers.
388 +- */
389 +-#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
390 +- 0x100000000UL)
391 ++/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
392 ++ use of this is to invoke "./ld.so someprog" to test out a new version of
393 ++ the loader. We need to make sure that it is out of the way of the program
394 ++ that it will "exec", and that there is sufficient room for the brk. 64-bit
395 ++ tasks are aligned to 4GB. */
396 ++#define ELF_ET_DYN_BASE (is_compat_task() ? \
397 ++ (STACK_TOP / 3 * 2) : \
398 ++ (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
399 +
400 + /* This yields a mask that user programs can use to figure out what
401 + instruction set this CPU supports. */
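
Concretely, with a hypothetical STACK_TOP of 0x20000000000 the new formula puts a 64-bit task's ET_DYN base two thirds of the way up and rounded down to a 4GB boundary:

#include <stdio.h>

int main(void)
{
    unsigned long stack_top = 0x20000000000UL;  /* hypothetical value, not s390's real STACK_TOP */
    unsigned long compat = stack_top / 3 * 2;                        /* compat tasks: no alignment */
    unsigned long full   = (stack_top / 3 * 2) & ~((1UL << 32) - 1); /* 64-bit: align down to 4GB */

    printf("compat base: %#lx\n", compat);  /* 0x15555555554 */
    printf("64-bit base: %#lx\n", full);    /* 0x15500000000 */
    return 0;
}
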
402 +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
403 +index 518d9286b3d1..2e956afe272c 100644
404 +--- a/arch/x86/entry/entry_64.S
405 ++++ b/arch/x86/entry/entry_64.S
406 +@@ -51,19 +51,15 @@ ENTRY(native_usergs_sysret64)
407 + END(native_usergs_sysret64)
408 + #endif /* CONFIG_PARAVIRT */
409 +
410 +-.macro TRACE_IRQS_FLAGS flags:req
411 ++.macro TRACE_IRQS_IRETQ
412 + #ifdef CONFIG_TRACE_IRQFLAGS
413 +- bt $9, \flags /* interrupts off? */
414 ++ bt $9, EFLAGS(%rsp) /* interrupts off? */
415 + jnc 1f
416 + TRACE_IRQS_ON
417 + 1:
418 + #endif
419 + .endm
420 +
421 +-.macro TRACE_IRQS_IRETQ
422 +- TRACE_IRQS_FLAGS EFLAGS(%rsp)
423 +-.endm
424 +-
425 + /*
426 + * When dynamic function tracer is enabled it will add a breakpoint
427 + * to all locations that it is about to modify, sync CPUs, update
428 +@@ -927,13 +923,11 @@ ENTRY(native_load_gs_index)
429 + FRAME_BEGIN
430 + pushfq
431 + DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
432 +- TRACE_IRQS_OFF
433 + SWAPGS
434 + .Lgs_change:
435 + movl %edi, %gs
436 + 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
437 + SWAPGS
438 +- TRACE_IRQS_FLAGS (%rsp)
439 + popfq
440 + FRAME_END
441 + ret
442 +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
443 +index f735c3016325..f02de8bc1f72 100644
444 +--- a/arch/x86/include/asm/pgtable.h
445 ++++ b/arch/x86/include/asm/pgtable.h
446 +@@ -1093,6 +1093,12 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
447 + clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
448 + }
449 +
450 ++#define pud_write pud_write
451 ++static inline int pud_write(pud_t pud)
452 ++{
453 ++ return pud_flags(pud) & _PAGE_RW;
454 ++}
455 ++
456 + /*
457 + * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
458 + *
459 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
460 +index 36c90d631096..ef03efba1c23 100644
461 +--- a/arch/x86/kvm/lapic.c
462 ++++ b/arch/x86/kvm/lapic.c
463 +@@ -266,9 +266,14 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
464 + recalculate_apic_map(apic->vcpu->kvm);
465 + }
466 +
467 ++static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
468 ++{
469 ++ return ((id >> 4) << 16) | (1 << (id & 0xf));
470 ++}
471 ++
472 + static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
473 + {
474 +- u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
475 ++ u32 ldr = kvm_apic_calc_x2apic_ldr(id);
476 +
477 + WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
478 +
479 +@@ -2196,6 +2201,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
480 + {
481 + if (apic_x2apic_mode(vcpu->arch.apic)) {
482 + u32 *id = (u32 *)(s->regs + APIC_ID);
483 ++ u32 *ldr = (u32 *)(s->regs + APIC_LDR);
484 +
485 + if (vcpu->kvm->arch.x2apic_format) {
486 + if (*id != vcpu->vcpu_id)
487 +@@ -2206,6 +2212,10 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
488 + else
489 + *id <<= 24;
490 + }
491 ++
492 ++ /* In x2APIC mode, the LDR is fixed and based on the id */
493 ++ if (set)
494 ++ *ldr = kvm_apic_calc_x2apic_ldr(*id);
495 + }
496 +
497 + return 0;
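
The LDR derivation being factored out is pure bit arithmetic: the cluster number (id >> 4) goes in the upper 16 bits and a one-hot position bit in the lower 16. For example, APIC ID 35 lands in cluster 2, position 3:

#include <stdint.h>
#include <stdio.h>

static uint32_t x2apic_ldr(uint32_t id)
{
    return ((id >> 4) << 16) | (1u << (id & 0xf));
}

int main(void)
{
    printf("%#010x\n", x2apic_ldr(35));  /* 0x00020008: cluster 2, bit 3 */
    printf("%#010x\n", x2apic_ldr(5));   /* 0x00000020: cluster 0, bit 5 */
    return 0;
}
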
498 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
499 +index ca209a4a7834..17fb6c6d939a 100644
500 +--- a/arch/x86/kvm/svm.c
501 ++++ b/arch/x86/kvm/svm.c
502 +@@ -2189,6 +2189,8 @@ static int ud_interception(struct vcpu_svm *svm)
503 + int er;
504 +
505 + er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
506 ++ if (er == EMULATE_USER_EXIT)
507 ++ return 0;
508 + if (er != EMULATE_DONE)
509 + kvm_queue_exception(&svm->vcpu, UD_VECTOR);
510 + return 1;
511 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
512 +index 21cad7068cbf..b21113bcf227 100644
513 +--- a/arch/x86/kvm/vmx.c
514 ++++ b/arch/x86/kvm/vmx.c
515 +@@ -5914,6 +5914,8 @@ static int handle_exception(struct kvm_vcpu *vcpu)
516 + return 1;
517 + }
518 + er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
519 ++ if (er == EMULATE_USER_EXIT)
520 ++ return 0;
521 + if (er != EMULATE_DONE)
522 + kvm_queue_exception(vcpu, UD_VECTOR);
523 + return 1;
524 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
525 +index 03869eb7fcd6..4195cbcdb310 100644
526 +--- a/arch/x86/kvm/x86.c
527 ++++ b/arch/x86/kvm/x86.c
528 +@@ -1830,6 +1830,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
529 + */
530 + BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
531 +
532 ++ if (guest_hv_clock.version & 1)
533 ++ ++guest_hv_clock.version; /* first time write, random junk */
534 ++
535 + vcpu->hv_clock.version = guest_hv_clock.version + 1;
536 + kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
537 + &vcpu->hv_clock,
538 +@@ -5705,6 +5708,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
539 + if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
540 + emulation_type))
541 + return EMULATE_DONE;
542 ++ if (ctxt->have_exception && inject_emulated_exception(vcpu))
543 ++ return EMULATE_DONE;
544 + if (emulation_type & EMULTYPE_SKIP)
545 + return EMULATE_FAIL;
546 + return handle_emulation_failure(vcpu);
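
The pvclock hunk above restores the seqcount-like contract on the version field: the host leaves it even between updates and bumps it to odd while writing, so guests retry whenever they see an odd or changed value. A simplified sketch of the guest-side read loop (real guests re-read more fields than this):

#include <stdint.h>

struct pvclock_sample {
    volatile uint32_t version;
    volatile uint64_t tsc_timestamp;
};

static uint64_t read_sample(struct pvclock_sample *c)
{
    uint32_t v;
    uint64_t ts;

    do {
        v = c->version;                          /* odd => update in flight */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        ts = c->tsc_timestamp;
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    } while ((v & 1) || c->version != v);        /* retry on torn read */

    return ts;
}

int main(void)
{
    struct pvclock_sample s = { .version = 2, .tsc_timestamp = 12345 };
    return read_sample(&s) == 12345 ? 0 : 1;
}
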
547 +diff --git a/crypto/af_alg.c b/crypto/af_alg.c
548 +index 337cf382718e..a72659f452a5 100644
549 +--- a/crypto/af_alg.c
550 ++++ b/crypto/af_alg.c
551 +@@ -1047,6 +1047,18 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
552 + }
553 + EXPORT_SYMBOL_GPL(af_alg_sendpage);
554 +
555 ++/**
556 ++ * af_alg_free_resources - release resources required for crypto request
557 ++ */
558 ++void af_alg_free_resources(struct af_alg_async_req *areq)
559 ++{
560 ++ struct sock *sk = areq->sk;
561 ++
562 ++ af_alg_free_areq_sgls(areq);
563 ++ sock_kfree_s(sk, areq, areq->areqlen);
564 ++}
565 ++EXPORT_SYMBOL_GPL(af_alg_free_resources);
566 ++
567 + /**
568 + * af_alg_async_cb - AIO callback handler
569 + *
570 +@@ -1063,18 +1075,13 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
571 + struct kiocb *iocb = areq->iocb;
572 + unsigned int resultlen;
573 +
574 +- lock_sock(sk);
575 +-
576 + /* Buffer size written by crypto operation. */
577 + resultlen = areq->outlen;
578 +
579 +- af_alg_free_areq_sgls(areq);
580 +- sock_kfree_s(sk, areq, areq->areqlen);
581 +- __sock_put(sk);
582 ++ af_alg_free_resources(areq);
583 ++ sock_put(sk);
584 +
585 + iocb->ki_complete(iocb, err ? err : resultlen, 0);
586 +-
587 +- release_sock(sk);
588 + }
589 + EXPORT_SYMBOL_GPL(af_alg_async_cb);
590 +
591 +diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
592 +index 516b38c3a169..d0b45145cb30 100644
593 +--- a/crypto/algif_aead.c
594 ++++ b/crypto/algif_aead.c
595 +@@ -101,10 +101,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
596 + struct aead_tfm *aeadc = pask->private;
597 + struct crypto_aead *tfm = aeadc->aead;
598 + struct crypto_skcipher *null_tfm = aeadc->null_tfm;
599 +- unsigned int as = crypto_aead_authsize(tfm);
600 ++ unsigned int i, as = crypto_aead_authsize(tfm);
601 + struct af_alg_async_req *areq;
602 +- struct af_alg_tsgl *tsgl;
603 +- struct scatterlist *src;
604 ++ struct af_alg_tsgl *tsgl, *tmp;
605 ++ struct scatterlist *rsgl_src, *tsgl_src = NULL;
606 + int err = 0;
607 + size_t used = 0; /* [in] TX bufs to be en/decrypted */
608 + size_t outlen = 0; /* [out] RX bufs produced by kernel */
609 +@@ -178,7 +178,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
610 + }
611 +
612 + processed = used + ctx->aead_assoclen;
613 +- tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
614 ++ list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
615 ++ for (i = 0; i < tsgl->cur; i++) {
616 ++ struct scatterlist *process_sg = tsgl->sg + i;
617 ++
618 ++ if (!(process_sg->length) || !sg_page(process_sg))
619 ++ continue;
620 ++ tsgl_src = process_sg;
621 ++ break;
622 ++ }
623 ++ if (tsgl_src)
624 ++ break;
625 ++ }
626 ++ if (processed && !tsgl_src) {
627 ++ err = -EFAULT;
628 ++ goto free;
629 ++ }
630 +
631 + /*
632 + * Copy of AAD from source to destination
633 +@@ -194,7 +209,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
634 + */
635 +
636 + /* Use the RX SGL as source (and destination) for crypto op. */
637 +- src = areq->first_rsgl.sgl.sg;
638 ++ rsgl_src = areq->first_rsgl.sgl.sg;
639 +
640 + if (ctx->enc) {
641 + /*
642 +@@ -207,7 +222,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
643 + * v v
644 + * RX SGL: AAD || PT || Tag
645 + */
646 +- err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
647 ++ err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
648 + areq->first_rsgl.sgl.sg, processed);
649 + if (err)
650 + goto free;
651 +@@ -225,7 +240,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
652 + */
653 +
654 + /* Copy AAD || CT to RX SGL buffer for in-place operation. */
655 +- err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
656 ++ err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
657 + areq->first_rsgl.sgl.sg, outlen);
658 + if (err)
659 + goto free;
660 +@@ -257,23 +272,34 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
661 + areq->tsgl);
662 + } else
663 + /* no RX SGL present (e.g. authentication only) */
664 +- src = areq->tsgl;
665 ++ rsgl_src = areq->tsgl;
666 + }
667 +
668 + /* Initialize the crypto operation */
669 +- aead_request_set_crypt(&areq->cra_u.aead_req, src,
670 ++ aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
671 + areq->first_rsgl.sgl.sg, used, ctx->iv);
672 + aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
673 + aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
674 +
675 + if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
676 + /* AIO operation */
677 ++ sock_hold(sk);
678 + areq->iocb = msg->msg_iocb;
679 + aead_request_set_callback(&areq->cra_u.aead_req,
680 + CRYPTO_TFM_REQ_MAY_BACKLOG,
681 + af_alg_async_cb, areq);
682 + err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
683 + crypto_aead_decrypt(&areq->cra_u.aead_req);
684 ++
685 ++ /* AIO operation in progress */
686 ++ if (err == -EINPROGRESS || err == -EBUSY) {
687 ++ /* Remember output size that will be generated. */
688 ++ areq->outlen = outlen;
689 ++
690 ++ return -EIOCBQUEUED;
691 ++ }
692 ++
693 ++ sock_put(sk);
694 + } else {
695 + /* Synchronous operation */
696 + aead_request_set_callback(&areq->cra_u.aead_req,
697 +@@ -285,19 +311,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
698 + &ctx->completion);
699 + }
700 +
701 +- /* AIO operation in progress */
702 +- if (err == -EINPROGRESS) {
703 +- sock_hold(sk);
704 +-
705 +- /* Remember output size that will be generated. */
706 +- areq->outlen = outlen;
707 +-
708 +- return -EIOCBQUEUED;
709 +- }
710 +
711 + free:
712 +- af_alg_free_areq_sgls(areq);
713 +- sock_kfree_s(sk, areq, areq->areqlen);
714 ++ af_alg_free_resources(areq);
715 +
716 + return err ? err : outlen;
717 + }
718 +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
719 +index 8ae4170aaeb4..30ee2a8e8f42 100644
720 +--- a/crypto/algif_skcipher.c
721 ++++ b/crypto/algif_skcipher.c
722 +@@ -117,6 +117,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
723 +
724 + if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
725 + /* AIO operation */
726 ++ sock_hold(sk);
727 + areq->iocb = msg->msg_iocb;
728 + skcipher_request_set_callback(&areq->cra_u.skcipher_req,
729 + CRYPTO_TFM_REQ_MAY_SLEEP,
730 +@@ -124,6 +125,16 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
731 + err = ctx->enc ?
732 + crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
733 + crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
734 ++
735 ++ /* AIO operation in progress */
736 ++ if (err == -EINPROGRESS || err == -EBUSY) {
737 ++ /* Remember output size that will be generated. */
738 ++ areq->outlen = len;
739 ++
740 ++ return -EIOCBQUEUED;
741 ++ }
742 ++
743 ++ sock_put(sk);
744 + } else {
745 + /* Synchronous operation */
746 + skcipher_request_set_callback(&areq->cra_u.skcipher_req,
747 +@@ -137,19 +148,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
748 + &ctx->completion);
749 + }
750 +
751 +- /* AIO operation in progress */
752 +- if (err == -EINPROGRESS) {
753 +- sock_hold(sk);
754 +-
755 +- /* Remember output size that will be generated. */
756 +- areq->outlen = len;
757 +-
758 +- return -EIOCBQUEUED;
759 +- }
760 +
761 + free:
762 +- af_alg_free_areq_sgls(areq);
763 +- sock_kfree_s(sk, areq, areq->areqlen);
764 ++ af_alg_free_resources(areq);
765 +
766 + return err ? err : len;
767 + }
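
Both algif conversions follow the same ownership hand-off: take a socket reference before submitting, return -EIOCBQUEUED when the request went asynchronous (the callback now owns the reference), and drop the reference immediately on an inline completion. A toy model of that hand-off (names and the EIOCBQUEUED value are illustrative):

#include <errno.h>
#include <stdio.h>

#define EIOCBQUEUED 529   /* kernel-internal value, reproduced here only for the demo */

struct obj { int refs; };

static void obj_get(struct obj *o) { o->refs++; }
static void obj_put(struct obj *o) { o->refs--; }

/* stand-in backend: pretends to queue work when async != 0 */
static int start_op(int async) { return async ? -EINPROGRESS : 0; }

static int submit(struct obj *o, int async)
{
    int err;

    obj_get(o);                          /* reference owned by the completion path */
    err = start_op(async);
    if (err == -EINPROGRESS || err == -EBUSY)
        return -EIOCBQUEUED;             /* completion callback will obj_put() */
    obj_put(o);                          /* finished inline: drop it ourselves */
    return err;
}

int main(void)
{
    struct obj o = { .refs = 1 };
    printf("sync : ret=%d refs=%d\n", submit(&o, 0), o.refs);  /* refs back to 1 */
    printf("async: ret=%d refs=%d\n", submit(&o, 1), o.refs);  /* refs=2 until callback runs */
    return 0;
}
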
768 +diff --git a/crypto/skcipher.c b/crypto/skcipher.c
769 +index d5692e35fab1..778e0ff42bfa 100644
770 +--- a/crypto/skcipher.c
771 ++++ b/crypto/skcipher.c
772 +@@ -522,6 +522,9 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
773 + scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
774 + scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
775 +
776 ++ scatterwalk_done(&walk->in, 0, walk->total);
777 ++ scatterwalk_done(&walk->out, 0, walk->total);
778 ++
779 + walk->iv = req->iv;
780 + walk->oiv = req->iv;
781 +
782 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
783 +index 82b3ce5e937e..df842465634a 100644
784 +--- a/drivers/acpi/ec.c
785 ++++ b/drivers/acpi/ec.c
786 +@@ -1597,32 +1597,41 @@ static int acpi_ec_add(struct acpi_device *device)
787 + {
788 + struct acpi_ec *ec = NULL;
789 + int ret;
790 ++ bool is_ecdt = false;
791 ++ acpi_status status;
792 +
793 + strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
794 + strcpy(acpi_device_class(device), ACPI_EC_CLASS);
795 +
796 +- ec = acpi_ec_alloc();
797 +- if (!ec)
798 +- return -ENOMEM;
799 +- if (ec_parse_device(device->handle, 0, ec, NULL) !=
800 +- AE_CTRL_TERMINATE) {
801 ++ if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
802 ++ is_ecdt = true;
803 ++ ec = boot_ec;
804 ++ } else {
805 ++ ec = acpi_ec_alloc();
806 ++ if (!ec)
807 ++ return -ENOMEM;
808 ++ status = ec_parse_device(device->handle, 0, ec, NULL);
809 ++ if (status != AE_CTRL_TERMINATE) {
810 + ret = -EINVAL;
811 + goto err_alloc;
812 ++ }
813 + }
814 +
815 + if (acpi_is_boot_ec(ec)) {
816 +- boot_ec_is_ecdt = false;
817 +- /*
818 +- * Trust PNP0C09 namespace location rather than ECDT ID.
819 +- *
820 +- * But trust ECDT GPE rather than _GPE because of ASUS quirks,
821 +- * so do not change boot_ec->gpe to ec->gpe.
822 +- */
823 +- boot_ec->handle = ec->handle;
824 +- acpi_handle_debug(ec->handle, "duplicated.\n");
825 +- acpi_ec_free(ec);
826 +- ec = boot_ec;
827 +- ret = acpi_config_boot_ec(ec, ec->handle, true, false);
828 ++ boot_ec_is_ecdt = is_ecdt;
829 ++ if (!is_ecdt) {
830 ++ /*
831 ++ * Trust PNP0C09 namespace location rather than
832 ++ * ECDT ID. But trust ECDT GPE rather than _GPE
833 ++ * because of ASUS quirks, so do not change
834 ++ * boot_ec->gpe to ec->gpe.
835 ++ */
836 ++ boot_ec->handle = ec->handle;
837 ++ acpi_handle_debug(ec->handle, "duplicated.\n");
838 ++ acpi_ec_free(ec);
839 ++ ec = boot_ec;
840 ++ }
841 ++ ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt);
842 + } else
843 + ret = acpi_ec_setup(ec, true);
844 + if (ret)
845 +@@ -1635,8 +1644,10 @@ static int acpi_ec_add(struct acpi_device *device)
846 + ret = !!request_region(ec->command_addr, 1, "EC cmd");
847 + WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
848 +
849 +- /* Reprobe devices depending on the EC */
850 +- acpi_walk_dep_device_list(ec->handle);
851 ++ if (!is_ecdt) {
852 ++ /* Reprobe devices depending on the EC */
853 ++ acpi_walk_dep_device_list(ec->handle);
854 ++ }
855 + acpi_handle_debug(ec->handle, "enumerated.\n");
856 + return 0;
857 +
858 +@@ -1692,6 +1703,7 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
859 +
860 + static const struct acpi_device_id ec_device_ids[] = {
861 + {"PNP0C09", 0},
862 ++ {ACPI_ECDT_HID, 0},
863 + {"", 0},
864 + };
865 +
866 +@@ -1764,11 +1776,14 @@ static int __init acpi_ec_ecdt_start(void)
867 + * Note: ec->handle can be valid if this function is called after
868 + * acpi_ec_add(), hence the fast path.
869 + */
870 +- if (boot_ec->handle != ACPI_ROOT_OBJECT)
871 +- handle = boot_ec->handle;
872 +- else if (!acpi_ec_ecdt_get_handle(&handle))
873 +- return -ENODEV;
874 +- return acpi_config_boot_ec(boot_ec, handle, true, true);
875 ++ if (boot_ec->handle == ACPI_ROOT_OBJECT) {
876 ++ if (!acpi_ec_ecdt_get_handle(&handle))
877 ++ return -ENODEV;
878 ++ boot_ec->handle = handle;
879 ++ }
880 ++
881 ++ /* Register to ACPI bus with PM ops attached */
882 ++ return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
883 + }
884 +
885 + #if 0
886 +@@ -2020,6 +2035,12 @@ int __init acpi_ec_init(void)
887 +
888 + /* Drivers must be started after acpi_ec_query_init() */
889 + dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
890 ++ /*
891 ++ * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
892 ++ * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
893 ++ * settings but invalid DSDT settings.
894 ++ * https://bugzilla.kernel.org/show_bug.cgi?id=196847
895 ++ */
896 + ecdt_fail = acpi_ec_ecdt_start();
897 + return ecdt_fail && dsdt_fail ? -ENODEV : 0;
898 + }
899 +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
900 +index 4361c4415b4f..ede83d38beed 100644
901 +--- a/drivers/acpi/internal.h
902 ++++ b/drivers/acpi/internal.h
903 +@@ -115,6 +115,7 @@ bool acpi_device_is_present(const struct acpi_device *adev);
904 + bool acpi_device_is_battery(struct acpi_device *adev);
905 + bool acpi_device_is_first_physical_node(struct acpi_device *adev,
906 + const struct device *dev);
907 ++int acpi_bus_register_early_device(int type);
908 +
909 + /* --------------------------------------------------------------------------
910 + Device Matching and Notification
911 +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
912 +index 602f8ff212f2..2f2f50322ffb 100644
913 +--- a/drivers/acpi/scan.c
914 ++++ b/drivers/acpi/scan.c
915 +@@ -1024,6 +1024,9 @@ static void acpi_device_get_busid(struct acpi_device *device)
916 + case ACPI_BUS_TYPE_SLEEP_BUTTON:
917 + strcpy(device->pnp.bus_id, "SLPF");
918 + break;
919 ++ case ACPI_BUS_TYPE_ECDT_EC:
920 ++ strcpy(device->pnp.bus_id, "ECDT");
921 ++ break;
922 + default:
923 + acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
924 + /* Clean up trailing underscores (if any) */
925 +@@ -1304,6 +1307,9 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
926 + case ACPI_BUS_TYPE_SLEEP_BUTTON:
927 + acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
928 + break;
929 ++ case ACPI_BUS_TYPE_ECDT_EC:
930 ++ acpi_add_id(pnp, ACPI_ECDT_HID);
931 ++ break;
932 + }
933 + }
934 +
935 +@@ -2049,6 +2055,21 @@ void acpi_bus_trim(struct acpi_device *adev)
936 + }
937 + EXPORT_SYMBOL_GPL(acpi_bus_trim);
938 +
939 ++int acpi_bus_register_early_device(int type)
940 ++{
941 ++ struct acpi_device *device = NULL;
942 ++ int result;
943 ++
944 ++ result = acpi_add_single_object(&device, NULL,
945 ++ type, ACPI_STA_DEFAULT);
946 ++ if (result)
947 ++ return result;
948 ++
949 ++ device->flags.match_driver = true;
950 ++ return device_attach(&device->dev);
951 ++}
952 ++EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
953 ++
954 + static int acpi_bus_scan_fixed(void)
955 + {
956 + int result = 0;
957 +diff --git a/drivers/dax/device.c b/drivers/dax/device.c
958 +index e9f3b3e4bbf4..375b99bca002 100644
959 +--- a/drivers/dax/device.c
960 ++++ b/drivers/dax/device.c
961 +@@ -427,9 +427,21 @@ static int dev_dax_fault(struct vm_fault *vmf)
962 + return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
963 + }
964 +
965 ++static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
966 ++{
967 ++ struct file *filp = vma->vm_file;
968 ++ struct dev_dax *dev_dax = filp->private_data;
969 ++ struct dax_region *dax_region = dev_dax->region;
970 ++
971 ++ if (!IS_ALIGNED(addr, dax_region->align))
972 ++ return -EINVAL;
973 ++ return 0;
974 ++}
975 ++
976 + static const struct vm_operations_struct dax_vm_ops = {
977 + .fault = dev_dax_fault,
978 + .huge_fault = dev_dax_huge_fault,
979 ++ .split = dev_dax_split,
980 + };
981 +
982 + static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
983 +diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
984 +index dec3a815455d..b44d9d7db347 100644
985 +--- a/drivers/dma-buf/reservation.c
986 ++++ b/drivers/dma-buf/reservation.c
987 +@@ -266,8 +266,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
988 + * @dst: the destination reservation object
989 + * @src: the source reservation object
990 + *
991 +-* Copy all fences from src to dst. Both src->lock as well as dst-lock must be
992 +-* held.
993 ++* Copy all fences from src to dst. dst->lock must be held.
994 + */
995 + int reservation_object_copy_fences(struct reservation_object *dst,
996 + struct reservation_object *src)
997 +@@ -277,33 +276,62 @@ int reservation_object_copy_fences(struct reservation_object *dst,
998 + size_t size;
999 + unsigned i;
1000 +
1001 +- src_list = reservation_object_get_list(src);
1002 ++ rcu_read_lock();
1003 ++ src_list = rcu_dereference(src->fence);
1004 +
1005 ++retry:
1006 + if (src_list) {
1007 +- size = offsetof(typeof(*src_list),
1008 +- shared[src_list->shared_count]);
1009 ++ unsigned shared_count = src_list->shared_count;
1010 ++
1011 ++ size = offsetof(typeof(*src_list), shared[shared_count]);
1012 ++ rcu_read_unlock();
1013 ++
1014 + dst_list = kmalloc(size, GFP_KERNEL);
1015 + if (!dst_list)
1016 + return -ENOMEM;
1017 +
1018 +- dst_list->shared_count = src_list->shared_count;
1019 +- dst_list->shared_max = src_list->shared_count;
1020 +- for (i = 0; i < src_list->shared_count; ++i)
1021 +- dst_list->shared[i] =
1022 +- dma_fence_get(src_list->shared[i]);
1023 ++ rcu_read_lock();
1024 ++ src_list = rcu_dereference(src->fence);
1025 ++ if (!src_list || src_list->shared_count > shared_count) {
1026 ++ kfree(dst_list);
1027 ++ goto retry;
1028 ++ }
1029 ++
1030 ++ dst_list->shared_count = 0;
1031 ++ dst_list->shared_max = shared_count;
1032 ++ for (i = 0; i < src_list->shared_count; ++i) {
1033 ++ struct dma_fence *fence;
1034 ++
1035 ++ fence = rcu_dereference(src_list->shared[i]);
1036 ++ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1037 ++ &fence->flags))
1038 ++ continue;
1039 ++
1040 ++ if (!dma_fence_get_rcu(fence)) {
1041 ++ kfree(dst_list);
1042 ++ src_list = rcu_dereference(src->fence);
1043 ++ goto retry;
1044 ++ }
1045 ++
1046 ++ if (dma_fence_is_signaled(fence)) {
1047 ++ dma_fence_put(fence);
1048 ++ continue;
1049 ++ }
1050 ++
1051 ++ dst_list->shared[dst_list->shared_count++] = fence;
1052 ++ }
1053 + } else {
1054 + dst_list = NULL;
1055 + }
1056 +
1057 ++ new = dma_fence_get_rcu_safe(&src->fence_excl);
1058 ++ rcu_read_unlock();
1059 ++
1060 + kfree(dst->staged);
1061 + dst->staged = NULL;
1062 +
1063 + src_list = reservation_object_get_list(dst);
1064 +-
1065 + old = reservation_object_get_excl(dst);
1066 +- new = reservation_object_get_excl(src);
1067 +-
1068 +- dma_fence_get(new);
1069 +
1070 + preempt_disable();
1071 + write_seqcount_begin(&dst->seq);
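
Stripped of the RCU and fence details, the new copy path is a size-then-revalidate snapshot: sample the count without holding a lock, allocate (which cannot be done inside the read section), then re-check and retry if the list grew in between. The bare skeleton of that loop:

#include <stdatomic.h>
#include <stdlib.h>

struct flist {
    _Atomic unsigned int count;
    void *items[64];
};

/* snapshot a list that may grow concurrently */
static void **snapshot(struct flist *src, unsigned int *out_n)
{
    unsigned int n;
    void **copy;

retry:
    n = atomic_load(&src->count);
    copy = malloc(n * sizeof(*copy));     /* allocation happens outside the read section */
    if (!copy)
        return NULL;
    if (atomic_load(&src->count) > n) {   /* grew meanwhile: buffer is too small */
        free(copy);
        goto retry;
    }
    for (unsigned int i = 0; i < n; i++)
        copy[i] = src->items[i];
    *out_n = n;
    return copy;
}

int main(void)
{
    struct flist l = { .count = 2, .items = { (void *)1, (void *)2 } };
    unsigned int n;
    void **c = snapshot(&l, &n);
    free(c);
    return c && n == 2 ? 0 : 1;
}
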
1072 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1073 +index 103635ab784c..87801faaf264 100644
1074 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1075 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1076 +@@ -1536,18 +1536,14 @@ struct amdgpu_device {
1077 + /* sdma */
1078 + struct amdgpu_sdma sdma;
1079 +
1080 +- union {
1081 +- struct {
1082 +- /* uvd */
1083 +- struct amdgpu_uvd uvd;
1084 +-
1085 +- /* vce */
1086 +- struct amdgpu_vce vce;
1087 +- };
1088 +-
1089 +- /* vcn */
1090 +- struct amdgpu_vcn vcn;
1091 +- };
1092 ++ /* uvd */
1093 ++ struct amdgpu_uvd uvd;
1094 ++
1095 ++ /* vce */
1096 ++ struct amdgpu_vce vce;
1097 ++
1098 ++ /* vcn */
1099 ++ struct amdgpu_vcn vcn;
1100 +
1101 + /* firmwares */
1102 + struct amdgpu_firmware firmware;
1103 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
1104 +index ce443586a0c7..cc4e18dcd8b6 100644
1105 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
1106 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
1107 +@@ -1766,34 +1766,32 @@ bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
1108 + return true;
1109 + }
1110 +
1111 +-/* Atom needs data in little endian format
1112 +- * so swap as appropriate when copying data to
1113 +- * or from atom. Note that atom operates on
1114 +- * dw units.
1115 ++/* Atom needs data in little endian format so swap as appropriate when copying
1116 ++ * data to or from atom. Note that atom operates on dw units.
1117 ++ *
1118 ++ * Use to_le=true when sending data to atom and provide at least
1119 ++ * ALIGN(num_bytes,4) bytes in the dst buffer.
1120 ++ *
1121 ++ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
1122 ++ * bytes in the src buffer.
1123 + */
1124 + void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
1125 + {
1126 + #ifdef __BIG_ENDIAN
1127 +- u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
1128 +- u32 *dst32, *src32;
1129 ++ u32 src_tmp[5], dst_tmp[5];
1130 + int i;
1131 ++ u8 align_num_bytes = ALIGN(num_bytes, 4);
1132 +
1133 +- memcpy(src_tmp, src, num_bytes);
1134 +- src32 = (u32 *)src_tmp;
1135 +- dst32 = (u32 *)dst_tmp;
1136 + if (to_le) {
1137 +- for (i = 0; i < ((num_bytes + 3) / 4); i++)
1138 +- dst32[i] = cpu_to_le32(src32[i]);
1139 +- memcpy(dst, dst_tmp, num_bytes);
1140 ++ memcpy(src_tmp, src, num_bytes);
1141 ++ for (i = 0; i < align_num_bytes / 4; i++)
1142 ++ dst_tmp[i] = cpu_to_le32(src_tmp[i]);
1143 ++ memcpy(dst, dst_tmp, align_num_bytes);
1144 + } else {
1145 +- u8 dws = num_bytes & ~3;
1146 +- for (i = 0; i < ((num_bytes + 3) / 4); i++)
1147 +- dst32[i] = le32_to_cpu(src32[i]);
1148 +- memcpy(dst, dst_tmp, dws);
1149 +- if (num_bytes % 4) {
1150 +- for (i = 0; i < (num_bytes % 4); i++)
1151 +- dst[dws+i] = dst_tmp[dws+i];
1152 +- }
1153 ++ memcpy(src_tmp, src, align_num_bytes);
1154 ++ for (i = 0; i < align_num_bytes / 4; i++)
1155 ++ dst_tmp[i] = le32_to_cpu(src_tmp[i]);
1156 ++ memcpy(dst, dst_tmp, num_bytes);
1157 + }
1158 + #else
1159 + memcpy(dst, src, num_bytes);
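
The rewrite swaps whole dwords, so one side of the copy now touches ALIGN(num_bytes, 4) bytes, which is why the new comment warns callers to provide that much slack. A host-side sketch of the to_le direction for a 6-byte payload (the cpu_to_le32 stand-in is a portable equivalent, not the kernel macro):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)

/* portable cpu_to_le32: store least-significant byte first, reread in host order
 * (identity on little-endian hosts, a byte swap on big-endian ones) */
static uint32_t cpu_to_le32_demo(uint32_t v)
{
    uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8), (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
    uint32_t le;
    memcpy(&le, b, 4);
    return le;
}

int main(void)
{
    uint8_t src[8] = { 1, 2, 3, 4, 5, 6 };   /* 6 real bytes, 2 bytes of slack */
    uint8_t dst[8];
    uint32_t tmp[2] = { 0 };
    unsigned int num_bytes = 6, aligned = ALIGN4(num_bytes);

    memcpy(tmp, src, num_bytes);
    for (unsigned int i = 0; i < aligned / 4; i++)
        tmp[i] = cpu_to_le32_demo(tmp[i]);
    memcpy(dst, tmp, aligned);               /* writes ALIGN(num_bytes, 4) bytes */

    printf("wrote %u bytes for a %u-byte payload\n", aligned, num_bytes);
    return 0;
}
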
1160 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
1161 +index c21adf60a7f2..057e1ecd83ce 100644
1162 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
1163 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
1164 +@@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
1165 + return false;
1166 + }
1167 +
1168 +- tmp = bios[0x18] | (bios[0x19] << 8);
1169 +- if (bios[tmp + 0x14] != 0x0) {
1170 +- DRM_INFO("Not an x86 BIOS ROM\n");
1171 +- return false;
1172 +- }
1173 +-
1174 + bios_header_start = bios[0x48] | (bios[0x49] << 8);
1175 + if (!bios_header_start) {
1176 + DRM_INFO("Can't locate bios header\n");
1177 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
1178 +index 9e495da0bb03..ffe483980362 100644
1179 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
1180 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
1181 +@@ -391,6 +391,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
1182 + r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
1183 + &bo->placement, page_align, !kernel, NULL,
1184 + acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
1185 ++ if (unlikely(r != 0))
1186 ++ return r;
1187 ++
1188 + bytes_moved = atomic64_read(&adev->num_bytes_moved) -
1189 + initial_bytes_moved;
1190 + if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
1191 +@@ -400,9 +403,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
1192 + else
1193 + amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
1194 +
1195 +- if (unlikely(r != 0))
1196 +- return r;
1197 +-
1198 + if (kernel)
1199 + bo->tbo.priority = 1;
1200 +
1201 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1202 +index c855366521ab..9fc3d387eae3 100644
1203 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1204 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1205 +@@ -647,7 +647,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
1206 + uint32_t allocated = 0;
1207 + uint32_t tmp, handle = 0;
1208 + uint32_t *size = &tmp;
1209 +- int i, r, idx = 0;
1210 ++ int i, r = 0, idx = 0;
1211 +
1212 + p->job->vm = NULL;
1213 + ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
1214 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1215 +index bd20ff018512..863c6dd0123a 100644
1216 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1217 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1218 +@@ -1201,7 +1201,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
1219 + int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1220 + struct amdgpu_vm *vm)
1221 + {
1222 +- int r;
1223 ++ int r = 0;
1224 +
1225 + r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
1226 + if (r)
1227 +@@ -2586,7 +2586,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1228 + {
1229 + struct amdgpu_bo_va_mapping *mapping, *tmp;
1230 + bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
1231 +- int i;
1232 ++ struct amdgpu_bo *root;
1233 ++ int i, r;
1234 +
1235 + amd_sched_entity_fini(vm->entity.sched, &vm->entity);
1236 +
1237 +@@ -2609,7 +2610,15 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1238 + amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
1239 + }
1240 +
1241 +- amdgpu_vm_free_levels(&vm->root);
1242 ++ root = amdgpu_bo_ref(vm->root.bo);
1243 ++ r = amdgpu_bo_reserve(root, true);
1244 ++ if (r) {
1245 ++ dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
1246 ++ } else {
1247 ++ amdgpu_vm_free_levels(&vm->root);
1248 ++ amdgpu_bo_unreserve(root);
1249 ++ }
1250 ++ amdgpu_bo_unref(&root);
1251 + dma_fence_put(vm->last_dir_update);
1252 + for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
1253 + amdgpu_vm_free_reserved_vmid(adev, vm, i);
1254 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
1255 +index d04d0b123212..6dc0f6e346e7 100644
1256 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
1257 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
1258 +@@ -395,7 +395,16 @@ static int gmc_v9_0_early_init(void *handle)
1259 + static int gmc_v9_0_late_init(void *handle)
1260 + {
1261 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1262 +- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
1263 ++ /*
1264 ++ * The latest engine allocation on gfx9 is:
1265 ++ * Engine 0, 1: idle
1266 ++ * Engine 2, 3: firmware
1267 ++ * Engine 4~13: amdgpu ring, subject to change when ring number changes
1268 ++ * Engine 14~15: idle
1269 ++ * Engine 16: kfd tlb invalidation
1270 ++ * Engine 17: Gart flushes
1271 ++ */
1272 ++ unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
1273 + unsigned i;
1274 +
1275 + for(i = 0; i < adev->num_rings; ++i) {
1276 +@@ -408,9 +417,9 @@ static int gmc_v9_0_late_init(void *handle)
1277 + ring->funcs->vmhub);
1278 + }
1279 +
1280 +- /* Engine 17 is used for GART flushes */
1281 ++ /* Engine 16 is used for KFD and 17 for GART flushes */
1282 + for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
1283 +- BUG_ON(vm_inv_eng[i] > 17);
1284 ++ BUG_ON(vm_inv_eng[i] > 16);
1285 +
1286 + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
1287 + }
1288 +diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
1289 +index f2c3a49f73a0..3e59c766722c 100644
1290 +--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
1291 ++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
1292 +@@ -279,10 +279,7 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev)
1293 + }
1294 + static u32 soc15_get_xclk(struct amdgpu_device *adev)
1295 + {
1296 +- if (adev->asic_type == CHIP_VEGA10)
1297 +- return adev->clock.spll.reference_freq/4;
1298 +- else
1299 +- return adev->clock.spll.reference_freq;
1300 ++ return adev->clock.spll.reference_freq;
1301 + }
1302 +
1303 +
1304 +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
1305 +index 21e7b88401e1..a098712bdd2f 100644
1306 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
1307 ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
1308 +@@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
1309 +
1310 + static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
1311 + {
1312 +- adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
1313 ++ adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
1314 + adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
1315 + }
1316 +
1317 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
1318 +index 84f01fd33aff..b50aa292d026 100644
1319 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
1320 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
1321 +@@ -850,9 +850,9 @@ static int init_over_drive_limits(
1322 + const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
1323 + {
1324 + hwmgr->platform_descriptor.overdriveLimit.engineClock =
1325 +- le16_to_cpu(powerplay_table->ulMaxODEngineClock);
1326 ++ le32_to_cpu(powerplay_table->ulMaxODEngineClock);
1327 + hwmgr->platform_descriptor.overdriveLimit.memoryClock =
1328 +- le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
1329 ++ le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
1330 +
1331 + hwmgr->platform_descriptor.minOverdriveVDDC = 0;
1332 + hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
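
The fix is a plain width mismatch: ulMaxODEngineClock is a 32-bit little-endian field, and le16_to_cpu() silently kept only the low half. A short demonstration of the truncation on a little-endian host (the clock value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t raw = 0x00018e70;       /* hypothetical clock of 102000 (10 kHz units) */
    uint16_t as16 = (uint16_t)raw;   /* what le16_to_cpu() effectively kept */

    printf("le32: %u\n", raw);              /* 102000 */
    printf("le16: %u\n", (unsigned)as16);   /* 36464 - upper bits lost */
    return 0;
}
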
1333 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1334 +index 6bb6337be920..fc7946eb6665 100644
1335 +--- a/drivers/gpu/drm/drm_edid.c
1336 ++++ b/drivers/gpu/drm/drm_edid.c
1337 +@@ -4809,7 +4809,8 @@ void
1338 + drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
1339 + const struct drm_display_mode *mode,
1340 + enum hdmi_quantization_range rgb_quant_range,
1341 +- bool rgb_quant_range_selectable)
1342 ++ bool rgb_quant_range_selectable,
1343 ++ bool is_hdmi2_sink)
1344 + {
1345 + /*
1346 + * CEA-861:
1347 +@@ -4833,8 +4834,15 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
1348 + * YQ-field to match the RGB Quantization Range being transmitted
1349 + * (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
1350 + * set YQ=1) and the Sink shall ignore the YQ-field."
1351 ++ *
1352 ++ * Unfortunately, certain sinks (e.g. VIZ Model 67/E261VA) get confused
1353 ++ * by non-zero YQ when receiving RGB. There doesn't seem to be any
1354 ++ * good way to tell which version of CEA-861 the sink supports, so
1355 ++ * we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based
1356 ++ * on CEA-861-F.
1357 + */
1358 +- if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
1359 ++ if (!is_hdmi2_sink ||
1360 ++ rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
1361 + frame->ycc_quantization_range =
1362 + HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
1363 + else
1364 +diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
1365 +index 1b8f013ffa65..5e93589c335c 100644
1366 +--- a/drivers/gpu/drm/drm_fb_helper.c
1367 ++++ b/drivers/gpu/drm/drm_fb_helper.c
1368 +@@ -1809,6 +1809,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
1369 +
1370 + if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
1371 + DRM_INFO("Cannot find any crtc or sizes\n");
1372 ++
1373 ++ /* First time: disable all crtc's.. */
1374 ++ if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master))
1375 ++ restore_fbdev_mode(fb_helper);
1376 + return -EAGAIN;
1377 + }
1378 +
1379 +diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
1380 +index 70f2b9593edc..17e8ef9a1c11 100644
1381 +--- a/drivers/gpu/drm/drm_vblank.c
1382 ++++ b/drivers/gpu/drm/drm_vblank.c
1383 +@@ -311,8 +311,8 @@ u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
1384 + u32 vblank;
1385 + unsigned long flags;
1386 +
1387 +- WARN(!dev->driver->get_vblank_timestamp,
1388 +- "This function requires support for accurate vblank timestamps.");
1389 ++ WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
1390 ++ "This function requires support for accurate vblank timestamps.");
1391 +
1392 + spin_lock_irqsave(&dev->vblank_time_lock, flags);
1393 +
1394 +@@ -869,7 +869,7 @@ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
1395 + assert_spin_locked(&dev->event_lock);
1396 +
1397 + e->pipe = pipe;
1398 +- e->event.sequence = drm_vblank_count(dev, pipe);
1399 ++ e->event.sequence = drm_crtc_accurate_vblank_count(crtc) + 1;
1400 + e->event.crtc_id = crtc->base.id;
1401 + list_add_tail(&e->base.link, &dev->vblank_event_list);
1402 + }
1403 +diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
1404 +index edd7d8127d19..c54806d08dd7 100644
1405 +--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
1406 ++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
1407 +@@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
1408 + {
1409 + struct drm_encoder *encoder = &fsl_dev->encoder;
1410 + struct drm_connector *connector = &fsl_dev->connector.base;
1411 +- struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
1412 + int ret;
1413 +
1414 + fsl_dev->connector.encoder = encoder;
1415 +@@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
1416 + if (ret < 0)
1417 + goto err_sysfs;
1418 +
1419 +- drm_object_property_set_value(&connector->base,
1420 +- mode_config->dpms_property,
1421 +- DRM_MODE_DPMS_OFF);
1422 +-
1423 + ret = drm_panel_attach(panel, connector);
1424 + if (ret) {
1425 + dev_err(fsl_dev->dev, "failed to attach panel\n");
1426 +diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
1427 +index 9823477b1855..2269be91f3e1 100644
1428 +--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
1429 ++++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
1430 +@@ -534,9 +534,12 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
1431 + {
1432 + struct ade_crtc *acrtc = to_ade_crtc(crtc);
1433 + struct ade_hw_ctx *ctx = acrtc->ctx;
1434 ++ struct drm_display_mode *mode = &crtc->state->mode;
1435 ++ struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
1436 +
1437 + if (!ctx->power_on)
1438 + (void)ade_power_up(ctx);
1439 ++ ade_ldi_set_mode(acrtc, mode, adj_mode);
1440 + }
1441 +
1442 + static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
1443 +diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
1444 +index e6dfc3331f4b..a385838e2919 100644
1445 +--- a/drivers/gpu/drm/i915/gvt/gtt.c
1446 ++++ b/drivers/gpu/drm/i915/gvt/gtt.c
1447 +@@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt,
1448 +
1449 + #define GTT_HAW 46
1450 +
1451 +-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
1452 +-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
1453 +-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
1454 ++#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
1455 ++#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
1456 ++#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
1457 +
1458 + static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
1459 + {
1460 +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1461 +index 9f45cfeae775..82498f8232eb 100644
1462 +--- a/drivers/gpu/drm/i915/i915_drv.c
1463 ++++ b/drivers/gpu/drm/i915/i915_drv.c
1464 +@@ -2591,6 +2591,8 @@ static int intel_runtime_resume(struct device *kdev)
1465 + ret = vlv_resume_prepare(dev_priv, true);
1466 + }
1467 +
1468 ++ intel_uncore_runtime_resume(dev_priv);
1469 ++
1470 + /*
1471 + * No point of rolling back things in case of an error, as the best
1472 + * we can do is to hope that things will still work (and disable RPM).
1473 +diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
1474 +index 262e75c00dd2..da2d309574ba 100644
1475 +--- a/drivers/gpu/drm/i915/intel_fbdev.c
1476 ++++ b/drivers/gpu/drm/i915/intel_fbdev.c
1477 +@@ -694,10 +694,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
1478 +
1479 + /* Due to peculiar init order wrt to hpd handling this is separate. */
1480 + if (drm_fb_helper_initial_config(&ifbdev->helper,
1481 +- ifbdev->preferred_bpp)) {
1482 ++ ifbdev->preferred_bpp))
1483 + intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
1484 +- intel_fbdev_fini(to_i915(ifbdev->helper.dev));
1485 +- }
1486 + }
1487 +
1488 + void intel_fbdev_initial_config_async(struct drm_device *dev)
1489 +@@ -797,7 +795,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
1490 + {
1491 + struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
1492 +
1493 +- if (ifbdev)
1494 ++ if (!ifbdev)
1495 ++ return;
1496 ++
1497 ++ intel_fbdev_sync(ifbdev);
1498 ++ if (ifbdev->vma)
1499 + drm_fb_helper_hotplug_event(&ifbdev->helper);
1500 + }
1501 +
1502 +diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
1503 +index e8abea7594ec..3fed1d3ecded 100644
1504 +--- a/drivers/gpu/drm/i915/intel_hdmi.c
1505 ++++ b/drivers/gpu/drm/i915/intel_hdmi.c
1506 +@@ -481,7 +481,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
1507 + crtc_state->limited_color_range ?
1508 + HDMI_QUANTIZATION_RANGE_LIMITED :
1509 + HDMI_QUANTIZATION_RANGE_FULL,
1510 +- intel_hdmi->rgb_quant_range_selectable);
1511 ++ intel_hdmi->rgb_quant_range_selectable,
1512 ++ is_hdmi2_sink);
1513 +
1514 + /* TODO: handle pixel repetition for YCBCR420 outputs */
1515 + intel_write_infoframe(encoder, crtc_state, &frame);
1516 +diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
1517 +index eb5827110d8f..49fdf09f9919 100644
1518 +--- a/drivers/gpu/drm/i915/intel_i2c.c
1519 ++++ b/drivers/gpu/drm/i915/intel_i2c.c
1520 +@@ -438,7 +438,9 @@ static bool
1521 + gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
1522 + {
1523 + return (i + 1 < num &&
1524 +- !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
1525 ++ msgs[i].addr == msgs[i + 1].addr &&
1526 ++ !(msgs[i].flags & I2C_M_RD) &&
1527 ++ (msgs[i].len == 1 || msgs[i].len == 2) &&
1528 + (msgs[i + 1].flags & I2C_M_RD));
1529 + }
1530 +
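[Annotation: the tightened gmbus_is_index_read() above now also requires the write and the read to address the same device, and the index write to be exactly one or two bytes; the old len <= 2 also let a zero-length write through. A pared-down userspace rendering of the predicate, where struct msg is a stand-in for the kernel's struct i2c_msg:

#include <stdbool.h>
#include <stdint.h>

#define I2C_M_RD 0x0001		/* matches the kernel flag value */

struct msg {
	uint16_t addr;
	uint16_t flags;
	uint16_t len;
};

/* true when msgs[i] is a 1- or 2-byte write that indexes the read msgs[i+1] */
static bool is_index_read(const struct msg *msgs, int i, int num)
{
	return i + 1 < num &&
	       msgs[i].addr == msgs[i + 1].addr &&
	       !(msgs[i].flags & I2C_M_RD) &&
	       (msgs[i].len == 1 || msgs[i].len == 2) &&
	       (msgs[i + 1].flags & I2C_M_RD);
}

int main(void)
{
	struct msg pair[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1 },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 4 },
	};

	return is_index_read(pair, 0, 2) ? 0 : 1;	/* qualifies: exit 0 */
}]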
1531 +diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
1532 +index 1d7b879cc68c..e9ed02518406 100644
1533 +--- a/drivers/gpu/drm/i915/intel_uncore.c
1534 ++++ b/drivers/gpu/drm/i915/intel_uncore.c
1535 +@@ -434,6 +434,12 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
1536 + i915_check_and_clear_faults(dev_priv);
1537 + }
1538 +
1539 ++void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
1540 ++{
1541 ++ iosf_mbi_register_pmic_bus_access_notifier(
1542 ++ &dev_priv->uncore.pmic_bus_access_nb);
1543 ++}
1544 ++
1545 + void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
1546 + {
1547 + i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
1548 +@@ -1171,8 +1177,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
1549 + * bus, which will be busy after this notification, leading to:
1550 + * "render: timed out waiting for forcewake ack request."
1551 + * errors.
1552 ++ *
1553 ++ * The notifier is unregistered during intel_runtime_suspend(),
1554 ++ * so it's ok to access the HW here without holding an RPM
1555 ++ * wake reference -> disable wakeref asserts for the time of
1556 ++ * the access.
1557 + */
1558 ++ disable_rpm_wakeref_asserts(dev_priv);
1559 + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1560 ++ enable_rpm_wakeref_asserts(dev_priv);
1561 + break;
1562 + case MBI_PMIC_BUS_ACCESS_END:
1563 + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1564 +diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
1565 +index 5f90278da461..0bdc3fcc0e64 100644
1566 +--- a/drivers/gpu/drm/i915/intel_uncore.h
1567 ++++ b/drivers/gpu/drm/i915/intel_uncore.h
1568 +@@ -121,6 +121,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv
1569 + void intel_uncore_fini(struct drm_i915_private *dev_priv);
1570 + void intel_uncore_suspend(struct drm_i915_private *dev_priv);
1571 + void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
1572 ++void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
1573 +
1574 + u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
1575 + void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
1576 +diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
1577 +index daf286fc8a40..ca1e3b489540 100644
1578 +--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
1579 ++++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
1580 +@@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll)
1581 + }
1582 +
1583 + static const struct soc_device_attribute dpi_soc_devices[] = {
1584 +- { .family = "OMAP3[456]*" },
1585 +- { .family = "[AD]M37*" },
1586 ++ { .machine = "OMAP3[456]*" },
1587 ++ { .machine = "[AD]M37*" },
1588 + { /* sentinel */ }
1589 + };
1590 +
1591 +diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
1592 +index 365cf07daa01..c3453f3bd603 100644
1593 +--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
1594 ++++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
1595 +@@ -889,25 +889,36 @@ struct hdmi4_features {
1596 + bool audio_use_mclk;
1597 + };
1598 +
1599 +-static const struct hdmi4_features hdmi4_es1_features = {
1600 ++static const struct hdmi4_features hdmi4430_es1_features = {
1601 + .cts_swmode = false,
1602 + .audio_use_mclk = false,
1603 + };
1604 +
1605 +-static const struct hdmi4_features hdmi4_es2_features = {
1606 ++static const struct hdmi4_features hdmi4430_es2_features = {
1607 + .cts_swmode = true,
1608 + .audio_use_mclk = false,
1609 + };
1610 +
1611 +-static const struct hdmi4_features hdmi4_es3_features = {
1612 ++static const struct hdmi4_features hdmi4_features = {
1613 + .cts_swmode = true,
1614 + .audio_use_mclk = true,
1615 + };
1616 +
1617 + static const struct soc_device_attribute hdmi4_soc_devices[] = {
1618 +- { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
1619 +- { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
1620 +- { .family = "OMAP4", .data = &hdmi4_es3_features },
1621 ++ {
1622 ++ .machine = "OMAP4430",
1623 ++ .revision = "ES1.?",
1624 ++ .data = &hdmi4430_es1_features,
1625 ++ },
1626 ++ {
1627 ++ .machine = "OMAP4430",
1628 ++ .revision = "ES2.?",
1629 ++ .data = &hdmi4430_es2_features,
1630 ++ },
1631 ++ {
1632 ++ .family = "OMAP4",
1633 ++ .data = &hdmi4_features,
1634 ++ },
1635 + { /* sentinel */ }
1636 + };
1637 +
1638 +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
1639 +index 474fa759e06e..234af81fb3d0 100644
1640 +--- a/drivers/gpu/drm/panel/panel-simple.c
1641 ++++ b/drivers/gpu/drm/panel/panel-simple.c
1642 +@@ -369,6 +369,7 @@ static int panel_simple_remove(struct device *dev)
1643 + drm_panel_remove(&panel->base);
1644 +
1645 + panel_simple_disable(&panel->base);
1646 ++ panel_simple_unprepare(&panel->base);
1647 +
1648 + if (panel->ddc)
1649 + put_device(&panel->ddc->dev);
1650 +@@ -384,6 +385,7 @@ static void panel_simple_shutdown(struct device *dev)
1651 + struct panel_simple *panel = dev_get_drvdata(dev);
1652 +
1653 + panel_simple_disable(&panel->base);
1654 ++ panel_simple_unprepare(&panel->base);
1655 + }
1656 +
1657 + static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
1658 +diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
1659 +index 432cb46f6a34..fd7682bf335d 100644
1660 +--- a/drivers/gpu/drm/radeon/atombios_dp.c
1661 ++++ b/drivers/gpu/drm/radeon/atombios_dp.c
1662 +@@ -45,34 +45,32 @@ static char *pre_emph_names[] = {
1663 +
1664 + /***** radeon AUX functions *****/
1665 +
1666 +-/* Atom needs data in little endian format
1667 +- * so swap as appropriate when copying data to
1668 +- * or from atom. Note that atom operates on
1669 +- * dw units.
1670 ++/* Atom needs data in little endian format so swap as appropriate when copying
1671 ++ * data to or from atom. Note that atom operates on dw units.
1672 ++ *
1673 ++ * Use to_le=true when sending data to atom and provide at least
1674 ++ * ALIGN(num_bytes,4) bytes in the dst buffer.
1675 ++ *
1676 ++ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
1677 ++ * bytes in the src buffer.
1678 + */
1679 + void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
1680 + {
1681 + #ifdef __BIG_ENDIAN
1682 +- u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
1683 +- u32 *dst32, *src32;
1684 ++ u32 src_tmp[5], dst_tmp[5];
1685 + int i;
1686 ++ u8 align_num_bytes = ALIGN(num_bytes, 4);
1687 +
1688 +- memcpy(src_tmp, src, num_bytes);
1689 +- src32 = (u32 *)src_tmp;
1690 +- dst32 = (u32 *)dst_tmp;
1691 + if (to_le) {
1692 +- for (i = 0; i < ((num_bytes + 3) / 4); i++)
1693 +- dst32[i] = cpu_to_le32(src32[i]);
1694 +- memcpy(dst, dst_tmp, num_bytes);
1695 ++ memcpy(src_tmp, src, num_bytes);
1696 ++ for (i = 0; i < align_num_bytes / 4; i++)
1697 ++ dst_tmp[i] = cpu_to_le32(src_tmp[i]);
1698 ++ memcpy(dst, dst_tmp, align_num_bytes);
1699 + } else {
1700 +- u8 dws = num_bytes & ~3;
1701 +- for (i = 0; i < ((num_bytes + 3) / 4); i++)
1702 +- dst32[i] = le32_to_cpu(src32[i]);
1703 +- memcpy(dst, dst_tmp, dws);
1704 +- if (num_bytes % 4) {
1705 +- for (i = 0; i < (num_bytes % 4); i++)
1706 +- dst[dws+i] = dst_tmp[dws+i];
1707 +- }
1708 ++ memcpy(src_tmp, src, align_num_bytes);
1709 ++ for (i = 0; i < align_num_bytes / 4; i++)
1710 ++ dst_tmp[i] = le32_to_cpu(src_tmp[i]);
1711 ++ memcpy(dst, dst_tmp, num_bytes);
1712 + }
1713 + #else
1714 + memcpy(dst, src, num_bytes);
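[Annotation: the rewrite above drops the u8/u32 pointer aliasing and works in whole dwords in both directions: num_bytes is rounded up with ALIGN(num_bytes, 4), every dword is converted, and only the trailing memcpy length differs, which is why callers must leave ALIGN(num_bytes, 4) bytes of slack on the atom-facing side. A standalone sketch of the same shape that runs on a glibc host; htole32/le32toh stand in for cpu_to_le32/le32_to_cpu, and the names are otherwise illustrative:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)

static void copy_swap(uint8_t *dst, const uint8_t *src, uint8_t num_bytes,
		      int to_le)
{
	uint32_t src_tmp[5] = { 0 }, dst_tmp[5]; /* up to 20 bytes, as in the driver */
	uint8_t align_num_bytes = ALIGN4(num_bytes);
	unsigned int i;

	if (to_le) {
		memcpy(src_tmp, src, num_bytes);
		for (i = 0; i < align_num_bytes / 4; i++)
			dst_tmp[i] = htole32(src_tmp[i]);
		memcpy(dst, dst_tmp, align_num_bytes); /* dst has ALIGN4 slack */
	} else {
		memcpy(src_tmp, src, align_num_bytes); /* src provides ALIGN4 bytes */
		for (i = 0; i < align_num_bytes / 4; i++)
			dst_tmp[i] = le32toh(src_tmp[i]);
		memcpy(dst, dst_tmp, num_bytes);       /* trim to the real length */
	}
}

int main(void)
{
	uint8_t in[8] = { 1, 2, 3, 4, 5, 6 }, out[8] = { 0 };

	copy_swap(out, in, 6, 1);	/* 6 bytes, handled as two dwords */
	printf("%02x %02x\n", out[0], out[5]);
	return 0;
}]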
1715 +diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
1716 +index fd25361ac681..4ef967d1a9de 100644
1717 +--- a/drivers/gpu/drm/radeon/radeon_fb.c
1718 ++++ b/drivers/gpu/drm/radeon/radeon_fb.c
1719 +@@ -245,7 +245,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
1720 + }
1721 +
1722 + info->par = rfbdev;
1723 +- info->skip_vt_switch = true;
1724 +
1725 + ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
1726 + if (ret) {
1727 +diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
1728 +index 406fe4544b83..06d6e785c920 100644
1729 +--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
1730 ++++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
1731 +@@ -24,6 +24,7 @@
1732 + #include <linux/completion.h>
1733 + #include <linux/dma-mapping.h>
1734 + #include <linux/of_graph.h>
1735 ++#include <linux/math64.h>
1736 +
1737 + #include "tilcdc_drv.h"
1738 + #include "tilcdc_regs.h"
1739 +@@ -48,6 +49,7 @@ struct tilcdc_crtc {
1740 + unsigned int lcd_fck_rate;
1741 +
1742 + ktime_t last_vblank;
1743 ++ unsigned int hvtotal_us;
1744 +
1745 + struct drm_framebuffer *curr_fb;
1746 + struct drm_framebuffer *next_fb;
1747 +@@ -292,6 +294,12 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
1748 + LCDC_V2_CORE_CLK_EN);
1749 + }
1750 +
1751 ++uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
1752 ++{
1753 ++ return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
1754 ++ mode->clock);
1755 ++}
1756 ++
1757 + static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
1758 + {
1759 + struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
1760 +@@ -459,6 +467,9 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
1761 + drm_framebuffer_reference(fb);
1762 +
1763 + crtc->hwmode = crtc->state->adjusted_mode;
1764 ++
1765 ++ tilcdc_crtc->hvtotal_us =
1766 ++ tilcdc_mode_hvtotal(&crtc->hwmode);
1767 + }
1768 +
1769 + static void tilcdc_crtc_enable(struct drm_crtc *crtc)
1770 +@@ -648,7 +659,7 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
1771 + spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
1772 +
1773 + next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
1774 +- 1000000 / crtc->hwmode.vrefresh);
1775 ++ tilcdc_crtc->hvtotal_us);
1776 + tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
1777 +
1778 + if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
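[Annotation: tilcdc_mode_hvtotal() replaces the rounded vrefresh with the exact frame period. mode->clock is the pixel clock in kHz, so one frame of htotal * vtotal pixels lasts 1000 * htotal * vtotal / clock microseconds, which is what the next-vblank estimate above now uses. Checking the arithmetic in isolation; the mode values below are just a common 1080p60 timing:

#include <stdint.h>
#include <stdio.h>

static unsigned int hvtotal_us(uint64_t htotal, uint64_t vtotal,
			       uint64_t clock_khz)
{
	/* 1000 * pixels-per-frame / pixels-per-millisecond = microseconds */
	return (unsigned int)(1000 * htotal * vtotal / clock_khz);
}

int main(void)
{
	/* 1920x1080@60: htotal 2200, vtotal 1125, pixel clock 148500 kHz */
	printf("%u us per frame\n", hvtotal_us(2200, 1125, 148500)); /* 16666 */
	return 0;
}

For modes whose refresh rate is not a whole number, this differs from 1000000 / vrefresh by enough to matter for the vblank safety window.]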
1779 +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
1780 +index 180ce6296416..c088703777e2 100644
1781 +--- a/drivers/gpu/drm/ttm/ttm_bo.c
1782 ++++ b/drivers/gpu/drm/ttm/ttm_bo.c
1783 +@@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
1784 + ttm_tt_destroy(bo->ttm);
1785 + atomic_dec(&bo->glob->bo_count);
1786 + dma_fence_put(bo->moving);
1787 +- if (bo->resv == &bo->ttm_resv)
1788 +- reservation_object_fini(&bo->ttm_resv);
1789 ++ reservation_object_fini(&bo->ttm_resv);
1790 + mutex_destroy(&bo->wu_mutex);
1791 + if (bo->destroy)
1792 + bo->destroy(bo);
1793 +@@ -402,14 +401,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
1794 + if (bo->resv == &bo->ttm_resv)
1795 + return 0;
1796 +
1797 +- reservation_object_init(&bo->ttm_resv);
1798 + BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
1799 +
1800 + r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
1801 +- if (r) {
1802 ++ if (r)
1803 + reservation_object_unlock(&bo->ttm_resv);
1804 +- reservation_object_fini(&bo->ttm_resv);
1805 +- }
1806 +
1807 + return r;
1808 + }
1809 +@@ -440,28 +436,30 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
1810 + struct ttm_bo_global *glob = bo->glob;
1811 + int ret;
1812 +
1813 ++ ret = ttm_bo_individualize_resv(bo);
1814 ++ if (ret) {
1815 ++ /* Last resort: if we fail to allocate memory for the
1816 ++ * fences, block for the BO to become idle.
1817 ++ */
1818 ++ reservation_object_wait_timeout_rcu(bo->resv, true, false,
1819 ++ 30 * HZ);
1820 ++ spin_lock(&glob->lru_lock);
1821 ++ goto error;
1822 ++ }
1823 ++
1824 + spin_lock(&glob->lru_lock);
1825 + ret = __ttm_bo_reserve(bo, false, true, NULL);
1826 +-
1827 + if (!ret) {
1828 +- if (!ttm_bo_wait(bo, false, true)) {
1829 ++ if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
1830 + ttm_bo_del_from_lru(bo);
1831 + spin_unlock(&glob->lru_lock);
1832 +- ttm_bo_cleanup_memtype_use(bo);
1833 ++ if (bo->resv != &bo->ttm_resv)
1834 ++ reservation_object_unlock(&bo->ttm_resv);
1835 +
1836 +- return;
1837 +- }
1838 +-
1839 +- ret = ttm_bo_individualize_resv(bo);
1840 +- if (ret) {
1841 +- /* Last resort, if we fail to allocate memory for the
1842 +- * fences block for the BO to become idle and free it.
1843 +- */
1844 +- spin_unlock(&glob->lru_lock);
1845 +- ttm_bo_wait(bo, true, true);
1846 + ttm_bo_cleanup_memtype_use(bo);
1847 + return;
1848 + }
1849 ++
1850 + ttm_bo_flush_all_fences(bo);
1851 +
1852 + /*
1853 +@@ -474,11 +472,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
1854 + ttm_bo_add_to_lru(bo);
1855 + }
1856 +
1857 +- if (bo->resv != &bo->ttm_resv)
1858 +- reservation_object_unlock(&bo->ttm_resv);
1859 + __ttm_bo_unreserve(bo);
1860 + }
1861 ++ if (bo->resv != &bo->ttm_resv)
1862 ++ reservation_object_unlock(&bo->ttm_resv);
1863 +
1864 ++error:
1865 + kref_get(&bo->list_kref);
1866 + list_add_tail(&bo->ddestroy, &bdev->ddestroy);
1867 + spin_unlock(&glob->lru_lock);
1868 +@@ -1203,8 +1202,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1869 + lockdep_assert_held(&bo->resv->lock.base);
1870 + } else {
1871 + bo->resv = &bo->ttm_resv;
1872 +- reservation_object_init(&bo->ttm_resv);
1873 + }
1874 ++ reservation_object_init(&bo->ttm_resv);
1875 + atomic_inc(&bo->glob->bo_count);
1876 + drm_vma_node_reset(&bo->vma_node);
1877 + bo->priority = 0;
1878 +diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
1879 +index c934ad5b3903..7c2fbdbbd048 100644
1880 +--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
1881 ++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
1882 +@@ -474,6 +474,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
1883 + INIT_LIST_HEAD(&fbo->lru);
1884 + INIT_LIST_HEAD(&fbo->swap);
1885 + INIT_LIST_HEAD(&fbo->io_reserve_lru);
1886 ++ mutex_init(&fbo->wu_mutex);
1887 + fbo->moving = NULL;
1888 + drm_vma_node_reset(&fbo->vma_node);
1889 + atomic_set(&fbo->cpu_writers, 0);
1890 +diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
1891 +index 937da8dd65b8..8f71157a2b06 100644
1892 +--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
1893 ++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
1894 +@@ -433,7 +433,8 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
1895 + vc4_encoder->limited_rgb_range ?
1896 + HDMI_QUANTIZATION_RANGE_LIMITED :
1897 + HDMI_QUANTIZATION_RANGE_FULL,
1898 +- vc4_encoder->rgb_range_selectable);
1899 ++ vc4_encoder->rgb_range_selectable,
1900 ++ false);
1901 +
1902 + vc4_hdmi_write_infoframe(encoder, &frame);
1903 + }
1904 +diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
1905 +index 5f11dc014ed6..e5234f953a6d 100644
1906 +--- a/drivers/hwmon/jc42.c
1907 ++++ b/drivers/hwmon/jc42.c
1908 +@@ -22,6 +22,7 @@
1909 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
1910 + */
1911 +
1912 ++#include <linux/bitops.h>
1913 + #include <linux/module.h>
1914 + #include <linux/init.h>
1915 + #include <linux/slab.h>
1916 +@@ -45,6 +46,7 @@ static const unsigned short normal_i2c[] = {
1917 + #define JC42_REG_TEMP 0x05
1918 + #define JC42_REG_MANID 0x06
1919 + #define JC42_REG_DEVICEID 0x07
1920 ++#define JC42_REG_SMBUS 0x22 /* NXP and Atmel, possibly others? */
1921 +
1922 + /* Status bits in temperature register */
1923 + #define JC42_ALARM_CRIT_BIT 15
1924 +@@ -75,6 +77,9 @@ static const unsigned short normal_i2c[] = {
1925 + #define GT_MANID 0x1c68 /* Giantec */
1926 + #define GT_MANID2 0x132d /* Giantec, 2nd mfg ID */
1927 +
1928 ++/* SMBUS register */
1929 ++#define SMBUS_STMOUT BIT(7) /* SMBus time-out, active low */
1930 ++
1931 + /* Supported chips */
1932 +
1933 + /* Analog Devices */
1934 +@@ -495,6 +500,22 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
1935 +
1936 + data->extended = !!(cap & JC42_CAP_RANGE);
1937 +
1938 ++ if (device_property_read_bool(dev, "smbus-timeout-disable")) {
1939 ++ int smbus;
1940 ++
1941 ++ /*
1942 ++ * Not all chips support this register, but from a
1943 ++ * quick read of various datasheets no chip appears
1944 ++ * incompatible with the below attempt to disable
1945 ++ * the timeout. And the whole thing is opt-in...
1946 ++ */
1947 ++ smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS);
1948 ++ if (smbus < 0)
1949 ++ return smbus;
1950 ++ i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS,
1951 ++ smbus | SMBUS_STMOUT);
1952 ++ }
1953 ++
1954 + config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG);
1955 + if (config < 0)
1956 + return config;
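[Annotation: since SMBUS_STMOUT is documented as active low, writing the bit as 1 is what disables the chip's SMBus timeout; the probe path reads JC42_REG_SMBUS, ORs the bit in, and writes it back, and only when the opt-in device property is present. The bit manipulation on its own, with no I2C traffic and an invented register value:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define SMBUS_STMOUT	BIT(7)	/* SMBus time-out, active low */

int main(void)
{
	uint16_t reg = 0x0015;	/* pretend readback of JC42_REG_SMBUS */

	reg |= SMBUS_STMOUT;	/* setting the bit turns the timeout OFF */
	printf("reg = 0x%04x, timeout %s\n", reg,
	       (reg & SMBUS_STMOUT) ? "disabled" : "enabled");
	return 0;
}]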
1957 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
1958 +index 9e12a53ef7b8..8eac00efadc1 100644
1959 +--- a/drivers/i2c/busses/i2c-i801.c
1960 ++++ b/drivers/i2c/busses/i2c-i801.c
1961 +@@ -1617,6 +1617,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1962 + /* Default timeout in interrupt mode: 200 ms */
1963 + priv->adapter.timeout = HZ / 5;
1964 +
1965 ++ if (dev->irq == IRQ_NOTCONNECTED)
1966 ++ priv->features &= ~FEATURE_IRQ;
1967 ++
1968 + if (priv->features & FEATURE_IRQ) {
1969 + u16 pcictl, pcists;
1970 +
1971 +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
1972 +index 21e60b1e2ff4..130606c3b07c 100644
1973 +--- a/drivers/infiniband/core/umem.c
1974 ++++ b/drivers/infiniband/core/umem.c
1975 +@@ -191,7 +191,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
1976 + sg_list_start = umem->sg_head.sgl;
1977 +
1978 + while (npages) {
1979 +- ret = get_user_pages(cur_base,
1980 ++ ret = get_user_pages_longterm(cur_base,
1981 + min_t(unsigned long, npages,
1982 + PAGE_SIZE / sizeof (struct page *)),
1983 + gup_flags, page_list, vma_list);
1984 +diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
1985 +index c1696e6084b2..603acaf91828 100644
1986 +--- a/drivers/infiniband/core/user_mad.c
1987 ++++ b/drivers/infiniband/core/user_mad.c
1988 +@@ -229,7 +229,16 @@ static void recv_handler(struct ib_mad_agent *agent,
1989 + packet->mad.hdr.status = 0;
1990 + packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;
1991 + packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
1992 +- packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
1993 ++ /*
1994 ++ * On OPA devices it is okay to lose the upper 16 bits of LID as this
1995 ++ * information is obtained elsewhere. Mask off the upper 16 bits.
1996 ++ */
1997 ++ if (agent->device->port_immutable[agent->port_num].core_cap_flags &
1998 ++ RDMA_CORE_PORT_INTEL_OPA)
1999 ++ packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
2000 ++ mad_recv_wc->wc->slid);
2001 ++ else
2002 ++ packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
2003 + packet->mad.hdr.sl = mad_recv_wc->wc->sl;
2004 + packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
2005 + packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
2006 +diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
2007 +index f4c0ffc040cc..07b80faf1675 100644
2008 +--- a/drivers/infiniband/hw/hfi1/mad.c
2009 ++++ b/drivers/infiniband/hw/hfi1/mad.c
2010 +@@ -4293,7 +4293,6 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
2011 + const struct ib_wc *in_wc)
2012 + {
2013 + struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2014 +- u16 slid = ib_lid_cpu16(in_wc->slid);
2015 + u16 pkey;
2016 +
2017 + if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
2018 +@@ -4320,7 +4319,11 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
2019 + */
2020 + if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
2021 + return 0;
2022 +- ingress_pkey_table_fail(ppd, pkey, slid);
2023 ++ /*
2024 ++ * On OPA devices it is okay to lose the upper 16 bits of LID as this
2025 ++ * information is obtained elsewhere. Mask off the upper 16 bits.
2026 ++ */
2027 ++ ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid));
2028 + return 1;
2029 + }
2030 +
2031 +diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
2032 +index c9934139d609..934b1fce4ce1 100644
2033 +--- a/drivers/md/bcache/alloc.c
2034 ++++ b/drivers/md/bcache/alloc.c
2035 +@@ -480,7 +480,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
2036 + if (b == -1)
2037 + goto err;
2038 +
2039 +- k->ptr[i] = PTR(ca->buckets[b].gen,
2040 ++ k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
2041 + bucket_to_sector(c, b),
2042 + ca->sb.nr_this_dev);
2043 +
2044 +diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
2045 +index 41c238fc3733..f9d391711595 100644
2046 +--- a/drivers/md/bcache/extents.c
2047 ++++ b/drivers/md/bcache/extents.c
2048 +@@ -585,7 +585,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
2049 + return false;
2050 +
2051 + for (i = 0; i < KEY_PTRS(l); i++)
2052 +- if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
2053 ++ if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
2054 + PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
2055 + return false;
2056 +
2057 +diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
2058 +index 02a98ddb592d..03cc0722ae48 100644
2059 +--- a/drivers/md/bcache/journal.c
2060 ++++ b/drivers/md/bcache/journal.c
2061 +@@ -507,7 +507,7 @@ static void journal_reclaim(struct cache_set *c)
2062 + continue;
2063 +
2064 + ja->cur_idx = next;
2065 +- k->ptr[n++] = PTR(0,
2066 ++ k->ptr[n++] = MAKE_PTR(0,
2067 + bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
2068 + ca->sb.nr_this_dev);
2069 + }
2070 +diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
2071 +index 3475d6628e21..14d13cab5cda 100644
2072 +--- a/drivers/md/bcache/request.c
2073 ++++ b/drivers/md/bcache/request.c
2074 +@@ -699,7 +699,14 @@ static void cached_dev_read_error(struct closure *cl)
2075 + struct search *s = container_of(cl, struct search, cl);
2076 + struct bio *bio = &s->bio.bio;
2077 +
2078 +- if (s->recoverable) {
2079 ++ /*
2080 ++ * If the read request hit dirty data (s->read_dirty_data is true),
2081 ++ * then recovering a failed read request from the cached device may
2082 ++ * return stale data. So read failure recovery is only permitted
2083 ++ * when the read request hit clean data in the cache device, or
2084 ++ * when a cache read race happened.
2085 ++ */
2086 ++ if (s->recoverable && !s->read_dirty_data) {
2087 + /* Retry from the backing device: */
2088 + trace_bcache_read_retry(s->orig_bio);
2089 +
2090 +diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
2091 +index cae57b5be817..f425905c97fa 100644
2092 +--- a/drivers/md/bitmap.c
2093 ++++ b/drivers/md/bitmap.c
2094 +@@ -1816,6 +1816,12 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
2095 +
2096 + BUG_ON(file && mddev->bitmap_info.offset);
2097 +
2098 ++ if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
2099 ++ pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
2100 ++ mdname(mddev));
2101 ++ return ERR_PTR(-EBUSY);
2102 ++ }
2103 ++
2104 + bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
2105 + if (!bitmap)
2106 + return ERR_PTR(-ENOMEM);
2107 +diff --git a/drivers/md/md.c b/drivers/md/md.c
2108 +index e019cf8c0d13..98ea86309ceb 100644
2109 +--- a/drivers/md/md.c
2110 ++++ b/drivers/md/md.c
2111 +@@ -6362,7 +6362,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
2112 + break;
2113 + }
2114 + }
2115 +- if (has_journal) {
2116 ++ if (has_journal || mddev->bitmap) {
2117 + export_rdev(rdev);
2118 + return -EBUSY;
2119 + }
2120 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2121 +index 928e24a07133..7aed69a4f655 100644
2122 +--- a/drivers/md/raid5.c
2123 ++++ b/drivers/md/raid5.c
2124 +@@ -7156,6 +7156,13 @@ static int raid5_run(struct mddev *mddev)
2125 + min_offset_diff = diff;
2126 + }
2127 +
2128 ++ if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) &&
2129 ++ (mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
2130 ++ pr_notice("md/raid:%s: array cannot have both journal and bitmap\n",
2131 ++ mdname(mddev));
2132 ++ return -EINVAL;
2133 ++ }
2134 ++
2135 + if (mddev->reshape_position != MaxSector) {
2136 + /* Check that we can continue the reshape.
2137 + * Difficulties arise if the stripe we would write to
2138 +diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
2139 +index 0b5c43f7e020..f412429cf5ba 100644
2140 +--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
2141 ++++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
2142 +@@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
2143 + dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
2144 + data, size, dma->nr_pages);
2145 +
2146 +- err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
2147 ++ err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
2148 + flags, dma->pages, NULL);
2149 +
2150 + if (err != dma->nr_pages) {
2151 + dma->nr_pages = (err >= 0) ? err : 0;
2152 +- dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
2153 ++ dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
2154 ++ dma->nr_pages);
2155 + return err < 0 ? err : -EINVAL;
2156 + }
2157 + return 0;
2158 +diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
2159 +index 3ba04f371380..81093f8157a9 100644
2160 +--- a/drivers/misc/cxl/pci.c
2161 ++++ b/drivers/misc/cxl/pci.c
2162 +@@ -2043,6 +2043,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
2163 + /* There should only be one entry, but go through the list
2164 + * anyway
2165 + */
2166 ++ if (afu->phb == NULL)
2167 ++ return result;
2168 ++
2169 + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
2170 + if (!afu_dev->driver)
2171 + continue;
2172 +@@ -2084,8 +2087,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
2173 + * Tell the AFU drivers; but we don't care what they
2174 + * say, we're going away.
2175 + */
2176 +- if (afu->phb != NULL)
2177 +- cxl_vphb_error_detected(afu, state);
2178 ++ cxl_vphb_error_detected(afu, state);
2179 + }
2180 + return PCI_ERS_RESULT_DISCONNECT;
2181 + }
2182 +@@ -2225,6 +2227,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
2183 + if (cxl_afu_select_best_mode(afu))
2184 + goto err;
2185 +
2186 ++ if (afu->phb == NULL)
2187 ++ continue;
2188 ++
2189 + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
2190 + /* Reset the device context.
2191 + * TODO: make this less disruptive
2192 +@@ -2287,6 +2292,9 @@ static void cxl_pci_resume(struct pci_dev *pdev)
2193 + for (i = 0; i < adapter->slices; i++) {
2194 + afu = adapter->afu[i];
2195 +
2196 ++ if (afu->phb == NULL)
2197 ++ continue;
2198 ++
2199 + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
2200 + if (afu_dev->driver && afu_dev->driver->err_handler &&
2201 + afu_dev->driver->err_handler->resume)
2202 +diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
2203 +index 764ff5df0dbc..372b2060fbba 100644
2204 +--- a/drivers/misc/eeprom/at24.c
2205 ++++ b/drivers/misc/eeprom/at24.c
2206 +@@ -365,7 +365,8 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
2207 + memset(msg, 0, sizeof(msg));
2208 + msg[0].addr = client->addr;
2209 + msg[0].buf = addrbuf;
2210 +- addrbuf[0] = 0x90 + offset;
2211 ++ /* EUI-48 starts from 0x9a, EUI-64 from 0x98 */
2212 ++ addrbuf[0] = 0xa0 - at24->chip.byte_len + offset;
2213 + msg[0].len = 1;
2214 + msg[1].addr = client->addr;
2215 + msg[1].flags = I2C_M_RD;
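[Annotation: deriving the base address from byte_len lets one formula serve both serial-number variants: the EUI-48 array (6 bytes) starts at register address 0xa0 - 6 = 0x9a and the EUI-64 array (8 bytes) at 0xa0 - 8 = 0x98, where the old fixed 0x90 base pointed at the wrong registers entirely. The arithmetic, checked in isolation:

#include <stdio.h>

int main(void)
{
	const unsigned int byte_lens[] = { 6, 8 };	/* EUI-48, EUI-64 */

	for (unsigned int i = 0; i < 2; i++)
		printf("byte_len %u -> base register address 0x%02x\n",
		       byte_lens[i], 0xa0 - byte_lens[i]);
	return 0;
}]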
2216 +@@ -506,6 +507,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
2217 + if (unlikely(!count))
2218 + return count;
2219 +
2220 ++ if (off + count > at24->chip.byte_len)
2221 ++ return -EINVAL;
2222 ++
2223 + /*
2224 + * Read data from chip, protecting against concurrent updates
2225 + * from this host, but not from other I2C masters.
2226 +@@ -538,6 +542,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
2227 + if (unlikely(!count))
2228 + return -EINVAL;
2229 +
2230 ++ if (off + count > at24->chip.byte_len)
2231 ++ return -EINVAL;
2232 ++
2233 + /*
2234 + * Write data to chip, protecting against concurrent updates
2235 + * from this host, but not from other I2C masters.
2236 +@@ -631,6 +638,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
2237 + dev_warn(&client->dev,
2238 + "page_size looks suspicious (no power of 2)!\n");
2239 +
2240 ++ /*
2241 ++ * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while
2242 ++ * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4.
2243 ++ *
2244 ++ * Eventually we'll get rid of the magic values altogether in favor of
2245 ++ * real structs, but for now just manually set the right size.
2246 ++ */
2247 ++ if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4)
2248 ++ chip.byte_len = 6;
2249 ++
2250 + /* Use I2C operations unless we're stuck with SMBus extensions. */
2251 + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
2252 + if (chip.flags & AT24_FLAG_ADDR16)
2253 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
2254 +index 2ad7b5c69156..ccb516f18d72 100644
2255 +--- a/drivers/mmc/core/block.c
2256 ++++ b/drivers/mmc/core/block.c
2257 +@@ -119,6 +119,10 @@ struct mmc_blk_data {
2258 + struct device_attribute force_ro;
2259 + struct device_attribute power_ro_lock;
2260 + int area_type;
2261 ++
2262 ++ /* debugfs files (only in main mmc_blk_data) */
2263 ++ struct dentry *status_dentry;
2264 ++ struct dentry *ext_csd_dentry;
2265 + };
2266 +
2267 + static DEFINE_MUTEX(open_lock);
2268 +@@ -204,9 +208,14 @@ static ssize_t power_ro_lock_store(struct device *dev,
2269 +
2270 + /* Dispatch locking to the block layer */
2271 + req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
2272 ++ if (IS_ERR(req)) {
2273 ++ count = PTR_ERR(req);
2274 ++ goto out_put;
2275 ++ }
2276 + req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
2277 + blk_execute_rq(mq->queue, NULL, req, 0);
2278 + ret = req_to_mmc_queue_req(req)->drv_op_result;
2279 ++ blk_put_request(req);
2280 +
2281 + if (!ret) {
2282 + pr_info("%s: Locking boot partition ro until next power on\n",
2283 +@@ -219,7 +228,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
2284 + set_disk_ro(part_md->disk, 1);
2285 + }
2286 + }
2287 +-
2288 ++out_put:
2289 + mmc_blk_put(md);
2290 + return count;
2291 + }
2292 +@@ -580,6 +589,10 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
2293 + req = blk_get_request(mq->queue,
2294 + idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
2295 + __GFP_RECLAIM);
2296 ++ if (IS_ERR(req)) {
2297 ++ err = PTR_ERR(req);
2298 ++ goto cmd_done;
2299 ++ }
2300 + idatas[0] = idata;
2301 + req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
2302 + req_to_mmc_queue_req(req)->drv_op_data = idatas;
2303 +@@ -643,6 +656,10 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
2304 + req = blk_get_request(mq->queue,
2305 + idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
2306 + __GFP_RECLAIM);
2307 ++ if (IS_ERR(req)) {
2308 ++ err = PTR_ERR(req);
2309 ++ goto cmd_err;
2310 ++ }
2311 + req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
2312 + req_to_mmc_queue_req(req)->drv_op_data = idata;
2313 + req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
2314 +@@ -2314,6 +2331,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
2315 +
2316 + /* Ask the block layer about the card status */
2317 + req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
2318 ++ if (IS_ERR(req))
2319 ++ return PTR_ERR(req);
2320 + req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
2321 + blk_execute_rq(mq->queue, NULL, req, 0);
2322 + ret = req_to_mmc_queue_req(req)->drv_op_result;
2323 +@@ -2321,6 +2340,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
2324 + *val = ret;
2325 + ret = 0;
2326 + }
2327 ++ blk_put_request(req);
2328 +
2329 + return ret;
2330 + }
2331 +@@ -2347,10 +2367,15 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
2332 +
2333 + /* Ask the block layer for the EXT CSD */
2334 + req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
2335 ++ if (IS_ERR(req)) {
2336 ++ err = PTR_ERR(req);
2337 ++ goto out_free;
2338 ++ }
2339 + req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
2340 + req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
2341 + blk_execute_rq(mq->queue, NULL, req, 0);
2342 + err = req_to_mmc_queue_req(req)->drv_op_result;
2343 ++ blk_put_request(req);
2344 + if (err) {
2345 + pr_err("FAILED %d\n", err);
2346 + goto out_free;
2347 +@@ -2396,7 +2421,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
2348 + .llseek = default_llseek,
2349 + };
2350 +
2351 +-static int mmc_blk_add_debugfs(struct mmc_card *card)
2352 ++static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
2353 + {
2354 + struct dentry *root;
2355 +
2356 +@@ -2406,28 +2431,53 @@ static int mmc_blk_add_debugfs(struct mmc_card *card)
2357 + root = card->debugfs_root;
2358 +
2359 + if (mmc_card_mmc(card) || mmc_card_sd(card)) {
2360 +- if (!debugfs_create_file("status", S_IRUSR, root, card,
2361 +- &mmc_dbg_card_status_fops))
2362 ++ md->status_dentry =
2363 ++ debugfs_create_file("status", S_IRUSR, root, card,
2364 ++ &mmc_dbg_card_status_fops);
2365 ++ if (!md->status_dentry)
2366 + return -EIO;
2367 + }
2368 +
2369 + if (mmc_card_mmc(card)) {
2370 +- if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
2371 +- &mmc_dbg_ext_csd_fops))
2372 ++ md->ext_csd_dentry =
2373 ++ debugfs_create_file("ext_csd", S_IRUSR, root, card,
2374 ++ &mmc_dbg_ext_csd_fops);
2375 ++ if (!md->ext_csd_dentry)
2376 + return -EIO;
2377 + }
2378 +
2379 + return 0;
2380 + }
2381 +
2382 ++static void mmc_blk_remove_debugfs(struct mmc_card *card,
2383 ++ struct mmc_blk_data *md)
2384 ++{
2385 ++ if (!card->debugfs_root)
2386 ++ return;
2387 ++
2388 ++ if (!IS_ERR_OR_NULL(md->status_dentry)) {
2389 ++ debugfs_remove(md->status_dentry);
2390 ++ md->status_dentry = NULL;
2391 ++ }
2392 ++
2393 ++ if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
2394 ++ debugfs_remove(md->ext_csd_dentry);
2395 ++ md->ext_csd_dentry = NULL;
2396 ++ }
2397 ++}
2398 +
2399 + #else
2400 +
2401 +-static int mmc_blk_add_debugfs(struct mmc_card *card)
2402 ++static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
2403 + {
2404 + return 0;
2405 + }
2406 +
2407 ++static void mmc_blk_remove_debugfs(struct mmc_card *card,
2408 ++ struct mmc_blk_data *md)
2409 ++{
2410 ++}
2411 ++
2412 + #endif /* CONFIG_DEBUG_FS */
2413 +
2414 + static int mmc_blk_probe(struct mmc_card *card)
2415 +@@ -2467,7 +2517,7 @@ static int mmc_blk_probe(struct mmc_card *card)
2416 + }
2417 +
2418 + /* Add two debugfs entries */
2419 +- mmc_blk_add_debugfs(card);
2420 ++ mmc_blk_add_debugfs(card, md);
2421 +
2422 + pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2423 + pm_runtime_use_autosuspend(&card->dev);
2424 +@@ -2493,6 +2543,7 @@ static void mmc_blk_remove(struct mmc_card *card)
2425 + {
2426 + struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2427 +
2428 ++ mmc_blk_remove_debugfs(card, md);
2429 + mmc_blk_remove_parts(card, md);
2430 + pm_runtime_get_sync(&card->dev);
2431 + mmc_claim_host(card->host);
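[Annotation: every blk_get_request() call site in this file gains the same two fixes: an IS_ERR() check, because the function reports failure as an ERR_PTR-encoded errno rather than NULL, and a balancing blk_put_request() once the request has run. A userspace mimic of the ERR_PTR convention with toy macros; the kernel's real helpers live in linux/err.h, and everything below is illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* toy versions of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)
#define PTR_ERR(ptr)	((long)(ptr))

static void *get_request(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : malloc(16);
}

int main(void)
{
	void *req = get_request(1);

	if (IS_ERR(req)) {	/* must check before touching the pointer */
		printf("blk_get_request failed: %ld\n", PTR_ERR(req));
		return 1;
	}
	/* ... use the request ... */
	free(req);		/* pairs with the get, like blk_put_request() */
	return 0;
}]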
2432 +diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
2433 +index 301246513a37..7f428e387de3 100644
2434 +--- a/drivers/mmc/core/bus.c
2435 ++++ b/drivers/mmc/core/bus.c
2436 +@@ -157,6 +157,9 @@ static int mmc_bus_suspend(struct device *dev)
2437 + return ret;
2438 +
2439 + ret = host->bus_ops->suspend(host);
2440 ++ if (ret)
2441 ++ pm_generic_resume(dev);
2442 ++
2443 + return ret;
2444 + }
2445 +
2446 +diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
2447 +index 01e459a34f33..0f4a7d7b2626 100644
2448 +--- a/drivers/mmc/core/debugfs.c
2449 ++++ b/drivers/mmc/core/debugfs.c
2450 +@@ -314,4 +314,5 @@ void mmc_add_card_debugfs(struct mmc_card *card)
2451 + void mmc_remove_card_debugfs(struct mmc_card *card)
2452 + {
2453 + debugfs_remove_recursive(card->debugfs_root);
2454 ++ card->debugfs_root = NULL;
2455 + }
2456 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
2457 +index 36217ad5e9b1..bad5c1bf4ed9 100644
2458 +--- a/drivers/mmc/core/mmc.c
2459 ++++ b/drivers/mmc/core/mmc.c
2460 +@@ -780,7 +780,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
2461 + MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
2462 + MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
2463 + MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
2464 +-MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
2465 ++MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
2466 + MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
2467 + card->ext_csd.device_life_time_est_typ_a,
2468 + card->ext_csd.device_life_time_est_typ_b);
2469 +@@ -790,7 +790,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
2470 + MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
2471 + MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
2472 + MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
2473 +-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
2474 ++MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
2475 + MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
2476 +
2477 + static ssize_t mmc_fwrev_show(struct device *dev,
2478 +diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
2479 +index 4fd1620b732d..eb9de2134967 100644
2480 +--- a/drivers/mmc/core/sd.c
2481 ++++ b/drivers/mmc/core/sd.c
2482 +@@ -675,7 +675,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
2483 + MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
2484 + MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
2485 + MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
2486 +-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
2487 ++MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
2488 +
2489 +
2490 + static ssize_t mmc_dsr_show(struct device *dev,
2491 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2492 +index 0d5fcca18c9e..6152e83ff935 100644
2493 +--- a/drivers/mmc/host/sdhci.c
2494 ++++ b/drivers/mmc/host/sdhci.c
2495 +@@ -21,6 +21,7 @@
2496 + #include <linux/dma-mapping.h>
2497 + #include <linux/slab.h>
2498 + #include <linux/scatterlist.h>
2499 ++#include <linux/swiotlb.h>
2500 + #include <linux/regulator/consumer.h>
2501 + #include <linux/pm_runtime.h>
2502 + #include <linux/of.h>
2503 +@@ -3650,23 +3651,30 @@ int sdhci_setup_host(struct sdhci_host *host)
2504 +
2505 + spin_lock_init(&host->lock);
2506 +
2507 ++ /*
2508 ++ * Maximum number of sectors in one transfer. Limited by SDMA boundary
2509 ++ * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
2510 ++ * is less anyway.
2511 ++ */
2512 ++ mmc->max_req_size = 524288;
2513 ++
2514 + /*
2515 + * Maximum number of segments. Depends on if the hardware
2516 + * can do scatter/gather or not.
2517 + */
2518 +- if (host->flags & SDHCI_USE_ADMA)
2519 ++ if (host->flags & SDHCI_USE_ADMA) {
2520 + mmc->max_segs = SDHCI_MAX_SEGS;
2521 +- else if (host->flags & SDHCI_USE_SDMA)
2522 ++ } else if (host->flags & SDHCI_USE_SDMA) {
2523 + mmc->max_segs = 1;
2524 +- else /* PIO */
2525 ++ if (swiotlb_max_segment()) {
2526 ++ unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
2527 ++ IO_TLB_SEGSIZE;
2528 ++ mmc->max_req_size = min(mmc->max_req_size,
2529 ++ max_req_size);
2530 ++ }
2531 ++ } else { /* PIO */
2532 + mmc->max_segs = SDHCI_MAX_SEGS;
2533 +-
2534 +- /*
2535 +- * Maximum number of sectors in one transfer. Limited by SDMA boundary
2536 +- * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
2537 +- * is less anyway.
2538 +- */
2539 +- mmc->max_req_size = 524288;
2540 ++ }
2541 +
2542 + /*
2543 + * Maximum segment size. Could be one segment with the maximum number
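[Annotation: moving the max_req_size assignment ahead of the segment setup lets the SDMA branch shrink it when a software IOTLB is in play: a bounce buffer can only cover IO_TLB_SEGSIZE slots of 1 << IO_TLB_SHIFT bytes each. Taking the values those constants have in this kernel, 128 and 2 KiB, stated here as an assumption (see include/linux/swiotlb.h):

#include <stdio.h>

#define IO_TLB_SHIFT	11	/* 2 KiB per swiotlb slot (assumed kernel value) */
#define IO_TLB_SEGSIZE	128	/* max contiguous slots (assumed kernel value) */

int main(void)
{
	unsigned int max_req_size = 524288;	/* SDMA boundary: 512 KiB */
	unsigned int swiotlb_cap = (1u << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

	if (swiotlb_cap < max_req_size)
		max_req_size = swiotlb_cap;

	printf("swiotlb cap %u, max_req_size %u\n", swiotlb_cap, max_req_size);
	/* 262144 and 262144: requests are held to 256 KiB under swiotlb */
	return 0;
}]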
2544 +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
2545 +index 67163ca898ba..00a36df02a3f 100644
2546 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
2547 ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
2548 +@@ -113,7 +113,8 @@
2549 + #define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */
2550 + #define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */
2551 + #define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */
2552 +-#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
2553 ++#define E1000_TARC0_CB_MULTIQ_3_REQ 0x30000000
2554 ++#define E1000_TARC0_CB_MULTIQ_2_REQ 0x20000000
2555 + #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
2556 +
2557 + #define E1000_ICH_RAR_ENTRIES 7
2558 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
2559 +index c38b00c90f48..991c2a0dd67e 100644
2560 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
2561 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
2562 +@@ -3030,9 +3030,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2563 + ew32(IOSFPC, reg_val);
2564 +
2565 + reg_val = er32(TARC(0));
2566 +- /* SPT and KBL Si errata workaround to avoid Tx hang */
2567 +- reg_val &= ~BIT(28);
2568 +- reg_val |= BIT(29);
2569 ++ /* SPT and KBL Si errata workaround to avoid Tx hang.
2570 ++ * Dropping the number of outstanding requests from
2571 ++ * 3 to 2 in order to avoid a buffer overrun.
2572 ++ */
2573 ++ reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
2574 ++ reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
2575 + ew32(TARC(0), reg_val);
2576 + }
2577 + }
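[Annotation: the errata workaround itself is unchanged (clear bit 28, keep bit 29 set); it is merely spelled with named masks now. E1000_TARC0_CB_MULTIQ_3_REQ (0x30000000) is BIT(28) | BIT(29), and E1000_TARC0_CB_MULTIQ_2_REQ (0x20000000) is BIT(29) alone, so clearing the former and ORing in the latter lands on the same register value. Verifying the equivalence:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)				(1u << (n))
#define E1000_TARC0_CB_MULTIQ_3_REQ	0x30000000
#define E1000_TARC0_CB_MULTIQ_2_REQ	0x20000000

int main(void)
{
	uint32_t reg = 0xdeadbeef;	/* arbitrary starting value */
	uint32_t a = reg, b = reg;

	a &= ~BIT(28);				/* old spelling */
	a |= BIT(29);

	b &= ~E1000_TARC0_CB_MULTIQ_3_REQ;	/* new spelling */
	b |= E1000_TARC0_CB_MULTIQ_2_REQ;

	printf("%s\n", a == b ? "identical" : "different");	/* identical */
	return 0;
}]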
2578 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
2579 +index d3f3c4447515..044af553204c 100644
2580 +--- a/drivers/nvme/host/nvme.h
2581 ++++ b/drivers/nvme/host/nvme.h
2582 +@@ -108,7 +108,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
2583 + * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
2584 + * found empirically.
2585 + */
2586 +-#define NVME_QUIRK_DELAY_AMOUNT 2000
2587 ++#define NVME_QUIRK_DELAY_AMOUNT 2300
2588 +
2589 + enum nvme_ctrl_state {
2590 + NVME_CTRL_NEW,
2591 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2592 +index 3f5a04c586ce..75539f7c58b9 100644
2593 +--- a/drivers/nvme/host/pci.c
2594 ++++ b/drivers/nvme/host/pci.c
2595 +@@ -2519,6 +2519,8 @@ static const struct pci_device_id nvme_id_table[] = {
2596 + .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
2597 + { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
2598 + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2599 ++ { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
2600 ++ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2601 + { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
2602 + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2603 + { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
2604 +diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
2605 +index b4ed3dc983d5..b4224389febe 100644
2606 +--- a/drivers/platform/x86/hp-wmi.c
2607 ++++ b/drivers/platform/x86/hp-wmi.c
2608 +@@ -297,7 +297,7 @@ static int hp_wmi_hw_state(int mask)
2609 + if (state < 0)
2610 + return state;
2611 +
2612 +- return state & 0x1;
2613 ++ return !!(state & mask);
2614 + }
2615 +
2616 + static int __init hp_wmi_bios_2008_later(void)
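[Annotation: the one-liner above fixes two bugs at once: hp_wmi_hw_state() used to test bit 0 no matter which feature mask the caller passed in, and it returned the raw masked value instead of a boolean. The !! idiom normalizes any non-zero result to 1:

#include <stdio.h>

static int hw_state_old(int state, int mask)
{
	(void)mask;			/* the bug: mask was never consulted */
	return state & 0x1;
}

static int hw_state_new(int state, int mask)
{
	return !!(state & mask);	/* 0 or 1, for the requested feature */
}

int main(void)
{
	int state = 0x4;	/* only bit 2 set */

	/* querying mask 0x4: old reads the wrong bit and reports 0 */
	printf("old=%d new=%d\n", hw_state_old(state, 0x4),
	       hw_state_new(state, 0x4));
	return 0;
}]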
2617 +diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
2618 +index d79ced925861..82e8f6edfb48 100644
2619 +--- a/fs/autofs4/root.c
2620 ++++ b/fs/autofs4/root.c
2621 +@@ -281,8 +281,8 @@ static int autofs4_mount_wait(const struct path *path, bool rcu_walk)
2622 + pr_debug("waiting for mount name=%pd\n", path->dentry);
2623 + status = autofs4_wait(sbi, path, NFY_MOUNT);
2624 + pr_debug("mount wait done status=%d\n", status);
2625 +- ino->last_used = jiffies;
2626 + }
2627 ++ ino->last_used = jiffies;
2628 + return status;
2629 + }
2630 +
2631 +@@ -321,21 +321,16 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path)
2632 + */
2633 + if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
2634 + struct dentry *parent = dentry->d_parent;
2635 ++ struct autofs_info *ino;
2636 + struct dentry *new;
2637 +
2638 + new = d_lookup(parent, &dentry->d_name);
2639 + if (!new)
2640 + return NULL;
2641 +- if (new == dentry)
2642 +- dput(new);
2643 +- else {
2644 +- struct autofs_info *ino;
2645 +-
2646 +- ino = autofs4_dentry_ino(new);
2647 +- ino->last_used = jiffies;
2648 +- dput(path->dentry);
2649 +- path->dentry = new;
2650 +- }
2651 ++ ino = autofs4_dentry_ino(new);
2652 ++ ino->last_used = jiffies;
2653 ++ dput(path->dentry);
2654 ++ path->dentry = new;
2655 + }
2656 + return path->dentry;
2657 + }
2658 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2659 +index 08698105fa4a..e4774c02d922 100644
2660 +--- a/fs/btrfs/extent-tree.c
2661 ++++ b/fs/btrfs/extent-tree.c
2662 +@@ -3526,13 +3526,6 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2663 + goto again;
2664 + }
2665 +
2666 +- /* We've already setup this transaction, go ahead and exit */
2667 +- if (block_group->cache_generation == trans->transid &&
2668 +- i_size_read(inode)) {
2669 +- dcs = BTRFS_DC_SETUP;
2670 +- goto out_put;
2671 +- }
2672 +-
2673 + /*
2674 + * We want to set the generation to 0, that way if anything goes wrong
2675 + * from here on out we know not to trust this cache when we load up next
2676 +@@ -3556,6 +3549,13 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2677 + }
2678 + WARN_ON(ret);
2679 +
2680 ++ /* We've already setup this transaction, go ahead and exit */
2681 ++ if (block_group->cache_generation == trans->transid &&
2682 ++ i_size_read(inode)) {
2683 ++ dcs = BTRFS_DC_SETUP;
2684 ++ goto out_put;
2685 ++ }
2686 ++
2687 + if (i_size_read(inode) > 0) {
2688 + ret = btrfs_check_trunc_cache_free_space(fs_info,
2689 + &fs_info->global_block_rsv);
2690 +diff --git a/fs/exec.c b/fs/exec.c
2691 +index 3e14ba25f678..4726c777dd38 100644
2692 +--- a/fs/exec.c
2693 ++++ b/fs/exec.c
2694 +@@ -1340,10 +1340,15 @@ void setup_new_exec(struct linux_binprm * bprm)
2695 + * avoid bad behavior from the prior rlimits. This has to
2696 + * happen before arch_pick_mmap_layout(), which examines
2697 + * RLIMIT_STACK, but after the point of no return to avoid
2698 +- * needing to clean up the change on failure.
2699 ++ * races from other threads changing the limits. This also
2700 ++ * must be protected from races with prlimit() calls.
2701 + */
2702 ++ task_lock(current->group_leader);
2703 + if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM)
2704 + current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM;
2705 ++ if (current->signal->rlim[RLIMIT_STACK].rlim_max > _STK_LIM)
2706 ++ current->signal->rlim[RLIMIT_STACK].rlim_max = _STK_LIM;
2707 ++ task_unlock(current->group_leader);
2708 + }
2709 +
2710 + arch_pick_mmap_layout(current->mm);
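[Annotation: two separate hardenings in the hunk above: the rlimit update now happens under the group leader's task lock so a concurrent prlimit() cannot race it, and the hard limit is clamped alongside the soft one, both down to _STK_LIM (8 MiB). The clamp on its own, with a stand-in struct:

#include <stdio.h>

#define _STK_LIM	(8 * 1024 * 1024ul)	/* 8 MiB, as in the kernel */

struct rlim { unsigned long cur, max; };	/* stand-in for struct rlimit */

static void clamp_stack(struct rlim *r)
{
	if (r->cur > _STK_LIM)
		r->cur = _STK_LIM;
	if (r->max > _STK_LIM)	/* new in this fix: clamp the hard limit too */
		r->max = _STK_LIM;
}

int main(void)
{
	struct rlim r = { .cur = 16ul << 20, .max = ~0ul };	/* 16M / unlimited */

	clamp_stack(&r);
	printf("cur=%lu max=%lu\n", r.cur, r.max);	/* both 8388608 */
	return 0;
}]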
2711 +diff --git a/fs/fat/inode.c b/fs/fat/inode.c
2712 +index 30c52394a7ad..c7a4dee206b9 100644
2713 +--- a/fs/fat/inode.c
2714 ++++ b/fs/fat/inode.c
2715 +@@ -779,7 +779,7 @@ static void __exit fat_destroy_inodecache(void)
2716 +
2717 + static int fat_remount(struct super_block *sb, int *flags, char *data)
2718 + {
2719 +- int new_rdonly;
2720 ++ bool new_rdonly;
2721 + struct msdos_sb_info *sbi = MSDOS_SB(sb);
2722 + *flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
2723 +
2724 +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
2725 +index f04ecfc7ece0..45e96549ebd2 100644
2726 +--- a/fs/lockd/svc.c
2727 ++++ b/fs/lockd/svc.c
2728 +@@ -274,6 +274,8 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
2729 + if (ln->nlmsvc_users) {
2730 + if (--ln->nlmsvc_users == 0) {
2731 + nlm_shutdown_hosts_net(net);
2732 ++ cancel_delayed_work_sync(&ln->grace_period_end);
2733 ++ locks_end_grace(&ln->lockd_manager);
2734 + svc_shutdown_net(serv, net);
2735 + dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
2736 + }
2737 +diff --git a/fs/namei.c b/fs/namei.c
2738 +index ed8b9488a890..62a0db6e6725 100644
2739 +--- a/fs/namei.c
2740 ++++ b/fs/namei.c
2741 +@@ -1129,18 +1129,9 @@ static int follow_automount(struct path *path, struct nameidata *nd,
2742 + * of the daemon to instantiate them before they can be used.
2743 + */
2744 + if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
2745 +- LOOKUP_OPEN | LOOKUP_CREATE |
2746 +- LOOKUP_AUTOMOUNT))) {
2747 +- /* Positive dentry that isn't meant to trigger an
2748 +- * automount, EISDIR will allow it to be used,
2749 +- * otherwise there's no mount here "now" so return
2750 +- * ENOENT.
2751 +- */
2752 +- if (path->dentry->d_inode)
2753 +- return -EISDIR;
2754 +- else
2755 +- return -ENOENT;
2756 +- }
2757 ++ LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
2758 ++ path->dentry->d_inode)
2759 ++ return -EISDIR;
2760 +
2761 + if (path->dentry->d_sb->s_user_ns != &init_user_ns)
2762 + return -EACCES;
2763 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
2764 +index d386d569edbc..a439a70177a4 100644
2765 +--- a/fs/nfsd/nfs4state.c
2766 ++++ b/fs/nfsd/nfs4state.c
2767 +@@ -3512,7 +3512,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
2768 + /* ignore lock owners */
2769 + if (local->st_stateowner->so_is_open_owner == 0)
2770 + continue;
2771 +- if (local->st_stateowner == &oo->oo_owner) {
2772 ++ if (local->st_stateowner != &oo->oo_owner)
2773 ++ continue;
2774 ++ if (local->st_stid.sc_type == NFS4_OPEN_STID) {
2775 + ret = local;
2776 + atomic_inc(&ret->st_stid.sc_count);
2777 + break;
2778 +@@ -3521,6 +3523,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
2779 + return ret;
2780 + }
2781 +
2782 ++static __be32
2783 ++nfsd4_verify_open_stid(struct nfs4_stid *s)
2784 ++{
2785 ++ __be32 ret = nfs_ok;
2786 ++
2787 ++ switch (s->sc_type) {
2788 ++ default:
2789 ++ break;
2790 ++ case NFS4_CLOSED_STID:
2791 ++ case NFS4_CLOSED_DELEG_STID:
2792 ++ ret = nfserr_bad_stateid;
2793 ++ break;
2794 ++ case NFS4_REVOKED_DELEG_STID:
2795 ++ ret = nfserr_deleg_revoked;
2796 ++ }
2797 ++ return ret;
2798 ++}
2799 ++
2800 ++/* Lock the stateid st_mutex, and deal with races with CLOSE */
2801 ++static __be32
2802 ++nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
2803 ++{
2804 ++ __be32 ret;
2805 ++
2806 ++ mutex_lock(&stp->st_mutex);
2807 ++ ret = nfsd4_verify_open_stid(&stp->st_stid);
2808 ++ if (ret != nfs_ok)
2809 ++ mutex_unlock(&stp->st_mutex);
2810 ++ return ret;
2811 ++}
2812 ++
2813 ++static struct nfs4_ol_stateid *
2814 ++nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
2815 ++{
2816 ++ struct nfs4_ol_stateid *stp;
2817 ++ for (;;) {
2818 ++ spin_lock(&fp->fi_lock);
2819 ++ stp = nfsd4_find_existing_open(fp, open);
2820 ++ spin_unlock(&fp->fi_lock);
2821 ++ if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
2822 ++ break;
2823 ++ nfs4_put_stid(&stp->st_stid);
2824 ++ }
2825 ++ return stp;
2826 ++}
2827 ++
2828 + static struct nfs4_openowner *
2829 + alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
2830 + struct nfsd4_compound_state *cstate)
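[Annotation: nfsd4_find_and_lock_existing_open() above is a lookup/lock/revalidate loop: find the stateid under the file's spinlock, drop the spinlock, take the stateid's mutex, and if a concurrent CLOSE invalidated it in that window, drop the reference and start over. A generic pthreads sketch of the pattern, with a single object and illustrative types; the real code revalidates via sc_type:

#include <pthread.h>
#include <stdbool.h>

struct obj {
	pthread_mutex_t lock;
	bool closed;		/* set by the close path under obj->lock */
	int refcount;
};

static struct obj global = { PTHREAD_MUTEX_INITIALIZER, false, 1 };

static struct obj *lookup(void)	/* stands in for the spinlocked table walk */
{
	global.refcount++;	/* sketch only: not atomic, single object */
	return &global;
}

static void put(struct obj *o)	/* stands in for nfs4_put_stid() */
{
	o->refcount--;
}

static struct obj *find_and_lock(void)
{
	struct obj *o;

	for (;;) {
		o = lookup();			/* takes a reference */
		pthread_mutex_lock(&o->lock);
		if (!o->closed)			/* still valid: return it locked */
			return o;
		pthread_mutex_unlock(&o->lock);	/* lost the race with close */
		put(o);				/* drop the ref and retry */
	}
}

int main(void)
{
	struct obj *o = find_and_lock();

	pthread_mutex_unlock(&o->lock);
	put(o);
	return 0;
}]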
2831 +@@ -3565,6 +3613,7 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
2832 + mutex_init(&stp->st_mutex);
2833 + mutex_lock(&stp->st_mutex);
2834 +
2835 ++retry:
2836 + spin_lock(&oo->oo_owner.so_client->cl_lock);
2837 + spin_lock(&fp->fi_lock);
2838 +
2839 +@@ -3589,7 +3638,11 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
2840 + spin_unlock(&fp->fi_lock);
2841 + spin_unlock(&oo->oo_owner.so_client->cl_lock);
2842 + if (retstp) {
2843 +- mutex_lock(&retstp->st_mutex);
2844 ++ /* Handle races with CLOSE */
2845 ++ if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
2846 ++ nfs4_put_stid(&retstp->st_stid);
2847 ++ goto retry;
2848 ++ }
2849 + /* To keep mutex tracking happy */
2850 + mutex_unlock(&stp->st_mutex);
2851 + stp = retstp;
2852 +@@ -4399,6 +4452,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
2853 + struct nfs4_ol_stateid *stp = NULL;
2854 + struct nfs4_delegation *dp = NULL;
2855 + __be32 status;
2856 ++ bool new_stp = false;
2857 +
2858 + /*
2859 + * Lookup file; if found, lookup stateid and check open request,
2860 +@@ -4410,9 +4464,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
2861 + status = nfs4_check_deleg(cl, open, &dp);
2862 + if (status)
2863 + goto out;
2864 +- spin_lock(&fp->fi_lock);
2865 +- stp = nfsd4_find_existing_open(fp, open);
2866 +- spin_unlock(&fp->fi_lock);
2867 ++ stp = nfsd4_find_and_lock_existing_open(fp, open);
2868 + } else {
2869 + open->op_file = NULL;
2870 + status = nfserr_bad_stateid;
2871 +@@ -4420,35 +4472,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
2872 + goto out;
2873 + }
2874 +
2875 ++ if (!stp) {
2876 ++ stp = init_open_stateid(fp, open);
2877 ++ if (!open->op_stp)
2878 ++ new_stp = true;
2879 ++ }
2880 ++
2881 + /*
2882 + * OPEN the file, or upgrade an existing OPEN.
2883 + * If truncate fails, the OPEN fails.
2884 ++ *
2885 ++ * stp is already locked.
2886 + */
2887 +- if (stp) {
2888 ++ if (!new_stp) {
2889 + /* Stateid was found, this is an OPEN upgrade */
2890 +- mutex_lock(&stp->st_mutex);
2891 + status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
2892 + if (status) {
2893 + mutex_unlock(&stp->st_mutex);
2894 + goto out;
2895 + }
2896 + } else {
2897 +- /* stp is returned locked. */
2898 +- stp = init_open_stateid(fp, open);
2899 +- /* See if we lost the race to some other thread */
2900 +- if (stp->st_access_bmap != 0) {
2901 +- status = nfs4_upgrade_open(rqstp, fp, current_fh,
2902 +- stp, open);
2903 +- if (status) {
2904 +- mutex_unlock(&stp->st_mutex);
2905 +- goto out;
2906 +- }
2907 +- goto upgrade_out;
2908 +- }
2909 + status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
2910 + if (status) {
2911 +- mutex_unlock(&stp->st_mutex);
2912 ++ stp->st_stid.sc_type = NFS4_CLOSED_STID;
2913 + release_open_stateid(stp);
2914 ++ mutex_unlock(&stp->st_mutex);
2915 + goto out;
2916 + }
2917 +
2918 +@@ -4457,7 +4505,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
2919 + if (stp->st_clnt_odstate == open->op_odstate)
2920 + open->op_odstate = NULL;
2921 + }
2922 +-upgrade_out:
2923 ++
2924 + nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
2925 + mutex_unlock(&stp->st_mutex);
2926 +
2927 +@@ -4684,7 +4732,7 @@ nfs4_laundromat(struct nfsd_net *nn)
2928 + spin_unlock(&nn->blocked_locks_lock);
2929 +
2930 + while (!list_empty(&reaplist)) {
2931 +- nbl = list_first_entry(&nn->blocked_locks_lru,
2932 ++ nbl = list_first_entry(&reaplist,
2933 + struct nfsd4_blocked_lock, nbl_lru);
2934 + list_del_init(&nbl->nbl_lru);
2935 + posix_unblock_lock(&nbl->nbl_lock);
2936 +@@ -5317,7 +5365,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
2937 + bool unhashed;
2938 + LIST_HEAD(reaplist);
2939 +
2940 +- s->st_stid.sc_type = NFS4_CLOSED_STID;
2941 + spin_lock(&clp->cl_lock);
2942 + unhashed = unhash_open_stateid(s, &reaplist);
2943 +
2944 +@@ -5357,10 +5404,12 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2945 + nfsd4_bump_seqid(cstate, status);
2946 + if (status)
2947 + goto out;
2948 ++
2949 ++ stp->st_stid.sc_type = NFS4_CLOSED_STID;
2950 + nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
2951 +- mutex_unlock(&stp->st_mutex);
2952 +
2953 + nfsd4_close_open_stateid(stp);
2954 ++ mutex_unlock(&stp->st_mutex);
2955 +
2956 + /* put reference from nfs4_preprocess_seqid_op */
2957 + nfs4_put_stid(&stp->st_stid);
2958 +@@ -7103,7 +7152,7 @@ nfs4_state_shutdown_net(struct net *net)
2959 + spin_unlock(&nn->blocked_locks_lock);
2960 +
2961 + while (!list_empty(&reaplist)) {
2962 +- nbl = list_first_entry(&nn->blocked_locks_lru,
2963 ++ nbl = list_first_entry(&reaplist,
2964 + struct nfsd4_blocked_lock, nbl_lru);
2965 + list_del_init(&nbl->nbl_lru);
2966 + posix_unblock_lock(&nbl->nbl_lock);
2967 +diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
2968 +index fa1505292f6c..324a04df3785 100644
2969 +--- a/include/acpi/acpi_bus.h
2970 ++++ b/include/acpi/acpi_bus.h
2971 +@@ -105,6 +105,7 @@ enum acpi_bus_device_type {
2972 + ACPI_BUS_TYPE_THERMAL,
2973 + ACPI_BUS_TYPE_POWER_BUTTON,
2974 + ACPI_BUS_TYPE_SLEEP_BUTTON,
2975 ++ ACPI_BUS_TYPE_ECDT_EC,
2976 + ACPI_BUS_DEVICE_TYPE_COUNT
2977 + };
2978 +
2979 +diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
2980 +index 29c691265b49..14499757338f 100644
2981 +--- a/include/acpi/acpi_drivers.h
2982 ++++ b/include/acpi/acpi_drivers.h
2983 +@@ -58,6 +58,7 @@
2984 + #define ACPI_VIDEO_HID "LNXVIDEO"
2985 + #define ACPI_BAY_HID "LNXIOBAY"
2986 + #define ACPI_DOCK_HID "LNXDOCK"
2987 ++#define ACPI_ECDT_HID "LNXEC"
2988 + /* Quirk for broken IBM BIOSes */
2989 + #define ACPI_SMBUS_IBM_HID "SMBUSIBM"
2990 +
2991 +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
2992 +index 757dc6ffc7ba..1ac457511f4e 100644
2993 +--- a/include/asm-generic/pgtable.h
2994 ++++ b/include/asm-generic/pgtable.h
2995 +@@ -814,6 +814,14 @@ static inline int pmd_write(pmd_t pmd)
2996 + #endif /* __HAVE_ARCH_PMD_WRITE */
2997 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2998 +
2999 ++#ifndef pud_write
3000 ++static inline int pud_write(pud_t pud)
3001 ++{
3002 ++ BUG();
3003 ++ return 0;
3004 ++}
3005 ++#endif /* pud_write */
3006 ++
3007 + #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
3008 + (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
3009 + !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
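The generic pud_write() stub added above moves out of the hugetlb-only header (removed later in this patch) into asm-generic, so non-hugetlb paths can reference it; architectures with writable PUDs are expected to override it. A hypothetical x86-style override, shown for illustration only:

    /* In an architecture's asm/pgtable.h (x86-style, illustrative): */
    #define pud_write pud_write
    static inline int pud_write(pud_t pud)
    {
    	return pud_flags(pud) & _PAGE_RW;
    }
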
3010 +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
3011 +index 75ec9c662268..aeec003a566b 100644
3012 +--- a/include/crypto/if_alg.h
3013 ++++ b/include/crypto/if_alg.h
3014 +@@ -255,6 +255,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
3015 + unsigned int ivsize);
3016 + ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
3017 + int offset, size_t size, int flags);
3018 ++void af_alg_free_resources(struct af_alg_async_req *areq);
3019 + void af_alg_async_cb(struct crypto_async_request *_req, int err);
3020 + unsigned int af_alg_poll(struct file *file, struct socket *sock,
3021 + poll_table *wait);
3022 +diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
3023 +index 1e1908a6b1d6..a992434ded99 100644
3024 +--- a/include/drm/drm_edid.h
3025 ++++ b/include/drm/drm_edid.h
3026 +@@ -360,7 +360,8 @@ void
3027 + drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
3028 + const struct drm_display_mode *mode,
3029 + enum hdmi_quantization_range rgb_quant_range,
3030 +- bool rgb_quant_range_selectable);
3031 ++ bool rgb_quant_range_selectable,
3032 ++ bool is_hdmi2_sink);
3033 +
3034 + /**
3035 + * drm_eld_mnl - Get ELD monitor name length in bytes.
3036 +diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
3037 +index 54dfef70a072..780b1242bf24 100644
3038 +--- a/include/linux/compiler-clang.h
3039 ++++ b/include/linux/compiler-clang.h
3040 +@@ -16,3 +16,6 @@
3041 + * with any version that can compile the kernel
3042 + */
3043 + #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
3044 ++
3045 ++#define randomized_struct_fields_start struct {
3046 ++#define randomized_struct_fields_end };
3047 +diff --git a/include/linux/fs.h b/include/linux/fs.h
3048 +index 885266aae2d7..440281f8564d 100644
3049 +--- a/include/linux/fs.h
3050 ++++ b/include/linux/fs.h
3051 +@@ -3069,7 +3069,8 @@ static inline int vfs_lstat(const char __user *name, struct kstat *stat)
3052 + static inline int vfs_fstatat(int dfd, const char __user *filename,
3053 + struct kstat *stat, int flags)
3054 + {
3055 +- return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
3056 ++ return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
3057 ++ stat, STATX_BASIC_STATS);
3058 + }
3059 + static inline int vfs_fstat(int fd, struct kstat *stat)
3060 + {
3061 +@@ -3175,6 +3176,20 @@ static inline bool vma_is_dax(struct vm_area_struct *vma)
3062 + return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
3063 + }
3064 +
3065 ++static inline bool vma_is_fsdax(struct vm_area_struct *vma)
3066 ++{
3067 ++ struct inode *inode;
3068 ++
3069 ++ if (!vma->vm_file)
3070 ++ return false;
3071 ++ if (!vma_is_dax(vma))
3072 ++ return false;
3073 ++ inode = file_inode(vma->vm_file);
3074 ++ if (inode->i_mode == S_IFCHR)
3075 ++ return false; /* device-dax */
3076 ++ return true;
3077 ++}
3078 ++
3079 + static inline int iocb_flags(struct file *file)
3080 + {
3081 + int res = 0;
3082 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
3083 +index fbf5b31d47ee..82a25880714a 100644
3084 +--- a/include/linux/hugetlb.h
3085 ++++ b/include/linux/hugetlb.h
3086 +@@ -239,14 +239,6 @@ static inline int pgd_write(pgd_t pgd)
3087 + }
3088 + #endif
3089 +
3090 +-#ifndef pud_write
3091 +-static inline int pud_write(pud_t pud)
3092 +-{
3093 +- BUG();
3094 +- return 0;
3095 +-}
3096 +-#endif
3097 +-
3098 + #define HUGETLB_ANON_FILE "anon_hugepage"
3099 +
3100 + enum {
3101 +diff --git a/include/linux/migrate.h b/include/linux/migrate.h
3102 +index 895ec0c4942e..a2246cf670ba 100644
3103 +--- a/include/linux/migrate.h
3104 ++++ b/include/linux/migrate.h
3105 +@@ -54,7 +54,7 @@ static inline struct page *new_page_nodemask(struct page *page,
3106 + new_page = __alloc_pages_nodemask(gfp_mask, order,
3107 + preferred_nid, nodemask);
3108 +
3109 +- if (new_page && PageTransHuge(page))
3110 ++ if (new_page && PageTransHuge(new_page))
3111 + prep_transhuge_page(new_page);
3112 +
3113 + return new_page;
3114 +diff --git a/include/linux/mm.h b/include/linux/mm.h
3115 +index 43edf659453b..db647d428100 100644
3116 +--- a/include/linux/mm.h
3117 ++++ b/include/linux/mm.h
3118 +@@ -367,6 +367,7 @@ enum page_entry_size {
3119 + struct vm_operations_struct {
3120 + void (*open)(struct vm_area_struct * area);
3121 + void (*close)(struct vm_area_struct * area);
3122 ++ int (*split)(struct vm_area_struct * area, unsigned long addr);
3123 + int (*mremap)(struct vm_area_struct * area);
3124 + int (*fault)(struct vm_fault *vmf);
3125 + int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
3126 +@@ -1367,6 +1368,19 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
3127 + unsigned int gup_flags, struct page **pages, int *locked);
3128 + long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3129 + struct page **pages, unsigned int gup_flags);
3130 ++#ifdef CONFIG_FS_DAX
3131 ++long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
3132 ++ unsigned int gup_flags, struct page **pages,
3133 ++ struct vm_area_struct **vmas);
3134 ++#else
3135 ++static inline long get_user_pages_longterm(unsigned long start,
3136 ++ unsigned long nr_pages, unsigned int gup_flags,
3137 ++ struct page **pages, struct vm_area_struct **vmas)
3138 ++{
3139 ++ return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
3140 ++}
3141 ++#endif /* CONFIG_FS_DAX */
3142 ++
3143 + int get_user_pages_fast(unsigned long start, int nr_pages, int write,
3144 + struct page **pages);
3145 +
3146 +diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
3147 +index 90fc490f973f..821f71a2e48f 100644
3148 +--- a/include/uapi/linux/bcache.h
3149 ++++ b/include/uapi/linux/bcache.h
3150 +@@ -91,7 +91,7 @@ PTR_FIELD(PTR_GEN, 0, 8)
3151 +
3152 + #define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
3153 +
3154 +-#define PTR(gen, offset, dev) \
3155 ++#define MAKE_PTR(gen, offset, dev) \
3156 + ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
3157 +
3158 + /* Bkey utility code */
3159 +diff --git a/mm/frame_vector.c b/mm/frame_vector.c
3160 +index 2f98df0d460e..297c7238f7d4 100644
3161 +--- a/mm/frame_vector.c
3162 ++++ b/mm/frame_vector.c
3163 +@@ -53,6 +53,18 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
3164 + ret = -EFAULT;
3165 + goto out;
3166 + }
3167 ++
3168 ++ /*
3169 ++ * While get_vaddr_frames() could be used for transient (kernel
3170 ++ * controlled lifetime) pinning of memory pages, all current
3171 ++ * users establish long term (userspace controlled lifetime)
3172 ++ * page pinning. Treat get_vaddr_frames() like
3173 ++ * get_user_pages_longterm() and disallow it for filesystem-dax
3174 ++ * mappings.
3175 ++ */
3176 ++ if (vma_is_fsdax(vma))
3177 ++ return -EOPNOTSUPP;
3178 ++
3179 + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
3180 + vec->got_ref = true;
3181 + vec->is_pfns = false;
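The comment block in the hunk above states the policy: pins whose lifetime is controlled by userspace must not target filesystem-DAX pages, since the filesystem may want to reclaim the backing blocks. A sketch of the same check in a hypothetical driver; only vma_is_fsdax() comes from this patch, the rest is illustrative:

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Hypothetical: refuse a long-lived pin when the VMA is fs-dax. */
    static int example_check_longterm_pin(unsigned long addr)
    {
    	struct vm_area_struct *vma;
    	int ret = 0;

    	down_read(&current->mm->mmap_sem);	/* 4.14 still uses mmap_sem */
    	vma = find_vma(current->mm, addr);
    	if (!vma || vma_is_fsdax(vma))
    		ret = -EOPNOTSUPP;		/* same errno as above */
    	up_read(&current->mm->mmap_sem);
    	return ret;
    }
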
3182 +diff --git a/mm/gup.c b/mm/gup.c
3183 +index b2b4d4263768..165ba2174c75 100644
3184 +--- a/mm/gup.c
3185 ++++ b/mm/gup.c
3186 +@@ -1095,6 +1095,70 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
3187 + }
3188 + EXPORT_SYMBOL(get_user_pages);
3189 +
3190 ++#ifdef CONFIG_FS_DAX
3191 ++/*
3192 ++ * This is the same as get_user_pages() in that it assumes we are
3193 ++ * operating on the current task's mm, but it goes further to validate
3194 ++ * that the vmas associated with the address range are suitable for
3195 ++ * longterm elevated page reference counts. For example, filesystem-dax
3196 ++ * mappings are subject to the lifetime enforced by the filesystem and
3197 ++ * we need guarantees that longterm users like RDMA and V4L2 only
3198 ++ * establish mappings that have a kernel enforced revocation mechanism.
3199 ++ *
3200 ++ * "longterm" == userspace controlled elevated page count lifetime.
3201 ++ * Contrast this to iov_iter_get_pages() usages which are transient.
3202 ++ */
3203 ++long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
3204 ++ unsigned int gup_flags, struct page **pages,
3205 ++ struct vm_area_struct **vmas_arg)
3206 ++{
3207 ++ struct vm_area_struct **vmas = vmas_arg;
3208 ++ struct vm_area_struct *vma_prev = NULL;
3209 ++ long rc, i;
3210 ++
3211 ++ if (!pages)
3212 ++ return -EINVAL;
3213 ++
3214 ++ if (!vmas) {
3215 ++ vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
3216 ++ GFP_KERNEL);
3217 ++ if (!vmas)
3218 ++ return -ENOMEM;
3219 ++ }
3220 ++
3221 ++ rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
3222 ++
3223 ++ for (i = 0; i < rc; i++) {
3224 ++ struct vm_area_struct *vma = vmas[i];
3225 ++
3226 ++ if (vma == vma_prev)
3227 ++ continue;
3228 ++
3229 ++ vma_prev = vma;
3230 ++
3231 ++ if (vma_is_fsdax(vma))
3232 ++ break;
3233 ++ }
3234 ++
3235 ++ /*
3236 ++ * Either get_user_pages() failed, or the vma validation
3237 ++ * succeeded; in either case we don't need to put_page() before
3238 ++ * returning.
3239 ++ */
3240 ++ if (i >= rc)
3241 ++ goto out;
3242 ++
3243 ++ for (i = 0; i < rc; i++)
3244 ++ put_page(pages[i]);
3245 ++ rc = -EOPNOTSUPP;
3246 ++out:
3247 ++ if (vmas != vmas_arg)
3248 ++ kfree(vmas);
3249 ++ return rc;
3250 ++}
3251 ++EXPORT_SYMBOL(get_user_pages_longterm);
3252 ++#endif /* CONFIG_FS_DAX */
3253 ++
3254 + /**
3255 + * populate_vma_page_range() - populate a range of pages in the vma.
3256 + * @vma: target vma
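Since the comment above spells out the intended users (RDMA, V4L2 and similar long-lived pins), a short usage sketch may help. Only get_user_pages_longterm() itself is introduced by this patch; the surrounding driver code is hypothetical:

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Hypothetical caller (e.g. an RDMA-style driver) using the new API. */
    static long example_longterm_pin(unsigned long start, unsigned long npages,
    				 struct page **pages)
    {
    	long pinned, i;

    	down_read(&current->mm->mmap_sem);
    	pinned = get_user_pages_longterm(start, npages, FOLL_WRITE,
    					 pages, NULL);
    	up_read(&current->mm->mmap_sem);
    	if (pinned < 0)
    		return pinned;	/* -EOPNOTSUPP for fs-dax VMAs */

    	/* ... program hardware with the pinned pages ... */

    	for (i = 0; i < pinned; i++)
    		put_page(pages[i]);
    	return pinned;
    }
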
3257 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
3258 +index 1981ed697dab..eba34cdfc3e5 100644
3259 +--- a/mm/huge_memory.c
3260 ++++ b/mm/huge_memory.c
3261 +@@ -842,20 +842,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
3262 + #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
3263 +
3264 + static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
3265 +- pmd_t *pmd)
3266 ++ pmd_t *pmd, int flags)
3267 + {
3268 + pmd_t _pmd;
3269 +
3270 +- /*
3271 +- * We should set the dirty bit only for FOLL_WRITE but for now
3272 +- * the dirty bit in the pmd is meaningless. And if the dirty
3273 +- * bit will become meaningful and we'll only set it with
3274 +- * FOLL_WRITE, an atomic set_bit will be required on the pmd to
3275 +- * set the young bit, instead of the current set_pmd_at.
3276 +- */
3277 +- _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
3278 ++ _pmd = pmd_mkyoung(*pmd);
3279 ++ if (flags & FOLL_WRITE)
3280 ++ _pmd = pmd_mkdirty(_pmd);
3281 + if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
3282 +- pmd, _pmd, 1))
3283 ++ pmd, _pmd, flags & FOLL_WRITE))
3284 + update_mmu_cache_pmd(vma, addr, pmd);
3285 + }
3286 +
3287 +@@ -884,7 +879,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
3288 + return NULL;
3289 +
3290 + if (flags & FOLL_TOUCH)
3291 +- touch_pmd(vma, addr, pmd);
3292 ++ touch_pmd(vma, addr, pmd, flags);
3293 +
3294 + /*
3295 + * device mapped pages can only be returned if the
3296 +@@ -995,20 +990,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
3297 +
3298 + #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
3299 + static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
3300 +- pud_t *pud)
3301 ++ pud_t *pud, int flags)
3302 + {
3303 + pud_t _pud;
3304 +
3305 +- /*
3306 +- * We should set the dirty bit only for FOLL_WRITE but for now
3307 +- * the dirty bit in the pud is meaningless. And if the dirty
3308 +- * bit will become meaningful and we'll only set it with
3309 +- * FOLL_WRITE, an atomic set_bit will be required on the pud to
3310 +- * set the young bit, instead of the current set_pud_at.
3311 +- */
3312 +- _pud = pud_mkyoung(pud_mkdirty(*pud));
3313 ++ _pud = pud_mkyoung(*pud);
3314 ++ if (flags & FOLL_WRITE)
3315 ++ _pud = pud_mkdirty(_pud);
3316 + if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
3317 +- pud, _pud, 1))
3318 ++ pud, _pud, flags & FOLL_WRITE))
3319 + update_mmu_cache_pud(vma, addr, pud);
3320 + }
3321 +
3322 +@@ -1031,7 +1021,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
3323 + return NULL;
3324 +
3325 + if (flags & FOLL_TOUCH)
3326 +- touch_pud(vma, addr, pud);
3327 ++ touch_pud(vma, addr, pud, flags);
3328 +
3329 + /*
3330 + * device mapped pages can only be returned if the
3331 +@@ -1407,7 +1397,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
3332 + page = pmd_page(*pmd);
3333 + VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
3334 + if (flags & FOLL_TOUCH)
3335 +- touch_pmd(vma, addr, pmd);
3336 ++ touch_pmd(vma, addr, pmd, flags);
3337 + if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
3338 + /*
3339 + * We don't mlock() pte-mapped THPs. This way we can avoid
3340 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3341 +index 2d2ff5e8bf2b..c539941671b4 100644
3342 +--- a/mm/hugetlb.c
3343 ++++ b/mm/hugetlb.c
3344 +@@ -3125,6 +3125,13 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3345 + }
3346 + }
3347 +
3348 ++static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3349 ++{
3350 ++ if (addr & ~(huge_page_mask(hstate_vma(vma))))
3351 ++ return -EINVAL;
3352 ++ return 0;
3353 ++}
3354 ++
3355 + /*
3356 + * We cannot handle pagefaults against hugetlb pages at all. They cause
3357 + * handle_mm_fault() to try to instantiate regular-sized pages in the
3358 +@@ -3141,6 +3148,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
3359 + .fault = hugetlb_vm_op_fault,
3360 + .open = hugetlb_vm_op_open,
3361 + .close = hugetlb_vm_op_close,
3362 ++ .split = hugetlb_vm_op_split,
3363 + };
3364 +
3365 + static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3366 +@@ -4617,7 +4625,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
3367 + pte_t *pte = NULL;
3368 +
3369 + pgd = pgd_offset(mm, addr);
3370 +- p4d = p4d_offset(pgd, addr);
3371 ++ p4d = p4d_alloc(mm, pgd, addr);
3372 ++ if (!p4d)
3373 ++ return NULL;
3374 + pud = pud_alloc(mm, p4d, addr);
3375 + if (pud) {
3376 + if (sz == PUD_SIZE) {
3377 +diff --git a/mm/madvise.c b/mm/madvise.c
3378 +index 375cf32087e4..751e97aa2210 100644
3379 +--- a/mm/madvise.c
3380 ++++ b/mm/madvise.c
3381 +@@ -276,15 +276,14 @@ static long madvise_willneed(struct vm_area_struct *vma,
3382 + {
3383 + struct file *file = vma->vm_file;
3384 +
3385 ++ *prev = vma;
3386 + #ifdef CONFIG_SWAP
3387 + if (!file) {
3388 +- *prev = vma;
3389 + force_swapin_readahead(vma, start, end);
3390 + return 0;
3391 + }
3392 +
3393 + if (shmem_mapping(file->f_mapping)) {
3394 +- *prev = vma;
3395 + force_shm_swapin_readahead(vma, start, end,
3396 + file->f_mapping);
3397 + return 0;
3398 +@@ -299,7 +298,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
3399 + return 0;
3400 + }
3401 +
3402 +- *prev = vma;
3403 + start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
3404 + if (end > vma->vm_end)
3405 + end = vma->vm_end;
3406 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3407 +index 661f046ad318..53f7c919b916 100644
3408 +--- a/mm/memcontrol.c
3409 ++++ b/mm/memcontrol.c
3410 +@@ -6044,7 +6044,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
3411 + memcg_check_events(memcg, page);
3412 +
3413 + if (!mem_cgroup_is_root(memcg))
3414 +- css_put(&memcg->css);
3415 ++ css_put_many(&memcg->css, nr_entries);
3416 + }
3417 +
3418 + /**
3419 +diff --git a/mm/mmap.c b/mm/mmap.c
3420 +index 680506faceae..476e810cf100 100644
3421 +--- a/mm/mmap.c
3422 ++++ b/mm/mmap.c
3423 +@@ -2540,9 +2540,11 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
3424 + struct vm_area_struct *new;
3425 + int err;
3426 +
3427 +- if (is_vm_hugetlb_page(vma) && (addr &
3428 +- ~(huge_page_mask(hstate_vma(vma)))))
3429 +- return -EINVAL;
3430 ++ if (vma->vm_ops && vma->vm_ops->split) {
3431 ++ err = vma->vm_ops->split(vma, addr);
3432 ++ if (err)
3433 ++ return err;
3434 ++ }
3435 +
3436 + new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
3437 + if (!new)
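__split_vma() above now delegates the previously hugetlb-only alignment check to a generic ->split() callback (added to vm_operations_struct earlier in this patch and implemented by hugetlb_vm_op_split()). Any mapping with split constraints can hook it; a hypothetical driver example, illustrative only:

    #include <linux/mm.h>

    /* Hypothetical: a driver VMA that must stay PMD-aligned when split. */
    static int example_vm_split(struct vm_area_struct *vma, unsigned long addr)
    {
    	if (addr & ~PMD_MASK)
    		return -EINVAL;		/* munmap()/mprotect() at this addr fails */
    	return 0;
    }

    static const struct vm_operations_struct example_vm_ops = {
    	.split	= example_vm_split,
    	/* .fault, .open, .close as usual */
    };
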
3438 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
3439 +index dee0f75c3013..18c5b356b505 100644
3440 +--- a/mm/oom_kill.c
3441 ++++ b/mm/oom_kill.c
3442 +@@ -532,7 +532,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
3443 + */
3444 + set_bit(MMF_UNSTABLE, &mm->flags);
3445 +
3446 +- tlb_gather_mmu(&tlb, mm, 0, -1);
3447 + for (vma = mm->mmap ; vma; vma = vma->vm_next) {
3448 + if (!can_madv_dontneed_vma(vma))
3449 + continue;
3450 +@@ -547,11 +546,13 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
3451 + * we do not want to block exit_mmap by keeping mm ref
3452 + * count elevated without a good reason.
3453 + */
3454 +- if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
3455 ++ if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
3456 ++ tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
3457 + unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
3458 + NULL);
3459 ++ tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
3460 ++ }
3461 + }
3462 +- tlb_finish_mmu(&tlb, 0, -1);
3463 + pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
3464 + task_pid_nr(tsk), tsk->comm,
3465 + K(get_mm_counter(mm, MM_ANONPAGES)),
3466 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
3467 +index 82a6270c9743..d51c2087c498 100644
3468 +--- a/mm/page_alloc.c
3469 ++++ b/mm/page_alloc.c
3470 +@@ -2487,10 +2487,6 @@ void drain_all_pages(struct zone *zone)
3471 + if (WARN_ON_ONCE(!mm_percpu_wq))
3472 + return;
3473 +
3474 +- /* Workqueues cannot recurse */
3475 +- if (current->flags & PF_WQ_WORKER)
3476 +- return;
3477 +-
3478 + /*
3479 + * Do not drain if one is already in progress unless it's specific to
3480 + * a zone. Such callers are primarily CMA and memory hotplug and need
3481 +@@ -7591,11 +7587,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
3482 +
3483 + /*
3484 + * In case of -EBUSY, we'd like to know which page causes problem.
3485 +- * So, just fall through. We will check it in test_pages_isolated().
3486 ++ * So, just fall through. test_pages_isolated() has a tracepoint
3487 ++ * which will report the busy page.
3488 ++ *
3489 ++ * It is possible that busy pages could become available before
3490 ++ * the call to test_pages_isolated, and the range will actually be
3491 ++ * allocated. So, if we fall through be sure to clear ret so that
3492 ++ * -EBUSY is not accidentally used or returned to caller.
3493 + */
3494 + ret = __alloc_contig_migrate_range(&cc, start, end);
3495 + if (ret && ret != -EBUSY)
3496 + goto done;
3497 ++	ret = 0;
3498 +
3499 + /*
3500 + * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
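The expanded comment above is worth restating: __alloc_contig_migrate_range()'s -EBUSY is advisory because test_pages_isolated() re-checks the range afterwards, so the stale error must be cleared on the fall-through path or a range that did become free would still report failure. The shape of the pattern, with hypothetical names:

    #include <linux/types.h>

    int example_migrate_out(unsigned long start, unsigned long end);	 /* hypothetical */
    bool example_range_is_isolated(unsigned long start, unsigned long end); /* hypothetical */

    static int example_claim_range(unsigned long start, unsigned long end)
    {
    	int ret;

    	ret = example_migrate_out(start, end);	/* may return -EBUSY */
    	if (ret && ret != -EBUSY)
    		return ret;			/* hard failure */
    	ret = 0;	/* -EBUSY is advisory; the recheck below decides */

    	if (!example_range_is_isolated(start, end))
    		return -EBUSY;
    	return ret;
    }
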
3501 +diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
3502 +index 620e81169659..4ac095118717 100644
3503 +--- a/security/apparmor/include/audit.h
3504 ++++ b/security/apparmor/include/audit.h
3505 +@@ -121,17 +121,19 @@ struct apparmor_audit_data {
3506 + /* these entries require a custom callback fn */
3507 + struct {
3508 + struct aa_label *peer;
3509 +- struct {
3510 +- const char *target;
3511 +- kuid_t ouid;
3512 +- } fs;
3513 ++ union {
3514 ++ struct {
3515 ++ const char *target;
3516 ++ kuid_t ouid;
3517 ++ } fs;
3518 ++ int signal;
3519 ++ };
3520 + };
3521 + struct {
3522 + struct aa_profile *profile;
3523 + const char *ns;
3524 + long pos;
3525 + } iface;
3526 +- int signal;
3527 + struct {
3528 + int rlim;
3529 + unsigned long max;