
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.12 commit in: /
Date: Wed, 26 May 2021 12:08:30
Message-Id: 1622030886.3e880daf8c1c2089abdf0b092cb6143b35fddb3c.mpagano@gentoo
commit: 3e880daf8c1c2089abdf0b092cb6143b35fddb3c
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 26 12:08:06 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 26 12:08:06 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3e880daf

Linux patch 5.12.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1006_linux-5.12.7.patch | 5240 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5244 insertions(+)

diff --git a/0000_README b/0000_README
index 528cc11..22c40ca 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch: 1005_linux-5.12.6.patch
From: http://www.kernel.org
Desc: Linux 5.12.6

+Patch: 1006_linux-5.12.7.patch
+From: http://www.kernel.org
+Desc: Linux 5.12.7
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-5.12.7.patch b/1006_linux-5.12.7.patch
new file mode 100644
index 0000000..6bf34f2
--- /dev/null
+++ b/1006_linux-5.12.7.patch
@@ -0,0 +1,5240 @@
+diff --git a/Documentation/powerpc/syscall64-abi.rst b/Documentation/powerpc/syscall64-abi.rst
+index dabee3729e5a5..56490c4c0c07a 100644
+--- a/Documentation/powerpc/syscall64-abi.rst
++++ b/Documentation/powerpc/syscall64-abi.rst
+@@ -109,6 +109,16 @@ auxiliary vector.
+
+ scv 0 syscalls will always behave as PPC_FEATURE2_HTM_NOSC.
+
++ptrace
++------
++When ptracing system calls (PTRACE_SYSCALL), the pt_regs.trap value contains
++the system call type that can be used to distinguish between sc and scv 0
++system calls, and the different register conventions can be accounted for.
++
++If the value of (pt_regs.trap & 0xfff0) is 0xc00 then the system call was
++performed with the sc instruction, if it is 0x3000 then the system call was
++performed with the scv 0 instruction.
++
+ vsyscall
+ ========
+
+diff --git a/Makefile b/Makefile
+index dd021135838b8..6a73dee7c2219 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+
+diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
+index 2416a9f915330..c6f9e7b9f7cb2 100644
+--- a/arch/openrisc/kernel/setup.c
++++ b/arch/openrisc/kernel/setup.c
+@@ -278,6 +278,8 @@ void calibrate_delay(void)
+ pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
++
++ of_node_put(cpu);
+ }
+
+ void __init setup_arch(char **cmdline_p)
+diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
+index bf9b2310fc936..f3fa02b8838af 100644
+--- a/arch/openrisc/mm/init.c
++++ b/arch/openrisc/mm/init.c
+@@ -75,7 +75,6 @@ static void __init map_ram(void)
+ /* These mark extents of read-only kernel pages...
+ * ...from vmlinux.lds.S
+ */
+- struct memblock_region *region;
+
+ v = PAGE_OFFSET;
+
+@@ -121,7 +120,7 @@ static void __init map_ram(void)
+ }
+
+ printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
+- region->base, region->base + region->size);
++ start, end);
+ }
+ }
+
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index ed6086d57b22e..0c92b01a3c3c1 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -446,6 +446,9 @@
+ */
+ long plpar_hcall_norets(unsigned long opcode, ...);
+
++/* Variant which does not do hcall tracing */
++long plpar_hcall_norets_notrace(unsigned long opcode, ...);
++
+ /**
+ * plpar_hcall: - Make a pseries hypervisor call
+ * @opcode: The hypervisor call to make.
+diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
+index 5d1726bb28e79..bcb7b5f917be6 100644
+--- a/arch/powerpc/include/asm/paravirt.h
++++ b/arch/powerpc/include/asm/paravirt.h
+@@ -28,19 +28,35 @@ static inline u32 yield_count_of(int cpu)
+ return be32_to_cpu(yield_count);
+ }
+
++/*
++ * Spinlock code confers and prods, so don't trace the hcalls because the
++ * tracing code takes spinlocks which can cause recursion deadlocks.
++ *
++ * These calls are made while the lock is not held: the lock slowpath yields if
++ * it can not acquire the lock, and unlock slow path might prod if a waiter has
++ * yielded). So this may not be a problem for simple spin locks because the
++ * tracing does not technically recurse on the lock, but we avoid it anyway.
++ *
++ * However the queued spin lock contended path is more strictly ordered: the
++ * H_CONFER hcall is made after the task has queued itself on the lock, so then
++ * recursing on that lock will cause the task to then queue up again behind the
++ * first instance (or worse: queued spinlocks use tricks that assume a context
++ * never waits on more than one spinlock, so such recursion may cause random
++ * corruption in the lock code).
++ */
+ static inline void yield_to_preempted(int cpu, u32 yield_count)
+ {
+- plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
++ plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
+ }
+
+ static inline void prod_cpu(int cpu)
+ {
+- plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
++ plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
+ }
+
+ static inline void yield_to_any(void)
+ {
+- plpar_hcall_norets(H_CONFER, -1, 0);
++ plpar_hcall_norets_notrace(H_CONFER, -1, 0);
+ }
+ #else
+ static inline bool is_shared_processor(void)
+diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
+index 1499e928ea6a6..5d8d397e928a0 100644
+--- a/arch/powerpc/include/asm/ptrace.h
++++ b/arch/powerpc/include/asm/ptrace.h
+@@ -19,6 +19,7 @@
+ #ifndef _ASM_POWERPC_PTRACE_H
+ #define _ASM_POWERPC_PTRACE_H
+
++#include <linux/err.h>
+ #include <uapi/asm/ptrace.h>
+ #include <asm/asm-const.h>
+
+@@ -152,25 +153,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
+ long do_syscall_trace_enter(struct pt_regs *regs);
+ void do_syscall_trace_leave(struct pt_regs *regs);
+
+-#define kernel_stack_pointer(regs) ((regs)->gpr[1])
+-static inline int is_syscall_success(struct pt_regs *regs)
+-{
+- return !(regs->ccr & 0x10000000);
+-}
+-
+-static inline long regs_return_value(struct pt_regs *regs)
+-{
+- if (is_syscall_success(regs))
+- return regs->gpr[3];
+- else
+- return -regs->gpr[3];
+-}
+-
+-static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+-{
+- regs->gpr[3] = rc;
+-}
+-
+ #ifdef __powerpc64__
+ #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
+ #else
+@@ -252,6 +234,31 @@ static inline void set_trap_norestart(struct pt_regs *regs)
+ regs->trap |= 0x10;
+ }
+
++#define kernel_stack_pointer(regs) ((regs)->gpr[1])
++static inline int is_syscall_success(struct pt_regs *regs)
++{
++ if (trap_is_scv(regs))
++ return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
++ else
++ return !(regs->ccr & 0x10000000);
++}
++
++static inline long regs_return_value(struct pt_regs *regs)
++{
++ if (trap_is_scv(regs))
++ return regs->gpr[3];
++
++ if (is_syscall_success(regs))
++ return regs->gpr[3];
++ else
++ return -regs->gpr[3];
++}
++
++static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
++{
++ regs->gpr[3] = rc;
++}
++
+ #define arch_has_single_step() (1)
+ #define arch_has_block_step() (true)
+ #define ARCH_HAS_USER_SINGLE_STEP_REPORT
+diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
+index fd1b518eed17c..ba0f88f3a30da 100644
+--- a/arch/powerpc/include/asm/syscall.h
++++ b/arch/powerpc/include/asm/syscall.h
+@@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
+ static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+ {
+- /*
+- * If the system call failed,
+- * regs->gpr[3] contains a positive ERRORCODE.
+- */
+- return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
++ if (trap_is_scv(regs)) {
++ unsigned long error = regs->gpr[3];
++
++ return IS_ERR_VALUE(error) ? error : 0;
++ } else {
++ /*
++ * If the system call failed,
++ * regs->gpr[3] contains a positive ERRORCODE.
++ */
++ return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
++ }
+ }
+
+ static inline long syscall_get_return_value(struct task_struct *task,
+@@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+ {
+- /*
+- * In the general case it's not obvious that we must deal with CCR
+- * here, as the syscall exit path will also do that for us. However
+- * there are some places, eg. the signal code, which check ccr to
+- * decide if the value in r3 is actually an error.
+- */
+- if (error) {
+- regs->ccr |= 0x10000000L;
+- regs->gpr[3] = error;
++ if (trap_is_scv(regs)) {
++ regs->gpr[3] = (long) error ?: val;
+ } else {
+- regs->ccr &= ~0x10000000L;
+- regs->gpr[3] = val;
++ /*
++ * In the general case it's not obvious that we must deal with
++ * CCR here, as the syscall exit path will also do that for us.
++ * However there are some places, eg. the signal code, which
++ * check ccr to decide if the value in r3 is actually an error.
++ */
++ if (error) {
++ regs->ccr |= 0x10000000L;
++ regs->gpr[3] = error;
++ } else {
++ regs->ccr &= ~0x10000000L;
++ regs->gpr[3] = val;
++ }
+ }
+ }
+
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 830fee91b2d99..c914fe8a2c67f 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -369,11 +369,11 @@ void __init early_setup(unsigned long dt_ptr)
+ apply_feature_fixups();
+ setup_feature_keys();
+
+- early_ioremap_setup();
+-
+ /* Initialize the hash table or TLB handling */
+ early_init_mmu();
+
++ early_ioremap_setup();
++
+ /*
+ * After firmware and early platform setup code has set things up,
+ * we note the SPR values for configurable control/performance
+diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
+index 2136e42833af3..8a2b8d64265bc 100644
+--- a/arch/powerpc/platforms/pseries/hvCall.S
++++ b/arch/powerpc/platforms/pseries/hvCall.S
+@@ -102,6 +102,16 @@ END_FTR_SECTION(0, 1); \
+ #define HCALL_BRANCH(LABEL)
+ #endif
+
++_GLOBAL_TOC(plpar_hcall_norets_notrace)
++ HMT_MEDIUM
++
++ mfcr r0
++ stw r0,8(r1)
++ HVSC /* invoke the hypervisor */
++ lwz r0,8(r1)
++ mtcrf 0xff,r0
++ blr /* return r3 = status */
++
+ _GLOBAL_TOC(plpar_hcall_norets)
+ HMT_MEDIUM
+
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index cd38bd421f381..d4aa6a46e1fa6 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -1830,8 +1830,7 @@ void hcall_tracepoint_unregfunc(void)
+
+ /*
+ * Since the tracing code might execute hcalls we need to guard against
+- * recursion. One example of this are spinlocks calling H_YIELD on
+- * shared processor partitions.
++ * recursion.
+ */
+ static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
+
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 78faf9c7e3aed..1f2e5bfb9bb03 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -170,11 +170,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
+ KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
+ endif
+
+-ifdef CONFIG_LTO_CLANG
+-KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
+- -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+-endif
+-
+ # Workaround for a gcc prelease that unfortunately was shipped in a suse release
+ KBUILD_CFLAGS += -Wno-sign-compare
+ #
+@@ -194,7 +189,12 @@ ifdef CONFIG_RETPOLINE
+ endif
+ endif
+
+-KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
++KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
++
++ifdef CONFIG_LTO_CLANG
++KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
++ -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
++endif
+
+ ifdef CONFIG_X86_NEED_RELOCS
+ LDFLAGS_vmlinux := --emit-relocs --discard-none
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index e94874f4bbc1d..ae1fe558a2d88 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -172,11 +172,21 @@ SYM_FUNC_START(startup_32)
+ */
+ call get_sev_encryption_bit
+ xorl %edx, %edx
++#ifdef CONFIG_AMD_MEM_ENCRYPT
+ testl %eax, %eax
+ jz 1f
+ subl $32, %eax /* Encryption bit is always above bit 31 */
+ bts %eax, %edx /* Set encryption mask for page tables */
++ /*
++ * Mark SEV as active in sev_status so that startup32_check_sev_cbit()
++ * will do a check. The sev_status memory will be fully initialized
++ * with the contents of MSR_AMD_SEV_STATUS later in
++ * set_sev_encryption_mask(). For now it is sufficient to know that SEV
++ * is active.
++ */
++ movl $1, rva(sev_status)(%ebp)
+ 1:
++#endif
+
+ /* Initialize Page tables to 0 */
+ leal rva(pgtable)(%ebx), %edi
+@@ -261,6 +271,9 @@ SYM_FUNC_START(startup_32)
+ movl %esi, %edx
+ 1:
+ #endif
++ /* Check if the C-bit position is correct when SEV is active */
++ call startup32_check_sev_cbit
++
+ pushl $__KERNEL_CS
+ pushl %eax
+
+@@ -786,6 +799,78 @@ SYM_DATA_START_LOCAL(loaded_image_proto)
+ SYM_DATA_END(loaded_image_proto)
+ #endif
+
++/*
++ * Check for the correct C-bit position when the startup_32 boot-path is used.
++ *
++ * The check makes use of the fact that all memory is encrypted when paging is
++ * disabled. The function creates 64 bits of random data using the RDRAND
++ * instruction. RDRAND is mandatory for SEV guests, so always available. If the
++ * hypervisor violates that the kernel will crash right here.
++ *
++ * The 64 bits of random data are stored to a memory location and at the same
++ * time kept in the %eax and %ebx registers. Since encryption is always active
++ * when paging is off the random data will be stored encrypted in main memory.
++ *
++ * Then paging is enabled. When the C-bit position is correct all memory is
++ * still mapped encrypted and comparing the register values with memory will
++ * succeed. An incorrect C-bit position will map all memory unencrypted, so that
++ * the compare will use the encrypted random data and fail.
++ */
++ __HEAD
++ .code32
++SYM_FUNC_START(startup32_check_sev_cbit)
++#ifdef CONFIG_AMD_MEM_ENCRYPT
++ pushl %eax
++ pushl %ebx
++ pushl %ecx
++ pushl %edx
++
++ /* Check for non-zero sev_status */
++ movl rva(sev_status)(%ebp), %eax
++ testl %eax, %eax
++ jz 4f
++
++ /*
++ * Get two 32-bit random values - Don't bail out if RDRAND fails
++ * because it is better to prevent forward progress if no random value
++ * can be gathered.
++ */
++1: rdrand %eax
++ jnc 1b
++2: rdrand %ebx
++ jnc 2b
++
++ /* Store to memory and keep it in the registers */
++ movl %eax, rva(sev_check_data)(%ebp)
++ movl %ebx, rva(sev_check_data+4)(%ebp)
++
++ /* Enable paging to see if encryption is active */
++ movl %cr0, %edx /* Backup %cr0 in %edx */
++ movl $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
++ movl %ecx, %cr0
++
++ cmpl %eax, rva(sev_check_data)(%ebp)
++ jne 3f
++ cmpl %ebx, rva(sev_check_data+4)(%ebp)
++ jne 3f
++
++ movl %edx, %cr0 /* Restore previous %cr0 */
++
++ jmp 4f
++
++3: /* Check failed - hlt the machine */
++ hlt
++ jmp 3b
++
++4:
++ popl %edx
++ popl %ecx
++ popl %ebx
++ popl %eax
++#endif
++ ret
++SYM_FUNC_END(startup32_check_sev_cbit)
++
+ /*
+ * Stack and heap for uncompression
+ */
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index c57ec8e279078..4c18e7fb58f58 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -5741,7 +5741,7 @@ __init int intel_pmu_init(void)
+ * Check all LBT MSR here.
+ * Disable LBR access if any LBR MSRs can not be accessed.
+ */
+- if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
++ if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+ x86_pmu.lbr_nr = 0;
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
+index 387b716698187..ecb20b17b7df6 100644
+--- a/arch/x86/kernel/sev-es-shared.c
++++ b/arch/x86/kernel/sev-es-shared.c
+@@ -63,6 +63,7 @@ static bool sev_es_negotiate_protocol(void)
+
+ static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
+ {
++ ghcb->save.sw_exit_code = 0;
+ memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+ }
+
+diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
+index 04a780abb512d..e0cdab7cb632b 100644
+--- a/arch/x86/kernel/sev-es.c
++++ b/arch/x86/kernel/sev-es.c
+@@ -191,8 +191,18 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+ if (unlikely(data->ghcb_active)) {
+ /* GHCB is already in use - save its contents */
+
+- if (unlikely(data->backup_ghcb_active))
+- return NULL;
++ if (unlikely(data->backup_ghcb_active)) {
++ /*
++ * Backup-GHCB is also already in use. There is no way
++ * to continue here so just kill the machine. To make
++ * panic() work, mark GHCBs inactive so that messages
++ * can be printed out.
++ */
++ data->ghcb_active = false;
++ data->backup_ghcb_active = false;
++
++ panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
++ }
+
+ /* Mark backup_ghcb active before writing to it */
+ data->backup_ghcb_active = true;
+@@ -209,24 +219,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+ return ghcb;
+ }
+
+-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
+-{
+- struct sev_es_runtime_data *data;
+- struct ghcb *ghcb;
+-
+- data = this_cpu_read(runtime_data);
+- ghcb = &data->ghcb_page;
+-
+- if (state->ghcb) {
+- /* Restore GHCB from Backup */
+- *ghcb = *state->ghcb;
+- data->backup_ghcb_active = false;
+- state->ghcb = NULL;
+- } else {
+- data->ghcb_active = false;
+- }
+-}
+-
+ /* Needed in vc_early_forward_exception */
+ void do_early_exception(struct pt_regs *regs, int trapnr);
+
+@@ -296,31 +288,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
+ u16 d2;
+ u8 d1;
+
+- /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+- if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
+- memcpy(dst, buf, size);
+- return ES_OK;
+- }
+-
++ /*
++ * This function uses __put_user() independent of whether kernel or user
++ * memory is accessed. This works fine because __put_user() does no
++ * sanity checks of the pointer being accessed. All that it does is
++ * to report when the access failed.
++ *
++ * Also, this function runs in atomic context, so __put_user() is not
++ * allowed to sleep. The page-fault handler detects that it is running
++ * in atomic context and will not try to take mmap_sem and handle the
++ * fault, so additional pagefault_enable()/disable() calls are not
++ * needed.
++ *
++ * The access can't be done via copy_to_user() here because
++ * vc_write_mem() must not use string instructions to access unsafe
++ * memory. The reason is that MOVS is emulated by the #VC handler by
++ * splitting the move up into a read and a write and taking a nested #VC
++ * exception on whatever of them is the MMIO access. Using string
++ * instructions here would cause infinite nesting.
++ */
+ switch (size) {
+ case 1:
+ memcpy(&d1, buf, 1);
+- if (put_user(d1, target))
++ if (__put_user(d1, target))
+ goto fault;
+ break;
+ case 2:
+ memcpy(&d2, buf, 2);
+- if (put_user(d2, target))
++ if (__put_user(d2, target))
+ goto fault;
+ break;
+ case 4:
+ memcpy(&d4, buf, 4);
+- if (put_user(d4, target))
++ if (__put_user(d4, target))
+ goto fault;
+ break;
+ case 8:
+ memcpy(&d8, buf, 8);
+- if (put_user(d8, target))
++ if (__put_user(d8, target))
+ goto fault;
+ break;
+ default:
+@@ -351,30 +356,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
+ u16 d2;
+ u8 d1;
+
+- /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+- if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
+- memcpy(buf, src, size);
+- return ES_OK;
+- }
+-
++ /*
++ * This function uses __get_user() independent of whether kernel or user
++ * memory is accessed. This works fine because __get_user() does no
++ * sanity checks of the pointer being accessed. All that it does is
++ * to report when the access failed.
++ *
++ * Also, this function runs in atomic context, so __get_user() is not
++ * allowed to sleep. The page-fault handler detects that it is running
++ * in atomic context and will not try to take mmap_sem and handle the
++ * fault, so additional pagefault_enable()/disable() calls are not
++ * needed.
++ *
++ * The access can't be done via copy_from_user() here because
++ * vc_read_mem() must not use string instructions to access unsafe
++ * memory. The reason is that MOVS is emulated by the #VC handler by
++ * splitting the move up into a read and a write and taking a nested #VC
++ * exception on whatever of them is the MMIO access. Using string
++ * instructions here would cause infinite nesting.
++ */
+ switch (size) {
+ case 1:
+- if (get_user(d1, s))
++ if (__get_user(d1, s))
+ goto fault;
+ memcpy(buf, &d1, 1);
+ break;
+ case 2:
+- if (get_user(d2, s))
++ if (__get_user(d2, s))
+ goto fault;
+ memcpy(buf, &d2, 2);
+ break;
+ case 4:
+- if (get_user(d4, s))
++ if (__get_user(d4, s))
+ goto fault;
+ memcpy(buf, &d4, 4);
+ break;
+ case 8:
+- if (get_user(d8, s))
++ if (__get_user(d8, s))
+ goto fault;
+ memcpy(buf, &d8, 8);
+ break;
+@@ -434,6 +452,29 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
+ /* Include code shared with pre-decompression boot stage */
+ #include "sev-es-shared.c"
+
++static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
++{
++ struct sev_es_runtime_data *data;
++ struct ghcb *ghcb;
++
++ data = this_cpu_read(runtime_data);
++ ghcb = &data->ghcb_page;
++
++ if (state->ghcb) {
++ /* Restore GHCB from Backup */
++ *ghcb = *state->ghcb;
++ data->backup_ghcb_active = false;
++ state->ghcb = NULL;
++ } else {
++ /*
++ * Invalidate the GHCB so a VMGEXIT instruction issued
++ * from userspace won't appear to be valid.
++ */
++ vc_ghcb_invalidate(ghcb);
++ data->ghcb_active = false;
++ }
++}
++
+ void noinstr __sev_es_nmi_complete(void)
+ {
+ struct ghcb_state state;
+@@ -1228,6 +1269,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
+ case X86_TRAP_UD:
+ exc_invalid_op(ctxt->regs);
+ break;
++ case X86_TRAP_PF:
++ write_cr2(ctxt->fi.cr2);
++ exc_page_fault(ctxt->regs, error_code);
++ break;
+ case X86_TRAP_AC:
+ exc_alignment_check(ctxt->regs, error_code);
+ break;
+@@ -1257,7 +1302,6 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
+ */
+ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+ {
+- struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+ irqentry_state_t irq_state;
+ struct ghcb_state state;
+ struct es_em_ctxt ctxt;
+@@ -1283,16 +1327,6 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+ */
+
+ ghcb = sev_es_get_ghcb(&state);
+- if (!ghcb) {
+- /*
+- * Mark GHCBs inactive so that panic() is able to print the
+- * message.
+- */
+- data->ghcb_active = false;
+- data->backup_ghcb_active = false;
+-
+- panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+- }
+
+ vc_ghcb_invalidate(ghcb);
+ result = vc_init_em_ctxt(&ctxt, regs, error_code);
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index dc0a337f985b6..8183ddb3700c4 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1276,16 +1276,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ /* Get mfn list */
+ xen_build_dynamic_phys_to_machine();
+
++ /* Work out if we support NX */
++ get_cpu_cap(&boot_cpu_data);
++ x86_configure_nx();
++
+ /*
+ * Set up kernel GDT and segment registers, mainly so that
+ * -fstack-protector code can be executed.
+ */
+ xen_setup_gdt(0);
+
+- /* Work out if we support NX */
+- get_cpu_cap(&boot_cpu_data);
+- x86_configure_nx();
+-
+ /* Determine virtual and physical address sizes */
+ get_cpu_address_sizes(&boot_cpu_data);
+
+diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
+index 9874fc1c815b5..1831099306aa9 100644
+--- a/drivers/cdrom/gdrom.c
++++ b/drivers/cdrom/gdrom.c
+@@ -743,6 +743,13 @@ static const struct blk_mq_ops gdrom_mq_ops = {
+ static int probe_gdrom(struct platform_device *devptr)
+ {
+ int err;
++
++ /*
++ * Ensure our "one" device is initialized properly in case of previous
++ * usages of it
++ */
++ memset(&gd, 0, sizeof(gd));
++
+ /* Start the device */
+ if (gdrom_execute_diagnostic() != 1) {
+ pr_warn("ATA Probe for GDROM failed\n");
+@@ -831,6 +838,8 @@ static int remove_gdrom(struct platform_device *devptr)
+ if (gdrom_major)
+ unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
+ unregister_cdrom(gd.cd_info);
++ kfree(gd.cd_info);
++ kfree(gd.toc);
+
+ return 0;
+ }
+@@ -846,7 +855,7 @@ static struct platform_driver gdrom_driver = {
+ static int __init init_gdrom(void)
+ {
+ int rc;
+- gd.toc = NULL;
++
+ rc = platform_driver_register(&gdrom_driver);
+ if (rc)
+ return rc;
+@@ -862,8 +871,6 @@ static void __exit exit_gdrom(void)
+ {
+ platform_device_unregister(pd);
+ platform_driver_unregister(&gdrom_driver);
+- kfree(gd.toc);
+- kfree(gd.cd_info);
+ }
+
+ module_init(init_gdrom);
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index f264b70c383eb..eadd1eaa2fb54 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -760,7 +760,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+- ret = dma_buf_pin(attach);
++ ret = dmabuf->ops->pin(attach);
+ if (ret)
+ goto err_unlock;
+ }
+@@ -786,7 +786,7 @@ err_attach:
+
+ err_unpin:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+- dma_buf_unpin(attach);
++ dmabuf->ops->unpin(attach);
+
+ err_unlock:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+@@ -843,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
+ __unmap_dma_buf(attach, attach->sgt, attach->dir);
+
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
+- dma_buf_unpin(attach);
++ dmabuf->ops->unpin(attach);
+ dma_resv_unlock(attach->dmabuf->resv);
+ }
+ }
+@@ -956,7 +956,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
+ dma_resv_assert_held(attach->dmabuf->resv);
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+- r = dma_buf_pin(attach);
++ r = attach->dmabuf->ops->pin(attach);
+ if (r)
+ return ERR_PTR(r);
+ }
+@@ -968,7 +968,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+
+ if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+- dma_buf_unpin(attach);
++ attach->dmabuf->ops->unpin(attach);
+
+ if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
+ attach->sgt = sg_table;
+diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
+index d0dee37ad5228..4ceba5ef78958 100644
+--- a/drivers/firmware/arm_scpi.c
++++ b/drivers/firmware/arm_scpi.c
+@@ -552,8 +552,10 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
+
+ ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
+ sizeof(le_clk_id), &rate, sizeof(rate));
++ if (ret)
++ return 0;
+
+- return ret ? ret : le32_to_cpu(rate);
++ return le32_to_cpu(rate);
+ }
+
+ static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
+diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
+index 1bd9e44df7184..05974b760796b 100644
+--- a/drivers/gpio/gpio-tegra186.c
++++ b/drivers/gpio/gpio-tegra186.c
+@@ -444,16 +444,6 @@ static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
+ return 0;
+ }
+
+-static int tegra186_irq_set_affinity(struct irq_data *data,
+- const struct cpumask *dest,
+- bool force)
+-{
+- if (data->parent_data)
+- return irq_chip_set_affinity_parent(data, dest, force);
+-
+- return -EINVAL;
+-}
+-
+ static void tegra186_gpio_irq(struct irq_desc *desc)
+ {
+ struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
+@@ -700,7 +690,6 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
+ gpio->intc.irq_unmask = tegra186_irq_unmask;
+ gpio->intc.irq_set_type = tegra186_irq_set_type;
+ gpio->intc.irq_set_wake = tegra186_irq_set_wake;
+- gpio->intc.irq_set_affinity = tegra186_irq_set_affinity;
+
+ irq = &gpio->gpio.irq;
+ irq->chip = &gpio->intc;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 383c178cf0746..6b14626c148ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -267,7 +267,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
+ *addr += offset & ~PAGE_MASK;
+
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+- num_bytes = num_pages * 8;
++ num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+ AMDGPU_IB_POOL_DELAYED, &job);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 63691deb7df3c..2342c5d216f9b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -1391,9 +1391,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
+@@ -1411,12 +1412,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
+ };
+
+ static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 65db88bb6cbcd..d2c020a91c0be 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -4864,7 +4864,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ /* Enable 3D CGCG/CGLS */
+- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
++ if (enable) {
+ /* write cmd to clear cgcg/cgls ov */
+ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+ /* unset CGCG override */
+@@ -4876,8 +4876,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+ /* enable 3Dcgcg FSM(0x0000363f) */
+ def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
+
+- data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+- RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
++ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
++ data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
++ RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
++ else
++ data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
++
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
+ data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
+ RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+index d345e324837dd..2a27fe26232b6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+@@ -123,6 +123,10 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
+
+ static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 1221aa6b40a9f..d1045a9b37d98 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -1151,7 +1151,6 @@ static int soc15_common_early_init(void *handle)
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+- AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+@@ -1170,7 +1169,6 @@ static int soc15_common_early_init(void *handle)
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+- AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+index 71e2d5e025710..9b33182f3abd5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+@@ -826,10 +826,11 @@ static const struct dc_plane_cap plane_cap = {
+ .fp16 = 16000
+ },
+
++ /* 6:1 downscaling ratio: 1000/6 = 166.666 */
+ .max_downscale_factor = {
+- .argb8888 = 600,
+- .nv12 = 600,
+- .fp16 = 600
++ .argb8888 = 167,
++ .nv12 = 167,
++ .fp16 = 167
+ }
+ };
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+index c494235016e09..00f066f1da0c7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+@@ -843,10 +843,11 @@ static const struct dc_plane_cap plane_cap = {
+ .fp16 = 16000
+ },
+
++ /* 6:1 downscaling ratio: 1000/6 = 166.666 */
+ .max_downscale_factor = {
+- .argb8888 = 600,
+- .nv12 = 600,
+- .fp16 = 600
++ .argb8888 = 167,
++ .nv12 = 167,
++ .fp16 = 167
+ },
+ 64,
+ 64
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+index d03b1975e4178..7d9d591de411b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+@@ -282,10 +282,11 @@ static const struct dc_plane_cap plane_cap = {
+ .nv12 = 16000,
+ .fp16 = 16000
+ },
++ /* 6:1 downscaling ratio: 1000/6 = 166.666 */
+ .max_downscale_factor = {
+- .argb8888 = 600,
+- .nv12 = 600,
+- .fp16 = 600
++ .argb8888 = 167,
++ .nv12 = 167,
++ .fp16 = 167
+ },
+ 16,
+ 16
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+index 43028f3539a6d..76574e2459161 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+@@ -63,6 +63,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
++ GEM_BUG_ON(!list_empty(&obj->mm.link));
++ atomic_inc(&obj->mm.shrink_pin);
+ shrinkable = false;
+ }
+
+diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+index de575fdb033f5..21f08e53889c3 100644
+--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
++++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+@@ -397,7 +397,10 @@ static void emit_batch(struct i915_vma * const vma,
+ gen7_emit_pipeline_invalidate(&cmds);
+ batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+- batch_add(&cmds, 0xffff0000);
++ batch_add(&cmds, 0xffff0000 |
++ ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
++ HIZ_RAW_STALL_OPT_DISABLE :
++ 0));
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+ batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ gen7_emit_pipeline_invalidate(&cmds);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index aa44909344694..19351addb68c4 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -972,12 +972,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ obj->mm.madv = args->madv;
+
+ if (i915_gem_object_has_pages(obj)) {
+- struct list_head *list;
++ unsigned long flags;
+
+- if (i915_gem_object_is_shrinkable(obj)) {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&i915->mm.obj_lock, flags);
++ spin_lock_irqsave(&i915->mm.obj_lock, flags);
++ if (!list_empty(&obj->mm.link)) {
++ struct list_head *list;
+
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ list = &i915->mm.purge_list;
+@@ -985,8 +984,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ list = &i915->mm.shrink_list;
+ list_move_tail(&obj->mm.link, list);
+
+- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
++ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+
+ /* if the object is no longer attached, discard its backing storage */
+diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
+index 3808a753127bc..04109a2a6fd76 100644
+--- a/drivers/gpu/drm/radeon/radeon_gart.c
++++ b/drivers/gpu/drm/radeon/radeon_gart.c
+@@ -301,7 +301,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+ p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+
+ for (i = 0; i < pages; i++, p++) {
+- rdev->gart.pages[p] = pagelist[i];
++ rdev->gart.pages[p] = pagelist ? pagelist[i] :
++ rdev->dummy_page.page;
+ page_base = dma_addr[i];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ page_entry = radeon_gart_get_page_entry(page_base, flags);
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 101a68dc615b6..799ec7a7caa4d 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -153,6 +153,8 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
+
+ swap = &ttm_bo_glob.swap_lru[bo->priority];
+ list_move_tail(&bo->swap, swap);
++ } else {
++ list_del_init(&bo->swap);
+ }
+
+ if (bdev->driver->del_from_lru_notify)
+diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
+index ac4adb44b224d..97ab491d2922c 100644
+--- a/drivers/hwmon/lm80.c
++++ b/drivers/hwmon/lm80.c
+@@ -596,7 +596,6 @@ static int lm80_probe(struct i2c_client *client)
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct lm80_data *data;
+- int rv;
+
+ data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
+ if (!data)
+@@ -609,14 +608,8 @@ static int lm80_probe(struct i2c_client *client)
+ lm80_init_client(client);
+
+ /* A few vars need to be filled upon startup */
+- rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+- if (rv < 0)
+- return rv;
+- data->fan[f_min][0] = rv;
+- rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
+- if (rv < 0)
+- return rv;
+- data->fan[f_min][1] = rv;
++ data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
++ data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, lm80_groups);
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 6ac07911a17bd..5b9022a8c9ece 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -482,6 +482,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
+ list_del(&id_priv->list);
+ cma_dev_put(id_priv->cma_dev);
+ id_priv->cma_dev = NULL;
++ id_priv->id.device = NULL;
+ if (id_priv->id.route.addr.dev_addr.sgid_attr) {
+ rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
+ id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
+@@ -1864,6 +1865,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
+ iw_destroy_cm_id(id_priv->cm_id.iw);
+ }
+ cma_leave_mc_groups(id_priv);
++ rdma_restrack_del(&id_priv->res);
+ cma_release_dev(id_priv);
+ }
+
+@@ -1877,7 +1879,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
+ kfree(id_priv->id.route.path_rec);
+
+ put_net(id_priv->id.route.addr.dev_addr.net);
+- rdma_restrack_del(&id_priv->res);
+ kfree(id_priv);
+ }
+
+@@ -3740,7 +3741,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
+ }
+
+ id_priv->backlog = backlog;
+- if (id->device) {
++ if (id_priv->cma_dev) {
+ if (rdma_cap_ib_cm(id->device, 1)) {
+ ret = cma_ib_listen(id_priv);
+ if (ret)
+diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
+index 9ec6971056fa8..049684880ae03 100644
+--- a/drivers/infiniband/core/uverbs_std_types_device.c
++++ b/drivers/infiniband/core/uverbs_std_types_device.c
+@@ -117,8 +117,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
+ return ret;
+
+ uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
+- if (!uapi_object)
+- return -EINVAL;
++ if (IS_ERR(uapi_object))
++ return PTR_ERR(uapi_object);
+
+ handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
+ out_len, &total);
+@@ -331,6 +331,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
+ if (ret)
+ return ret;
+
++ if (!user_entry_size)
++ return -EINVAL;
++
+ max_entries = uverbs_attr_ptr_get_array_size(
+ attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
+ user_entry_size);
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index 07b8350929cd6..81276b4247f8e 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -630,9 +630,8 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
+ case UVERBS_OBJECT_QP:
+ {
+ struct mlx5_ib_qp *qp = to_mqp(uobj->object);
+- enum ib_qp_type qp_type = qp->ibqp.qp_type;
+
+- if (qp_type == IB_QPT_RAW_PACKET ||
++ if (qp->type == IB_QPT_RAW_PACKET ||
+ (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp =
+ &qp->raw_packet_qp;
+@@ -649,10 +648,9 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
+ sq->tisn) == obj_id);
+ }
+
+- if (qp_type == MLX5_IB_QPT_DCT)
++ if (qp->type == MLX5_IB_QPT_DCT)
+ return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ qp->dct.mdct.mqp.qpn) == obj_id;
+-
+ return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ qp->ibqp.qp_num) == obj_id;
+ }
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 4be7bccefaa40..59ffbbdda3179 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -4655,6 +4655,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
+
+ if (bound) {
+ rdma_roce_rescan_device(&dev->ib_dev);
++ mpi->ibdev->ib_active = true;
+ break;
+ }
+ }
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
+index 17a361b8dbb16..06b556169867a 100644
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -345,14 +345,16 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
+
+ ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
+ &wqe->dma, payload_addr(pkt),
+- payload_size(pkt), to_mem_obj, NULL);
+- if (ret)
++ payload_size(pkt), to_mr_obj, NULL);
++ if (ret) {
++ wqe->status = IB_WC_LOC_PROT_ERR;
+ return COMPST_ERROR;
++ }
+
+ if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
+ return COMPST_COMP_ACK;
+- else
+- return COMPST_UPDATE_COMP;
++
++ return COMPST_UPDATE_COMP;
+ }
+
+ static inline enum comp_state do_atomic(struct rxe_qp *qp,
+@@ -365,11 +367,13 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
+
+ ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
+ &wqe->dma, &atomic_orig,
+- sizeof(u64), to_mem_obj, NULL);
+- if (ret)
++ sizeof(u64), to_mr_obj, NULL);
++ if (ret) {
++ wqe->status = IB_WC_LOC_PROT_ERR;
+ return COMPST_ERROR;
+- else
+- return COMPST_COMP_ACK;
++ }
++
++ return COMPST_COMP_ACK;
+ }
+
+ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
+index 0d758760b9ae7..08e21fa9ec97e 100644
+--- a/drivers/infiniband/sw/rxe/rxe_loc.h
++++ b/drivers/infiniband/sw/rxe/rxe_loc.h
+@@ -72,40 +72,37 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+ /* rxe_mr.c */
+ enum copy_direction {
+- to_mem_obj,
+- from_mem_obj,
++ to_mr_obj,
++ from_mr_obj,
+ };
+
+-void rxe_mem_init_dma(struct rxe_pd *pd,
+- int access, struct rxe_mem *mem);
++void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
+
+-int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
+- u64 length, u64 iova, int access, struct ib_udata *udata,
+- struct rxe_mem *mr);
++int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
++ int access, struct ib_udata *udata, struct rxe_mr *mr);
+
+-int rxe_mem_init_fast(struct rxe_pd *pd,
+- int max_pages, struct rxe_mem *mem);
++int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
+
+-int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
+- int length, enum copy_direction dir, u32 *crcp);
++int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
++ enum copy_direction dir, u32 *crcp);
+
+ int copy_data(struct rxe_pd *pd, int access,
+ struct rxe_dma_info *dma, void *addr, int length,
+ enum copy_direction dir, u32 *crcp);
+
+-void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
++void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
+
+ enum lookup_type {
+ lookup_local,
+ lookup_remote,
+ };
+
+-struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
+- enum lookup_type type);
++struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
++ enum lookup_type type);
+
+-int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
++int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
+
+-void rxe_mem_cleanup(struct rxe_pool_entry *arg);
++void rxe_mr_cleanup(struct rxe_pool_entry *arg);
+
+ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
+index 6e8c41567ba08..9f63947bab123 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mr.c
++++ b/drivers/infiniband/sw/rxe/rxe_mr.c
+@@ -24,16 +24,15 @@ static u8 rxe_get_key(void)
+ return key;
+ }
+
+-int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
++int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
+ {
+- switch (mem->type) {
+- case RXE_MEM_TYPE_DMA:
++ switch (mr->type) {
++ case RXE_MR_TYPE_DMA:
+ return 0;
+
+- case RXE_MEM_TYPE_MR:
+- if (iova < mem->iova ||
+- length > mem->length ||
+- iova > mem->iova + mem->length - length)
++ case RXE_MR_TYPE_MR:
++ if (iova < mr->iova || length > mr->length ||
++ iova > mr->iova + mr->length - length)
+ return -EFAULT;
+ return 0;
+
+@@ -46,85 +45,83 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
+ | IB_ACCESS_REMOTE_WRITE \
+ | IB_ACCESS_REMOTE_ATOMIC)
+
+-static void rxe_mem_init(int access, struct rxe_mem *mem)
++static void rxe_mr_init(int access, struct rxe_mr *mr)
+ {
+- u32 lkey = mem->pelem.index << 8 | rxe_get_key();
++ u32 lkey = mr->pelem.index << 8 | rxe_get_key();
+ u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
+
+- mem->ibmr.lkey = lkey;
+- mem->ibmr.rkey = rkey;
+- mem->state = RXE_MEM_STATE_INVALID;
+- mem->type = RXE_MEM_TYPE_NONE;
+- mem->map_shift = ilog2(RXE_BUF_PER_MAP);
++ mr->ibmr.lkey = lkey;
++ mr->ibmr.rkey = rkey;
++ mr->state = RXE_MR_STATE_INVALID;
++ mr->type = RXE_MR_TYPE_NONE;
++ mr->map_shift = ilog2(RXE_BUF_PER_MAP);
+ }
+
+-void rxe_mem_cleanup(struct rxe_pool_entry *arg)
++void rxe_mr_cleanup(struct rxe_pool_entry *arg)
+ {
+- struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
++ struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
+ int i;
+
+- ib_umem_release(mem->umem);
++ ib_umem_release(mr->umem);
+
+- if (mem->map) {
+- for (i = 0; i < mem->num_map; i++)
+- kfree(mem->map[i]);
++ if (mr->map) {
++ for (i = 0; i < mr->num_map; i++)
++ kfree(mr->map[i]);
+
+- kfree(mem->map);
++ kfree(mr->map);
+ }
+ }
+
+-static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
++static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
+ {
+ int i;
+ int num_map;
+- struct rxe_map **map = mem->map;
++ struct rxe_map **map = mr->map;
+
+ num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
+
+- mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+- if (!mem->map)
++ mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
++ if (!mr->map)
+ goto err1;
+
+ for (i = 0; i < num_map; i++) {
+- mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+- if (!mem->map[i])
++ mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
++ if (!mr->map[i])
+ goto err2;
+ }
+
+ BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
+
+- mem->map_shift = ilog2(RXE_BUF_PER_MAP);
+- mem->map_mask = RXE_BUF_PER_MAP - 1;
++ mr->map_shift = ilog2(RXE_BUF_PER_MAP);
++ mr->map_mask = RXE_BUF_PER_MAP - 1;
+
+- mem->num_buf = num_buf;
+- mem->num_map = num_map;
+- mem->max_buf = num_map * RXE_BUF_PER_MAP;
++ mr->num_buf = num_buf;
++ mr->num_map = num_map;
++ mr->max_buf = num_map * RXE_BUF_PER_MAP;
+
+ return 0;
+
+ err2:
+ for (i--; i >= 0; i--)
+- kfree(mem->map[i]);
++ kfree(mr->map[i]);
+
+- kfree(mem->map);
++ kfree(mr->map);
+ err1:
+ return -ENOMEM;
+ }
+
+-void rxe_mem_init_dma(struct rxe_pd *pd,
+- int access, struct rxe_mem *mem)
++void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
+ {
+- rxe_mem_init(access, mem);
++ rxe_mr_init(access, mr);
+
+- mem->ibmr.pd = &pd->ibpd;
+- mem->access = access;
+- mem->state = RXE_MEM_STATE_VALID;
+- mem->type = RXE_MEM_TYPE_DMA;
++ mr->ibmr.pd = &pd->ibpd;
++ mr->access = access;
++ mr->state = RXE_MR_STATE_VALID;
++ mr->type = RXE_MR_TYPE_DMA;
+ }
+
+-int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
+- u64 length, u64 iova, int access, struct ib_udata *udata,
+- struct rxe_mem *mem)
++int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
++ int access, struct ib_udata *udata, struct rxe_mr *mr)
+ {
+ struct rxe_map **map;
+ struct rxe_phys_buf *buf = NULL;
+@@ -142,23 +139,23 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
+ goto err1;
+ }
+
+- mem->umem = umem;
++ mr->umem = umem;
+ num_buf = ib_umem_num_pages(umem);
+
+- rxe_mem_init(access, mem);
++ rxe_mr_init(access, mr);
+
+- err = rxe_mem_alloc(mem, num_buf);
++ err = rxe_mr_alloc(mr, num_buf);
+ if (err) {
+- pr_warn("err %d from rxe_mem_alloc\n", err);
++ pr_warn("err %d from rxe_mr_alloc\n", err);
+ ib_umem_release(umem);
+ goto err1;
+ }
+
+- mem->page_shift = PAGE_SHIFT;
+- mem->page_mask = PAGE_SIZE - 1;
++ mr->page_shift = PAGE_SHIFT;
++ mr->page_mask = PAGE_SIZE - 1;
+
+ num_buf = 0;
+- map = mem->map;
++ map = mr->map;
+ if (length > 0) {
+ buf = map[0]->buf;
+
+@@ -185,15 +182,15 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
+ }
+ }
+
+- mem->ibmr.pd = &pd->ibpd;
+- mem->umem = umem;
+- mem->access = access;
+- mem->length = length;
+- mem->iova = iova;
+- mem->va = start;
+- mem->offset = ib_umem_offset(umem);
+- mem->state = RXE_MEM_STATE_VALID;
+- mem->type = RXE_MEM_TYPE_MR;
++ mr->ibmr.pd = &pd->ibpd;
++ mr->umem = umem;
++ mr->access = access;
++ mr->length = length;
++ mr->iova = iova;
++ mr->va = start;
++ mr->offset = ib_umem_offset(umem);
++ mr->state = RXE_MR_STATE_VALID;
++ mr->type = RXE_MR_TYPE_MR;
+
+ return 0;
+
+@@ -201,24 +198,23 @@ err1:
+ return err;
+ }
+
+-int rxe_mem_init_fast(struct rxe_pd *pd,
+- int max_pages, struct rxe_mem *mem)
++int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
+ {
+ int err;
+
+- rxe_mem_init(0, mem);
++ rxe_mr_init(0, mr);
+
+ /* In fastreg, we also set the rkey */
+- mem->ibmr.rkey = mem->ibmr.lkey;
++ mr->ibmr.rkey = mr->ibmr.lkey;
++
1609 +- err = rxe_mem_alloc(mem, max_pages);
1610 ++ err = rxe_mr_alloc(mr, max_pages);
1611 + if (err)
1612 + goto err1;
1613 +
1614 +- mem->ibmr.pd = &pd->ibpd;
1615 +- mem->max_buf = max_pages;
1616 +- mem->state = RXE_MEM_STATE_FREE;
1617 +- mem->type = RXE_MEM_TYPE_MR;
1618 ++ mr->ibmr.pd = &pd->ibpd;
1619 ++ mr->max_buf = max_pages;
1620 ++ mr->state = RXE_MR_STATE_FREE;
1621 ++ mr->type = RXE_MR_TYPE_MR;
1622 +
1623 + return 0;
1624 +
1625 +@@ -226,28 +222,24 @@ err1:
1626 + return err;
1627 + }
1628 +
1629 +-static void lookup_iova(
1630 +- struct rxe_mem *mem,
1631 +- u64 iova,
1632 +- int *m_out,
1633 +- int *n_out,
1634 +- size_t *offset_out)
1635 ++static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
1636 ++ size_t *offset_out)
1637 + {
1638 +- size_t offset = iova - mem->iova + mem->offset;
1639 ++ size_t offset = iova - mr->iova + mr->offset;
1640 + int map_index;
1641 + int buf_index;
1642 + u64 length;
1643 +
1644 +- if (likely(mem->page_shift)) {
1645 +- *offset_out = offset & mem->page_mask;
1646 +- offset >>= mem->page_shift;
1647 +- *n_out = offset & mem->map_mask;
1648 +- *m_out = offset >> mem->map_shift;
1649 ++ if (likely(mr->page_shift)) {
1650 ++ *offset_out = offset & mr->page_mask;
1651 ++ offset >>= mr->page_shift;
1652 ++ *n_out = offset & mr->map_mask;
1653 ++ *m_out = offset >> mr->map_shift;
1654 + } else {
1655 + map_index = 0;
1656 + buf_index = 0;
1657 +
1658 +- length = mem->map[map_index]->buf[buf_index].size;
1659 ++ length = mr->map[map_index]->buf[buf_index].size;
1660 +
1661 + while (offset >= length) {
1662 + offset -= length;
1663 +@@ -257,7 +249,7 @@ static void lookup_iova(
1664 + map_index++;
1665 + buf_index = 0;
1666 + }
1667 +- length = mem->map[map_index]->buf[buf_index].size;
1668 ++ length = mr->map[map_index]->buf[buf_index].size;
1669 + }
1670 +
1671 + *m_out = map_index;
1672 +@@ -266,49 +258,49 @@ static void lookup_iova(
1673 + }
1674 + }
1675 +
1676 +-void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
1677 ++void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
1678 + {
1679 + size_t offset;
1680 + int m, n;
1681 + void *addr;
1682 +
1683 +- if (mem->state != RXE_MEM_STATE_VALID) {
1684 +- pr_warn("mem not in valid state\n");
1685 ++ if (mr->state != RXE_MR_STATE_VALID) {
1686 ++ pr_warn("mr not in valid state\n");
1687 + addr = NULL;
1688 + goto out;
1689 + }
1690 +
1691 +- if (!mem->map) {
1692 ++ if (!mr->map) {
1693 + addr = (void *)(uintptr_t)iova;
1694 + goto out;
1695 + }
1696 +
1697 +- if (mem_check_range(mem, iova, length)) {
1698 ++ if (mr_check_range(mr, iova, length)) {
1699 + pr_warn("range violation\n");
1700 + addr = NULL;
1701 + goto out;
1702 + }
1703 +
1704 +- lookup_iova(mem, iova, &m, &n, &offset);
1705 ++ lookup_iova(mr, iova, &m, &n, &offset);
1706 +
1707 +- if (offset + length > mem->map[m]->buf[n].size) {
1708 ++ if (offset + length > mr->map[m]->buf[n].size) {
1709 + pr_warn("crosses page boundary\n");
1710 + addr = NULL;
1711 + goto out;
1712 + }
1713 +
1714 +- addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;
1715 ++ addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
1716 +
1717 + out:
1718 + return addr;
1719 + }
1720 +
1721 + /* copy data from a range (vaddr, vaddr+length-1) to or from
1722 +- * a mem object starting at iova. Compute incremental value of
1723 +- * crc32 if crcp is not zero. caller must hold a reference to mem
1724 ++ * a mr object starting at iova. Compute incremental value of
1725 ++ * crc32 if crcp is not zero. caller must hold a reference to mr
1726 + */
1727 +-int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
1728 +- enum copy_direction dir, u32 *crcp)
1729 ++int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
1730 ++ enum copy_direction dir, u32 *crcp)
1731 + {
1732 + int err;
1733 + int bytes;
1734 +@@ -323,43 +315,41 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
1735 + if (length == 0)
1736 + return 0;
1737 +
1738 +- if (mem->type == RXE_MEM_TYPE_DMA) {
1739 ++ if (mr->type == RXE_MR_TYPE_DMA) {
1740 + u8 *src, *dest;
1741 +
1742 +- src = (dir == to_mem_obj) ?
1743 +- addr : ((void *)(uintptr_t)iova);
1744 ++ src = (dir == to_mr_obj) ? addr : ((void *)(uintptr_t)iova);
1745 +
1746 +- dest = (dir == to_mem_obj) ?
1747 +- ((void *)(uintptr_t)iova) : addr;
1748 ++ dest = (dir == to_mr_obj) ? ((void *)(uintptr_t)iova) : addr;
1749 +
1750 + memcpy(dest, src, length);
1751 +
1752 + if (crcp)
1753 +- *crcp = rxe_crc32(to_rdev(mem->ibmr.device),
1754 +- *crcp, dest, length);
1755 ++ *crcp = rxe_crc32(to_rdev(mr->ibmr.device), *crcp, dest,
1756 ++ length);
1757 +
1758 + return 0;
1759 + }
1760 +
1761 +- WARN_ON_ONCE(!mem->map);
1762 ++ WARN_ON_ONCE(!mr->map);
1763 +
1764 +- err = mem_check_range(mem, iova, length);
1765 ++ err = mr_check_range(mr, iova, length);
1766 + if (err) {
1767 + err = -EFAULT;
1768 + goto err1;
1769 + }
1770 +
1771 +- lookup_iova(mem, iova, &m, &i, &offset);
1772 ++ lookup_iova(mr, iova, &m, &i, &offset);
1773 +
1774 +- map = mem->map + m;
1775 ++ map = mr->map + m;
1776 + buf = map[0]->buf + i;
1777 +
1778 + while (length > 0) {
1779 + u8 *src, *dest;
1780 +
1781 + va = (u8 *)(uintptr_t)buf->addr + offset;
1782 +- src = (dir == to_mem_obj) ? addr : va;
1783 +- dest = (dir == to_mem_obj) ? va : addr;
1784 ++ src = (dir == to_mr_obj) ? addr : va;
1785 ++ dest = (dir == to_mr_obj) ? va : addr;
1786 +
1787 + bytes = buf->size - offset;
1788 +
1789 +@@ -369,8 +359,8 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
1790 + memcpy(dest, src, bytes);
1791 +
1792 + if (crcp)
1793 +- crc = rxe_crc32(to_rdev(mem->ibmr.device),
1794 +- crc, dest, bytes);
1795 ++ crc = rxe_crc32(to_rdev(mr->ibmr.device), crc, dest,
1796 ++ bytes);
1797 +
1798 + length -= bytes;
1799 + addr += bytes;
1800 +@@ -411,7 +401,7 @@ int copy_data(
1801 + struct rxe_sge *sge = &dma->sge[dma->cur_sge];
1802 + int offset = dma->sge_offset;
1803 + int resid = dma->resid;
1804 +- struct rxe_mem *mem = NULL;
1805 ++ struct rxe_mr *mr = NULL;
1806 + u64 iova;
1807 + int err;
1808 +
1809 +@@ -424,8 +414,8 @@ int copy_data(
1810 + }
1811 +
1812 + if (sge->length && (offset < sge->length)) {
1813 +- mem = lookup_mem(pd, access, sge->lkey, lookup_local);
1814 +- if (!mem) {
1815 ++ mr = lookup_mr(pd, access, sge->lkey, lookup_local);
1816 ++ if (!mr) {
1817 + err = -EINVAL;
1818 + goto err1;
1819 + }
1820 +@@ -435,9 +425,9 @@ int copy_data(
1821 + bytes = length;
1822 +
1823 + if (offset >= sge->length) {
1824 +- if (mem) {
1825 +- rxe_drop_ref(mem);
1826 +- mem = NULL;
1827 ++ if (mr) {
1828 ++ rxe_drop_ref(mr);
1829 ++ mr = NULL;
1830 + }
1831 + sge++;
1832 + dma->cur_sge++;
1833 +@@ -449,9 +439,9 @@ int copy_data(
1834 + }
1835 +
1836 + if (sge->length) {
1837 +- mem = lookup_mem(pd, access, sge->lkey,
1838 +- lookup_local);
1839 +- if (!mem) {
1840 ++ mr = lookup_mr(pd, access, sge->lkey,
1841 ++ lookup_local);
1842 ++ if (!mr) {
1843 + err = -EINVAL;
1844 + goto err1;
1845 + }
1846 +@@ -466,7 +456,7 @@ int copy_data(
1847 + if (bytes > 0) {
1848 + iova = sge->addr + offset;
1849 +
1850 +- err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
1851 ++ err = rxe_mr_copy(mr, iova, addr, bytes, dir, crcp);
1852 + if (err)
1853 + goto err2;
1854 +
1855 +@@ -480,14 +470,14 @@ int copy_data(
1856 + dma->sge_offset = offset;
1857 + dma->resid = resid;
1858 +
1859 +- if (mem)
1860 +- rxe_drop_ref(mem);
1861 ++ if (mr)
1862 ++ rxe_drop_ref(mr);
1863 +
1864 + return 0;
1865 +
1866 + err2:
1867 +- if (mem)
1868 +- rxe_drop_ref(mem);
1869 ++ if (mr)
1870 ++ rxe_drop_ref(mr);
1871 + err1:
1872 + return err;
1873 + }
1874 +@@ -525,31 +515,30 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
1875 + return 0;
1876 + }
1877 +
1878 +-/* (1) find the mem (mr or mw) corresponding to lkey/rkey
1879 ++/* (1) find the mr corresponding to lkey/rkey
1880 + * depending on lookup_type
1881 +- * (2) verify that the (qp) pd matches the mem pd
1882 +- * (3) verify that the mem can support the requested access
1883 +- * (4) verify that mem state is valid
1884 ++ * (2) verify that the (qp) pd matches the mr pd
1885 ++ * (3) verify that the mr can support the requested access
1886 ++ * (4) verify that mr state is valid
1887 + */
1888 +-struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
1889 +- enum lookup_type type)
1890 ++struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
1891 ++ enum lookup_type type)
1892 + {
1893 +- struct rxe_mem *mem;
1894 ++ struct rxe_mr *mr;
1895 + struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
1896 + int index = key >> 8;
1897 +
1898 +- mem = rxe_pool_get_index(&rxe->mr_pool, index);
1899 +- if (!mem)
1900 ++ mr = rxe_pool_get_index(&rxe->mr_pool, index);
1901 ++ if (!mr)
1902 + return NULL;
1903 +
1904 +- if (unlikely((type == lookup_local && mr_lkey(mem) != key) ||
1905 +- (type == lookup_remote && mr_rkey(mem) != key) ||
1906 +- mr_pd(mem) != pd ||
1907 +- (access && !(access & mem->access)) ||
1908 +- mem->state != RXE_MEM_STATE_VALID)) {
1909 +- rxe_drop_ref(mem);
1910 +- mem = NULL;
1911 ++ if (unlikely((type == lookup_local && mr_lkey(mr) != key) ||
1912 ++ (type == lookup_remote && mr_rkey(mr) != key) ||
1913 ++ mr_pd(mr) != pd || (access && !(access & mr->access)) ||
1914 ++ mr->state != RXE_MR_STATE_VALID)) {
1915 ++ rxe_drop_ref(mr);
1916 ++ mr = NULL;
1917 + }
1918 +
1919 +- return mem;
1920 ++ return mr;
1921 + }
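/*
 * Note on the rxe_mr.c hunks above: the rxe_mem -> rxe_mr rename keeps
 * the same container_of() pattern, where the pool hands back a pointer
 * to the embedded pelem and the MR code recovers the enclosing struct.
 * A minimal userspace sketch of that pattern (struct and field names
 * here are illustrative stand-ins, not the kernel's definitions):
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pool_entry { int index; };

struct mr {
	struct pool_entry pelem;	/* embedded, as in struct rxe_mr */
	int state;
};

int main(void)
{
	struct mr m = { .pelem = { .index = 7 }, .state = 1 };
	struct pool_entry *arg = &m.pelem;	/* what the pool passes */
	struct mr *back = container_of(arg, struct mr, pelem);

	printf("index=%d state=%d\n", back->pelem.index, back->state);
	return 0;
}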
1922 +diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
1923 +index 307d8986e7c9b..d24901f2af3fb 100644
1924 +--- a/drivers/infiniband/sw/rxe/rxe_pool.c
1925 ++++ b/drivers/infiniband/sw/rxe/rxe_pool.c
1926 +@@ -8,8 +8,6 @@
1927 + #include "rxe_loc.h"
1928 +
1929 + /* info about object pools
1930 +- * note that mr and mw share a single index space
1931 +- * so that one can map an lkey to the correct type of object
1932 + */
1933 + struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
1934 + [RXE_TYPE_UC] = {
1935 +@@ -56,18 +54,18 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
1936 + },
1937 + [RXE_TYPE_MR] = {
1938 + .name = "rxe-mr",
1939 +- .size = sizeof(struct rxe_mem),
1940 +- .elem_offset = offsetof(struct rxe_mem, pelem),
1941 +- .cleanup = rxe_mem_cleanup,
1942 ++ .size = sizeof(struct rxe_mr),
1943 ++ .elem_offset = offsetof(struct rxe_mr, pelem),
1944 ++ .cleanup = rxe_mr_cleanup,
1945 + .flags = RXE_POOL_INDEX,
1946 + .max_index = RXE_MAX_MR_INDEX,
1947 + .min_index = RXE_MIN_MR_INDEX,
1948 + },
1949 + [RXE_TYPE_MW] = {
1950 + .name = "rxe-mw",
1951 +- .size = sizeof(struct rxe_mem),
1952 +- .elem_offset = offsetof(struct rxe_mem, pelem),
1953 +- .flags = RXE_POOL_INDEX,
1954 ++ .size = sizeof(struct rxe_mw),
1955 ++ .elem_offset = offsetof(struct rxe_mw, pelem),
1956 ++ .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
1957 + .max_index = RXE_MAX_MW_INDEX,
1958 + .min_index = RXE_MIN_MW_INDEX,
1959 + },
1960 +diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
1961 +index 34ae957a315ca..b0f350d674fdb 100644
1962 +--- a/drivers/infiniband/sw/rxe/rxe_qp.c
1963 ++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
1964 +@@ -242,6 +242,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
1965 + if (err) {
1966 + vfree(qp->sq.queue->buf);
1967 + kfree(qp->sq.queue);
1968 ++ qp->sq.queue = NULL;
1969 + return err;
1970 + }
1971 +
1972 +@@ -295,6 +296,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
1973 + if (err) {
1974 + vfree(qp->rq.queue->buf);
1975 + kfree(qp->rq.queue);
1976 ++ qp->rq.queue = NULL;
1977 + return err;
1978 + }
1979 + }
1980 +@@ -355,6 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
1981 + err2:
1982 + rxe_queue_cleanup(qp->sq.queue);
1983 + err1:
1984 ++ qp->pd = NULL;
1985 ++ qp->rcq = NULL;
1986 ++ qp->scq = NULL;
1987 ++ qp->srq = NULL;
1988 ++
1989 + if (srq)
1990 + rxe_drop_ref(srq);
1991 + rxe_drop_ref(scq);
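/*
 * Note on the rxe_qp.c hunks above: the error paths now clear the queue
 * pointers after freeing them because a later cleanup pass may free the
 * same pointers again. A hedged userspace sketch of why that matters
 * (free()/NULL stand in for kfree() and the driver's teardown):
 */
#include <stdlib.h>

struct qp { void *sq_queue; };

static void qp_cleanup(struct qp *qp)
{
	free(qp->sq_queue);	/* safe: free(NULL) is a no-op */
	qp->sq_queue = NULL;
}

int main(void)
{
	struct qp qp = { .sq_queue = malloc(64) };

	/* init failed: release and, crucially, clear the pointer ... */
	free(qp->sq_queue);
	qp->sq_queue = NULL;

	/* ... so the shared cleanup path cannot double-free it */
	qp_cleanup(&qp);
	return 0;
}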
1992 +diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
1993 +index 889290793d75b..3664cdae7e1f4 100644
1994 +--- a/drivers/infiniband/sw/rxe/rxe_req.c
1995 ++++ b/drivers/infiniband/sw/rxe/rxe_req.c
1996 +@@ -464,7 +464,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
1997 + } else {
1998 + err = copy_data(qp->pd, 0, &wqe->dma,
1999 + payload_addr(pkt), paylen,
2000 +- from_mem_obj,
2001 ++ from_mr_obj,
2002 + &crc);
2003 + if (err)
2004 + return err;
2005 +@@ -596,7 +596,7 @@ next_wqe:
2006 + if (wqe->mask & WR_REG_MASK) {
2007 + if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
2008 + struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
2009 +- struct rxe_mem *rmr;
2010 ++ struct rxe_mr *rmr;
2011 +
2012 + rmr = rxe_pool_get_index(&rxe->mr_pool,
2013 + wqe->wr.ex.invalidate_rkey >> 8);
2014 +@@ -607,14 +607,14 @@ next_wqe:
2015 + wqe->status = IB_WC_MW_BIND_ERR;
2016 + goto exit;
2017 + }
2018 +- rmr->state = RXE_MEM_STATE_FREE;
2019 ++ rmr->state = RXE_MR_STATE_FREE;
2020 + rxe_drop_ref(rmr);
2021 + wqe->state = wqe_state_done;
2022 + wqe->status = IB_WC_SUCCESS;
2023 + } else if (wqe->wr.opcode == IB_WR_REG_MR) {
2024 +- struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);
2025 ++ struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
2026 +
2027 +- rmr->state = RXE_MEM_STATE_VALID;
2028 ++ rmr->state = RXE_MR_STATE_VALID;
2029 + rmr->access = wqe->wr.wr.reg.access;
2030 + rmr->ibmr.lkey = wqe->wr.wr.reg.key;
2031 + rmr->ibmr.rkey = wqe->wr.wr.reg.key;
2032 +diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
2033 +index 142f3d8014d83..8e237b623b316 100644
2034 +--- a/drivers/infiniband/sw/rxe/rxe_resp.c
2035 ++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
2036 +@@ -391,7 +391,7 @@ static enum resp_states check_length(struct rxe_qp *qp,
2037 + static enum resp_states check_rkey(struct rxe_qp *qp,
2038 + struct rxe_pkt_info *pkt)
2039 + {
2040 +- struct rxe_mem *mem = NULL;
2041 ++ struct rxe_mr *mr = NULL;
2042 + u64 va;
2043 + u32 rkey;
2044 + u32 resid;
2045 +@@ -430,18 +430,18 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
2046 + resid = qp->resp.resid;
2047 + pktlen = payload_size(pkt);
2048 +
2049 +- mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
2050 +- if (!mem) {
2051 ++ mr = lookup_mr(qp->pd, access, rkey, lookup_remote);
2052 ++ if (!mr) {
2053 + state = RESPST_ERR_RKEY_VIOLATION;
2054 + goto err;
2055 + }
2056 +
2057 +- if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
2058 ++ if (unlikely(mr->state == RXE_MR_STATE_FREE)) {
2059 + state = RESPST_ERR_RKEY_VIOLATION;
2060 + goto err;
2061 + }
2062 +
2063 +- if (mem_check_range(mem, va, resid)) {
2064 ++ if (mr_check_range(mr, va, resid)) {
2065 + state = RESPST_ERR_RKEY_VIOLATION;
2066 + goto err;
2067 + }
2068 +@@ -469,12 +469,12 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
2069 +
2070 + WARN_ON_ONCE(qp->resp.mr);
2071 +
2072 +- qp->resp.mr = mem;
2073 ++ qp->resp.mr = mr;
2074 + return RESPST_EXECUTE;
2075 +
2076 + err:
2077 +- if (mem)
2078 +- rxe_drop_ref(mem);
2079 ++ if (mr)
2080 ++ rxe_drop_ref(mr);
2081 + return state;
2082 + }
2083 +
2084 +@@ -484,7 +484,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
2085 + int err;
2086 +
2087 + err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
2088 +- data_addr, data_len, to_mem_obj, NULL);
2089 ++ data_addr, data_len, to_mr_obj, NULL);
2090 + if (unlikely(err))
2091 + return (err == -ENOSPC) ? RESPST_ERR_LENGTH
2092 + : RESPST_ERR_MALFORMED_WQE;
2093 +@@ -499,8 +499,8 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
2094 + int err;
2095 + int data_len = payload_size(pkt);
2096 +
2097 +- err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
2098 +- data_len, to_mem_obj, NULL);
2099 ++ err = rxe_mr_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), data_len,
2100 ++ to_mr_obj, NULL);
2101 + if (err) {
2102 + rc = RESPST_ERR_RKEY_VIOLATION;
2103 + goto out;
2104 +@@ -522,9 +522,9 @@ static enum resp_states process_atomic(struct rxe_qp *qp,
2105 + u64 iova = atmeth_va(pkt);
2106 + u64 *vaddr;
2107 + enum resp_states ret;
2108 +- struct rxe_mem *mr = qp->resp.mr;
2109 ++ struct rxe_mr *mr = qp->resp.mr;
2110 +
2111 +- if (mr->state != RXE_MEM_STATE_VALID) {
2112 ++ if (mr->state != RXE_MR_STATE_VALID) {
2113 + ret = RESPST_ERR_RKEY_VIOLATION;
2114 + goto out;
2115 + }
2116 +@@ -700,8 +700,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
2117 + if (!skb)
2118 + return RESPST_ERR_RNR;
2119 +
2120 +- err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
2121 +- payload, from_mem_obj, &icrc);
2122 ++ err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
2123 ++ payload, from_mr_obj, &icrc);
2124 + if (err)
2125 + pr_err("Failed copying memory\n");
2126 +
2127 +@@ -883,7 +883,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
2128 + }
2129 +
2130 + if (pkt->mask & RXE_IETH_MASK) {
2131 +- struct rxe_mem *rmr;
2132 ++ struct rxe_mr *rmr;
2133 +
2134 + wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2135 + wc->ex.invalidate_rkey = ieth_rkey(pkt);
2136 +@@ -895,7 +895,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
2137 + wc->ex.invalidate_rkey);
2138 + return RESPST_ERROR;
2139 + }
2140 +- rmr->state = RXE_MEM_STATE_FREE;
2141 ++ rmr->state = RXE_MR_STATE_FREE;
2142 + rxe_drop_ref(rmr);
2143 + }
2144 +
2145 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
2146 +index dee5e0e919d28..38249c1a76a88 100644
2147 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
2148 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
2149 +@@ -865,7 +865,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
2150 + {
2151 + struct rxe_dev *rxe = to_rdev(ibpd->device);
2152 + struct rxe_pd *pd = to_rpd(ibpd);
2153 +- struct rxe_mem *mr;
2154 ++ struct rxe_mr *mr;
2155 +
2156 + mr = rxe_alloc(&rxe->mr_pool);
2157 + if (!mr)
2158 +@@ -873,7 +873,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
2159 +
2160 + rxe_add_index(mr);
2161 + rxe_add_ref(pd);
2162 +- rxe_mem_init_dma(pd, access, mr);
2163 ++ rxe_mr_init_dma(pd, access, mr);
2164 +
2165 + return &mr->ibmr;
2166 + }
2167 +@@ -887,7 +887,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
2168 + int err;
2169 + struct rxe_dev *rxe = to_rdev(ibpd->device);
2170 + struct rxe_pd *pd = to_rpd(ibpd);
2171 +- struct rxe_mem *mr;
2172 ++ struct rxe_mr *mr;
2173 +
2174 + mr = rxe_alloc(&rxe->mr_pool);
2175 + if (!mr) {
2176 +@@ -899,8 +899,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
2177 +
2178 + rxe_add_ref(pd);
2179 +
2180 +- err = rxe_mem_init_user(pd, start, length, iova,
2181 +- access, udata, mr);
2182 ++ err = rxe_mr_init_user(pd, start, length, iova, access, udata, mr);
2183 + if (err)
2184 + goto err3;
2185 +
2186 +@@ -916,9 +915,9 @@ err2:
2187 +
2188 + static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
2189 + {
2190 +- struct rxe_mem *mr = to_rmr(ibmr);
2191 ++ struct rxe_mr *mr = to_rmr(ibmr);
2192 +
2193 +- mr->state = RXE_MEM_STATE_ZOMBIE;
2194 ++ mr->state = RXE_MR_STATE_ZOMBIE;
2195 + rxe_drop_ref(mr_pd(mr));
2196 + rxe_drop_index(mr);
2197 + rxe_drop_ref(mr);
2198 +@@ -930,7 +929,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
2199 + {
2200 + struct rxe_dev *rxe = to_rdev(ibpd->device);
2201 + struct rxe_pd *pd = to_rpd(ibpd);
2202 +- struct rxe_mem *mr;
2203 ++ struct rxe_mr *mr;
2204 + int err;
2205 +
2206 + if (mr_type != IB_MR_TYPE_MEM_REG)
2207 +@@ -946,7 +945,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
2208 +
2209 + rxe_add_ref(pd);
2210 +
2211 +- err = rxe_mem_init_fast(pd, max_num_sg, mr);
2212 ++ err = rxe_mr_init_fast(pd, max_num_sg, mr);
2213 + if (err)
2214 + goto err2;
2215 +
2216 +@@ -962,7 +961,7 @@ err1:
2217 +
2218 + static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
2219 + {
2220 +- struct rxe_mem *mr = to_rmr(ibmr);
2221 ++ struct rxe_mr *mr = to_rmr(ibmr);
2222 + struct rxe_map *map;
2223 + struct rxe_phys_buf *buf;
2224 +
2225 +@@ -982,7 +981,7 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
2226 + static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2227 + int sg_nents, unsigned int *sg_offset)
2228 + {
2229 +- struct rxe_mem *mr = to_rmr(ibmr);
2230 ++ struct rxe_mr *mr = to_rmr(ibmr);
2231 + int n;
2232 +
2233 + mr->nbuf = 0;
2234 +@@ -1110,6 +1109,7 @@ static const struct ib_device_ops rxe_dev_ops = {
2235 + INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
2236 + INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
2237 + INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
2238 ++ INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
2239 + };
2240 +
2241 + int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
2242 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
2243 +index 79e0a5a878da3..11eba7a3ba8f4 100644
2244 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
2245 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
2246 +@@ -156,7 +156,7 @@ struct resp_res {
2247 + struct sk_buff *skb;
2248 + } atomic;
2249 + struct {
2250 +- struct rxe_mem *mr;
2251 ++ struct rxe_mr *mr;
2252 + u64 va_org;
2253 + u32 rkey;
2254 + u32 length;
2255 +@@ -183,7 +183,7 @@ struct rxe_resp_info {
2256 +
2257 + /* RDMA read / atomic only */
2258 + u64 va;
2259 +- struct rxe_mem *mr;
2260 ++ struct rxe_mr *mr;
2261 + u32 resid;
2262 + u32 rkey;
2263 + u32 length;
2264 +@@ -262,18 +262,18 @@ struct rxe_qp {
2265 + struct execute_work cleanup_work;
2266 + };
2267 +
2268 +-enum rxe_mem_state {
2269 +- RXE_MEM_STATE_ZOMBIE,
2270 +- RXE_MEM_STATE_INVALID,
2271 +- RXE_MEM_STATE_FREE,
2272 +- RXE_MEM_STATE_VALID,
2273 ++enum rxe_mr_state {
2274 ++ RXE_MR_STATE_ZOMBIE,
2275 ++ RXE_MR_STATE_INVALID,
2276 ++ RXE_MR_STATE_FREE,
2277 ++ RXE_MR_STATE_VALID,
2278 + };
2279 +
2280 +-enum rxe_mem_type {
2281 +- RXE_MEM_TYPE_NONE,
2282 +- RXE_MEM_TYPE_DMA,
2283 +- RXE_MEM_TYPE_MR,
2284 +- RXE_MEM_TYPE_MW,
2285 ++enum rxe_mr_type {
2286 ++ RXE_MR_TYPE_NONE,
2287 ++ RXE_MR_TYPE_DMA,
2288 ++ RXE_MR_TYPE_MR,
2289 ++ RXE_MR_TYPE_MW,
2290 + };
2291 +
2292 + #define RXE_BUF_PER_MAP (PAGE_SIZE / sizeof(struct rxe_phys_buf))
2293 +@@ -287,17 +287,14 @@ struct rxe_map {
2294 + struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
2295 + };
2296 +
2297 +-struct rxe_mem {
2298 ++struct rxe_mr {
2299 + struct rxe_pool_entry pelem;
2300 +- union {
2301 +- struct ib_mr ibmr;
2302 +- struct ib_mw ibmw;
2303 +- };
2304 ++ struct ib_mr ibmr;
2305 +
2306 + struct ib_umem *umem;
2307 +
2308 +- enum rxe_mem_state state;
2309 +- enum rxe_mem_type type;
2310 ++ enum rxe_mr_state state;
2311 ++ enum rxe_mr_type type;
2312 + u64 va;
2313 + u64 iova;
2314 + size_t length;
2315 +@@ -318,6 +315,17 @@ struct rxe_mem {
2316 + struct rxe_map **map;
2317 + };
2318 +
2319 ++enum rxe_mw_state {
2320 ++ RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
2321 ++ RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
2322 ++ RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
2323 ++};
2324 ++
2325 ++struct rxe_mw {
2326 ++ struct ib_mw ibmw;
2327 ++ struct rxe_pool_entry pelem;
2328 ++};
2329 ++
2330 + struct rxe_mc_grp {
2331 + struct rxe_pool_entry pelem;
2332 + spinlock_t mcg_lock; /* guard group */
2333 +@@ -422,27 +430,27 @@ static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
2334 + return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
2335 + }
2336 +
2337 +-static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
2338 ++static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
2339 + {
2340 +- return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
2341 ++ return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
2342 + }
2343 +
2344 +-static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
2345 ++static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
2346 + {
2347 +- return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
2348 ++ return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
2349 + }
2350 +
2351 +-static inline struct rxe_pd *mr_pd(struct rxe_mem *mr)
2352 ++static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
2353 + {
2354 + return to_rpd(mr->ibmr.pd);
2355 + }
2356 +
2357 +-static inline u32 mr_lkey(struct rxe_mem *mr)
2358 ++static inline u32 mr_lkey(struct rxe_mr *mr)
2359 + {
2360 + return mr->ibmr.lkey;
2361 + }
2362 +
2363 +-static inline u32 mr_rkey(struct rxe_mem *mr)
2364 ++static inline u32 mr_rkey(struct rxe_mr *mr)
2365 + {
2366 + return mr->ibmr.rkey;
2367 + }
2368 +diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
2369 +index e389d44e5591d..8a00c06e5f56f 100644
2370 +--- a/drivers/infiniband/sw/siw/siw_verbs.c
2371 ++++ b/drivers/infiniband/sw/siw/siw_verbs.c
2372 +@@ -300,7 +300,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
2373 + struct siw_ucontext *uctx =
2374 + rdma_udata_to_drv_context(udata, struct siw_ucontext,
2375 + base_ucontext);
2376 +- struct siw_cq *scq = NULL, *rcq = NULL;
2377 + unsigned long flags;
2378 + int num_sqe, num_rqe, rv = 0;
2379 + size_t length;
2380 +@@ -343,10 +342,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
2381 + rv = -EINVAL;
2382 + goto err_out;
2383 + }
2384 +- scq = to_siw_cq(attrs->send_cq);
2385 +- rcq = to_siw_cq(attrs->recv_cq);
2386 +
2387 +- if (!scq || (!rcq && !attrs->srq)) {
2388 ++ if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
2389 + siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
2390 + rv = -EINVAL;
2391 + goto err_out;
2392 +@@ -378,7 +375,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
2393 + else {
2394 + /* Zero sized SQ is not supported */
2395 + rv = -EINVAL;
2396 +- goto err_out;
2397 ++ goto err_out_xa;
2398 + }
2399 + if (num_rqe)
2400 + num_rqe = roundup_pow_of_two(num_rqe);
2401 +@@ -401,8 +398,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
2402 + }
2403 + }
2404 + qp->pd = pd;
2405 +- qp->scq = scq;
2406 +- qp->rcq = rcq;
2407 ++ qp->scq = to_siw_cq(attrs->send_cq);
2408 ++ qp->rcq = to_siw_cq(attrs->recv_cq);
2409 +
2410 + if (attrs->srq) {
2411 + /*
2412 +diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
2413 +index fc433e63b1dc0..b1590cb4a1887 100644
2414 +--- a/drivers/leds/leds-lp5523.c
2415 ++++ b/drivers/leds/leds-lp5523.c
2416 +@@ -307,7 +307,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
2417 + usleep_range(3000, 6000);
2418 + ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
2419 + if (ret)
2420 +- return ret;
2421 ++ goto out;
2422 + status &= LP5523_ENG_STATUS_MASK;
2423 +
2424 + if (status != LP5523_ENG_STATUS_MASK) {
2425 +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
2426 +index 11890db71f3fe..962f7df0691ef 100644
2427 +--- a/drivers/md/dm-snap.c
2428 ++++ b/drivers/md/dm-snap.c
2429 +@@ -1408,6 +1408,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2430 +
2431 + if (!s->store->chunk_size) {
2432 + ti->error = "Chunk size not set";
2433 ++ r = -EINVAL;
2434 + goto bad_read_metadata;
2435 + }
2436 +
2437 +diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
2438 +index 83bd9a412a560..1e3b68a8743af 100644
2439 +--- a/drivers/media/platform/rcar_drif.c
2440 ++++ b/drivers/media/platform/rcar_drif.c
2441 +@@ -915,7 +915,6 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
2442 + {
2443 + struct rcar_drif_sdr *sdr = video_drvdata(file);
2444 +
2445 +- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
2446 + f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
2447 + f->fmt.sdr.buffersize = sdr->fmt->buffersize;
2448 +
2449 +diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
2450 +index 926408b41270c..7a6f01ace78ac 100644
2451 +--- a/drivers/misc/eeprom/at24.c
2452 ++++ b/drivers/misc/eeprom/at24.c
2453 +@@ -763,7 +763,8 @@ static int at24_probe(struct i2c_client *client)
2454 + at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
2455 + if (IS_ERR(at24->nvmem)) {
2456 + pm_runtime_disable(dev);
2457 +- regulator_disable(at24->vcc_reg);
2458 ++ if (!pm_runtime_status_suspended(dev))
2459 ++ regulator_disable(at24->vcc_reg);
2460 + return PTR_ERR(at24->nvmem);
2461 + }
2462 +
2463 +@@ -774,7 +775,8 @@ static int at24_probe(struct i2c_client *client)
2464 + err = at24_read(at24, 0, &test_byte, 1);
2465 + if (err) {
2466 + pm_runtime_disable(dev);
2467 +- regulator_disable(at24->vcc_reg);
2468 ++ if (!pm_runtime_status_suspended(dev))
2469 ++ regulator_disable(at24->vcc_reg);
2470 + return -ENODEV;
2471 + }
2472 +
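/*
 * Note on the at24 hunks above: the regulator enable/disable count must
 * stay balanced. If runtime PM has already suspended the device (which
 * disables vcc), the probe error path must not disable it a second
 * time. Illustrative sketch with stubbed state, not the at24/PM API:
 */
#include <stdio.h>

static int vcc_enable_count;	/* how many times vcc is enabled */
static int runtime_suspended;	/* stand-in for pm_runtime_status_suspended() */

static void regulator_disable(void) { vcc_enable_count--; }

static void probe_error_path(void)
{
	if (!runtime_suspended)		/* the guard the patch adds */
		regulator_disable();
}

int main(void)
{
	/* autosuspend already ran and disabled vcc */
	runtime_suspended = 1;
	vcc_enable_count = 0;

	probe_error_path();
	printf("vcc enable count: %d (must not go negative)\n",
	       vcc_enable_count);
	return 0;
}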
2473 +diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
2474 +index 9152242778f5e..ecdedd87f8ccf 100644
2475 +--- a/drivers/misc/habanalabs/gaudi/gaudi.c
2476 ++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
2477 +@@ -5546,6 +5546,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
2478 + struct hl_cs_job *job;
2479 + u32 cb_size, ctl, err_cause;
2480 + struct hl_cb *cb;
2481 ++ u64 id;
2482 + int rc;
2483 +
2484 + cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
2485 +@@ -5612,8 +5613,9 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
2486 + }
2487 +
2488 + release_cb:
2489 ++ id = cb->id;
2490 + hl_cb_put(cb);
2491 +- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
2492 ++ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);
2493 +
2494 + return rc;
2495 + }
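/*
 * Note on the gaudi hunk above: hl_cb_put() may drop the last reference
 * and free the buffer, so cb->id has to be read into a local before the
 * put. A minimal sketch of that use-after-free shape (the refcount and
 * free() stand in for the habanalabs helpers; the shift is illustrative):
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct cb { uint64_t id; int refcount; };

static void cb_put(struct cb *cb)
{
	if (--cb->refcount == 0)
		free(cb);	/* cb is dangling after this */
}

int main(void)
{
	struct cb *cb = malloc(sizeof(*cb));
	uint64_t id;

	cb->id = 42;
	cb->refcount = 1;

	id = cb->id;	/* read first, as the fix does */
	cb_put(cb);
	printf("destroy handle %llu\n", (unsigned long long)(id << 12));
	return 0;
}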
2496 +diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
2497 +index 2bdf560ee681b..0f9ea75b0b189 100644
2498 +--- a/drivers/misc/ics932s401.c
2499 ++++ b/drivers/misc/ics932s401.c
2500 +@@ -134,7 +134,7 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
2501 + for (i = 0; i < NUM_MIRRORED_REGS; i++) {
2502 + temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
2503 + if (temp < 0)
2504 +- data->regs[regs_to_copy[i]] = 0;
2505 ++ temp = 0;
2506 + data->regs[regs_to_copy[i]] = temp >> 8;
2507 + }
2508 +
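/*
 * Note on the ics932s401 hunk above: the old error branch stored 0 and
 * was then immediately overwritten by the unconditional assignment, so
 * a failed read left shifted garbage in the register copy. Folding the
 * error into temp makes the single store correct. Sketch with a stubbed
 * SMBus reader (one register simulated as failing):
 */
#include <stdio.h>

static int smbus_read_word(int reg)
{
	return reg == 2 ? -5 : 0x1234;	/* simulate an error on reg 2 */
}

int main(void)
{
	unsigned char regs[4] = { 0 };
	int i, temp;

	for (i = 0; i < 4; i++) {
		temp = smbus_read_word(i);
		if (temp < 0)
			temp = 0;	/* was: regs[i] = 0; then overwritten */
		regs[i] = temp >> 8;
	}

	for (i = 0; i < 4; i++)
		printf("reg[%d]=0x%02x\n", i, regs[i]);
	return 0;
}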
2509 +diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
2510 +index b8b771b643cc8..016a6106151a5 100644
2511 +--- a/drivers/mmc/host/meson-gx-mmc.c
2512 ++++ b/drivers/mmc/host/meson-gx-mmc.c
2513 +@@ -236,7 +236,8 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
2514 + if (host->dram_access_quirk)
2515 + return;
2516 +
2517 +- if (data->blocks > 1) {
2518 ++ /* SD_IO_RW_EXTENDED (CMD53) can also use block mode under the hood */
2519 ++ if (data->blocks > 1 || mrq->cmd->opcode == SD_IO_RW_EXTENDED) {
2520 + /*
2521 + * In block mode DMA descriptor format, "length" field indicates
2522 + * number of blocks and there is no way to pass DMA size that
2523 +@@ -258,7 +259,9 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
2524 + for_each_sg(data->sg, sg, data->sg_len, i) {
2525 + /* check for 8 byte alignment */
2526 + if (sg->offset % 8) {
2527 +- WARN_ONCE(1, "unaligned scatterlist buffer\n");
2528 ++ dev_warn_once(mmc_dev(mmc),
2529 ++ "unaligned sg offset %u, disabling descriptor DMA for transfer\n",
2530 ++ sg->offset);
2531 + return;
2532 + }
2533 + }
2534 +diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
2535 +index 4a0f69b97a78f..7572119225068 100644
2536 +--- a/drivers/mmc/host/sdhci-pci-gli.c
2537 ++++ b/drivers/mmc/host/sdhci-pci-gli.c
2538 +@@ -587,8 +587,13 @@ static void sdhci_gli_voltage_switch(struct sdhci_host *host)
2539 + *
2540 + * Wait 5ms after set 1.8V signal enable in Host Control 2 register
2541 + * to ensure 1.8V signal enable bit is set by GL9750/GL9755.
2542 ++ *
2543 ++ * ...however, the controller in the NUC10i3FNK4 (a 9755) requires
2544 ++ * slightly longer than 5ms before the control register reports that
2545 ++ * 1.8V is ready, and far longer still before the card will actually
2546 ++ * work reliably.
2547 + */
2548 +- usleep_range(5000, 5500);
2549 ++ usleep_range(100000, 110000);
2550 + }
2551 +
2552 + static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)
2553 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
2554 +index d8a3ecaed3fc6..d8f0863b39342 100644
2555 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
2556 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
2557 +@@ -1048,7 +1048,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
2558 + for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
2559 + skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
2560 + if (!skb)
2561 +- break;
2562 ++ goto error;
2563 + qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
2564 + skb_put(skb, QLCNIC_ILB_PKT_SIZE);
2565 + adapter->ahw->diag_cnt = 0;
2566 +@@ -1072,6 +1072,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
2567 + cnt++;
2568 + }
2569 + if (cnt != i) {
2570 ++error:
2571 + dev_err(&adapter->pdev->dev,
2572 + "LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
2573 + if (mode != QLCNIC_ILB_MODE)
2574 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
2575 +index 0e1ca2cba3c7c..e18dee7fe6876 100644
2576 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
2577 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
2578 +@@ -30,7 +30,7 @@ struct sunxi_priv_data {
2579 + static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
2580 + {
2581 + struct sunxi_priv_data *gmac = priv;
2582 +- int ret;
2583 ++ int ret = 0;
2584 +
2585 + if (gmac->regulator) {
2586 + ret = regulator_enable(gmac->regulator);
2587 +@@ -51,11 +51,11 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
2588 + } else {
2589 + clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
2590 + ret = clk_prepare(gmac->tx_clk);
2591 +- if (ret)
2592 +- return ret;
2593 ++ if (ret && gmac->regulator)
2594 ++ regulator_disable(gmac->regulator);
2595 + }
2596 +
2597 +- return 0;
2598 ++ return ret;
2599 + }
2600 +
2601 + static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
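/*
 * Note on the dwmac-sunxi hunks above: a clk_prepare() failure on the
 * MII path previously returned with the regulator still enabled. The
 * fix funnels both paths through one return and undoes the enable on
 * error. Illustrative stubs only (not the clk/regulator API):
 */
#include <stdio.h>

static int regulator_on;
static void regulator_enable(void)  { regulator_on = 1; }
static void regulator_disable(void) { regulator_on = 0; }
static int clk_prepare(void)        { return -22; }	/* simulate failure */

static int gmac_init(void)
{
	int ret = 0;

	regulator_enable();
	ret = clk_prepare();
	if (ret)
		regulator_disable();	/* was: early return, leaking the enable */
	return ret;
}

int main(void)
{
	int ret = gmac_init();

	printf("ret=%d regulator_on=%d\n", ret, regulator_on);
	return 0;
}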
2602 +diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
2603 +index 707ccdd03b19e..74e748662ec01 100644
2604 +--- a/drivers/net/ethernet/sun/niu.c
2605 ++++ b/drivers/net/ethernet/sun/niu.c
2606 +@@ -8144,10 +8144,10 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
2607 + "VPD_SCAN: Reading in property [%s] len[%d]\n",
2608 + namebuf, prop_len);
2609 + for (i = 0; i < prop_len; i++) {
2610 +- err = niu_pci_eeprom_read(np, off + i);
2611 +- if (err >= 0)
2612 +- *prop_buf = err;
2613 +- ++prop_buf;
2614 ++ err = niu_pci_eeprom_read(np, off + i);
2615 ++ if (err < 0)
2616 ++ return err;
2617 ++ *prop_buf++ = err;
2618 + }
2619 + }
2620 +
2621 +@@ -8158,14 +8158,14 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
2622 + }
2623 +
2624 + /* ESPC_PIO_EN_ENABLE must be set */
2625 +-static void niu_pci_vpd_fetch(struct niu *np, u32 start)
2626 ++static int niu_pci_vpd_fetch(struct niu *np, u32 start)
2627 + {
2628 + u32 offset;
2629 + int err;
2630 +
2631 + err = niu_pci_eeprom_read16_swp(np, start + 1);
2632 + if (err < 0)
2633 +- return;
2634 ++ return err;
2635 +
2636 + offset = err + 3;
2637 +
2638 +@@ -8174,12 +8174,14 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
2639 + u32 end;
2640 +
2641 + err = niu_pci_eeprom_read(np, here);
2642 ++ if (err < 0)
2643 ++ return err;
2644 + if (err != 0x90)
2645 +- return;
2646 ++ return -EINVAL;
2647 +
2648 + err = niu_pci_eeprom_read16_swp(np, here + 1);
2649 + if (err < 0)
2650 +- return;
2651 ++ return err;
2652 +
2653 + here = start + offset + 3;
2654 + end = start + offset + err;
2655 +@@ -8187,9 +8189,12 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
2656 + offset += err;
2657 +
2658 + err = niu_pci_vpd_scan_props(np, here, end);
2659 +- if (err < 0 || err == 1)
2660 +- return;
2661 ++ if (err < 0)
2662 ++ return err;
2663 ++ if (err == 1)
2664 ++ return -EINVAL;
2665 + }
2666 ++ return 0;
2667 + }
2668 +
2669 + /* ESPC_PIO_EN_ENABLE must be set */
2670 +@@ -9280,8 +9285,11 @@ static int niu_get_invariants(struct niu *np)
2671 + offset = niu_pci_vpd_offset(np);
2672 + netif_printk(np, probe, KERN_DEBUG, np->dev,
2673 + "%s() VPD offset [%08x]\n", __func__, offset);
2674 +- if (offset)
2675 +- niu_pci_vpd_fetch(np, offset);
2676 ++ if (offset) {
2677 ++ err = niu_pci_vpd_fetch(np, offset);
2678 ++ if (err < 0)
2679 ++ return err;
2680 ++ }
2681 + nw64(ESPC_PIO_EN, 0);
2682 +
2683 + if (np->flags & NIU_FLAGS_VPD_VALID) {
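/*
 * Note on the niu hunks above: the VPD fetch was a void function that
 * silently stopped on EEPROM read errors; making it return int lets
 * niu_get_invariants() fail the probe instead of continuing with a
 * half-read VPD. Minimal sketch of that void-to-int conversion, with a
 * stubbed reader in place of niu_pci_eeprom_read():
 */
#include <stdio.h>

static int eeprom_read(int off)
{
	return off == 3 ? -5 : off;	/* simulate an I/O error at offset 3 */
}

static int vpd_fetch(unsigned char *buf, int len)
{
	int i, err;

	for (i = 0; i < len; i++) {
		err = eeprom_read(i);
		if (err < 0)
			return err;	/* was: stop silently, return void */
		buf[i] = err;
	}
	return 0;
}

int main(void)
{
	unsigned char buf[8];
	int err = vpd_fetch(buf, sizeof(buf));

	if (err < 0) {
		fprintf(stderr, "VPD fetch failed: %d\n", err);
		return 1;
	}
	return 0;
}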
2684 +diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
2685 +index 6e8bd99e8911d..1866f6c2acab1 100644
2686 +--- a/drivers/net/wireless/realtek/rtlwifi/base.c
2687 ++++ b/drivers/net/wireless/realtek/rtlwifi/base.c
2688 +@@ -440,9 +440,14 @@ static void rtl_watchdog_wq_callback(struct work_struct *work);
2689 + static void rtl_fwevt_wq_callback(struct work_struct *work);
2690 + static void rtl_c2hcmd_wq_callback(struct work_struct *work);
2691 +
2692 +-static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
2693 ++static int _rtl_init_deferred_work(struct ieee80211_hw *hw)
2694 + {
2695 + struct rtl_priv *rtlpriv = rtl_priv(hw);
2696 ++ struct workqueue_struct *wq;
2697 ++
2698 ++ wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
2699 ++ if (!wq)
2700 ++ return -ENOMEM;
2701 +
2702 + /* <1> timer */
2703 + timer_setup(&rtlpriv->works.watchdog_timer,
2704 +@@ -451,11 +456,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
2705 + rtl_easy_concurrent_retrytimer_callback, 0);
2706 + /* <2> work queue */
2707 + rtlpriv->works.hw = hw;
2708 +- rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
2709 +- if (unlikely(!rtlpriv->works.rtl_wq)) {
2710 +- pr_err("Failed to allocate work queue\n");
2711 +- return;
2712 +- }
2713 ++ rtlpriv->works.rtl_wq = wq;
2714 +
2715 + INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
2716 + rtl_watchdog_wq_callback);
2717 +@@ -466,6 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
2718 + rtl_swlps_rfon_wq_callback);
2719 + INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, rtl_fwevt_wq_callback);
2720 + INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, rtl_c2hcmd_wq_callback);
2721 ++ return 0;
2722 + }
2723 +
2724 + void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
2725 +@@ -565,9 +567,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
2726 + rtlmac->link_state = MAC80211_NOLINK;
2727 +
2728 + /* <6> init deferred work */
2729 +- _rtl_init_deferred_work(hw);
2730 +-
2731 +- return 0;
2732 ++ return _rtl_init_deferred_work(hw);
2733 + }
2734 + EXPORT_SYMBOL_GPL(rtl_init_core);
2735 +
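/*
 * Note on the rtlwifi hunks above: the only fallible step is moved to
 * the front so _rtl_init_deferred_work() can report -ENOMEM before any
 * timers or work items reference the queue, and rtl_init_core() now
 * propagates that result. Sketch of the allocate-first shape (stand-in
 * types, not the rtlwifi structures):
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct wq { int dummy; };

static int init_deferred_work(struct wq **out)
{
	struct wq *wq = malloc(sizeof(*wq));	/* the fallible step, first */

	if (!wq)
		return -ENOMEM;	/* nothing half-initialized to unwind */

	/* ... set up timers and work items here ... */
	*out = wq;
	return 0;
}

int main(void)
{
	struct wq *wq = NULL;
	int ret = init_deferred_work(&wq);

	printf("init: %d\n", ret);
	free(wq);
	return 0;
}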
2736 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2737 +index d5d7e0cdd78d8..091b2e77d39ba 100644
2738 +--- a/drivers/nvme/host/core.c
2739 ++++ b/drivers/nvme/host/core.c
2740 +@@ -3190,7 +3190,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
2741 + ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
2742 + }
2743 +
2744 +- ret = nvme_mpath_init(ctrl, id);
2745 ++ ret = nvme_mpath_init_identify(ctrl, id);
2746 + kfree(id);
2747 +
2748 + if (ret < 0)
2749 +@@ -4580,6 +4580,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
2750 + min(default_ps_max_latency_us, (unsigned long)S32_MAX));
2751 +
2752 + nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
2753 ++ nvme_mpath_init_ctrl(ctrl);
2754 +
2755 + return 0;
2756 + out_free_name:
2757 +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
2758 +index 6ffa8de2a0d77..5eee603bc2493 100644
2759 +--- a/drivers/nvme/host/fc.c
2760 ++++ b/drivers/nvme/host/fc.c
2761 +@@ -2460,6 +2460,18 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2762 + static void
2763 + __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
2764 + {
2765 ++ int q;
2766 ++
2767 ++ /*
2768 ++ * if aborting io, the queues are no longer good, mark them
2769 ++ * all as not live.
2770 ++ */
2771 ++ if (ctrl->ctrl.queue_count > 1) {
2772 ++ for (q = 1; q < ctrl->ctrl.queue_count; q++)
2773 ++ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
2774 ++ }
2775 ++ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2776 ++
2777 + /*
2778 + * If io queues are present, stop them and terminate all outstanding
2779 + * ios on them. As FC allocates FC exchange for each io, the
2780 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
2781 +index ec1e454848e58..56852e6edd81a 100644
2782 +--- a/drivers/nvme/host/multipath.c
2783 ++++ b/drivers/nvme/host/multipath.c
2784 +@@ -709,9 +709,18 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
2785 + put_disk(head->disk);
2786 + }
2787 +
2788 +-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2789 ++void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
2790 + {
2791 +- int error;
2792 ++ mutex_init(&ctrl->ana_lock);
2793 ++ timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
2794 ++ INIT_WORK(&ctrl->ana_work, nvme_ana_work);
2795 ++}
2796 ++
2797 ++int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2798 ++{
2799 ++ size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
2800 ++ size_t ana_log_size;
2801 ++ int error = 0;
2802 +
2803 + /* check if multipath is enabled and we have the capability */
2804 + if (!multipath || !ctrl->subsys ||
2805 +@@ -723,37 +732,31 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2806 + ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
2807 + ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
2808 +
2809 +- mutex_init(&ctrl->ana_lock);
2810 +- timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
2811 +- ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
2812 +- ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
2813 +- ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
2814 +-
2815 +- if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
2816 ++ ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
2817 ++ ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
2818 ++ ctrl->max_namespaces * sizeof(__le32);
2819 ++ if (ana_log_size > max_transfer_size) {
2820 + dev_err(ctrl->device,
2821 +- "ANA log page size (%zd) larger than MDTS (%d).\n",
2822 +- ctrl->ana_log_size,
2823 +- ctrl->max_hw_sectors << SECTOR_SHIFT);
2824 ++ "ANA log page size (%zd) larger than MDTS (%zd).\n",
2825 ++ ana_log_size, max_transfer_size);
2826 + dev_err(ctrl->device, "disabling ANA support.\n");
2827 +- return 0;
2828 ++ goto out_uninit;
2829 + }
2830 +-
2831 +- INIT_WORK(&ctrl->ana_work, nvme_ana_work);
2832 +- kfree(ctrl->ana_log_buf);
2833 +- ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
2834 +- if (!ctrl->ana_log_buf) {
2835 +- error = -ENOMEM;
2836 +- goto out;
2837 ++ if (ana_log_size > ctrl->ana_log_size) {
2838 ++ nvme_mpath_stop(ctrl);
2839 ++ kfree(ctrl->ana_log_buf);
2840 ++ ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
2841 ++ if (!ctrl->ana_log_buf)
2842 ++ return -ENOMEM;
2843 + }
2844 +-
2845 ++ ctrl->ana_log_size = ana_log_size;
2846 + error = nvme_read_ana_log(ctrl);
2847 + if (error)
2848 +- goto out_free_ana_log_buf;
2849 ++ goto out_uninit;
2850 + return 0;
2851 +-out_free_ana_log_buf:
2852 +- kfree(ctrl->ana_log_buf);
2853 +- ctrl->ana_log_buf = NULL;
2854 +-out:
2855 ++
2856 ++out_uninit:
2857 ++ nvme_mpath_uninit(ctrl);
2858 + return error;
2859 + }
2860 +
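/*
 * Note on the multipath hunks above: invariant setup (mutex, timer,
 * work item) moves to controller init, and the Identify-time path only
 * reallocates the ANA log buffer when the required size grew, reusing
 * any larger buffer from a previous reset. Hedged sketch of that
 * grow-only reallocation (plain malloc/free in place of kmalloc/kfree):
 */
#include <stdio.h>
#include <stdlib.h>

struct ctrl { void *buf; size_t size; };

static int resize_log(struct ctrl *c, size_t need)
{
	if (need > c->size) {	/* grow only; a larger buffer is kept */
		void *n = malloc(need);

		if (!n)
			return -1;
		free(c->buf);
		c->buf = n;
	}
	c->size = need;
	return 0;
}

int main(void)
{
	struct ctrl c = { 0 };

	resize_log(&c, 64);
	resize_log(&c, 32);	/* no allocation: the 64-byte buffer is reused */
	printf("size=%zu\n", c.size);
	free(c.buf);
	return 0;
}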
2861 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
2862 +index 07b34175c6ce6..447b0720aef5e 100644
2863 +--- a/drivers/nvme/host/nvme.h
2864 ++++ b/drivers/nvme/host/nvme.h
2865 +@@ -668,7 +668,8 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
2866 + int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
2867 + void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
2868 + void nvme_mpath_remove_disk(struct nvme_ns_head *head);
2869 +-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
2870 ++int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
2871 ++void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
2872 + void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
2873 + void nvme_mpath_stop(struct nvme_ctrl *ctrl);
2874 + bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
2875 +@@ -742,7 +743,10 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
2876 + static inline void nvme_trace_bio_complete(struct request *req)
2877 + {
2878 + }
2879 +-static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
2880 ++static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
2881 ++{
2882 ++}
2883 ++static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
2884 + struct nvme_id_ctrl *id)
2885 + {
2886 + if (ctrl->subsys->cmic & (1 << 3))
2887 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
2888 +index d7d7c81d07014..8c2ae6284c3b2 100644
2889 +--- a/drivers/nvme/host/tcp.c
2890 ++++ b/drivers/nvme/host/tcp.c
2891 +@@ -940,7 +940,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
2892 + if (ret <= 0)
2893 + return ret;
2894 +
2895 +- nvme_tcp_advance_req(req, ret);
2896 + if (queue->data_digest)
2897 + nvme_tcp_ddgst_update(queue->snd_hash, page,
2898 + offset, ret);
2899 +@@ -957,6 +956,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
2900 + }
2901 + return 1;
2902 + }
2903 ++ nvme_tcp_advance_req(req, ret);
2904 + }
2905 + return -EAGAIN;
2906 + }
2907 +@@ -1137,7 +1137,8 @@ static void nvme_tcp_io_work(struct work_struct *w)
2908 + pending = true;
2909 + else if (unlikely(result < 0))
2910 + break;
2911 +- }
2912 ++ } else
2913 ++ pending = !llist_empty(&queue->req_list);
2914 +
2915 + result = nvme_tcp_try_recv(queue);
2916 + if (result > 0)
2917 +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
2918 +index a027433b8be84..348057fdc568f 100644
2919 +--- a/drivers/nvme/target/core.c
2920 ++++ b/drivers/nvme/target/core.c
2921 +@@ -1371,7 +1371,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
2922 + goto out_free_changed_ns_list;
2923 +
2924 + if (subsys->cntlid_min > subsys->cntlid_max)
2925 +- goto out_free_changed_ns_list;
2926 ++ goto out_free_sqs;
2927 +
2928 + ret = ida_simple_get(&cntlid_ida,
2929 + subsys->cntlid_min, subsys->cntlid_max,
2930 +diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
2931 +index 715d4376c9979..7fdbdc496597d 100644
2932 +--- a/drivers/nvme/target/io-cmd-file.c
2933 ++++ b/drivers/nvme/target/io-cmd-file.c
2934 +@@ -49,9 +49,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
2935 +
2936 + ns->file = filp_open(ns->device_path, flags, 0);
2937 + if (IS_ERR(ns->file)) {
2938 +- pr_err("failed to open file %s: (%ld)\n",
2939 +- ns->device_path, PTR_ERR(ns->file));
2940 +- return PTR_ERR(ns->file);
2941 ++ ret = PTR_ERR(ns->file);
2942 ++ pr_err("failed to open file %s: (%d)\n",
2943 ++ ns->device_path, ret);
2944 ++ ns->file = NULL;
2945 ++ return ret;
2946 + }
2947 +
2948 + ret = nvmet_file_ns_revalidate(ns);
2949 +diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
2950 +index 3e189e753bcf5..14913a4588ecc 100644
2951 +--- a/drivers/nvme/target/loop.c
2952 ++++ b/drivers/nvme/target/loop.c
2953 +@@ -588,8 +588,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
2954 +
2955 + ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
2956 + 0 /* no quirks, we're perfect! */);
2957 +- if (ret)
2958 ++ if (ret) {
2959 ++ kfree(ctrl);
2960 + goto out;
2961 ++ }
2962 +
2963 + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
2964 + WARN_ON_ONCE(1);
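/*
 * Note on the loop.c hunk above: nvme_init_ctrl() failing before it
 * takes ownership of ctrl meant the bare "goto out" leaked the
 * allocation. Minimal sketch of the leak fix (stand-in types, plain
 * malloc/free):
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ctrl { int id; };

static int init_ctrl(struct ctrl *c)
{
	(void)c;
	return -EINVAL;	/* simulate failure before ownership transfer */
}

int main(void)
{
	struct ctrl *ctrl = malloc(sizeof(*ctrl));
	int ret;

	if (!ctrl)
		return 1;
	ret = init_ctrl(ctrl);
	if (ret) {
		free(ctrl);	/* was: goto out with ctrl leaked */
		fprintf(stderr, "create failed: %d\n", ret);
		return 1;
	}
	free(ctrl);
	return 0;
}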
2965 +diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
2966 +index bbc4e71a16ff8..38800e86ed8ad 100644
2967 +--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
2968 ++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
2969 +@@ -294,6 +294,9 @@ mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
2970 + if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
2971 + return NULL;
2972 +
2973 ++ /* Make sure 'avail->idx' is visible already. */
2974 ++ virtio_rmb(false);
2975 ++
2976 + idx = vring->next_avail % vr->num;
2977 + head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
2978 + if (WARN_ON(head >= vr->num))
2979 +@@ -322,7 +325,7 @@ static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
2980 + * done or not. Add a memory barrier here to make sure the update above
2981 + * completes before updating the idx.
2982 + */
2983 +- mb();
2984 ++ virtio_mb(false);
2985 + vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
2986 + }
2987 +
2988 +@@ -733,6 +736,12 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
2989 + desc = NULL;
2990 + fifo->vring[is_rx] = NULL;
2991 +
2992 ++ /*
2993 ++ * Make sure the load/store are in order before
2994 ++ * returning back to virtio.
2995 ++ */
2996 ++ virtio_mb(false);
2997 ++
2998 + /* Notify upper layer that packet is done. */
2999 + spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
3000 + vring_interrupt(0, vring->vq);
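/*
 * Note on the mlxbf-tmfifo hunks above: the consumer must order its
 * read of the ring entries after its read of avail->idx, pairing with
 * the producer's barrier before the index update; the patch uses the
 * virtio_rmb()/virtio_mb() helpers for this. A hedged C11 sketch of the
 * same publish/consume ordering (the kernel code does not use these
 * atomics; this only illustrates the pairing):
 */
#include <stdatomic.h>
#include <stdio.h>

static int ring[4];
static atomic_int avail_idx;

static void producer(int val)
{
	int idx = atomic_load_explicit(&avail_idx, memory_order_relaxed);

	ring[idx % 4] = val;	/* fill the descriptor first ... */
	/* ... then publish the index; release orders the fill before it */
	atomic_store_explicit(&avail_idx, idx + 1, memory_order_release);
}

static int consumer(int next)
{
	/* acquire pairs with the release above: once the new index is
	 * seen, the ring contents for slots below it are visible too */
	int idx = atomic_load_explicit(&avail_idx, memory_order_acquire);

	if (next == idx)
		return -1;	/* nothing new */
	return ring[next % 4];
}

int main(void)
{
	producer(123);
	printf("%d\n", consumer(0));
	return 0;
}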
3001 +diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
3002 +index 461ec61530ebf..205a096e9ceee 100644
3003 +--- a/drivers/platform/x86/Kconfig
3004 ++++ b/drivers/platform/x86/Kconfig
3005 +@@ -688,7 +688,7 @@ config INTEL_HID_EVENT
3006 +
3007 + config INTEL_INT0002_VGPIO
3008 + tristate "Intel ACPI INT0002 Virtual GPIO driver"
3009 +- depends on GPIOLIB && ACPI
3010 ++ depends on GPIOLIB && ACPI && PM_SLEEP
3011 + select GPIOLIB_IRQCHIP
3012 + help
3013 + Some peripherals on Bay Trail and Cherry Trail platforms signal a
3014 +diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
3015 +index 27a298b7c541b..c97bd4a452422 100644
3016 +--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
3017 ++++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
3018 +@@ -271,7 +271,8 @@ int init_dell_smbios_wmi(void)
3019 +
3020 + void exit_dell_smbios_wmi(void)
3021 + {
3022 +- wmi_driver_unregister(&dell_smbios_wmi_driver);
3023 ++ if (wmi_supported)
3024 ++ wmi_driver_unregister(&dell_smbios_wmi_driver);
3025 + }
3026 +
3027 + MODULE_DEVICE_TABLE(wmi, dell_smbios_wmi_id_table);
3028 +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
3029 +index 6cb5ad4be231d..3878172909219 100644
3030 +--- a/drivers/platform/x86/ideapad-laptop.c
3031 ++++ b/drivers/platform/x86/ideapad-laptop.c
3032 +@@ -57,8 +57,8 @@ enum {
3033 + };
3034 +
3035 + enum {
3036 +- SMBC_CONSERVATION_ON = 3,
3037 +- SMBC_CONSERVATION_OFF = 5,
3038 ++ SBMC_CONSERVATION_ON = 3,
3039 ++ SBMC_CONSERVATION_OFF = 5,
3040 + };
3041 +
3042 + enum {
3043 +@@ -182,9 +182,9 @@ static int eval_gbmd(acpi_handle handle, unsigned long *res)
3044 + return eval_int(handle, "GBMD", res);
3045 + }
3046 +
3047 +-static int exec_smbc(acpi_handle handle, unsigned long arg)
3048 ++static int exec_sbmc(acpi_handle handle, unsigned long arg)
3049 + {
3050 +- return exec_simple_method(handle, "SMBC", arg);
3051 ++ return exec_simple_method(handle, "SBMC", arg);
3052 + }
3053 +
3054 + static int eval_hals(acpi_handle handle, unsigned long *res)
3055 +@@ -477,7 +477,7 @@ static ssize_t conservation_mode_store(struct device *dev,
3056 + if (err)
3057 + return err;
3058 +
3059 +- err = exec_smbc(priv->adev->handle, state ? SMBC_CONSERVATION_ON : SMBC_CONSERVATION_OFF);
3060 ++ err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF);
3061 + if (err)
3062 + return err;
3063 +
3064 +@@ -809,6 +809,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
3065 + {
3066 + struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
3067 + struct ideapad_private *priv = dytc->priv;
3068 ++ unsigned long output;
3069 + int err;
3070 +
3071 + err = mutex_lock_interruptible(&dytc->mutex);
3072 +@@ -829,7 +830,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
3073 +
3074 + /* Determine if we are in CQL mode. This alters the commands we do */
3075 + err = dytc_cql_command(priv, DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
3076 +- NULL);
3077 ++ &output);
3078 + if (err)
3079 + goto unlock;
3080 + }
3081 +diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
3082 +index 289c6655d425d..569342aa8926e 100644
3083 +--- a/drivers/platform/x86/intel_int0002_vgpio.c
3084 ++++ b/drivers/platform/x86/intel_int0002_vgpio.c
3085 +@@ -51,6 +51,12 @@
3086 + #define GPE0A_STS_PORT 0x420
3087 + #define GPE0A_EN_PORT 0x428
3088 +
3089 ++struct int0002_data {
3090 ++ struct gpio_chip chip;
3091 ++ int parent_irq;
3092 ++ int wake_enable_count;
3093 ++};
3094 ++
3095 + /*
3096 + * As this is not a real GPIO at all, but just a hack to model an event in
3097 + * ACPI the get / set functions are dummy functions.
3098 +@@ -98,14 +104,16 @@ static void int0002_irq_mask(struct irq_data *data)
3099 + static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
3100 + {
3101 + struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
3102 +- struct platform_device *pdev = to_platform_device(chip->parent);
3103 +- int irq = platform_get_irq(pdev, 0);
3104 ++ struct int0002_data *int0002 = container_of(chip, struct int0002_data, chip);
3105 +
3106 +- /* Propagate to parent irq */
3107 ++ /*
3108 ++ * Applying of the wakeup flag to our parent IRQ is delayed till system
3109 ++ * suspend, because we only want to do this when using s2idle.
3110 ++ */
3111 + if (on)
3112 +- enable_irq_wake(irq);
3113 ++ int0002->wake_enable_count++;
3114 + else
3115 +- disable_irq_wake(irq);
3116 ++ int0002->wake_enable_count--;
3117 +
3118 + return 0;
3119 + }
3120 +@@ -135,7 +143,7 @@ static bool int0002_check_wake(void *data)
3121 + return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
3122 + }
3123 +
3124 +-static struct irq_chip int0002_byt_irqchip = {
3125 ++static struct irq_chip int0002_irqchip = {
3126 + .name = DRV_NAME,
3127 + .irq_ack = int0002_irq_ack,
3128 + .irq_mask = int0002_irq_mask,
3129 +@@ -143,21 +151,9 @@ static struct irq_chip int0002_byt_irqchip = {
3130 + .irq_set_wake = int0002_irq_set_wake,
3131 + };
3132 +
3133 +-static struct irq_chip int0002_cht_irqchip = {
3134 +- .name = DRV_NAME,
3135 +- .irq_ack = int0002_irq_ack,
3136 +- .irq_mask = int0002_irq_mask,
3137 +- .irq_unmask = int0002_irq_unmask,
3138 +- /*
3139 +- * No set_wake, on CHT the IRQ is typically shared with the ACPI SCI
3140 +- * and we don't want to mess with the ACPI SCI irq settings.
3141 +- */
3142 +- .flags = IRQCHIP_SKIP_SET_WAKE,
3143 +-};
3144 +-
3145 + static const struct x86_cpu_id int0002_cpu_ids[] = {
3146 +- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &int0002_byt_irqchip),
3147 +- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &int0002_cht_irqchip),
3148 ++ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
3149 ++ X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
3150 + {}
3151 + };
3152 +
3153 +@@ -172,8 +168,9 @@ static int int0002_probe(struct platform_device *pdev)
3154 + {
3155 + struct device *dev = &pdev->dev;
3156 + const struct x86_cpu_id *cpu_id;
3157 +- struct gpio_chip *chip;
3158 ++ struct int0002_data *int0002;
3159 + struct gpio_irq_chip *girq;
3160 ++ struct gpio_chip *chip;
3161 + int irq, ret;
3162 +
3163 + /* Menlow has a different INT0002 device? <sigh> */
3164 +@@ -185,10 +182,13 @@ static int int0002_probe(struct platform_device *pdev)
3165 + if (irq < 0)
3166 + return irq;
3167 +
3168 +- chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
3169 +- if (!chip)
3170 ++ int0002 = devm_kzalloc(dev, sizeof(*int0002), GFP_KERNEL);
3171 ++ if (!int0002)
3172 + return -ENOMEM;
3173 +
3174 ++ int0002->parent_irq = irq;
3175 ++
3176 ++ chip = &int0002->chip;
3177 + chip->label = DRV_NAME;
3178 + chip->parent = dev;
3179 + chip->owner = THIS_MODULE;
3180 +@@ -214,7 +214,7 @@ static int int0002_probe(struct platform_device *pdev)
3181 + }
3182 +
3183 + girq = &chip->irq;
3184 +- girq->chip = (struct irq_chip *)cpu_id->driver_data;
3185 ++ girq->chip = &int0002_irqchip;
3186 + /* This let us handle the parent IRQ in the driver */
3187 + girq->parent_handler = NULL;
3188 + girq->num_parents = 0;
3189 +@@ -230,6 +230,7 @@ static int int0002_probe(struct platform_device *pdev)
3190 +
3191 + acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
3192 + device_init_wakeup(dev, true);
3193 ++ dev_set_drvdata(dev, int0002);
3194 + return 0;
3195 + }
3196 +
3197 +@@ -240,6 +241,36 @@ static int int0002_remove(struct platform_device *pdev)
3198 + return 0;
3199 + }
3200 +
3201 ++static int int0002_suspend(struct device *dev)
3202 ++{
3203 ++ struct int0002_data *int0002 = dev_get_drvdata(dev);
3204 ++
3205 ++ /*
3206 ++ * The INT0002 parent IRQ is often shared with the ACPI GPE IRQ, don't
3207 ++ * muck with it when firmware based suspend is used, otherwise we may
3208 ++ * cause spurious wakeups from firmware managed suspend.
3209 ++ */
3210 ++ if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
3211 ++ enable_irq_wake(int0002->parent_irq);
3212 ++
3213 ++ return 0;
3214 ++}
3215 ++
3216 ++static int int0002_resume(struct device *dev)
3217 ++{
3218 ++ struct int0002_data *int0002 = dev_get_drvdata(dev);
3219 ++
3220 ++ if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
3221 ++ disable_irq_wake(int0002->parent_irq);
3222 ++
3223 ++ return 0;
3224 ++}
3225 ++
3226 ++static const struct dev_pm_ops int0002_pm_ops = {
3227 ++ .suspend = int0002_suspend,
3228 ++ .resume = int0002_resume,
3229 ++};
3230 ++
3231 + static const struct acpi_device_id int0002_acpi_ids[] = {
3232 + { "INT0002", 0 },
3233 + { },
3234 +@@ -250,6 +281,7 @@ static struct platform_driver int0002_driver = {
3235 + .driver = {
3236 + .name = DRV_NAME,
3237 + .acpi_match_table = int0002_acpi_ids,
3238 ++ .pm = &int0002_pm_ops,
3239 + },
3240 + .probe = int0002_probe,
3241 + .remove = int0002_remove,
3242 +diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
3243 +index 50ec53d67a4c0..db4c265287ae6 100644
3244 +--- a/drivers/rapidio/rio_cm.c
3245 ++++ b/drivers/rapidio/rio_cm.c
3246 +@@ -2127,6 +2127,14 @@ static int riocm_add_mport(struct device *dev,
3247 + return -ENODEV;
3248 + }
3249 +
3250 ++ cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
3251 ++ if (!cm->rx_wq) {
3252 ++ rio_release_inb_mbox(mport, cmbox);
3253 ++ rio_release_outb_mbox(mport, cmbox);
3254 ++ kfree(cm);
3255 ++ return -ENOMEM;
3256 ++ }
3257 ++
3258 + /*
3259 + * Allocate and register inbound messaging buffers to be ready
3260 + * to receive channel and system management requests
3261 +@@ -2137,15 +2145,6 @@ static int riocm_add_mport(struct device *dev,
3262 + cm->rx_slots = RIOCM_RX_RING_SIZE;
3263 + mutex_init(&cm->rx_lock);
3264 + riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
3265 +- cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
3266 +- if (!cm->rx_wq) {
3267 +- riocm_error("failed to allocate IBMBOX_%d on %s",
3268 +- cmbox, mport->name);
3269 +- rio_release_outb_mbox(mport, cmbox);
3270 +- kfree(cm);
3271 +- return -ENOMEM;
3272 +- }
3273 +-
3274 + INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
3275 +
3276 + cm->tx_slot = 0;
3277 +diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
3278 +index aef6c1ee8bb0e..82becae142299 100644
3279 +--- a/drivers/rtc/rtc-pcf85063.c
3280 ++++ b/drivers/rtc/rtc-pcf85063.c
3281 +@@ -478,6 +478,7 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
3282 + {
3283 + struct clk *clk;
3284 + struct clk_init_data init;
3285 ++ struct device_node *node = pcf85063->rtc->dev.parent->of_node;
3286 +
3287 + init.name = "pcf85063-clkout";
3288 + init.ops = &pcf85063_clkout_ops;
3289 +@@ -487,15 +488,13 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
3290 + pcf85063->clkout_hw.init = &init;
3291 +
3292 + /* optional override of the clockname */
3293 +- of_property_read_string(pcf85063->rtc->dev.of_node,
3294 +- "clock-output-names", &init.name);
3295 ++ of_property_read_string(node, "clock-output-names", &init.name);
3296 +
3297 + /* register the clock */
3298 + clk = devm_clk_register(&pcf85063->rtc->dev, &pcf85063->clkout_hw);
3299 +
3300 + if (!IS_ERR(clk))
3301 +- of_clk_add_provider(pcf85063->rtc->dev.of_node,
3302 +- of_clk_src_simple_get, clk);
3303 ++ of_clk_add_provider(node, of_clk_src_simple_get, clk);
3304 +
3305 + return clk;
3306 + }
3307 +diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
3308 +index cec27f2ef70d7..e5076f09d5ed4 100644
3309 +--- a/drivers/scsi/qedf/qedf_main.c
3310 ++++ b/drivers/scsi/qedf/qedf_main.c
3311 +@@ -536,7 +536,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
3312 + if (linkmode_intersects(link->supported_caps, sup_caps))
3313 + lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
3314 +
3315 +- fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
3316 ++ if (lport->host && lport->host->shost_data)
3317 ++ fc_host_supported_speeds(lport->host) =
3318 ++ lport->link_supported_speeds;
3319 + }
3320 +
3321 + static void qedf_bw_update(void *dev)
3322 +diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
3323 +index 0677295957bc5..615e44af1ca60 100644
3324 +--- a/drivers/scsi/qla2xxx/qla_nx.c
3325 ++++ b/drivers/scsi/qla2xxx/qla_nx.c
3326 +@@ -1063,7 +1063,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
3327 + return ret;
3328 + }
3329 +
3330 +- if (qla82xx_flash_set_write_enable(ha))
3331 ++ ret = qla82xx_flash_set_write_enable(ha);
3332 ++ if (ret < 0)
3333 + goto done_write;
3334 +
3335 + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
3336 +diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
3337 +index 0aa58131e7915..d0626773eb386 100644
3338 +--- a/drivers/scsi/ufs/ufs-hisi.c
3339 ++++ b/drivers/scsi/ufs/ufs-hisi.c
3340 +@@ -467,21 +467,24 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
3341 + host->hba = hba;
3342 + ufshcd_set_variant(hba, host);
3343 +
3344 +- host->rst = devm_reset_control_get(dev, "rst");
3345 ++ host->rst = devm_reset_control_get(dev, "rst");
3346 + if (IS_ERR(host->rst)) {
3347 + dev_err(dev, "%s: failed to get reset control\n", __func__);
3348 +- return PTR_ERR(host->rst);
3349 ++ err = PTR_ERR(host->rst);
3350 ++ goto error;
3351 + }
3352 +
3353 + ufs_hisi_set_pm_lvl(hba);
3354 +
3355 + err = ufs_hisi_get_resource(host);
3356 +- if (err) {
3357 +- ufshcd_set_variant(hba, NULL);
3358 +- return err;
3359 +- }
3360 ++ if (err)
3361 ++ goto error;
3362 +
3363 + return 0;
3364 ++
3365 ++error:
3366 ++ ufshcd_set_variant(hba, NULL);
3367 ++ return err;
3368 + }
3369 +
3370 + static int ufs_hi3660_init(struct ufs_hba *hba)
3371 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
3372 +index 0c71a159d08f1..e1e510882ff42 100644
3373 +--- a/drivers/scsi/ufs/ufshcd.c
3374 ++++ b/drivers/scsi/ufs/ufshcd.c
3375 +@@ -2849,7 +2849,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3376 + * ufshcd_exec_dev_cmd - API for sending device management requests
3377 + * @hba: UFS hba
3378 + * @cmd_type: specifies the type (NOP, Query...)
3379 +- * @timeout: time in seconds
3380 ++ * @timeout: timeout in milliseconds
3381 + *
3382 + * NOTE: Since there is only one available tag for device management commands,
3383 + * it is expected you hold the hba->dev_cmd.lock mutex.
3384 +@@ -2879,6 +2879,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3385 + }
3386 + tag = req->tag;
3387 + WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
3388 ++ /* Set the timeout such that the SCSI error handler is not activated. */
3389 ++ req->timeout = msecs_to_jiffies(2 * timeout);
3390 ++ blk_mq_start_request(req);
3391 +
3392 + init_completion(&wait);
3393 + lrbp = &hba->lrb[tag];
3394 +diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
3395 +index 337c8d82f74eb..6d0f7062bb870 100644
3396 +--- a/drivers/tee/amdtee/amdtee_private.h
3397 ++++ b/drivers/tee/amdtee/amdtee_private.h
3398 +@@ -21,6 +21,7 @@
3399 + #define TEEC_SUCCESS 0x00000000
3400 + #define TEEC_ERROR_GENERIC 0xFFFF0000
3401 + #define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
3402 ++#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
3403 + #define TEEC_ERROR_COMMUNICATION 0xFFFF000E
3404 +
3405 + #define TEEC_ORIGIN_COMMS 0x00000002
3406 +@@ -93,6 +94,18 @@ struct amdtee_shm_data {
3407 + u32 buf_id;
3408 + };
3409 +
3410 ++/**
3411 ++ * struct amdtee_ta_data - Keeps track of all TAs loaded in AMD Secure
3412 ++ * Processor
3413 ++ * @ta_handle: Handle to TA loaded in TEE
3414 ++ * @refcount: Reference count for the loaded TA
3415 ++ */
3416 ++struct amdtee_ta_data {
3417 ++ struct list_head list_node;
3418 ++ u32 ta_handle;
3419 ++ u32 refcount;
3420 ++};
3421 ++
3422 + #define LOWER_TWO_BYTE_MASK 0x0000FFFF
3423 +
3424 + /**
3425 +diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
3426 +index 096dd4d92d39c..07f36ac834c88 100644
3427 +--- a/drivers/tee/amdtee/call.c
3428 ++++ b/drivers/tee/amdtee/call.c
3429 +@@ -121,15 +121,69 @@ static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
3430 + return ret;
3431 + }
3432 +
3433 ++static DEFINE_MUTEX(ta_refcount_mutex);
3434 ++static struct list_head ta_list = LIST_HEAD_INIT(ta_list);
3435 ++
3436 ++static u32 get_ta_refcount(u32 ta_handle)
3437 ++{
3438 ++ struct amdtee_ta_data *ta_data;
3439 ++ u32 count = 0;
3440 ++
3441 ++ /* Caller must hold a mutex */
3442 ++ list_for_each_entry(ta_data, &ta_list, list_node)
3443 ++ if (ta_data->ta_handle == ta_handle)
3444 ++ return ++ta_data->refcount;
3445 ++
3446 ++ ta_data = kzalloc(sizeof(*ta_data), GFP_KERNEL);
3447 ++ if (ta_data) {
3448 ++ ta_data->ta_handle = ta_handle;
3449 ++ ta_data->refcount = 1;
3450 ++ count = ta_data->refcount;
3451 ++ list_add(&ta_data->list_node, &ta_list);
3452 ++ }
3453 ++
3454 ++ return count;
3455 ++}
3456 ++
3457 ++static u32 put_ta_refcount(u32 ta_handle)
3458 ++{
3459 ++ struct amdtee_ta_data *ta_data;
3460 ++ u32 count = 0;
3461 ++
3462 ++ /* Caller must hold a mutex */
3463 ++ list_for_each_entry(ta_data, &ta_list, list_node)
3464 ++ if (ta_data->ta_handle == ta_handle) {
3465 ++ count = --ta_data->refcount;
3466 ++ if (count == 0) {
3467 ++ list_del(&ta_data->list_node);
3468 ++ kfree(ta_data);
3469 ++ break;
3470 ++ }
3471 ++ }
3472 ++
3473 ++ return count;
3474 ++}
3475 ++
3476 + int handle_unload_ta(u32 ta_handle)
3477 + {
3478 + struct tee_cmd_unload_ta cmd = {0};
3479 +- u32 status;
3480 ++ u32 status, count;
3481 + int ret;
3482 +
3483 + if (!ta_handle)
3484 + return -EINVAL;
3485 +
3486 ++ mutex_lock(&ta_refcount_mutex);
3487 ++
3488 ++ count = put_ta_refcount(ta_handle);
3489 ++
3490 ++ if (count) {
3491 ++ pr_debug("unload ta: not unloading %u count %u\n",
3492 ++ ta_handle, count);
3493 ++ ret = -EBUSY;
3494 ++ goto unlock;
3495 ++ }
3496 ++
3497 + cmd.ta_handle = ta_handle;
3498 +
3499 + ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
3500 +@@ -137,8 +191,12 @@ int handle_unload_ta(u32 ta_handle)
3501 + if (!ret && status != 0) {
3502 + pr_err("unload ta: status = 0x%x\n", status);
3503 + ret = -EBUSY;
3504 ++ } else {
3505 ++ pr_debug("unloaded ta handle %u\n", ta_handle);
3506 + }
3507 +
3508 ++unlock:
3509 ++ mutex_unlock(&ta_refcount_mutex);
3510 + return ret;
3511 + }
3512 +
3513 +@@ -340,7 +398,8 @@ int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
3514 +
3515 + int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
3516 + {
3517 +- struct tee_cmd_load_ta cmd = {0};
3518 ++ struct tee_cmd_unload_ta unload_cmd = {};
3519 ++ struct tee_cmd_load_ta load_cmd = {};
3520 + phys_addr_t blob;
3521 + int ret;
3522 +
3523 +@@ -353,21 +412,36 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
3524 + return -EINVAL;
3525 + }
3526 +
3527 +- cmd.hi_addr = upper_32_bits(blob);
3528 +- cmd.low_addr = lower_32_bits(blob);
3529 +- cmd.size = size;
3530 ++ load_cmd.hi_addr = upper_32_bits(blob);
3531 ++ load_cmd.low_addr = lower_32_bits(blob);
3532 ++ load_cmd.size = size;
3533 +
3534 +- ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
3535 +- sizeof(cmd), &arg->ret);
3536 ++ mutex_lock(&ta_refcount_mutex);
3537 ++
3538 ++ ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&load_cmd,
3539 ++ sizeof(load_cmd), &arg->ret);
3540 + if (ret) {
3541 + arg->ret_origin = TEEC_ORIGIN_COMMS;
3542 + arg->ret = TEEC_ERROR_COMMUNICATION;
3543 +- } else {
3544 +- set_session_id(cmd.ta_handle, 0, &arg->session);
3545 ++ } else if (arg->ret == TEEC_SUCCESS) {
3546 ++ ret = get_ta_refcount(load_cmd.ta_handle);
3547 ++ if (!ret) {
3548 ++ arg->ret_origin = TEEC_ORIGIN_COMMS;
3549 ++ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
3550 ++
3551 ++ /* Unload the TA on error */
3552 ++ unload_cmd.ta_handle = load_cmd.ta_handle;
3553 ++ psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
3554 ++ (void *)&unload_cmd,
3555 ++ sizeof(unload_cmd), &ret);
3556 ++ } else {
3557 ++ set_session_id(load_cmd.ta_handle, 0, &arg->session);
3558 ++ }
3559 + }
3560 ++ mutex_unlock(&ta_refcount_mutex);
3561 +
3562 + pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
3563 +- cmd.ta_handle, arg->ret_origin, arg->ret);
3564 ++ load_cmd.ta_handle, arg->ret_origin, arg->ret);
3565 +
3566 + return 0;
3567 + }
3568 +diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
3569 +index 8a6a8f30bb427..da6b88e80dc07 100644
3570 +--- a/drivers/tee/amdtee/core.c
3571 ++++ b/drivers/tee/amdtee/core.c
3572 +@@ -59,10 +59,9 @@ static void release_session(struct amdtee_session *sess)
3573 + continue;
3574 +
3575 + handle_close_session(sess->ta_handle, sess->session_info[i]);
3576 ++ handle_unload_ta(sess->ta_handle);
3577 + }
3578 +
3579 +- /* Unload Trusted Application once all sessions are closed */
3580 +- handle_unload_ta(sess->ta_handle);
3581 + kfree(sess);
3582 + }
3583 +
3584 +@@ -224,8 +223,6 @@ static void destroy_session(struct kref *ref)
3585 + struct amdtee_session *sess = container_of(ref, struct amdtee_session,
3586 + refcount);
3587 +
3588 +- /* Unload the TA from TEE */
3589 +- handle_unload_ta(sess->ta_handle);
3590 + mutex_lock(&session_list_mutex);
3591 + list_del(&sess->list_node);
3592 + mutex_unlock(&session_list_mutex);
3593 +@@ -238,7 +235,7 @@ int amdtee_open_session(struct tee_context *ctx,
3594 + {
3595 + struct amdtee_context_data *ctxdata = ctx->data;
3596 + struct amdtee_session *sess = NULL;
3597 +- u32 session_info;
3598 ++ u32 session_info, ta_handle;
3599 + size_t ta_size;
3600 + int rc, i;
3601 + void *ta;
3602 +@@ -259,11 +256,14 @@ int amdtee_open_session(struct tee_context *ctx,
3603 + if (arg->ret != TEEC_SUCCESS)
3604 + goto out;
3605 +
3606 ++ ta_handle = get_ta_handle(arg->session);
3607 ++
3608 + mutex_lock(&session_list_mutex);
3609 + sess = alloc_session(ctxdata, arg->session);
3610 + mutex_unlock(&session_list_mutex);
3611 +
3612 + if (!sess) {
3613 ++ handle_unload_ta(ta_handle);
3614 + rc = -ENOMEM;
3615 + goto out;
3616 + }
3617 +@@ -277,6 +277,7 @@ int amdtee_open_session(struct tee_context *ctx,
3618 +
3619 + if (i >= TEE_NUM_SESSIONS) {
3620 + pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
3621 ++ handle_unload_ta(ta_handle);
3622 + kref_put(&sess->refcount, destroy_session);
3623 + rc = -ENOMEM;
3624 + goto out;
3625 +@@ -289,12 +290,13 @@ int amdtee_open_session(struct tee_context *ctx,
3626 + spin_lock(&sess->lock);
3627 + clear_bit(i, sess->sess_mask);
3628 + spin_unlock(&sess->lock);
3629 ++ handle_unload_ta(ta_handle);
3630 + kref_put(&sess->refcount, destroy_session);
3631 + goto out;
3632 + }
3633 +
3634 + sess->session_info[i] = session_info;
3635 +- set_session_id(sess->ta_handle, i, &arg->session);
3636 ++ set_session_id(ta_handle, i, &arg->session);
3637 + out:
3638 + free_pages((u64)ta, get_order(ta_size));
3639 + return rc;
3640 +@@ -329,6 +331,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
3641 +
3642 + /* Close the session */
3643 + handle_close_session(ta_handle, session_info);
3644 ++ handle_unload_ta(ta_handle);
3645 +
3646 + kref_put(&sess->refcount, destroy_session);
3647 +
3648 +diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
3649 +index e0c00a1b07639..51b0ecabf2ec9 100644
3650 +--- a/drivers/tty/serial/mvebu-uart.c
3651 ++++ b/drivers/tty/serial/mvebu-uart.c
3652 +@@ -818,9 +818,6 @@ static int mvebu_uart_probe(struct platform_device *pdev)
3653 + return -EINVAL;
3654 + }
3655 +
3656 +- if (!match)
3657 +- return -ENODEV;
3658 +-
3659 + /* Assume that all UART ports have a DT alias or none has */
3660 + id = of_alias_get_id(pdev->dev.of_node, "serial");
3661 + if (!pdev->dev.of_node || id < 0)
3662 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
3663 +index 0cc360da5426a..53cbf2c3f0330 100644
3664 +--- a/drivers/tty/vt/vt.c
3665 ++++ b/drivers/tty/vt/vt.c
3666 +@@ -1171,7 +1171,7 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
3667 + /* Resizes the resolution of the display adapater */
3668 + int err = 0;
3669 +
3670 +- if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize)
3671 ++ if (vc->vc_sw->con_resize)
3672 + err = vc->vc_sw->con_resize(vc, width, height, user);
3673 +
3674 + return err;
3675 +diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
3676 +index 89aeaf3c1bca6..0e0cd9e9e589e 100644
3677 +--- a/drivers/tty/vt/vt_ioctl.c
3678 ++++ b/drivers/tty/vt/vt_ioctl.c
3679 +@@ -671,21 +671,58 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
3680 + if (copy_from_user(&v, cs, sizeof(struct vt_consize)))
3681 + return -EFAULT;
3682 +
3683 +- if (v.v_vlin)
3684 +- pr_info_once("\"struct vt_consize\"->v_vlin is ignored. Please report if you need this.\n");
3685 +- if (v.v_clin)
3686 +- pr_info_once("\"struct vt_consize\"->v_clin is ignored. Please report if you need this.\n");
3687 ++ /* FIXME: Should check the copies properly */
3688 ++ if (!v.v_vlin)
3689 ++ v.v_vlin = vc->vc_scan_lines;
3690 ++
3691 ++ if (v.v_clin) {
3692 ++ int rows = v.v_vlin / v.v_clin;
3693 ++ if (v.v_rows != rows) {
3694 ++ if (v.v_rows) /* Parameters don't add up */
3695 ++ return -EINVAL;
3696 ++ v.v_rows = rows;
3697 ++ }
3698 ++ }
3699 ++
3700 ++ if (v.v_vcol && v.v_ccol) {
3701 ++ int cols = v.v_vcol / v.v_ccol;
3702 ++ if (v.v_cols != cols) {
3703 ++ if (v.v_cols)
3704 ++ return -EINVAL;
3705 ++ v.v_cols = cols;
3706 ++ }
3707 ++ }
3708 ++
3709 ++ if (v.v_clin > 32)
3710 ++ return -EINVAL;
3711 +
3712 +- console_lock();
3713 + for (i = 0; i < MAX_NR_CONSOLES; i++) {
3714 +- vc = vc_cons[i].d;
3715 ++ struct vc_data *vcp;
3716 +
3717 +- if (vc) {
3718 +- vc->vc_resize_user = 1;
3719 +- vc_resize(vc, v.v_cols, v.v_rows);
3720 ++ if (!vc_cons[i].d)
3721 ++ continue;
3722 ++ console_lock();
3723 ++ vcp = vc_cons[i].d;
3724 ++ if (vcp) {
3725 ++ int ret;
3726 ++ int save_scan_lines = vcp->vc_scan_lines;
3727 ++ int save_cell_height = vcp->vc_cell_height;
3728 ++
3729 ++ if (v.v_vlin)
3730 ++ vcp->vc_scan_lines = v.v_vlin;
3731 ++ if (v.v_clin)
3732 ++ vcp->vc_cell_height = v.v_clin;
3733 ++ vcp->vc_resize_user = 1;
3734 ++ ret = vc_resize(vcp, v.v_cols, v.v_rows);
3735 ++ if (ret) {
3736 ++ vcp->vc_scan_lines = save_scan_lines;
3737 ++ vcp->vc_cell_height = save_cell_height;
3738 ++ console_unlock();
3739 ++ return ret;
3740 ++ }
3741 + }
3742 ++ console_unlock();
3743 + }
3744 +- console_unlock();
3745 +
3746 + return 0;
3747 + }
3748 +diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
3749 +index 0330ba99730e2..652fe25475878 100644
3750 +--- a/drivers/uio/uio_hv_generic.c
3751 ++++ b/drivers/uio/uio_hv_generic.c
3752 +@@ -291,13 +291,15 @@ hv_uio_probe(struct hv_device *dev,
3753 + pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
3754 + if (pdata->recv_buf == NULL) {
3755 + ret = -ENOMEM;
3756 +- goto fail_close;
3757 ++ goto fail_free_ring;
3758 + }
3759 +
3760 + ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
3761 + RECV_BUFFER_SIZE, &pdata->recv_gpadl);
3762 +- if (ret)
3763 ++ if (ret) {
3764 ++ vfree(pdata->recv_buf);
3765 + goto fail_close;
3766 ++ }
3767 +
3768 + /* put Global Physical Address Label in name */
3769 + snprintf(pdata->recv_name, sizeof(pdata->recv_name),
3770 +@@ -316,8 +318,10 @@ hv_uio_probe(struct hv_device *dev,
3771 +
3772 + ret = vmbus_establish_gpadl(channel, pdata->send_buf,
3773 + SEND_BUFFER_SIZE, &pdata->send_gpadl);
3774 +- if (ret)
3775 ++ if (ret) {
3776 ++ vfree(pdata->send_buf);
3777 + goto fail_close;
3778 ++ }
3779 +
3780 + snprintf(pdata->send_name, sizeof(pdata->send_name),
3781 + "send:%u", pdata->send_gpadl);
3782 +@@ -347,6 +351,8 @@ hv_uio_probe(struct hv_device *dev,
3783 +
3784 + fail_close:
3785 + hv_uio_cleanup(dev, pdata);
3786 ++fail_free_ring:
3787 ++ vmbus_free_ring(dev->channel);
3788 +
3789 + return ret;
3790 + }
3791 +diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
3792 +index c7d681fef198d..3bb0b00754679 100644
3793 +--- a/drivers/uio/uio_pci_generic.c
3794 ++++ b/drivers/uio/uio_pci_generic.c
3795 +@@ -82,7 +82,7 @@ static int probe(struct pci_dev *pdev,
3796 + }
3797 +
3798 + if (pdev->irq && !pci_intx_mask_supported(pdev))
3799 +- return -ENOMEM;
3800 ++ return -ENODEV;
3801 +
3802 + gdev = devm_kzalloc(&pdev->dev, sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
3803 + if (!gdev)
3804 +diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
3805 +index 962c12be97741..631eb918f8e14 100644
3806 +--- a/drivers/video/console/vgacon.c
3807 ++++ b/drivers/video/console/vgacon.c
3808 +@@ -383,7 +383,7 @@ static void vgacon_init(struct vc_data *c, int init)
3809 + vc_resize(c, vga_video_num_columns, vga_video_num_lines);
3810 +
3811 + c->vc_scan_lines = vga_scan_lines;
3812 +- c->vc_font.height = vga_video_font_height;
3813 ++ c->vc_font.height = c->vc_cell_height = vga_video_font_height;
3814 + c->vc_complement_mask = 0x7700;
3815 + if (vga_512_chars)
3816 + c->vc_hi_font_mask = 0x0800;
3817 +@@ -518,32 +518,32 @@ static void vgacon_cursor(struct vc_data *c, int mode)
3818 + switch (CUR_SIZE(c->vc_cursor_type)) {
3819 + case CUR_UNDERLINE:
3820 + vgacon_set_cursor_size(c->state.x,
3821 +- c->vc_font.height -
3822 +- (c->vc_font.height <
3823 ++ c->vc_cell_height -
3824 ++ (c->vc_cell_height <
3825 + 10 ? 2 : 3),
3826 +- c->vc_font.height -
3827 +- (c->vc_font.height <
3828 ++ c->vc_cell_height -
3829 ++ (c->vc_cell_height <
3830 + 10 ? 1 : 2));
3831 + break;
3832 + case CUR_TWO_THIRDS:
3833 + vgacon_set_cursor_size(c->state.x,
3834 +- c->vc_font.height / 3,
3835 +- c->vc_font.height -
3836 +- (c->vc_font.height <
3837 ++ c->vc_cell_height / 3,
3838 ++ c->vc_cell_height -
3839 ++ (c->vc_cell_height <
3840 + 10 ? 1 : 2));
3841 + break;
3842 + case CUR_LOWER_THIRD:
3843 + vgacon_set_cursor_size(c->state.x,
3844 +- (c->vc_font.height * 2) / 3,
3845 +- c->vc_font.height -
3846 +- (c->vc_font.height <
3847 ++ (c->vc_cell_height * 2) / 3,
3848 ++ c->vc_cell_height -
3849 ++ (c->vc_cell_height <
3850 + 10 ? 1 : 2));
3851 + break;
3852 + case CUR_LOWER_HALF:
3853 + vgacon_set_cursor_size(c->state.x,
3854 +- c->vc_font.height / 2,
3855 +- c->vc_font.height -
3856 +- (c->vc_font.height <
3857 ++ c->vc_cell_height / 2,
3858 ++ c->vc_cell_height -
3859 ++ (c->vc_cell_height <
3860 + 10 ? 1 : 2));
3861 + break;
3862 + case CUR_NONE:
3863 +@@ -554,7 +554,7 @@ static void vgacon_cursor(struct vc_data *c, int mode)
3864 + break;
3865 + default:
3866 + vgacon_set_cursor_size(c->state.x, 1,
3867 +- c->vc_font.height);
3868 ++ c->vc_cell_height);
3869 + break;
3870 + }
3871 + break;
3872 +@@ -565,13 +565,13 @@ static int vgacon_doresize(struct vc_data *c,
3873 + unsigned int width, unsigned int height)
3874 + {
3875 + unsigned long flags;
3876 +- unsigned int scanlines = height * c->vc_font.height;
3877 ++ unsigned int scanlines = height * c->vc_cell_height;
3878 + u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
3879 +
3880 + raw_spin_lock_irqsave(&vga_lock, flags);
3881 +
3882 + vgacon_xres = width * VGA_FONTWIDTH;
3883 +- vgacon_yres = height * c->vc_font.height;
3884 ++ vgacon_yres = height * c->vc_cell_height;
3885 + if (vga_video_type >= VIDEO_TYPE_VGAC) {
3886 + outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg);
3887 + max_scan = inb_p(vga_video_port_val);
3888 +@@ -626,9 +626,9 @@ static int vgacon_doresize(struct vc_data *c,
3889 + static int vgacon_switch(struct vc_data *c)
3890 + {
3891 + int x = c->vc_cols * VGA_FONTWIDTH;
3892 +- int y = c->vc_rows * c->vc_font.height;
3893 ++ int y = c->vc_rows * c->vc_cell_height;
3894 + int rows = screen_info.orig_video_lines * vga_default_font_height/
3895 +- c->vc_font.height;
3896 ++ c->vc_cell_height;
3897 + /*
3898 + * We need to save screen size here as it's the only way
3899 + * we can spot the screen has been resized and we need to
3900 +@@ -1041,7 +1041,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
3901 + cursor_size_lastto = 0;
3902 + c->vc_sw->con_cursor(c, CM_DRAW);
3903 + }
3904 +- c->vc_font.height = fontheight;
3905 ++ c->vc_font.height = c->vc_cell_height = fontheight;
3906 + vc_resize(c, 0, rows); /* Adjust console size */
3907 + }
3908 + }
3909 +@@ -1089,12 +1089,20 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
3910 + if ((width << 1) * height > vga_vram_size)
3911 + return -EINVAL;
3912 +
3913 ++ if (user) {
3914 ++ /*
3915 ++ * Ho ho! Someone (svgatextmode, eh?) may have reprogrammed
3916 ++ * the video mode! Set the new defaults then and go away.
3917 ++ */
3918 ++ screen_info.orig_video_cols = width;
3919 ++ screen_info.orig_video_lines = height;
3920 ++ vga_default_font_height = c->vc_cell_height;
3921 ++ return 0;
3922 ++ }
3923 + if (width % 2 || width > screen_info.orig_video_cols ||
3924 + height > (screen_info.orig_video_lines * vga_default_font_height)/
3925 +- c->vc_font.height)
3926 +- /* let svgatextmode tinker with video timings and
3927 +- return success */
3928 +- return (user) ? 0 : -EINVAL;
3929 ++ c->vc_cell_height)
3930 ++ return -EINVAL;
3931 +
3932 + if (con_is_visible(c) && !vga_is_gfx) /* who knows */
3933 + vgacon_doresize(c, width, height);
3934 +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
3935 +index 3406067985b1f..22bb3892f6bd1 100644
3936 +--- a/drivers/video/fbdev/core/fbcon.c
3937 ++++ b/drivers/video/fbdev/core/fbcon.c
3938 +@@ -2019,7 +2019,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
3939 + return -EINVAL;
3940 +
3941 + pr_debug("resize now %ix%i\n", var.xres, var.yres);
3942 +- if (con_is_visible(vc)) {
3943 ++ if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
3944 + var.activate = FB_ACTIVATE_NOW |
3945 + FB_ACTIVATE_FORCE;
3946 + fb_set_var(info, &var);
3947 +diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
3948 +index 8bbac7182ad32..bd3d07aa4f0ec 100644
3949 +--- a/drivers/video/fbdev/hgafb.c
3950 ++++ b/drivers/video/fbdev/hgafb.c
3951 +@@ -286,7 +286,7 @@ static int hga_card_detect(void)
3952 +
3953 + hga_vram = ioremap(0xb0000, hga_vram_len);
3954 + if (!hga_vram)
3955 +- goto error;
3956 ++ return -ENOMEM;
3957 +
3958 + if (request_region(0x3b0, 12, "hgafb"))
3959 + release_io_ports = 1;
3960 +@@ -346,13 +346,18 @@ static int hga_card_detect(void)
3961 + hga_type_name = "Hercules";
3962 + break;
3963 + }
3964 +- return 1;
3965 ++ return 0;
3966 + error:
3967 + if (release_io_ports)
3968 + release_region(0x3b0, 12);
3969 + if (release_io_port)
3970 + release_region(0x3bf, 1);
3971 +- return 0;
3972 ++
3973 ++ iounmap(hga_vram);
3974 ++
3975 ++ pr_err("hgafb: HGA card not detected.\n");
3976 ++
3977 ++ return -EINVAL;
3978 + }
3979 +
3980 + /**
3981 +@@ -550,13 +555,11 @@ static const struct fb_ops hgafb_ops = {
3982 + static int hgafb_probe(struct platform_device *pdev)
3983 + {
3984 + struct fb_info *info;
3985 ++ int ret;
3986 +
3987 +- if (! hga_card_detect()) {
3988 +- printk(KERN_INFO "hgafb: HGA card not detected.\n");
3989 +- if (hga_vram)
3990 +- iounmap(hga_vram);
3991 +- return -EINVAL;
3992 +- }
3993 ++ ret = hga_card_detect();
3994 ++ if (ret)
3995 ++ return ret;
3996 +
3997 + printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
3998 + hga_type_name, hga_vram_len/1024);
3999 +diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
4000 +index 3ac053b884958..e04411701ec85 100644
4001 +--- a/drivers/video/fbdev/imsttfb.c
4002 ++++ b/drivers/video/fbdev/imsttfb.c
4003 +@@ -1512,11 +1512,6 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4004 + info->fix.smem_start = addr;
4005 + info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
4006 + 0x400000 : 0x800000);
4007 +- if (!info->screen_base) {
4008 +- release_mem_region(addr, size);
4009 +- framebuffer_release(info);
4010 +- return -ENOMEM;
4011 +- }
4012 + info->fix.mmio_start = addr + 0x800000;
4013 + par->dc_regs = ioremap(addr + 0x800000, 0x1000);
4014 + par->cmap_regs_phys = addr + 0x840000;
4015 +diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
4016 +index 5447b5ab7c766..1221cfd914cb0 100644
4017 +--- a/drivers/xen/xen-pciback/vpci.c
4018 ++++ b/drivers/xen/xen-pciback/vpci.c
4019 +@@ -70,7 +70,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
4020 + struct pci_dev *dev, int devid,
4021 + publish_pci_dev_cb publish_cb)
4022 + {
4023 +- int err = 0, slot, func = -1;
4024 ++ int err = 0, slot, func = PCI_FUNC(dev->devfn);
4025 + struct pci_dev_entry *t, *dev_entry;
4026 + struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
4027 +
4028 +@@ -95,22 +95,25 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
4029 +
4030 + /*
4031 + * Keep multi-function devices together on the virtual PCI bus, except
4032 +- * virtual functions.
4033 ++ * that we want to keep virtual functions at func 0 on their own. They
4034 ++ * aren't multi-function devices and hence their presence at func 0
4035 ++ * may cause guests to not scan the other functions.
4036 + */
4037 +- if (!dev->is_virtfn) {
4038 ++ if (!dev->is_virtfn || func) {
4039 + for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
4040 + if (list_empty(&vpci_dev->dev_list[slot]))
4041 + continue;
4042 +
4043 + t = list_entry(list_first(&vpci_dev->dev_list[slot]),
4044 + struct pci_dev_entry, list);
4045 ++ if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
4046 ++ continue;
4047 +
4048 + if (match_slot(dev, t->dev)) {
4049 + dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n",
4050 +- slot, PCI_FUNC(dev->devfn));
4051 ++ slot, func);
4052 + list_add_tail(&dev_entry->list,
4053 + &vpci_dev->dev_list[slot]);
4054 +- func = PCI_FUNC(dev->devfn);
4055 + goto unlock;
4056 + }
4057 + }
4058 +@@ -123,7 +126,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
4059 + slot);
4060 + list_add_tail(&dev_entry->list,
4061 + &vpci_dev->dev_list[slot]);
4062 +- func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
4063 + goto unlock;
4064 + }
4065 + }
4066 +diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
4067 +index 5188f02e75fb3..c09c7ebd6968d 100644
4068 +--- a/drivers/xen/xen-pciback/xenbus.c
4069 ++++ b/drivers/xen/xen-pciback/xenbus.c
4070 +@@ -359,7 +359,8 @@ out:
4071 + return err;
4072 + }
4073 +
4074 +-static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
4075 ++static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev,
4076 ++ enum xenbus_state state)
4077 + {
4078 + int err = 0;
4079 + int num_devs;
4080 +@@ -373,9 +374,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
4081 + dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
4082 +
4083 + mutex_lock(&pdev->dev_lock);
4084 +- /* Make sure we only reconfigure once */
4085 +- if (xenbus_read_driver_state(pdev->xdev->nodename) !=
4086 +- XenbusStateReconfiguring)
4087 ++ if (xenbus_read_driver_state(pdev->xdev->nodename) != state)
4088 + goto out;
4089 +
4090 + err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
4091 +@@ -500,6 +499,10 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
4092 + }
4093 + }
4094 +
4095 ++ if (state != XenbusStateReconfiguring)
4096 ++ /* Make sure we only reconfigure once. */
4097 ++ goto out;
4098 ++
4099 + err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
4100 + if (err) {
4101 + xenbus_dev_fatal(pdev->xdev, err,
4102 +@@ -525,7 +528,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
4103 + break;
4104 +
4105 + case XenbusStateReconfiguring:
4106 +- xen_pcibk_reconfigure(pdev);
4107 ++ xen_pcibk_reconfigure(pdev, XenbusStateReconfiguring);
4108 + break;
4109 +
4110 + case XenbusStateConnected:
4111 +@@ -664,6 +667,15 @@ static void xen_pcibk_be_watch(struct xenbus_watch *watch,
4112 + xen_pcibk_setup_backend(pdev);
4113 + break;
4114 +
4115 ++ case XenbusStateInitialised:
4116 ++ /*
4117 ++ * We typically move to Initialised when the first device was
4118 ++ * added. Hence subsequent devices getting added may need
4119 ++ * reconfiguring.
4120 ++ */
4121 ++ xen_pcibk_reconfigure(pdev, XenbusStateInitialised);
4122 ++ break;
4123 ++
4124 + default:
4125 + break;
4126 + }
4127 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4128 +index 8c4d2eaa5d58b..81b93c9c659b7 100644
4129 +--- a/fs/btrfs/inode.c
4130 ++++ b/fs/btrfs/inode.c
4131 +@@ -3253,6 +3253,7 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
4132 + inode = list_first_entry(&fs_info->delayed_iputs,
4133 + struct btrfs_inode, delayed_iput);
4134 + run_delayed_iput_locked(fs_info, inode);
4135 ++ cond_resched_lock(&fs_info->delayed_iput_lock);
4136 + }
4137 + spin_unlock(&fs_info->delayed_iput_lock);
4138 + }
4139 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
4140 +index 47e76e79b3d6b..53624fca0747a 100644
4141 +--- a/fs/btrfs/tree-log.c
4142 ++++ b/fs/btrfs/tree-log.c
4143 +@@ -6457,6 +6457,24 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
4144 + (!old_dir || old_dir->logged_trans < trans->transid))
4145 + return;
4146 +
4147 ++ /*
4148 ++ * If we are doing a rename (old_dir is not NULL) from a directory that
4149 ++ * was previously logged, make sure the next log attempt on the directory
4150 ++ * is not skipped and logs the inode again. This is because the log may
4151 ++ * not currently be authoritative for a range including the old
4152 ++ * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make
4153 ++ * sure after a log replay we do not end up with both the new and old
4154 ++ * dentries around (in case the inode is a directory we would have a
4155 ++ * directory with two hard links and 2 inode references for different
4156 ++ * parents). The next log attempt of old_dir will happen at
4157 ++ * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
4158 ++ * below, because we have previously set inode->last_unlink_trans to the
4159 ++ * current transaction ID, either here or at btrfs_record_unlink_dir() in
4160 ++ * case inode is a directory.
4161 ++ */
4162 ++ if (old_dir)
4163 ++ old_dir->logged_trans = 0;
4164 ++
4165 + btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
4166 + ctx.logging_new_name = true;
4167 + /*
4168 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
4169 +index 5df6daacc230b..e9a530da4255c 100644
4170 +--- a/fs/cifs/smb2ops.c
4171 ++++ b/fs/cifs/smb2ops.c
4172 +@@ -1822,6 +1822,8 @@ smb2_copychunk_range(const unsigned int xid,
4173 + cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
4174 +
4175 + /* Request server copy to target from src identified by key */
4176 ++ kfree(retbuf);
4177 ++ retbuf = NULL;
4178 + rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
4179 + trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
4180 + true /* is_fsctl */, (char *)pcchunk,
4181 +diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
4182 +index 943e523f4c9df..3d8623139538b 100644
4183 +--- a/fs/ecryptfs/crypto.c
4184 ++++ b/fs/ecryptfs/crypto.c
4185 +@@ -296,10 +296,8 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
4186 + struct extent_crypt_result ecr;
4187 + int rc = 0;
4188 +
4189 +- if (!crypt_stat || !crypt_stat->tfm
4190 +- || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
4191 +- return -EINVAL;
4192 +-
4193 ++ BUG_ON(!crypt_stat || !crypt_stat->tfm
4194 ++ || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
4195 + if (unlikely(ecryptfs_verbosity > 0)) {
4196 + ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
4197 + crypt_stat->key_size);
4198 +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
4199 +index 99df69b848224..c63d0a7f7ba4f 100644
4200 +--- a/fs/hugetlbfs/inode.c
4201 ++++ b/fs/hugetlbfs/inode.c
4202 +@@ -532,7 +532,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
4203 + * the subpool and global reserve usage count can need
4204 + * to be adjusted.
4205 + */
4206 +- VM_BUG_ON(PagePrivate(page));
4207 ++ VM_BUG_ON(HPageRestoreReserve(page));
4208 + remove_huge_page(page);
4209 + freed++;
4210 + if (!truncate_op) {
4211 +diff --git a/fs/namespace.c b/fs/namespace.c
4212 +index 56bb5a5fdc0d0..4d2e827ddb598 100644
4213 +--- a/fs/namespace.c
4214 ++++ b/fs/namespace.c
4215 +@@ -3853,8 +3853,12 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4216 + if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
4217 + return -EINVAL;
4218 +
4219 ++ /* Don't yet support filesystem mountable in user namespaces. */
4220 ++ if (m->mnt_sb->s_user_ns != &init_user_ns)
4221 ++ return -EINVAL;
4222 ++
4223 + /* We're not controlling the superblock. */
4224 +- if (!ns_capable(m->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
4225 ++ if (!capable(CAP_SYS_ADMIN))
4226 + return -EPERM;
4227 +
4228 + /* Mount has already been visible in the filesystem hierarchy. */
4229 +diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
4230 +index 153734816b49c..d5b9c8d40c18e 100644
4231 +--- a/include/linux/console_struct.h
4232 ++++ b/include/linux/console_struct.h
4233 +@@ -101,6 +101,7 @@ struct vc_data {
4234 + unsigned int vc_rows;
4235 + unsigned int vc_size_row; /* Bytes per row */
4236 + unsigned int vc_scan_lines; /* # of scan lines */
4237 ++ unsigned int vc_cell_height; /* CRTC character cell height */
4238 + unsigned long vc_origin; /* [!] Start of real screen */
4239 + unsigned long vc_scr_end; /* [!] End of real screen */
4240 + unsigned long vc_visible_origin; /* [!] Top of visible window */
4241 +diff --git a/ipc/mqueue.c b/ipc/mqueue.c
4242 +index 8031464ed4ae2..4e4e61111500c 100644
4243 +--- a/ipc/mqueue.c
4244 ++++ b/ipc/mqueue.c
4245 +@@ -1004,12 +1004,14 @@ static inline void __pipelined_op(struct wake_q_head *wake_q,
4246 + struct mqueue_inode_info *info,
4247 + struct ext_wait_queue *this)
4248 + {
4249 ++ struct task_struct *task;
4250 ++
4251 + list_del(&this->list);
4252 +- get_task_struct(this->task);
4253 ++ task = get_task_struct(this->task);
4254 +
4255 + /* see MQ_BARRIER for purpose/pairing */
4256 + smp_store_release(&this->state, STATE_READY);
4257 +- wake_q_add_safe(wake_q, this->task);
4258 ++ wake_q_add_safe(wake_q, task);
4259 + }
4260 +
4261 + /* pipelined_send() - send a message directly to the task waiting in
4262 +diff --git a/ipc/msg.c b/ipc/msg.c
4263 +index acd1bc7af55a2..6e6c8e0c9380e 100644
4264 +--- a/ipc/msg.c
4265 ++++ b/ipc/msg.c
4266 +@@ -251,11 +251,13 @@ static void expunge_all(struct msg_queue *msq, int res,
4267 + struct msg_receiver *msr, *t;
4268 +
4269 + list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
4270 +- get_task_struct(msr->r_tsk);
4271 ++ struct task_struct *r_tsk;
4272 ++
4273 ++ r_tsk = get_task_struct(msr->r_tsk);
4274 +
4275 + /* see MSG_BARRIER for purpose/pairing */
4276 + smp_store_release(&msr->r_msg, ERR_PTR(res));
4277 +- wake_q_add_safe(wake_q, msr->r_tsk);
4278 ++ wake_q_add_safe(wake_q, r_tsk);
4279 + }
4280 + }
4281 +
4282 +diff --git a/ipc/sem.c b/ipc/sem.c
4283 +index f6c30a85dadf9..7d9c06b0ad6e2 100644
4284 +--- a/ipc/sem.c
4285 ++++ b/ipc/sem.c
4286 +@@ -784,12 +784,14 @@ would_block:
4287 + static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
4288 + struct wake_q_head *wake_q)
4289 + {
4290 +- get_task_struct(q->sleeper);
4291 ++ struct task_struct *sleeper;
4292 ++
4293 ++ sleeper = get_task_struct(q->sleeper);
4294 +
4295 + /* see SEM_BARRIER_2 for purpuse/pairing */
4296 + smp_store_release(&q->status, error);
4297 +
4298 +- wake_q_add_safe(wake_q, q->sleeper);
4299 ++ wake_q_add_safe(wake_q, sleeper);
4300 + }
4301 +
4302 + static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
4303 +diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
4304 +index 209ad8dcfcecf..62a52be8f6ba9 100644
4305 +--- a/kernel/kcsan/debugfs.c
4306 ++++ b/kernel/kcsan/debugfs.c
4307 +@@ -261,9 +261,10 @@ static const struct file_operations debugfs_ops =
4308 + .release = single_release
4309 + };
4310 +
4311 +-static void __init kcsan_debugfs_init(void)
4312 ++static int __init kcsan_debugfs_init(void)
4313 + {
4314 + debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
4315 ++ return 0;
4316 + }
4317 +
4318 + late_initcall(kcsan_debugfs_init);
4319 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
4320 +index f160f1c97ca1e..f39c383c71804 100644
4321 +--- a/kernel/locking/lockdep.c
4322 ++++ b/kernel/locking/lockdep.c
4323 +@@ -5731,7 +5731,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
4324 + {
4325 + unsigned long flags;
4326 +
4327 +- trace_lock_acquired(lock, ip);
4328 ++ trace_lock_contended(lock, ip);
4329 +
4330 + if (unlikely(!lock_stat || !lockdep_enabled()))
4331 + return;
4332 +@@ -5749,7 +5749,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
4333 + {
4334 + unsigned long flags;
4335 +
4336 +- trace_lock_contended(lock, ip);
4337 ++ trace_lock_acquired(lock, ip);
4338 +
4339 + if (unlikely(!lock_stat || !lockdep_enabled()))
4340 + return;
4341 +diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
4342 +index a7276aaf2abc0..db9301591e3fc 100644
4343 +--- a/kernel/locking/mutex-debug.c
4344 ++++ b/kernel/locking/mutex-debug.c
4345 +@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
4346 + task->blocked_on = waiter;
4347 + }
4348 +
4349 +-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
4350 ++void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
4351 + struct task_struct *task)
4352 + {
4353 + DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
4354 +@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
4355 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
4356 + task->blocked_on = NULL;
4357 +
4358 +- list_del_init(&waiter->list);
4359 ++ INIT_LIST_HEAD(&waiter->list);
4360 + waiter->task = NULL;
4361 + }
4362 +
4363 +diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
4364 +index 1edd3f45a4ecb..53e631e1d76da 100644
4365 +--- a/kernel/locking/mutex-debug.h
4366 ++++ b/kernel/locking/mutex-debug.h
4367 +@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
4368 + extern void debug_mutex_add_waiter(struct mutex *lock,
4369 + struct mutex_waiter *waiter,
4370 + struct task_struct *task);
4371 +-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
4372 ++extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
4373 + struct task_struct *task);
4374 + extern void debug_mutex_unlock(struct mutex *lock);
4375 + extern void debug_mutex_init(struct mutex *lock, const char *name,
4376 +diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
4377 +index 622ebdfcd083b..3899157c13b10 100644
4378 +--- a/kernel/locking/mutex.c
4379 ++++ b/kernel/locking/mutex.c
4380 +@@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
4381 + * Add @waiter to a given location in the lock wait_list and set the
4382 + * FLAG_WAITERS flag if it's the first waiter.
4383 + */
4384 +-static void __sched
4385 ++static void
4386 + __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
4387 + struct list_head *list)
4388 + {
4389 +@@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
4390 + __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
4391 + }
4392 +
4393 ++static void
4394 ++__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
4395 ++{
4396 ++ list_del(&waiter->list);
4397 ++ if (likely(list_empty(&lock->wait_list)))
4398 ++ __mutex_clear_flag(lock, MUTEX_FLAGS);
4399 ++
4400 ++ debug_mutex_remove_waiter(lock, waiter, current);
4401 ++}
4402 ++
4403 + /*
4404 + * Give up ownership to a specific task, when @task = NULL, this is equivalent
4405 + * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
4406 +@@ -1061,9 +1071,7 @@ acquired:
4407 + __ww_mutex_check_waiters(lock, ww_ctx);
4408 + }
4409 +
4410 +- mutex_remove_waiter(lock, &waiter, current);
4411 +- if (likely(list_empty(&lock->wait_list)))
4412 +- __mutex_clear_flag(lock, MUTEX_FLAGS);
4413 ++ __mutex_remove_waiter(lock, &waiter);
4414 +
4415 + debug_mutex_free_waiter(&waiter);
4416 +
4417 +@@ -1080,7 +1088,7 @@ skip_wait:
4418 +
4419 + err:
4420 + __set_current_state(TASK_RUNNING);
4421 +- mutex_remove_waiter(lock, &waiter, current);
4422 ++ __mutex_remove_waiter(lock, &waiter);
4423 + err_early_kill:
4424 + spin_unlock(&lock->wait_lock);
4425 + debug_mutex_free_waiter(&waiter);
4426 +diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
4427 +index 1c2287d3fa719..f0c710b1d1927 100644
4428 +--- a/kernel/locking/mutex.h
4429 ++++ b/kernel/locking/mutex.h
4430 +@@ -10,12 +10,10 @@
4431 + * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
4432 + */
4433 +
4434 +-#define mutex_remove_waiter(lock, waiter, task) \
4435 +- __list_del((waiter)->list.prev, (waiter)->list.next)
4436 +-
4437 + #define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
4438 + #define debug_mutex_free_waiter(waiter) do { } while (0)
4439 + #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
4440 ++#define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
4441 + #define debug_mutex_unlock(lock) do { } while (0)
4442 + #define debug_mutex_init(lock, name, key) do { } while (0)
4443 +
4444 +diff --git a/kernel/ptrace.c b/kernel/ptrace.c
4445 +index 61db50f7ca866..5f50fdd1d855e 100644
4446 +--- a/kernel/ptrace.c
4447 ++++ b/kernel/ptrace.c
4448 +@@ -169,6 +169,21 @@ void __ptrace_unlink(struct task_struct *child)
4449 + spin_unlock(&child->sighand->siglock);
4450 + }
4451 +
4452 ++static bool looks_like_a_spurious_pid(struct task_struct *task)
4453 ++{
4454 ++ if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
4455 ++ return false;
4456 ++
4457 ++ if (task_pid_vnr(task) == task->ptrace_message)
4458 ++ return false;
4459 ++ /*
4460 ++ * The tracee changed its pid but the PTRACE_EVENT_EXEC event
4461 ++ * was not wait()'ed, most probably debugger targets the old
4462 ++ * leader which was destroyed in de_thread().
4463 ++ */
4464 ++ return true;
4465 ++}
4466 ++
4467 + /* Ensure that nothing can wake it up, even SIGKILL */
4468 + static bool ptrace_freeze_traced(struct task_struct *task)
4469 + {
4470 +@@ -179,7 +194,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
4471 + return ret;
4472 +
4473 + spin_lock_irq(&task->sighand->siglock);
4474 +- if (task_is_traced(task) && !__fatal_signal_pending(task)) {
4475 ++ if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
4476 ++ !__fatal_signal_pending(task)) {
4477 + task->state = __TASK_TRACED;
4478 + ret = true;
4479 + }
4480 +diff --git a/mm/gup.c b/mm/gup.c
4481 +index 333f5dfd89423..4164a70160e31 100644
4482 +--- a/mm/gup.c
4483 ++++ b/mm/gup.c
4484 +@@ -1535,10 +1535,6 @@ struct page *get_dump_page(unsigned long addr)
4485 + FOLL_FORCE | FOLL_DUMP | FOLL_GET);
4486 + if (locked)
4487 + mmap_read_unlock(mm);
4488 +-
4489 +- if (ret == 1 && is_page_poisoned(page))
4490 +- return NULL;
4491 +-
4492 + return (ret == 1) ? page : NULL;
4493 + }
4494 + #endif /* CONFIG_ELF_CORE */
4495 +diff --git a/mm/internal.h b/mm/internal.h
4496 +index cb3c5e0a7799f..1432feec62df0 100644
4497 +--- a/mm/internal.h
4498 ++++ b/mm/internal.h
4499 +@@ -97,26 +97,6 @@ static inline void set_page_refcounted(struct page *page)
4500 + set_page_count(page, 1);
4501 + }
4502 +
4503 +-/*
4504 +- * When kernel touch the user page, the user page may be have been marked
4505 +- * poison but still mapped in user space, if without this page, the kernel
4506 +- * can guarantee the data integrity and operation success, the kernel is
4507 +- * better to check the posion status and avoid touching it, be good not to
4508 +- * panic, coredump for process fatal signal is a sample case matching this
4509 +- * scenario. Or if kernel can't guarantee the data integrity, it's better
4510 +- * not to call this function, let kernel touch the poison page and get to
4511 +- * panic.
4512 +- */
4513 +-static inline bool is_page_poisoned(struct page *page)
4514 +-{
4515 +- if (PageHWPoison(page))
4516 +- return true;
4517 +- else if (PageHuge(page) && PageHWPoison(compound_head(page)))
4518 +- return true;
4519 +-
4520 +- return false;
4521 +-}
4522 +-
4523 + extern unsigned long highest_memmap_pfn;
4524 +
4525 + /*
4526 +diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
4527 +index 9a3d451402d7b..28b2314f1cf12 100644
4528 +--- a/mm/userfaultfd.c
4529 ++++ b/mm/userfaultfd.c
4530 +@@ -362,38 +362,38 @@ out:
4531 + * If a reservation for the page existed in the reservation
4532 + * map of a private mapping, the map was modified to indicate
4533 + * the reservation was consumed when the page was allocated.
4534 +- * We clear the PagePrivate flag now so that the global
4535 ++ * We clear the HPageRestoreReserve flag now so that the global
4536 + * reserve count will not be incremented in free_huge_page.
4537 + * The reservation map will still indicate the reservation
4538 + * was consumed and possibly prevent later page allocation.
4539 + * This is better than leaking a global reservation. If no
4540 +- * reservation existed, it is still safe to clear PagePrivate
4541 +- * as no adjustments to reservation counts were made during
4542 +- * allocation.
4543 ++ * reservation existed, it is still safe to clear
4544 ++ * HPageRestoreReserve as no adjustments to reservation counts
4545 ++ * were made during allocation.
4546 + *
4547 + * The reservation map for shared mappings indicates which
4548 + * pages have reservations. When a huge page is allocated
4549 + * for an address with a reservation, no change is made to
4550 +- * the reserve map. In this case PagePrivate will be set
4551 +- * to indicate that the global reservation count should be
4552 ++ * the reserve map. In this case HPageRestoreReserve will be
4553 ++ * set to indicate that the global reservation count should be
4554 + * incremented when the page is freed. This is the desired
4555 + * behavior. However, when a huge page is allocated for an
4556 + * address without a reservation a reservation entry is added
4557 +- * to the reservation map, and PagePrivate will not be set.
4558 +- * When the page is freed, the global reserve count will NOT
4559 +- * be incremented and it will appear as though we have leaked
4560 +- * reserved page. In this case, set PagePrivate so that the
4561 +- * global reserve count will be incremented to match the
4562 +- * reservation map entry which was created.
4563 ++ * to the reservation map, and HPageRestoreReserve will not be
4564 ++ * set. When the page is freed, the global reserve count will
4565 ++ * NOT be incremented and it will appear as though we have
4566 ++ * leaked reserved page. In this case, set HPageRestoreReserve
4567 ++ * so that the global reserve count will be incremented to
4568 ++ * match the reservation map entry which was created.
4569 + *
4570 + * Note that vm_alloc_shared is based on the flags of the vma
4571 + * for which the page was originally allocated. dst_vma could
4572 + * be different or NULL on error.
4573 + */
4574 + if (vm_alloc_shared)
4575 +- SetPagePrivate(page);
4576 ++ SetHPageRestoreReserve(page);
4577 + else
4578 +- ClearPagePrivate(page);
4579 ++ ClearHPageRestoreReserve(page);
4580 + put_page(page);
4581 + }
4582 + BUG_ON(copied < 0);
4583 +diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
4584 +index b0c1ee110eff9..e03cc284161c4 100644
4585 +--- a/net/bluetooth/smp.c
4586 ++++ b/net/bluetooth/smp.c
4587 +@@ -2732,6 +2732,15 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
4588 + if (skb->len < sizeof(*key))
4589 + return SMP_INVALID_PARAMS;
4590 +
4591 ++ /* Check if remote and local public keys are the same and debug key is
4592 ++ * not in use.
4593 ++ */
4594 ++ if (!test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags) &&
4595 ++ !crypto_memneq(key, smp->local_pk, 64)) {
4596 ++ bt_dev_err(hdev, "Remote and local public keys are identical");
4597 ++ return SMP_UNSPECIFIED;
4598 ++ }
4599 ++
4600 + memcpy(smp->remote_pk, key, 64);
4601 +
4602 + if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) {
4603 +diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
4604 +index 25778765cbfe9..9897bd26a4388 100644
4605 +--- a/sound/firewire/Kconfig
4606 ++++ b/sound/firewire/Kconfig
4607 +@@ -38,7 +38,7 @@ config SND_OXFW
4608 + * Mackie(Loud) Onyx 1640i (former model)
4609 + * Mackie(Loud) Onyx Satellite
4610 + * Mackie(Loud) Tapco Link.Firewire
4611 +- * Mackie(Loud) d.2 pro/d.4 pro
4612 ++ * Mackie(Loud) d.4 pro
4613 + * Mackie(Loud) U.420/U.420d
4614 + * TASCAM FireOne
4615 + * Stanton Controllers & Systems 1 Deck/Mixer
4616 +@@ -84,7 +84,7 @@ config SND_BEBOB
4617 + * PreSonus FIREBOX/FIREPOD/FP10/Inspire1394
4618 + * BridgeCo RDAudio1/Audio5
4619 + * Mackie Onyx 1220/1620/1640 (FireWire I/O Card)
4620 +- * Mackie d.2 (FireWire Option)
4621 ++ * Mackie d.2 (FireWire Option) and d.2 Pro
4622 + * Stanton FinalScratch 2 (ScratchAmp)
4623 + * Tascam IF-FW/DM
4624 + * Behringer XENIX UFX 1204/1604
4625 +diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
4626 +index 26e7cb555d3c5..aa53c13b89d34 100644
4627 +--- a/sound/firewire/amdtp-stream-trace.h
4628 ++++ b/sound/firewire/amdtp-stream-trace.h
4629 +@@ -14,8 +14,8 @@
4630 + #include <linux/tracepoint.h>
4631 +
4632 + TRACE_EVENT(amdtp_packet,
4633 +- TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int index),
4634 +- TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, index),
4635 ++ TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int packet_index, unsigned int index),
4636 ++ TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, packet_index, index),
4637 + TP_STRUCT__entry(
4638 + __field(unsigned int, second)
4639 + __field(unsigned int, cycle)
4640 +@@ -48,7 +48,7 @@ TRACE_EVENT(amdtp_packet,
4641 + __entry->payload_quadlets = payload_length / sizeof(__be32);
4642 + __entry->data_blocks = data_blocks;
4643 + __entry->data_block_counter = data_block_counter,
4644 +- __entry->packet_index = s->packet_index;
4645 ++ __entry->packet_index = packet_index;
4646 + __entry->irq = !!in_interrupt();
4647 + __entry->index = index;
4648 + ),
4649 +diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
4650 +index 4e2f2bb7879fb..e0faa6601966c 100644
4651 +--- a/sound/firewire/amdtp-stream.c
4652 ++++ b/sound/firewire/amdtp-stream.c
4653 +@@ -526,7 +526,7 @@ static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
4654 + }
4655 +
4656 + trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
4657 +- data_block_counter, index);
4658 ++ data_block_counter, s->packet_index, index);
4659 + }
4660 +
4661 + static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
4662 +@@ -630,21 +630,27 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
4663 + unsigned int *payload_length,
4664 + unsigned int *data_blocks,
4665 + unsigned int *data_block_counter,
4666 +- unsigned int *syt, unsigned int index)
4667 ++ unsigned int *syt, unsigned int packet_index, unsigned int index)
4668 + {
4669 + const __be32 *cip_header;
4670 ++ unsigned int cip_header_size;
4671 + int err;
4672 +
4673 + *payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
4674 +- if (*payload_length > s->ctx_data.tx.ctx_header_size +
4675 +- s->ctx_data.tx.max_ctx_payload_length) {
4676 ++
4677 ++ if (!(s->flags & CIP_NO_HEADER))
4678 ++ cip_header_size = 8;
4679 ++ else
4680 ++ cip_header_size = 0;
4681 ++
4682 ++ if (*payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
4683 + dev_err(&s->unit->device,
4684 + "Detect jumbo payload: %04x %04x\n",
4685 +- *payload_length, s->ctx_data.tx.max_ctx_payload_length);
4686 ++ *payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
4687 + return -EIO;
4688 + }
4689 +
4690 +- if (!(s->flags & CIP_NO_HEADER)) {
4691 ++ if (cip_header_size > 0) {
4692 + cip_header = ctx_header + 2;
4693 + err = check_cip_header(s, cip_header, *payload_length,
4694 + data_blocks, data_block_counter, syt);
4695 +@@ -662,7 +668,7 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
4696 + }
4697 +
4698 + trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
4699 +- *data_block_counter, index);
4700 ++ *data_block_counter, packet_index, index);
4701 +
4702 + return err;
4703 + }
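
After the change, the jumbo-payload check accounts for the two CIP header quadlets that ride in the payload whenever CIP_NO_HEADER is not set, instead of mixing in the context header size. A standalone sketch of that arithmetic; payload_ok(), the flag value and the sample sizes are invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    #define CIP_NO_HEADER 0x1u /* illustrative value only */

    static bool payload_ok(unsigned int flags, unsigned int payload_length,
                           unsigned int max_ctx_payload_length)
    {
            /* Two quadlets of CIP header ride in the payload unless
             * headers are disabled for this stream. */
            unsigned int cip_header_size = (flags & CIP_NO_HEADER) ? 0 : 8;

            return payload_length <= cip_header_size + max_ctx_payload_length;
    }

    int main(void)
    {
            /* 8 bytes of CIP header plus 1024 bytes of data fits ... */
            printf("%d\n", payload_ok(0, 1032, 1024)); /* prints 1 */
            /* ... one byte more is a jumbo payload and is rejected. */
            printf("%d\n", payload_ok(0, 1033, 1024)); /* prints 0 */
            return 0;
    }
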
4704 +@@ -701,12 +707,13 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
4705 + unsigned int packets)
4706 + {
4707 + unsigned int dbc = s->data_block_counter;
4708 ++ unsigned int packet_index = s->packet_index;
4709 ++ unsigned int queue_size = s->queue_size;
4710 + int i;
4711 + int err;
4712 +
4713 + for (i = 0; i < packets; ++i) {
4714 + struct pkt_desc *desc = descs + i;
4715 +- unsigned int index = (s->packet_index + i) % s->queue_size;
4716 + unsigned int cycle;
4717 + unsigned int payload_length;
4718 + unsigned int data_blocks;
4719 +@@ -715,7 +722,7 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
4720 + cycle = compute_cycle_count(ctx_header[1]);
4721 +
4722 + err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
4723 +- &data_blocks, &dbc, &syt, i);
4724 ++ &data_blocks, &dbc, &syt, packet_index, i);
4725 + if (err < 0)
4726 + return err;
4727 +
4728 +@@ -723,13 +730,15 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
4729 + desc->syt = syt;
4730 + desc->data_blocks = data_blocks;
4731 + desc->data_block_counter = dbc;
4732 +- desc->ctx_payload = s->buffer.packets[index].buffer;
4733 ++ desc->ctx_payload = s->buffer.packets[packet_index].buffer;
4734 +
4735 + if (!(s->flags & CIP_DBC_IS_END_EVENT))
4736 + dbc = (dbc + desc->data_blocks) & 0xff;
4737 +
4738 + ctx_header +=
4739 + s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
4740 ++
4741 ++ packet_index = (packet_index + 1) % queue_size;
4742 + }
4743 +
4744 + s->data_block_counter = dbc;
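
With the loop reworked, the descriptor index lives in a local packet_index that advances by one per packet and wraps at queue_size, matching the hardware ring. The wrap-around behaviour in isolation, as a toy walk:

    /* Toy ring-buffer walk matching the packet_index bookkeeping above. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int queue_size = 4, packet_index = 2;
            int i;

            for (i = 0; i < 5; i++) {
                    printf("desc %d uses slot %u\n", i, packet_index);
                    packet_index = (packet_index + 1) % queue_size;
            }
            return 0; /* visits slots 2 3 0 1 2 */
    }
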
4745 +@@ -1065,23 +1074,22 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
4746 + s->data_block_counter = 0;
4747 + }
4748 +
4749 +- /* initialize packet buffer */
4750 ++ // initialize packet buffer.
4751 ++ max_ctx_payload_size = amdtp_stream_get_max_payload(s);
4752 + if (s->direction == AMDTP_IN_STREAM) {
4753 + dir = DMA_FROM_DEVICE;
4754 + type = FW_ISO_CONTEXT_RECEIVE;
4755 +- if (!(s->flags & CIP_NO_HEADER))
4756 ++ if (!(s->flags & CIP_NO_HEADER)) {
4757 ++ max_ctx_payload_size -= 8;
4758 + ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
4759 +- else
4760 ++ } else {
4761 + ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
4762 +-
4763 +- max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
4764 +- ctx_header_size;
4765 ++ }
4766 + } else {
4767 + dir = DMA_TO_DEVICE;
4768 + type = FW_ISO_CONTEXT_TRANSMIT;
4769 + ctx_header_size = 0; // No effect for IT context.
4770 +
4771 +- max_ctx_payload_size = amdtp_stream_get_max_payload(s);
4772 + if (!(s->flags & CIP_NO_HEADER))
4773 + max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
4774 + }
4775 +diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
4776 +index 2c8e3392a4903..daeecfa8b9aac 100644
4777 +--- a/sound/firewire/bebob/bebob.c
4778 ++++ b/sound/firewire/bebob/bebob.c
4779 +@@ -387,7 +387,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
4780 + SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal),
4781 + /* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */
4782 + SND_BEBOB_DEV_ENTRY(VEN_MACKIE2, 0x00010065, &spec_normal),
4783 +- /* Mackie, d.2 (Firewire Option) */
4784 ++ // Mackie, d.2 (Firewire option card) and d.2 Pro (the card is built-in).
4785 + SND_BEBOB_DEV_ENTRY(VEN_MACKIE1, 0x00010067, &spec_normal),
4786 + /* Stanton, ScratchAmp */
4787 + SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal),
4788 +diff --git a/sound/firewire/dice/dice-alesis.c b/sound/firewire/dice/dice-alesis.c
4789 +index 0916864511d50..27c13b9cc9efd 100644
4790 +--- a/sound/firewire/dice/dice-alesis.c
4791 ++++ b/sound/firewire/dice/dice-alesis.c
4792 +@@ -16,7 +16,7 @@ alesis_io14_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
4793 + static const unsigned int
4794 + alesis_io26_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
4795 + {10, 10, 4}, /* Tx0 = Analog + S/PDIF. */
4796 +- {16, 8, 0}, /* Tx1 = ADAT1 + ADAT2. */
4797 ++ {16, 4, 0}, /* Tx1 = ADAT1 + ADAT2 (available at low rate). */
4798 + };
4799 +
4800 + int snd_dice_detect_alesis_formats(struct snd_dice *dice)
4801 +diff --git a/sound/firewire/dice/dice-tcelectronic.c b/sound/firewire/dice/dice-tcelectronic.c
4802 +index a8875d24ba2aa..43a3bcb15b3d1 100644
4803 +--- a/sound/firewire/dice/dice-tcelectronic.c
4804 ++++ b/sound/firewire/dice/dice-tcelectronic.c
4805 +@@ -38,8 +38,8 @@ static const struct dice_tc_spec konnekt_24d = {
4806 + };
4807 +
4808 + static const struct dice_tc_spec konnekt_live = {
4809 +- .tx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
4810 +- .rx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
4811 ++ .tx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
4812 ++ .rx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
4813 + .has_midi = true,
4814 + };
4815 +
4816 +diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
4817 +index 1f1e3236efb8e..9eea25c46dc7e 100644
4818 +--- a/sound/firewire/oxfw/oxfw.c
4819 ++++ b/sound/firewire/oxfw/oxfw.c
4820 +@@ -355,7 +355,6 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
4821 + * Onyx-i series (former models): 0x081216
4822 + * Mackie Onyx Satellite: 0x00200f
4823 + * Tapco LINK.firewire 4x6: 0x000460
4824 +- * d.2 pro: Unknown
4825 + * d.4 pro: Unknown
4826 + * U.420: Unknown
4827 + * U.420d: Unknown
4828 +diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
4829 +index 6c9d534ce8b61..95290ffe5c6e7 100644
4830 +--- a/sound/isa/sb/sb8.c
4831 ++++ b/sound/isa/sb/sb8.c
4832 +@@ -95,10 +95,6 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
4833 +
4834 + /* block the 0x388 port to avoid PnP conflicts */
4835 + acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
4836 +- if (!acard->fm_res) {
4837 +- err = -EBUSY;
4838 +- goto _err;
4839 +- }
4840 +
4841 + if (port[dev] != SNDRV_AUTO_PORT) {
4842 + if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
4843 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4844 +index 1fe70f2fe4fe8..43a63db4ab6ad 100644
4845 +--- a/sound/pci/hda/patch_realtek.c
4846 ++++ b/sound/pci/hda/patch_realtek.c
4847 +@@ -395,7 +395,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
4848 + case 0x10ec0282:
4849 + case 0x10ec0283:
4850 + case 0x10ec0286:
4851 +- case 0x10ec0287:
4852 + case 0x10ec0288:
4853 + case 0x10ec0285:
4854 + case 0x10ec0298:
4855 +@@ -406,6 +405,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
4856 + case 0x10ec0275:
4857 + alc_update_coef_idx(codec, 0xe, 0, 1<<0);
4858 + break;
4859 ++ case 0x10ec0287:
4860 ++ alc_update_coef_idx(codec, 0x10, 1<<9, 0);
4861 ++ alc_write_coef_idx(codec, 0x8, 0x4ab7);
4862 ++ break;
4863 + case 0x10ec0293:
4864 + alc_update_coef_idx(codec, 0xa, 1<<13, 0);
4865 + break;
4866 +@@ -5717,6 +5720,18 @@ static void alc_fixup_tpt470_dacs(struct hda_codec *codec,
4867 + spec->gen.preferred_dacs = preferred_pairs;
4868 + }
4869 +
4870 ++static void alc295_fixup_asus_dacs(struct hda_codec *codec,
4871 ++ const struct hda_fixup *fix, int action)
4872 ++{
4873 ++ static const hda_nid_t preferred_pairs[] = {
4874 ++ 0x17, 0x02, 0x21, 0x03, 0
4875 ++ };
4876 ++ struct alc_spec *spec = codec->spec;
4877 ++
4878 ++ if (action == HDA_FIXUP_ACT_PRE_PROBE)
4879 ++ spec->gen.preferred_dacs = preferred_pairs;
4880 ++}
4881 ++
4882 + static void alc_shutup_dell_xps13(struct hda_codec *codec)
4883 + {
4884 + struct alc_spec *spec = codec->spec;
4885 +@@ -6232,6 +6247,35 @@ static void alc294_fixup_gx502_hp(struct hda_codec *codec,
4886 + }
4887 + }
4888 +
4889 ++static void alc294_gu502_toggle_output(struct hda_codec *codec,
4890 ++ struct hda_jack_callback *cb)
4891 ++{
4892 ++ /* Windows sets 0x10 to 0x8420 for Node 0x20 which is
4893 ++ * responsible for changes between speakers and headphones
4894 ++ */
4895 ++ if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT)
4896 ++ alc_write_coef_idx(codec, 0x10, 0x8420);
4897 ++ else
4898 ++ alc_write_coef_idx(codec, 0x10, 0x0a20);
4899 ++}
4900 ++
4901 ++static void alc294_fixup_gu502_hp(struct hda_codec *codec,
4902 ++ const struct hda_fixup *fix, int action)
4903 ++{
4904 ++ if (!is_jack_detectable(codec, 0x21))
4905 ++ return;
4906 ++
4907 ++ switch (action) {
4908 ++ case HDA_FIXUP_ACT_PRE_PROBE:
4909 ++ snd_hda_jack_detect_enable_callback(codec, 0x21,
4910 ++ alc294_gu502_toggle_output);
4911 ++ break;
4912 ++ case HDA_FIXUP_ACT_INIT:
4913 ++ alc294_gu502_toggle_output(codec, NULL);
4914 ++ break;
4915 ++ }
4916 ++}
4917 ++
4918 + static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
4919 + const struct hda_fixup *fix, int action)
4920 + {
4921 +@@ -6449,6 +6493,9 @@ enum {
4922 + ALC294_FIXUP_ASUS_GX502_HP,
4923 + ALC294_FIXUP_ASUS_GX502_PINS,
4924 + ALC294_FIXUP_ASUS_GX502_VERBS,
4925 ++ ALC294_FIXUP_ASUS_GU502_HP,
4926 ++ ALC294_FIXUP_ASUS_GU502_PINS,
4927 ++ ALC294_FIXUP_ASUS_GU502_VERBS,
4928 + ALC285_FIXUP_HP_GPIO_LED,
4929 + ALC285_FIXUP_HP_MUTE_LED,
4930 + ALC236_FIXUP_HP_GPIO_LED,
4931 +@@ -6485,6 +6532,9 @@ enum {
4932 + ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
4933 + ALC256_FIXUP_ACER_HEADSET_MIC,
4934 + ALC285_FIXUP_IDEAPAD_S740_COEF,
4935 ++ ALC295_FIXUP_ASUS_DACS,
4936 ++ ALC295_FIXUP_HP_OMEN,
4937 ++ ALC285_FIXUP_HP_SPECTRE_X360,
4938 + };
4939 +
4940 + static const struct hda_fixup alc269_fixups[] = {
4941 +@@ -7687,6 +7737,35 @@ static const struct hda_fixup alc269_fixups[] = {
4942 + .type = HDA_FIXUP_FUNC,
4943 + .v.func = alc294_fixup_gx502_hp,
4944 + },
4945 ++ [ALC294_FIXUP_ASUS_GU502_PINS] = {
4946 ++ .type = HDA_FIXUP_PINS,
4947 ++ .v.pins = (const struct hda_pintbl[]) {
4948 ++ { 0x19, 0x01a11050 }, /* rear HP mic */
4949 ++ { 0x1a, 0x01a11830 }, /* rear external mic */
4950 ++ { 0x21, 0x012110f0 }, /* rear HP out */
4951 ++ { }
4952 ++ },
4953 ++ .chained = true,
4954 ++ .chain_id = ALC294_FIXUP_ASUS_GU502_VERBS
4955 ++ },
4956 ++ [ALC294_FIXUP_ASUS_GU502_VERBS] = {
4957 ++ .type = HDA_FIXUP_VERBS,
4958 ++ .v.verbs = (const struct hda_verb[]) {
4959 ++ /* set 0x15 to HP-OUT ctrl */
4960 ++ { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
4961 ++ /* unmute the 0x15 amp */
4962 ++ { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 },
4963 ++ /* set 0x1b to HP-OUT */
4964 ++ { 0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
4965 ++ { }
4966 ++ },
4967 ++ .chained = true,
4968 ++ .chain_id = ALC294_FIXUP_ASUS_GU502_HP
4969 ++ },
4970 ++ [ALC294_FIXUP_ASUS_GU502_HP] = {
4971 ++ .type = HDA_FIXUP_FUNC,
4972 ++ .v.func = alc294_fixup_gu502_hp,
4973 ++ },
4974 + [ALC294_FIXUP_ASUS_COEF_1B] = {
4975 + .type = HDA_FIXUP_VERBS,
4976 + .v.verbs = (const struct hda_verb[]) {
4977 +@@ -7983,6 +8062,39 @@ static const struct hda_fixup alc269_fixups[] = {
4978 + .chained = true,
4979 + .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
4980 + },
4981 ++ [ALC295_FIXUP_ASUS_DACS] = {
4982 ++ .type = HDA_FIXUP_FUNC,
4983 ++ .v.func = alc295_fixup_asus_dacs,
4984 ++ },
4985 ++ [ALC295_FIXUP_HP_OMEN] = {
4986 ++ .type = HDA_FIXUP_PINS,
4987 ++ .v.pins = (const struct hda_pintbl[]) {
4988 ++ { 0x12, 0xb7a60130 },
4989 ++ { 0x13, 0x40000000 },
4990 ++ { 0x14, 0x411111f0 },
4991 ++ { 0x16, 0x411111f0 },
4992 ++ { 0x17, 0x90170110 },
4993 ++ { 0x18, 0x411111f0 },
4994 ++ { 0x19, 0x02a11030 },
4995 ++ { 0x1a, 0x411111f0 },
4996 ++ { 0x1b, 0x04a19030 },
4997 ++ { 0x1d, 0x40600001 },
4998 ++ { 0x1e, 0x411111f0 },
4999 ++ { 0x21, 0x03211020 },
5000 ++ {}
5001 ++ },
5002 ++ .chained = true,
5003 ++ .chain_id = ALC269_FIXUP_HP_LINE1_MIC1_LED,
5004 ++ },
5005 ++ [ALC285_FIXUP_HP_SPECTRE_X360] = {
5006 ++ .type = HDA_FIXUP_PINS,
5007 ++ .v.pins = (const struct hda_pintbl[]) {
5008 ++ { 0x14, 0x90170110 }, /* enable top speaker */
5009 ++ {}
5010 ++ },
5011 ++ .chained = true,
5012 ++ .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
5013 ++ },
5014 + };
5015 +
5016 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5017 +@@ -8141,7 +8253,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5018 + SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
5019 + SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5020 + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5021 ++ SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
5022 + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5023 ++ SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
5024 + SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
5025 + SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
5026 + SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
5027 +@@ -8181,6 +8295,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5028 + SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
5029 + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
5030 + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
5031 ++ SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
5032 + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
5033 + SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
5034 + SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
5035 +@@ -8198,6 +8313,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5036 + SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
5037 + SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
5038 + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
5039 ++ SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
5040 + SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
5041 + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
5042 + SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
5043 +@@ -8254,12 +8370,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5044 + SND_PCI_QUIRK(0x1558, 0x50b8, "Clevo NK50SZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5045 + SND_PCI_QUIRK(0x1558, 0x50d5, "Clevo NP50D5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5046 + SND_PCI_QUIRK(0x1558, 0x50f0, "Clevo NH50A[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5047 ++ SND_PCI_QUIRK(0x1558, 0x50f2, "Clevo NH50E[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5048 + SND_PCI_QUIRK(0x1558, 0x50f3, "Clevo NH58DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5049 ++ SND_PCI_QUIRK(0x1558, 0x50f5, "Clevo NH55EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5050 ++ SND_PCI_QUIRK(0x1558, 0x50f6, "Clevo NH55DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5051 + SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5052 + SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5053 + SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5054 + SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5055 + SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5056 ++ SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5057 ++ SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5058 ++ SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5059 ++ SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5060 + SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5061 + SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5062 + SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5063 +@@ -8277,9 +8400,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5064 + SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5065 + SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5066 + SND_PCI_QUIRK(0x1558, 0x951d, "Clevo N950T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5067 ++ SND_PCI_QUIRK(0x1558, 0x9600, "Clevo N960K[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5068 + SND_PCI_QUIRK(0x1558, 0x961d, "Clevo N960S[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5069 + SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5070 + SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL53RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5071 ++ SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL5XNU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5072 ++ SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5073 ++ SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5074 ++ SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5075 ++ SND_PCI_QUIRK(0x1558, 0xc018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5076 ++ SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5077 ++ SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
5078 + SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
5079 + SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
5080 + SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
5081 +@@ -8544,6 +8675,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5082 + {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
5083 + {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
5084 + {.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
5085 ++ {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
5086 ++ {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
5087 + {}
5088 + };
5089 + #define ALC225_STANDARD_PINS \
5090 +diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
5091 +index 35903d1a1cbd2..5b124c4ad5725 100644
5092 +--- a/sound/pci/intel8x0.c
5093 ++++ b/sound/pci/intel8x0.c
5094 +@@ -331,6 +331,7 @@ struct ichdev {
5095 + unsigned int ali_slot; /* ALI DMA slot */
5096 + struct ac97_pcm *pcm;
5097 + int pcm_open_flag;
5098 ++ unsigned int prepared:1;
5099 + unsigned int suspended: 1;
5100 + };
5101 +
5102 +@@ -691,6 +692,9 @@ static inline void snd_intel8x0_update(struct intel8x0 *chip, struct ichdev *ich
5103 + int status, civ, i, step;
5104 + int ack = 0;
5105 +
5106 ++ if (!ichdev->prepared || ichdev->suspended)
5107 ++ return;
5108 ++
5109 + spin_lock_irqsave(&chip->reg_lock, flags);
5110 + status = igetbyte(chip, port + ichdev->roff_sr);
5111 + civ = igetbyte(chip, port + ICH_REG_OFF_CIV);
5112 +@@ -881,6 +885,7 @@ static int snd_intel8x0_hw_params(struct snd_pcm_substream *substream,
5113 + if (ichdev->pcm_open_flag) {
5114 + snd_ac97_pcm_close(ichdev->pcm);
5115 + ichdev->pcm_open_flag = 0;
5116 ++ ichdev->prepared = 0;
5117 + }
5118 + err = snd_ac97_pcm_open(ichdev->pcm, params_rate(hw_params),
5119 + params_channels(hw_params),
5120 +@@ -902,6 +907,7 @@ static int snd_intel8x0_hw_free(struct snd_pcm_substream *substream)
5121 + if (ichdev->pcm_open_flag) {
5122 + snd_ac97_pcm_close(ichdev->pcm);
5123 + ichdev->pcm_open_flag = 0;
5124 ++ ichdev->prepared = 0;
5125 + }
5126 + return 0;
5127 + }
5128 +@@ -976,6 +982,7 @@ static int snd_intel8x0_pcm_prepare(struct snd_pcm_substream *substream)
5129 + ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
5130 + }
5131 + snd_intel8x0_setup_periods(chip, ichdev);
5132 ++ ichdev->prepared = 1;
5133 + return 0;
5134 + }
5135 +
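
The new ->prepared bit turns snd_intel8x0_update() into a no-op until the stream has been programmed and while it is suspended, closing the window where a stray interrupt could touch unconfigured registers. A toy guard of the same shape; struct dev and update() are invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    struct dev { bool prepared, suspended; };

    static void update(struct dev *d)
    {
            if (!d->prepared || d->suspended)
                    return;  /* registers not in a usable state yet */
            puts("touching DMA registers");
    }

    int main(void)
    {
            struct dev d = { 0 };

            update(&d);          /* ignored: stream never prepared */
            d.prepared = true;
            update(&d);          /* safe now */
            return 0;
    }
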
5136 +diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
5137 +index a030dd65eb280..9602929b7de90 100644
5138 +--- a/sound/usb/line6/driver.c
5139 ++++ b/sound/usb/line6/driver.c
5140 +@@ -699,6 +699,10 @@ static int line6_init_cap_control(struct usb_line6 *line6)
5141 + line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
5142 + if (!line6->buffer_message)
5143 + return -ENOMEM;
5144 ++
5145 ++ ret = line6_init_midi(line6);
5146 ++ if (ret < 0)
5147 ++ return ret;
5148 + } else {
5149 + ret = line6_hwdep_init(line6);
5150 + if (ret < 0)
5151 +diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
5152 +index cd44cb5f1310c..16e644330c4d6 100644
5153 +--- a/sound/usb/line6/pod.c
5154 ++++ b/sound/usb/line6/pod.c
5155 +@@ -376,11 +376,6 @@ static int pod_init(struct usb_line6 *line6,
5156 + if (err < 0)
5157 + return err;
5158 +
5159 +- /* initialize MIDI subsystem: */
5160 +- err = line6_init_midi(line6);
5161 +- if (err < 0)
5162 +- return err;
5163 +-
5164 + /* initialize PCM subsystem: */
5165 + err = line6_init_pcm(line6, &pod_pcm_properties);
5166 + if (err < 0)
5167 +diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c
5168 +index ed158f04de80f..c2245aa93b08f 100644
5169 +--- a/sound/usb/line6/variax.c
5170 ++++ b/sound/usb/line6/variax.c
5171 +@@ -159,7 +159,6 @@ static int variax_init(struct usb_line6 *line6,
5172 + const struct usb_device_id *id)
5173 + {
5174 + struct usb_line6_variax *variax = line6_to_variax(line6);
5175 +- int err;
5176 +
5177 + line6->process_message = line6_variax_process_message;
5178 + line6->disconnect = line6_variax_disconnect;
5179 +@@ -172,11 +171,6 @@ static int variax_init(struct usb_line6 *line6,
5180 + if (variax->buffer_activate == NULL)
5181 + return -ENOMEM;
5182 +
5183 +- /* initialize MIDI subsystem: */
5184 +- err = line6_init_midi(&variax->line6);
5185 +- if (err < 0)
5186 +- return err;
5187 +-
5188 + /* initiate startup procedure: */
5189 + schedule_delayed_work(&line6->startup_work,
5190 + msecs_to_jiffies(VARIAX_STARTUP_DELAY1));
5191 +diff --git a/sound/usb/midi.c b/sound/usb/midi.c
5192 +index cd46ca7cd28de..fa91290ad89db 100644
5193 +--- a/sound/usb/midi.c
5194 ++++ b/sound/usb/midi.c
5195 +@@ -1889,8 +1889,12 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
5196 + ms_ep = find_usb_ms_endpoint_descriptor(hostep);
5197 + if (!ms_ep)
5198 + continue;
5199 ++ if (ms_ep->bLength <= sizeof(*ms_ep))
5200 ++ continue;
5201 + if (ms_ep->bNumEmbMIDIJack > 0x10)
5202 + continue;
5203 ++ if (ms_ep->bLength < sizeof(*ms_ep) + ms_ep->bNumEmbMIDIJack)
5204 ++ continue;
5205 + if (usb_endpoint_dir_out(ep)) {
5206 + if (endpoints[epidx].out_ep) {
5207 + if (++epidx >= MIDI_MAX_ENDPOINTS) {
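
The two added bounds checks accept a class-specific MIDI streaming endpoint descriptor only when it is long enough to hold both its fixed header and the bNumEmbMIDIJack jack IDs it advertises. A compact userspace model of that validation; the struct layout is abbreviated and the sample values are made up.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ms_endpoint_desc {        /* fixed part of the descriptor */
            uint8_t bLength;
            uint8_t bDescriptorType;
            uint8_t bDescriptorSubtype;
            uint8_t bNumEmbMIDIJack;
            /* bNumEmbMIDIJack jack IDs follow in the wire format */
    };

    static bool ms_ep_valid(const struct ms_endpoint_desc *ep)
    {
            if (ep->bLength <= sizeof(*ep))  /* no room for any jacks */
                    return false;
            if (ep->bNumEmbMIDIJack > 0x10)  /* implausible jack count */
                    return false;
            /* the claimed jack IDs must actually fit in the descriptor */
            return ep->bLength >= sizeof(*ep) + ep->bNumEmbMIDIJack;
    }

    int main(void)
    {
            struct ms_endpoint_desc bad = { .bLength = 5,
                                            .bNumEmbMIDIJack = 4 };
            struct ms_endpoint_desc good = { .bLength = 8,
                                             .bNumEmbMIDIJack = 4 };

            printf("bad: %d, good: %d\n",
                   ms_ep_valid(&bad), ms_ep_valid(&good)); /* 0, 1 */
            return 0;
    }
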
5208 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
5209 +index 7c6e83eee71dc..8b8bee3c3dd63 100644
5210 +--- a/sound/usb/quirks.c
5211 ++++ b/sound/usb/quirks.c
5212 +@@ -1511,6 +1511,10 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
5213 + case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */
5214 + pioneer_djm_set_format_quirk(subs, 0x0082);
5215 + break;
5216 ++ case USB_ID(0x08e4, 0x017f): /* Pioneer DJM-750 */
5217 ++ case USB_ID(0x08e4, 0x0163): /* Pioneer DJM-850 */
5218 ++ pioneer_djm_set_format_quirk(subs, 0x0086);
5219 ++ break;
5220 + }
5221 + }
5222 +
5223 +diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
5224 +index cf69b2fcce59e..dd61118df66ed 100644
5225 +--- a/tools/testing/selftests/exec/Makefile
5226 ++++ b/tools/testing/selftests/exec/Makefile
5227 +@@ -28,8 +28,8 @@ $(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat
5228 + cp $< $@
5229 + chmod -x $@
5230 + $(OUTPUT)/load_address_4096: load_address.c
5231 +- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie $< -o $@
5232 ++ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@
5233 + $(OUTPUT)/load_address_2097152: load_address.c
5234 +- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie $< -o $@
5235 ++ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@
5236 + $(OUTPUT)/load_address_16777216: load_address.c
5237 +- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie $< -o $@
5238 ++ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@
5239 +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
5240 +index 98c3b647f54dc..e3d5c77a86121 100644
5241 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
5242 ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
5243 +@@ -1753,16 +1753,25 @@ TEST_F(TRACE_poke, getpid_runs_normally)
5244 + # define SYSCALL_RET_SET(_regs, _val) \
5245 + do { \
5246 + typeof(_val) _result = (_val); \
5247 +- /* \
5248 +- * A syscall error is signaled by CR0 SO bit \
5249 +- * and the code is stored as a positive value. \
5250 +- */ \
5251 +- if (_result < 0) { \
5252 +- SYSCALL_RET(_regs) = -_result; \
5253 +- (_regs).ccr |= 0x10000000; \
5254 +- } else { \
5255 ++ if ((_regs.trap & 0xfff0) == 0x3000) { \
5256 ++ /* \
5257 ++ * scv 0 system call uses -ve result \
5258 ++ * for error, so no need to adjust. \
5259 ++ */ \
5260 + SYSCALL_RET(_regs) = _result; \
5261 +- (_regs).ccr &= ~0x10000000; \
5262 ++ } else { \
5263 ++ /* \
5264 ++ * A syscall error is signaled by the \
5265 ++ * CR0 SO bit and the code is stored as \
5266 ++ * a positive value. \
5267 ++ */ \
5268 ++ if (_result < 0) { \
5269 ++ SYSCALL_RET(_regs) = -_result; \
5270 ++ (_regs).ccr |= 0x10000000; \
5271 ++ } else { \
5272 ++ SYSCALL_RET(_regs) = _result; \
5273 ++ (_regs).ccr &= ~0x10000000; \
5274 ++ } \
5275 + } \
5276 + } while (0)
5277 + # define SYSCALL_RET_SET_ON_PTRACE_EXIT
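
The expanded macro keys off pt_regs.trap to pick the right error convention: an scv 0 entry ((trap & 0xfff0) == 0x3000) returns a plain negative errno, while an sc entry returns a positive code with the CR0 SO bit set. A standalone model of that dispatch; fake_regs stands in for the real pt_regs.

    #include <stdio.h>

    struct fake_regs {
            unsigned long trap;
            long gpr3;           /* syscall return register         */
            unsigned long ccr;   /* CR0 SO bit signals error for sc */
    };

    static void set_ret(struct fake_regs *regs, long result)
    {
            if ((regs->trap & 0xfff0) == 0x3000) {
                    /* scv 0: errors are plain negative return values */
                    regs->gpr3 = result;
            } else if (result < 0) {
                    /* sc: positive error code plus CR0 SO bit */
                    regs->gpr3 = -result;
                    regs->ccr |= 0x10000000;
            } else {
                    regs->gpr3 = result;
                    regs->ccr &= ~0x10000000;
            }
    }

    int main(void)
    {
            struct fake_regs sc = { .trap = 0xc00 };
            struct fake_regs scv = { .trap = 0x3000 };

            set_ret(&sc, -14);   /* -EFAULT */
            set_ret(&scv, -14);
            printf("sc: r3=%ld so=%d, scv: r3=%ld\n",
                   sc.gpr3, !!(sc.ccr & 0x10000000), scv.gpr3);
            return 0;
    }
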