Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.8 commit in: /
Date: Wed, 26 Aug 2020 11:18:39
Message-Id: 1598440680.40de4f8c83267312ae7a05f4ef6c57d178753cf4.mpagano@gentoo
1 commit: 40de4f8c83267312ae7a05f4ef6c57d178753cf4
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Wed Aug 26 11:18:00 2020 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Wed Aug 26 11:18:00 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=40de4f8c83267312ae7a05f4ef6c57d178753cf4
7
8 Linux patch 5.8.4
9
10 Signed-off-by: Mike Pagano <mpagano@gentoo.org>
11
12 0000_README | 4 +
13 1003_linux-5.8.4.patch | 5194 ++++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 5198 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index bacfc9f..17d6b16 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -55,6 +55,10 @@ Patch: 1002_linux-5.8.3.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.8.3
23
24 +Patch: 1003_linux-5.8.4.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.8.4
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1003_linux-5.8.4.patch b/1003_linux-5.8.4.patch
33 new file mode 100644
34 index 0000000..fc30996
35 --- /dev/null
36 +++ b/1003_linux-5.8.4.patch
37 @@ -0,0 +1,5194 @@
38 +diff --git a/Makefile b/Makefile
39 +index 6001ed2b14c3a..9a7a416f2d84e 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 8
46 +-SUBLEVEL = 3
47 ++SUBLEVEL = 4
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
52 +index a4d0c19f1e796..640e1a2f57b42 100644
53 +--- a/arch/alpha/include/asm/io.h
54 ++++ b/arch/alpha/include/asm/io.h
55 +@@ -489,10 +489,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
56 + }
57 + #endif
58 +
59 +-#define ioread16be(p) be16_to_cpu(ioread16(p))
60 +-#define ioread32be(p) be32_to_cpu(ioread32(p))
61 +-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
62 +-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
63 ++#define ioread16be(p) swab16(ioread16(p))
64 ++#define ioread32be(p) swab32(ioread32(p))
65 ++#define iowrite16be(v,p) iowrite16(swab16(v), (p))
66 ++#define iowrite32be(v,p) iowrite32(swab32(v), (p))
67 +
68 + #define inb_p inb
69 + #define inw_p inw
70 +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
71 +index 70f5905954dde..91e377770a6b8 100644
72 +--- a/arch/arm64/Makefile
73 ++++ b/arch/arm64/Makefile
74 +@@ -158,6 +158,7 @@ zinstall install:
75 + PHONY += vdso_install
76 + vdso_install:
77 + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
78 ++ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@
79 +
80 + # We use MRPROPER_FILES and CLEAN_FILES now
81 + archclean:
82 +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
83 +index e21d4a01372fe..759d62343e1d0 100644
84 +--- a/arch/arm64/include/asm/kvm_host.h
85 ++++ b/arch/arm64/include/asm/kvm_host.h
86 +@@ -443,7 +443,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
87 +
88 + #define KVM_ARCH_WANT_MMU_NOTIFIER
89 + int kvm_unmap_hva_range(struct kvm *kvm,
90 +- unsigned long start, unsigned long end);
91 ++ unsigned long start, unsigned long end, unsigned flags);
92 + int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
93 + int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
94 + int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
95 +diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
96 +index 5139a5f192568..d6adb4677c25f 100644
97 +--- a/arch/arm64/kernel/vdso32/Makefile
98 ++++ b/arch/arm64/kernel/vdso32/Makefile
99 +@@ -208,7 +208,7 @@ quiet_cmd_vdsosym = VDSOSYM $@
100 + cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
101 +
102 + # Install commands for the unstripped file
103 +-quiet_cmd_vdso_install = INSTALL $@
104 ++quiet_cmd_vdso_install = INSTALL32 $@
105 + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
106 +
107 + vdso.so: $(obj)/vdso.so.dbg
108 +diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
109 +index 31058e6e7c2a3..bd47f06739d6c 100644
110 +--- a/arch/arm64/kvm/mmu.c
111 ++++ b/arch/arm64/kvm/mmu.c
112 +@@ -365,7 +365,8 @@ static void unmap_stage2_p4ds(struct kvm *kvm, pgd_t *pgd,
113 + * destroying the VM), otherwise another faulting VCPU may come in and mess
114 + * with things behind our backs.
115 + */
116 +-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
117 ++static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,
118 ++ bool may_block)
119 + {
120 + pgd_t *pgd;
121 + phys_addr_t addr = start, end = start + size;
122 +@@ -390,11 +391,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
123 + * If the range is too large, release the kvm->mmu_lock
124 + * to prevent starvation and lockup detector warnings.
125 + */
126 +- if (next != end)
127 ++ if (may_block && next != end)
128 + cond_resched_lock(&kvm->mmu_lock);
129 + } while (pgd++, addr = next, addr != end);
130 + }
131 +
132 ++static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
133 ++{
134 ++ __unmap_stage2_range(kvm, start, size, true);
135 ++}
136 ++
137 + static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
138 + phys_addr_t addr, phys_addr_t end)
139 + {
140 +@@ -2198,18 +2204,21 @@ static int handle_hva_to_gpa(struct kvm *kvm,
141 +
142 + static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
143 + {
144 +- unmap_stage2_range(kvm, gpa, size);
145 ++ unsigned flags = *(unsigned *)data;
146 ++ bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
147 ++
148 ++ __unmap_stage2_range(kvm, gpa, size, may_block);
149 + return 0;
150 + }
151 +
152 + int kvm_unmap_hva_range(struct kvm *kvm,
153 +- unsigned long start, unsigned long end)
154 ++ unsigned long start, unsigned long end, unsigned flags)
155 + {
156 + if (!kvm->arch.pgd)
157 + return 0;
158 +
159 + trace_kvm_unmap_hva_range(start, end);
160 +- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
161 ++ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
162 + return 0;
163 + }
164 +
165 +diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
166 +index 10850897a91c4..779b6972aa84b 100644
167 +--- a/arch/ia64/include/asm/pgtable.h
168 ++++ b/arch/ia64/include/asm/pgtable.h
169 +@@ -366,6 +366,15 @@ pgd_index (unsigned long address)
170 + }
171 + #define pgd_index pgd_index
172 +
173 ++/*
174 ++ * In the kernel's mapped region we know everything is in region number 5, so
175 ++ * as an optimisation its PGD already points to the area for that region.
176 ++ * However, this also means that we cannot use pgd_index() and we must
177 ++ * never add the region here.
178 ++ */
179 ++#define pgd_offset_k(addr) \
180 ++ (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
181 ++
182 + /* Look up a pgd entry in the gate area. On IA-64, the gate-area
183 + resides in the kernel-mapped segment, hence we use pgd_offset_k()
184 + here. */
185 +diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
186 +index 9138a624c5c81..692f90e7fecc1 100644
187 +--- a/arch/m68k/include/asm/m53xxacr.h
188 ++++ b/arch/m68k/include/asm/m53xxacr.h
189 +@@ -89,9 +89,9 @@
190 + * coherency though in all cases. And for copyback caches we will need
191 + * to push cached data as well.
192 + */
193 +-#define CACHE_INIT CACR_CINVA
194 +-#define CACHE_INVALIDATE CACR_CINVA
195 +-#define CACHE_INVALIDATED CACR_CINVA
196 ++#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC)
197 ++#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA)
198 ++#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
199 +
200 + #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
201 + (0x000f0000) + \
202 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
203 +index 363e7a89d1738..ef1d25d49ec87 100644
204 +--- a/arch/mips/include/asm/kvm_host.h
205 ++++ b/arch/mips/include/asm/kvm_host.h
206 +@@ -981,7 +981,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
207 +
208 + #define KVM_ARCH_WANT_MMU_NOTIFIER
209 + int kvm_unmap_hva_range(struct kvm *kvm,
210 +- unsigned long start, unsigned long end);
211 ++ unsigned long start, unsigned long end, unsigned flags);
212 + int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
213 + int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
214 + int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
215 +diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
216 +index 7b537fa2035df..588b21245e00b 100644
217 +--- a/arch/mips/kernel/setup.c
218 ++++ b/arch/mips/kernel/setup.c
219 +@@ -497,7 +497,7 @@ static void __init mips_parse_crashkernel(void)
220 + if (ret != 0 || crash_size <= 0)
221 + return;
222 +
223 +- if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 0)) {
224 ++ if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1)) {
225 + pr_warn("Invalid memory region reserved for crash kernel\n");
226 + return;
227 + }
228 +diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
229 +index 49bd160f4d85c..0783ac9b32405 100644
230 +--- a/arch/mips/kvm/mmu.c
231 ++++ b/arch/mips/kvm/mmu.c
232 +@@ -518,7 +518,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
233 + return 1;
234 + }
235 +
236 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
237 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
238 ++ unsigned flags)
239 + {
240 + handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
241 +
242 +diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
243 +index 925cf89cbf4ba..6bfc87915d5db 100644
244 +--- a/arch/powerpc/include/asm/fixmap.h
245 ++++ b/arch/powerpc/include/asm/fixmap.h
246 +@@ -52,7 +52,7 @@ enum fixed_addresses {
247 + FIX_HOLE,
248 + /* reserve the top 128K for early debugging purposes */
249 + FIX_EARLY_DEBUG_TOP = FIX_HOLE,
250 +- FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128, PAGE_SIZE)/PAGE_SIZE)-1,
251 ++ FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
252 + #ifdef CONFIG_HIGHMEM
253 + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
254 + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
255 +diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
256 +index 7e2d061d04451..bccf0ba2da2ef 100644
257 +--- a/arch/powerpc/include/asm/kvm_host.h
258 ++++ b/arch/powerpc/include/asm/kvm_host.h
259 +@@ -58,7 +58,8 @@
260 + #define KVM_ARCH_WANT_MMU_NOTIFIER
261 +
262 + extern int kvm_unmap_hva_range(struct kvm *kvm,
263 +- unsigned long start, unsigned long end);
264 ++ unsigned long start, unsigned long end,
265 ++ unsigned flags);
266 + extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
267 + extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
268 + extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
269 +diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
270 +index 9d3faac53295e..5ed658ae121ab 100644
271 +--- a/arch/powerpc/kernel/setup-common.c
272 ++++ b/arch/powerpc/kernel/setup-common.c
273 +@@ -311,6 +311,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
274 + min = pvr & 0xFF;
275 + break;
276 + case 0x004e: /* POWER9 bits 12-15 give chip type */
277 ++ case 0x0080: /* POWER10 bit 12 gives SMT8/4 */
278 + maj = (pvr >> 8) & 0x0F;
279 + min = pvr & 0xFF;
280 + break;
281 +diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
282 +index 41fedec69ac35..49db50d1db04c 100644
283 +--- a/arch/powerpc/kvm/book3s.c
284 ++++ b/arch/powerpc/kvm/book3s.c
285 +@@ -834,7 +834,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
286 + kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
287 + }
288 +
289 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
290 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
291 ++ unsigned flags)
292 + {
293 + return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
294 + }
295 +diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
296 +index d6c1069e9954a..ed0c9c43d0cf1 100644
297 +--- a/arch/powerpc/kvm/e500_mmu_host.c
298 ++++ b/arch/powerpc/kvm/e500_mmu_host.c
299 +@@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
300 + return 0;
301 + }
302 +
303 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
304 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
305 ++ unsigned flags)
306 + {
307 + /* kvm_unmap_hva flushes everything anyways */
308 + kvm_unmap_hva(kvm, start);
309 +diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
310 +index 6d4ee03d476a9..ec04fc7f5a641 100644
311 +--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
312 ++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
313 +@@ -107,22 +107,28 @@ static int pseries_cpu_disable(void)
314 + */
315 + static void pseries_cpu_die(unsigned int cpu)
316 + {
317 +- int tries;
318 + int cpu_status = 1;
319 + unsigned int pcpu = get_hard_smp_processor_id(cpu);
320 ++ unsigned long timeout = jiffies + msecs_to_jiffies(120000);
321 +
322 +- for (tries = 0; tries < 25; tries++) {
323 ++ while (true) {
324 + cpu_status = smp_query_cpu_stopped(pcpu);
325 + if (cpu_status == QCSS_STOPPED ||
326 + cpu_status == QCSS_HARDWARE_ERROR)
327 + break;
328 +- cpu_relax();
329 +
330 ++ if (time_after(jiffies, timeout)) {
331 ++ pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
332 ++ cpu, pcpu);
333 ++ timeout = jiffies + msecs_to_jiffies(120000);
334 ++ }
335 ++
336 ++ cond_resched();
337 + }
338 +
339 +- if (cpu_status != 0) {
340 +- printk("Querying DEAD? cpu %i (%i) shows %i\n",
341 +- cpu, pcpu, cpu_status);
342 ++ if (cpu_status == QCSS_HARDWARE_ERROR) {
343 ++ pr_warn("CPU %i (hwid %i) reported error while dying\n",
344 ++ cpu, pcpu);
345 + }
346 +
347 + /* Isolation and deallocation are definitely done by
348 +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
349 +index f3736fcd98fcb..13c86a292c6d7 100644
350 +--- a/arch/powerpc/platforms/pseries/ras.c
351 ++++ b/arch/powerpc/platforms/pseries/ras.c
352 +@@ -184,7 +184,6 @@ static void handle_system_shutdown(char event_modifier)
353 + case EPOW_SHUTDOWN_ON_UPS:
354 + pr_emerg("Loss of system power detected. System is running on"
355 + " UPS/battery. Check RTAS error log for details\n");
356 +- orderly_poweroff(true);
357 + break;
358 +
359 + case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
360 +diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
361 +index e6f8016b366ab..f3586e31ed1ec 100644
362 +--- a/arch/riscv/kernel/vmlinux.lds.S
363 ++++ b/arch/riscv/kernel/vmlinux.lds.S
364 +@@ -22,6 +22,7 @@ SECTIONS
365 + /* Beginning of code and text segment */
366 + . = LOAD_OFFSET;
367 + _start = .;
368 ++ _stext = .;
369 + HEAD_TEXT_SECTION
370 + . = ALIGN(PAGE_SIZE);
371 +
372 +@@ -54,7 +55,6 @@ SECTIONS
373 + . = ALIGN(SECTION_ALIGN);
374 + .text : {
375 + _text = .;
376 +- _stext = .;
377 + TEXT_TEXT
378 + SCHED_TEXT
379 + CPUIDLE_TEXT
380 +diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
381 +index 3cc15c0662983..2924f236d89c6 100644
382 +--- a/arch/s390/kernel/ptrace.c
383 ++++ b/arch/s390/kernel/ptrace.c
384 +@@ -1310,7 +1310,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
385 + cb->pc == 1 &&
386 + cb->qc == 0 &&
387 + cb->reserved2 == 0 &&
388 +- cb->key == PAGE_DEFAULT_KEY &&
389 + cb->reserved3 == 0 &&
390 + cb->reserved4 == 0 &&
391 + cb->reserved5 == 0 &&
392 +@@ -1374,7 +1373,11 @@ static int s390_runtime_instr_set(struct task_struct *target,
393 + kfree(data);
394 + return -EINVAL;
395 + }
396 +-
397 ++ /*
398 ++ * Override access key in any case, since user space should
399 ++ * not be able to set it, nor should it care about it.
400 ++ */
401 ++ ri_cb.key = PAGE_DEFAULT_KEY >> 4;
402 + preempt_disable();
403 + if (!target->thread.ri_cb)
404 + target->thread.ri_cb = data;
405 +diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
406 +index 125c7f6e87150..1788a5454b6fc 100644
407 +--- a/arch/s390/kernel/runtime_instr.c
408 ++++ b/arch/s390/kernel/runtime_instr.c
409 +@@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
410 + cb->k = 1;
411 + cb->ps = 1;
412 + cb->pc = 1;
413 +- cb->key = PAGE_DEFAULT_KEY;
414 ++ cb->key = PAGE_DEFAULT_KEY >> 4;
415 + cb->v = 1;
416 + }
417 +
418 +diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
419 +index 3902c9f6f2d63..4b62d6b550246 100644
420 +--- a/arch/s390/pci/pci.c
421 ++++ b/arch/s390/pci/pci.c
422 +@@ -672,6 +672,19 @@ int zpci_disable_device(struct zpci_dev *zdev)
423 + }
424 + EXPORT_SYMBOL_GPL(zpci_disable_device);
425 +
426 ++void zpci_remove_device(struct zpci_dev *zdev)
427 ++{
428 ++ struct zpci_bus *zbus = zdev->zbus;
429 ++ struct pci_dev *pdev;
430 ++
431 ++ pdev = pci_get_slot(zbus->bus, zdev->devfn);
432 ++ if (pdev) {
433 ++ if (pdev->is_virtfn)
434 ++ return zpci_remove_virtfn(pdev, zdev->vfn);
435 ++ pci_stop_and_remove_bus_device_locked(pdev);
436 ++ }
437 ++}
438 ++
439 + int zpci_create_device(struct zpci_dev *zdev)
440 + {
441 + int rc;
442 +@@ -716,13 +729,8 @@ void zpci_release_device(struct kref *kref)
443 + {
444 + struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
445 +
446 +- if (zdev->zbus->bus) {
447 +- struct pci_dev *pdev;
448 +-
449 +- pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
450 +- if (pdev)
451 +- pci_stop_and_remove_bus_device_locked(pdev);
452 +- }
453 ++ if (zdev->zbus->bus)
454 ++ zpci_remove_device(zdev);
455 +
456 + switch (zdev->state) {
457 + case ZPCI_FN_STATE_ONLINE:
458 +diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
459 +index 642a993846889..5967f30141563 100644
460 +--- a/arch/s390/pci/pci_bus.c
461 ++++ b/arch/s390/pci/pci_bus.c
462 +@@ -132,13 +132,14 @@ static int zpci_bus_link_virtfn(struct pci_dev *pdev,
463 + {
464 + int rc;
465 +
466 +- virtfn->physfn = pci_dev_get(pdev);
467 + rc = pci_iov_sysfs_link(pdev, virtfn, vfid);
468 +- if (rc) {
469 +- pci_dev_put(pdev);
470 +- virtfn->physfn = NULL;
471 ++ if (rc)
472 + return rc;
473 +- }
474 ++
475 ++ virtfn->is_virtfn = 1;
476 ++ virtfn->multifunction = 0;
477 ++ virtfn->physfn = pci_dev_get(pdev);
478 ++
479 + return 0;
480 + }
481 +
482 +@@ -151,9 +152,9 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
483 + int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/
484 + int rc = 0;
485 +
486 +- virtfn->is_virtfn = 1;
487 +- virtfn->multifunction = 0;
488 +- WARN_ON(vfid < 0);
489 ++ if (!zbus->multifunction)
490 ++ return 0;
491 ++
492 + /* If the parent PF for the given VF is also configured in the
493 + * instance, it must be on the same zbus.
494 + * We can then identify the parent PF by checking what
495 +@@ -165,11 +166,17 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
496 + zdev = zbus->function[i];
497 + if (zdev && zdev->is_physfn) {
498 + pdev = pci_get_slot(zbus->bus, zdev->devfn);
499 ++ if (!pdev)
500 ++ continue;
501 + cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
502 + if (cand_devfn == virtfn->devfn) {
503 + rc = zpci_bus_link_virtfn(pdev, virtfn, vfid);
504 ++ /* balance pci_get_slot() */
505 ++ pci_dev_put(pdev);
506 + break;
507 + }
508 ++ /* balance pci_get_slot() */
509 ++ pci_dev_put(pdev);
510 + }
511 + }
512 + return rc;
513 +@@ -178,12 +185,23 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
514 + static inline int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
515 + struct pci_dev *virtfn, int vfn)
516 + {
517 +- virtfn->is_virtfn = 1;
518 +- virtfn->multifunction = 0;
519 + return 0;
520 + }
521 + #endif
522 +
523 ++void pcibios_bus_add_device(struct pci_dev *pdev)
524 ++{
525 ++ struct zpci_dev *zdev = to_zpci(pdev);
526 ++
527 ++ /*
528 ++ * With pdev->no_vf_scan the common PCI probing code does not
529 ++ * perform PF/VF linking.
530 ++ */
531 ++ if (zdev->vfn)
532 ++ zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
533 ++
534 ++}
535 ++
536 + static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
537 + {
538 + struct pci_bus *bus;
539 +@@ -214,20 +232,10 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
540 + }
541 +
542 + pdev = pci_scan_single_device(bus, zdev->devfn);
543 +- if (pdev) {
544 +- if (!zdev->is_physfn) {
545 +- rc = zpci_bus_setup_virtfn(zbus, pdev, zdev->vfn);
546 +- if (rc)
547 +- goto failed_with_pdev;
548 +- }
549 ++ if (pdev)
550 + pci_bus_add_device(pdev);
551 +- }
552 +- return 0;
553 +
554 +-failed_with_pdev:
555 +- pci_stop_and_remove_bus_device(pdev);
556 +- pci_dev_put(pdev);
557 +- return rc;
558 ++ return 0;
559 + }
560 +
561 + static void zpci_bus_add_devices(struct zpci_bus *zbus)
562 +diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
563 +index 89be3c354b7bc..4972433df4581 100644
564 +--- a/arch/s390/pci/pci_bus.h
565 ++++ b/arch/s390/pci/pci_bus.h
566 +@@ -29,3 +29,16 @@ static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
567 +
568 + return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn];
569 + }
570 ++
571 ++#ifdef CONFIG_PCI_IOV
572 ++static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn)
573 ++{
574 ++
575 ++ pci_lock_rescan_remove();
576 ++ /* Linux' vfid's start at 0 vfn at 1 */
577 ++ pci_iov_remove_virtfn(pdev->physfn, vfn - 1);
578 ++ pci_unlock_rescan_remove();
579 ++}
580 ++#else /* CONFIG_PCI_IOV */
581 ++static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn) {}
582 ++#endif /* CONFIG_PCI_IOV */
583 +diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
584 +index fdebd286f4023..9a3a291cad432 100644
585 +--- a/arch/s390/pci/pci_event.c
586 ++++ b/arch/s390/pci/pci_event.c
587 +@@ -92,6 +92,9 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
588 + ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1);
589 + break;
590 + }
591 ++ /* the configuration request may be stale */
592 ++ if (zdev->state != ZPCI_FN_STATE_STANDBY)
593 ++ break;
594 + zdev->fh = ccdf->fh;
595 + zdev->state = ZPCI_FN_STATE_CONFIGURED;
596 + ret = zpci_enable_device(zdev);
597 +@@ -118,7 +121,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
598 + if (!zdev)
599 + break;
600 + if (pdev)
601 +- pci_stop_and_remove_bus_device_locked(pdev);
602 ++ zpci_remove_device(zdev);
603 +
604 + ret = zpci_disable_device(zdev);
605 + if (ret)
606 +@@ -137,7 +140,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
607 + /* Give the driver a hint that the function is
608 + * already unusable. */
609 + pdev->error_state = pci_channel_io_perm_failure;
610 +- pci_stop_and_remove_bus_device_locked(pdev);
611 ++ zpci_remove_device(zdev);
612 + }
613 +
614 + zdev->state = ZPCI_FN_STATE_STANDBY;
615 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
616 +index be5363b215409..c6908a3d551e1 100644
617 +--- a/arch/x86/include/asm/kvm_host.h
618 ++++ b/arch/x86/include/asm/kvm_host.h
619 +@@ -1641,7 +1641,8 @@ asmlinkage void kvm_spurious_fault(void);
620 + _ASM_EXTABLE(666b, 667b)
621 +
622 + #define KVM_ARCH_WANT_MMU_NOTIFIER
623 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
624 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
625 ++ unsigned flags);
626 + int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
627 + int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
628 + int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
629 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
630 +index 6d6a0ae7800c6..9516a958e7801 100644
631 +--- a/arch/x86/kvm/mmu/mmu.c
632 ++++ b/arch/x86/kvm/mmu/mmu.c
633 +@@ -1971,7 +1971,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
634 + return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
635 + }
636 +
637 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
638 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
639 ++ unsigned flags)
640 + {
641 + return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
642 + }
643 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
644 +index 4fe976c2495ea..f7304132d5907 100644
645 +--- a/arch/x86/kvm/x86.c
646 ++++ b/arch/x86/kvm/x86.c
647 +@@ -967,7 +967,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
648 + {
649 + unsigned long old_cr4 = kvm_read_cr4(vcpu);
650 + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
651 +- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
652 ++ X86_CR4_SMEP;
653 +
654 + if (kvm_valid_cr4(vcpu, cr4))
655 + return 1;
656 +diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
657 +index e3f1ca3160684..db34fee931388 100644
658 +--- a/arch/x86/pci/xen.c
659 ++++ b/arch/x86/pci/xen.c
660 +@@ -26,6 +26,7 @@
661 + #include <asm/xen/pci.h>
662 + #include <asm/xen/cpuid.h>
663 + #include <asm/apic.h>
664 ++#include <asm/acpi.h>
665 + #include <asm/i8259.h>
666 +
667 + static int xen_pcifront_enable_irq(struct pci_dev *dev)
668 +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
669 +index 8e364c4c67683..7caa658373563 100644
670 +--- a/arch/x86/platform/efi/efi_64.c
671 ++++ b/arch/x86/platform/efi/efi_64.c
672 +@@ -268,6 +268,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
673 + npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
674 + rodata = __pa(__start_rodata);
675 + pfn = rodata >> PAGE_SHIFT;
676 ++
677 ++ pf = _PAGE_NX | _PAGE_ENC;
678 + if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
679 + pr_err("Failed to map kernel rodata 1:1\n");
680 + return 1;
681 +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
682 +index 7e0f7880b21a6..c7540ad28995b 100644
683 +--- a/drivers/cpufreq/intel_pstate.c
684 ++++ b/drivers/cpufreq/intel_pstate.c
685 +@@ -1572,6 +1572,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
686 +
687 + intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
688 + cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
689 ++ cpu->pstate.turbo_pstate = phy_max;
690 + } else {
691 + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
692 + }
693 +diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
694 +index 5860ca41185cf..2acd9f9284a26 100644
695 +--- a/drivers/edac/i7core_edac.c
696 ++++ b/drivers/edac/i7core_edac.c
697 +@@ -1710,9 +1710,9 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
698 + if (uncorrected_error) {
699 + core_err_cnt = 1;
700 + if (ripv)
701 +- tp_event = HW_EVENT_ERR_FATAL;
702 +- else
703 + tp_event = HW_EVENT_ERR_UNCORRECTED;
704 ++ else
705 ++ tp_event = HW_EVENT_ERR_FATAL;
706 + } else {
707 + tp_event = HW_EVENT_ERR_CORRECTED;
708 + }
709 +diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
710 +index c1f2e6deb021a..4b44ea6b03adf 100644
711 +--- a/drivers/edac/pnd2_edac.c
712 ++++ b/drivers/edac/pnd2_edac.c
713 +@@ -1155,7 +1155,7 @@ static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
714 + u32 optypenum = GET_BITFIELD(m->status, 4, 6);
715 + int rc;
716 +
717 +- tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
718 ++ tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
719 + HW_EVENT_ERR_CORRECTED;
720 +
721 + /*
722 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
723 +index d414698ca3242..c5ab634cb6a49 100644
724 +--- a/drivers/edac/sb_edac.c
725 ++++ b/drivers/edac/sb_edac.c
726 +@@ -2982,9 +2982,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
727 + if (uncorrected_error) {
728 + core_err_cnt = 1;
729 + if (ripv) {
730 +- tp_event = HW_EVENT_ERR_FATAL;
731 +- } else {
732 + tp_event = HW_EVENT_ERR_UNCORRECTED;
733 ++ } else {
734 ++ tp_event = HW_EVENT_ERR_FATAL;
735 + }
736 + } else {
737 + tp_event = HW_EVENT_ERR_CORRECTED;
738 +diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
739 +index 6d8d6dc626bfe..2b4ce8e5ac2fa 100644
740 +--- a/drivers/edac/skx_common.c
741 ++++ b/drivers/edac/skx_common.c
742 +@@ -493,9 +493,9 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
743 + if (uncorrected_error) {
744 + core_err_cnt = 1;
745 + if (ripv) {
746 +- tp_event = HW_EVENT_ERR_FATAL;
747 +- } else {
748 + tp_event = HW_EVENT_ERR_UNCORRECTED;
749 ++ } else {
750 ++ tp_event = HW_EVENT_ERR_FATAL;
751 + }
752 + } else {
753 + tp_event = HW_EVENT_ERR_CORRECTED;
754 +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
755 +index fdd1db025dbfd..3aa07c3b51369 100644
756 +--- a/drivers/firmware/efi/efi.c
757 ++++ b/drivers/firmware/efi/efi.c
758 +@@ -381,6 +381,7 @@ static int __init efisubsys_init(void)
759 + efi_kobj = kobject_create_and_add("efi", firmware_kobj);
760 + if (!efi_kobj) {
761 + pr_err("efi: Firmware registration failed.\n");
762 ++ destroy_workqueue(efi_rts_wq);
763 + return -ENOMEM;
764 + }
765 +
766 +@@ -424,6 +425,7 @@ err_unregister:
767 + generic_ops_unregister();
768 + err_put:
769 + kobject_put(efi_kobj);
770 ++ destroy_workqueue(efi_rts_wq);
771 + return error;
772 + }
773 +
774 +diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
775 +index 6bca70bbb43d0..f735db55adc03 100644
776 +--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
777 ++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
778 +@@ -187,20 +187,28 @@ int efi_printk(const char *fmt, ...)
779 + */
780 + efi_status_t efi_parse_options(char const *cmdline)
781 + {
782 +- size_t len = strlen(cmdline) + 1;
783 ++ size_t len;
784 + efi_status_t status;
785 + char *str, *buf;
786 +
787 ++ if (!cmdline)
788 ++ return EFI_SUCCESS;
789 ++
790 ++ len = strnlen(cmdline, COMMAND_LINE_SIZE - 1) + 1;
791 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, len, (void **)&buf);
792 + if (status != EFI_SUCCESS)
793 + return status;
794 +
795 +- str = skip_spaces(memcpy(buf, cmdline, len));
796 ++ memcpy(buf, cmdline, len - 1);
797 ++ buf[len - 1] = '\0';
798 ++ str = skip_spaces(buf);
799 +
800 + while (*str) {
801 + char *param, *val;
802 +
803 + str = next_arg(str, &param, &val);
804 ++ if (!val && !strcmp(param, "--"))
805 ++ break;
806 +
807 + if (!strcmp(param, "nokaslr")) {
808 + efi_nokaslr = true;
809 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
810 +index d399e58931705..74459927f97f7 100644
811 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
812 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
813 +@@ -465,7 +465,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
814 + unsigned int pages;
815 + int i, r;
816 +
817 +- *sgt = kmalloc(sizeof(*sg), GFP_KERNEL);
818 ++ *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
819 + if (!*sgt)
820 + return -ENOMEM;
821 +
822 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
823 +index 195d621145ba5..0a39a8558b294 100644
824 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
825 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
826 +@@ -2184,6 +2184,7 @@ void amdgpu_dm_update_connector_after_detect(
827 +
828 + drm_connector_update_edid_property(connector,
829 + aconnector->edid);
830 ++ drm_add_edid_modes(connector, aconnector->edid);
831 +
832 + if (aconnector->dc_link->aux_mode)
833 + drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
834 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
835 +index 841cc051b7d01..31aa31c280ee6 100644
836 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
837 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
838 +@@ -3265,12 +3265,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
839 + core_link_set_avmute(pipe_ctx, true);
840 + }
841 +
842 ++ dc->hwss.blank_stream(pipe_ctx);
843 + #if defined(CONFIG_DRM_AMD_DC_HDCP)
844 + update_psp_stream_config(pipe_ctx, true);
845 + #endif
846 +
847 +- dc->hwss.blank_stream(pipe_ctx);
848 +-
849 + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
850 + deallocate_mst_payload(pipe_ctx);
851 +
852 +@@ -3298,11 +3297,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
853 + write_i2c_redriver_setting(pipe_ctx, false);
854 + }
855 + }
856 +-
857 +- disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
858 +-
859 + dc->hwss.disable_stream(pipe_ctx);
860 +
861 ++ disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
862 + if (pipe_ctx->stream->timing.flags.DSC) {
863 + if (dc_is_dp_signal(pipe_ctx->stream->signal))
864 + dp_set_dsc_enable(pipe_ctx, false);
865 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
866 +index 6124af571bff6..91cd884d6f257 100644
867 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
868 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
869 +@@ -1102,10 +1102,6 @@ static inline enum link_training_result perform_link_training_int(
870 + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
871 + dpcd_set_training_pattern(link, dpcd_pattern);
872 +
873 +- /* delay 5ms after notifying sink of idle pattern before switching output */
874 +- if (link->connector_signal != SIGNAL_TYPE_EDP)
875 +- msleep(5);
876 +-
877 + /* 4. mainlink output idle pattern*/
878 + dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
879 +
880 +@@ -1555,12 +1551,6 @@ bool perform_link_training_with_retries(
881 + struct dc_link *link = stream->link;
882 + enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
883 +
884 +- /* We need to do this before the link training to ensure the idle pattern in SST
885 +- * mode will be sent right after the link training
886 +- */
887 +- link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
888 +- pipe_ctx->stream_res.stream_enc->id, true);
889 +-
890 + for (j = 0; j < attempts; ++j) {
891 +
892 + dp_enable_link_phy(
893 +@@ -1577,6 +1567,12 @@ bool perform_link_training_with_retries(
894 +
895 + dp_set_panel_mode(link, panel_mode);
896 +
897 ++ /* We need to do this before the link training to ensure the idle pattern in SST
898 ++ * mode will be sent right after the link training
899 ++ */
900 ++ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
901 ++ pipe_ctx->stream_res.stream_enc->id, true);
902 ++
903 + if (link->aux_access_disabled) {
904 + dc_link_dp_perform_link_training_skip_aux(link, link_setting);
905 + return true;
906 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
907 +index 70ec691e14d2d..99c68ca9c7e00 100644
908 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
909 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
910 +@@ -49,7 +49,7 @@
911 + #define DCN_PANEL_CNTL_REG_LIST()\
912 + DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
913 + DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
914 +- DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
915 ++ DCN_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
916 + SR(BL_PWM_CNTL), \
917 + SR(BL_PWM_CNTL2), \
918 + SR(BL_PWM_PERIOD_CNTL), \
919 +diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
920 +index 2af1d74d16ad8..b77e9dc160863 100644
921 +--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
922 ++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
923 +@@ -1069,17 +1069,8 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
924 + link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
925 + }
926 +
927 +- if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
928 ++ if (dc_is_dp_signal(pipe_ctx->stream->signal))
929 + pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
930 +-
931 +- /*
932 +- * After output is idle pattern some sinks need time to recognize the stream
933 +- * has changed or they enter protection state and hang.
934 +- */
935 +- if (!dc_is_embedded_signal(pipe_ctx->stream->signal))
936 +- msleep(60);
937 +- }
938 +-
939 + }
940 +
941 +
942 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
943 +index da5333d165ace..ec63cb8533607 100644
944 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
945 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
946 +@@ -1386,8 +1386,8 @@ static void dcn20_update_dchubp_dpp(
947 +
948 + /* Any updates are handled in dc interface, just need to apply existing for plane enable */
949 + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
950 +- pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
951 +- && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
952 ++ pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
953 ++ pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
954 + dc->hwss.set_cursor_position(pipe_ctx);
955 + dc->hwss.set_cursor_attribute(pipe_ctx);
956 +
957 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
958 +index cef1aa938ab54..2d9055eb3ce92 100644
959 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
960 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
961 +@@ -3097,7 +3097,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
962 + int vlevel = 0;
963 + int pipe_split_from[MAX_PIPES];
964 + int pipe_cnt = 0;
965 +- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
966 ++ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
967 + DC_LOGGER_INIT(dc->ctx->logger);
968 +
969 + BW_VAL_TRACE_COUNT();
970 +diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
971 +index 89ef9f6860e5b..16df2a485dd0d 100644
972 +--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
973 ++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
974 +@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
975 + */
976 + static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
977 + {
978 ++ if (arg1.value == 0)
979 ++ return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
980 ++
981 + return dc_fixpt_exp(
982 + dc_fixpt_mul(
983 + dc_fixpt_log(arg1),
984 +diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
985 +index b7ba22dddcad9..83509106f3ba9 100644
986 +--- a/drivers/gpu/drm/ast/ast_drv.c
987 ++++ b/drivers/gpu/drm/ast/ast_drv.c
988 +@@ -59,7 +59,6 @@ static struct drm_driver driver;
989 + static const struct pci_device_id pciidlist[] = {
990 + AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
991 + AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
992 +- /* AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */
993 + {0, 0, 0},
994 + };
995 +
996 +diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
997 +index 656d591b154b3..09f2659e29118 100644
998 +--- a/drivers/gpu/drm/ast/ast_drv.h
999 ++++ b/drivers/gpu/drm/ast/ast_drv.h
1000 +@@ -52,7 +52,6 @@
1001 +
1002 + #define PCI_CHIP_AST2000 0x2000
1003 + #define PCI_CHIP_AST2100 0x2010
1004 +-#define PCI_CHIP_AST1180 0x1180
1005 +
1006 +
1007 + enum ast_chip {
1008 +@@ -64,7 +63,6 @@ enum ast_chip {
1009 + AST2300,
1010 + AST2400,
1011 + AST2500,
1012 +- AST1180,
1013 + };
1014 +
1015 + enum ast_tx_chip {
1016 +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
1017 +index e5398e3dabe70..99c11b51f0207 100644
1018 +--- a/drivers/gpu/drm/ast/ast_main.c
1019 ++++ b/drivers/gpu/drm/ast/ast_main.c
1020 +@@ -142,50 +142,42 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
1021 + ast_detect_config_mode(dev, &scu_rev);
1022 +
1023 + /* Identify chipset */
1024 +- if (dev->pdev->device == PCI_CHIP_AST1180) {
1025 +- ast->chip = AST1100;
1026 +- DRM_INFO("AST 1180 detected\n");
1027 +- } else {
1028 +- if (dev->pdev->revision >= 0x40) {
1029 +- ast->chip = AST2500;
1030 +- DRM_INFO("AST 2500 detected\n");
1031 +- } else if (dev->pdev->revision >= 0x30) {
1032 +- ast->chip = AST2400;
1033 +- DRM_INFO("AST 2400 detected\n");
1034 +- } else if (dev->pdev->revision >= 0x20) {
1035 +- ast->chip = AST2300;
1036 +- DRM_INFO("AST 2300 detected\n");
1037 +- } else if (dev->pdev->revision >= 0x10) {
1038 +- switch (scu_rev & 0x0300) {
1039 +- case 0x0200:
1040 +- ast->chip = AST1100;
1041 +- DRM_INFO("AST 1100 detected\n");
1042 +- break;
1043 +- case 0x0100:
1044 +- ast->chip = AST2200;
1045 +- DRM_INFO("AST 2200 detected\n");
1046 +- break;
1047 +- case 0x0000:
1048 +- ast->chip = AST2150;
1049 +- DRM_INFO("AST 2150 detected\n");
1050 +- break;
1051 +- default:
1052 +- ast->chip = AST2100;
1053 +- DRM_INFO("AST 2100 detected\n");
1054 +- break;
1055 +- }
1056 +- ast->vga2_clone = false;
1057 +- } else {
1058 +- ast->chip = AST2000;
1059 +- DRM_INFO("AST 2000 detected\n");
1060 ++ if (dev->pdev->revision >= 0x40) {
1061 ++ ast->chip = AST2500;
1062 ++ DRM_INFO("AST 2500 detected\n");
1063 ++ } else if (dev->pdev->revision >= 0x30) {
1064 ++ ast->chip = AST2400;
1065 ++ DRM_INFO("AST 2400 detected\n");
1066 ++ } else if (dev->pdev->revision >= 0x20) {
1067 ++ ast->chip = AST2300;
1068 ++ DRM_INFO("AST 2300 detected\n");
1069 ++ } else if (dev->pdev->revision >= 0x10) {
1070 ++ switch (scu_rev & 0x0300) {
1071 ++ case 0x0200:
1072 ++ ast->chip = AST1100;
1073 ++ DRM_INFO("AST 1100 detected\n");
1074 ++ break;
1075 ++ case 0x0100:
1076 ++ ast->chip = AST2200;
1077 ++ DRM_INFO("AST 2200 detected\n");
1078 ++ break;
1079 ++ case 0x0000:
1080 ++ ast->chip = AST2150;
1081 ++ DRM_INFO("AST 2150 detected\n");
1082 ++ break;
1083 ++ default:
1084 ++ ast->chip = AST2100;
1085 ++ DRM_INFO("AST 2100 detected\n");
1086 ++ break;
1087 + }
1088 ++ ast->vga2_clone = false;
1089 ++ } else {
1090 ++ ast->chip = AST2000;
1091 ++ DRM_INFO("AST 2000 detected\n");
1092 + }
1093 +
1094 + /* Check if we support wide screen */
1095 + switch (ast->chip) {
1096 +- case AST1180:
1097 +- ast->support_wide_screen = true;
1098 +- break;
1099 + case AST2000:
1100 + ast->support_wide_screen = false;
1101 + break;
1102 +@@ -466,19 +458,17 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
1103 +
1104 + ast_detect_chip(dev, &need_post);
1105 +
1106 ++ ret = ast_get_dram_info(dev);
1107 ++ if (ret)
1108 ++ goto out_free;
1109 ++ ast->vram_size = ast_get_vram_info(dev);
1110 ++ DRM_INFO("dram MCLK=%u Mhz type=%d bus_width=%d size=%08x\n",
1111 ++ ast->mclk, ast->dram_type,
1112 ++ ast->dram_bus_width, ast->vram_size);
1113 ++
1114 + if (need_post)
1115 + ast_post_gpu(dev);
1116 +
1117 +- if (ast->chip != AST1180) {
1118 +- ret = ast_get_dram_info(dev);
1119 +- if (ret)
1120 +- goto out_free;
1121 +- ast->vram_size = ast_get_vram_info(dev);
1122 +- DRM_INFO("dram MCLK=%u Mhz type=%d bus_width=%d size=%08x\n",
1123 +- ast->mclk, ast->dram_type,
1124 +- ast->dram_bus_width, ast->vram_size);
1125 +- }
1126 +-
1127 + ret = ast_mm_init(ast);
1128 + if (ret)
1129 + goto out_free;
1130 +@@ -496,8 +486,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
1131 + ast->chip == AST2200 ||
1132 + ast->chip == AST2300 ||
1133 + ast->chip == AST2400 ||
1134 +- ast->chip == AST2500 ||
1135 +- ast->chip == AST1180) {
1136 ++ ast->chip == AST2500) {
1137 + dev->mode_config.max_width = 1920;
1138 + dev->mode_config.max_height = 2048;
1139 + } else {
1140 +diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
1141 +index 3a3a511670c9c..73fd76cec5120 100644
1142 +--- a/drivers/gpu/drm/ast/ast_mode.c
1143 ++++ b/drivers/gpu/drm/ast/ast_mode.c
1144 +@@ -769,9 +769,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
1145 + {
1146 + struct ast_private *ast = crtc->dev->dev_private;
1147 +
1148 +- if (ast->chip == AST1180)
1149 +- return;
1150 +-
1151 + /* TODO: Maybe control display signal generation with
1152 + * Sync Enable (bit CR17.7).
1153 + */
1154 +@@ -793,16 +790,10 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
1155 + static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
1156 + struct drm_crtc_state *state)
1157 + {
1158 +- struct ast_private *ast = crtc->dev->dev_private;
1159 + struct ast_crtc_state *ast_state;
1160 + const struct drm_format_info *format;
1161 + bool succ;
1162 +
1163 +- if (ast->chip == AST1180) {
1164 +- DRM_ERROR("AST 1180 modesetting not supported\n");
1165 +- return -EINVAL;
1166 +- }
1167 +-
1168 + if (!state->enable)
1169 + return 0; /* no mode checks if CRTC is being disabled */
1170 +
1171 +@@ -1044,7 +1035,7 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
1172 +
1173 + if ((ast->chip == AST2100) || (ast->chip == AST2200) ||
1174 + (ast->chip == AST2300) || (ast->chip == AST2400) ||
1175 +- (ast->chip == AST2500) || (ast->chip == AST1180)) {
1176 ++ (ast->chip == AST2500)) {
1177 + if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
1178 + return MODE_OK;
1179 +
1180 +diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
1181 +index 2d1b186197432..af0c8ebb009a1 100644
1182 +--- a/drivers/gpu/drm/ast/ast_post.c
1183 ++++ b/drivers/gpu/drm/ast/ast_post.c
1184 +@@ -58,13 +58,9 @@ bool ast_is_vga_enabled(struct drm_device *dev)
1185 + struct ast_private *ast = dev->dev_private;
1186 + u8 ch;
1187 +
1188 +- if (ast->chip == AST1180) {
1189 +- /* TODO 1180 */
1190 +- } else {
1191 +- ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
1192 +- return !!(ch & 0x01);
1193 +- }
1194 +- return false;
1195 ++ ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
1196 ++
1197 ++ return !!(ch & 0x01);
1198 + }
1199 +
1200 + static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
1201 +diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
1202 +index 962ded9ce73fd..9792220ddbe2e 100644
1203 +--- a/drivers/gpu/drm/i915/i915_pmu.c
1204 ++++ b/drivers/gpu/drm/i915/i915_pmu.c
1205 +@@ -441,8 +441,10 @@ static u64 count_interrupts(struct drm_i915_private *i915)
1206 +
1207 + static void i915_pmu_event_destroy(struct perf_event *event)
1208 + {
1209 +- WARN_ON(event->parent);
1210 +- module_put(THIS_MODULE);
1211 ++ struct drm_i915_private *i915 =
1212 ++ container_of(event->pmu, typeof(*i915), pmu.base);
1213 ++
1214 ++ drm_WARN_ON(&i915->drm, event->parent);
1215 + }
1216 +
1217 + static int
1218 +@@ -534,10 +536,8 @@ static int i915_pmu_event_init(struct perf_event *event)
1219 + if (ret)
1220 + return ret;
1221 +
1222 +- if (!event->parent) {
1223 +- __module_get(THIS_MODULE);
1224 ++ if (!event->parent)
1225 + event->destroy = i915_pmu_event_destroy;
1226 +- }
1227 +
1228 + return 0;
1229 + }
1230 +@@ -1058,8 +1058,10 @@ static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
1231 +
1232 + static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
1233 + {
1234 +- WARN_ON(pmu->cpuhp.slot == CPUHP_INVALID);
1235 +- WARN_ON(cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
1236 ++ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
1237 ++
1238 ++ drm_WARN_ON(&i915->drm, pmu->cpuhp.slot == CPUHP_INVALID);
1239 ++ drm_WARN_ON(&i915->drm, cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
1240 + cpuhp_remove_multi_state(pmu->cpuhp.slot);
1241 + pmu->cpuhp.slot = CPUHP_INVALID;
1242 + }
1243 +@@ -1121,6 +1123,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
1244 + if (!pmu->base.attr_groups)
1245 + goto err_attr;
1246 +
1247 ++ pmu->base.module = THIS_MODULE;
1248 + pmu->base.task_ctx_nr = perf_invalid_context;
1249 + pmu->base.event_init = i915_pmu_event_init;
1250 + pmu->base.add = i915_pmu_event_add;
1251 +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
1252 +index 444b77490a42a..7debf2ca42522 100644
1253 +--- a/drivers/gpu/drm/panel/panel-simple.c
1254 ++++ b/drivers/gpu/drm/panel/panel-simple.c
1255 +@@ -1717,7 +1717,7 @@ static const struct drm_display_mode frida_frd350h54004_mode = {
1256 + .vsync_end = 240 + 2 + 6,
1257 + .vtotal = 240 + 2 + 6 + 2,
1258 + .vrefresh = 60,
1259 +- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
1260 ++ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
1261 + };
1262 +
1263 + static const struct panel_desc frida_frd350h54004 = {
1264 +diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
1265 +index fa03fab02076d..33526c5df0e8c 100644
1266 +--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
1267 ++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
1268 +@@ -505,8 +505,10 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
1269 + int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
1270 + void *buf, int len, int write)
1271 + {
1272 +- unsigned long offset = (addr) - vma->vm_start;
1273 + struct ttm_buffer_object *bo = vma->vm_private_data;
1274 ++ unsigned long offset = (addr) - vma->vm_start +
1275 ++ ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
1276 ++ << PAGE_SHIFT);
1277 + int ret;
1278 +
1279 + if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
1280 +diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
1281 +index ec1a8ebb6f1bf..fa39d140adc6c 100644
1282 +--- a/drivers/gpu/drm/vgem/vgem_drv.c
1283 ++++ b/drivers/gpu/drm/vgem/vgem_drv.c
1284 +@@ -230,32 +230,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
1285 + return 0;
1286 + }
1287 +
1288 +-static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
1289 +- uint32_t handle, uint64_t *offset)
1290 +-{
1291 +- struct drm_gem_object *obj;
1292 +- int ret;
1293 +-
1294 +- obj = drm_gem_object_lookup(file, handle);
1295 +- if (!obj)
1296 +- return -ENOENT;
1297 +-
1298 +- if (!obj->filp) {
1299 +- ret = -EINVAL;
1300 +- goto unref;
1301 +- }
1302 +-
1303 +- ret = drm_gem_create_mmap_offset(obj);
1304 +- if (ret)
1305 +- goto unref;
1306 +-
1307 +- *offset = drm_vma_node_offset_addr(&obj->vma_node);
1308 +-unref:
1309 +- drm_gem_object_put_unlocked(obj);
1310 +-
1311 +- return ret;
1312 +-}
1313 +-
1314 + static struct drm_ioctl_desc vgem_ioctls[] = {
1315 + DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
1316 + DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
1317 +@@ -446,7 +420,6 @@ static struct drm_driver vgem_driver = {
1318 + .fops = &vgem_driver_fops,
1319 +
1320 + .dumb_create = vgem_gem_dumb_create,
1321 +- .dumb_map_offset = vgem_gem_dumb_map,
1322 +
1323 + .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1324 + .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1325 +diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
1326 +index 5df722072ba0b..19c5bc01eb790 100644
1327 +--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
1328 ++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
1329 +@@ -179,6 +179,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
1330 +
1331 + virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
1332 + vfpriv->ctx_id, buflist, out_fence);
1333 ++ dma_fence_put(&out_fence->f);
1334 + virtio_gpu_notify(vgdev);
1335 + return 0;
1336 +
1337 +diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
1338 +index b12fbc857f942..5c41e13496a02 100644
1339 +--- a/drivers/infiniband/hw/bnxt_re/main.c
1340 ++++ b/drivers/infiniband/hw/bnxt_re/main.c
1341 +@@ -811,7 +811,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
1342 + struct ib_event event;
1343 + unsigned int flags;
1344 +
1345 +- if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1346 ++ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
1347 ++ rdma_is_kernel_res(&qp->ib_qp.res)) {
1348 + flags = bnxt_re_lock_cqs(qp);
1349 + bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1350 + bnxt_re_unlock_cqs(qp, flags);
1351 +diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
1352 +index facff133139a9..3ba299cfd0b51 100644
1353 +--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
1354 ++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
1355 +@@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
1356 + case IB_WR_ATOMIC_CMP_AND_SWP:
1357 + case IB_WR_ATOMIC_FETCH_AND_ADD:
1358 + case IB_WR_RDMA_WRITE:
1359 ++ case IB_WR_RDMA_WRITE_WITH_IMM:
1360 + switch (prev->wr.opcode) {
1361 + case IB_WR_TID_RDMA_WRITE:
1362 + req = wqe_to_tid_req(prev);
1363 +diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
1364 +index 479fa557993e7..c69453a62767c 100644
1365 +--- a/drivers/infiniband/hw/hns/hns_roce_device.h
1366 ++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
1367 +@@ -66,8 +66,6 @@
1368 + #define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
1369 + #define HNS_ROCE_MIN_CQE_CNT 16
1370 +
1371 +-#define HNS_ROCE_RESERVED_SGE 1
1372 +-
1373 + #define HNS_ROCE_MAX_IRQ_NUM 128
1374 +
1375 + #define HNS_ROCE_SGE_IN_WQE 2
1376 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1377 +index eb71b941d21b7..38a48ab3e1d02 100644
1378 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1379 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1380 +@@ -629,7 +629,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
1381 +
1382 + wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
1383 +
1384 +- if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) {
1385 ++ if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
1386 + ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
1387 + wr->num_sge, hr_qp->rq.max_gs);
1388 + ret = -EINVAL;
1389 +@@ -649,7 +649,6 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
1390 + if (wr->num_sge < hr_qp->rq.max_gs) {
1391 + dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
1392 + dseg->addr = 0;
1393 +- dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
1394 + }
1395 +
1396 + /* rq support inline data */
1397 +@@ -783,8 +782,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
1398 + }
1399 +
1400 + if (wr->num_sge < srq->max_gs) {
1401 +- dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
1402 +- dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
1403 ++ dseg[i].len = 0;
1404 ++ dseg[i].lkey = cpu_to_le32(0x100);
1405 + dseg[i].addr = 0;
1406 + }
1407 +
1408 +@@ -5098,7 +5097,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
1409 +
1410 + attr->srq_limit = limit_wl;
1411 + attr->max_wr = srq->wqe_cnt - 1;
1412 +- attr->max_sge = srq->max_gs - HNS_ROCE_RESERVED_SGE;
1413 ++ attr->max_sge = srq->max_gs;
1414 +
1415 + out:
1416 + hns_roce_free_cmd_mailbox(hr_dev, mailbox);
1417 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1418 +index e6c385ced1872..4f840997c6c73 100644
1419 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1420 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1421 +@@ -92,9 +92,7 @@
1422 + #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
1423 + #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
1424 + #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
1425 +-#define HNS_ROCE_INVALID_LKEY 0x0
1426 +-#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
1427 +-
1428 ++#define HNS_ROCE_INVALID_LKEY 0x100
1429 + #define HNS_ROCE_CMQ_TX_TIMEOUT 30000
1430 + #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
1431 + #define HNS_ROCE_V2_RSV_QPS 8
1432 +diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
1433 +index a0a47bd669759..4edea397b6b80 100644
1434 +--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
1435 ++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
1436 +@@ -386,8 +386,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
1437 + return -EINVAL;
1438 + }
1439 +
1440 +- hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
1441 +- HNS_ROCE_RESERVED_SGE);
1442 ++ hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
1443 +
1444 + if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
1445 + hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
1446 +@@ -402,7 +401,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
1447 + hr_qp->rq_inl_buf.wqe_cnt = 0;
1448 +
1449 + cap->max_recv_wr = cnt;
1450 +- cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE;
1451 ++ cap->max_recv_sge = hr_qp->rq.max_gs;
1452 +
1453 + return 0;
1454 + }
1455 +diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
1456 +index f40a000e94ee7..b9e2dbd372b66 100644
1457 +--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
1458 ++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
1459 +@@ -297,7 +297,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
1460 + spin_lock_init(&srq->lock);
1461 +
1462 + srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
1463 +- srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE;
1464 ++ srq->max_gs = init_attr->attr.max_sge;
1465 +
1466 + if (udata) {
1467 + ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
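The whole revert above hinges on one boundary condition: with no SGE slot reserved any more, a work request using all max_gs scatter entries is valid again, so the overflow tests become '>' instead of '>='. A minimal standalone restatement (the -22 is just -EINVAL spelled out for the demo):

#include <assert.h>

static int check_num_sge(int num_sge, int max_gs)
{
	return num_sge > max_gs ? -22 /* -EINVAL */ : 0;
}

int main(void)
{
	assert(check_num_sge(4, 4) == 0);    /* full WQE: now accepted */
	assert(check_num_sge(5, 4) == -22);  /* still one too many */
	return 0;
}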
1468 +diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
1469 +index 527ae0b9a191e..0b4a3039f312f 100644
1470 +--- a/drivers/input/mouse/psmouse-base.c
1471 ++++ b/drivers/input/mouse/psmouse-base.c
1472 +@@ -2042,7 +2042,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
1473 + {
1474 + int type = *((unsigned int *)kp->arg);
1475 +
1476 +- return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
1477 ++ return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
1478 + }
1479 +
1480 + static int __init psmouse_init(void)
1481 +diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
1482 +index fadbdeeb44955..293867b9e7961 100644
1483 +--- a/drivers/media/pci/ttpci/budget-core.c
1484 ++++ b/drivers/media/pci/ttpci/budget-core.c
1485 +@@ -369,20 +369,25 @@ static int budget_register(struct budget *budget)
1486 + ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
1487 +
1488 + if (ret < 0)
1489 +- return ret;
1490 ++ goto err_release_dmx;
1491 +
1492 + budget->mem_frontend.source = DMX_MEMORY_FE;
1493 + ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
1494 + if (ret < 0)
1495 +- return ret;
1496 ++ goto err_release_dmx;
1497 +
1498 + ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
1499 + if (ret < 0)
1500 +- return ret;
1501 ++ goto err_release_dmx;
1502 +
1503 + dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
1504 +
1505 + return 0;
1506 ++
1507 ++err_release_dmx:
1508 ++ dvb_dmxdev_release(&budget->dmxdev);
1509 ++ dvb_dmx_release(&budget->demux);
1510 ++ return ret;
1511 + }
1512 +
1513 + static void budget_unregister(struct budget *budget)
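The budget_register() fix is a textbook use of the kernel's goto-unwind idiom: each failure point jumps to a label that releases exactly what was set up before it, in reverse order. The shape of it as a minimal userspace sketch (resource names invented):

#include <stdio.h>
#include <stdlib.h>

static int acquire_a(void) { return 0; }            /* pretend these can fail */
static int acquire_b(void) { return -1; }           /* force a mid-path error */
static void release_a(void) { puts("release_a"); }

static int setup(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;                 /* nothing to unwind yet */

	ret = acquire_b();
	if (ret)
		goto err_release_a;         /* unwind in reverse order */

	return 0;

err_release_a:
	release_a();
	return ret;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}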
1514 +diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c
1515 +index 00d19859db500..b11cfbe166dd3 100644
1516 +--- a/drivers/media/platform/coda/coda-jpeg.c
1517 ++++ b/drivers/media/platform/coda/coda-jpeg.c
1518 +@@ -327,8 +327,11 @@ int coda_jpeg_decode_header(struct coda_ctx *ctx, struct vb2_buffer *vb)
1519 + "only 8-bit quantization tables supported\n");
1520 + continue;
1521 + }
1522 +- if (!ctx->params.jpeg_qmat_tab[i])
1523 ++ if (!ctx->params.jpeg_qmat_tab[i]) {
1524 + ctx->params.jpeg_qmat_tab[i] = kmalloc(64, GFP_KERNEL);
1525 ++ if (!ctx->params.jpeg_qmat_tab[i])
1526 ++ return -ENOMEM;
1527 ++ }
1528 + memcpy(ctx->params.jpeg_qmat_tab[i],
1529 + quantization_tables[i].start, 64);
1530 + }
1531 +diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
1532 +index d38d2bbb6f0f8..7000f0bf0b353 100644
1533 +--- a/drivers/media/platform/davinci/vpss.c
1534 ++++ b/drivers/media/platform/davinci/vpss.c
1535 +@@ -505,19 +505,31 @@ static void vpss_exit(void)
1536 +
1537 + static int __init vpss_init(void)
1538 + {
1539 ++ int ret;
1540 ++
1541 + if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
1542 + return -EBUSY;
1543 +
1544 + oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
1545 + if (unlikely(!oper_cfg.vpss_regs_base2)) {
1546 +- release_mem_region(VPSS_CLK_CTRL, 4);
1547 +- return -ENOMEM;
1548 ++ ret = -ENOMEM;
1549 ++ goto err_ioremap;
1550 + }
1551 +
1552 + writel(VPSS_CLK_CTRL_VENCCLKEN |
1553 +- VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
1554 ++ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
1555 ++
1556 ++ ret = platform_driver_register(&vpss_driver);
1557 ++ if (ret)
1558 ++ goto err_pd_register;
1559 ++
1560 ++ return 0;
1561 +
1562 +- return platform_driver_register(&vpss_driver);
1563 ++err_pd_register:
1564 ++ iounmap(oper_cfg.vpss_regs_base2);
1565 ++err_ioremap:
1566 ++ release_mem_region(VPSS_CLK_CTRL, 4);
1567 ++ return ret;
1568 + }
1569 + subsys_initcall(vpss_init);
1570 + module_exit(vpss_exit);
1571 +diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
1572 +index 3fdc9f964a3c6..2483641799dfb 100644
1573 +--- a/drivers/media/platform/qcom/camss/camss.c
1574 ++++ b/drivers/media/platform/qcom/camss/camss.c
1575 +@@ -504,7 +504,6 @@ static int camss_of_parse_ports(struct camss *camss)
1576 + return num_subdevs;
1577 +
1578 + err_cleanup:
1579 +- v4l2_async_notifier_cleanup(&camss->notifier);
1580 + of_node_put(node);
1581 + return ret;
1582 + }
1583 +@@ -835,29 +834,38 @@ static int camss_probe(struct platform_device *pdev)
1584 + camss->csid_num = 4;
1585 + camss->vfe_num = 2;
1586 + } else {
1587 +- return -EINVAL;
1588 ++ ret = -EINVAL;
1589 ++ goto err_free;
1590 + }
1591 +
1592 + camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
1593 + sizeof(*camss->csiphy), GFP_KERNEL);
1594 +- if (!camss->csiphy)
1595 +- return -ENOMEM;
1596 ++ if (!camss->csiphy) {
1597 ++ ret = -ENOMEM;
1598 ++ goto err_free;
1599 ++ }
1600 +
1601 + camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
1602 + GFP_KERNEL);
1603 +- if (!camss->csid)
1604 +- return -ENOMEM;
1605 ++ if (!camss->csid) {
1606 ++ ret = -ENOMEM;
1607 ++ goto err_free;
1608 ++ }
1609 +
1610 + camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
1611 + GFP_KERNEL);
1612 +- if (!camss->vfe)
1613 +- return -ENOMEM;
1614 ++ if (!camss->vfe) {
1615 ++ ret = -ENOMEM;
1616 ++ goto err_free;
1617 ++ }
1618 +
1619 + v4l2_async_notifier_init(&camss->notifier);
1620 +
1621 + num_subdevs = camss_of_parse_ports(camss);
1622 +- if (num_subdevs < 0)
1623 +- return num_subdevs;
1624 ++ if (num_subdevs < 0) {
1625 ++ ret = num_subdevs;
1626 ++ goto err_cleanup;
1627 ++ }
1628 +
1629 + ret = camss_init_subdevices(camss);
1630 + if (ret < 0)
1631 +@@ -936,6 +944,8 @@ err_register_entities:
1632 + v4l2_device_unregister(&camss->v4l2_dev);
1633 + err_cleanup:
1634 + v4l2_async_notifier_cleanup(&camss->notifier);
1635 ++err_free:
1636 ++ kfree(camss);
1637 +
1638 + return ret;
1639 + }
1640 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1641 +index f88cb097b022a..500aa3e19a4c7 100644
1642 +--- a/drivers/net/bonding/bond_main.c
1643 ++++ b/drivers/net/bonding/bond_main.c
1644 +@@ -2084,7 +2084,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
1645 + int ret;
1646 +
1647 + ret = __bond_release_one(bond_dev, slave_dev, false, true);
1648 +- if (ret == 0 && !bond_has_slaves(bond)) {
1649 ++ if (ret == 0 && !bond_has_slaves(bond) &&
1650 ++ bond_dev->reg_state != NETREG_UNREGISTERING) {
1651 + bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
1652 + netdev_info(bond_dev, "Destroying bond\n");
1653 + bond_remove_proc_entry(bond);
1654 +@@ -2824,6 +2825,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
1655 + if (bond_time_in_interval(bond, last_rx, 1)) {
1656 + bond_propose_link_state(slave, BOND_LINK_UP);
1657 + commit++;
1658 ++ } else if (slave->link == BOND_LINK_BACK) {
1659 ++ bond_propose_link_state(slave, BOND_LINK_FAIL);
1660 ++ commit++;
1661 + }
1662 + continue;
1663 + }
1664 +@@ -2932,6 +2936,19 @@ static void bond_ab_arp_commit(struct bonding *bond)
1665 +
1666 + continue;
1667 +
1668 ++ case BOND_LINK_FAIL:
1669 ++ bond_set_slave_link_state(slave, BOND_LINK_FAIL,
1670 ++ BOND_SLAVE_NOTIFY_NOW);
1671 ++ bond_set_slave_inactive_flags(slave,
1672 ++ BOND_SLAVE_NOTIFY_NOW);
1673 ++
1674 ++ /* A slave has just been enslaved and has become
1675 ++ * the current active slave.
1676 ++ */
1677 ++ if (rtnl_dereference(bond->curr_active_slave))
1678 ++ RCU_INIT_POINTER(bond->current_arp_slave, NULL);
1679 ++ continue;
1680 ++
1681 + default:
1682 + slave_err(bond->dev, slave->dev,
1683 + "impossible: link_new_state %d on slave\n",
1684 +@@ -2982,8 +2999,6 @@ static bool bond_ab_arp_probe(struct bonding *bond)
1685 + return should_notify_rtnl;
1686 + }
1687 +
1688 +- bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
1689 +-
1690 + bond_for_each_slave_rcu(bond, slave, iter) {
1691 + if (!found && !before && bond_slave_is_up(slave))
1692 + before = slave;
1693 +@@ -4431,13 +4446,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
1694 + return ret;
1695 + }
1696 +
1697 ++static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
1698 ++{
1699 ++ if (speed == 0 || speed == SPEED_UNKNOWN)
1700 ++ speed = slave->speed;
1701 ++ else
1702 ++ speed = min(speed, slave->speed);
1703 ++
1704 ++ return speed;
1705 ++}
1706 ++
1707 + static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
1708 + struct ethtool_link_ksettings *cmd)
1709 + {
1710 + struct bonding *bond = netdev_priv(bond_dev);
1711 +- unsigned long speed = 0;
1712 + struct list_head *iter;
1713 + struct slave *slave;
1714 ++ u32 speed = 0;
1715 +
1716 + cmd->base.duplex = DUPLEX_UNKNOWN;
1717 + cmd->base.port = PORT_OTHER;
1718 +@@ -4449,8 +4474,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
1719 + */
1720 + bond_for_each_slave(bond, slave, iter) {
1721 + if (bond_slave_can_tx(slave)) {
1722 +- if (slave->speed != SPEED_UNKNOWN)
1723 +- speed += slave->speed;
1724 ++ if (slave->speed != SPEED_UNKNOWN) {
1725 ++ if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
1726 ++ speed = bond_mode_bcast_speed(slave,
1727 ++ speed);
1728 ++ else
1729 ++ speed += slave->speed;
1730 ++ }
1731 + if (cmd->base.duplex == DUPLEX_UNKNOWN &&
1732 + slave->duplex != DUPLEX_UNKNOWN)
1733 + cmd->base.duplex = slave->duplex;
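The new bond_mode_bcast_speed() encodes a real semantic difference: in broadcast mode every packet is transmitted on every slave, so the bond is only as fast as its slowest slave, while the other modes can meaningfully sum slave speeds. A standalone rendering of both rules:

#include <stdio.h>

#define SPEED_UNKNOWN ((unsigned int)-1)

static unsigned int bcast_speed(unsigned int agg, unsigned int slave)
{
	if (agg == 0 || agg == SPEED_UNKNOWN)
		return slave;
	return agg < slave ? agg : slave;
}

int main(void)
{
	unsigned int slaves[] = { 1000, 100, 1000 };
	unsigned int sum = 0, min = 0;

	for (int i = 0; i < 3; i++) {
		sum += slaves[i];                 /* e.g. balance-rr */
		min = bcast_speed(min, slaves[i]); /* broadcast */
	}
	printf("summed: %u Mb/s, broadcast: %u Mb/s\n", sum, min);
	return 0;
}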
1734 +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
1735 +index 1df05841ab6b1..86869337223a8 100644
1736 +--- a/drivers/net/dsa/b53/b53_common.c
1737 ++++ b/drivers/net/dsa/b53/b53_common.c
1738 +@@ -1555,6 +1555,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
1739 + return ret;
1740 +
1741 + switch (ret) {
1742 ++ case -ETIMEDOUT:
1743 ++ return ret;
1744 + case -ENOSPC:
1745 + dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
1746 + addr, vid);
1747 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1748 +index dda4b8fc9525e..000f57198352d 100644
1749 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
1750 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1751 +@@ -2177,13 +2177,10 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter,
1752 + int i;
1753 +
1754 + for (i = first_index; i < first_index + count; i++) {
1755 +- /* Check if napi was initialized before */
1756 +- if (!ENA_IS_XDP_INDEX(adapter, i) ||
1757 +- adapter->ena_napi[i].xdp_ring)
1758 +- netif_napi_del(&adapter->ena_napi[i].napi);
1759 +- else
1760 +- WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
1761 +- adapter->ena_napi[i].xdp_ring);
1762 ++ netif_napi_del(&adapter->ena_napi[i].napi);
1763 ++
1764 ++ WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
1765 ++ adapter->ena_napi[i].xdp_ring);
1766 + }
1767 + }
1768 +
1769 +@@ -3523,16 +3520,14 @@ static void ena_fw_reset_device(struct work_struct *work)
1770 + {
1771 + struct ena_adapter *adapter =
1772 + container_of(work, struct ena_adapter, reset_task);
1773 +- struct pci_dev *pdev = adapter->pdev;
1774 +
1775 +- if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1776 +- dev_err(&pdev->dev,
1777 +- "device reset schedule while reset bit is off\n");
1778 +- return;
1779 +- }
1780 + rtnl_lock();
1781 +- ena_destroy_device(adapter, false);
1782 +- ena_restore_device(adapter);
1783 ++
1784 ++ if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1785 ++ ena_destroy_device(adapter, false);
1786 ++ ena_restore_device(adapter);
1787 ++ }
1788 ++
1789 + rtnl_unlock();
1790 + }
1791 +
1792 +@@ -4366,8 +4361,11 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
1793 + netdev->rx_cpu_rmap = NULL;
1794 + }
1795 + #endif /* CONFIG_RFS_ACCEL */
1796 +- del_timer_sync(&adapter->timer_service);
1797 +
1798 ++ /* Make sure timer and reset routine won't be called after
1799 ++ * freeing device resources.
1800 ++ */
1801 ++ del_timer_sync(&adapter->timer_service);
1802 + cancel_work_sync(&adapter->reset_task);
1803 +
1804 + rtnl_lock(); /* lock released inside the below if-else block */
1805 +diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
1806 +index 66e67b24a887c..62e271aea4a50 100644
1807 +--- a/drivers/net/ethernet/cortina/gemini.c
1808 ++++ b/drivers/net/ethernet/cortina/gemini.c
1809 +@@ -2389,7 +2389,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
1810 +
1811 + dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
1812 +
1813 +- netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
1814 ++ netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM);
1815 + if (!netdev) {
1816 + dev_err(dev, "Can't allocate ethernet device #%d\n", id);
1817 + return -ENOMEM;
1818 +@@ -2521,7 +2521,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
1819 + }
1820 +
1821 + port->netdev = NULL;
1822 +- free_netdev(netdev);
1823 + return ret;
1824 + }
1825 +
1826 +@@ -2530,7 +2529,6 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
1827 + struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
1828 +
1829 + gemini_port_remove(port);
1830 +- free_netdev(port->netdev);
1831 + return 0;
1832 + }
1833 +
1834 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
1835 +index cc7fbfc093548..534fcc71a2a53 100644
1836 +--- a/drivers/net/ethernet/freescale/fec_main.c
1837 ++++ b/drivers/net/ethernet/freescale/fec_main.c
1838 +@@ -3714,11 +3714,11 @@ failed_mii_init:
1839 + failed_irq:
1840 + failed_init:
1841 + fec_ptp_stop(pdev);
1842 +- if (fep->reg_phy)
1843 +- regulator_disable(fep->reg_phy);
1844 + failed_reset:
1845 + pm_runtime_put_noidle(&pdev->dev);
1846 + pm_runtime_disable(&pdev->dev);
1847 ++ if (fep->reg_phy)
1848 ++ regulator_disable(fep->reg_phy);
1849 + failed_regulator:
1850 + clk_disable_unprepare(fep->clk_ahb);
1851 + failed_clk_ahb:
1852 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
1853 +index aa5f1c0aa7215..0921785a10795 100644
1854 +--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
1855 ++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
1856 +@@ -1211,7 +1211,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
1857 + #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
1858 + #define I40E_AQC_SET_VSI_DEFAULT 0x08
1859 + #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
1860 +-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
1861 ++#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000
1862 + __le16 seid;
1863 + #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
1864 + __le16 vlan_tag;
1865 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
1866 +index 45b90eb11adba..21e44c6cd5eac 100644
1867 +--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
1868 ++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
1869 +@@ -1969,6 +1969,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1870 + return status;
1871 + }
1872 +
1873 ++/**
1874 ++ * i40e_is_aq_api_ver_ge
1875 ++ * @aq: pointer to AdminQ info containing HW API version to compare
1876 ++ * @maj: API major value
1877 ++ * @min: API minor value
1878 ++ *
1879 ++ * Assert whether current HW API version is greater/equal than provided.
1880 ++ **/
1881 ++static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
1882 ++ u16 min)
1883 ++{
1884 ++ return (aq->api_maj_ver > maj ||
1885 ++ (aq->api_maj_ver == maj && aq->api_min_ver >= min));
1886 ++}
1887 ++
1888 + /**
1889 + * i40e_aq_add_vsi
1890 + * @hw: pointer to the hw struct
1891 +@@ -2094,18 +2109,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
1892 +
1893 + if (set) {
1894 + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1895 +- if (rx_only_promisc &&
1896 +- (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
1897 +- (hw->aq.api_maj_ver > 1)))
1898 +- flags |= I40E_AQC_SET_VSI_PROMISC_TX;
1899 ++ if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1900 ++ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1901 + }
1902 +
1903 + cmd->promiscuous_flags = cpu_to_le16(flags);
1904 +
1905 + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1906 +- if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
1907 +- (hw->aq.api_maj_ver > 1))
1908 +- cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
1909 ++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1910 ++ cmd->valid_flags |=
1911 ++ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1912 +
1913 + cmd->seid = cpu_to_le16(seid);
1914 + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1915 +@@ -2202,11 +2215,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
1916 + i40e_fill_default_direct_cmd_desc(&desc,
1917 + i40e_aqc_opc_set_vsi_promiscuous_modes);
1918 +
1919 +- if (enable)
1920 ++ if (enable) {
1921 + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1922 ++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1923 ++ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1924 ++ }
1925 +
1926 + cmd->promiscuous_flags = cpu_to_le16(flags);
1927 + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1928 ++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1929 ++ cmd->valid_flags |=
1930 ++ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1931 + cmd->seid = cpu_to_le16(seid);
1932 + cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
1933 +
1934 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
1935 +index 56ecd6c3f2362..6af6367e7cac2 100644
1936 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
1937 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
1938 +@@ -15352,6 +15352,9 @@ static void i40e_remove(struct pci_dev *pdev)
1939 + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
1940 + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
1941 +
1942 ++ while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1943 ++ usleep_range(1000, 2000);
1944 ++
1945 + /* no more scheduling of any task */
1946 + set_bit(__I40E_SUSPENDED, pf->state);
1947 + set_bit(__I40E_DOWN, pf->state);
1948 +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
1949 +index 6919c50e449a2..63259ecd41e5b 100644
1950 +--- a/drivers/net/ethernet/intel/igc/igc_main.c
1951 ++++ b/drivers/net/ethernet/intel/igc/igc_main.c
1952 +@@ -5158,6 +5158,8 @@ static int igc_probe(struct pci_dev *pdev,
1953 + device_set_wakeup_enable(&adapter->pdev->dev,
1954 + adapter->flags & IGC_FLAG_WOL_SUPPORTED);
1955 +
1956 ++ igc_ptp_init(adapter);
1957 ++
1958 + /* reset the hardware with the new settings */
1959 + igc_reset(adapter);
1960 +
1961 +@@ -5174,9 +5176,6 @@ static int igc_probe(struct pci_dev *pdev,
1962 + /* carrier off reporting is important to ethtool even BEFORE open */
1963 + netif_carrier_off(netdev);
1964 +
1965 +- /* do hw tstamp init after resetting */
1966 +- igc_ptp_init(adapter);
1967 +-
1968 + /* Check if Media Autosense is enabled */
1969 + adapter->ei = *ei;
1970 +
1971 +diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
1972 +index 0d746f8588c81..61e38853aa47d 100644
1973 +--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
1974 ++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
1975 +@@ -608,8 +608,6 @@ void igc_ptp_init(struct igc_adapter *adapter)
1976 + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
1977 + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
1978 +
1979 +- igc_ptp_reset(adapter);
1980 +-
1981 + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
1982 + &adapter->pdev->dev);
1983 + if (IS_ERR(adapter->ptp_clock)) {
1984 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1985 +index 0d779bba1b019..6b81c04ab5e29 100644
1986 +--- a/drivers/net/hyperv/netvsc_drv.c
1987 ++++ b/drivers/net/hyperv/netvsc_drv.c
1988 +@@ -502,7 +502,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
1989 + int rc;
1990 +
1991 + skb->dev = vf_netdev;
1992 +- skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
1993 ++ skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
1994 +
1995 + rc = dev_queue_xmit(skb);
1996 + if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
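The netvsc fix looks cosmetic but is not: as of this kernel, skb_record_rx_queue() stores queue + 1 in skb->queue_mapping so that 0 can mean "no queue recorded", and readers decode with the matching -1. Assigning the raw value, as the old line did, breaks that convention. An illustrative restatement (not the kernel source):

#include <assert.h>
#include <stdint.h>

struct sk_buff_sketch { uint16_t queue_mapping; };

static void record_rx_queue(struct sk_buff_sketch *skb, uint16_t q)
{
	skb->queue_mapping = q + 1;          /* 0 is reserved for "unset" */
}

static uint16_t get_rx_queue(const struct sk_buff_sketch *skb)
{
	return skb->queue_mapping - 1;
}

int main(void)
{
	struct sk_buff_sketch skb = { 0 };   /* 0 == no queue recorded */

	record_rx_queue(&skb, 3);
	assert(get_rx_queue(&skb) == 3);
	/* A raw assignment, as in the old code, would read back off by
	 * one once the -1 decoding is applied. */
	return 0;
}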
1997 +diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
1998 +index 15e87c097b0b3..5bca94c990061 100644
1999 +--- a/drivers/net/ipvlan/ipvlan_main.c
2000 ++++ b/drivers/net/ipvlan/ipvlan_main.c
2001 +@@ -106,12 +106,21 @@ static void ipvlan_port_destroy(struct net_device *dev)
2002 + kfree(port);
2003 + }
2004 +
2005 ++#define IPVLAN_ALWAYS_ON_OFLOADS \
2006 ++ (NETIF_F_SG | NETIF_F_HW_CSUM | \
2007 ++ NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
2008 ++
2009 ++#define IPVLAN_ALWAYS_ON \
2010 ++ (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
2011 ++
2012 + #define IPVLAN_FEATURES \
2013 +- (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
2014 ++ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
2015 + NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
2016 + NETIF_F_GRO | NETIF_F_RXCSUM | \
2017 + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
2018 +
2019 ++ /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */
2020 ++
2021 + #define IPVLAN_STATE_MASK \
2022 + ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
2023 +
2024 +@@ -125,7 +134,9 @@ static int ipvlan_init(struct net_device *dev)
2025 + dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
2026 + (phy_dev->state & IPVLAN_STATE_MASK);
2027 + dev->features = phy_dev->features & IPVLAN_FEATURES;
2028 +- dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
2029 ++ dev->features |= IPVLAN_ALWAYS_ON;
2030 ++ dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
2031 ++ dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
2032 + dev->hw_enc_features |= dev->features;
2033 + dev->gso_max_size = phy_dev->gso_max_size;
2034 + dev->gso_max_segs = phy_dev->gso_max_segs;
2035 +@@ -227,7 +238,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev,
2036 + {
2037 + struct ipvl_dev *ipvlan = netdev_priv(dev);
2038 +
2039 +- return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
2040 ++ features |= NETIF_F_ALL_FOR_ALL;
2041 ++ features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
2042 ++ features = netdev_increment_features(ipvlan->phy_dev->features,
2043 ++ features, features);
2044 ++ features |= IPVLAN_ALWAYS_ON;
2045 ++ features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);
2046 ++
2047 ++ return features;
2048 + }
2049 +
2050 + static void ipvlan_change_rx_flags(struct net_device *dev, int change)
2051 +@@ -734,10 +752,9 @@ static int ipvlan_device_event(struct notifier_block *unused,
2052 +
2053 + case NETDEV_FEAT_CHANGE:
2054 + list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
2055 +- ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
2056 + ipvlan->dev->gso_max_size = dev->gso_max_size;
2057 + ipvlan->dev->gso_max_segs = dev->gso_max_segs;
2058 +- netdev_features_change(ipvlan->dev);
2059 ++ netdev_update_features(ipvlan->dev);
2060 + }
2061 + break;
2062 +
2063 +diff --git a/drivers/of/address.c b/drivers/of/address.c
2064 +index 8eea3f6e29a44..340d3051b1ce2 100644
2065 +--- a/drivers/of/address.c
2066 ++++ b/drivers/of/address.c
2067 +@@ -980,6 +980,11 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
2068 + /* Don't error out as we'd break some existing DTs */
2069 + continue;
2070 + }
2071 ++ if (range.cpu_addr == OF_BAD_ADDR) {
2072 ++ pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
2073 ++ range.bus_addr, node);
2074 ++ continue;
2075 ++ }
2076 + dma_offset = range.cpu_addr - range.bus_addr;
2077 +
2078 + /* Take lower and upper limits */
2079 +diff --git a/drivers/opp/core.c b/drivers/opp/core.c
2080 +index dfbd3d10410ca..8c90f78717723 100644
2081 +--- a/drivers/opp/core.c
2082 ++++ b/drivers/opp/core.c
2083 +@@ -862,8 +862,10 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
2084 + * have OPP table for the device, while others don't and
2085 + * opp_set_rate() just needs to behave like clk_set_rate().
2086 + */
2087 +- if (!_get_opp_count(opp_table))
2088 +- return 0;
2089 ++ if (!_get_opp_count(opp_table)) {
2090 ++ ret = 0;
2091 ++ goto put_opp_table;
2092 ++ }
2093 +
2094 + if (!opp_table->required_opp_tables && !opp_table->regulators &&
2095 + !opp_table->paths) {
2096 +@@ -874,7 +876,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
2097 +
2098 + ret = _set_opp_bw(opp_table, NULL, dev, true);
2099 + if (ret)
2100 +- return ret;
2101 ++ goto put_opp_table;
2102 +
2103 + if (opp_table->regulator_enabled) {
2104 + regulator_disable(opp_table->regulators[0]);
2105 +@@ -901,10 +903,13 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
2106 +
2107 + /* Return early if nothing to do */
2108 + if (old_freq == freq) {
2109 +- dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
2110 +- __func__, freq);
2111 +- ret = 0;
2112 +- goto put_opp_table;
2113 ++ if (!opp_table->required_opp_tables && !opp_table->regulators &&
2114 ++ !opp_table->paths) {
2115 ++ dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
2116 ++ __func__, freq);
2117 ++ ret = 0;
2118 ++ goto put_opp_table;
2119 ++ }
2120 + }
2121 +
2122 + /*
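Both opp hunks exist because dev_pm_opp_set_rate() takes a reference on the table near the top; any early "return" after that point leaks it, so every exit has to funnel through the put. The discipline in miniature:

#include <stdio.h>

static int refs;
static void get_table(void) { refs++; }
static void put_table(void) { refs--; }

static int set_rate(int nothing_to_do)
{
	int ret = 0;

	get_table();
	if (nothing_to_do)
		goto out_put;            /* was: return 0 -- leaked a ref */

	/* ... real work ... */

out_put:
	put_table();
	return ret;
}

int main(void)
{
	set_rate(1);
	printf("refs after early exit: %d\n", refs);  /* 0: balanced */
	return 0;
}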
2123 +diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
2124 +index b59f84918fe06..c9e790c74051f 100644
2125 +--- a/drivers/pci/hotplug/s390_pci_hpc.c
2126 ++++ b/drivers/pci/hotplug/s390_pci_hpc.c
2127 +@@ -83,21 +83,19 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
2128 + struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
2129 + hotplug_slot);
2130 + struct pci_dev *pdev;
2131 +- struct zpci_bus *zbus = zdev->zbus;
2132 + int rc;
2133 +
2134 + if (!zpci_fn_configured(zdev->state))
2135 + return -EIO;
2136 +
2137 +- pdev = pci_get_slot(zbus->bus, zdev->devfn);
2138 +- if (pdev) {
2139 +- if (pci_num_vf(pdev))
2140 +- return -EBUSY;
2141 +-
2142 +- pci_stop_and_remove_bus_device_locked(pdev);
2143 ++ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
2144 ++ if (pdev && pci_num_vf(pdev)) {
2145 + pci_dev_put(pdev);
2146 ++ return -EBUSY;
2147 + }
2148 +
2149 ++ zpci_remove_device(zdev);
2150 ++
2151 + rc = zpci_disable_device(zdev);
2152 + if (rc)
2153 + return rc;
2154 +diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
2155 +index 27797157fcb3f..6349d2cd36805 100644
2156 +--- a/drivers/rtc/rtc-goldfish.c
2157 ++++ b/drivers/rtc/rtc-goldfish.c
2158 +@@ -73,6 +73,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
2159 + rtc_alarm64 = rtc_tm_to_time64(&alrm->time) * NSEC_PER_SEC;
2160 + writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
2161 + writel(rtc_alarm64, base + TIMER_ALARM_LOW);
2162 ++ writel(1, base + TIMER_IRQ_ENABLED);
2163 + } else {
2164 + /*
2165 + * if this function was called with enabled=0
2166 +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
2167 +index c795f22249d8f..140186fe1d1e0 100644
2168 +--- a/drivers/s390/scsi/zfcp_fsf.c
2169 ++++ b/drivers/s390/scsi/zfcp_fsf.c
2170 +@@ -434,7 +434,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
2171 + return;
2172 + }
2173 +
2174 +- del_timer(&req->timer);
2175 ++ del_timer_sync(&req->timer);
2176 + zfcp_fsf_protstatus_eval(req);
2177 + zfcp_fsf_fsfstatus_eval(req);
2178 + req->handler(req);
2179 +@@ -867,7 +867,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
2180 + req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
2181 + req->issued = get_tod_clock();
2182 + if (zfcp_qdio_send(qdio, &req->qdio_req)) {
2183 +- del_timer(&req->timer);
2184 ++ del_timer_sync(&req->timer);
2185 + /* lookup request again, list might have changed */
2186 + zfcp_reqlist_find_rm(adapter->req_list, req_id);
2187 + zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
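The zfcp change swaps del_timer() for del_timer_sync(): the former only disarms the timer and can return while the handler is still running on another CPU, the latter also waits for a running handler to finish, which is what makes the subsequent teardown safe. A pthread analogue of the same distinction (compile with -pthread; names invented):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int armed = 1;
static int shared;                 /* stand-in for the fsf request */

static void *timer_fn(void *arg)
{
	(void)arg;
	if (atomic_load(&armed))
		shared++;          /* may still run after a bare disarm */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, timer_fn, NULL);

	atomic_store(&armed, 0);   /* del_timer(): just disarm ... */
	pthread_join(t, NULL);     /* del_timer_sync(): ... and wait, so
				    * freeing what timer_fn touches is
				    * now safe. */
	printf("shared = %d\n", shared);
	return 0;
}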
2188 +diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
2189 +index 2b865c6423e29..e00dc4693fcbd 100644
2190 +--- a/drivers/scsi/libfc/fc_disc.c
2191 ++++ b/drivers/scsi/libfc/fc_disc.c
2192 +@@ -581,8 +581,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
2193 +
2194 + if (PTR_ERR(fp) == -FC_EX_CLOSED)
2195 + goto out;
2196 +- if (IS_ERR(fp))
2197 +- goto redisc;
2198 ++ if (IS_ERR(fp)) {
2199 ++ mutex_lock(&disc->disc_mutex);
2200 ++ fc_disc_restart(disc);
2201 ++ mutex_unlock(&disc->disc_mutex);
2202 ++ goto out;
2203 ++ }
2204 +
2205 + cp = fc_frame_payload_get(fp, sizeof(*cp));
2206 + if (!cp)
2207 +@@ -609,7 +613,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
2208 + new_rdata->disc_id = disc->disc_id;
2209 + fc_rport_login(new_rdata);
2210 + }
2211 +- goto out;
2212 ++ goto free_fp;
2213 + }
2214 + rdata->disc_id = disc->disc_id;
2215 + mutex_unlock(&rdata->rp_mutex);
2216 +@@ -626,6 +630,8 @@ redisc:
2217 + fc_disc_restart(disc);
2218 + mutex_unlock(&disc->disc_mutex);
2219 + }
2220 ++free_fp:
2221 ++ fc_frame_free(fp);
2222 + out:
2223 + kref_put(&rdata->kref, fc_rport_destroy);
2224 + if (!IS_ERR(fp))
2225 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
2226 +index e92fad99338cd..5c7c22d0fab4b 100644
2227 +--- a/drivers/scsi/qla2xxx/qla_os.c
2228 ++++ b/drivers/scsi/qla2xxx/qla_os.c
2229 +@@ -2829,10 +2829,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2230 + /* This may fail but that's ok */
2231 + pci_enable_pcie_error_reporting(pdev);
2232 +
2233 +- /* Turn off T10-DIF when FC-NVMe is enabled */
2234 +- if (ql2xnvmeenable)
2235 +- ql2xenabledif = 0;
2236 +-
2237 + ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
2238 + if (!ha) {
2239 + ql_log_pci(ql_log_fatal, pdev, 0x0009,
2240 +diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
2241 +index 46bb905b4d6a9..eafe0db98d542 100644
2242 +--- a/drivers/scsi/ufs/ti-j721e-ufs.c
2243 ++++ b/drivers/scsi/ufs/ti-j721e-ufs.c
2244 +@@ -38,6 +38,7 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
2245 + /* Select MPHY refclk frequency */
2246 + clk = devm_clk_get(dev, NULL);
2247 + if (IS_ERR(clk)) {
2248 ++ ret = PTR_ERR(clk);
2249 + dev_err(dev, "Cannot claim MPHY clock.\n");
2250 + goto clk_err;
2251 + }
2252 +diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
2253 +index e3175a63c676b..e80d5f26a4424 100644
2254 +--- a/drivers/scsi/ufs/ufs_quirks.h
2255 ++++ b/drivers/scsi/ufs/ufs_quirks.h
2256 +@@ -12,6 +12,7 @@
2257 + #define UFS_ANY_VENDOR 0xFFFF
2258 + #define UFS_ANY_MODEL "ANY_MODEL"
2259 +
2260 ++#define UFS_VENDOR_MICRON 0x12C
2261 + #define UFS_VENDOR_TOSHIBA 0x198
2262 + #define UFS_VENDOR_SAMSUNG 0x1CE
2263 + #define UFS_VENDOR_SKHYNIX 0x1AD
2264 +diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
2265 +index 8f78a81514991..b220666774ce8 100644
2266 +--- a/drivers/scsi/ufs/ufshcd-pci.c
2267 ++++ b/drivers/scsi/ufs/ufshcd-pci.c
2268 +@@ -67,11 +67,23 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
2269 + return err;
2270 + }
2271 +
2272 ++static int ufs_intel_ehl_init(struct ufs_hba *hba)
2273 ++{
2274 ++ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
2275 ++ return 0;
2276 ++}
2277 ++
2278 + static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
2279 + .name = "intel-pci",
2280 + .link_startup_notify = ufs_intel_link_startup_notify,
2281 + };
2282 +
2283 ++static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
2284 ++ .name = "intel-pci",
2285 ++ .init = ufs_intel_ehl_init,
2286 ++ .link_startup_notify = ufs_intel_link_startup_notify,
2287 ++};
2288 ++
2289 + #ifdef CONFIG_PM_SLEEP
2290 + /**
2291 + * ufshcd_pci_suspend - suspend power management function
2292 +@@ -200,8 +212,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = {
2293 + static const struct pci_device_id ufshcd_pci_tbl[] = {
2294 + { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
2295 + { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
2296 +- { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
2297 +- { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
2298 ++ { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
2299 ++ { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
2300 + { } /* terminate list */
2301 + };
2302 +
2303 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2304 +index e412e43d23821..136b863bc1d45 100644
2305 +--- a/drivers/scsi/ufs/ufshcd.c
2306 ++++ b/drivers/scsi/ufs/ufshcd.c
2307 +@@ -216,6 +216,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
2308 +
2309 + static struct ufs_dev_fix ufs_fixups[] = {
2310 + /* UFS cards deviations table */
2311 ++ UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
2312 ++ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
2313 + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
2314 + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
2315 + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
2316 +@@ -672,7 +674,11 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
2317 + */
2318 + static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
2319 + {
2320 +- ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
2321 ++ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
2322 ++ ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
2323 ++ else
2324 ++ ufshcd_writel(hba, ~(1 << pos),
2325 ++ REG_UTP_TRANSFER_REQ_LIST_CLEAR);
2326 + }
2327 +
2328 + /**
2329 +@@ -682,7 +688,10 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
2330 + */
2331 + static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
2332 + {
2333 +- ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
2334 ++ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
2335 ++ ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
2336 ++ else
2337 ++ ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
2338 + }
2339 +
2340 + /**
2341 +@@ -2166,8 +2175,14 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2342 + return sg_segments;
2343 +
2344 + if (sg_segments) {
2345 +- lrbp->utr_descriptor_ptr->prd_table_length =
2346 +- cpu_to_le16((u16)sg_segments);
2347 ++
2348 ++ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2349 ++ lrbp->utr_descriptor_ptr->prd_table_length =
2350 ++ cpu_to_le16((sg_segments *
2351 ++ sizeof(struct ufshcd_sg_entry)));
2352 ++ else
2353 ++ lrbp->utr_descriptor_ptr->prd_table_length =
2354 ++ cpu_to_le16((u16) (sg_segments));
2355 +
2356 + prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2357 +
2358 +@@ -3514,11 +3529,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
2359 + cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
2360 +
2361 + /* Response upiu and prdt offset should be in double words */
2362 +- utrdlp[i].response_upiu_offset =
2363 +- cpu_to_le16(response_offset >> 2);
2364 +- utrdlp[i].prd_table_offset = cpu_to_le16(prdt_offset >> 2);
2365 +- utrdlp[i].response_upiu_length =
2366 +- cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
2367 ++ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
2368 ++ utrdlp[i].response_upiu_offset =
2369 ++ cpu_to_le16(response_offset);
2370 ++ utrdlp[i].prd_table_offset =
2371 ++ cpu_to_le16(prdt_offset);
2372 ++ utrdlp[i].response_upiu_length =
2373 ++ cpu_to_le16(ALIGNED_UPIU_SIZE);
2374 ++ } else {
2375 ++ utrdlp[i].response_upiu_offset =
2376 ++ cpu_to_le16(response_offset >> 2);
2377 ++ utrdlp[i].prd_table_offset =
2378 ++ cpu_to_le16(prdt_offset >> 2);
2379 ++ utrdlp[i].response_upiu_length =
2380 ++ cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
2381 ++ }
2382 +
2383 + ufshcd_init_lrb(hba, &hba->lrb[i], i);
2384 + }
2385 +@@ -3548,6 +3573,52 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2386 + "dme-link-startup: error code %d\n", ret);
2387 + return ret;
2388 + }
2389 ++/**
2390 ++ * ufshcd_dme_reset - UIC command for DME_RESET
2391 ++ * @hba: per adapter instance
2392 ++ *
2393 ++ * DME_RESET command is issued in order to reset UniPro stack.
2394 ++ * This function now deals with cold reset.
2395 ++ *
2396 ++ * Returns 0 on success, non-zero value on failure
2397 ++ */
2398 ++static int ufshcd_dme_reset(struct ufs_hba *hba)
2399 ++{
2400 ++ struct uic_command uic_cmd = {0};
2401 ++ int ret;
2402 ++
2403 ++ uic_cmd.command = UIC_CMD_DME_RESET;
2404 ++
2405 ++ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2406 ++ if (ret)
2407 ++ dev_err(hba->dev,
2408 ++ "dme-reset: error code %d\n", ret);
2409 ++
2410 ++ return ret;
2411 ++}
2412 ++
2413 ++/**
2414 ++ * ufshcd_dme_enable - UIC command for DME_ENABLE
2415 ++ * @hba: per adapter instance
2416 ++ *
2417 ++ * DME_ENABLE command is issued in order to enable UniPro stack.
2418 ++ *
2419 ++ * Returns 0 on success, non-zero value on failure
2420 ++ */
2421 ++static int ufshcd_dme_enable(struct ufs_hba *hba)
2422 ++{
2423 ++ struct uic_command uic_cmd = {0};
2424 ++ int ret;
2425 ++
2426 ++ uic_cmd.command = UIC_CMD_DME_ENABLE;
2427 ++
2428 ++ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2429 ++ if (ret)
2430 ++ dev_err(hba->dev,
2431 ++ "dme-reset: error code %d\n", ret);
2432 ++
2433 ++ return ret;
2434 ++}
2435 +
2436 + static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
2437 + {
2438 +@@ -4272,7 +4343,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba)
2439 + }
2440 +
2441 + /**
2442 +- * ufshcd_hba_enable - initialize the controller
2443 ++ * ufshcd_hba_execute_hce - initialize the controller
2444 + * @hba: per adapter instance
2445 + *
2446 + * The controller resets itself and controller firmware initialization
2447 +@@ -4281,7 +4352,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba)
2448 + *
2449 + * Returns 0 on success, non-zero value on failure
2450 + */
2451 +-int ufshcd_hba_enable(struct ufs_hba *hba)
2452 ++static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
2453 + {
2454 + int retry;
2455 +
2456 +@@ -4329,6 +4400,32 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
2457 +
2458 + return 0;
2459 + }
2460 ++
2461 ++int ufshcd_hba_enable(struct ufs_hba *hba)
2462 ++{
2463 ++ int ret;
2464 ++
2465 ++ if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
2466 ++ ufshcd_set_link_off(hba);
2467 ++ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
2468 ++
2469 ++ /* enable UIC related interrupts */
2470 ++ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
2471 ++ ret = ufshcd_dme_reset(hba);
2472 ++ if (!ret) {
2473 ++ ret = ufshcd_dme_enable(hba);
2474 ++ if (!ret)
2475 ++ ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
2476 ++ if (ret)
2477 ++ dev_err(hba->dev,
2478 ++ "Host controller enable failed with non-hce\n");
2479 ++ }
2480 ++ } else {
2481 ++ ret = ufshcd_hba_execute_hce(hba);
2482 ++ }
2483 ++
2484 ++ return ret;
2485 ++}
2486 + EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
2487 +
2488 + static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
2489 +@@ -4727,6 +4824,12 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2490 + /* overall command status of utrd */
2491 + ocs = ufshcd_get_tr_ocs(lrbp);
2492 +
2493 ++ if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
2494 ++ if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
2495 ++ MASK_RSP_UPIU_RESULT)
2496 ++ ocs = OCS_SUCCESS;
2497 ++ }
2498 ++
2499 + switch (ocs) {
2500 + case OCS_SUCCESS:
2501 + result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2502 +@@ -4905,7 +5008,8 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
2503 + * false interrupt if device completes another request after resetting
2504 + * aggregation and before reading the DB.
2505 + */
2506 +- if (ufshcd_is_intr_aggr_allowed(hba))
2507 ++ if (ufshcd_is_intr_aggr_allowed(hba) &&
2508 ++ !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
2509 + ufshcd_reset_intr_aggr(hba);
2510 +
2511 + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2512 +@@ -5909,7 +6013,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
2513 + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
2514 + } while (intr_status && --retries);
2515 +
2516 +- if (retval == IRQ_NONE) {
2517 ++ if (enabled_intr_status && retval == IRQ_NONE) {
2518 + dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
2519 + __func__, intr_status);
2520 + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
2521 +diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
2522 +index 16187be98a94c..4bf98c2295372 100644
2523 +--- a/drivers/scsi/ufs/ufshcd.h
2524 ++++ b/drivers/scsi/ufs/ufshcd.h
2525 +@@ -520,6 +520,41 @@ enum ufshcd_quirks {
2526 + * ops (get_ufs_hci_version) to get the correct version.
2527 + */
2528 + UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5,
2529 ++
2530 ++ /*
2531 ++ * Clear handling for transfer/task request list is just opposite.
2532 ++ */
2533 ++ UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6,
2534 ++
2535 ++ /*
2536 ++ * This quirk needs to be enabled if host controller doesn't allow
2537 ++ * that the interrupt aggregation timer and counter are reset by s/w.
2538 ++ */
2539 ++ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7,
2540 ++
2541 ++ /*
2542 ++ * This quirks needs to be enabled if host controller cannot be
2543 ++ * enabled via HCE register.
2544 ++ */
2545 ++ UFSHCI_QUIRK_BROKEN_HCE = 1 << 8,
2546 ++
2547 ++ /*
2548 ++ * This quirk needs to be enabled if the host controller regards
2549 ++ * resolution of the values of PRDTO and PRDTL in UTRD as byte.
2550 ++ */
2551 ++ UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9,
2552 ++
2553 ++ /*
2554 ++ * This quirk needs to be enabled if the host controller reports
2555 ++ * OCS FATAL ERROR with device error through sense data
2556 ++ */
2557 ++ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10,
2558 ++
2559 ++ /*
2560 ++ * This quirk needs to be enabled if the host controller has
2561 ++ * auto-hibernate capability but it doesn't work.
2562 ++ */
2563 ++ UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
2564 + };
2565 +
2566 + enum ufshcd_caps {
2567 +@@ -786,7 +821,8 @@ return true;
2568 +
2569 + static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
2570 + {
2571 +- return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
2572 ++ return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
2573 ++ !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
2574 + }
2575 +
2576 + static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
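The five new quirk bits follow the usual scheme: each is a distinct power of two, a host controller ORs in what it needs at init (as the Intel EHL vops do above), and core paths test the mask. A self-contained model, with an invented capability bit:

#include <assert.h>

enum quirks {
	QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6,
	QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
};

#define CAP_AUTO_HIBERN8 (1 << 23)   /* invented bit for the demo */

struct hba { unsigned int capabilities, quirks; };

static int auto_hibern8_supported(const struct hba *hba)
{
	return (hba->capabilities & CAP_AUTO_HIBERN8) &&
	       !(hba->quirks & QUIRK_BROKEN_AUTO_HIBERN8);
}

int main(void)
{
	struct hba hba = { .capabilities = CAP_AUTO_HIBERN8 };

	assert(auto_hibern8_supported(&hba));
	hba.quirks |= QUIRK_BROKEN_AUTO_HIBERN8;   /* e.g. Intel EHL */
	assert(!auto_hibern8_supported(&hba));
	return 0;
}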
2577 +diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
2578 +index 8f1f8fca79e37..8eb053803429c 100644
2579 +--- a/drivers/spi/Kconfig
2580 ++++ b/drivers/spi/Kconfig
2581 +@@ -999,4 +999,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
2582 +
2583 + endif # SPI_SLAVE
2584 +
2585 ++config SPI_DYNAMIC
2586 ++ def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
2587 ++
2588 + endif # SPI
2589 +diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
2590 +index 4c643dfc7fbbc..9672cda2f8031 100644
2591 +--- a/drivers/spi/spi-stm32.c
2592 ++++ b/drivers/spi/spi-stm32.c
2593 +@@ -13,6 +13,7 @@
2594 + #include <linux/iopoll.h>
2595 + #include <linux/module.h>
2596 + #include <linux/of_platform.h>
2597 ++#include <linux/pinctrl/consumer.h>
2598 + #include <linux/pm_runtime.h>
2599 + #include <linux/reset.h>
2600 + #include <linux/spi/spi.h>
2601 +@@ -1996,6 +1997,8 @@ static int stm32_spi_remove(struct platform_device *pdev)
2602 +
2603 + pm_runtime_disable(&pdev->dev);
2604 +
2605 ++ pinctrl_pm_select_sleep_state(&pdev->dev);
2606 ++
2607 + return 0;
2608 + }
2609 +
2610 +@@ -2007,13 +2010,18 @@ static int stm32_spi_runtime_suspend(struct device *dev)
2611 +
2612 + clk_disable_unprepare(spi->clk);
2613 +
2614 +- return 0;
2615 ++ return pinctrl_pm_select_sleep_state(dev);
2616 + }
2617 +
2618 + static int stm32_spi_runtime_resume(struct device *dev)
2619 + {
2620 + struct spi_master *master = dev_get_drvdata(dev);
2621 + struct stm32_spi *spi = spi_master_get_devdata(master);
2622 ++ int ret;
2623 ++
2624 ++ ret = pinctrl_pm_select_default_state(dev);
2625 ++ if (ret)
2626 ++ return ret;
2627 +
2628 + return clk_prepare_enable(spi->clk);
2629 + }
2630 +@@ -2043,10 +2051,23 @@ static int stm32_spi_resume(struct device *dev)
2631 + return ret;
2632 +
2633 + ret = spi_master_resume(master);
2634 +- if (ret)
2635 ++ if (ret) {
2636 + clk_disable_unprepare(spi->clk);
2637 ++ return ret;
2638 ++ }
2639 +
2640 +- return ret;
2641 ++ ret = pm_runtime_get_sync(dev);
2642 ++ if (ret) {
2643 ++ dev_err(dev, "Unable to power device:%d\n", ret);
2644 ++ return ret;
2645 ++ }
2646 ++
2647 ++ spi->cfg->config(spi);
2648 ++
2649 ++ pm_runtime_mark_last_busy(dev);
2650 ++ pm_runtime_put_autosuspend(dev);
2651 ++
2652 ++ return 0;
2653 + }
2654 + #endif
2655 +
2656 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
2657 +index 8158e281f3540..5c5a95792c0d3 100644
2658 +--- a/drivers/spi/spi.c
2659 ++++ b/drivers/spi/spi.c
2660 +@@ -475,6 +475,12 @@ static LIST_HEAD(spi_controller_list);
2661 + */
2662 + static DEFINE_MUTEX(board_lock);
2663 +
2664 ++/*
2665 ++ * Prevents addition of devices with same chip select and
2666 ++ * addition of devices below an unregistering controller.
2667 ++ */
2668 ++static DEFINE_MUTEX(spi_add_lock);
2669 ++
2670 + /**
2671 + * spi_alloc_device - Allocate a new SPI device
2672 + * @ctlr: Controller to which device is connected
2673 +@@ -554,7 +560,6 @@ static int spi_dev_check(struct device *dev, void *data)
2674 + */
2675 + int spi_add_device(struct spi_device *spi)
2676 + {
2677 +- static DEFINE_MUTEX(spi_add_lock);
2678 + struct spi_controller *ctlr = spi->controller;
2679 + struct device *dev = ctlr->dev.parent;
2680 + int status;
2681 +@@ -582,6 +587,13 @@ int spi_add_device(struct spi_device *spi)
2682 + goto done;
2683 + }
2684 +
2685 ++ /* Controller may unregister concurrently */
2686 ++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
2687 ++ !device_is_registered(&ctlr->dev)) {
2688 ++ status = -ENODEV;
2689 ++ goto done;
2690 ++ }
2691 ++
2692 + /* Descriptors take precedence */
2693 + if (ctlr->cs_gpiods)
2694 + spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
2695 +@@ -2764,6 +2776,10 @@ void spi_unregister_controller(struct spi_controller *ctlr)
2696 + struct spi_controller *found;
2697 + int id = ctlr->bus_num;
2698 +
2699 ++ /* Prevent addition of new devices, unregister existing ones */
2700 ++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2701 ++ mutex_lock(&spi_add_lock);
2702 ++
2703 + device_for_each_child(&ctlr->dev, NULL, __unregister);
2704 +
2705 + /* First make sure that this controller was ever added */
2706 +@@ -2784,6 +2800,9 @@ void spi_unregister_controller(struct spi_controller *ctlr)
2707 + if (found == ctlr)
2708 + idr_remove(&spi_master_idr, id);
2709 + mutex_unlock(&board_lock);
2710 ++
2711 ++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2712 ++ mutex_unlock(&spi_add_lock);
2713 + }
2714 + EXPORT_SYMBOL_GPL(spi_unregister_controller);
2715 +
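Hoisting spi_add_lock to file scope lets spi_unregister_controller() hold the same mutex that guards device addition, so spi_add_device() can re-check registration under the lock. A compact userspace model of the closed race (the -1 stands in for -ENODEV):

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t add_lock = PTHREAD_MUTEX_INITIALIZER;
static int ctlr_registered = 1;
static int ndevices;

static int add_device(void)
{
	int ret = 0;

	pthread_mutex_lock(&add_lock);
	if (!ctlr_registered)
		ret = -1;                /* controller already going away */
	else
		ndevices++;
	pthread_mutex_unlock(&add_lock);
	return ret;
}

static void unregister_controller(void)
{
	pthread_mutex_lock(&add_lock);   /* blocks concurrent add_device() */
	ctlr_registered = 0;
	pthread_mutex_unlock(&add_lock);
}

int main(void)
{
	assert(add_device() == 0);
	unregister_controller();
	assert(add_device() == -1);      /* addition now refused */
	return 0;
}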
2716 +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
2717 +index 560bfec933bc3..63cca0e1e9123 100644
2718 +--- a/drivers/target/target_core_user.c
2719 ++++ b/drivers/target/target_core_user.c
2720 +@@ -601,7 +601,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
2721 + size = round_up(size+offset, PAGE_SIZE);
2722 +
2723 + while (size) {
2724 +- flush_dcache_page(virt_to_page(start));
2725 ++ flush_dcache_page(vmalloc_to_page(start));
2726 + start += PAGE_SIZE;
2727 + size -= PAGE_SIZE;
2728 + }
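The one-word tcmu change above deserves a gloss: the data ring is vmalloc()ed, and virt_to_page() is only valid for addresses in the kernel's linear map. A simplified, non-buildable sketch of the distinction -- the helper names are real, but the bodies are illustrative restatements only:

/* Linear-map addresses only: pure arithmetic, no page-table walk.
 * Handing it a vmalloc address yields a struct page for whatever
 * physical frame the arithmetic happens to land on. */
static struct page *virt_to_page_sketch(const void *addr)
{
	return pfn_to_page(__pa(addr) >> PAGE_SHIFT);
}

/* vmalloc space is virtually contiguous but physically scattered, so
 * the real helper (mm/vmalloc.c) walks the page tables instead. */
struct page *vmalloc_to_page(const void *addr);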
2729 +diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
2730 +index 86a02aff8735f..61ca8ab165dc1 100644
2731 +--- a/drivers/vfio/pci/vfio_pci_private.h
2732 ++++ b/drivers/vfio/pci/vfio_pci_private.h
2733 +@@ -33,12 +33,14 @@
2734 +
2735 + struct vfio_pci_ioeventfd {
2736 + struct list_head next;
2737 ++ struct vfio_pci_device *vdev;
2738 + struct virqfd *virqfd;
2739 + void __iomem *addr;
2740 + uint64_t data;
2741 + loff_t pos;
2742 + int bar;
2743 + int count;
2744 ++ bool test_mem;
2745 + };
2746 +
2747 + struct vfio_pci_irq_ctx {
2748 +diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
2749 +index 916b184df3a5b..9e353c484ace2 100644
2750 +--- a/drivers/vfio/pci/vfio_pci_rdwr.c
2751 ++++ b/drivers/vfio/pci/vfio_pci_rdwr.c
2752 +@@ -37,17 +37,70 @@
2753 + #define vfio_ioread8 ioread8
2754 + #define vfio_iowrite8 iowrite8
2755 +
2756 ++#define VFIO_IOWRITE(size) \
2757 ++static int vfio_pci_iowrite##size(struct vfio_pci_device *vdev, \
2758 ++ bool test_mem, u##size val, void __iomem *io) \
2759 ++{ \
2760 ++ if (test_mem) { \
2761 ++ down_read(&vdev->memory_lock); \
2762 ++ if (!__vfio_pci_memory_enabled(vdev)) { \
2763 ++ up_read(&vdev->memory_lock); \
2764 ++ return -EIO; \
2765 ++ } \
2766 ++ } \
2767 ++ \
2768 ++ vfio_iowrite##size(val, io); \
2769 ++ \
2770 ++ if (test_mem) \
2771 ++ up_read(&vdev->memory_lock); \
2772 ++ \
2773 ++ return 0; \
2774 ++}
2775 ++
2776 ++VFIO_IOWRITE(8)
2777 ++VFIO_IOWRITE(16)
2778 ++VFIO_IOWRITE(32)
2779 ++#ifdef iowrite64
2780 ++VFIO_IOWRITE(64)
2781 ++#endif
2782 ++
2783 ++#define VFIO_IOREAD(size) \
2784 ++static int vfio_pci_ioread##size(struct vfio_pci_device *vdev, \
2785 ++ bool test_mem, u##size *val, void __iomem *io) \
2786 ++{ \
2787 ++ if (test_mem) { \
2788 ++ down_read(&vdev->memory_lock); \
2789 ++ if (!__vfio_pci_memory_enabled(vdev)) { \
2790 ++ up_read(&vdev->memory_lock); \
2791 ++ return -EIO; \
2792 ++ } \
2793 ++ } \
2794 ++ \
2795 ++ *val = vfio_ioread##size(io); \
2796 ++ \
2797 ++ if (test_mem) \
2798 ++ up_read(&vdev->memory_lock); \
2799 ++ \
2800 ++ return 0; \
2801 ++}
2802 ++
2803 ++VFIO_IOREAD(8)
2804 ++VFIO_IOREAD(16)
2805 ++VFIO_IOREAD(32)
2806 ++
2807 + /*
2808 + * Read or write from an __iomem region (MMIO or I/O port) with an excluded
2809 + * range which is inaccessible. The excluded range drops writes and fills
2810 + * reads with -1. This is intended for handling MSI-X vector tables and
2811 + * leftover space for ROM BARs.
2812 + */
2813 +-static ssize_t do_io_rw(void __iomem *io, char __user *buf,
2814 ++static ssize_t do_io_rw(struct vfio_pci_device *vdev, bool test_mem,
2815 ++ void __iomem *io, char __user *buf,
2816 + loff_t off, size_t count, size_t x_start,
2817 + size_t x_end, bool iswrite)
2818 + {
2819 + ssize_t done = 0;
2820 ++ int ret;
2821 +
2822 + while (count) {
2823 + size_t fillable, filled;
2824 +@@ -66,9 +119,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
2825 + if (copy_from_user(&val, buf, 4))
2826 + return -EFAULT;
2827 +
2828 +- vfio_iowrite32(val, io + off);
2829 ++ ret = vfio_pci_iowrite32(vdev, test_mem,
2830 ++ val, io + off);
2831 ++ if (ret)
2832 ++ return ret;
2833 + } else {
2834 +- val = vfio_ioread32(io + off);
2835 ++ ret = vfio_pci_ioread32(vdev, test_mem,
2836 ++ &val, io + off);
2837 ++ if (ret)
2838 ++ return ret;
2839 +
2840 + if (copy_to_user(buf, &val, 4))
2841 + return -EFAULT;
2842 +@@ -82,9 +141,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
2843 + if (copy_from_user(&val, buf, 2))
2844 + return -EFAULT;
2845 +
2846 +- vfio_iowrite16(val, io + off);
2847 ++ ret = vfio_pci_iowrite16(vdev, test_mem,
2848 ++ val, io + off);
2849 ++ if (ret)
2850 ++ return ret;
2851 + } else {
2852 +- val = vfio_ioread16(io + off);
2853 ++ ret = vfio_pci_ioread16(vdev, test_mem,
2854 ++ &val, io + off);
2855 ++ if (ret)
2856 ++ return ret;
2857 +
2858 + if (copy_to_user(buf, &val, 2))
2859 + return -EFAULT;
2860 +@@ -98,9 +163,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
2861 + if (copy_from_user(&val, buf, 1))
2862 + return -EFAULT;
2863 +
2864 +- vfio_iowrite8(val, io + off);
2865 ++ ret = vfio_pci_iowrite8(vdev, test_mem,
2866 ++ val, io + off);
2867 ++ if (ret)
2868 ++ return ret;
2869 + } else {
2870 +- val = vfio_ioread8(io + off);
2871 ++ ret = vfio_pci_ioread8(vdev, test_mem,
2872 ++ &val, io + off);
2873 ++ if (ret)
2874 ++ return ret;
2875 +
2876 + if (copy_to_user(buf, &val, 1))
2877 + return -EFAULT;
2878 +@@ -178,14 +249,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
2879 +
2880 + count = min(count, (size_t)(end - pos));
2881 +
2882 +- if (res->flags & IORESOURCE_MEM) {
2883 +- down_read(&vdev->memory_lock);
2884 +- if (!__vfio_pci_memory_enabled(vdev)) {
2885 +- up_read(&vdev->memory_lock);
2886 +- return -EIO;
2887 +- }
2888 +- }
2889 +-
2890 + if (bar == PCI_ROM_RESOURCE) {
2891 + /*
2892 + * The ROM can fill less space than the BAR, so we start the
2893 +@@ -213,7 +276,8 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
2894 + x_end = vdev->msix_offset + vdev->msix_size;
2895 + }
2896 +
2897 +- done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);
2898 ++ done = do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos,
2899 ++ count, x_start, x_end, iswrite);
2900 +
2901 + if (done >= 0)
2902 + *ppos += done;
2903 +@@ -221,9 +285,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
2904 + if (bar == PCI_ROM_RESOURCE)
2905 + pci_unmap_rom(pdev, io);
2906 + out:
2907 +- if (res->flags & IORESOURCE_MEM)
2908 +- up_read(&vdev->memory_lock);
2909 +-
2910 + return done;
2911 + }
2912 +
2913 +@@ -278,7 +339,12 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
2914 + return ret;
2915 + }
2916 +
2917 +- done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite);
2918 ++ /*
2919 ++ * VGA MMIO is a legacy, non-BAR resource that hopefully allows
2920 ++ * probing, so we don't currently worry about access in relation
2921 ++ * to the memory enable bit in the command register.
2922 ++ */
2923 ++ done = do_io_rw(vdev, false, iomem, buf, off, count, 0, 0, iswrite);
2924 +
2925 + vga_put(vdev->pdev, rsrc);
2926 +
2927 +@@ -296,17 +362,21 @@ static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
2928 +
2929 + switch (ioeventfd->count) {
2930 + case 1:
2931 +- vfio_iowrite8(ioeventfd->data, ioeventfd->addr);
2932 ++ vfio_pci_iowrite8(ioeventfd->vdev, ioeventfd->test_mem,
2933 ++ ioeventfd->data, ioeventfd->addr);
2934 + break;
2935 + case 2:
2936 +- vfio_iowrite16(ioeventfd->data, ioeventfd->addr);
2937 ++ vfio_pci_iowrite16(ioeventfd->vdev, ioeventfd->test_mem,
2938 ++ ioeventfd->data, ioeventfd->addr);
2939 + break;
2940 + case 4:
2941 +- vfio_iowrite32(ioeventfd->data, ioeventfd->addr);
2942 ++ vfio_pci_iowrite32(ioeventfd->vdev, ioeventfd->test_mem,
2943 ++ ioeventfd->data, ioeventfd->addr);
2944 + break;
2945 + #ifdef iowrite64
2946 + case 8:
2947 +- vfio_iowrite64(ioeventfd->data, ioeventfd->addr);
2948 ++ vfio_pci_iowrite64(ioeventfd->vdev, ioeventfd->test_mem,
2949 ++ ioeventfd->data, ioeventfd->addr);
2950 + break;
2951 + #endif
2952 + }
2953 +@@ -378,11 +448,13 @@ long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
2954 + goto out_unlock;
2955 + }
2956 +
2957 ++ ioeventfd->vdev = vdev;
2958 + ioeventfd->addr = vdev->barmap[bar] + pos;
2959 + ioeventfd->data = data;
2960 + ioeventfd->pos = pos;
2961 + ioeventfd->bar = bar;
2962 + ioeventfd->count = count;
2963 ++ ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM;
2964 +
2965 + ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
2966 + NULL, NULL, &ioeventfd->virqfd, fd);
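The VFIO_IOWRITE()/VFIO_IOREAD() generators above are a common kernel macro pattern: stamp out one correctly-typed helper per access width so the policy check lives in a single place. A self-contained userspace miniature (the -1 stands in for -EIO):

#include <assert.h>
#include <stdint.h>

static int mem_enabled = 1;

#define DEFINE_WRITE(size)						\
static int write##size(uint##size##_t val, uint##size##_t *io)		\
{									\
	if (!mem_enabled)						\
		return -1;	/* decode disabled: refuse access */	\
	*io = val;							\
	return 0;							\
}

DEFINE_WRITE(8)
DEFINE_WRITE(16)
DEFINE_WRITE(32)

int main(void)
{
	uint16_t reg = 0;

	assert(write16(0xabcd, &reg) == 0 && reg == 0xabcd);
	mem_enabled = 0;                  /* memory space disabled */
	assert(write16(0x1234, &reg) == -1 && reg == 0xabcd);
	return 0;
}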
2967 +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
2968 +index 5e556ac9102a5..f48f0db908a46 100644
2969 +--- a/drivers/vfio/vfio_iommu_type1.c
2970 ++++ b/drivers/vfio/vfio_iommu_type1.c
2971 +@@ -1422,13 +1422,16 @@ static int vfio_bus_type(struct device *dev, void *data)
2972 + static int vfio_iommu_replay(struct vfio_iommu *iommu,
2973 + struct vfio_domain *domain)
2974 + {
2975 +- struct vfio_domain *d;
2976 ++ struct vfio_domain *d = NULL;
2977 + struct rb_node *n;
2978 + unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
2979 + int ret;
2980 +
2981 + /* Arbitrarily pick the first domain in the list for lookups */
2982 +- d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
2983 ++ if (!list_empty(&iommu->domain_list))
2984 ++ d = list_first_entry(&iommu->domain_list,
2985 ++ struct vfio_domain, next);
2986 ++
2987 + n = rb_first(&iommu->dma_list);
2988 +
2989 + for (; n; n = rb_next(n)) {
2990 +@@ -1446,6 +1449,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
2991 + phys_addr_t p;
2992 + dma_addr_t i;
2993 +
2994 ++ if (WARN_ON(!d)) { /* mapped w/o a domain?! */
2995 ++ ret = -EINVAL;
2996 ++ goto unwind;
2997 ++ }
2998 ++
2999 + phys = iommu_iova_to_phys(d->domain, iova);
3000 +
3001 + if (WARN_ON(!phys)) {
3002 +@@ -1475,7 +1483,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
3003 + if (npage <= 0) {
3004 + WARN_ON(!npage);
3005 + ret = (int)npage;
3006 +- return ret;
3007 ++ goto unwind;
3008 + }
3009 +
3010 + phys = pfn << PAGE_SHIFT;
3011 +@@ -1484,14 +1492,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
3012 +
3013 + ret = iommu_map(domain->domain, iova, phys,
3014 + size, dma->prot | domain->prot);
3015 +- if (ret)
3016 +- return ret;
3017 ++ if (ret) {
3018 ++ if (!dma->iommu_mapped)
3019 ++ vfio_unpin_pages_remote(dma, iova,
3020 ++ phys >> PAGE_SHIFT,
3021 ++ size >> PAGE_SHIFT,
3022 ++ true);
3023 ++ goto unwind;
3024 ++ }
3025 +
3026 + iova += size;
3027 + }
3028 ++ }
3029 ++
3030 ++ /* All dmas are now mapped, defer to second tree walk for unwind */
3031 ++ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
3032 ++ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
3033 ++
3034 + dma->iommu_mapped = true;
3035 + }
3036 ++
3037 + return 0;
3038 ++
3039 ++unwind:
3040 ++ for (; n; n = rb_prev(n)) {
3041 ++ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
3042 ++ dma_addr_t iova;
3043 ++
3044 ++ if (dma->iommu_mapped) {
3045 ++ iommu_unmap(domain->domain, dma->iova, dma->size);
3046 ++ continue;
3047 ++ }
3048 ++
3049 ++ iova = dma->iova;
3050 ++ while (iova < dma->iova + dma->size) {
3051 ++ phys_addr_t phys, p;
3052 ++ size_t size;
3053 ++ dma_addr_t i;
3054 ++
3055 ++ phys = iommu_iova_to_phys(domain->domain, iova);
3056 ++ if (!phys) {
3057 ++ iova += PAGE_SIZE;
3058 ++ continue;
3059 ++ }
3060 ++
3061 ++ size = PAGE_SIZE;
3062 ++ p = phys + size;
3063 ++ i = iova + size;
3064 ++ while (i < dma->iova + dma->size &&
3065 ++ p == iommu_iova_to_phys(domain->domain, i)) {
3066 ++ size += PAGE_SIZE;
3067 ++ p += PAGE_SIZE;
3068 ++ i += PAGE_SIZE;
3069 ++ }
3070 ++
3071 ++ iommu_unmap(domain->domain, iova, size);
3072 ++ vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
3073 ++ size >> PAGE_SHIFT, true);
3074 ++ }
3075 ++ }
3076 ++
3077 ++ return ret;
3078 + }
3079 +
3080 + /*
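
The replay fix above turns "return on first iommu_map() failure" into a full unwind. Stripped of the vfio pinning bookkeeping, and with a hypothetical struct range standing in for the dma list, the pattern is:

    #include <linux/iommu.h>

    struct range { unsigned long iova; phys_addr_t phys; size_t size; };

    /* Sketch: establish every mapping or none of them. */
    static int map_all_or_nothing(struct iommu_domain *dom,
                                  const struct range *r, int n)
    {
        int i, ret;

        for (i = 0; i < n; i++) {
            ret = iommu_map(dom, r[i].iova, r[i].phys, r[i].size,
                            IOMMU_READ | IOMMU_WRITE);
            if (ret)
                goto unwind;
        }
        return 0;

    unwind:
        while (--i >= 0)        /* undo only what succeeded */
            iommu_unmap(dom, r[i].iova, r[i].size);
        return ret;
    }

The in-tree unwind additionally re-derives contiguous physical runs via iommu_iova_to_phys() before unpinning, which is why it walks page by page rather than per range.
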
3081 +diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
3082 +index 65491ae74808d..e57c00824965c 100644
3083 +--- a/drivers/video/fbdev/efifb.c
3084 ++++ b/drivers/video/fbdev/efifb.c
3085 +@@ -453,7 +453,7 @@ static int efifb_probe(struct platform_device *dev)
3086 + info->apertures->ranges[0].base = efifb_fix.smem_start;
3087 + info->apertures->ranges[0].size = size_remap;
3088 +
3089 +- if (efi_enabled(EFI_BOOT) &&
3090 ++ if (efi_enabled(EFI_MEMMAP) &&
3091 + !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
3092 + if ((efifb_fix.smem_start + efifb_fix.smem_len) >
3093 + (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
3094 +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
3095 +index 58b96baa8d488..4f7c73e6052f6 100644
3096 +--- a/drivers/virtio/virtio_ring.c
3097 ++++ b/drivers/virtio/virtio_ring.c
3098 +@@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
3099 + {
3100 + struct vring_virtqueue *vq = to_vvq(_vq);
3101 +
3102 ++ if (unlikely(vq->broken))
3103 ++ return false;
3104 ++
3105 + virtio_mb(vq->weak_barriers);
3106 + return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
3107 + virtqueue_poll_split(_vq, last_used_idx);
3108 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
3109 +index b6d27762c6f8c..5fbadd07819bd 100644
3110 +--- a/drivers/xen/swiotlb-xen.c
3111 ++++ b/drivers/xen/swiotlb-xen.c
3112 +@@ -335,6 +335,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
3113 + int order = get_order(size);
3114 + phys_addr_t phys;
3115 + u64 dma_mask = DMA_BIT_MASK(32);
3116 ++ struct page *page;
3117 +
3118 + if (hwdev && hwdev->coherent_dma_mask)
3119 + dma_mask = hwdev->coherent_dma_mask;
3120 +@@ -346,9 +347,14 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
3121 + /* Convert the size to actually allocated. */
3122 + size = 1UL << (order + XEN_PAGE_SHIFT);
3123 +
3124 ++ if (is_vmalloc_addr(vaddr))
3125 ++ page = vmalloc_to_page(vaddr);
3126 ++ else
3127 ++ page = virt_to_page(vaddr);
3128 ++
3129 + if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
3130 + range_straddles_page_boundary(phys, size)) &&
3131 +- TestClearPageXenRemapped(virt_to_page(vaddr)))
3132 ++ TestClearPageXenRemapped(page))
3133 + xen_destroy_contiguous_region(phys, order);
3134 +
3135 + xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
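
The swiotlb-xen change works around virt_to_page() being valid only for linear-map addresses; a vmalloc'd coherent buffer needs vmalloc_to_page(). A minimal statement of the rule:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Sketch: resolve a kernel virtual address to its struct page,
     * whichever mapping it came from. */
    static struct page *any_vaddr_to_page(void *vaddr)
    {
        if (is_vmalloc_addr(vaddr))
            return vmalloc_to_page(vaddr);
        return virt_to_page(vaddr);    /* linear map only */
    }
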
3136 +diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
3137 +index b79879aacc02e..7b784af604fd9 100644
3138 +--- a/fs/afs/dynroot.c
3139 ++++ b/fs/afs/dynroot.c
3140 +@@ -382,15 +382,17 @@ void afs_dynroot_depopulate(struct super_block *sb)
3141 + net->dynroot_sb = NULL;
3142 + mutex_unlock(&net->proc_cells_lock);
3143 +
3144 +- inode_lock(root->d_inode);
3145 +-
3146 +- /* Remove all the pins for dirs created for manually added cells */
3147 +- list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
3148 +- if (subdir->d_fsdata) {
3149 +- subdir->d_fsdata = NULL;
3150 +- dput(subdir);
3151 ++ if (root) {
3152 ++ inode_lock(root->d_inode);
3153 ++
3154 ++ /* Remove all the pins for dirs created for manually added cells */
3155 ++ list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
3156 ++ if (subdir->d_fsdata) {
3157 ++ subdir->d_fsdata = NULL;
3158 ++ dput(subdir);
3159 ++ }
3160 + }
3161 +- }
3162 +
3163 +- inode_unlock(root->d_inode);
3164 ++ inode_unlock(root->d_inode);
3165 ++ }
3166 + }
3167 +diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
3168 +index 24fd163c6323e..97cab12b0a6c2 100644
3169 +--- a/fs/afs/fs_operation.c
3170 ++++ b/fs/afs/fs_operation.c
3171 +@@ -235,6 +235,7 @@ int afs_put_operation(struct afs_operation *op)
3172 + afs_end_cursor(&op->ac);
3173 + afs_put_serverlist(op->net, op->server_list);
3174 + afs_put_volume(op->net, op->volume, afs_volume_trace_put_put_op);
3175 ++ key_put(op->key);
3176 + kfree(op);
3177 + return ret;
3178 + }
3179 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
3180 +index dea971f9d89ee..946f9a92658ab 100644
3181 +--- a/fs/ceph/mds_client.c
3182 ++++ b/fs/ceph/mds_client.c
3183 +@@ -4361,7 +4361,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
3184 + goto err_mdsc;
3185 + }
3186 +
3187 +- fsc->mdsc = mdsc;
3188 + init_completion(&mdsc->safe_umount_waiters);
3189 + init_waitqueue_head(&mdsc->session_close_wq);
3190 + INIT_LIST_HEAD(&mdsc->waiting_for_map);
3191 +@@ -4416,6 +4415,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
3192 +
3193 + strscpy(mdsc->nodename, utsname()->nodename,
3194 + sizeof(mdsc->nodename));
3195 ++
3196 ++ fsc->mdsc = mdsc;
3197 + return 0;
3198 +
3199 + err_mdsmap:
3200 +diff --git a/fs/eventpoll.c b/fs/eventpoll.c
3201 +index 12eebcdea9c8a..e0decff22ae27 100644
3202 +--- a/fs/eventpoll.c
3203 ++++ b/fs/eventpoll.c
3204 +@@ -1994,9 +1994,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
3205 + * not already there, and calling reverse_path_check()
3206 + * during ep_insert().
3207 + */
3208 +- if (list_empty(&epi->ffd.file->f_tfile_llink))
3209 ++ if (list_empty(&epi->ffd.file->f_tfile_llink)) {
3210 ++ get_file(epi->ffd.file);
3211 + list_add(&epi->ffd.file->f_tfile_llink,
3212 + &tfile_check_list);
3213 ++ }
3214 + }
3215 + }
3216 + mutex_unlock(&ep->mtx);
3217 +@@ -2040,6 +2042,7 @@ static void clear_tfile_check_list(void)
3218 + file = list_first_entry(&tfile_check_list, struct file,
3219 + f_tfile_llink);
3220 + list_del_init(&file->f_tfile_llink);
3221 ++ fput(file);
3222 + }
3223 + INIT_LIST_HEAD(&tfile_check_list);
3224 + }
3225 +@@ -2200,25 +2203,22 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
3226 + full_check = 1;
3227 + if (is_file_epoll(tf.file)) {
3228 + error = -ELOOP;
3229 +- if (ep_loop_check(ep, tf.file) != 0) {
3230 +- clear_tfile_check_list();
3231 ++ if (ep_loop_check(ep, tf.file) != 0)
3232 + goto error_tgt_fput;
3233 +- }
3234 +- } else
3235 ++ } else {
3236 ++ get_file(tf.file);
3237 + list_add(&tf.file->f_tfile_llink,
3238 + &tfile_check_list);
3239 ++ }
3240 + error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
3241 +- if (error) {
3242 +-out_del:
3243 +- list_del(&tf.file->f_tfile_llink);
3244 ++ if (error)
3245 + goto error_tgt_fput;
3246 +- }
3247 + if (is_file_epoll(tf.file)) {
3248 + tep = tf.file->private_data;
3249 + error = epoll_mutex_lock(&tep->mtx, 1, nonblock);
3250 + if (error) {
3251 + mutex_unlock(&ep->mtx);
3252 +- goto out_del;
3253 ++ goto error_tgt_fput;
3254 + }
3255 + }
3256 + }
3257 +@@ -2239,8 +2239,6 @@ out_del:
3258 + error = ep_insert(ep, epds, tf.file, fd, full_check);
3259 + } else
3260 + error = -EEXIST;
3261 +- if (full_check)
3262 +- clear_tfile_check_list();
3263 + break;
3264 + case EPOLL_CTL_DEL:
3265 + if (epi)
3266 +@@ -2263,8 +2261,10 @@ out_del:
3267 + mutex_unlock(&ep->mtx);
3268 +
3269 + error_tgt_fput:
3270 +- if (full_check)
3271 ++ if (full_check) {
3272 ++ clear_tfile_check_list();
3273 + mutex_unlock(&epmutex);
3274 ++ }
3275 +
3276 + fdput(tf);
3277 + error_fput:
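
The epoll fix is reference counting: tfile_check_list used to hold bare struct file pointers, so a concurrent close could free a file still on the list. Membership now owns a reference, taken at add and dropped at remove. In isolation (helper names are illustrative):

    #include <linux/fs.h>
    #include <linux/file.h>
    #include <linux/list.h>

    static void check_list_add(struct file *file, struct list_head *list)
    {
        get_file(file);                 /* list holds a reference */
        list_add(&file->f_tfile_llink, list);
    }

    static void check_list_del(struct file *file)
    {
        list_del_init(&file->f_tfile_llink);
        fput(file);                     /* drop the list's reference */
    }

Moving clear_tfile_check_list() under error_tgt_fput (still inside epmutex) then gives every add exactly one matching drop on all exit paths.
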
3278 +diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
3279 +index 16e9b2fda03ae..e830a9d4e10d3 100644
3280 +--- a/fs/ext4/block_validity.c
3281 ++++ b/fs/ext4/block_validity.c
3282 +@@ -24,6 +24,7 @@ struct ext4_system_zone {
3283 + struct rb_node node;
3284 + ext4_fsblk_t start_blk;
3285 + unsigned int count;
3286 ++ u32 ino;
3287 + };
3288 +
3289 + static struct kmem_cache *ext4_system_zone_cachep;
3290 +@@ -45,7 +46,8 @@ void ext4_exit_system_zone(void)
3291 + static inline int can_merge(struct ext4_system_zone *entry1,
3292 + struct ext4_system_zone *entry2)
3293 + {
3294 +- if ((entry1->start_blk + entry1->count) == entry2->start_blk)
3295 ++ if ((entry1->start_blk + entry1->count) == entry2->start_blk &&
3296 ++ entry1->ino == entry2->ino)
3297 + return 1;
3298 + return 0;
3299 + }
3300 +@@ -66,9 +68,9 @@ static void release_system_zone(struct ext4_system_blocks *system_blks)
3301 + */
3302 + static int add_system_zone(struct ext4_system_blocks *system_blks,
3303 + ext4_fsblk_t start_blk,
3304 +- unsigned int count)
3305 ++ unsigned int count, u32 ino)
3306 + {
3307 +- struct ext4_system_zone *new_entry = NULL, *entry;
3308 ++ struct ext4_system_zone *new_entry, *entry;
3309 + struct rb_node **n = &system_blks->root.rb_node, *node;
3310 + struct rb_node *parent = NULL, *new_node = NULL;
3311 +
3312 +@@ -79,30 +81,21 @@ static int add_system_zone(struct ext4_system_blocks *system_blks,
3313 + n = &(*n)->rb_left;
3314 + else if (start_blk >= (entry->start_blk + entry->count))
3315 + n = &(*n)->rb_right;
3316 +- else {
3317 +- if (start_blk + count > (entry->start_blk +
3318 +- entry->count))
3319 +- entry->count = (start_blk + count -
3320 +- entry->start_blk);
3321 +- new_node = *n;
3322 +- new_entry = rb_entry(new_node, struct ext4_system_zone,
3323 +- node);
3324 +- break;
3325 +- }
3326 ++ else /* Unexpected overlap of system zones. */
3327 ++ return -EFSCORRUPTED;
3328 + }
3329 +
3330 +- if (!new_entry) {
3331 +- new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
3332 +- GFP_KERNEL);
3333 +- if (!new_entry)
3334 +- return -ENOMEM;
3335 +- new_entry->start_blk = start_blk;
3336 +- new_entry->count = count;
3337 +- new_node = &new_entry->node;
3338 +-
3339 +- rb_link_node(new_node, parent, n);
3340 +- rb_insert_color(new_node, &system_blks->root);
3341 +- }
3342 ++ new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
3343 ++ GFP_KERNEL);
3344 ++ if (!new_entry)
3345 ++ return -ENOMEM;
3346 ++ new_entry->start_blk = start_blk;
3347 ++ new_entry->count = count;
3348 ++ new_entry->ino = ino;
3349 ++ new_node = &new_entry->node;
3350 ++
3351 ++ rb_link_node(new_node, parent, n);
3352 ++ rb_insert_color(new_node, &system_blks->root);
3353 +
3354 + /* Can we merge to the left? */
3355 + node = rb_prev(new_node);
3356 +@@ -159,7 +152,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
3357 + static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
3358 + struct ext4_system_blocks *system_blks,
3359 + ext4_fsblk_t start_blk,
3360 +- unsigned int count)
3361 ++ unsigned int count, ino_t ino)
3362 + {
3363 + struct ext4_system_zone *entry;
3364 + struct rb_node *n;
3365 +@@ -180,7 +173,7 @@ static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
3366 + else if (start_blk >= (entry->start_blk + entry->count))
3367 + n = n->rb_right;
3368 + else
3369 +- return 0;
3370 ++ return entry->ino == ino;
3371 + }
3372 + return 1;
3373 + }
3374 +@@ -214,19 +207,18 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
3375 + if (n == 0) {
3376 + i++;
3377 + } else {
3378 +- if (!ext4_data_block_valid_rcu(sbi, system_blks,
3379 +- map.m_pblk, n)) {
3380 +- err = -EFSCORRUPTED;
3381 +- __ext4_error(sb, __func__, __LINE__, -err,
3382 +- map.m_pblk, "blocks %llu-%llu "
3383 +- "from inode %u overlap system zone",
3384 +- map.m_pblk,
3385 +- map.m_pblk + map.m_len - 1, ino);
3386 ++ err = add_system_zone(system_blks, map.m_pblk, n, ino);
3387 ++ if (err < 0) {
3388 ++ if (err == -EFSCORRUPTED) {
3389 ++ __ext4_error(sb, __func__, __LINE__,
3390 ++ -err, map.m_pblk,
3391 ++ "blocks %llu-%llu from inode %u overlap system zone",
3392 ++ map.m_pblk,
3393 ++ map.m_pblk + map.m_len - 1,
3394 ++ ino);
3395 ++ }
3396 + break;
3397 + }
3398 +- err = add_system_zone(system_blks, map.m_pblk, n);
3399 +- if (err < 0)
3400 +- break;
3401 + i += n;
3402 + }
3403 + }
3404 +@@ -280,19 +272,19 @@ int ext4_setup_system_zone(struct super_block *sb)
3405 + ((i < 5) || ((i % flex_size) == 0)))
3406 + add_system_zone(system_blks,
3407 + ext4_group_first_block_no(sb, i),
3408 +- ext4_bg_num_gdb(sb, i) + 1);
3409 ++ ext4_bg_num_gdb(sb, i) + 1, 0);
3410 + gdp = ext4_get_group_desc(sb, i, NULL);
3411 + ret = add_system_zone(system_blks,
3412 +- ext4_block_bitmap(sb, gdp), 1);
3413 ++ ext4_block_bitmap(sb, gdp), 1, 0);
3414 + if (ret)
3415 + goto err;
3416 + ret = add_system_zone(system_blks,
3417 +- ext4_inode_bitmap(sb, gdp), 1);
3418 ++ ext4_inode_bitmap(sb, gdp), 1, 0);
3419 + if (ret)
3420 + goto err;
3421 + ret = add_system_zone(system_blks,
3422 + ext4_inode_table(sb, gdp),
3423 +- sbi->s_itb_per_group);
3424 ++ sbi->s_itb_per_group, 0);
3425 + if (ret)
3426 + goto err;
3427 + }
3428 +@@ -341,7 +333,7 @@ void ext4_release_system_zone(struct super_block *sb)
3429 + call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
3430 + }
3431 +
3432 +-int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
3433 ++int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
3434 + unsigned int count)
3435 + {
3436 + struct ext4_system_blocks *system_blks;
3437 +@@ -353,9 +345,9 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
3438 + * mount option.
3439 + */
3440 + rcu_read_lock();
3441 +- system_blks = rcu_dereference(sbi->system_blks);
3442 +- ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
3443 +- count);
3444 ++ system_blks = rcu_dereference(EXT4_SB(inode->i_sb)->system_blks);
3445 ++ ret = ext4_data_block_valid_rcu(EXT4_SB(inode->i_sb), system_blks,
3446 ++ start_blk, count, inode->i_ino);
3447 + rcu_read_unlock();
3448 + return ret;
3449 + }
3450 +@@ -374,8 +366,7 @@ int ext4_check_blockref(const char *function, unsigned int line,
3451 + while (bref < p+max) {
3452 + blk = le32_to_cpu(*bref++);
3453 + if (blk &&
3454 +- unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
3455 +- blk, 1))) {
3456 ++ unlikely(!ext4_inode_block_valid(inode, blk, 1))) {
3457 + ext4_error_inode(inode, function, line, blk,
3458 + "invalid block");
3459 + return -EFSCORRUPTED;
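
With an owning inode recorded per zone, an overlap between two entries now indicates corruption and is rejected rather than merged away. The descent reduces to interval insertion that refuses overlap; schematically (types and the errno are simplified relative to the ext4 code):

    #include <linux/types.h>
    #include <linux/rbtree.h>

    struct zone { struct rb_node node; u64 start; u32 count; };

    static int zone_insert(struct rb_root *root, struct zone *nz)
    {
        struct rb_node **n = &root->rb_node, *parent = NULL;

        while (*n) {
            struct zone *z = rb_entry(*n, struct zone, node);

            parent = *n;
            if (nz->start + nz->count <= z->start)
                n = &(*n)->rb_left;
            else if (nz->start >= z->start + z->count)
                n = &(*n)->rb_right;
            else
                return -EEXIST;    /* overlap: reject, don't merge */
        }
        rb_link_node(&nz->node, parent, n);
        rb_insert_color(&nz->node, root);
        return 0;
    }
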
3460 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3461 +index 42f5060f3cdf1..42815304902b8 100644
3462 +--- a/fs/ext4/ext4.h
3463 ++++ b/fs/ext4/ext4.h
3464 +@@ -3363,9 +3363,9 @@ extern void ext4_release_system_zone(struct super_block *sb);
3465 + extern int ext4_setup_system_zone(struct super_block *sb);
3466 + extern int __init ext4_init_system_zone(void);
3467 + extern void ext4_exit_system_zone(void);
3468 +-extern int ext4_data_block_valid(struct ext4_sb_info *sbi,
3469 +- ext4_fsblk_t start_blk,
3470 +- unsigned int count);
3471 ++extern int ext4_inode_block_valid(struct inode *inode,
3472 ++ ext4_fsblk_t start_blk,
3473 ++ unsigned int count);
3474 + extern int ext4_check_blockref(const char *, unsigned int,
3475 + struct inode *, __le32 *, unsigned int);
3476 +
3477 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3478 +index 221f240eae604..d75054570e44c 100644
3479 +--- a/fs/ext4/extents.c
3480 ++++ b/fs/ext4/extents.c
3481 +@@ -340,7 +340,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
3482 + */
3483 + if (lblock + len <= lblock)
3484 + return 0;
3485 +- return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
3486 ++ return ext4_inode_block_valid(inode, block, len);
3487 + }
3488 +
3489 + static int ext4_valid_extent_idx(struct inode *inode,
3490 +@@ -348,7 +348,7 @@ static int ext4_valid_extent_idx(struct inode *inode,
3491 + {
3492 + ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
3493 +
3494 +- return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
3495 ++ return ext4_inode_block_valid(inode, block, 1);
3496 + }
3497 +
3498 + static int ext4_valid_extent_entries(struct inode *inode,
3499 +@@ -507,14 +507,10 @@ __read_extent_tree_block(const char *function, unsigned int line,
3500 + }
3501 + if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
3502 + return bh;
3503 +- if (!ext4_has_feature_journal(inode->i_sb) ||
3504 +- (inode->i_ino !=
3505 +- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
3506 +- err = __ext4_ext_check(function, line, inode,
3507 +- ext_block_hdr(bh), depth, pblk);
3508 +- if (err)
3509 +- goto errout;
3510 +- }
3511 ++ err = __ext4_ext_check(function, line, inode,
3512 ++ ext_block_hdr(bh), depth, pblk);
3513 ++ if (err)
3514 ++ goto errout;
3515 + set_buffer_verified(bh);
3516 + /*
3517 + * If this is a leaf block, cache all of its entries
3518 +diff --git a/fs/ext4/file.c b/fs/ext4/file.c
3519 +index 2a01e31a032c4..8f742b53f1d40 100644
3520 +--- a/fs/ext4/file.c
3521 ++++ b/fs/ext4/file.c
3522 +@@ -428,6 +428,10 @@ restart:
3523 + */
3524 + if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
3525 + !ext4_overwrite_io(inode, offset, count))) {
3526 ++ if (iocb->ki_flags & IOCB_NOWAIT) {
3527 ++ ret = -EAGAIN;
3528 ++ goto out;
3529 ++ }
3530 + inode_unlock_shared(inode);
3531 + *ilock_shared = false;
3532 + inode_lock(inode);
3533 +diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
3534 +index be2b66eb65f7a..4026418257121 100644
3535 +--- a/fs/ext4/indirect.c
3536 ++++ b/fs/ext4/indirect.c
3537 +@@ -858,8 +858,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
3538 + else if (ext4_should_journal_data(inode))
3539 + flags |= EXT4_FREE_BLOCKS_FORGET;
3540 +
3541 +- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
3542 +- count)) {
3543 ++ if (!ext4_inode_block_valid(inode, block_to_free, count)) {
3544 + EXT4_ERROR_INODE(inode, "attempt to clear invalid "
3545 + "blocks %llu len %lu",
3546 + (unsigned long long) block_to_free, count);
3547 +@@ -1004,8 +1003,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
3548 + if (!nr)
3549 + continue; /* A hole */
3550 +
3551 +- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
3552 +- nr, 1)) {
3553 ++ if (!ext4_inode_block_valid(inode, nr, 1)) {
3554 + EXT4_ERROR_INODE(inode,
3555 + "invalid indirect mapped "
3556 + "block %lu (level %d)",
3557 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3558 +index 10dd470876b30..92573f8540ab7 100644
3559 +--- a/fs/ext4/inode.c
3560 ++++ b/fs/ext4/inode.c
3561 +@@ -394,8 +394,7 @@ static int __check_block_validity(struct inode *inode, const char *func,
3562 + (inode->i_ino ==
3563 + le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
3564 + return 0;
3565 +- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
3566 +- map->m_len)) {
3567 ++ if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
3568 + ext4_error_inode(inode, func, line, map->m_pblk,
3569 + "lblock %lu mapped to illegal pblock %llu "
3570 + "(length %d)", (unsigned long) map->m_lblk,
3571 +@@ -4760,7 +4759,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
3572 +
3573 + ret = 0;
3574 + if (ei->i_file_acl &&
3575 +- !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
3576 ++ !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
3577 + ext4_error_inode(inode, function, line, 0,
3578 + "iget: bad extended attribute block %llu",
3579 + ei->i_file_acl);
3580 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
3581 +index c0a331e2feb02..38719c156573c 100644
3582 +--- a/fs/ext4/mballoc.c
3583 ++++ b/fs/ext4/mballoc.c
3584 +@@ -3090,7 +3090,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3585 + block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3586 +
3587 + len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3588 +- if (!ext4_data_block_valid(sbi, block, len)) {
3589 ++ if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3590 + ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3591 + "fs metadata", block, block+len);
3592 + /* File system mounted not to panic on error
3593 +@@ -4915,7 +4915,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
3594 +
3595 + sbi = EXT4_SB(sb);
3596 + if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
3597 +- !ext4_data_block_valid(sbi, block, count)) {
3598 ++ !ext4_inode_block_valid(inode, block, count)) {
3599 + ext4_error(sb, "Freeing blocks not in datazone - "
3600 + "block = %llu, count = %lu", block, count);
3601 + goto error_return;
3602 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
3603 +index 56738b538ddf4..a91a5bb8c3a2b 100644
3604 +--- a/fs/ext4/namei.c
3605 ++++ b/fs/ext4/namei.c
3606 +@@ -1396,8 +1396,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
3607 + ext4_match(dir, fname, de)) {
3608 + /* found a match - just to be sure, do
3609 + * a full check */
3610 +- if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
3611 +- bh->b_size, offset))
3612 ++ if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
3613 ++ buf_size, offset))
3614 + return -1;
3615 + *res_dir = de;
3616 + return 1;
3617 +@@ -1858,7 +1858,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
3618 + blocksize, hinfo, map);
3619 + map -= count;
3620 + dx_sort_map(map, count);
3621 +- /* Split the existing block in the middle, size-wise */
3622 ++ /* Ensure that neither split block is over half full */
3623 + size = 0;
3624 + move = 0;
3625 + for (i = count-1; i >= 0; i--) {
3626 +@@ -1868,8 +1868,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
3627 + size += map[i].size;
3628 + move++;
3629 + }
3630 +- /* map index at which we will split */
3631 +- split = count - move;
3632 ++ /*
3633 ++ * map index at which we will split
3634 ++ *
3635 ++ * If the sum of active entries didn't exceed half the block size, just
3636 ++ * split it in half by count; each resulting block will have at least
3637 ++ * half the space free.
3638 ++ */
3639 ++ if (i > 0)
3640 ++ split = count - move;
3641 ++ else
3642 ++ split = count/2;
3643 ++
3644 + hash2 = map[split].hash;
3645 + continued = hash2 == map[split - 1].hash;
3646 + dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
3647 +@@ -2472,7 +2482,7 @@ int ext4_generic_delete_entry(handle_t *handle,
3648 + de = (struct ext4_dir_entry_2 *)entry_buf;
3649 + while (i < buf_size - csum_size) {
3650 + if (ext4_check_dir_entry(dir, NULL, de, bh,
3651 +- bh->b_data, bh->b_size, i))
3652 ++ entry_buf, buf_size, i))
3653 + return -EFSCORRUPTED;
3654 + if (de == de_del) {
3655 + if (pde)
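
The do_split() hunk above handles the case where the size-based walk consumes every map entry (all names hash to one side): the old code then computed split == 0 and read map[split - 1], a negative array index. The selection rule, extracted (the struct is shown only with the fields the rule uses; the real one lives in fs/ext4/namei.c):

    #include <linux/types.h>

    struct dx_map_entry { u32 hash; u16 offs; u16 size; };

    /* Sketch: choose a split index so neither half ends up over half
     * full; fall back to a count split when the sizes are degenerate. */
    static int dx_choose_split(const struct dx_map_entry *map, int count,
                               unsigned int blocksize)
    {
        unsigned int size = 0;
        int i, move = 0;

        for (i = count - 1; i >= 0; i--) {
            if (size + map[i].size > blocksize / 2)
                break;
            size += map[i].size;
            move++;
        }
        /* i <= 0: the tail walk took (almost) everything */
        return i > 0 ? count - move : count / 2;
    }
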
3656 +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
3657 +index f6fbe61b1251e..2390f7943f6c8 100644
3658 +--- a/fs/f2fs/compress.c
3659 ++++ b/fs/f2fs/compress.c
3660 +@@ -1310,6 +1310,12 @@ retry_write:
3661 + congestion_wait(BLK_RW_ASYNC,
3662 + DEFAULT_IO_TIMEOUT);
3663 + lock_page(cc->rpages[i]);
3664 ++
3665 ++ if (!PageDirty(cc->rpages[i])) {
3666 ++ unlock_page(cc->rpages[i]);
3667 ++ continue;
3668 ++ }
3669 ++
3670 + clear_page_dirty_for_io(cc->rpages[i]);
3671 + goto retry_write;
3672 + }
3673 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
3674 +index 03e24df1c84f5..e61ce7fb0958b 100644
3675 +--- a/fs/f2fs/node.c
3676 ++++ b/fs/f2fs/node.c
3677 +@@ -1924,8 +1924,12 @@ continue_unlock:
3678 + goto continue_unlock;
3679 + }
3680 +
3681 +- /* flush inline_data, if it's async context. */
3682 +- if (do_balance && is_inline_node(page)) {
3683 ++		/* flush inline_data/inode if we are in an async context. */
3684 ++ if (!do_balance)
3685 ++ goto write_node;
3686 ++
3687 ++ /* flush inline_data */
3688 ++ if (is_inline_node(page)) {
3689 + clear_inline_node(page);
3690 + unlock_page(page);
3691 + flush_inline_data(sbi, ino_of_node(page));
3692 +@@ -1938,7 +1942,7 @@ continue_unlock:
3693 + if (flush_dirty_inode(page))
3694 + goto lock_node;
3695 + }
3696 +-
3697 ++write_node:
3698 + f2fs_wait_on_page_writeback(page, NODE, true, true);
3699 +
3700 + if (!clear_page_dirty_for_io(page))
3701 +diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
3702 +index bbfe18c074179..f7e3304b78029 100644
3703 +--- a/fs/fat/fatent.c
3704 ++++ b/fs/fat/fatent.c
3705 +@@ -657,6 +657,9 @@ static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra,
3706 + unsigned long ra_pages = sb->s_bdi->ra_pages;
3707 + unsigned int reada_blocks;
3708 +
3709 ++ if (fatent->entry >= ent_limit)
3710 ++ return;
3711 ++
3712 + if (ra_pages > sb->s_bdi->io_pages)
3713 + ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages);
3714 + reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1);
3715 +diff --git a/fs/io_uring.c b/fs/io_uring.c
3716 +index f926d94867f7b..dd8ad87540ef7 100644
3717 +--- a/fs/io_uring.c
3718 ++++ b/fs/io_uring.c
3719 +@@ -7609,6 +7609,33 @@ static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
3720 + return found;
3721 + }
3722 +
3723 ++static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
3724 ++{
3725 ++ return io_match_link(container_of(work, struct io_kiocb, work), data);
3726 ++}
3727 ++
3728 ++static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
3729 ++{
3730 ++ enum io_wq_cancel cret;
3731 ++
3732 ++ /* cancel this particular work, if it's running */
3733 ++ cret = io_wq_cancel_work(ctx->io_wq, &req->work);
3734 ++ if (cret != IO_WQ_CANCEL_NOTFOUND)
3735 ++ return;
3736 ++
3737 ++ /* find links that hold this pending, cancel those */
3738 ++ cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true);
3739 ++ if (cret != IO_WQ_CANCEL_NOTFOUND)
3740 ++ return;
3741 ++
3742 ++ /* if we have a poll link holding this pending, cancel that */
3743 ++ if (io_poll_remove_link(ctx, req))
3744 ++ return;
3745 ++
3746 ++ /* final option, timeout link is holding this req pending */
3747 ++ io_timeout_remove_link(ctx, req);
3748 ++}
3749 ++
3750 + static void io_uring_cancel_files(struct io_ring_ctx *ctx,
3751 + struct files_struct *files)
3752 + {
3753 +@@ -7665,10 +7692,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
3754 + continue;
3755 + }
3756 + } else {
3757 +- io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
3758 +- /* could be a link, check and remove if it is */
3759 +- if (!io_poll_remove_link(ctx, cancel_req))
3760 +- io_timeout_remove_link(ctx, cancel_req);
3761 ++ /* cancel this request, or head link requests */
3762 ++ io_attempt_cancel(ctx, cancel_req);
3763 + io_put_req(cancel_req);
3764 + }
3765 +
3766 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
3767 +index e4944436e733d..5493a0da23ddd 100644
3768 +--- a/fs/jbd2/journal.c
3769 ++++ b/fs/jbd2/journal.c
3770 +@@ -1367,8 +1367,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
3771 + int ret;
3772 +
3773 + /* Buffer got discarded which means block device got invalidated */
3774 +- if (!buffer_mapped(bh))
3775 ++ if (!buffer_mapped(bh)) {
3776 ++ unlock_buffer(bh);
3777 + return -EIO;
3778 ++ }
3779 +
3780 + trace_jbd2_write_superblock(journal, write_flags);
3781 + if (!(journal->j_flags & JBD2_BARRIER))
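
Context for the jbd2 hunk: jbd2_write_superblock() is entered with the superblock buffer locked by its callers, and on the normal path submit_bh() hands the lock to the I/O completion handler. The early return for an unmapped buffer therefore leaked the lock, hanging the next update. The lifecycle, sketched with caller and callee folded together (the real function's error handling is omitted):

    #include <linux/buffer_head.h>

    static int write_locked_sb(struct buffer_head *bh)
    {
        lock_buffer(bh);
        /* ... update superblock fields ... */
        if (!buffer_mapped(bh)) {    /* bdev invalidated under us */
            unlock_buffer(bh);       /* nobody else will */
            return -EIO;
        }
        get_bh(bh);
        bh->b_end_io = end_buffer_write_sync;  /* unlocks on completion */
        return submit_bh(REQ_OP_WRITE, 0, bh);
    }
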
3782 +diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
3783 +index f20cff1194bb6..776493713153f 100644
3784 +--- a/fs/jffs2/dir.c
3785 ++++ b/fs/jffs2/dir.c
3786 +@@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
3787 + int ret;
3788 + uint32_t now = JFFS2_NOW();
3789 +
3790 ++ mutex_lock(&f->sem);
3791 + for (fd = f->dents ; fd; fd = fd->next) {
3792 +- if (fd->ino)
3793 ++ if (fd->ino) {
3794 ++ mutex_unlock(&f->sem);
3795 + return -ENOTEMPTY;
3796 ++ }
3797 + }
3798 ++ mutex_unlock(&f->sem);
3799 +
3800 + ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
3801 + dentry->d_name.len, f, now);
3802 +diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
3803 +index 6b2b4362089e6..b57b3ffcbc327 100644
3804 +--- a/fs/romfs/storage.c
3805 ++++ b/fs/romfs/storage.c
3806 +@@ -217,10 +217,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos,
3807 + size_t limit;
3808 +
3809 + limit = romfs_maxsize(sb);
3810 +- if (pos >= limit)
3811 ++ if (pos >= limit || buflen > limit - pos)
3812 + return -EIO;
3813 +- if (buflen > limit - pos)
3814 +- buflen = limit - pos;
3815 +
3816 + #ifdef CONFIG_ROMFS_ON_MTD
3817 + if (sb->s_mtd)
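
The romfs change also flips semantics: a read crossing the image end used to be silently truncated, and callers that assumed a full read could then expose the uninitialized tail of the destination buffer; now such a read fails outright. Folding the checks keeps the arithmetic overflow-safe, since pos < limit is established before limit - pos is computed:

    #include <linux/types.h>
    #include <linux/errno.h>

    /* Sketch: validate a [pos, pos + buflen) window against an image
     * of size limit; reject rather than truncate. */
    static int check_window(unsigned long pos, size_t buflen, size_t limit)
    {
        if (pos >= limit || buflen > limit - pos)
            return -EIO;
        return 0;
    }
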
3818 +diff --git a/fs/signalfd.c b/fs/signalfd.c
3819 +index 44b6845b071c3..5b78719be4455 100644
3820 +--- a/fs/signalfd.c
3821 ++++ b/fs/signalfd.c
3822 +@@ -314,9 +314,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
3823 + {
3824 + sigset_t mask;
3825 +
3826 +- if (sizemask != sizeof(sigset_t) ||
3827 +- copy_from_user(&mask, user_mask, sizeof(mask)))
3828 ++ if (sizemask != sizeof(sigset_t))
3829 + return -EINVAL;
3830 ++ if (copy_from_user(&mask, user_mask, sizeof(mask)))
3831 ++ return -EFAULT;
3832 + return do_signalfd4(ufd, &mask, flags);
3833 + }
3834 +
3835 +@@ -325,9 +326,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
3836 + {
3837 + sigset_t mask;
3838 +
3839 +- if (sizemask != sizeof(sigset_t) ||
3840 +- copy_from_user(&mask, user_mask, sizeof(mask)))
3841 ++ if (sizemask != sizeof(sigset_t))
3842 + return -EINVAL;
3843 ++ if (copy_from_user(&mask, user_mask, sizeof(mask)))
3844 ++ return -EFAULT;
3845 + return do_signalfd4(ufd, &mask, 0);
3846 + }
3847 +
3848 +diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
3849 +index 76bb1c846845e..8a19773b5a0b7 100644
3850 +--- a/fs/squashfs/block.c
3851 ++++ b/fs/squashfs/block.c
3852 +@@ -87,7 +87,11 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
3853 + int error, i;
3854 + struct bio *bio;
3855 +
3856 +- bio = bio_alloc(GFP_NOIO, page_count);
3857 ++ if (page_count <= BIO_MAX_PAGES)
3858 ++ bio = bio_alloc(GFP_NOIO, page_count);
3859 ++ else
3860 ++ bio = bio_kmalloc(GFP_NOIO, page_count);
3861 ++
3862 + if (!bio)
3863 + return -ENOMEM;
3864 +
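
On the squashfs hunk: bio_alloc() is mempool-backed, so with a sleeping GFP like GFP_NOIO it reliably succeeds — but only up to BIO_MAX_PAGES vecs. A block spanning more pages must fall back to bio_kmalloc(), which has no cap but may return NULL. As a helper:

    #include <linux/bio.h>

    static struct bio *alloc_read_bio(unsigned int nr_pages)
    {
        if (nr_pages <= BIO_MAX_PAGES)
            return bio_alloc(GFP_NOIO, nr_pages);
        return bio_kmalloc(GFP_NOIO, nr_pages);
    }
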
3865 +diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
3866 +index e9f810fc67317..43585850f1546 100644
3867 +--- a/fs/xfs/xfs_sysfs.h
3868 ++++ b/fs/xfs/xfs_sysfs.h
3869 +@@ -32,9 +32,11 @@ xfs_sysfs_init(
3870 + struct xfs_kobj *parent_kobj,
3871 + const char *name)
3872 + {
3873 ++ struct kobject *parent;
3874 ++
3875 ++ parent = parent_kobj ? &parent_kobj->kobject : NULL;
3876 + init_completion(&kobj->complete);
3877 +- return kobject_init_and_add(&kobj->kobject, ktype,
3878 +- &parent_kobj->kobject, "%s", name);
3879 ++ return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
3880 + }
3881 +
3882 + static inline void
3883 +diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
3884 +index c0f73b82c0551..ed0ce8b301b40 100644
3885 +--- a/fs/xfs/xfs_trans_dquot.c
3886 ++++ b/fs/xfs/xfs_trans_dquot.c
3887 +@@ -647,7 +647,7 @@ xfs_trans_dqresv(
3888 + }
3889 + }
3890 + if (ninos > 0) {
3891 +- total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
3892 ++ total_count = dqp->q_res_icount + ninos;
3893 + timer = be32_to_cpu(dqp->q_core.d_itimer);
3894 + warns = be16_to_cpu(dqp->q_core.d_iwarns);
3895 + warnlimit = defq->iwarnlimit;
3896 +diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
3897 +index 56c1e8eb7bb0a..8075f6ae185a1 100644
3898 +--- a/include/linux/pgtable.h
3899 ++++ b/include/linux/pgtable.h
3900 +@@ -117,7 +117,9 @@ static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
3901 + * a shortcut which implies the use of the kernel's pgd, instead
3902 + * of a process's
3903 + */
3904 ++#ifndef pgd_offset_k
3905 + #define pgd_offset_k(address) pgd_offset(&init_mm, (address))
3906 ++#endif
3907 +
3908 + /*
3909 + * In many cases it is known that a virtual address is mapped at PMD or PTE
3910 +diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
3911 +index 917d88edb7b9d..a8ec3b6093fcb 100644
3912 +--- a/include/linux/sched/user.h
3913 ++++ b/include/linux/sched/user.h
3914 +@@ -36,6 +36,9 @@ struct user_struct {
3915 + defined(CONFIG_NET) || defined(CONFIG_IO_URING)
3916 + atomic_long_t locked_vm;
3917 + #endif
3918 ++#ifdef CONFIG_WATCH_QUEUE
3919 ++ atomic_t nr_watches; /* The number of watches this user currently has */
3920 ++#endif
3921 +
3922 + /* Miscellaneous per-user rate limit */
3923 + struct ratelimit_state ratelimit;
3924 +diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
3925 +index ac7869a389990..a4a0fb4f94cc1 100644
3926 +--- a/kernel/bpf/task_iter.c
3927 ++++ b/kernel/bpf/task_iter.c
3928 +@@ -177,10 +177,11 @@ again:
3929 + f = fcheck_files(curr_files, curr_fd);
3930 + if (!f)
3931 + continue;
3932 ++ if (!get_file_rcu(f))
3933 ++ continue;
3934 +
3935 + /* set info->fd */
3936 + info->fd = curr_fd;
3937 +- get_file(f);
3938 + rcu_read_unlock();
3939 + return f;
3940 + }
3941 +diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
3942 +index 5f8b0c52fd2ef..661333c2893d5 100644
3943 +--- a/kernel/events/uprobes.c
3944 ++++ b/kernel/events/uprobes.c
3945 +@@ -205,7 +205,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
3946 + try_to_free_swap(old_page);
3947 + page_vma_mapped_walk_done(&pvmw);
3948 +
3949 +- if (vma->vm_flags & VM_LOCKED)
3950 ++ if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page))
3951 + munlock_vma_page(old_page);
3952 + put_page(old_page);
3953 +
3954 +diff --git a/kernel/relay.c b/kernel/relay.c
3955 +index 72fe443ea78f0..fb4e0c530c080 100644
3956 +--- a/kernel/relay.c
3957 ++++ b/kernel/relay.c
3958 +@@ -197,6 +197,7 @@ free_buf:
3959 + static void relay_destroy_channel(struct kref *kref)
3960 + {
3961 + struct rchan *chan = container_of(kref, struct rchan, kref);
3962 ++ free_percpu(chan->buf);
3963 + kfree(chan);
3964 + }
3965 +
3966 +diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
3967 +index f74020f6bd9d5..0ef8f65bd2d71 100644
3968 +--- a/kernel/watch_queue.c
3969 ++++ b/kernel/watch_queue.c
3970 +@@ -393,6 +393,7 @@ static void free_watch(struct rcu_head *rcu)
3971 + struct watch *watch = container_of(rcu, struct watch, rcu);
3972 +
3973 + put_watch_queue(rcu_access_pointer(watch->queue));
3974 ++ atomic_dec(&watch->cred->user->nr_watches);
3975 + put_cred(watch->cred);
3976 + }
3977 +
3978 +@@ -452,6 +453,13 @@ int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
3979 + watch->cred = get_current_cred();
3980 + rcu_assign_pointer(watch->watch_list, wlist);
3981 +
3982 ++ if (atomic_inc_return(&watch->cred->user->nr_watches) >
3983 ++ task_rlimit(current, RLIMIT_NOFILE)) {
3984 ++ atomic_dec(&watch->cred->user->nr_watches);
3985 ++ put_cred(watch->cred);
3986 ++ return -EAGAIN;
3987 ++ }
3988 ++
3989 + spin_lock_bh(&wqueue->lock);
3990 + kref_get(&wqueue->usage);
3991 + kref_get(&watch->usage);
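
The watch_queue hunk adds per-user accounting so a single user cannot pin unbounded kernel memory with watches; RLIMIT_NOFILE is borrowed as the ceiling. The charge is optimistic — increment first, roll back on failure — which avoids a racy read-then-update:

    #include <linux/sched/signal.h>
    #include <linux/sched/user.h>

    /* Sketch: charge one watch against the user; the matching
     * atomic_dec() runs in free_watch(), as in the hunk above. */
    static int charge_watch(struct user_struct *user)
    {
        if (atomic_inc_return(&user->nr_watches) >
            task_rlimit(current, RLIMIT_NOFILE)) {
            atomic_dec(&user->nr_watches);
            return -EAGAIN;
        }
        return 0;
    }
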
3992 +diff --git a/mm/khugepaged.c b/mm/khugepaged.c
3993 +index ac04b332a373a..1d6a9b0b6a9fd 100644
3994 +--- a/mm/khugepaged.c
3995 ++++ b/mm/khugepaged.c
3996 +@@ -466,7 +466,7 @@ int __khugepaged_enter(struct mm_struct *mm)
3997 + return -ENOMEM;
3998 +
3999 + /* __khugepaged_exit() must not run from under us */
4000 +- VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
4001 ++ VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
4002 + if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
4003 + free_mm_slot(mm_slot);
4004 + return 0;
4005 +diff --git a/mm/memory.c b/mm/memory.c
4006 +index 3ecad55103adb..a279c1a26af7e 100644
4007 +--- a/mm/memory.c
4008 ++++ b/mm/memory.c
4009 +@@ -4248,6 +4248,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4010 + vmf->flags & FAULT_FLAG_WRITE)) {
4011 + update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4012 + } else {
4013 ++ /* Skip spurious TLB flush for retried page fault */
4014 ++ if (vmf->flags & FAULT_FLAG_TRIED)
4015 ++ goto unlock;
4016 + /*
4017 + * This is needed only for protection faults but the arch code
4018 + * is not yet telling us if this is a protection fault or not.
4019 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4020 +index e028b87ce2942..d809242f671f0 100644
4021 +--- a/mm/page_alloc.c
4022 ++++ b/mm/page_alloc.c
4023 +@@ -1306,6 +1306,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
4024 + struct page *page, *tmp;
4025 + LIST_HEAD(head);
4026 +
4027 ++ /*
4028 +	 * Ensure a proper count is passed; otherwise we would get stuck
4029 +	 * in the while (list_empty(list)) loop below.
4030 ++ */
4031 ++ count = min(pcp->count, count);
4032 + while (count) {
4033 + struct list_head *list;
4034 +
4035 +@@ -7881,7 +7886,7 @@ int __meminit init_per_zone_wmark_min(void)
4036 +
4037 + return 0;
4038 + }
4039 +-core_initcall(init_per_zone_wmark_min)
4040 ++postcore_initcall(init_per_zone_wmark_min)
4041 +
4042 + /*
4043 + * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4044 +diff --git a/mm/vmalloc.c b/mm/vmalloc.c
4045 +index 5a2b55c8dd9a7..128d20d2d6cb6 100644
4046 +--- a/mm/vmalloc.c
4047 ++++ b/mm/vmalloc.c
4048 +@@ -102,6 +102,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
4049 + if (pmd_none_or_clear_bad(pmd))
4050 + continue;
4051 + vunmap_pte_range(pmd, addr, next, mask);
4052 ++
4053 ++ cond_resched();
4054 + } while (pmd++, addr = next, addr != end);
4055 + }
4056 +
4057 +diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
4058 +index f7587428febdd..bf9fd6ee88fe0 100644
4059 +--- a/net/can/j1939/socket.c
4060 ++++ b/net/can/j1939/socket.c
4061 +@@ -398,6 +398,7 @@ static int j1939_sk_init(struct sock *sk)
4062 + spin_lock_init(&jsk->sk_session_queue_lock);
4063 + INIT_LIST_HEAD(&jsk->sk_session_queue);
4064 + sk->sk_destruct = j1939_sk_sock_destruct;
4065 ++ sk->sk_protocol = CAN_J1939;
4066 +
4067 + return 0;
4068 + }
4069 +@@ -466,6 +467,14 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
4070 + goto out_release_sock;
4071 + }
4072 +
4073 ++ if (!ndev->ml_priv) {
4074 ++ netdev_warn_once(ndev,
4075 ++ "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
4076 ++ dev_put(ndev);
4077 ++ ret = -ENODEV;
4078 ++ goto out_release_sock;
4079 ++ }
4080 ++
4081 + priv = j1939_netdev_start(ndev);
4082 + dev_put(ndev);
4083 + if (IS_ERR(priv)) {
4084 +@@ -553,6 +562,11 @@ static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
4085 + static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
4086 + const struct j1939_sock *jsk, int peer)
4087 + {
4088 ++ /* There are two holes (2 bytes and 3 bytes) to clear to avoid
4089 ++ * leaking kernel information to user space.
4090 ++ */
4091 ++ memset(addr, 0, J1939_MIN_NAMELEN);
4092 ++
4093 + addr->can_family = AF_CAN;
4094 + addr->can_ifindex = jsk->ifindex;
4095 + addr->can_addr.j1939.pgn = jsk->addr.pgn;
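
The memset in the j1939 hunk above is the standard infoleak fix: sockaddr structures copied to user space must have their padding holes zeroed, because field-by-field assignment leaves stale kernel bytes in the gaps. The pattern (helper name illustrative; the patch zeroes J1939_MIN_NAMELEN bytes rather than the full struct):

    #include <linux/socket.h>
    #include <linux/can.h>
    #include <linux/string.h>

    static void fill_sockaddr_can(struct sockaddr_can *addr, int ifindex)
    {
        memset(addr, 0, sizeof(*addr));  /* clear padding holes first */
        addr->can_family = AF_CAN;
        addr->can_ifindex = ifindex;
        /* ... protocol-specific can_addr fields, as above ... */
    }
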
4096 +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
4097 +index 9f99af5b0b11e..dbd215cbc53d8 100644
4098 +--- a/net/can/j1939/transport.c
4099 ++++ b/net/can/j1939/transport.c
4100 +@@ -352,17 +352,16 @@ void j1939_session_skb_queue(struct j1939_session *session,
4101 + skb_queue_tail(&session->skb_queue, skb);
4102 + }
4103 +
4104 +-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
4105 ++static struct sk_buff *
4106 ++j1939_session_skb_find_by_offset(struct j1939_session *session,
4107 ++				 unsigned int offset_start)
4108 + {
4109 + struct j1939_priv *priv = session->priv;
4110 ++ struct j1939_sk_buff_cb *do_skcb;
4111 + struct sk_buff *skb = NULL;
4112 + struct sk_buff *do_skb;
4113 +- struct j1939_sk_buff_cb *do_skcb;
4114 +- unsigned int offset_start;
4115 + unsigned long flags;
4116 +
4117 +- offset_start = session->pkt.dpo * 7;
4118 +-
4119 + spin_lock_irqsave(&session->skb_queue.lock, flags);
4120 + skb_queue_walk(&session->skb_queue, do_skb) {
4121 + do_skcb = j1939_skb_to_cb(do_skb);
4122 +@@ -382,6 +381,14 @@ static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
4123 + return skb;
4124 + }
4125 +
4126 ++static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
4127 ++{
4128 ++ unsigned int offset_start;
4129 ++
4130 ++ offset_start = session->pkt.dpo * 7;
4131 ++ return j1939_session_skb_find_by_offset(session, offset_start);
4132 ++}
4133 ++
4134 + /* see if we are receiver
4135 + * returns 0 for broadcasts, although we will receive them
4136 + */
4137 +@@ -716,10 +723,12 @@ static int j1939_session_tx_rts(struct j1939_session *session)
4138 + return ret;
4139 +
4140 + session->last_txcmd = dat[0];
4141 +- if (dat[0] == J1939_TP_CMD_BAM)
4142 ++ if (dat[0] == J1939_TP_CMD_BAM) {
4143 + j1939_tp_schedule_txtimer(session, 50);
4144 +-
4145 +- j1939_tp_set_rxtimeout(session, 1250);
4146 ++ j1939_tp_set_rxtimeout(session, 250);
4147 ++ } else {
4148 ++ j1939_tp_set_rxtimeout(session, 1250);
4149 ++ }
4150 +
4151 + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
4152 +
4153 +@@ -766,7 +775,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
4154 + int ret = 0;
4155 + u8 dat[8];
4156 +
4157 +- se_skb = j1939_session_skb_find(session);
4158 ++ se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
4159 + if (!se_skb)
4160 + return -ENOBUFS;
4161 +
4162 +@@ -787,6 +796,18 @@ static int j1939_session_tx_dat(struct j1939_session *session)
4163 + if (len > 7)
4164 + len = 7;
4165 +
4166 ++ if (offset + len > se_skb->len) {
4167 ++ netdev_err_once(priv->ndev,
4168 ++ "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
4169 ++				__func__, session, skcb->offset, se_skb->len, session->pkt.tx);
4170 ++ return -EOVERFLOW;
4171 ++ }
4172 ++
4173 ++ if (!len) {
4174 ++ ret = -ENOBUFS;
4175 ++ break;
4176 ++ }
4177 ++
4178 + memcpy(&dat[1], &tpdat[offset], len);
4179 + ret = j1939_tp_tx_dat(session, dat, len + 1);
4180 + if (ret < 0) {
4181 +@@ -1055,9 +1076,9 @@ static void __j1939_session_cancel(struct j1939_session *session,
4182 + lockdep_assert_held(&session->priv->active_session_list_lock);
4183 +
4184 + session->err = j1939_xtp_abort_to_errno(priv, err);
4185 ++ session->state = J1939_SESSION_WAITING_ABORT;
4186 + /* do not send aborts on incoming broadcasts */
4187 + if (!j1939_cb_is_broadcast(&session->skcb)) {
4188 +- session->state = J1939_SESSION_WAITING_ABORT;
4189 + j1939_xtp_tx_abort(priv, &session->skcb,
4190 + !session->transmission,
4191 + err, session->skcb.addr.pgn);
4192 +@@ -1120,6 +1141,9 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
4193 + * cleanup including propagation of the error to user space.
4194 + */
4195 + break;
4196 ++ case -EOVERFLOW:
4197 ++ j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG);
4198 ++ break;
4199 + case 0:
4200 + session->tx_retry = 0;
4201 + break;
4202 +@@ -1651,8 +1675,12 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
4203 + return;
4204 + }
4205 + session = j1939_xtp_rx_rts_session_new(priv, skb);
4206 +- if (!session)
4207 ++ if (!session) {
4208 ++ if (cmd == J1939_TP_CMD_BAM && j1939_sk_recv_match(priv, skcb))
4209 ++ netdev_info(priv->ndev, "%s: failed to create TP BAM session\n",
4210 ++ __func__);
4211 + return;
4212 ++ }
4213 + } else {
4214 + if (j1939_xtp_rx_rts_session_active(session, skb)) {
4215 + j1939_session_put(session);
4216 +@@ -1661,11 +1689,15 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
4217 + }
4218 + session->last_cmd = cmd;
4219 +
4220 +- j1939_tp_set_rxtimeout(session, 1250);
4221 +-
4222 +- if (cmd != J1939_TP_CMD_BAM && !session->transmission) {
4223 +- j1939_session_txtimer_cancel(session);
4224 +- j1939_tp_schedule_txtimer(session, 0);
4225 ++ if (cmd == J1939_TP_CMD_BAM) {
4226 ++ if (!session->transmission)
4227 ++ j1939_tp_set_rxtimeout(session, 750);
4228 ++ } else {
4229 ++ if (!session->transmission) {
4230 ++ j1939_session_txtimer_cancel(session);
4231 ++ j1939_tp_schedule_txtimer(session, 0);
4232 ++ }
4233 ++ j1939_tp_set_rxtimeout(session, 1250);
4234 + }
4235 +
4236 + j1939_session_put(session);
4237 +@@ -1716,6 +1748,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
4238 + int offset;
4239 + int nbytes;
4240 + bool final = false;
4241 ++ bool remain = false;
4242 + bool do_cts_eoma = false;
4243 + int packet;
4244 +
4245 +@@ -1750,7 +1783,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
4246 + __func__, session);
4247 + goto out_session_cancel;
4248 + }
4249 +- se_skb = j1939_session_skb_find(session);
4250 ++
4251 ++ se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
4252 + if (!se_skb) {
4253 + netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
4254 + session);
4255 +@@ -1777,6 +1811,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
4256 + j1939_cb_is_broadcast(&session->skcb)) {
4257 + if (session->pkt.rx >= session->pkt.total)
4258 + final = true;
4259 ++ else
4260 ++ remain = true;
4261 + } else {
4262 + /* never final, an EOMA must follow */
4263 + if (session->pkt.rx >= session->pkt.last)
4264 +@@ -1784,7 +1820,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
4265 + }
4266 +
4267 + if (final) {
4268 ++ j1939_session_timers_cancel(session);
4269 + j1939_session_completed(session);
4270 ++ } else if (remain) {
4271 ++ if (!session->transmission)
4272 ++ j1939_tp_set_rxtimeout(session, 750);
4273 + } else if (do_cts_eoma) {
4274 + j1939_tp_set_rxtimeout(session, 1250);
4275 + if (!session->transmission)
4276 +@@ -1829,6 +1869,13 @@ static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb)
4277 + else
4278 + j1939_xtp_rx_dat_one(session, skb);
4279 + }
4280 ++
4281 ++ if (j1939_cb_is_broadcast(skcb)) {
4282 ++ session = j1939_session_get_by_addr(priv, &skcb->addr, false,
4283 ++ false);
4284 ++ if (session)
4285 ++ j1939_xtp_rx_dat_one(session, skb);
4286 ++ }
4287 + }
4288 +
4289 + /* j1939 main intf */
4290 +@@ -1920,7 +1967,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
4291 + if (j1939_tp_im_transmitter(skcb))
4292 + j1939_xtp_rx_rts(priv, skb, true);
4293 +
4294 +- if (j1939_tp_im_receiver(skcb))
4295 ++ if (j1939_tp_im_receiver(skcb) || j1939_cb_is_broadcast(skcb))
4296 + j1939_xtp_rx_rts(priv, skb, false);
4297 +
4298 + break;
4299 +@@ -1984,7 +2031,7 @@ int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb)
4300 + {
4301 + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
4302 +
4303 +- if (!j1939_tp_im_involved_anydir(skcb))
4304 ++ if (!j1939_tp_im_involved_anydir(skcb) && !j1939_cb_is_broadcast(skcb))
4305 + return 0;
4306 +
4307 + switch (skcb->addr.pgn) {
4308 +@@ -2017,6 +2064,10 @@ void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb)
4309 + if (!skb->sk)
4310 + return;
4311 +
4312 ++ if (skb->sk->sk_family != AF_CAN ||
4313 ++ skb->sk->sk_protocol != CAN_J1939)
4314 ++ return;
4315 ++
4316 + j1939_session_list_lock(priv);
4317 + session = j1939_session_get_simple(priv, skb);
4318 + j1939_session_list_unlock(priv);
4319 +diff --git a/net/core/filter.c b/net/core/filter.c
4320 +index 82e1b5b061675..a69e79327c29e 100644
4321 +--- a/net/core/filter.c
4322 ++++ b/net/core/filter.c
4323 +@@ -8249,15 +8249,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
4324 + /* Helper macro for adding read access to tcp_sock or sock fields. */
4325 + #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
4326 + do { \
4327 ++ int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \
4328 + BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
4329 + sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
4330 ++ if (si->dst_reg == reg || si->src_reg == reg) \
4331 ++ reg--; \
4332 ++ if (si->dst_reg == reg || si->src_reg == reg) \
4333 ++ reg--; \
4334 ++ if (si->dst_reg == si->src_reg) { \
4335 ++ *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
4336 ++ offsetof(struct bpf_sock_ops_kern, \
4337 ++ temp)); \
4338 ++ fullsock_reg = reg; \
4339 ++ jmp += 2; \
4340 ++ } \
4341 + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
4342 + struct bpf_sock_ops_kern, \
4343 + is_fullsock), \
4344 +- si->dst_reg, si->src_reg, \
4345 ++ fullsock_reg, si->src_reg, \
4346 + offsetof(struct bpf_sock_ops_kern, \
4347 + is_fullsock)); \
4348 +- *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
4349 ++ *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
4350 ++ if (si->dst_reg == si->src_reg) \
4351 ++ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
4352 ++ offsetof(struct bpf_sock_ops_kern, \
4353 ++ temp)); \
4354 + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
4355 + struct bpf_sock_ops_kern, sk),\
4356 + si->dst_reg, si->src_reg, \
4357 +@@ -8266,6 +8282,49 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
4358 + OBJ_FIELD), \
4359 + si->dst_reg, si->dst_reg, \
4360 + offsetof(OBJ, OBJ_FIELD)); \
4361 ++ if (si->dst_reg == si->src_reg) { \
4362 ++ *insn++ = BPF_JMP_A(1); \
4363 ++ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
4364 ++ offsetof(struct bpf_sock_ops_kern, \
4365 ++ temp)); \
4366 ++ } \
4367 ++ } while (0)
4368 ++
4369 ++#define SOCK_OPS_GET_SK() \
4370 ++ do { \
4371 ++ int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
4372 ++ if (si->dst_reg == reg || si->src_reg == reg) \
4373 ++ reg--; \
4374 ++ if (si->dst_reg == reg || si->src_reg == reg) \
4375 ++ reg--; \
4376 ++ if (si->dst_reg == si->src_reg) { \
4377 ++ *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
4378 ++ offsetof(struct bpf_sock_ops_kern, \
4379 ++ temp)); \
4380 ++ fullsock_reg = reg; \
4381 ++ jmp += 2; \
4382 ++ } \
4383 ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
4384 ++ struct bpf_sock_ops_kern, \
4385 ++ is_fullsock), \
4386 ++ fullsock_reg, si->src_reg, \
4387 ++ offsetof(struct bpf_sock_ops_kern, \
4388 ++ is_fullsock)); \
4389 ++ *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
4390 ++ if (si->dst_reg == si->src_reg) \
4391 ++ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
4392 ++ offsetof(struct bpf_sock_ops_kern, \
4393 ++ temp)); \
4394 ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
4395 ++ struct bpf_sock_ops_kern, sk),\
4396 ++ si->dst_reg, si->src_reg, \
4397 ++ offsetof(struct bpf_sock_ops_kern, sk));\
4398 ++ if (si->dst_reg == si->src_reg) { \
4399 ++ *insn++ = BPF_JMP_A(1); \
4400 ++ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
4401 ++ offsetof(struct bpf_sock_ops_kern, \
4402 ++ temp)); \
4403 ++ } \
4404 + } while (0)
4405 +
4406 + #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
4407 +@@ -8552,17 +8611,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
4408 + SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
4409 + break;
4410 + case offsetof(struct bpf_sock_ops, sk):
4411 +- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4412 +- struct bpf_sock_ops_kern,
4413 +- is_fullsock),
4414 +- si->dst_reg, si->src_reg,
4415 +- offsetof(struct bpf_sock_ops_kern,
4416 +- is_fullsock));
4417 +- *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
4418 +- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4419 +- struct bpf_sock_ops_kern, sk),
4420 +- si->dst_reg, si->src_reg,
4421 +- offsetof(struct bpf_sock_ops_kern, sk));
4422 ++ SOCK_OPS_GET_SK();
4423 + break;
4424 + }
4425 + return insn - insn_buf;
4426 +diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
4427 +index 07782836fad6e..3c48cdc8935df 100644
4428 +--- a/net/netfilter/nft_exthdr.c
4429 ++++ b/net/netfilter/nft_exthdr.c
4430 +@@ -44,7 +44,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
4431 +
4432 + err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
4433 + if (priv->flags & NFT_EXTHDR_F_PRESENT) {
4434 +- *dest = (err >= 0);
4435 ++ nft_reg_store8(dest, err >= 0);
4436 + return;
4437 + } else if (err < 0) {
4438 + goto err;
4439 +@@ -141,7 +141,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
4440 +
4441 + err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
4442 + if (priv->flags & NFT_EXTHDR_F_PRESENT) {
4443 +- *dest = (err >= 0);
4444 ++ nft_reg_store8(dest, err >= 0);
4445 + return;
4446 + } else if (err < 0) {
4447 + goto err;
4448 +diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
4449 +index e426fedb9524f..ac16d83f2d26c 100644
4450 +--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
4451 ++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
4452 +@@ -265,6 +265,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
4453 + {
4454 + struct svc_rdma_recv_ctxt *ctxt;
4455 +
4456 ++ if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
4457 ++ return 0;
4458 + ctxt = svc_rdma_recv_ctxt_get(rdma);
4459 + if (!ctxt)
4460 + return -ENOMEM;
4461 +diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
4462 +index 23d1cb01a41ae..5ceb93010a973 100644
4463 +--- a/scripts/kconfig/qconf.cc
4464 ++++ b/scripts/kconfig/qconf.cc
4465 +@@ -864,40 +864,40 @@ void ConfigList::focusInEvent(QFocusEvent *e)
4466 +
4467 + void ConfigList::contextMenuEvent(QContextMenuEvent *e)
4468 + {
4469 +- if (e->y() <= header()->geometry().bottom()) {
4470 +- if (!headerPopup) {
4471 +- QAction *action;
4472 +-
4473 +- headerPopup = new QMenu(this);
4474 +- action = new QAction("Show Name", this);
4475 +- action->setCheckable(true);
4476 +- connect(action, SIGNAL(toggled(bool)),
4477 +- parent(), SLOT(setShowName(bool)));
4478 +- connect(parent(), SIGNAL(showNameChanged(bool)),
4479 +- action, SLOT(setOn(bool)));
4480 +- action->setChecked(showName);
4481 +- headerPopup->addAction(action);
4482 +- action = new QAction("Show Range", this);
4483 +- action->setCheckable(true);
4484 +- connect(action, SIGNAL(toggled(bool)),
4485 +- parent(), SLOT(setShowRange(bool)));
4486 +- connect(parent(), SIGNAL(showRangeChanged(bool)),
4487 +- action, SLOT(setOn(bool)));
4488 +- action->setChecked(showRange);
4489 +- headerPopup->addAction(action);
4490 +- action = new QAction("Show Data", this);
4491 +- action->setCheckable(true);
4492 +- connect(action, SIGNAL(toggled(bool)),
4493 +- parent(), SLOT(setShowData(bool)));
4494 +- connect(parent(), SIGNAL(showDataChanged(bool)),
4495 +- action, SLOT(setOn(bool)));
4496 +- action->setChecked(showData);
4497 +- headerPopup->addAction(action);
4498 +- }
4499 +- headerPopup->exec(e->globalPos());
4500 +- e->accept();
4501 +- } else
4502 +- e->ignore();
4503 ++ if (!headerPopup) {
4504 ++ QAction *action;
4505 ++
4506 ++ headerPopup = new QMenu(this);
4507 ++ action = new QAction("Show Name", this);
4508 ++ action->setCheckable(true);
4509 ++ connect(action, SIGNAL(toggled(bool)),
4510 ++ parent(), SLOT(setShowName(bool)));
4511 ++ connect(parent(), SIGNAL(showNameChanged(bool)),
4512 ++ action, SLOT(setChecked(bool)));
4513 ++ action->setChecked(showName);
4514 ++ headerPopup->addAction(action);
4515 ++
4516 ++ action = new QAction("Show Range", this);
4517 ++ action->setCheckable(true);
4518 ++ connect(action, SIGNAL(toggled(bool)),
4519 ++ parent(), SLOT(setShowRange(bool)));
4520 ++ connect(parent(), SIGNAL(showRangeChanged(bool)),
4521 ++ action, SLOT(setChecked(bool)));
4522 ++ action->setChecked(showRange);
4523 ++ headerPopup->addAction(action);
4524 ++
4525 ++ action = new QAction("Show Data", this);
4526 ++ action->setCheckable(true);
4527 ++ connect(action, SIGNAL(toggled(bool)),
4528 ++ parent(), SLOT(setShowData(bool)));
4529 ++ connect(parent(), SIGNAL(showDataChanged(bool)),
4530 ++ action, SLOT(setChecked(bool)));
4531 ++ action->setChecked(showData);
4532 ++ headerPopup->addAction(action);
4533 ++ }
4534 ++
4535 ++ headerPopup->exec(e->globalPos());
4536 ++ e->accept();
4537 + }
4538 +
4539 + ConfigView*ConfigView::viewList;
4540 +@@ -1228,7 +1228,6 @@ void ConfigInfoView::clicked(const QUrl &url)
4541 + struct menu *m = NULL;
4542 +
4543 + if (count < 1) {
4544 +- qInfo() << "Clicked link is empty";
4545 + delete[] data;
4546 + return;
4547 + }
4548 +@@ -1241,7 +1240,6 @@ void ConfigInfoView::clicked(const QUrl &url)
4549 + strcat(data, "$");
4550 + result = sym_re_search(data);
4551 + if (!result) {
4552 +- qInfo() << "Clicked symbol is invalid:" << data;
4553 + delete[] data;
4554 + return;
4555 + }
4556 +@@ -1275,7 +1273,7 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
4557 +
4558 + action->setCheckable(true);
4559 + connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
4560 +- connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
4561 ++ connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setChecked(bool)));
4562 + action->setChecked(showDebug());
4563 + popup->addSeparator();
4564 + popup->addAction(action);
4565 +diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
4566 +index 09ddab5f5caeb..9766f6af87430 100644
4567 +--- a/sound/hda/hdac_bus.c
4568 ++++ b/sound/hda/hdac_bus.c
4569 +@@ -46,6 +46,18 @@ int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
4570 + INIT_LIST_HEAD(&bus->hlink_list);
4571 + init_waitqueue_head(&bus->rirb_wq);
4572 + bus->irq = -1;
4573 ++
4574 ++ /*
4575 ++ * Default value of '8' is as per the HD audio specification (Rev 1.0a).
4576 ++ * Following relation is used to derive STRIPE control value.
4577 ++ * For sample rate <= 48K:
4578 ++ * { ((num_channels * bits_per_sample) / number of SDOs) >= 8 }
4579 ++ * For sample rate > 48K:
4580 ++ * { ((num_channels * bits_per_sample * rate/48000) /
4581 ++ * number of SDOs) >= 8 }
4582 ++ */
4583 ++ bus->sdo_limit = 8;
4584 ++
4585 + return 0;
4586 + }
4587 + EXPORT_SYMBOL_GPL(snd_hdac_bus_init);
4588 +diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
4589 +index 011b17cc1efa2..b98449fd92f3b 100644
4590 +--- a/sound/hda/hdac_controller.c
4591 ++++ b/sound/hda/hdac_controller.c
4592 +@@ -529,17 +529,6 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
4593 +
4594 + bus->chip_init = true;
4595 +
4596 +- /*
4597 +- * Default value of '8' is as per the HD audio specification (Rev 1.0a).
4598 +- * Following relation is used to derive STRIPE control value.
4599 +- * For sample rate <= 48K:
4600 +- * { ((num_channels * bits_per_sample) / number of SDOs) >= 8 }
4601 +- * For sample rate > 48K:
4602 +- * { ((num_channels * bits_per_sample * rate/48000) /
4603 +- * number of SDOs) >= 8 }
4604 +- */
4605 +- bus->sdo_limit = 8;
4606 +-
4607 + return true;
4608 + }
4609 + EXPORT_SYMBOL_GPL(snd_hdac_bus_init_chip);
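
Taken together, the hdac_bus.c and hdac_controller.c hunks move the sdo_limit default from chip init to bus init. As far as the hunks show, the point is ordering: snd_hdac_bus_init() always runs when the bus object is created, while snd_hdac_bus_init_chip() can run after consumers have already read bus->sdo_limit. A simplified sketch with a stand-in struct, not the real hdac_bus:

    struct hdac_bus_sketch {
        int irq;
        unsigned int sdo_limit;
    };

    /* Defaults belong with object construction so every later reader,
     * including ones that run before the chip is brought up, sees them. */
    static void bus_init_sketch(struct hdac_bus_sketch *bus)
    {
        bus->irq = -1;
        bus->sdo_limit = 8;  /* HD Audio spec (rev 1.0a) default; see the
                              * STRIPE relation quoted in the hunk */
    }
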
4610 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4611 +index 8626e59f1e6a9..b10d005786d07 100644
4612 +--- a/sound/pci/hda/patch_realtek.c
4613 ++++ b/sound/pci/hda/patch_realtek.c
4614 +@@ -7696,6 +7696,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4615 + SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
4616 + SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
4617 + SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
4618 ++ SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
4619 ++ SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
4620 + SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
4621 + SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
4622 + SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
4623 +diff --git a/sound/soc/amd/renoir/acp3x-pdm-dma.c b/sound/soc/amd/renoir/acp3x-pdm-dma.c
4624 +index 623dfd3ea7051..7b14d9a81b97a 100644
4625 +--- a/sound/soc/amd/renoir/acp3x-pdm-dma.c
4626 ++++ b/sound/soc/amd/renoir/acp3x-pdm-dma.c
4627 +@@ -314,40 +314,30 @@ static int acp_pdm_dma_close(struct snd_soc_component *component,
4628 + return 0;
4629 + }
4630 +
4631 +-static int acp_pdm_dai_hw_params(struct snd_pcm_substream *substream,
4632 +- struct snd_pcm_hw_params *params,
4633 +- struct snd_soc_dai *dai)
4634 ++static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream,
4635 ++ int cmd, struct snd_soc_dai *dai)
4636 + {
4637 + struct pdm_stream_instance *rtd;
4638 ++ int ret;
4639 ++ bool pdm_status;
4640 + unsigned int ch_mask;
4641 +
4642 + rtd = substream->runtime->private_data;
4643 +- switch (params_channels(params)) {
4644 ++ ret = 0;
4645 ++ switch (substream->runtime->channels) {
4646 + case TWO_CH:
4647 + ch_mask = 0x00;
4648 + break;
4649 + default:
4650 + return -EINVAL;
4651 + }
4652 +- rn_writel(ch_mask, rtd->acp_base + ACP_WOV_PDM_NO_OF_CHANNELS);
4653 +- rn_writel(PDM_DECIMATION_FACTOR, rtd->acp_base +
4654 +- ACP_WOV_PDM_DECIMATION_FACTOR);
4655 +- return 0;
4656 +-}
4657 +-
4658 +-static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream,
4659 +- int cmd, struct snd_soc_dai *dai)
4660 +-{
4661 +- struct pdm_stream_instance *rtd;
4662 +- int ret;
4663 +- bool pdm_status;
4664 +-
4665 +- rtd = substream->runtime->private_data;
4666 +- ret = 0;
4667 + switch (cmd) {
4668 + case SNDRV_PCM_TRIGGER_START:
4669 + case SNDRV_PCM_TRIGGER_RESUME:
4670 + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
4671 ++ rn_writel(ch_mask, rtd->acp_base + ACP_WOV_PDM_NO_OF_CHANNELS);
4672 ++ rn_writel(PDM_DECIMATION_FACTOR, rtd->acp_base +
4673 ++ ACP_WOV_PDM_DECIMATION_FACTOR);
4674 + rtd->bytescount = acp_pdm_get_byte_count(rtd,
4675 + substream->stream);
4676 + pdm_status = check_pdm_dma_status(rtd->acp_base);
4677 +@@ -369,7 +359,6 @@ static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream,
4678 + }
4679 +
4680 + static struct snd_soc_dai_ops acp_pdm_dai_ops = {
4681 +- .hw_params = acp_pdm_dai_hw_params,
4682 + .trigger = acp_pdm_dai_trigger,
4683 + };
4684 +
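
The acp3x-pdm-dma hunk folds the register writes that used to live in hw_params into the trigger START/RESUME path and drops the hw_params op entirely. One plausible reading, hedged since the patch carries no changelog here: re-programming ACP_WOV_PDM_NO_OF_CHANNELS and the decimation factor on every start means the configuration is re-applied even when the hardware lost it, for instance across a resume. A compact sketch of the resulting trigger shape, with stand-in register I/O:

    /* program_fn stands in for the MMIO writes (rn_writel in the driver). */
    typedef void (*program_fn)(unsigned int ch_mask);

    enum trigger_cmd { TRIG_START, TRIG_RESUME, TRIG_STOP };

    static int pdm_trigger_sketch(enum trigger_cmd cmd, unsigned int ch_mask,
                                  program_fn program)
    {
        switch (cmd) {
        case TRIG_START:
        case TRIG_RESUME:
            program(ch_mask);   /* re-applied on every (re)start */
            /* ... enable the PDM DMA and interrupts here ... */
            return 0;
        case TRIG_STOP:
            /* ... disable the stream ... */
            return 0;
        }
        return -1;  /* -EINVAL in the driver */
    }
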
4685 +diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
4686 +index 85bc7ae4d2671..26cf372ccda6f 100644
4687 +--- a/sound/soc/codecs/msm8916-wcd-analog.c
4688 ++++ b/sound/soc/codecs/msm8916-wcd-analog.c
4689 +@@ -19,8 +19,8 @@
4690 +
4691 + #define CDC_D_REVISION1 (0xf000)
4692 + #define CDC_D_PERPH_SUBTYPE (0xf005)
4693 +-#define CDC_D_INT_EN_SET (0x015)
4694 +-#define CDC_D_INT_EN_CLR (0x016)
4695 ++#define CDC_D_INT_EN_SET (0xf015)
4696 ++#define CDC_D_INT_EN_CLR (0xf016)
4697 + #define MBHC_SWITCH_INT BIT(7)
4698 + #define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6)
4699 + #define MBHC_BUTTON_PRESS_DET BIT(5)
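
The msm8916-wcd-analog fix is two constants: the interrupt enable set/clear registers gain the 0xf000 prefix that the neighbouring analog-codec registers already carry, so writes stop landing at bare offsets 0x015/0x016. A one-line sketch of the addressing convention; the "page" framing is an assumption for illustration:

    /* Registers here are page | offset; dropping the page aims the
     * write at a different register block entirely. */
    #define CDC_A_PAGE        0xf000
    #define INT_EN_SET_SKETCH (CDC_A_PAGE | 0x015)   /* == 0xf015 */
    #define INT_EN_CLR_SKETCH (CDC_A_PAGE | 0x016)   /* == 0xf016 */
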
4700 +diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4701 +index 8817eaae6bb7a..b520e3aeaf3de 100644
4702 +--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4703 ++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4704 +@@ -331,7 +331,7 @@ static int sst_media_open(struct snd_pcm_substream *substream,
4705 +
4706 + ret_val = power_up_sst(stream);
4707 + if (ret_val < 0)
4708 +- return ret_val;
4709 ++ goto out_power_up;
4710 +
4711 + /* Make sure, that the period size is always even */
4712 + snd_pcm_hw_constraint_step(substream->runtime, 0,
4713 +@@ -340,8 +340,9 @@ static int sst_media_open(struct snd_pcm_substream *substream,
4714 + return snd_pcm_hw_constraint_integer(runtime,
4715 + SNDRV_PCM_HW_PARAM_PERIODS);
4716 + out_ops:
4717 +- kfree(stream);
4718 + mutex_unlock(&sst_lock);
4719 ++out_power_up:
4720 ++ kfree(stream);
4721 + return ret_val;
4722 + }
4723 +
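
The sst-mfld hunk fixes the error paths in sst_media_open(): a power_up_sst() failure used to return directly and leak the freshly allocated stream, and the unlock/free ordering on the other path is tidied so both failures converge on a label that frees the allocation after dropping the lock. A self-contained sketch of that goto-ladder shape; the *_sketch helpers are hypothetical stand-ins:

    #include <errno.h>
    #include <stdlib.h>

    static void lock_sketch(void)      { }            /* take sst_lock */
    static void unlock_sketch(void)    { }            /* drop sst_lock */
    static int  power_up_sketch(void)  { return 0; }  /* may fail      */
    static int  configure_sketch(void) { return 0; }  /* hw constraints */

    static int media_open_sketch(void)
    {
        int ret;
        char *stream = calloc(1, 64);

        if (!stream)
            return -ENOMEM;

        ret = power_up_sketch();
        if (ret < 0)
            goto out_free;        /* was: a direct return that leaked */

        lock_sketch();
        ret = configure_sketch();
        if (ret < 0)
            goto out_unlock;

        unlock_sketch();
        return 0;

    out_unlock:
        unlock_sketch();
    out_free:
        free(stream);
        return ret;
    }
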
4724 +diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
4725 +index 2a5302f1db98a..0168af8492727 100644
4726 +--- a/sound/soc/qcom/qdsp6/q6afe-dai.c
4727 ++++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
4728 +@@ -1150,206 +1150,206 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
4729 + }
4730 +
4731 + static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
4732 +- SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0),
4733 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0),
4734 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0),
4735 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0),
4736 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0),
4737 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0),
4738 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0),
4739 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0),
4740 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0),
4741 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0),
4742 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0),
4743 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0),
4744 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0),
4745 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0),
4746 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0),
4747 ++ SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
4748 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
4749 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
4750 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
4751 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
4752 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
4753 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
4754 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
4755 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
4756 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
4757 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
4758 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
4759 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
4760 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
4761 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
4762 + SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
4763 +- 0, 0, 0, 0),
4764 ++ 0, SND_SOC_NOPM, 0, 0),
4765 + SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
4766 +- 0, 0, 0, 0),
4767 ++ 0, SND_SOC_NOPM, 0, 0),
4768 + SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL,
4769 +- 0, 0, 0, 0),
4770 ++ 0, SND_SOC_NOPM, 0, 0),
4771 + SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL,
4772 +- 0, 0, 0, 0),
4773 ++ 0, SND_SOC_NOPM, 0, 0),
4774 + SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL,
4775 +- 0, 0, 0, 0),
4776 ++ 0, SND_SOC_NOPM, 0, 0),
4777 + SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL,
4778 +- 0, 0, 0, 0),
4779 ++ 0, SND_SOC_NOPM, 0, 0),
4780 + SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1",
4781 + "Secondary MI2S Playback SD1",
4782 +- 0, 0, 0, 0),
4783 ++ 0, SND_SOC_NOPM, 0, 0),
4784 + SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL,
4785 +- 0, 0, 0, 0),
4786 ++ 0, SND_SOC_NOPM, 0, 0),
4787 + SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL,
4788 +- 0, 0, 0, 0),
4789 ++ 0, SND_SOC_NOPM, 0, 0),
4790 +
4791 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL,
4792 +- 0, 0, 0, 0),
4793 ++ 0, SND_SOC_NOPM, 0, 0),
4794 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL,
4795 +- 0, 0, 0, 0),
4796 ++ 0, SND_SOC_NOPM, 0, 0),
4797 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL,
4798 +- 0, 0, 0, 0),
4799 ++ 0, SND_SOC_NOPM, 0, 0),
4800 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL,
4801 +- 0, 0, 0, 0),
4802 ++ 0, SND_SOC_NOPM, 0, 0),
4803 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL,
4804 +- 0, 0, 0, 0),
4805 ++ 0, SND_SOC_NOPM, 0, 0),
4806 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL,
4807 +- 0, 0, 0, 0),
4808 ++ 0, SND_SOC_NOPM, 0, 0),
4809 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL,
4810 +- 0, 0, 0, 0),
4811 ++ 0, SND_SOC_NOPM, 0, 0),
4812 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL,
4813 +- 0, 0, 0, 0),
4814 ++ 0, SND_SOC_NOPM, 0, 0),
4815 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL,
4816 +- 0, 0, 0, 0),
4817 ++ 0, SND_SOC_NOPM, 0, 0),
4818 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL,
4819 +- 0, 0, 0, 0),
4820 ++ 0, SND_SOC_NOPM, 0, 0),
4821 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL,
4822 +- 0, 0, 0, 0),
4823 ++ 0, SND_SOC_NOPM, 0, 0),
4824 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL,
4825 +- 0, 0, 0, 0),
4826 ++ 0, SND_SOC_NOPM, 0, 0),
4827 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL,
4828 +- 0, 0, 0, 0),
4829 ++ 0, SND_SOC_NOPM, 0, 0),
4830 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL,
4831 +- 0, 0, 0, 0),
4832 ++ 0, SND_SOC_NOPM, 0, 0),
4833 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL,
4834 +- 0, 0, 0, 0),
4835 ++ 0, SND_SOC_NOPM, 0, 0),
4836 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL,
4837 +- 0, 0, 0, 0),
4838 ++ 0, SND_SOC_NOPM, 0, 0),
4839 +
4840 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL,
4841 +- 0, 0, 0, 0),
4842 ++ 0, SND_SOC_NOPM, 0, 0),
4843 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL,
4844 +- 0, 0, 0, 0),
4845 ++ 0, SND_SOC_NOPM, 0, 0),
4846 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL,
4847 +- 0, 0, 0, 0),
4848 ++ 0, SND_SOC_NOPM, 0, 0),
4849 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL,
4850 +- 0, 0, 0, 0),
4851 ++ 0, SND_SOC_NOPM, 0, 0),
4852 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL,
4853 +- 0, 0, 0, 0),
4854 ++ 0, SND_SOC_NOPM, 0, 0),
4855 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL,
4856 +- 0, 0, 0, 0),
4857 ++ 0, SND_SOC_NOPM, 0, 0),
4858 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL,
4859 +- 0, 0, 0, 0),
4860 ++ 0, SND_SOC_NOPM, 0, 0),
4861 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL,
4862 +- 0, 0, 0, 0),
4863 ++ 0, SND_SOC_NOPM, 0, 0),
4864 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL,
4865 +- 0, 0, 0, 0),
4866 ++ 0, SND_SOC_NOPM, 0, 0),
4867 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL,
4868 +- 0, 0, 0, 0),
4869 ++ 0, SND_SOC_NOPM, 0, 0),
4870 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL,
4871 +- 0, 0, 0, 0),
4872 ++ 0, SND_SOC_NOPM, 0, 0),
4873 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL,
4874 +- 0, 0, 0, 0),
4875 ++ 0, SND_SOC_NOPM, 0, 0),
4876 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL,
4877 +- 0, 0, 0, 0),
4878 ++ 0, SND_SOC_NOPM, 0, 0),
4879 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL,
4880 +- 0, 0, 0, 0),
4881 ++ 0, SND_SOC_NOPM, 0, 0),
4882 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL,
4883 +- 0, 0, 0, 0),
4884 ++ 0, SND_SOC_NOPM, 0, 0),
4885 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL,
4886 +- 0, 0, 0, 0),
4887 ++ 0, SND_SOC_NOPM, 0, 0),
4888 +
4889 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL,
4890 +- 0, 0, 0, 0),
4891 ++ 0, SND_SOC_NOPM, 0, 0),
4892 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL,
4893 +- 0, 0, 0, 0),
4894 ++ 0, SND_SOC_NOPM, 0, 0),
4895 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL,
4896 +- 0, 0, 0, 0),
4897 ++ 0, SND_SOC_NOPM, 0, 0),
4898 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL,
4899 +- 0, 0, 0, 0),
4900 ++ 0, SND_SOC_NOPM, 0, 0),
4901 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL,
4902 +- 0, 0, 0, 0),
4903 ++ 0, SND_SOC_NOPM, 0, 0),
4904 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL,
4905 +- 0, 0, 0, 0),
4906 ++ 0, SND_SOC_NOPM, 0, 0),
4907 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL,
4908 +- 0, 0, 0, 0),
4909 ++ 0, SND_SOC_NOPM, 0, 0),
4910 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL,
4911 +- 0, 0, 0, 0),
4912 ++ 0, SND_SOC_NOPM, 0, 0),
4913 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL,
4914 +- 0, 0, 0, 0),
4915 ++ 0, SND_SOC_NOPM, 0, 0),
4916 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL,
4917 +- 0, 0, 0, 0),
4918 ++ 0, SND_SOC_NOPM, 0, 0),
4919 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL,
4920 +- 0, 0, 0, 0),
4921 ++ 0, SND_SOC_NOPM, 0, 0),
4922 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL,
4923 +- 0, 0, 0, 0),
4924 ++ 0, SND_SOC_NOPM, 0, 0),
4925 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL,
4926 +- 0, 0, 0, 0),
4927 ++ 0, SND_SOC_NOPM, 0, 0),
4928 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL,
4929 +- 0, 0, 0, 0),
4930 ++ 0, SND_SOC_NOPM, 0, 0),
4931 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL,
4932 +- 0, 0, 0, 0),
4933 ++ 0, SND_SOC_NOPM, 0, 0),
4934 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL,
4935 +- 0, 0, 0, 0),
4936 ++ 0, SND_SOC_NOPM, 0, 0),
4937 +
4938 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL,
4939 +- 0, 0, 0, 0),
4940 ++ 0, SND_SOC_NOPM, 0, 0),
4941 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL,
4942 +- 0, 0, 0, 0),
4943 ++ 0, SND_SOC_NOPM, 0, 0),
4944 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL,
4945 +- 0, 0, 0, 0),
4946 ++ 0, SND_SOC_NOPM, 0, 0),
4947 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL,
4948 +- 0, 0, 0, 0),
4949 ++ 0, SND_SOC_NOPM, 0, 0),
4950 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL,
4951 +- 0, 0, 0, 0),
4952 ++ 0, SND_SOC_NOPM, 0, 0),
4953 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL,
4954 +- 0, 0, 0, 0),
4955 ++ 0, SND_SOC_NOPM, 0, 0),
4956 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL,
4957 +- 0, 0, 0, 0),
4958 ++ 0, SND_SOC_NOPM, 0, 0),
4959 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL,
4960 +- 0, 0, 0, 0),
4961 ++ 0, SND_SOC_NOPM, 0, 0),
4962 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL,
4963 +- 0, 0, 0, 0),
4964 ++ 0, SND_SOC_NOPM, 0, 0),
4965 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL,
4966 +- 0, 0, 0, 0),
4967 ++ 0, SND_SOC_NOPM, 0, 0),
4968 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL,
4969 +- 0, 0, 0, 0),
4970 ++ 0, SND_SOC_NOPM, 0, 0),
4971 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL,
4972 +- 0, 0, 0, 0),
4973 ++ 0, SND_SOC_NOPM, 0, 0),
4974 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL,
4975 +- 0, 0, 0, 0),
4976 ++ 0, SND_SOC_NOPM, 0, 0),
4977 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL,
4978 +- 0, 0, 0, 0),
4979 ++ 0, SND_SOC_NOPM, 0, 0),
4980 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL,
4981 +- 0, 0, 0, 0),
4982 ++ 0, SND_SOC_NOPM, 0, 0),
4983 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL,
4984 +- 0, 0, 0, 0),
4985 ++ 0, SND_SOC_NOPM, 0, 0),
4986 +
4987 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL,
4988 +- 0, 0, 0, 0),
4989 ++ 0, SND_SOC_NOPM, 0, 0),
4990 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL,
4991 +- 0, 0, 0, 0),
4992 ++ 0, SND_SOC_NOPM, 0, 0),
4993 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL,
4994 +- 0, 0, 0, 0),
4995 ++ 0, SND_SOC_NOPM, 0, 0),
4996 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL,
4997 +- 0, 0, 0, 0),
4998 ++ 0, SND_SOC_NOPM, 0, 0),
4999 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL,
5000 +- 0, 0, 0, 0),
5001 ++ 0, SND_SOC_NOPM, 0, 0),
5002 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL,
5003 +- 0, 0, 0, 0),
5004 ++ 0, SND_SOC_NOPM, 0, 0),
5005 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL,
5006 +- 0, 0, 0, 0),
5007 ++ 0, SND_SOC_NOPM, 0, 0),
5008 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL,
5009 +- 0, 0, 0, 0),
5010 ++ 0, SND_SOC_NOPM, 0, 0),
5011 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL,
5012 +- 0, 0, 0, 0),
5013 ++ 0, SND_SOC_NOPM, 0, 0),
5014 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL,
5015 +- 0, 0, 0, 0),
5016 ++ 0, SND_SOC_NOPM, 0, 0),
5017 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL,
5018 +- 0, 0, 0, 0),
5019 ++ 0, SND_SOC_NOPM, 0, 0),
5020 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL,
5021 +- 0, 0, 0, 0),
5022 ++ 0, SND_SOC_NOPM, 0, 0),
5023 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL,
5024 +- 0, 0, 0, 0),
5025 ++ 0, SND_SOC_NOPM, 0, 0),
5026 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL,
5027 +- 0, 0, 0, 0),
5028 ++ 0, SND_SOC_NOPM, 0, 0),
5029 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL,
5030 +- 0, 0, 0, 0),
5031 ++ 0, SND_SOC_NOPM, 0, 0),
5032 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL,
5033 +- 0, 0, 0, 0),
5034 +- SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, 0, 0, 0),
5035 ++ 0, SND_SOC_NOPM, 0, 0),
5036 ++ SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, SND_SOC_NOPM, 0, 0),
5037 + };
5038 +
5039 + static const struct snd_soc_component_driver q6afe_dai_component = {
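
The long q6afe-dai sweep changes a single argument in every AIF widget: the fourth parameter of SND_SOC_DAPM_AIF_IN/OUT is a power-management register index, so 0 meant "register 0" while SND_SOC_NOPM (-1) is the documented way to say the widget has no backing register. A reduced sketch of the distinction, with a simplified struct rather than the real DAPM types:

    #define NOPM_SKETCH (-1)   /* mirrors SND_SOC_NOPM */

    struct widget_sketch {
        const char *name;
        int reg;               /* register index, or NOPM_SKETCH */
    };

    /* A core that honours the convention only touches hardware when a
     * real register is attached to the widget. */
    static void power_widget_sketch(struct widget_sketch *w, int on)
    {
        (void)on;
        if (w->reg == NOPM_SKETCH)
            return;            /* purely virtual: no register I/O */
        /* ... read-modify-write of w->reg would happen here ... */
    }
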
5040 +diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
5041 +index 46e50612b92c1..750e6a30444eb 100644
5042 +--- a/sound/soc/qcom/qdsp6/q6routing.c
5043 ++++ b/sound/soc/qcom/qdsp6/q6routing.c
5044 +@@ -973,6 +973,20 @@ static int msm_routing_probe(struct snd_soc_component *c)
5045 + return 0;
5046 + }
5047 +
5048 ++static unsigned int q6routing_reg_read(struct snd_soc_component *component,
5049 ++ unsigned int reg)
5050 ++{
5051 ++ /* default value */
5052 ++ return 0;
5053 ++}
5054 ++
5055 ++static int q6routing_reg_write(struct snd_soc_component *component,
5056 ++ unsigned int reg, unsigned int val)
5057 ++{
5058 ++ /* dummy */
5059 ++ return 0;
5060 ++}
5061 ++
5062 + static const struct snd_soc_component_driver msm_soc_routing_component = {
5063 + .probe = msm_routing_probe,
5064 + .name = DRV_NAME,
5065 +@@ -981,6 +995,8 @@ static const struct snd_soc_component_driver msm_soc_routing_component = {
5066 + .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
5067 + .dapm_routes = intercon,
5068 + .num_dapm_routes = ARRAY_SIZE(intercon),
5069 ++ .read = q6routing_reg_read,
5070 ++ .write = q6routing_reg_write,
5071 + };
5072 +
5073 + static int q6pcm_routing_probe(struct platform_device *pdev)
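
The q6routing hunk wires constant read/write callbacks into the component driver. The routing component exposes DAPM controls but has no register map behind them, so giving the ASoC core an always-0 read and a no-op write gives the control get/put paths a well-defined answer instead of a missing backend. A sketch of why a stub beats an absent callback; the guard and names are illustrative:

    typedef unsigned int (*read_cb_t)(void *ctx, unsigned int reg);

    /* With no callback registered, a core that reads unconditionally has
     * nothing to call; a stub returning a fixed default makes the access
     * a harmless no-op. This sketch adds the NULL guard for clarity. */
    static unsigned int control_get_sketch(void *ctx, read_cb_t read_cb,
                                           unsigned int reg)
    {
        return read_cb ? read_cb(ctx, reg) : 0u;
    }
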
5074 +diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
5075 +index 540ffde0b03a3..0be1330b4c1ba 100644
5076 +--- a/tools/bpf/bpftool/gen.c
5077 ++++ b/tools/bpf/bpftool/gen.c
5078 +@@ -400,7 +400,7 @@ static int do_skeleton(int argc, char **argv)
5079 + { \n\
5080 + struct %1$s *obj; \n\
5081 + \n\
5082 +- obj = (typeof(obj))calloc(1, sizeof(*obj)); \n\
5083 ++ obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
5084 + if (!obj) \n\
5085 + return NULL; \n\
5086 + if (%1$s__create_skeleton(obj)) \n\
5087 +@@ -464,7 +464,7 @@ static int do_skeleton(int argc, char **argv)
5088 + { \n\
5089 + struct bpf_object_skeleton *s; \n\
5090 + \n\
5091 +- s = (typeof(s))calloc(1, sizeof(*s)); \n\
5092 ++ s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
5093 + if (!s) \n\
5094 + return -1; \n\
5095 + obj->skeleton = s; \n\
5096 +@@ -482,7 +482,7 @@ static int do_skeleton(int argc, char **argv)
5097 + /* maps */ \n\
5098 + s->map_cnt = %zu; \n\
5099 + s->map_skel_sz = sizeof(*s->maps); \n\
5100 +- s->maps = (typeof(s->maps))calloc(s->map_cnt, s->map_skel_sz);\n\
5101 ++ s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
5102 + if (!s->maps) \n\
5103 + goto err; \n\
5104 + ",
5105 +@@ -518,7 +518,7 @@ static int do_skeleton(int argc, char **argv)
5106 + /* programs */ \n\
5107 + s->prog_cnt = %zu; \n\
5108 + s->prog_skel_sz = sizeof(*s->progs); \n\
5109 +- s->progs = (typeof(s->progs))calloc(s->prog_cnt, s->prog_skel_sz);\n\
5110 ++ s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
5111 + if (!s->progs) \n\
5112 + goto err; \n\
5113 + ",
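
The four gen.c hunks change the code that bpftool emits into skeleton headers: typeof() is a GNU C extension, so a generated header using it cannot be included from standard C++ (or strict ISO C) translation units, while an explicit struct cast compiles everywhere. A tiny before/after sketch:

    #include <stdlib.h>

    struct skel_sketch { int placeholder; };

    static struct skel_sketch *open_sketch(void)
    {
        /* before: obj = (typeof(obj))calloc(1, sizeof(*obj));  (GNU-only) */
        struct skel_sketch *obj =
            (struct skel_sketch *)calloc(1, sizeof(*obj));
        return obj;   /* caller frees; NULL on allocation failure */
    }
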
5114 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
5115 +index 11e4725b8b1c0..e7642a6e39f9e 100644
5116 +--- a/tools/lib/bpf/libbpf.c
5117 ++++ b/tools/lib/bpf/libbpf.c
5118 +@@ -5025,7 +5025,8 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
5119 + static int bpf_object__collect_map_relos(struct bpf_object *obj,
5120 + GElf_Shdr *shdr, Elf_Data *data)
5121 + {
5122 +- int i, j, nrels, new_sz, ptr_sz = sizeof(void *);
5123 ++ const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
5124 ++ int i, j, nrels, new_sz;
5125 + const struct btf_var_secinfo *vi = NULL;
5126 + const struct btf_type *sec, *var, *def;
5127 + const struct btf_member *member;
5128 +@@ -5074,7 +5075,7 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
5129 +
5130 + vi = btf_var_secinfos(sec) + map->btf_var_idx;
5131 + if (vi->offset <= rel.r_offset &&
5132 +- rel.r_offset + sizeof(void *) <= vi->offset + vi->size)
5133 ++ rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
5134 + break;
5135 + }
5136 + if (j == obj->nr_maps) {
5137 +@@ -5110,17 +5111,20 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
5138 + return -EINVAL;
5139 +
5140 + moff = rel.r_offset - vi->offset - moff;
5141 +- if (moff % ptr_sz)
5142 ++ /* here we use BPF pointer size, which is always 64 bit, as we
5143 ++ * are parsing ELF that was built for BPF target
5144 ++ */
5145 ++ if (moff % bpf_ptr_sz)
5146 + return -EINVAL;
5147 +- moff /= ptr_sz;
5148 ++ moff /= bpf_ptr_sz;
5149 + if (moff >= map->init_slots_sz) {
5150 + new_sz = moff + 1;
5151 +- tmp = realloc(map->init_slots, new_sz * ptr_sz);
5152 ++ tmp = realloc(map->init_slots, new_sz * host_ptr_sz);
5153 + if (!tmp)
5154 + return -ENOMEM;
5155 + map->init_slots = tmp;
5156 + memset(map->init_slots + map->init_slots_sz, 0,
5157 +- (new_sz - map->init_slots_sz) * ptr_sz);
5158 ++ (new_sz - map->init_slots_sz) * host_ptr_sz);
5159 + map->init_slots_sz = new_sz;
5160 + }
5161 + map->init_slots[moff] = targ_map;
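
The libbpf hunk separates two pointer sizes that the old code conflated in one ptr_sz: offsets inside the BPF ELF follow the 64-bit BPF target (bpf_ptr_sz = 8, as the added comment says), while the init_slots array holds host pointers sized by sizeof(void *). On 64-bit hosts the two agree; on a 32-bit host mixing them miscomputes slot indices and under-allocates the array. A runnable illustration:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long bpf_ptr_sz  = 8;              /* BPF target */
        const unsigned long host_ptr_sz = sizeof(void *); /* 4 or 8     */
        unsigned long rel_off = 24;   /* relocation offset in the blob  */

        /* Index within the BPF-side array of map pointers: */
        printf("slot index     = %lu\n", rel_off / bpf_ptr_sz);
        /* Bytes needed host-side to hold four slot pointers: */
        printf("bytes, 4 slots = %lu\n", 4 * host_ptr_sz);
        return 0;
    }
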
5162 +diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
5163 +index 1bb204cee853f..9a0946ddb705a 100644
5164 +--- a/tools/testing/selftests/bpf/.gitignore
5165 ++++ b/tools/testing/selftests/bpf/.gitignore
5166 +@@ -6,7 +6,6 @@ test_lpm_map
5167 + test_tag
5168 + FEATURE-DUMP.libbpf
5169 + fixdep
5170 +-test_align
5171 + test_dev_cgroup
5172 + /test_progs*
5173 + test_tcpbpf_user
5174 +diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
5175 +index 4f322d5388757..50965cc7bf098 100644
5176 +--- a/tools/testing/selftests/bpf/Makefile
5177 ++++ b/tools/testing/selftests/bpf/Makefile
5178 +@@ -32,7 +32,7 @@ LDLIBS += -lcap -lelf -lz -lrt -lpthread
5179 +
5180 + # Order correspond to 'make run_tests' order
5181 + TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
5182 +- test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
5183 ++ test_verifier_log test_dev_cgroup test_tcpbpf_user \
5184 + test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
5185 + test_cgroup_storage \
5186 + test_netcnt test_tcpnotify_user test_sock_fields test_sysctl \
5187 +diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
5188 +index 8a637ca7d73a4..05853b0b88318 100644
5189 +--- a/tools/testing/selftests/cgroup/cgroup_util.c
5190 ++++ b/tools/testing/selftests/cgroup/cgroup_util.c
5191 +@@ -106,7 +106,7 @@ int cg_read_strcmp(const char *cgroup, const char *control,
5192 +
5193 + /* Handle the case of comparing against empty string */
5194 + if (!expected)
5195 +- size = 32;
5196 ++ return -1;
5197 + else
5198 + size = strlen(expected) + 1;
5199 +
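
The cgroup_util fix replaces the old special case, which quietly fell back to a fixed 32-byte read when expected was NULL, with an outright -1: there is nothing well-defined to compare, and letting a NULL flow onward would eventually reach strcmp(). A minimal sketch of the guard, with a simplified signature:

    #include <string.h>

    /* expected == NULL is rejected up front: strcmp() against NULL is
     * undefined behaviour, and a fixed-size read is not a comparison. */
    static int read_strcmp_sketch(const char *actual, const char *expected)
    {
        if (!expected)
            return -1;
        return strcmp(actual, expected) ? -1 : 0;
    }
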
5200 +diff --git a/tools/testing/selftests/kvm/x86_64/debug_regs.c b/tools/testing/selftests/kvm/x86_64/debug_regs.c
5201 +index 8162c58a1234e..b8d14f9db5f9e 100644
5202 +--- a/tools/testing/selftests/kvm/x86_64/debug_regs.c
5203 ++++ b/tools/testing/selftests/kvm/x86_64/debug_regs.c
5204 +@@ -40,11 +40,11 @@ static void guest_code(void)
5205 +
5206 + /* Single step test, covers 2 basic instructions and 2 emulated */
5207 + asm volatile("ss_start: "
5208 +- "xor %%rax,%%rax\n\t"
5209 ++ "xor %%eax,%%eax\n\t"
5210 + "cpuid\n\t"
5211 + "movl $0x1a0,%%ecx\n\t"
5212 + "rdmsr\n\t"
5213 +- : : : "rax", "ecx");
5214 ++ : : : "eax", "ebx", "ecx", "edx");
5215 +
5216 + /* DR6.BD test */
5217 + asm volatile("bd_start: mov %%dr0, %%rax" : : : "rax");
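
The debug_regs hunk repairs the inline-asm clobber list in the single-step test: CPUID writes EAX, EBX, ECX and EDX, so all four belong in the clobbers (the old list named only rax and ecx), and a 32-bit xor is enough to zero the full register since x86-64 zero-extends 32-bit results. A standalone x86-64 version that compiles with GCC or Clang:

    int main(void)
    {
    #if defined(__x86_64__)
        __asm__ volatile("xor %%eax, %%eax\n\t"  /* zero-extends into rax */
                         "cpuid"                 /* leaf 0 */
                         : /* no outputs */
                         : /* no inputs */
                         : "eax", "ebx", "ecx", "edx");
    #endif
        return 0;
    }
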
5218 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
5219 +index 0a68c9d3d3ab1..9e925675a8868 100644
5220 +--- a/virt/kvm/kvm_main.c
5221 ++++ b/virt/kvm/kvm_main.c
5222 +@@ -427,7 +427,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
5223 + * count is also read inside the mmu_lock critical section.
5224 + */
5225 + kvm->mmu_notifier_count++;
5226 +- need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
5227 ++ need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
5228 ++ range->flags);
5229 + need_tlb_flush |= kvm->tlbs_dirty;
5230 + /* we've to flush the tlb before the pages can be freed */
5231 + if (need_tlb_flush)
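
The kvm_main.c hunk threads range->flags through the kvm_unmap_hva_range() call so architecture code can inspect properties of the invalidation (such as whether it is allowed to block) instead of assuming a default. A type-level sketch of the plumbing, with simplified structs rather than the mmu_notifier API:

    struct nrange_sketch {
        unsigned long start, end;
        unsigned int  flags;     /* e.g. a "may block" bit */
    };

    static int unmap_range_sketch(unsigned long start, unsigned long end,
                                  unsigned int flags)
    {
        (void)start; (void)end; (void)flags;
        /* arch-specific teardown would branch on flags here */
        return 0;   /* "TLB flush needed" boolean in the real code */
    }

    static int invalidate_sketch(const struct nrange_sketch *r)
    {
        return unmap_range_sketch(r->start, r->end, r->flags);
    }
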