From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.7 commit in: /
Date: Wed, 26 Aug 2020 11:17:44
Message-Id: 1598440632.b450d96181ba92bf82a87234f78c4273df4d711b.mpagano@gentoo
1 commit: b450d96181ba92bf82a87234f78c4273df4d711b
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Aug 26 11:17:12 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Aug 26 11:17:12 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b450d961
7
8 Linux patch 5.7.18
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1017_linux-5.7.18.patch | 4218 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4222 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 18ff2b2..1ab468f 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -111,6 +111,10 @@ Patch: 1016_linux-5.7.17.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.7.17
23
24 +Patch: 1017_linux-5.7.18.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.7.18
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1017_linux-5.7.18.patch b/1017_linux-5.7.18.patch
33 new file mode 100644
34 index 0000000..8256bbf
35 --- /dev/null
36 +++ b/1017_linux-5.7.18.patch
37 @@ -0,0 +1,4218 @@
38 +diff --git a/Makefile b/Makefile
39 +index c0d34d03ab5f1..b56456c45c97f 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 7
46 +-SUBLEVEL = 17
47 ++SUBLEVEL = 18
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
52 +index e6225cf40de57..b09dd6bc98a12 100644
53 +--- a/arch/alpha/include/asm/io.h
54 ++++ b/arch/alpha/include/asm/io.h
55 +@@ -490,10 +490,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
56 + }
57 + #endif
58 +
59 +-#define ioread16be(p) be16_to_cpu(ioread16(p))
60 +-#define ioread32be(p) be32_to_cpu(ioread32(p))
61 +-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
62 +-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
63 ++#define ioread16be(p) swab16(ioread16(p))
64 ++#define ioread32be(p) swab32(ioread32(p))
65 ++#define iowrite16be(v,p) iowrite16(swab16(v), (p))
66 ++#define iowrite32be(v,p) iowrite32(swab32(v), (p))
67 +
68 + #define inb_p inb
69 + #define inw_p inw
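(Context: the swab16()/swab32() substitution above is behavior-preserving on little-endian Alpha, where be16_to_cpu() is itself a byte swap, but it takes a plain u16/u32 rather than a __be16/__be32 and so silences sparse endianness warnings. A minimal userspace sketch of the swap, illustrative only and not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

static uint16_t my_swab16(uint16_t x)
{
	/* unconditional byte swap, regardless of host endianness */
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	uint16_t raw = 0x1234;	/* 16-bit value as read from a big-endian device */
	printf("0x%04x -> 0x%04x\n", raw, my_swab16(raw));	/* 0x1234 -> 0x3412 */
	return 0;
}
)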
70 +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
71 +index 85e4149cc5d5c..d3c7ffa72902d 100644
72 +--- a/arch/arm64/Makefile
73 ++++ b/arch/arm64/Makefile
74 +@@ -156,6 +156,7 @@ zinstall install:
75 + PHONY += vdso_install
76 + vdso_install:
77 + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
78 ++ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@
79 +
80 + # We use MRPROPER_FILES and CLEAN_FILES now
81 + archclean:
82 +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
83 +index 26fca93cd6972..397e20a359752 100644
84 +--- a/arch/arm64/include/asm/kvm_host.h
85 ++++ b/arch/arm64/include/asm/kvm_host.h
86 +@@ -440,7 +440,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
87 +
88 + #define KVM_ARCH_WANT_MMU_NOTIFIER
89 + int kvm_unmap_hva_range(struct kvm *kvm,
90 +- unsigned long start, unsigned long end);
91 ++ unsigned long start, unsigned long end, unsigned flags);
92 + int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
93 + int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
94 + int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
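(This signature change recurs for every architecture touched below, MIPS, PowerPC and x86 included: it backports the upstream change that passes MMU notifier range flags down to kvm_unmap_hva_range(), even where a given handler ignores them. The shape of the change as a hedged standalone sketch, types simplified:

#include <stdio.h>

/* Stand-in for a per-arch handler: the new 'flags' parameter carries
 * MMU-notifier range flags (e.g. a blockable hint); this backport
 * mostly just threads it through, so several handlers accept and
 * ignore it, as here. */
static int unmap_hva_range(unsigned long start, unsigned long end,
			   unsigned flags)
{
	(void)flags;
	printf("unmapping HVA range [0x%lx, 0x%lx)\n", start, end);
	return 0;
}

int main(void)
{
	return unmap_hva_range(0x1000, 0x3000, 0);
}
)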
95 +diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
96 +index 0433bb58ce52c..601c075f1f476 100644
97 +--- a/arch/arm64/kernel/vdso32/Makefile
98 ++++ b/arch/arm64/kernel/vdso32/Makefile
99 +@@ -201,7 +201,7 @@ quiet_cmd_vdsosym = VDSOSYM $@
100 + cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
101 +
102 + # Install commands for the unstripped file
103 +-quiet_cmd_vdso_install = INSTALL $@
104 ++quiet_cmd_vdso_install = INSTALL32 $@
105 + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
106 +
107 + vdso.so: $(obj)/vdso.so.dbg
108 +diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
109 +index 9138a624c5c81..692f90e7fecc1 100644
110 +--- a/arch/m68k/include/asm/m53xxacr.h
111 ++++ b/arch/m68k/include/asm/m53xxacr.h
112 +@@ -89,9 +89,9 @@
113 + * coherency though in all cases. And for copyback caches we will need
114 + * to push cached data as well.
115 + */
116 +-#define CACHE_INIT CACR_CINVA
117 +-#define CACHE_INVALIDATE CACR_CINVA
118 +-#define CACHE_INVALIDATED CACR_CINVA
119 ++#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC)
120 ++#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA)
121 ++#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
122 +
123 + #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
124 + (0x000f0000) + \
125 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
126 +index caa2b936125cc..8861e9d4eb1f9 100644
127 +--- a/arch/mips/include/asm/kvm_host.h
128 ++++ b/arch/mips/include/asm/kvm_host.h
129 +@@ -939,7 +939,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
130 +
131 + #define KVM_ARCH_WANT_MMU_NOTIFIER
132 + int kvm_unmap_hva_range(struct kvm *kvm,
133 +- unsigned long start, unsigned long end);
134 ++ unsigned long start, unsigned long end, unsigned flags);
135 + int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
136 + int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
137 + int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
138 +diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
139 +index 573509e0f2d4e..3ace115740dd1 100644
140 +--- a/arch/mips/kernel/setup.c
141 ++++ b/arch/mips/kernel/setup.c
142 +@@ -497,7 +497,7 @@ static void __init mips_parse_crashkernel(void)
143 + if (ret != 0 || crash_size <= 0)
144 + return;
145 +
146 +- if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 0)) {
147 ++ if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1)) {
148 + pr_warn("Invalid memory region reserved for crash kernel\n");
149 + return;
150 + }
151 +diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
152 +index 7dad7a293eae9..2514e51d908b4 100644
153 +--- a/arch/mips/kvm/mmu.c
154 ++++ b/arch/mips/kvm/mmu.c
155 +@@ -518,7 +518,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
156 + return 1;
157 + }
158 +
159 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
160 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
161 ++ unsigned flags)
162 + {
163 + handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
164 +
165 +diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
166 +index 77ab25a199740..e808461e6532e 100644
167 +--- a/arch/powerpc/include/asm/fixmap.h
168 ++++ b/arch/powerpc/include/asm/fixmap.h
169 +@@ -52,7 +52,7 @@ enum fixed_addresses {
170 + FIX_HOLE,
171 + /* reserve the top 128K for early debugging purposes */
172 + FIX_EARLY_DEBUG_TOP = FIX_HOLE,
173 +- FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128, PAGE_SIZE)/PAGE_SIZE)-1,
174 ++ FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
175 + #ifdef CONFIG_HIGHMEM
176 + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
177 + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
178 +diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
179 +index 1dc63101ffe18..b82e46ecd7fbd 100644
180 +--- a/arch/powerpc/include/asm/kvm_host.h
181 ++++ b/arch/powerpc/include/asm/kvm_host.h
182 +@@ -58,7 +58,8 @@
183 + #define KVM_ARCH_WANT_MMU_NOTIFIER
184 +
185 + extern int kvm_unmap_hva_range(struct kvm *kvm,
186 +- unsigned long start, unsigned long end);
187 ++ unsigned long start, unsigned long end,
188 ++ unsigned flags);
189 + extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
190 + extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
191 + extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
192 +diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
193 +index 5690a1f9b9767..13f107dff880e 100644
194 +--- a/arch/powerpc/kvm/book3s.c
195 ++++ b/arch/powerpc/kvm/book3s.c
196 +@@ -837,7 +837,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
197 + kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
198 + }
199 +
200 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
201 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
202 ++ unsigned flags)
203 + {
204 + return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
205 + }
206 +diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
207 +index df9989cf7ba3f..9b402c345154f 100644
208 +--- a/arch/powerpc/kvm/e500_mmu_host.c
209 ++++ b/arch/powerpc/kvm/e500_mmu_host.c
210 +@@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
211 + return 0;
212 + }
213 +
214 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
215 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
216 ++ unsigned flags)
217 + {
218 + /* kvm_unmap_hva flushes everything anyways */
219 + kvm_unmap_hva(kvm, start);
220 +diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
221 +index 6d4ee03d476a9..ec04fc7f5a641 100644
222 +--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
223 ++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
224 +@@ -107,22 +107,28 @@ static int pseries_cpu_disable(void)
225 + */
226 + static void pseries_cpu_die(unsigned int cpu)
227 + {
228 +- int tries;
229 + int cpu_status = 1;
230 + unsigned int pcpu = get_hard_smp_processor_id(cpu);
231 ++ unsigned long timeout = jiffies + msecs_to_jiffies(120000);
232 +
233 +- for (tries = 0; tries < 25; tries++) {
234 ++ while (true) {
235 + cpu_status = smp_query_cpu_stopped(pcpu);
236 + if (cpu_status == QCSS_STOPPED ||
237 + cpu_status == QCSS_HARDWARE_ERROR)
238 + break;
239 +- cpu_relax();
240 +
241 ++ if (time_after(jiffies, timeout)) {
242 ++ pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
243 ++ cpu, pcpu);
244 ++ timeout = jiffies + msecs_to_jiffies(120000);
245 ++ }
246 ++
247 ++ cond_resched();
248 + }
249 +
250 +- if (cpu_status != 0) {
251 +- printk("Querying DEAD? cpu %i (%i) shows %i\n",
252 +- cpu, pcpu, cpu_status);
253 ++ if (cpu_status == QCSS_HARDWARE_ERROR) {
254 ++ pr_warn("CPU %i (hwid %i) reported error while dying\n",
255 ++ cpu, pcpu);
256 + }
257 +
258 + /* Isolation and deallocation are definitely done by
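(The rewritten loop above replaces a fixed 25-iteration spin with an open-ended poll that warns every 120 seconds and calls cond_resched() so the waiting CPU can do useful work in between. The same deadline pattern in plain userspace C; names are illustrative and clock_gettime() stands in for jiffies:

#include <sched.h>
#include <stdio.h>
#include <time.h>

static double now_sec(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

static int stopped_yet(void)	/* stand-in for smp_query_cpu_stopped() */
{
	static long polls;
	return ++polls > 100000;	/* pretend the CPU stops eventually */
}

int main(void)
{
	double deadline = now_sec() + 120.0;

	while (!stopped_yet()) {
		if (now_sec() > deadline) {
			fprintf(stderr, "CPU didn't die after 120 seconds\n");
			deadline = now_sec() + 120.0;	/* keep warning, keep waiting */
		}
		sched_yield();	/* analogous to cond_resched() */
	}
	return 0;
}
)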
259 +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
260 +index 16ba5c542e55c..988e9b75ff642 100644
261 +--- a/arch/powerpc/platforms/pseries/ras.c
262 ++++ b/arch/powerpc/platforms/pseries/ras.c
263 +@@ -184,7 +184,6 @@ static void handle_system_shutdown(char event_modifier)
264 + case EPOW_SHUTDOWN_ON_UPS:
265 + pr_emerg("Loss of system power detected. System is running on"
266 + " UPS/battery. Check RTAS error log for details\n");
267 +- orderly_poweroff(true);
268 + break;
269 +
270 + case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
271 +diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
272 +index 0339b6bbe11ab..bf3f34dbe630b 100644
273 +--- a/arch/riscv/kernel/vmlinux.lds.S
274 ++++ b/arch/riscv/kernel/vmlinux.lds.S
275 +@@ -22,6 +22,7 @@ SECTIONS
276 + /* Beginning of code and text segment */
277 + . = LOAD_OFFSET;
278 + _start = .;
279 ++ _stext = .;
280 + HEAD_TEXT_SECTION
281 + . = ALIGN(PAGE_SIZE);
282 +
283 +@@ -49,7 +50,6 @@ SECTIONS
284 + . = ALIGN(SECTION_ALIGN);
285 + .text : {
286 + _text = .;
287 +- _stext = .;
288 + TEXT_TEXT
289 + SCHED_TEXT
290 + CPUIDLE_TEXT
291 +diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
292 +index e007224b65bb2..a266ffed04df5 100644
293 +--- a/arch/s390/kernel/ptrace.c
294 ++++ b/arch/s390/kernel/ptrace.c
295 +@@ -1311,7 +1311,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
296 + cb->pc == 1 &&
297 + cb->qc == 0 &&
298 + cb->reserved2 == 0 &&
299 +- cb->key == PAGE_DEFAULT_KEY &&
300 + cb->reserved3 == 0 &&
301 + cb->reserved4 == 0 &&
302 + cb->reserved5 == 0 &&
303 +@@ -1375,7 +1374,11 @@ static int s390_runtime_instr_set(struct task_struct *target,
304 + kfree(data);
305 + return -EINVAL;
306 + }
307 +-
308 ++ /*
309 ++ * Override access key in any case, since user space should
310 ++ * not be able to set it, nor should it care about it.
311 ++ */
312 ++ ri_cb.key = PAGE_DEFAULT_KEY >> 4;
313 + preempt_disable();
314 + if (!target->thread.ri_cb)
315 + target->thread.ri_cb = data;
316 +diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
317 +index 125c7f6e87150..1788a5454b6fc 100644
318 +--- a/arch/s390/kernel/runtime_instr.c
319 ++++ b/arch/s390/kernel/runtime_instr.c
320 +@@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
321 + cb->k = 1;
322 + cb->ps = 1;
323 + cb->pc = 1;
324 +- cb->key = PAGE_DEFAULT_KEY;
325 ++ cb->key = PAGE_DEFAULT_KEY >> 4;
326 + cb->v = 1;
327 + }
328 +
329 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
330 +index 86e2e0272c576..d4c5d1d6c6f55 100644
331 +--- a/arch/x86/include/asm/kvm_host.h
332 ++++ b/arch/x86/include/asm/kvm_host.h
333 +@@ -1606,7 +1606,8 @@ asmlinkage void kvm_spurious_fault(void);
334 + _ASM_EXTABLE(666b, 667b)
335 +
336 + #define KVM_ARCH_WANT_MMU_NOTIFIER
337 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
338 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
339 ++ unsigned flags);
340 + int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
341 + int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
342 + int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
343 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
344 +index 70cf2c1a1423c..59d096cacb26c 100644
345 +--- a/arch/x86/kvm/mmu/mmu.c
346 ++++ b/arch/x86/kvm/mmu/mmu.c
347 +@@ -1972,7 +1972,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
348 + return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
349 + }
350 +
351 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
352 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
353 ++ unsigned flags)
354 + {
355 + return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
356 + }
357 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
358 +index 51ccb4dfaad26..be195e63f1e69 100644
359 +--- a/arch/x86/kvm/x86.c
360 ++++ b/arch/x86/kvm/x86.c
361 +@@ -956,7 +956,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
362 + {
363 + unsigned long old_cr4 = kvm_read_cr4(vcpu);
364 + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
365 +- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
366 ++ X86_CR4_SMEP;
367 +
368 + if (kvm_valid_cr4(vcpu, cr4))
369 + return 1;
370 +diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
371 +index 91220cc258547..5c11ae66b5d8e 100644
372 +--- a/arch/x86/pci/xen.c
373 ++++ b/arch/x86/pci/xen.c
374 +@@ -26,6 +26,7 @@
375 + #include <asm/xen/pci.h>
376 + #include <asm/xen/cpuid.h>
377 + #include <asm/apic.h>
378 ++#include <asm/acpi.h>
379 + #include <asm/i8259.h>
380 +
381 + static int xen_pcifront_enable_irq(struct pci_dev *dev)
382 +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
383 +index c5e393f8bb3f6..3b0a84c88b7d9 100644
384 +--- a/arch/x86/platform/efi/efi_64.c
385 ++++ b/arch/x86/platform/efi/efi_64.c
386 +@@ -269,6 +269,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
387 + npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
388 + rodata = __pa(__start_rodata);
389 + pfn = rodata >> PAGE_SHIFT;
390 ++
391 ++ pf = _PAGE_NX | _PAGE_ENC;
392 + if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
393 + pr_err("Failed to map kernel rodata 1:1\n");
394 + return 1;
395 +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
396 +index 4d3429b2058fc..8c4d86032c7a3 100644
397 +--- a/drivers/cpufreq/intel_pstate.c
398 ++++ b/drivers/cpufreq/intel_pstate.c
399 +@@ -1572,6 +1572,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
400 +
401 + intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
402 + cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
403 ++ cpu->pstate.turbo_pstate = phy_max;
404 + } else {
405 + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
406 + }
407 +diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
408 +index b3135b208f9a0..0710b1a069270 100644
409 +--- a/drivers/edac/i7core_edac.c
410 ++++ b/drivers/edac/i7core_edac.c
411 +@@ -1710,9 +1710,9 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
412 + if (uncorrected_error) {
413 + core_err_cnt = 1;
414 + if (ripv)
415 +- tp_event = HW_EVENT_ERR_FATAL;
416 +- else
417 + tp_event = HW_EVENT_ERR_UNCORRECTED;
418 ++ else
419 ++ tp_event = HW_EVENT_ERR_FATAL;
420 + } else {
421 + tp_event = HW_EVENT_ERR_CORRECTED;
422 + }
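(The branch swap above, repeated below for pnd2_edac, sb_edac and skx_common, fixes an inverted reading of the MCA status RIPV bit: RIPV set means the return instruction pointer is valid and execution can continue, so it should map to a recoverable uncorrected event, while RIPV clear is the fatal case. The corrected classification in isolation:

#include <stdio.h>

enum hw_event { ERR_CORRECTED, ERR_UNCORRECTED, ERR_FATAL };

/* RIPV set => execution can resume => survivable uncorrected error;
 * RIPV clear => fatal. Corrected errors are unaffected by RIPV. */
static enum hw_event classify(int uncorrected, int ripv)
{
	if (!uncorrected)
		return ERR_CORRECTED;
	return ripv ? ERR_UNCORRECTED : ERR_FATAL;
}

int main(void)
{
	printf("%d %d %d\n", classify(0, 1), classify(1, 1), classify(1, 0));
	return 0;	/* prints 0 1 2 */
}
)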
423 +diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
424 +index bc47328eb4858..fdf214ab8ce44 100644
425 +--- a/drivers/edac/pnd2_edac.c
426 ++++ b/drivers/edac/pnd2_edac.c
427 +@@ -1155,7 +1155,7 @@ static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
428 + u32 optypenum = GET_BITFIELD(m->status, 4, 6);
429 + int rc;
430 +
431 +- tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
432 ++ tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
433 + HW_EVENT_ERR_CORRECTED;
434 +
435 + /*
436 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
437 +index 7d51c82be62ba..6ab863ab6d862 100644
438 +--- a/drivers/edac/sb_edac.c
439 ++++ b/drivers/edac/sb_edac.c
440 +@@ -2982,9 +2982,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
441 + if (uncorrected_error) {
442 + core_err_cnt = 1;
443 + if (ripv) {
444 +- tp_event = HW_EVENT_ERR_FATAL;
445 +- } else {
446 + tp_event = HW_EVENT_ERR_UNCORRECTED;
447 ++ } else {
448 ++ tp_event = HW_EVENT_ERR_FATAL;
449 + }
450 + } else {
451 + tp_event = HW_EVENT_ERR_CORRECTED;
452 +diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
453 +index 412c651bef26b..dfeefacc90d6d 100644
454 +--- a/drivers/edac/skx_common.c
455 ++++ b/drivers/edac/skx_common.c
456 +@@ -494,9 +494,9 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
457 + if (uncorrected_error) {
458 + core_err_cnt = 1;
459 + if (ripv) {
460 +- tp_event = HW_EVENT_ERR_FATAL;
461 +- } else {
462 + tp_event = HW_EVENT_ERR_UNCORRECTED;
463 ++ } else {
464 ++ tp_event = HW_EVENT_ERR_FATAL;
465 + }
466 + } else {
467 + tp_event = HW_EVENT_ERR_CORRECTED;
468 +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
469 +index 99446b3847265..9a0b614e99073 100644
470 +--- a/drivers/firmware/efi/efi.c
471 ++++ b/drivers/firmware/efi/efi.c
472 +@@ -381,6 +381,7 @@ static int __init efisubsys_init(void)
473 + efi_kobj = kobject_create_and_add("efi", firmware_kobj);
474 + if (!efi_kobj) {
475 + pr_err("efi: Firmware registration failed.\n");
476 ++ destroy_workqueue(efi_rts_wq);
477 + return -ENOMEM;
478 + }
479 +
480 +@@ -424,6 +425,7 @@ err_unregister:
481 + generic_ops_unregister();
482 + err_put:
483 + kobject_put(efi_kobj);
484 ++ destroy_workqueue(efi_rts_wq);
485 + return error;
486 + }
487 +
488 +diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
489 +index 9f34c72429397..cac64fdfc3ae4 100644
490 +--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
491 ++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
492 +@@ -73,10 +73,14 @@ void efi_printk(char *str)
493 + */
494 + efi_status_t efi_parse_options(char const *cmdline)
495 + {
496 +- size_t len = strlen(cmdline) + 1;
497 ++ size_t len;
498 + efi_status_t status;
499 + char *str, *buf;
500 +
501 ++ if (!cmdline)
502 ++ return EFI_SUCCESS;
503 ++
504 ++ len = strlen(cmdline) + 1;
505 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, len, (void **)&buf);
506 + if (status != EFI_SUCCESS)
507 + return status;
508 +@@ -87,6 +91,8 @@ efi_status_t efi_parse_options(char const *cmdline)
509 + char *param, *val;
510 +
511 + str = next_arg(str, &param, &val);
512 ++ if (!val && !strcmp(param, "--"))
513 ++ break;
514 +
515 + if (!strcmp(param, "nokaslr")) {
516 + efi_nokaslr = true;
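(Two defensive guards appear above: a NULL command line is treated as success instead of being handed to strlen(), and parsing stops at a bare "--", which by kernel convention separates boot parameters from init arguments. A hedged userspace sketch of the same control flow, with tokenization simplified to strtok():

#include <stdio.h>
#include <string.h>

static int parse_options(const char *cmdline)
{
	char buf[256], *tok;

	if (!cmdline)		/* nothing to parse is not an error */
		return 0;

	strncpy(buf, cmdline, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	for (tok = strtok(buf, " "); tok; tok = strtok(NULL, " ")) {
		if (!strcmp(tok, "--"))	/* init arguments follow; stop */
			break;
		printf("option: %s\n", tok);
	}
	return 0;
}

int main(void)
{
	parse_options(NULL);			/* succeeds, parses nothing */
	parse_options("nokaslr quiet -- /sbin/init");
	return 0;
}
)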
517 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
518 +index 7cb4fe479614e..debad34015913 100644
519 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
520 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
521 +@@ -1983,6 +1983,7 @@ void amdgpu_dm_update_connector_after_detect(
522 +
523 + drm_connector_update_edid_property(connector,
524 + aconnector->edid);
525 ++ drm_add_edid_modes(connector, aconnector->edid);
526 +
527 + if (aconnector->dc_link->aux_mode)
528 + drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
529 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
530 +index 3f157bcc174b9..92079e2fa515a 100644
531 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
532 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
533 +@@ -3113,12 +3113,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
534 + dc_is_virtual_signal(pipe_ctx->stream->signal))
535 + return;
536 +
537 ++ dc->hwss.blank_stream(pipe_ctx);
538 + #if defined(CONFIG_DRM_AMD_DC_HDCP)
539 + update_psp_stream_config(pipe_ctx, true);
540 + #endif
541 +
542 +- dc->hwss.blank_stream(pipe_ctx);
543 +-
544 + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
545 + deallocate_mst_payload(pipe_ctx);
546 +
547 +@@ -3146,11 +3145,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
548 + write_i2c_redriver_setting(pipe_ctx, false);
549 + }
550 + }
551 +-
552 +- disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
553 +-
554 + dc->hwss.disable_stream(pipe_ctx);
555 +
556 ++ disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
557 + if (pipe_ctx->stream->timing.flags.DSC) {
558 + if (dc_is_dp_signal(pipe_ctx->stream->signal))
559 + dp_set_dsc_enable(pipe_ctx, false);
560 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
561 +index 1ada01322cd2c..caa090d0b6acc 100644
562 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
563 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
564 +@@ -1103,10 +1103,6 @@ static inline enum link_training_result perform_link_training_int(
565 + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
566 + dpcd_set_training_pattern(link, dpcd_pattern);
567 +
568 +- /* delay 5ms after notifying sink of idle pattern before switching output */
569 +- if (link->connector_signal != SIGNAL_TYPE_EDP)
570 +- msleep(5);
571 +-
572 + /* 4. mainlink output idle pattern*/
573 + dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
574 +
575 +@@ -1556,12 +1552,6 @@ bool perform_link_training_with_retries(
576 + struct dc_link *link = stream->link;
577 + enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
578 +
579 +- /* We need to do this before the link training to ensure the idle pattern in SST
580 +- * mode will be sent right after the link training
581 +- */
582 +- link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
583 +- pipe_ctx->stream_res.stream_enc->id, true);
584 +-
585 + for (j = 0; j < attempts; ++j) {
586 +
587 + dp_enable_link_phy(
588 +@@ -1578,6 +1568,12 @@ bool perform_link_training_with_retries(
589 +
590 + dp_set_panel_mode(link, panel_mode);
591 +
592 ++ /* We need to do this before the link training to ensure the idle pattern in SST
593 ++ * mode will be sent right after the link training
594 ++ */
595 ++ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
596 ++ pipe_ctx->stream_res.stream_enc->id, true);
597 ++
598 + if (link->aux_access_disabled) {
599 + dc_link_dp_perform_link_training_skip_aux(link, link_setting);
600 + return true;
601 +diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
602 +index 24ca592c90df5..10527593868cc 100644
603 +--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
604 ++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
605 +@@ -1090,17 +1090,8 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
606 + dc_link_set_abm_disable(link);
607 + }
608 +
609 +- if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
610 ++ if (dc_is_dp_signal(pipe_ctx->stream->signal))
611 + pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
612 +-
613 +- /*
614 +- * After output is idle pattern some sinks need time to recognize the stream
615 +- * has changed or they enter protection state and hang.
616 +- */
617 +- if (!dc_is_embedded_signal(pipe_ctx->stream->signal))
618 +- msleep(60);
619 +- }
620 +-
621 + }
622 +
623 +
624 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
625 +index c4fa13e4eaf96..ab93cecb78f68 100644
626 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
627 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
628 +@@ -1386,8 +1386,8 @@ static void dcn20_update_dchubp_dpp(
629 +
630 + /* Any updates are handled in dc interface, just need to apply existing for plane enable */
631 + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
632 +- pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
633 +- && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
634 ++ pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
635 ++ pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
636 + dc->hwss.set_cursor_position(pipe_ctx);
637 + dc->hwss.set_cursor_attribute(pipe_ctx);
638 +
639 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
640 +index 2719cdecc1cb0..d37ede03510ff 100644
641 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
642 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
643 +@@ -3031,7 +3031,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
644 + int vlevel = 0;
645 + int pipe_split_from[MAX_PIPES];
646 + int pipe_cnt = 0;
647 +- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
648 ++ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
649 + DC_LOGGER_INIT(dc->ctx->logger);
650 +
651 + BW_VAL_TRACE_COUNT();
652 +diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
653 +index 89ef9f6860e5b..16df2a485dd0d 100644
654 +--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
655 ++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
656 +@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
657 + */
658 + static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
659 + {
660 ++ if (arg1.value == 0)
661 ++ return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
662 ++
663 + return dc_fixpt_exp(
664 + dc_fixpt_mul(
665 + dc_fixpt_log(arg1),
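(The guard added above matters because dc_fixpt_pow() computes a^b as exp(b * log(a)), and log(0) diverges, so a zero base must be special-cased: 0^0 is defined as 1 and 0^b as 0. The same identity with plain doubles standing in for the driver's fixed31_32 type:

#include <math.h>
#include <stdio.h>

static double fixpt_pow(double a, double b)
{
	if (a == 0.0)
		return b == 0.0 ? 1.0 : 0.0;	/* log(0) would diverge */
	return exp(b * log(a));			/* a^b = e^(b*ln a) */
}

int main(void)
{
	/* prints 1024 0 1 */
	printf("%g %g %g\n", fixpt_pow(2, 10), fixpt_pow(0, 3), fixpt_pow(0, 0));
	return 0;
}
)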
666 +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
667 +index 346e3f9fd505a..a68eff1fb4297 100644
668 +--- a/drivers/gpu/drm/panel/panel-simple.c
669 ++++ b/drivers/gpu/drm/panel/panel-simple.c
670 +@@ -1537,7 +1537,7 @@ static const struct drm_display_mode frida_frd350h54004_mode = {
671 + .vsync_end = 240 + 2 + 6,
672 + .vtotal = 240 + 2 + 6 + 2,
673 + .vrefresh = 60,
674 +- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
675 ++ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
676 + };
677 +
678 + static const struct panel_desc frida_frd350h54004 = {
679 +diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
680 +index 72100b84c7a90..b08fdfa4291b2 100644
681 +--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
682 ++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
683 +@@ -505,8 +505,10 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
684 + int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
685 + void *buf, int len, int write)
686 + {
687 +- unsigned long offset = (addr) - vma->vm_start;
688 + struct ttm_buffer_object *bo = vma->vm_private_data;
689 ++ unsigned long offset = (addr) - vma->vm_start +
690 ++ ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
691 ++ << PAGE_SHIFT);
692 + int ret;
693 +
694 + if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
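(The recomputed offset above covers the case where the VMA does not start at the buffer object's first page: userspace may mmap() at a page offset into the object, so the byte offset inside the BO is the offset into the VMA plus the VMA's page offset relative to the object's base node. Plain arithmetic illustration with made-up values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t addr     = 0x7f0000003000, vm_start   = 0x7f0000000000;
	uint64_t vm_pgoff = 0x100010,       node_start = 0x100000;

	/* distance into the VMA, plus the VMA's start inside the object */
	uint64_t offset = (addr - vm_start) +
			  ((vm_pgoff - node_start) << PAGE_SHIFT);
	printf("offset into BO: 0x%" PRIx64 "\n", offset);	/* 0x13000 */
	return 0;
}
)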
695 +diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
696 +index 909eba43664a2..204d1df5a21d1 100644
697 +--- a/drivers/gpu/drm/vgem/vgem_drv.c
698 ++++ b/drivers/gpu/drm/vgem/vgem_drv.c
699 +@@ -229,32 +229,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
700 + return 0;
701 + }
702 +
703 +-static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
704 +- uint32_t handle, uint64_t *offset)
705 +-{
706 +- struct drm_gem_object *obj;
707 +- int ret;
708 +-
709 +- obj = drm_gem_object_lookup(file, handle);
710 +- if (!obj)
711 +- return -ENOENT;
712 +-
713 +- if (!obj->filp) {
714 +- ret = -EINVAL;
715 +- goto unref;
716 +- }
717 +-
718 +- ret = drm_gem_create_mmap_offset(obj);
719 +- if (ret)
720 +- goto unref;
721 +-
722 +- *offset = drm_vma_node_offset_addr(&obj->vma_node);
723 +-unref:
724 +- drm_gem_object_put_unlocked(obj);
725 +-
726 +- return ret;
727 +-}
728 +-
729 + static struct drm_ioctl_desc vgem_ioctls[] = {
730 + DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
731 + DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
732 +@@ -448,7 +422,6 @@ static struct drm_driver vgem_driver = {
733 + .fops = &vgem_driver_fops,
734 +
735 + .dumb_create = vgem_gem_dumb_create,
736 +- .dumb_map_offset = vgem_gem_dumb_map,
737 +
738 + .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
739 + .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
740 +diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
741 +index 512daff920387..1fc3fa00685d0 100644
742 +--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
743 ++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
744 +@@ -180,6 +180,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
745 +
746 + virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
747 + vfpriv->ctx_id, buflist, out_fence);
748 ++ dma_fence_put(&out_fence->f);
749 + virtio_gpu_notify(vgdev);
750 + return 0;
751 +
752 +diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
753 +index b12fbc857f942..5c41e13496a02 100644
754 +--- a/drivers/infiniband/hw/bnxt_re/main.c
755 ++++ b/drivers/infiniband/hw/bnxt_re/main.c
756 +@@ -811,7 +811,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
757 + struct ib_event event;
758 + unsigned int flags;
759 +
760 +- if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
761 ++ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
762 ++ rdma_is_kernel_res(&qp->ib_qp.res)) {
763 + flags = bnxt_re_lock_cqs(qp);
764 + bnxt_qplib_add_flush_qp(&qp->qplib_qp);
765 + bnxt_re_unlock_cqs(qp, flags);
766 +diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
767 +index 7c6fd720fb2ea..c018fc633cca3 100644
768 +--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
769 ++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
770 +@@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
771 + case IB_WR_ATOMIC_CMP_AND_SWP:
772 + case IB_WR_ATOMIC_FETCH_AND_ADD:
773 + case IB_WR_RDMA_WRITE:
774 ++ case IB_WR_RDMA_WRITE_WITH_IMM:
775 + switch (prev->wr.opcode) {
776 + case IB_WR_TID_RDMA_WRITE:
777 + req = wqe_to_tid_req(prev);
778 +diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
779 +index 527ae0b9a191e..0b4a3039f312f 100644
780 +--- a/drivers/input/mouse/psmouse-base.c
781 ++++ b/drivers/input/mouse/psmouse-base.c
782 +@@ -2042,7 +2042,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
783 + {
784 + int type = *((unsigned int *)kp->arg);
785 +
786 +- return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
787 ++ return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
788 + }
789 +
790 + static int __init psmouse_init(void)
791 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
792 +index b4d23d9f30f9b..d5477faa14edd 100644
793 +--- a/drivers/md/bcache/super.c
794 ++++ b/drivers/md/bcache/super.c
795 +@@ -825,19 +825,19 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
796 + struct request_queue *q;
797 + const size_t max_stripes = min_t(size_t, INT_MAX,
798 + SIZE_MAX / sizeof(atomic_t));
799 +- size_t n;
800 ++ uint64_t n;
801 + int idx;
802 +
803 + if (!d->stripe_size)
804 + d->stripe_size = 1 << 31;
805 +
806 +- d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
807 +-
808 +- if (!d->nr_stripes || d->nr_stripes > max_stripes) {
809 +- pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
810 +- (unsigned int)d->nr_stripes);
811 ++ n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
812 ++ if (!n || n > max_stripes) {
813 ++ pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n",
814 ++ n);
815 + return -ENOMEM;
816 + }
817 ++ d->nr_stripes = n;
818 +
819 + n = d->nr_stripes * sizeof(atomic_t);
820 + d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
821 +diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
822 +index fadbdeeb44955..293867b9e7961 100644
823 +--- a/drivers/media/pci/ttpci/budget-core.c
824 ++++ b/drivers/media/pci/ttpci/budget-core.c
825 +@@ -369,20 +369,25 @@ static int budget_register(struct budget *budget)
826 + ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
827 +
828 + if (ret < 0)
829 +- return ret;
830 ++ goto err_release_dmx;
831 +
832 + budget->mem_frontend.source = DMX_MEMORY_FE;
833 + ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
834 + if (ret < 0)
835 +- return ret;
836 ++ goto err_release_dmx;
837 +
838 + ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
839 + if (ret < 0)
840 +- return ret;
841 ++ goto err_release_dmx;
842 +
843 + dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
844 +
845 + return 0;
846 ++
847 ++err_release_dmx:
848 ++ dvb_dmxdev_release(&budget->dmxdev);
849 ++ dvb_dmx_release(&budget->demux);
850 ++ return ret;
851 + }
852 +
853 + static void budget_unregister(struct budget *budget)
854 +diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
855 +index d38d2bbb6f0f8..7000f0bf0b353 100644
856 +--- a/drivers/media/platform/davinci/vpss.c
857 ++++ b/drivers/media/platform/davinci/vpss.c
858 +@@ -505,19 +505,31 @@ static void vpss_exit(void)
859 +
860 + static int __init vpss_init(void)
861 + {
862 ++ int ret;
863 ++
864 + if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
865 + return -EBUSY;
866 +
867 + oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
868 + if (unlikely(!oper_cfg.vpss_regs_base2)) {
869 +- release_mem_region(VPSS_CLK_CTRL, 4);
870 +- return -ENOMEM;
871 ++ ret = -ENOMEM;
872 ++ goto err_ioremap;
873 + }
874 +
875 + writel(VPSS_CLK_CTRL_VENCCLKEN |
876 +- VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
877 ++ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
878 ++
879 ++ ret = platform_driver_register(&vpss_driver);
880 ++ if (ret)
881 ++ goto err_pd_register;
882 ++
883 ++ return 0;
884 +
885 +- return platform_driver_register(&vpss_driver);
886 ++err_pd_register:
887 ++ iounmap(oper_cfg.vpss_regs_base2);
888 ++err_ioremap:
889 ++ release_mem_region(VPSS_CLK_CTRL, 4);
890 ++ return ret;
891 + }
892 + subsys_initcall(vpss_init);
893 + module_exit(vpss_exit);
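(vpss_init() now unwinds with the kernel's usual goto ladder, as does the budget_register() fix above: each failure jumps to a label that releases exactly the resources acquired so far, in reverse order. A compact sketch of the shape, with malloc()/free() standing in for request_mem_region()/ioremap() and their releases:

#include <stdio.h>
#include <stdlib.h>

static int subsystem_init(void)
{
	void *region, *regs;
	int register_failed = 0;	/* pretend registration succeeds */
	int ret;

	region = malloc(16);		/* request_mem_region() analogue */
	if (!region)
		return -1;

	regs = malloc(16);		/* ioremap() analogue */
	if (!regs) {
		ret = -1;
		goto err_ioremap;
	}

	if (register_failed) {		/* platform_driver_register() analogue */
		ret = -1;
		goto err_register;
	}
	return 0;			/* success: resources stay live */

err_register:
	free(regs);			/* iounmap() analogue */
err_ioremap:
	free(region);			/* release_mem_region() analogue */
	return ret;
}

int main(void)
{
	return subsystem_init();
}
)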
894 +diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
895 +index 3fdc9f964a3c6..2483641799dfb 100644
896 +--- a/drivers/media/platform/qcom/camss/camss.c
897 ++++ b/drivers/media/platform/qcom/camss/camss.c
898 +@@ -504,7 +504,6 @@ static int camss_of_parse_ports(struct camss *camss)
899 + return num_subdevs;
900 +
901 + err_cleanup:
902 +- v4l2_async_notifier_cleanup(&camss->notifier);
903 + of_node_put(node);
904 + return ret;
905 + }
906 +@@ -835,29 +834,38 @@ static int camss_probe(struct platform_device *pdev)
907 + camss->csid_num = 4;
908 + camss->vfe_num = 2;
909 + } else {
910 +- return -EINVAL;
911 ++ ret = -EINVAL;
912 ++ goto err_free;
913 + }
914 +
915 + camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
916 + sizeof(*camss->csiphy), GFP_KERNEL);
917 +- if (!camss->csiphy)
918 +- return -ENOMEM;
919 ++ if (!camss->csiphy) {
920 ++ ret = -ENOMEM;
921 ++ goto err_free;
922 ++ }
923 +
924 + camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
925 + GFP_KERNEL);
926 +- if (!camss->csid)
927 +- return -ENOMEM;
928 ++ if (!camss->csid) {
929 ++ ret = -ENOMEM;
930 ++ goto err_free;
931 ++ }
932 +
933 + camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
934 + GFP_KERNEL);
935 +- if (!camss->vfe)
936 +- return -ENOMEM;
937 ++ if (!camss->vfe) {
938 ++ ret = -ENOMEM;
939 ++ goto err_free;
940 ++ }
941 +
942 + v4l2_async_notifier_init(&camss->notifier);
943 +
944 + num_subdevs = camss_of_parse_ports(camss);
945 +- if (num_subdevs < 0)
946 +- return num_subdevs;
947 ++ if (num_subdevs < 0) {
948 ++ ret = num_subdevs;
949 ++ goto err_cleanup;
950 ++ }
951 +
952 + ret = camss_init_subdevices(camss);
953 + if (ret < 0)
954 +@@ -936,6 +944,8 @@ err_register_entities:
955 + v4l2_device_unregister(&camss->v4l2_dev);
956 + err_cleanup:
957 + v4l2_async_notifier_cleanup(&camss->notifier);
958 ++err_free:
959 ++ kfree(camss);
960 +
961 + return ret;
962 + }
963 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
964 +index 6b40b5ab143a7..07624e89b96d6 100644
965 +--- a/drivers/net/bonding/bond_main.c
966 ++++ b/drivers/net/bonding/bond_main.c
967 +@@ -2084,7 +2084,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
968 + int ret;
969 +
970 + ret = __bond_release_one(bond_dev, slave_dev, false, true);
971 +- if (ret == 0 && !bond_has_slaves(bond)) {
972 ++ if (ret == 0 && !bond_has_slaves(bond) &&
973 ++ bond_dev->reg_state != NETREG_UNREGISTERING) {
974 + bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
975 + netdev_info(bond_dev, "Destroying bond\n");
976 + bond_remove_proc_entry(bond);
977 +@@ -2824,6 +2825,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
978 + if (bond_time_in_interval(bond, last_rx, 1)) {
979 + bond_propose_link_state(slave, BOND_LINK_UP);
980 + commit++;
981 ++ } else if (slave->link == BOND_LINK_BACK) {
982 ++ bond_propose_link_state(slave, BOND_LINK_FAIL);
983 ++ commit++;
984 + }
985 + continue;
986 + }
987 +@@ -2932,6 +2936,19 @@ static void bond_ab_arp_commit(struct bonding *bond)
988 +
989 + continue;
990 +
991 ++ case BOND_LINK_FAIL:
992 ++ bond_set_slave_link_state(slave, BOND_LINK_FAIL,
993 ++ BOND_SLAVE_NOTIFY_NOW);
994 ++ bond_set_slave_inactive_flags(slave,
995 ++ BOND_SLAVE_NOTIFY_NOW);
996 ++
997 ++ /* A slave has just been enslaved and has become
998 ++ * the current active slave.
999 ++ */
1000 ++ if (rtnl_dereference(bond->curr_active_slave))
1001 ++ RCU_INIT_POINTER(bond->current_arp_slave, NULL);
1002 ++ continue;
1003 ++
1004 + default:
1005 + slave_err(bond->dev, slave->dev,
1006 + "impossible: link_new_state %d on slave\n",
1007 +@@ -2982,8 +2999,6 @@ static bool bond_ab_arp_probe(struct bonding *bond)
1008 + return should_notify_rtnl;
1009 + }
1010 +
1011 +- bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
1012 +-
1013 + bond_for_each_slave_rcu(bond, slave, iter) {
1014 + if (!found && !before && bond_slave_is_up(slave))
1015 + before = slave;
1016 +@@ -4336,13 +4351,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
1017 + return ret;
1018 + }
1019 +
1020 ++static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
1021 ++{
1022 ++ if (speed == 0 || speed == SPEED_UNKNOWN)
1023 ++ speed = slave->speed;
1024 ++ else
1025 ++ speed = min(speed, slave->speed);
1026 ++
1027 ++ return speed;
1028 ++}
1029 ++
1030 + static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
1031 + struct ethtool_link_ksettings *cmd)
1032 + {
1033 + struct bonding *bond = netdev_priv(bond_dev);
1034 +- unsigned long speed = 0;
1035 + struct list_head *iter;
1036 + struct slave *slave;
1037 ++ u32 speed = 0;
1038 +
1039 + cmd->base.duplex = DUPLEX_UNKNOWN;
1040 + cmd->base.port = PORT_OTHER;
1041 +@@ -4354,8 +4379,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
1042 + */
1043 + bond_for_each_slave(bond, slave, iter) {
1044 + if (bond_slave_can_tx(slave)) {
1045 +- if (slave->speed != SPEED_UNKNOWN)
1046 +- speed += slave->speed;
1047 ++ if (slave->speed != SPEED_UNKNOWN) {
1048 ++ if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
1049 ++ speed = bond_mode_bcast_speed(slave,
1050 ++ speed);
1051 ++ else
1052 ++ speed += slave->speed;
1053 ++ }
1054 + if (cmd->base.duplex == DUPLEX_UNKNOWN &&
1055 + slave->duplex != DUPLEX_UNKNOWN)
1056 + cmd->base.duplex = slave->duplex;
1057 +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
1058 +index c283593bef17e..dc1979096302b 100644
1059 +--- a/drivers/net/dsa/b53/b53_common.c
1060 ++++ b/drivers/net/dsa/b53/b53_common.c
1061 +@@ -1556,6 +1556,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
1062 + return ret;
1063 +
1064 + switch (ret) {
1065 ++ case -ETIMEDOUT:
1066 ++ return ret;
1067 + case -ENOSPC:
1068 + dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
1069 + addr, vid);
1070 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1071 +index 15ce93be05eac..c501a4edc34d6 100644
1072 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
1073 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1074 +@@ -2166,13 +2166,10 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter,
1075 + int i;
1076 +
1077 + for (i = first_index; i < first_index + count; i++) {
1078 +- /* Check if napi was initialized before */
1079 +- if (!ENA_IS_XDP_INDEX(adapter, i) ||
1080 +- adapter->ena_napi[i].xdp_ring)
1081 +- netif_napi_del(&adapter->ena_napi[i].napi);
1082 +- else
1083 +- WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
1084 +- adapter->ena_napi[i].xdp_ring);
1085 ++ netif_napi_del(&adapter->ena_napi[i].napi);
1086 ++
1087 ++ WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
1088 ++ adapter->ena_napi[i].xdp_ring);
1089 + }
1090 + }
1091 +
1092 +@@ -3508,16 +3505,14 @@ static void ena_fw_reset_device(struct work_struct *work)
1093 + {
1094 + struct ena_adapter *adapter =
1095 + container_of(work, struct ena_adapter, reset_task);
1096 +- struct pci_dev *pdev = adapter->pdev;
1097 +
1098 +- if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1099 +- dev_err(&pdev->dev,
1100 +- "device reset schedule while reset bit is off\n");
1101 +- return;
1102 +- }
1103 + rtnl_lock();
1104 +- ena_destroy_device(adapter, false);
1105 +- ena_restore_device(adapter);
1106 ++
1107 ++ if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1108 ++ ena_destroy_device(adapter, false);
1109 ++ ena_restore_device(adapter);
1110 ++ }
1111 ++
1112 + rtnl_unlock();
1113 + }
1114 +
1115 +@@ -4351,8 +4346,11 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
1116 + netdev->rx_cpu_rmap = NULL;
1117 + }
1118 + #endif /* CONFIG_RFS_ACCEL */
1119 +- del_timer_sync(&adapter->timer_service);
1120 +
1121 ++ /* Make sure timer and reset routine won't be called after
1122 ++ * freeing device resources.
1123 ++ */
1124 ++ del_timer_sync(&adapter->timer_service);
1125 + cancel_work_sync(&adapter->reset_task);
1126 +
1127 + rtnl_lock(); /* lock released inside the below if-else block */
1128 +diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
1129 +index 5359fb40578db..e641890e9702f 100644
1130 +--- a/drivers/net/ethernet/cortina/gemini.c
1131 ++++ b/drivers/net/ethernet/cortina/gemini.c
1132 +@@ -2388,7 +2388,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
1133 +
1134 + dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
1135 +
1136 +- netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
1137 ++ netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM);
1138 + if (!netdev) {
1139 + dev_err(dev, "Can't allocate ethernet device #%d\n", id);
1140 + return -ENOMEM;
1141 +@@ -2520,7 +2520,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
1142 + }
1143 +
1144 + port->netdev = NULL;
1145 +- free_netdev(netdev);
1146 + return ret;
1147 + }
1148 +
1149 +@@ -2529,7 +2528,6 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
1150 + struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
1151 +
1152 + gemini_port_remove(port);
1153 +- free_netdev(port->netdev);
1154 + return 0;
1155 + }
1156 +
1157 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
1158 +index bf73bc9bf35b9..76abafd099e22 100644
1159 +--- a/drivers/net/ethernet/freescale/fec_main.c
1160 ++++ b/drivers/net/ethernet/freescale/fec_main.c
1161 +@@ -3719,11 +3719,11 @@ failed_mii_init:
1162 + failed_irq:
1163 + failed_init:
1164 + fec_ptp_stop(pdev);
1165 +- if (fep->reg_phy)
1166 +- regulator_disable(fep->reg_phy);
1167 + failed_reset:
1168 + pm_runtime_put_noidle(&pdev->dev);
1169 + pm_runtime_disable(&pdev->dev);
1170 ++ if (fep->reg_phy)
1171 ++ regulator_disable(fep->reg_phy);
1172 + failed_regulator:
1173 + clk_disable_unprepare(fep->clk_ahb);
1174 + failed_clk_ahb:
1175 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
1176 +index aa5f1c0aa7215..0921785a10795 100644
1177 +--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
1178 ++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
1179 +@@ -1211,7 +1211,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
1180 + #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
1181 + #define I40E_AQC_SET_VSI_DEFAULT 0x08
1182 + #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
1183 +-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
1184 ++#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000
1185 + __le16 seid;
1186 + #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
1187 + __le16 vlan_tag;
1188 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
1189 +index 45b90eb11adba..21e44c6cd5eac 100644
1190 +--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
1191 ++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
1192 +@@ -1969,6 +1969,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1193 + return status;
1194 + }
1195 +
1196 ++/**
1197 ++ * i40e_is_aq_api_ver_ge
1198 ++ * @aq: pointer to AdminQ info containing HW API version to compare
1199 ++ * @maj: API major value
1200 ++ * @min: API minor value
1201 ++ *
1202 ++ * Assert whether current HW API version is greater/equal than provided.
1203 ++ **/
1204 ++static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
1205 ++ u16 min)
1206 ++{
1207 ++ return (aq->api_maj_ver > maj ||
1208 ++ (aq->api_maj_ver == maj && aq->api_min_ver >= min));
1209 ++}
1210 ++
1211 + /**
1212 + * i40e_aq_add_vsi
1213 + * @hw: pointer to the hw struct
1214 +@@ -2094,18 +2109,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
1215 +
1216 + if (set) {
1217 + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1218 +- if (rx_only_promisc &&
1219 +- (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
1220 +- (hw->aq.api_maj_ver > 1)))
1221 +- flags |= I40E_AQC_SET_VSI_PROMISC_TX;
1222 ++ if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1223 ++ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1224 + }
1225 +
1226 + cmd->promiscuous_flags = cpu_to_le16(flags);
1227 +
1228 + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1229 +- if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
1230 +- (hw->aq.api_maj_ver > 1))
1231 +- cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
1232 ++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1233 ++ cmd->valid_flags |=
1234 ++ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1235 +
1236 + cmd->seid = cpu_to_le16(seid);
1237 + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1238 +@@ -2202,11 +2215,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
1239 + i40e_fill_default_direct_cmd_desc(&desc,
1240 + i40e_aqc_opc_set_vsi_promiscuous_modes);
1241 +
1242 +- if (enable)
1243 ++ if (enable) {
1244 + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1245 ++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1246 ++ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1247 ++ }
1248 +
1249 + cmd->promiscuous_flags = cpu_to_le16(flags);
1250 + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1251 ++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1252 ++ cmd->valid_flags |=
1253 ++ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1254 + cmd->seid = cpu_to_le16(seid);
1255 + cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
1256 +
1257 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
1258 +index 80dc5fcb82db7..deb2d77ef975e 100644
1259 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
1260 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
1261 +@@ -15344,6 +15344,9 @@ static void i40e_remove(struct pci_dev *pdev)
1262 + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
1263 + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
1264 +
1265 ++ while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1266 ++ usleep_range(1000, 2000);
1267 ++
1268 + /* no more scheduling of any task */
1269 + set_bit(__I40E_SUSPENDED, pf->state);
1270 + set_bit(__I40E_DOWN, pf->state);
1271 +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
1272 +index c7020ff2f490d..2ec89c99b6444 100644
1273 +--- a/drivers/net/ethernet/intel/igc/igc_main.c
1274 ++++ b/drivers/net/ethernet/intel/igc/igc_main.c
1275 +@@ -4801,6 +4801,8 @@ static int igc_probe(struct pci_dev *pdev,
1276 + device_set_wakeup_enable(&adapter->pdev->dev,
1277 + adapter->flags & IGC_FLAG_WOL_SUPPORTED);
1278 +
1279 ++ igc_ptp_init(adapter);
1280 ++
1281 + /* reset the hardware with the new settings */
1282 + igc_reset(adapter);
1283 +
1284 +@@ -4817,9 +4819,6 @@ static int igc_probe(struct pci_dev *pdev,
1285 + /* carrier off reporting is important to ethtool even BEFORE open */
1286 + netif_carrier_off(netdev);
1287 +
1288 +- /* do hw tstamp init after resetting */
1289 +- igc_ptp_init(adapter);
1290 +-
1291 + /* Check if Media Autosense is enabled */
1292 + adapter->ei = *ei;
1293 +
1294 +diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
1295 +index f99c514ad0f47..4f67bdd1948b5 100644
1296 +--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
1297 ++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
1298 +@@ -620,8 +620,6 @@ void igc_ptp_init(struct igc_adapter *adapter)
1299 + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
1300 + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
1301 +
1302 +- igc_ptp_reset(adapter);
1303 +-
1304 + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
1305 + &adapter->pdev->dev);
1306 + if (IS_ERR(adapter->ptp_clock)) {
1307 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1308 +index b8b7fc13b3dc4..016fec19063a5 100644
1309 +--- a/drivers/net/hyperv/netvsc_drv.c
1310 ++++ b/drivers/net/hyperv/netvsc_drv.c
1311 +@@ -502,7 +502,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
1312 + int rc;
1313 +
1314 + skb->dev = vf_netdev;
1315 +- skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
1316 ++ skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
1317 +
1318 + rc = dev_queue_xmit(skb);
1319 + if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
1320 +diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
1321 +index f195f278a83aa..7768f1120c1f6 100644
1322 +--- a/drivers/net/ipvlan/ipvlan_main.c
1323 ++++ b/drivers/net/ipvlan/ipvlan_main.c
1324 +@@ -106,12 +106,21 @@ static void ipvlan_port_destroy(struct net_device *dev)
1325 + kfree(port);
1326 + }
1327 +
1328 ++#define IPVLAN_ALWAYS_ON_OFLOADS \
1329 ++ (NETIF_F_SG | NETIF_F_HW_CSUM | \
1330 ++ NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
1331 ++
1332 ++#define IPVLAN_ALWAYS_ON \
1333 ++ (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
1334 ++
1335 + #define IPVLAN_FEATURES \
1336 +- (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
1337 ++ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
1338 + NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
1339 + NETIF_F_GRO | NETIF_F_RXCSUM | \
1340 + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
1341 +
1342 ++ /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */
1343 ++
1344 + #define IPVLAN_STATE_MASK \
1345 + ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
1346 +
1347 +@@ -125,7 +134,9 @@ static int ipvlan_init(struct net_device *dev)
1348 + dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
1349 + (phy_dev->state & IPVLAN_STATE_MASK);
1350 + dev->features = phy_dev->features & IPVLAN_FEATURES;
1351 +- dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
1352 ++ dev->features |= IPVLAN_ALWAYS_ON;
1353 ++ dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
1354 ++ dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
1355 + dev->hw_enc_features |= dev->features;
1356 + dev->gso_max_size = phy_dev->gso_max_size;
1357 + dev->gso_max_segs = phy_dev->gso_max_segs;
1358 +@@ -225,7 +236,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev,
1359 + {
1360 + struct ipvl_dev *ipvlan = netdev_priv(dev);
1361 +
1362 +- return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
1363 ++ features |= NETIF_F_ALL_FOR_ALL;
1364 ++ features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
1365 ++ features = netdev_increment_features(ipvlan->phy_dev->features,
1366 ++ features, features);
1367 ++ features |= IPVLAN_ALWAYS_ON;
1368 ++ features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);
1369 ++
1370 ++ return features;
1371 + }
1372 +
1373 + static void ipvlan_change_rx_flags(struct net_device *dev, int change)
1374 +@@ -732,10 +750,9 @@ static int ipvlan_device_event(struct notifier_block *unused,
1375 +
1376 + case NETDEV_FEAT_CHANGE:
1377 + list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
1378 +- ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
1379 + ipvlan->dev->gso_max_size = dev->gso_max_size;
1380 + ipvlan->dev->gso_max_segs = dev->gso_max_segs;
1381 +- netdev_features_change(ipvlan->dev);
1382 ++ netdev_update_features(ipvlan->dev);
1383 + }
1384 + break;
1385 +
1386 +diff --git a/drivers/of/address.c b/drivers/of/address.c
1387 +index 8eea3f6e29a44..340d3051b1ce2 100644
1388 +--- a/drivers/of/address.c
1389 ++++ b/drivers/of/address.c
1390 +@@ -980,6 +980,11 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
1391 + /* Don't error out as we'd break some existing DTs */
1392 + continue;
1393 + }
1394 ++ if (range.cpu_addr == OF_BAD_ADDR) {
1395 ++ pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
1396 ++ range.bus_addr, node);
1397 ++ continue;
1398 ++ }
1399 + dma_offset = range.cpu_addr - range.bus_addr;
1400 +
1401 + /* Take lower and upper limits */
1402 +diff --git a/drivers/opp/core.c b/drivers/opp/core.c
1403 +index e4f01e7771a22..a55d083e5be21 100644
1404 +--- a/drivers/opp/core.c
1405 ++++ b/drivers/opp/core.c
1406 +@@ -817,15 +817,23 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
1407 + }
1408 +
1409 + if (unlikely(!target_freq)) {
1410 +- if (opp_table->required_opp_tables) {
1411 +- ret = _set_required_opps(dev, opp_table, NULL);
1412 +- } else if (!_get_opp_count(opp_table)) {
1413 +- return 0;
1414 +- } else {
1415 ++ /*
1416 ++ * Some drivers need to support cases where some platforms may
1417 ++ * have OPP table for the device, while others don't and
1418 ++ * opp_set_rate() just needs to behave like clk_set_rate().
1419 ++ */
1420 ++ if (!_get_opp_count(opp_table)) {
1421 ++ ret = 0;
1422 ++ goto put_opp_table;
1423 ++ }
1424 ++
1425 ++ if (!opp_table->required_opp_tables) {
1426 + dev_err(dev, "target frequency can't be 0\n");
1427 + ret = -EINVAL;
1428 ++ goto put_opp_table;
1429 + }
1430 +
1431 ++ ret = _set_required_opps(dev, opp_table, NULL);
1432 + goto put_opp_table;
1433 + }
1434 +
1435 +@@ -845,10 +853,12 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
1436 +
1437 + /* Return early if nothing to do */
1438 + if (old_freq == freq) {
1439 +- dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
1440 +- __func__, freq);
1441 +- ret = 0;
1442 +- goto put_opp_table;
1443 ++ if (!opp_table->required_opp_tables && !opp_table->regulators) {
1444 ++ dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
1445 ++ __func__, freq);
1446 ++ ret = 0;
1447 ++ goto put_opp_table;
1448 ++ }
1449 + }
1450 +
1451 + /*
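
The reworked zero-frequency path in dev_pm_opp_set_rate() now checks the OPP count first, so platforms without an OPP table get clk_set_rate()-like behavior, and it only errors out when a table exists but there are no required-OPPs to drop. A simplified model of that decision order (the struct fields and literal error codes are illustrative, not the OPP API):

    #include <stdio.h>

    struct opp_table { int opp_count; int has_required_tables; };

    static int set_rate_zero(const struct opp_table *t)
    {
        if (!t->opp_count)
            return 0;          /* no OPPs: behave like clk_set_rate() */
        if (!t->has_required_tables)
            return -22;        /* -EINVAL: a real frequency is required */
        /* here the kernel calls _set_required_opps(dev, t, NULL) */
        return 0;
    }

    int main(void)
    {
        struct opp_table none  = { 0, 0 };
        struct opp_table genpd = { 4, 1 };
        struct opp_table plain = { 4, 0 };

        printf("%d %d %d\n", set_rate_zero(&none),
               set_rate_zero(&genpd), set_rate_zero(&plain));
        return 0;
    }
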
1452 +diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
1453 +index cb6b0ad7ec3f2..5dd92147f1680 100644
1454 +--- a/drivers/rtc/rtc-goldfish.c
1455 ++++ b/drivers/rtc/rtc-goldfish.c
1456 +@@ -73,6 +73,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
1457 + rtc_alarm64 = rtc_tm_to_time64(&alrm->time) * NSEC_PER_SEC;
1458 + writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
1459 + writel(rtc_alarm64, base + TIMER_ALARM_LOW);
1460 ++ writel(1, base + TIMER_IRQ_ENABLED);
1461 + } else {
1462 + /*
1463 + * if this function was called with enabled=0
1464 +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
1465 +index 111fe3fc32d76..e5d2299a7a39b 100644
1466 +--- a/drivers/s390/scsi/zfcp_fsf.c
1467 ++++ b/drivers/s390/scsi/zfcp_fsf.c
1468 +@@ -430,7 +430,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
1469 + return;
1470 + }
1471 +
1472 +- del_timer(&req->timer);
1473 ++ del_timer_sync(&req->timer);
1474 + zfcp_fsf_protstatus_eval(req);
1475 + zfcp_fsf_fsfstatus_eval(req);
1476 + req->handler(req);
1477 +@@ -905,7 +905,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
1478 + req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
1479 + req->issued = get_tod_clock();
1480 + if (zfcp_qdio_send(qdio, &req->qdio_req)) {
1481 +- del_timer(&req->timer);
1482 ++ del_timer_sync(&req->timer);
1483 + /* lookup request again, list might have changed */
1484 + zfcp_reqlist_find_rm(adapter->req_list, req_id);
1485 + zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
1486 +diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
1487 +index 2b865c6423e29..e00dc4693fcbd 100644
1488 +--- a/drivers/scsi/libfc/fc_disc.c
1489 ++++ b/drivers/scsi/libfc/fc_disc.c
1490 +@@ -581,8 +581,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1491 +
1492 + if (PTR_ERR(fp) == -FC_EX_CLOSED)
1493 + goto out;
1494 +- if (IS_ERR(fp))
1495 +- goto redisc;
1496 ++ if (IS_ERR(fp)) {
1497 ++ mutex_lock(&disc->disc_mutex);
1498 ++ fc_disc_restart(disc);
1499 ++ mutex_unlock(&disc->disc_mutex);
1500 ++ goto out;
1501 ++ }
1502 +
1503 + cp = fc_frame_payload_get(fp, sizeof(*cp));
1504 + if (!cp)
1505 +@@ -609,7 +613,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1506 + new_rdata->disc_id = disc->disc_id;
1507 + fc_rport_login(new_rdata);
1508 + }
1509 +- goto out;
1510 ++ goto free_fp;
1511 + }
1512 + rdata->disc_id = disc->disc_id;
1513 + mutex_unlock(&rdata->rp_mutex);
1514 +@@ -626,6 +630,8 @@ redisc:
1515 + fc_disc_restart(disc);
1516 + mutex_unlock(&disc->disc_mutex);
1517 + }
1518 ++free_fp:
1519 ++ fc_frame_free(fp);
1520 + out:
1521 + kref_put(&rdata->kref, fc_rport_destroy);
1522 + if (!IS_ERR(fp))
1523 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
1524 +index 1120d133204c2..20e3048276a01 100644
1525 +--- a/drivers/scsi/qla2xxx/qla_os.c
1526 ++++ b/drivers/scsi/qla2xxx/qla_os.c
1527 +@@ -2829,10 +2829,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1528 + /* This may fail but that's ok */
1529 + pci_enable_pcie_error_reporting(pdev);
1530 +
1531 +- /* Turn off T10-DIF when FC-NVMe is enabled */
1532 +- if (ql2xnvmeenable)
1533 +- ql2xenabledif = 0;
1534 +-
1535 + ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1536 + if (!ha) {
1537 + ql_log_pci(ql_log_fatal, pdev, 0x0009,
1538 +diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
1539 +index 46bb905b4d6a9..eafe0db98d542 100644
1540 +--- a/drivers/scsi/ufs/ti-j721e-ufs.c
1541 ++++ b/drivers/scsi/ufs/ti-j721e-ufs.c
1542 +@@ -38,6 +38,7 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
1543 + /* Select MPHY refclk frequency */
1544 + clk = devm_clk_get(dev, NULL);
1545 + if (IS_ERR(clk)) {
1546 ++ ret = PTR_ERR(clk);
1547 + dev_err(dev, "Cannot claim MPHY clock.\n");
1548 + goto clk_err;
1549 + }
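
The one-line ti-j721e-ufs fix captures PTR_ERR(clk) before jumping to the error label, so the probe no longer exits with a stale return value. A self-contained sketch of the ERR_PTR convention behind it, with user-space reimplementations of the helpers and a hypothetical get_clk():

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* User-space copies of the kernel's ERR_PTR helpers. */
    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
    static inline int IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    /* Hypothetical clock getter that can fail. */
    static void *get_clk(int fail)
    {
        return fail ? ERR_PTR(-ENODEV) : (void *)0x1000;
    }

    static int probe(int fail)
    {
        int ret = 0;
        void *clk = get_clk(fail);

        if (IS_ERR(clk)) {
            ret = PTR_ERR(clk);   /* the fix: record the error first */
            goto out;
        }
        /* ... use clk ... */
    out:
        return ret;
    }

    int main(void)
    {
        printf("ok=%d fail=%d\n", probe(0), probe(1));
        return 0;
    }
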
1550 +diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
1551 +index df7a1e6805a3b..c3af72c58805d 100644
1552 +--- a/drivers/scsi/ufs/ufs_quirks.h
1553 ++++ b/drivers/scsi/ufs/ufs_quirks.h
1554 +@@ -12,6 +12,7 @@
1555 + #define UFS_ANY_VENDOR 0xFFFF
1556 + #define UFS_ANY_MODEL "ANY_MODEL"
1557 +
1558 ++#define UFS_VENDOR_MICRON 0x12C
1559 + #define UFS_VENDOR_TOSHIBA 0x198
1560 + #define UFS_VENDOR_SAMSUNG 0x1CE
1561 + #define UFS_VENDOR_SKHYNIX 0x1AD
1562 +diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
1563 +index 8f78a81514991..b220666774ce8 100644
1564 +--- a/drivers/scsi/ufs/ufshcd-pci.c
1565 ++++ b/drivers/scsi/ufs/ufshcd-pci.c
1566 +@@ -67,11 +67,23 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
1567 + return err;
1568 + }
1569 +
1570 ++static int ufs_intel_ehl_init(struct ufs_hba *hba)
1571 ++{
1572 ++ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
1573 ++ return 0;
1574 ++}
1575 ++
1576 + static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
1577 + .name = "intel-pci",
1578 + .link_startup_notify = ufs_intel_link_startup_notify,
1579 + };
1580 +
1581 ++static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
1582 ++ .name = "intel-pci",
1583 ++ .init = ufs_intel_ehl_init,
1584 ++ .link_startup_notify = ufs_intel_link_startup_notify,
1585 ++};
1586 ++
1587 + #ifdef CONFIG_PM_SLEEP
1588 + /**
1589 + * ufshcd_pci_suspend - suspend power management function
1590 +@@ -200,8 +212,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = {
1591 + static const struct pci_device_id ufshcd_pci_tbl[] = {
1592 + { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
1593 + { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
1594 +- { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
1595 +- { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
1596 ++ { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
1597 ++ { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
1598 + { } /* terminate list */
1599 + };
1600 +
1601 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
1602 +index 477b6cfff381b..3b80d692dd2e7 100644
1603 +--- a/drivers/scsi/ufs/ufshcd.c
1604 ++++ b/drivers/scsi/ufs/ufshcd.c
1605 +@@ -211,6 +211,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
1606 +
1607 + static struct ufs_dev_fix ufs_fixups[] = {
1608 + /* UFS cards deviations table */
1609 ++ UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
1610 ++ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
1611 + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
1612 + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
1613 + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
1614 +@@ -645,7 +647,11 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
1615 + */
1616 + static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
1617 + {
1618 +- ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
1619 ++ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
1620 ++ ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
1621 ++ else
1622 ++ ufshcd_writel(hba, ~(1 << pos),
1623 ++ REG_UTP_TRANSFER_REQ_LIST_CLEAR);
1624 + }
1625 +
1626 + /**
1627 +@@ -655,7 +661,10 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
1628 + */
1629 + static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
1630 + {
1631 +- ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
1632 ++ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
1633 ++ ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
1634 ++ else
1635 ++ ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
1636 + }
1637 +
1638 + /**
1639 +@@ -2149,8 +2158,14 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1640 + return sg_segments;
1641 +
1642 + if (sg_segments) {
1643 +- lrbp->utr_descriptor_ptr->prd_table_length =
1644 +- cpu_to_le16((u16)sg_segments);
1645 ++
1646 ++ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
1647 ++ lrbp->utr_descriptor_ptr->prd_table_length =
1648 ++ cpu_to_le16((sg_segments *
1649 ++ sizeof(struct ufshcd_sg_entry)));
1650 ++ else
1651 ++ lrbp->utr_descriptor_ptr->prd_table_length =
1652 ++ cpu_to_le16((u16) (sg_segments));
1653 +
1654 + prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
1655 +
1656 +@@ -3496,11 +3511,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
1657 + cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
1658 +
1659 + /* Response upiu and prdt offset should be in double words */
1660 +- utrdlp[i].response_upiu_offset =
1661 +- cpu_to_le16(response_offset >> 2);
1662 +- utrdlp[i].prd_table_offset = cpu_to_le16(prdt_offset >> 2);
1663 +- utrdlp[i].response_upiu_length =
1664 +- cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
1665 ++ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
1666 ++ utrdlp[i].response_upiu_offset =
1667 ++ cpu_to_le16(response_offset);
1668 ++ utrdlp[i].prd_table_offset =
1669 ++ cpu_to_le16(prdt_offset);
1670 ++ utrdlp[i].response_upiu_length =
1671 ++ cpu_to_le16(ALIGNED_UPIU_SIZE);
1672 ++ } else {
1673 ++ utrdlp[i].response_upiu_offset =
1674 ++ cpu_to_le16(response_offset >> 2);
1675 ++ utrdlp[i].prd_table_offset =
1676 ++ cpu_to_le16(prdt_offset >> 2);
1677 ++ utrdlp[i].response_upiu_length =
1678 ++ cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
1679 ++ }
1680 +
1681 + ufshcd_init_lrb(hba, &hba->lrb[i], i);
1682 + }
1683 +@@ -3530,6 +3555,52 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
1684 + "dme-link-startup: error code %d\n", ret);
1685 + return ret;
1686 + }
1687 ++/**
1688 ++ * ufshcd_dme_reset - UIC command for DME_RESET
1689 ++ * @hba: per adapter instance
1690 ++ *
1691 ++ * DME_RESET command is issued in order to reset UniPro stack.
1692 ++ * This function now deals with cold reset.
1693 ++ *
1694 ++ * Returns 0 on success, non-zero value on failure
1695 ++ */
1696 ++static int ufshcd_dme_reset(struct ufs_hba *hba)
1697 ++{
1698 ++ struct uic_command uic_cmd = {0};
1699 ++ int ret;
1700 ++
1701 ++ uic_cmd.command = UIC_CMD_DME_RESET;
1702 ++
1703 ++ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
1704 ++ if (ret)
1705 ++ dev_err(hba->dev,
1706 ++ "dme-reset: error code %d\n", ret);
1707 ++
1708 ++ return ret;
1709 ++}
1710 ++
1711 ++/**
1712 ++ * ufshcd_dme_enable - UIC command for DME_ENABLE
1713 ++ * @hba: per adapter instance
1714 ++ *
1715 ++ * DME_ENABLE command is issued in order to enable UniPro stack.
1716 ++ *
1717 ++ * Returns 0 on success, non-zero value on failure
1718 ++ */
1719 ++static int ufshcd_dme_enable(struct ufs_hba *hba)
1720 ++{
1721 ++ struct uic_command uic_cmd = {0};
1722 ++ int ret;
1723 ++
1724 ++ uic_cmd.command = UIC_CMD_DME_ENABLE;
1725 ++
1726 ++ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
1727 ++ if (ret)
1728 ++ dev_err(hba->dev,
1729 ++ "dme-reset: error code %d\n", ret);
1730 ++
1731 ++ return ret;
1732 ++}
1733 +
1734 + static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
1735 + {
1736 +@@ -4247,7 +4318,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
1737 + }
1738 +
1739 + /**
1740 +- * ufshcd_hba_enable - initialize the controller
1741 ++ * ufshcd_hba_execute_hce - initialize the controller
1742 + * @hba: per adapter instance
1743 + *
1744 + * The controller resets itself and controller firmware initialization
1745 +@@ -4256,7 +4327,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
1746 + *
1747 + * Returns 0 on success, non-zero value on failure
1748 + */
1749 +-int ufshcd_hba_enable(struct ufs_hba *hba)
1750 ++static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
1751 + {
1752 + int retry;
1753 +
1754 +@@ -4304,6 +4375,32 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
1755 +
1756 + return 0;
1757 + }
1758 ++
1759 ++int ufshcd_hba_enable(struct ufs_hba *hba)
1760 ++{
1761 ++ int ret;
1762 ++
1763 ++ if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
1764 ++ ufshcd_set_link_off(hba);
1765 ++ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
1766 ++
1767 ++ /* enable UIC related interrupts */
1768 ++ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
1769 ++ ret = ufshcd_dme_reset(hba);
1770 ++ if (!ret) {
1771 ++ ret = ufshcd_dme_enable(hba);
1772 ++ if (!ret)
1773 ++ ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
1774 ++ if (ret)
1775 ++ dev_err(hba->dev,
1776 ++ "Host controller enable failed with non-hce\n");
1777 ++ }
1778 ++ } else {
1779 ++ ret = ufshcd_hba_execute_hce(hba);
1780 ++ }
1781 ++
1782 ++ return ret;
1783 ++}
1784 + EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
1785 +
1786 + static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
1787 +@@ -4702,6 +4799,12 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1788 + /* overall command status of utrd */
1789 + ocs = ufshcd_get_tr_ocs(lrbp);
1790 +
1791 ++ if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
1792 ++ if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
1793 ++ MASK_RSP_UPIU_RESULT)
1794 ++ ocs = OCS_SUCCESS;
1795 ++ }
1796 ++
1797 + switch (ocs) {
1798 + case OCS_SUCCESS:
1799 + result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
1800 +@@ -4880,7 +4983,8 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
1801 + * false interrupt if device completes another request after resetting
1802 + * aggregation and before reading the DB.
1803 + */
1804 +- if (ufshcd_is_intr_aggr_allowed(hba))
1805 ++ if (ufshcd_is_intr_aggr_allowed(hba) &&
1806 ++ !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
1807 + ufshcd_reset_intr_aggr(hba);
1808 +
1809 + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1810 +@@ -5699,7 +5803,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
1811 + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
1812 + } while (intr_status && --retries);
1813 +
1814 +- if (retval == IRQ_NONE) {
1815 ++ if (enabled_intr_status && retval == IRQ_NONE) {
1816 + dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
1817 + __func__, intr_status);
1818 + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
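
Several of the ufshcd hunks make register handling conditional on a quirk bit; ufshcd_utrl_clear() is the clearest case, where standard controllers take an inverted "keep these slots" mask while UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR controllers take the slot bit directly. A compilable sketch of just that dispatch (the struct and the register field standing in for a writel() are assumptions):

    #include <stdio.h>

    #define QUIRK_BROKEN_REQ_LIST_CLR (1u << 6)   /* bit value from the patch */

    struct hba { unsigned quirks; unsigned clr_reg; };

    /* Mirrors ufshcd_utrl_clear(): standard hosts get an inverted
     * keep-mask, quirky hosts get the slot bit itself. */
    static void utrl_clear(struct hba *hba, unsigned pos)
    {
        if (hba->quirks & QUIRK_BROKEN_REQ_LIST_CLR)
            hba->clr_reg = 1u << pos;
        else
            hba->clr_reg = ~(1u << pos);
    }

    int main(void)
    {
        struct hba std = { 0, 0 };
        struct hba quirky = { QUIRK_BROKEN_REQ_LIST_CLR, 0 };

        utrl_clear(&std, 3);
        utrl_clear(&quirky, 3);
        printf("std=%#x quirky=%#x\n", std.clr_reg, quirky.clr_reg);
        return 0;
    }
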
1819 +diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
1820 +index 2315ecc209272..ccbeae4f8325d 100644
1821 +--- a/drivers/scsi/ufs/ufshcd.h
1822 ++++ b/drivers/scsi/ufs/ufshcd.h
1823 +@@ -518,6 +518,41 @@ enum ufshcd_quirks {
1824 + * ops (get_ufs_hci_version) to get the correct version.
1825 + */
1826 + UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5,
1827 ++
1828 ++ /*
1829 ++ * Clear handling for transfer/task request list is just opposite.
1830 ++ */
1831 ++ UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6,
1832 ++
1833 ++ /*
1834 ++	 * This quirk needs to be enabled if the host controller doesn't allow
1835 ++ * that the interrupt aggregation timer and counter are reset by s/w.
1836 ++ */
1837 ++ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7,
1838 ++
1839 ++ /*
1840 ++	 * This quirk needs to be enabled if the host controller cannot be
1841 ++ * enabled via HCE register.
1842 ++ */
1843 ++ UFSHCI_QUIRK_BROKEN_HCE = 1 << 8,
1844 ++
1845 ++ /*
1846 ++ * This quirk needs to be enabled if the host controller regards
1847 ++ * resolution of the values of PRDTO and PRDTL in UTRD as byte.
1848 ++ */
1849 ++ UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9,
1850 ++
1851 ++ /*
1852 ++ * This quirk needs to be enabled if the host controller reports
1853 ++ * OCS FATAL ERROR with device error through sense data
1854 ++ */
1855 ++ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10,
1856 ++
1857 ++ /*
1858 ++ * This quirk needs to be enabled if the host controller has
1859 ++ * auto-hibernate capability but it doesn't work.
1860 ++ */
1861 ++ UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
1862 + };
1863 +
1864 + enum ufshcd_caps {
1865 +@@ -767,7 +802,8 @@ return true;
1866 +
1867 + static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
1868 + {
1869 +- return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
1870 ++ return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
1871 ++ !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
1872 + }
1873 +
1874 + static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
1875 +diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
1876 +index 741b9140992a8..11279dcc4a3e9 100644
1877 +--- a/drivers/spi/Kconfig
1878 ++++ b/drivers/spi/Kconfig
1879 +@@ -989,4 +989,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
1880 +
1881 + endif # SPI_SLAVE
1882 +
1883 ++config SPI_DYNAMIC
1884 ++ def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
1885 ++
1886 + endif # SPI
1887 +diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
1888 +index 44ac6eb3298d4..e29818abbeaf4 100644
1889 +--- a/drivers/spi/spi-stm32.c
1890 ++++ b/drivers/spi/spi-stm32.c
1891 +@@ -13,6 +13,7 @@
1892 + #include <linux/iopoll.h>
1893 + #include <linux/module.h>
1894 + #include <linux/of_platform.h>
1895 ++#include <linux/pinctrl/consumer.h>
1896 + #include <linux/pm_runtime.h>
1897 + #include <linux/reset.h>
1898 + #include <linux/spi/spi.h>
1899 +@@ -1985,6 +1986,8 @@ static int stm32_spi_remove(struct platform_device *pdev)
1900 +
1901 + pm_runtime_disable(&pdev->dev);
1902 +
1903 ++ pinctrl_pm_select_sleep_state(&pdev->dev);
1904 ++
1905 + return 0;
1906 + }
1907 +
1908 +@@ -1996,13 +1999,18 @@ static int stm32_spi_runtime_suspend(struct device *dev)
1909 +
1910 + clk_disable_unprepare(spi->clk);
1911 +
1912 +- return 0;
1913 ++ return pinctrl_pm_select_sleep_state(dev);
1914 + }
1915 +
1916 + static int stm32_spi_runtime_resume(struct device *dev)
1917 + {
1918 + struct spi_master *master = dev_get_drvdata(dev);
1919 + struct stm32_spi *spi = spi_master_get_devdata(master);
1920 ++ int ret;
1921 ++
1922 ++ ret = pinctrl_pm_select_default_state(dev);
1923 ++ if (ret)
1924 ++ return ret;
1925 +
1926 + return clk_prepare_enable(spi->clk);
1927 + }
1928 +@@ -2032,10 +2040,23 @@ static int stm32_spi_resume(struct device *dev)
1929 + return ret;
1930 +
1931 + ret = spi_master_resume(master);
1932 +- if (ret)
1933 ++ if (ret) {
1934 + clk_disable_unprepare(spi->clk);
1935 ++ return ret;
1936 ++ }
1937 +
1938 +- return ret;
1939 ++ ret = pm_runtime_get_sync(dev);
1940 ++ if (ret) {
1941 ++ dev_err(dev, "Unable to power device:%d\n", ret);
1942 ++ return ret;
1943 ++ }
1944 ++
1945 ++ spi->cfg->config(spi);
1946 ++
1947 ++ pm_runtime_mark_last_busy(dev);
1948 ++ pm_runtime_put_autosuspend(dev);
1949 ++
1950 ++ return 0;
1951 + }
1952 + #endif
1953 +
1954 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
1955 +index 299384c91917a..a6e16c138845a 100644
1956 +--- a/drivers/spi/spi.c
1957 ++++ b/drivers/spi/spi.c
1958 +@@ -475,6 +475,12 @@ static LIST_HEAD(spi_controller_list);
1959 + */
1960 + static DEFINE_MUTEX(board_lock);
1961 +
1962 ++/*
1963 ++ * Prevents addition of devices with same chip select and
1964 ++ * addition of devices below an unregistering controller.
1965 ++ */
1966 ++static DEFINE_MUTEX(spi_add_lock);
1967 ++
1968 + /**
1969 + * spi_alloc_device - Allocate a new SPI device
1970 + * @ctlr: Controller to which device is connected
1971 +@@ -554,7 +560,6 @@ static int spi_dev_check(struct device *dev, void *data)
1972 + */
1973 + int spi_add_device(struct spi_device *spi)
1974 + {
1975 +- static DEFINE_MUTEX(spi_add_lock);
1976 + struct spi_controller *ctlr = spi->controller;
1977 + struct device *dev = ctlr->dev.parent;
1978 + int status;
1979 +@@ -582,6 +587,13 @@ int spi_add_device(struct spi_device *spi)
1980 + goto done;
1981 + }
1982 +
1983 ++ /* Controller may unregister concurrently */
1984 ++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
1985 ++ !device_is_registered(&ctlr->dev)) {
1986 ++ status = -ENODEV;
1987 ++ goto done;
1988 ++ }
1989 ++
1990 + /* Descriptors take precedence */
1991 + if (ctlr->cs_gpiods)
1992 + spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
1993 +@@ -2761,6 +2773,10 @@ void spi_unregister_controller(struct spi_controller *ctlr)
1994 + struct spi_controller *found;
1995 + int id = ctlr->bus_num;
1996 +
1997 ++ /* Prevent addition of new devices, unregister existing ones */
1998 ++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
1999 ++ mutex_lock(&spi_add_lock);
2000 ++
2001 + device_for_each_child(&ctlr->dev, NULL, __unregister);
2002 +
2003 + /* First make sure that this controller was ever added */
2004 +@@ -2781,6 +2797,9 @@ void spi_unregister_controller(struct spi_controller *ctlr)
2005 + if (found == ctlr)
2006 + idr_remove(&spi_master_idr, id);
2007 + mutex_unlock(&board_lock);
2008 ++
2009 ++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2010 ++ mutex_unlock(&spi_add_lock);
2011 + }
2012 + EXPORT_SYMBOL_GPL(spi_unregister_controller);
2013 +
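
The spi.c hunks hoist spi_add_lock from function scope to file scope so spi_unregister_controller() can hold it across teardown, closing the window in which spi_add_device() could attach a child under a dying controller. A pthread sketch of the same lock discipline (the names and the registered flag are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    /* File-scope lock, as in the patch, so both paths can take it. */
    static pthread_mutex_t add_lock = PTHREAD_MUTEX_INITIALIZER;
    static int controller_registered = 1;

    /* spi_add_device() analogue: refuse to attach below a controller
     * that is concurrently unregistering. */
    static int add_device(void)
    {
        int ret = 0;

        pthread_mutex_lock(&add_lock);
        if (!controller_registered)
            ret = -19;                       /* -ENODEV */
        /* else: create and register the child device */
        pthread_mutex_unlock(&add_lock);
        return ret;
    }

    /* spi_unregister_controller() analogue: holds the same lock across
     * teardown so add_device() cannot interleave. */
    static void unregister_controller(void)
    {
        pthread_mutex_lock(&add_lock);
        controller_registered = 0;
        /* unregister children, drop the idr entry, ... */
        pthread_mutex_unlock(&add_lock);
    }

    int main(void)
    {
        printf("before: %d\n", add_device());
        unregister_controller();
        printf("after:  %d\n", add_device());
        return 0;
    }
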
2014 +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
2015 +index b63a1e0c4aa6d..a55114975b00d 100644
2016 +--- a/drivers/target/target_core_user.c
2017 ++++ b/drivers/target/target_core_user.c
2018 +@@ -601,7 +601,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
2019 + size = round_up(size+offset, PAGE_SIZE);
2020 +
2021 + while (size) {
2022 +- flush_dcache_page(virt_to_page(start));
2023 ++ flush_dcache_page(vmalloc_to_page(start));
2024 + start += PAGE_SIZE;
2025 + size -= PAGE_SIZE;
2026 + }
2027 +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
2028 +index cc1d64765ce79..c244e0ecf9f42 100644
2029 +--- a/drivers/vfio/vfio_iommu_type1.c
2030 ++++ b/drivers/vfio/vfio_iommu_type1.c
2031 +@@ -1149,13 +1149,16 @@ static int vfio_bus_type(struct device *dev, void *data)
2032 + static int vfio_iommu_replay(struct vfio_iommu *iommu,
2033 + struct vfio_domain *domain)
2034 + {
2035 +- struct vfio_domain *d;
2036 ++ struct vfio_domain *d = NULL;
2037 + struct rb_node *n;
2038 + unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
2039 + int ret;
2040 +
2041 + /* Arbitrarily pick the first domain in the list for lookups */
2042 +- d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
2043 ++ if (!list_empty(&iommu->domain_list))
2044 ++ d = list_first_entry(&iommu->domain_list,
2045 ++ struct vfio_domain, next);
2046 ++
2047 + n = rb_first(&iommu->dma_list);
2048 +
2049 + for (; n; n = rb_next(n)) {
2050 +@@ -1173,6 +1176,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
2051 + phys_addr_t p;
2052 + dma_addr_t i;
2053 +
2054 ++ if (WARN_ON(!d)) { /* mapped w/o a domain?! */
2055 ++ ret = -EINVAL;
2056 ++ goto unwind;
2057 ++ }
2058 ++
2059 + phys = iommu_iova_to_phys(d->domain, iova);
2060 +
2061 + if (WARN_ON(!phys)) {
2062 +@@ -1202,7 +1210,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
2063 + if (npage <= 0) {
2064 + WARN_ON(!npage);
2065 + ret = (int)npage;
2066 +- return ret;
2067 ++ goto unwind;
2068 + }
2069 +
2070 + phys = pfn << PAGE_SHIFT;
2071 +@@ -1211,14 +1219,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
2072 +
2073 + ret = iommu_map(domain->domain, iova, phys,
2074 + size, dma->prot | domain->prot);
2075 +- if (ret)
2076 +- return ret;
2077 ++ if (ret) {
2078 ++ if (!dma->iommu_mapped)
2079 ++ vfio_unpin_pages_remote(dma, iova,
2080 ++ phys >> PAGE_SHIFT,
2081 ++ size >> PAGE_SHIFT,
2082 ++ true);
2083 ++ goto unwind;
2084 ++ }
2085 +
2086 + iova += size;
2087 + }
2088 ++ }
2089 ++
2090 ++ /* All dmas are now mapped, defer to second tree walk for unwind */
2091 ++ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
2092 ++ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
2093 ++
2094 + dma->iommu_mapped = true;
2095 + }
2096 ++
2097 + return 0;
2098 ++
2099 ++unwind:
2100 ++ for (; n; n = rb_prev(n)) {
2101 ++ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
2102 ++ dma_addr_t iova;
2103 ++
2104 ++ if (dma->iommu_mapped) {
2105 ++ iommu_unmap(domain->domain, dma->iova, dma->size);
2106 ++ continue;
2107 ++ }
2108 ++
2109 ++ iova = dma->iova;
2110 ++ while (iova < dma->iova + dma->size) {
2111 ++ phys_addr_t phys, p;
2112 ++ size_t size;
2113 ++ dma_addr_t i;
2114 ++
2115 ++ phys = iommu_iova_to_phys(domain->domain, iova);
2116 ++ if (!phys) {
2117 ++ iova += PAGE_SIZE;
2118 ++ continue;
2119 ++ }
2120 ++
2121 ++ size = PAGE_SIZE;
2122 ++ p = phys + size;
2123 ++ i = iova + size;
2124 ++ while (i < dma->iova + dma->size &&
2125 ++ p == iommu_iova_to_phys(domain->domain, i)) {
2126 ++ size += PAGE_SIZE;
2127 ++ p += PAGE_SIZE;
2128 ++ i += PAGE_SIZE;
2129 ++ }
2130 ++
2131 ++ iommu_unmap(domain->domain, iova, size);
2132 ++ vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
2133 ++ size >> PAGE_SHIFT, true);
2134 ++ }
2135 ++ }
2136 ++
2137 ++ return ret;
2138 + }
2139 +
2140 + /*
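
The vfio_iommu_replay() change replaces early returns with a full unwind, walking back over every range already mapped once a later iommu_map() fails. The generic shape of that map-then-unwind pattern, reduced to a compilable toy where map_one()/unmap_one() are placeholders for the real mapping calls:

    #include <stdio.h>

    #define NR_RANGES 4

    /* Placeholder failure injection: the third range fails to map. */
    static int map_one(int i)    { return i == 2 ? -12 : 0; } /* -ENOMEM */
    static void unmap_one(int i) { printf("unwound %d\n", i); }

    static int replay(void)
    {
        int i, ret = 0;

        for (i = 0; i < NR_RANGES; i++) {
            ret = map_one(i);
            if (ret)
                goto unwind;
        }
        return 0;

    unwind:
        /* tear down everything mapped so far, newest first */
        while (--i >= 0)
            unmap_one(i);
        return ret;
    }

    int main(void)
    {
        printf("replay: %d\n", replay());
        return 0;
    }
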
2141 +diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
2142 +index 65491ae74808d..e57c00824965c 100644
2143 +--- a/drivers/video/fbdev/efifb.c
2144 ++++ b/drivers/video/fbdev/efifb.c
2145 +@@ -453,7 +453,7 @@ static int efifb_probe(struct platform_device *dev)
2146 + info->apertures->ranges[0].base = efifb_fix.smem_start;
2147 + info->apertures->ranges[0].size = size_remap;
2148 +
2149 +- if (efi_enabled(EFI_BOOT) &&
2150 ++ if (efi_enabled(EFI_MEMMAP) &&
2151 + !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
2152 + if ((efifb_fix.smem_start + efifb_fix.smem_len) >
2153 + (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
2154 +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
2155 +index 58b96baa8d488..4f7c73e6052f6 100644
2156 +--- a/drivers/virtio/virtio_ring.c
2157 ++++ b/drivers/virtio/virtio_ring.c
2158 +@@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
2159 + {
2160 + struct vring_virtqueue *vq = to_vvq(_vq);
2161 +
2162 ++ if (unlikely(vq->broken))
2163 ++ return false;
2164 ++
2165 + virtio_mb(vq->weak_barriers);
2166 + return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
2167 + virtqueue_poll_split(_vq, last_used_idx);
2168 +diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
2169 +index 17240c5325a30..6ad87b5c95ed3 100644
2170 +--- a/drivers/xen/preempt.c
2171 ++++ b/drivers/xen/preempt.c
2172 +@@ -27,7 +27,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
2173 + asmlinkage __visible void xen_maybe_preempt_hcall(void)
2174 + {
2175 + if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
2176 +- && need_resched())) {
2177 ++ && need_resched() && !preempt_count())) {
2178 + /*
2179 + * Clear flag as we may be rescheduled on a different
2180 + * cpu.
2181 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
2182 +index b6d27762c6f8c..5fbadd07819bd 100644
2183 +--- a/drivers/xen/swiotlb-xen.c
2184 ++++ b/drivers/xen/swiotlb-xen.c
2185 +@@ -335,6 +335,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2186 + int order = get_order(size);
2187 + phys_addr_t phys;
2188 + u64 dma_mask = DMA_BIT_MASK(32);
2189 ++ struct page *page;
2190 +
2191 + if (hwdev && hwdev->coherent_dma_mask)
2192 + dma_mask = hwdev->coherent_dma_mask;
2193 +@@ -346,9 +347,14 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2194 + /* Convert the size to actually allocated. */
2195 + size = 1UL << (order + XEN_PAGE_SHIFT);
2196 +
2197 ++ if (is_vmalloc_addr(vaddr))
2198 ++ page = vmalloc_to_page(vaddr);
2199 ++ else
2200 ++ page = virt_to_page(vaddr);
2201 ++
2202 + if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
2203 + range_straddles_page_boundary(phys, size)) &&
2204 +- TestClearPageXenRemapped(virt_to_page(vaddr)))
2205 ++ TestClearPageXenRemapped(page))
2206 + xen_destroy_contiguous_region(phys, order);
2207 +
2208 + xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
2209 +diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
2210 +index 7503899c0a1b5..f07e53ab808e3 100644
2211 +--- a/fs/afs/dynroot.c
2212 ++++ b/fs/afs/dynroot.c
2213 +@@ -289,15 +289,17 @@ void afs_dynroot_depopulate(struct super_block *sb)
2214 + net->dynroot_sb = NULL;
2215 + mutex_unlock(&net->proc_cells_lock);
2216 +
2217 +- inode_lock(root->d_inode);
2218 +-
2219 +- /* Remove all the pins for dirs created for manually added cells */
2220 +- list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
2221 +- if (subdir->d_fsdata) {
2222 +- subdir->d_fsdata = NULL;
2223 +- dput(subdir);
2224 ++ if (root) {
2225 ++ inode_lock(root->d_inode);
2226 ++
2227 ++ /* Remove all the pins for dirs created for manually added cells */
2228 ++ list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
2229 ++ if (subdir->d_fsdata) {
2230 ++ subdir->d_fsdata = NULL;
2231 ++ dput(subdir);
2232 ++ }
2233 + }
2234 +- }
2235 +
2236 +- inode_unlock(root->d_inode);
2237 ++ inode_unlock(root->d_inode);
2238 ++ }
2239 + }
2240 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
2241 +index 95272ae36b058..e32935b68d0a4 100644
2242 +--- a/fs/ceph/mds_client.c
2243 ++++ b/fs/ceph/mds_client.c
2244 +@@ -4337,7 +4337,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
2245 + return -ENOMEM;
2246 + }
2247 +
2248 +- fsc->mdsc = mdsc;
2249 + init_completion(&mdsc->safe_umount_waiters);
2250 + init_waitqueue_head(&mdsc->session_close_wq);
2251 + INIT_LIST_HEAD(&mdsc->waiting_for_map);
2252 +@@ -4390,6 +4389,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
2253 +
2254 + strscpy(mdsc->nodename, utsname()->nodename,
2255 + sizeof(mdsc->nodename));
2256 ++
2257 ++ fsc->mdsc = mdsc;
2258 + return 0;
2259 + }
2260 +
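
The ceph change is a publication-ordering fix: fsc->mdsc is assigned only after the mdsc is fully initialized, so nothing reaching through the fs client can observe a half-built structure. A sketch of the publish-last pattern; note that truly concurrent code would also need a release barrier on the final store, which this toy omits:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct mdsc { int ready; char nodename[64]; };
    struct fs_client { struct mdsc *mdsc; };

    /* Fully initialize first, publish the pointer last. */
    static int mdsc_init(struct fs_client *fsc)
    {
        struct mdsc *m = calloc(1, sizeof(*m));

        if (!m)
            return -12;                    /* -ENOMEM */
        strcpy(m->nodename, "node0");      /* ...rest of the setup... */
        m->ready = 1;
        fsc->mdsc = m;                     /* publish last */
        return 0;
    }

    int main(void)
    {
        struct fs_client fsc = { 0 };

        if (mdsc_init(&fsc))
            return 1;
        printf("ready=%d node=%s\n", fsc.mdsc->ready, fsc.mdsc->nodename);
        free(fsc.mdsc);
        return 0;
    }
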
2261 +diff --git a/fs/eventpoll.c b/fs/eventpoll.c
2262 +index 12eebcdea9c8a..e0decff22ae27 100644
2263 +--- a/fs/eventpoll.c
2264 ++++ b/fs/eventpoll.c
2265 +@@ -1994,9 +1994,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
2266 + * not already there, and calling reverse_path_check()
2267 + * during ep_insert().
2268 + */
2269 +- if (list_empty(&epi->ffd.file->f_tfile_llink))
2270 ++ if (list_empty(&epi->ffd.file->f_tfile_llink)) {
2271 ++ get_file(epi->ffd.file);
2272 + list_add(&epi->ffd.file->f_tfile_llink,
2273 + &tfile_check_list);
2274 ++ }
2275 + }
2276 + }
2277 + mutex_unlock(&ep->mtx);
2278 +@@ -2040,6 +2042,7 @@ static void clear_tfile_check_list(void)
2279 + file = list_first_entry(&tfile_check_list, struct file,
2280 + f_tfile_llink);
2281 + list_del_init(&file->f_tfile_llink);
2282 ++ fput(file);
2283 + }
2284 + INIT_LIST_HEAD(&tfile_check_list);
2285 + }
2286 +@@ -2200,25 +2203,22 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
2287 + full_check = 1;
2288 + if (is_file_epoll(tf.file)) {
2289 + error = -ELOOP;
2290 +- if (ep_loop_check(ep, tf.file) != 0) {
2291 +- clear_tfile_check_list();
2292 ++ if (ep_loop_check(ep, tf.file) != 0)
2293 + goto error_tgt_fput;
2294 +- }
2295 +- } else
2296 ++ } else {
2297 ++ get_file(tf.file);
2298 + list_add(&tf.file->f_tfile_llink,
2299 + &tfile_check_list);
2300 ++ }
2301 + error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
2302 +- if (error) {
2303 +-out_del:
2304 +- list_del(&tf.file->f_tfile_llink);
2305 ++ if (error)
2306 + goto error_tgt_fput;
2307 +- }
2308 + if (is_file_epoll(tf.file)) {
2309 + tep = tf.file->private_data;
2310 + error = epoll_mutex_lock(&tep->mtx, 1, nonblock);
2311 + if (error) {
2312 + mutex_unlock(&ep->mtx);
2313 +- goto out_del;
2314 ++ goto error_tgt_fput;
2315 + }
2316 + }
2317 + }
2318 +@@ -2239,8 +2239,6 @@ out_del:
2319 + error = ep_insert(ep, epds, tf.file, fd, full_check);
2320 + } else
2321 + error = -EEXIST;
2322 +- if (full_check)
2323 +- clear_tfile_check_list();
2324 + break;
2325 + case EPOLL_CTL_DEL:
2326 + if (epi)
2327 +@@ -2263,8 +2261,10 @@ out_del:
2328 + mutex_unlock(&ep->mtx);
2329 +
2330 + error_tgt_fput:
2331 +- if (full_check)
2332 ++ if (full_check) {
2333 ++ clear_tfile_check_list();
2334 + mutex_unlock(&epmutex);
2335 ++ }
2336 +
2337 + fdput(tf);
2338 + error_fput:
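
The eventpoll hunks pair every list_add() onto tfile_check_list with a get_file(), and clear_tfile_check_list() now drops that reference with fput(), so a file can no longer be freed while it still sits on the check list. The refcount pairing in miniature, with struct file reduced to a bare counter:

    #include <stdio.h>

    struct file { int refcount; };

    static void get_file(struct file *f) { f->refcount++; }
    static void fput(struct file *f)     { f->refcount--; }

    /* The list owns a reference for as long as the file is on it. */
    static void add_to_check_list(struct file *f)
    {
        get_file(f);
        /* list_add(&f->f_tfile_llink, &tfile_check_list); */
    }

    static void clear_check_list(struct file *f)
    {
        /* list_del_init(&f->f_tfile_llink); */
        fput(f);
    }

    int main(void)
    {
        struct file f = { 1 };   /* caller's own reference */

        add_to_check_list(&f);
        printf("on list: refcount=%d\n", f.refcount);
        clear_check_list(&f);
        printf("cleared: refcount=%d\n", f.refcount);
        return 0;
    }
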
2339 +diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
2340 +index 16e9b2fda03ae..e830a9d4e10d3 100644
2341 +--- a/fs/ext4/block_validity.c
2342 ++++ b/fs/ext4/block_validity.c
2343 +@@ -24,6 +24,7 @@ struct ext4_system_zone {
2344 + struct rb_node node;
2345 + ext4_fsblk_t start_blk;
2346 + unsigned int count;
2347 ++ u32 ino;
2348 + };
2349 +
2350 + static struct kmem_cache *ext4_system_zone_cachep;
2351 +@@ -45,7 +46,8 @@ void ext4_exit_system_zone(void)
2352 + static inline int can_merge(struct ext4_system_zone *entry1,
2353 + struct ext4_system_zone *entry2)
2354 + {
2355 +- if ((entry1->start_blk + entry1->count) == entry2->start_blk)
2356 ++ if ((entry1->start_blk + entry1->count) == entry2->start_blk &&
2357 ++ entry1->ino == entry2->ino)
2358 + return 1;
2359 + return 0;
2360 + }
2361 +@@ -66,9 +68,9 @@ static void release_system_zone(struct ext4_system_blocks *system_blks)
2362 + */
2363 + static int add_system_zone(struct ext4_system_blocks *system_blks,
2364 + ext4_fsblk_t start_blk,
2365 +- unsigned int count)
2366 ++ unsigned int count, u32 ino)
2367 + {
2368 +- struct ext4_system_zone *new_entry = NULL, *entry;
2369 ++ struct ext4_system_zone *new_entry, *entry;
2370 + struct rb_node **n = &system_blks->root.rb_node, *node;
2371 + struct rb_node *parent = NULL, *new_node = NULL;
2372 +
2373 +@@ -79,30 +81,21 @@ static int add_system_zone(struct ext4_system_blocks *system_blks,
2374 + n = &(*n)->rb_left;
2375 + else if (start_blk >= (entry->start_blk + entry->count))
2376 + n = &(*n)->rb_right;
2377 +- else {
2378 +- if (start_blk + count > (entry->start_blk +
2379 +- entry->count))
2380 +- entry->count = (start_blk + count -
2381 +- entry->start_blk);
2382 +- new_node = *n;
2383 +- new_entry = rb_entry(new_node, struct ext4_system_zone,
2384 +- node);
2385 +- break;
2386 +- }
2387 ++ else /* Unexpected overlap of system zones. */
2388 ++ return -EFSCORRUPTED;
2389 + }
2390 +
2391 +- if (!new_entry) {
2392 +- new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
2393 +- GFP_KERNEL);
2394 +- if (!new_entry)
2395 +- return -ENOMEM;
2396 +- new_entry->start_blk = start_blk;
2397 +- new_entry->count = count;
2398 +- new_node = &new_entry->node;
2399 +-
2400 +- rb_link_node(new_node, parent, n);
2401 +- rb_insert_color(new_node, &system_blks->root);
2402 +- }
2403 ++ new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
2404 ++ GFP_KERNEL);
2405 ++ if (!new_entry)
2406 ++ return -ENOMEM;
2407 ++ new_entry->start_blk = start_blk;
2408 ++ new_entry->count = count;
2409 ++ new_entry->ino = ino;
2410 ++ new_node = &new_entry->node;
2411 ++
2412 ++ rb_link_node(new_node, parent, n);
2413 ++ rb_insert_color(new_node, &system_blks->root);
2414 +
2415 + /* Can we merge to the left? */
2416 + node = rb_prev(new_node);
2417 +@@ -159,7 +152,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
2418 + static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
2419 + struct ext4_system_blocks *system_blks,
2420 + ext4_fsblk_t start_blk,
2421 +- unsigned int count)
2422 ++ unsigned int count, ino_t ino)
2423 + {
2424 + struct ext4_system_zone *entry;
2425 + struct rb_node *n;
2426 +@@ -180,7 +173,7 @@ static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
2427 + else if (start_blk >= (entry->start_blk + entry->count))
2428 + n = n->rb_right;
2429 + else
2430 +- return 0;
2431 ++ return entry->ino == ino;
2432 + }
2433 + return 1;
2434 + }
2435 +@@ -214,19 +207,18 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
2436 + if (n == 0) {
2437 + i++;
2438 + } else {
2439 +- if (!ext4_data_block_valid_rcu(sbi, system_blks,
2440 +- map.m_pblk, n)) {
2441 +- err = -EFSCORRUPTED;
2442 +- __ext4_error(sb, __func__, __LINE__, -err,
2443 +- map.m_pblk, "blocks %llu-%llu "
2444 +- "from inode %u overlap system zone",
2445 +- map.m_pblk,
2446 +- map.m_pblk + map.m_len - 1, ino);
2447 ++ err = add_system_zone(system_blks, map.m_pblk, n, ino);
2448 ++ if (err < 0) {
2449 ++ if (err == -EFSCORRUPTED) {
2450 ++ __ext4_error(sb, __func__, __LINE__,
2451 ++ -err, map.m_pblk,
2452 ++ "blocks %llu-%llu from inode %u overlap system zone",
2453 ++ map.m_pblk,
2454 ++ map.m_pblk + map.m_len - 1,
2455 ++ ino);
2456 ++ }
2457 + break;
2458 + }
2459 +- err = add_system_zone(system_blks, map.m_pblk, n);
2460 +- if (err < 0)
2461 +- break;
2462 + i += n;
2463 + }
2464 + }
2465 +@@ -280,19 +272,19 @@ int ext4_setup_system_zone(struct super_block *sb)
2466 + ((i < 5) || ((i % flex_size) == 0)))
2467 + add_system_zone(system_blks,
2468 + ext4_group_first_block_no(sb, i),
2469 +- ext4_bg_num_gdb(sb, i) + 1);
2470 ++ ext4_bg_num_gdb(sb, i) + 1, 0);
2471 + gdp = ext4_get_group_desc(sb, i, NULL);
2472 + ret = add_system_zone(system_blks,
2473 +- ext4_block_bitmap(sb, gdp), 1);
2474 ++ ext4_block_bitmap(sb, gdp), 1, 0);
2475 + if (ret)
2476 + goto err;
2477 + ret = add_system_zone(system_blks,
2478 +- ext4_inode_bitmap(sb, gdp), 1);
2479 ++ ext4_inode_bitmap(sb, gdp), 1, 0);
2480 + if (ret)
2481 + goto err;
2482 + ret = add_system_zone(system_blks,
2483 + ext4_inode_table(sb, gdp),
2484 +- sbi->s_itb_per_group);
2485 ++ sbi->s_itb_per_group, 0);
2486 + if (ret)
2487 + goto err;
2488 + }
2489 +@@ -341,7 +333,7 @@ void ext4_release_system_zone(struct super_block *sb)
2490 + call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
2491 + }
2492 +
2493 +-int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
2494 ++int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
2495 + unsigned int count)
2496 + {
2497 + struct ext4_system_blocks *system_blks;
2498 +@@ -353,9 +345,9 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
2499 + * mount option.
2500 + */
2501 + rcu_read_lock();
2502 +- system_blks = rcu_dereference(sbi->system_blks);
2503 +- ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
2504 +- count);
2505 ++ system_blks = rcu_dereference(EXT4_SB(inode->i_sb)->system_blks);
2506 ++ ret = ext4_data_block_valid_rcu(EXT4_SB(inode->i_sb), system_blks,
2507 ++ start_blk, count, inode->i_ino);
2508 + rcu_read_unlock();
2509 + return ret;
2510 + }
2511 +@@ -374,8 +366,7 @@ int ext4_check_blockref(const char *function, unsigned int line,
2512 + while (bref < p+max) {
2513 + blk = le32_to_cpu(*bref++);
2514 + if (blk &&
2515 +- unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
2516 +- blk, 1))) {
2517 ++ unlikely(!ext4_inode_block_valid(inode, blk, 1))) {
2518 + ext4_error_inode(inode, function, line, blk,
2519 + "invalid block");
2520 + return -EFSCORRUPTED;
2521 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
2522 +index 51a85b50033a7..98b44322a3c18 100644
2523 +--- a/fs/ext4/ext4.h
2524 ++++ b/fs/ext4/ext4.h
2525 +@@ -3338,9 +3338,9 @@ extern void ext4_release_system_zone(struct super_block *sb);
2526 + extern int ext4_setup_system_zone(struct super_block *sb);
2527 + extern int __init ext4_init_system_zone(void);
2528 + extern void ext4_exit_system_zone(void);
2529 +-extern int ext4_data_block_valid(struct ext4_sb_info *sbi,
2530 +- ext4_fsblk_t start_blk,
2531 +- unsigned int count);
2532 ++extern int ext4_inode_block_valid(struct inode *inode,
2533 ++ ext4_fsblk_t start_blk,
2534 ++ unsigned int count);
2535 + extern int ext4_check_blockref(const char *, unsigned int,
2536 + struct inode *, __le32 *, unsigned int);
2537 +
2538 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
2539 +index d5453072eb635..910574aa6a903 100644
2540 +--- a/fs/ext4/extents.c
2541 ++++ b/fs/ext4/extents.c
2542 +@@ -337,7 +337,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
2543 + */
2544 + if (lblock + len <= lblock)
2545 + return 0;
2546 +- return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
2547 ++ return ext4_inode_block_valid(inode, block, len);
2548 + }
2549 +
2550 + static int ext4_valid_extent_idx(struct inode *inode,
2551 +@@ -345,7 +345,7 @@ static int ext4_valid_extent_idx(struct inode *inode,
2552 + {
2553 + ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
2554 +
2555 +- return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
2556 ++ return ext4_inode_block_valid(inode, block, 1);
2557 + }
2558 +
2559 + static int ext4_valid_extent_entries(struct inode *inode,
2560 +@@ -500,14 +500,10 @@ __read_extent_tree_block(const char *function, unsigned int line,
2561 + }
2562 + if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
2563 + return bh;
2564 +- if (!ext4_has_feature_journal(inode->i_sb) ||
2565 +- (inode->i_ino !=
2566 +- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
2567 +- err = __ext4_ext_check(function, line, inode,
2568 +- ext_block_hdr(bh), depth, pblk);
2569 +- if (err)
2570 +- goto errout;
2571 +- }
2572 ++ err = __ext4_ext_check(function, line, inode,
2573 ++ ext_block_hdr(bh), depth, pblk);
2574 ++ if (err)
2575 ++ goto errout;
2576 + set_buffer_verified(bh);
2577 + /*
2578 + * If this is a leaf block, cache all of its entries
2579 +diff --git a/fs/ext4/file.c b/fs/ext4/file.c
2580 +index 2a01e31a032c4..8f742b53f1d40 100644
2581 +--- a/fs/ext4/file.c
2582 ++++ b/fs/ext4/file.c
2583 +@@ -428,6 +428,10 @@ restart:
2584 + */
2585 + if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
2586 + !ext4_overwrite_io(inode, offset, count))) {
2587 ++ if (iocb->ki_flags & IOCB_NOWAIT) {
2588 ++ ret = -EAGAIN;
2589 ++ goto out;
2590 ++ }
2591 + inode_unlock_shared(inode);
2592 + *ilock_shared = false;
2593 + inode_lock(inode);
2594 +diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
2595 +index be2b66eb65f7a..4026418257121 100644
2596 +--- a/fs/ext4/indirect.c
2597 ++++ b/fs/ext4/indirect.c
2598 +@@ -858,8 +858,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
2599 + else if (ext4_should_journal_data(inode))
2600 + flags |= EXT4_FREE_BLOCKS_FORGET;
2601 +
2602 +- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
2603 +- count)) {
2604 ++ if (!ext4_inode_block_valid(inode, block_to_free, count)) {
2605 + EXT4_ERROR_INODE(inode, "attempt to clear invalid "
2606 + "blocks %llu len %lu",
2607 + (unsigned long long) block_to_free, count);
2608 +@@ -1004,8 +1003,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
2609 + if (!nr)
2610 + continue; /* A hole */
2611 +
2612 +- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
2613 +- nr, 1)) {
2614 ++ if (!ext4_inode_block_valid(inode, nr, 1)) {
2615 + EXT4_ERROR_INODE(inode,
2616 + "invalid indirect mapped "
2617 + "block %lu (level %d)",
2618 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2619 +index 87430d276bccc..d074ee4a7085a 100644
2620 +--- a/fs/ext4/inode.c
2621 ++++ b/fs/ext4/inode.c
2622 +@@ -384,8 +384,7 @@ static int __check_block_validity(struct inode *inode, const char *func,
2623 + (inode->i_ino ==
2624 + le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
2625 + return 0;
2626 +- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
2627 +- map->m_len)) {
2628 ++ if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
2629 + ext4_error_inode(inode, func, line, map->m_pblk,
2630 + "lblock %lu mapped to illegal pblock %llu "
2631 + "(length %d)", (unsigned long) map->m_lblk,
2632 +@@ -4747,7 +4746,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
2633 +
2634 + ret = 0;
2635 + if (ei->i_file_acl &&
2636 +- !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
2637 ++ !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
2638 + ext4_error_inode(inode, function, line, 0,
2639 + "iget: bad extended attribute block %llu",
2640 + ei->i_file_acl);
2641 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
2642 +index 30d5d97548c42..0461e82aba352 100644
2643 +--- a/fs/ext4/mballoc.c
2644 ++++ b/fs/ext4/mballoc.c
2645 +@@ -2992,7 +2992,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2646 + block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
2647 +
2648 + len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
2649 +- if (!ext4_data_block_valid(sbi, block, len)) {
2650 ++ if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
2651 + ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
2652 + "fs metadata", block, block+len);
2653 + /* File system mounted not to panic on error
2654 +@@ -4759,7 +4759,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
2655 +
2656 + sbi = EXT4_SB(sb);
2657 + if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
2658 +- !ext4_data_block_valid(sbi, block, count)) {
2659 ++ !ext4_inode_block_valid(inode, block, count)) {
2660 + ext4_error(sb, "Freeing blocks not in datazone - "
2661 + "block = %llu, count = %lu", block, count);
2662 + goto error_return;
2663 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
2664 +index 56738b538ddf4..a91a5bb8c3a2b 100644
2665 +--- a/fs/ext4/namei.c
2666 ++++ b/fs/ext4/namei.c
2667 +@@ -1396,8 +1396,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
2668 + ext4_match(dir, fname, de)) {
2669 + /* found a match - just to be sure, do
2670 + * a full check */
2671 +- if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
2672 +- bh->b_size, offset))
2673 ++ if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
2674 ++ buf_size, offset))
2675 + return -1;
2676 + *res_dir = de;
2677 + return 1;
2678 +@@ -1858,7 +1858,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
2679 + blocksize, hinfo, map);
2680 + map -= count;
2681 + dx_sort_map(map, count);
2682 +- /* Split the existing block in the middle, size-wise */
2683 ++ /* Ensure that neither split block is over half full */
2684 + size = 0;
2685 + move = 0;
2686 + for (i = count-1; i >= 0; i--) {
2687 +@@ -1868,8 +1868,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
2688 + size += map[i].size;
2689 + move++;
2690 + }
2691 +- /* map index at which we will split */
2692 +- split = count - move;
2693 ++ /*
2694 ++ * map index at which we will split
2695 ++ *
2696 ++ * If the sum of active entries didn't exceed half the block size, just
2697 ++ * split it in half by count; each resulting block will have at least
2698 ++ * half the space free.
2699 ++ */
2700 ++ if (i > 0)
2701 ++ split = count - move;
2702 ++ else
2703 ++ split = count/2;
2704 ++
2705 + hash2 = map[split].hash;
2706 + continued = hash2 == map[split - 1].hash;
2707 + dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
2708 +@@ -2472,7 +2482,7 @@ int ext4_generic_delete_entry(handle_t *handle,
2709 + de = (struct ext4_dir_entry_2 *)entry_buf;
2710 + while (i < buf_size - csum_size) {
2711 + if (ext4_check_dir_entry(dir, NULL, de, bh,
2712 +- bh->b_data, bh->b_size, i))
2713 ++ entry_buf, buf_size, i))
2714 + return -EFSCORRUPTED;
2715 + if (de == de_del) {
2716 + if (pde)
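
The do_split() change keeps the size-based scan but falls back to splitting by entry count when the scan walks the whole map, which previously could leave one of the two blocks nearly empty. A compilable model of the split-point selection; the break condition inside the loop is paraphrased from the surrounding kernel code rather than shown in the hunk, and the entry and block sizes are made up:

    #include <stdio.h>

    struct map_ent { int size; };

    /* Scan from the tail until the moved bytes pass half the block;
     * if the scan consumes every entry, fall back to count/2. */
    static int pick_split(const struct map_ent *map, int count, int blocksize)
    {
        int size = 0, move = 0, i;

        for (i = count - 1; i >= 0; i--) {
            if (size + map[i].size / 2 > blocksize / 2)
                break;
            size += map[i].size;
            move++;
        }
        return i > 0 ? count - move : count / 2;
    }

    int main(void)
    {
        struct map_ent tiny[8]  = { {8},{8},{8},{8},{8},{8},{8},{8} };
        struct map_ent mixed[8] = { {8},{8},{8},{8},{8},{8},{8},{600} };

        printf("all-tiny: split=%d (count/2 fallback)\n",
               pick_split(tiny, 8, 1024));
        printf("mixed:    split=%d (size-based)\n",
               pick_split(mixed, 8, 1024));
        return 0;
    }
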
2717 +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
2718 +index 527d50edcb956..b397121dfa107 100644
2719 +--- a/fs/f2fs/compress.c
2720 ++++ b/fs/f2fs/compress.c
2721 +@@ -1207,6 +1207,12 @@ retry_write:
2722 + congestion_wait(BLK_RW_ASYNC,
2723 + DEFAULT_IO_TIMEOUT);
2724 + lock_page(cc->rpages[i]);
2725 ++
2726 ++ if (!PageDirty(cc->rpages[i])) {
2727 ++ unlock_page(cc->rpages[i]);
2728 ++ continue;
2729 ++ }
2730 ++
2731 + clear_page_dirty_for_io(cc->rpages[i]);
2732 + goto retry_write;
2733 + }
2734 +diff --git a/fs/io-wq.c b/fs/io-wq.c
2735 +index 4023c98468608..2bfa9117bc289 100644
2736 +--- a/fs/io-wq.c
2737 ++++ b/fs/io-wq.c
2738 +@@ -907,13 +907,15 @@ void io_wq_cancel_all(struct io_wq *wq)
2739 + struct io_cb_cancel_data {
2740 + work_cancel_fn *fn;
2741 + void *data;
2742 ++ int nr_running;
2743 ++ int nr_pending;
2744 ++ bool cancel_all;
2745 + };
2746 +
2747 + static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
2748 + {
2749 + struct io_cb_cancel_data *match = data;
2750 + unsigned long flags;
2751 +- bool ret = false;
2752 +
2753 + /*
2754 + * Hold the lock to avoid ->cur_work going out of scope, caller
2755 +@@ -924,74 +926,90 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
2756 + !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
2757 + match->fn(worker->cur_work, match->data)) {
2758 + send_sig(SIGINT, worker->task, 1);
2759 +- ret = true;
2760 ++ match->nr_running++;
2761 + }
2762 + spin_unlock_irqrestore(&worker->lock, flags);
2763 +
2764 +- return ret;
2765 ++ return match->nr_running && !match->cancel_all;
2766 + }
2767 +
2768 +-static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
2769 +- struct io_cb_cancel_data *match)
2770 ++static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
2771 ++ struct io_cb_cancel_data *match)
2772 + {
2773 + struct io_wq_work_node *node, *prev;
2774 + struct io_wq_work *work;
2775 + unsigned long flags;
2776 +- bool found = false;
2777 +
2778 +- /*
2779 +- * First check pending list, if we're lucky we can just remove it
2780 +- * from there. CANCEL_OK means that the work is returned as-new,
2781 +- * no completion will be posted for it.
2782 +- */
2783 ++retry:
2784 + spin_lock_irqsave(&wqe->lock, flags);
2785 + wq_list_for_each(node, prev, &wqe->work_list) {
2786 + work = container_of(node, struct io_wq_work, list);
2787 ++ if (!match->fn(work, match->data))
2788 ++ continue;
2789 +
2790 +- if (match->fn(work, match->data)) {
2791 +- wq_list_del(&wqe->work_list, node, prev);
2792 +- found = true;
2793 +- break;
2794 +- }
2795 +- }
2796 +- spin_unlock_irqrestore(&wqe->lock, flags);
2797 +-
2798 +- if (found) {
2799 ++ wq_list_del(&wqe->work_list, node, prev);
2800 ++ spin_unlock_irqrestore(&wqe->lock, flags);
2801 + io_run_cancel(work, wqe);
2802 +- return IO_WQ_CANCEL_OK;
2803 ++ match->nr_pending++;
2804 ++ if (!match->cancel_all)
2805 ++ return;
2806 ++
2807 ++ /* not safe to continue after unlock */
2808 ++ goto retry;
2809 + }
2810 ++ spin_unlock_irqrestore(&wqe->lock, flags);
2811 ++}
2812 +
2813 +- /*
2814 +- * Now check if a free (going busy) or busy worker has the work
2815 +- * currently running. If we find it there, we'll return CANCEL_RUNNING
2816 +- * as an indication that we attempt to signal cancellation. The
2817 +- * completion will run normally in this case.
2818 +- */
2819 ++static void io_wqe_cancel_running_work(struct io_wqe *wqe,
2820 ++ struct io_cb_cancel_data *match)
2821 ++{
2822 + rcu_read_lock();
2823 +- found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
2824 ++ io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
2825 + rcu_read_unlock();
2826 +- return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
2827 + }
2828 +
2829 + enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
2830 +- void *data)
2831 ++ void *data, bool cancel_all)
2832 + {
2833 + struct io_cb_cancel_data match = {
2834 +- .fn = cancel,
2835 +- .data = data,
2836 ++ .fn = cancel,
2837 ++ .data = data,
2838 ++ .cancel_all = cancel_all,
2839 + };
2840 +- enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
2841 + int node;
2842 +
2843 ++ /*
2844 ++ * First check pending list, if we're lucky we can just remove it
2845 ++ * from there. CANCEL_OK means that the work is returned as-new,
2846 ++ * no completion will be posted for it.
2847 ++ */
2848 + for_each_node(node) {
2849 + struct io_wqe *wqe = wq->wqes[node];
2850 +
2851 +- ret = io_wqe_cancel_work(wqe, &match);
2852 +- if (ret != IO_WQ_CANCEL_NOTFOUND)
2853 +- break;
2854 ++ io_wqe_cancel_pending_work(wqe, &match);
2855 ++ if (match.nr_pending && !match.cancel_all)
2856 ++ return IO_WQ_CANCEL_OK;
2857 + }
2858 +
2859 +- return ret;
2860 ++ /*
2861 ++ * Now check if a free (going busy) or busy worker has the work
2862 ++ * currently running. If we find it there, we'll return CANCEL_RUNNING
2863 ++ * as an indication that we attempt to signal cancellation. The
2864 ++ * completion will run normally in this case.
2865 ++ */
2866 ++ for_each_node(node) {
2867 ++ struct io_wqe *wqe = wq->wqes[node];
2868 ++
2869 ++ io_wqe_cancel_running_work(wqe, &match);
2870 ++ if (match.nr_running && !match.cancel_all)
2871 ++ return IO_WQ_CANCEL_RUNNING;
2872 ++ }
2873 ++
2874 ++ if (match.nr_running)
2875 ++ return IO_WQ_CANCEL_RUNNING;
2876 ++ if (match.nr_pending)
2877 ++ return IO_WQ_CANCEL_OK;
2878 ++ return IO_WQ_CANCEL_NOTFOUND;
2879 + }
2880 +
2881 + static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
2882 +@@ -1001,21 +1019,7 @@ static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
2883 +
2884 + enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
2885 + {
2886 +- return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork);
2887 +-}
2888 +-
2889 +-static bool io_wq_pid_match(struct io_wq_work *work, void *data)
2890 +-{
2891 +- pid_t pid = (pid_t) (unsigned long) data;
2892 +-
2893 +- return work->task_pid == pid;
2894 +-}
2895 +-
2896 +-enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
2897 +-{
2898 +- void *data = (void *) (unsigned long) pid;
2899 +-
2900 +- return io_wq_cancel_cb(wq, io_wq_pid_match, data);
2901 ++ return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
2902 + }
2903 +
2904 + struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
2905 +diff --git a/fs/io-wq.h b/fs/io-wq.h
2906 +index 5ba12de7572f0..df8a4cd3236db 100644
2907 +--- a/fs/io-wq.h
2908 ++++ b/fs/io-wq.h
2909 +@@ -129,12 +129,11 @@ static inline bool io_wq_is_hashed(struct io_wq_work *work)
2910 +
2911 + void io_wq_cancel_all(struct io_wq *wq);
2912 + enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
2913 +-enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid);
2914 +
2915 + typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
2916 +
2917 + enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
2918 +- void *data);
2919 ++ void *data, bool cancel_all);
2920 +
2921 + struct task_struct *io_wq_get_task(struct io_wq *wq);
2922 +
2923 +diff --git a/fs/io_uring.c b/fs/io_uring.c
2924 +index b33d4a97a8774..0822a16bed9aa 100644
2925 +--- a/fs/io_uring.c
2926 ++++ b/fs/io_uring.c
2927 +@@ -5023,7 +5023,7 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
2928 + enum io_wq_cancel cancel_ret;
2929 + int ret = 0;
2930 +
2931 +- cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
2932 ++ cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
2933 + switch (cancel_ret) {
2934 + case IO_WQ_CANCEL_OK:
2935 + ret = 0;
2936 +@@ -7659,6 +7659,33 @@ static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
2937 + return found;
2938 + }
2939 +
2940 ++static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
2941 ++{
2942 ++ return io_match_link(container_of(work, struct io_kiocb, work), data);
2943 ++}
2944 ++
2945 ++static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
2946 ++{
2947 ++ enum io_wq_cancel cret;
2948 ++
2949 ++ /* cancel this particular work, if it's running */
2950 ++ cret = io_wq_cancel_work(ctx->io_wq, &req->work);
2951 ++ if (cret != IO_WQ_CANCEL_NOTFOUND)
2952 ++ return;
2953 ++
2954 ++ /* find links that hold this pending, cancel those */
2955 ++ cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true);
2956 ++ if (cret != IO_WQ_CANCEL_NOTFOUND)
2957 ++ return;
2958 ++
2959 ++ /* if we have a poll link holding this pending, cancel that */
2960 ++ if (io_poll_remove_link(ctx, req))
2961 ++ return;
2962 ++
2963 ++ /* final option, timeout link is holding this req pending */
2964 ++ io_timeout_remove_link(ctx, req);
2965 ++}
2966 ++
2967 + static void io_uring_cancel_files(struct io_ring_ctx *ctx,
2968 + struct files_struct *files)
2969 + {
2970 +@@ -7708,10 +7735,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
2971 + continue;
2972 + }
2973 + } else {
2974 +- io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
2975 +- /* could be a link, check and remove if it is */
2976 +- if (!io_poll_remove_link(ctx, cancel_req))
2977 +- io_timeout_remove_link(ctx, cancel_req);
2978 ++ /* cancel this request, or head link requests */
2979 ++ io_attempt_cancel(ctx, cancel_req);
2980 + io_put_req(cancel_req);
2981 + }
2982 +
2983 +@@ -7720,6 +7745,13 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
2984 + }
2985 + }
2986 +
2987 ++static bool io_cancel_pid_cb(struct io_wq_work *work, void *data)
2988 ++{
2989 ++ pid_t pid = (pid_t) (unsigned long) data;
2990 ++
2991 ++ return work->task_pid == pid;
2992 ++}
2993 ++
2994 + static int io_uring_flush(struct file *file, void *data)
2995 + {
2996 + struct io_ring_ctx *ctx = file->private_data;
2997 +@@ -7729,8 +7761,11 @@ static int io_uring_flush(struct file *file, void *data)
2998 + /*
2999 + * If the task is going away, cancel work it may have pending
3000 + */
3001 +- if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
3002 +- io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
3003 ++ if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
3004 ++ void *data = (void *) (unsigned long)task_pid_vnr(current);
3005 ++
3006 ++ io_wq_cancel_cb(ctx->io_wq, io_cancel_pid_cb, data, true);
3007 ++ }
3008 +
3009 + return 0;
3010 + }
3011 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
3012 +index e4944436e733d..5493a0da23ddd 100644
3013 +--- a/fs/jbd2/journal.c
3014 ++++ b/fs/jbd2/journal.c
3015 +@@ -1367,8 +1367,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
3016 + int ret;
3017 +
3018 + /* Buffer got discarded which means block device got invalidated */
3019 +- if (!buffer_mapped(bh))
3020 ++ if (!buffer_mapped(bh)) {
3021 ++ unlock_buffer(bh);
3022 + return -EIO;
3023 ++ }
3024 +
3025 + trace_jbd2_write_superblock(journal, write_flags);
3026 + if (!(journal->j_flags & JBD2_BARRIER))
3027 +diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
3028 +index f20cff1194bb6..776493713153f 100644
3029 +--- a/fs/jffs2/dir.c
3030 ++++ b/fs/jffs2/dir.c
3031 +@@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
3032 + int ret;
3033 + uint32_t now = JFFS2_NOW();
3034 +
3035 ++ mutex_lock(&f->sem);
3036 + for (fd = f->dents ; fd; fd = fd->next) {
3037 +- if (fd->ino)
3038 ++ if (fd->ino) {
3039 ++ mutex_unlock(&f->sem);
3040 + return -ENOTEMPTY;
3041 ++ }
3042 + }
3043 ++ mutex_unlock(&f->sem);
3044 +
3045 + ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
3046 + dentry->d_name.len, f, now);
3047 +diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
3048 +index 6b2b4362089e6..b57b3ffcbc327 100644
3049 +--- a/fs/romfs/storage.c
3050 ++++ b/fs/romfs/storage.c
3051 +@@ -217,10 +217,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos,
3052 + size_t limit;
3053 +
3054 + limit = romfs_maxsize(sb);
3055 +- if (pos >= limit)
3056 ++ if (pos >= limit || buflen > limit - pos)
3057 + return -EIO;
3058 +- if (buflen > limit - pos)
3059 +- buflen = limit - pos;
3060 +
3061 + #ifdef CONFIG_ROMFS_ON_MTD
3062 + if (sb->s_mtd)
3063 +diff --git a/fs/signalfd.c b/fs/signalfd.c
3064 +index 44b6845b071c3..5b78719be4455 100644
3065 +--- a/fs/signalfd.c
3066 ++++ b/fs/signalfd.c
3067 +@@ -314,9 +314,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
3068 + {
3069 + sigset_t mask;
3070 +
3071 +- if (sizemask != sizeof(sigset_t) ||
3072 +- copy_from_user(&mask, user_mask, sizeof(mask)))
3073 ++ if (sizemask != sizeof(sigset_t))
3074 + return -EINVAL;
3075 ++ if (copy_from_user(&mask, user_mask, sizeof(mask)))
3076 ++ return -EFAULT;
3077 + return do_signalfd4(ufd, &mask, flags);
3078 + }
3079 +
3080 +@@ -325,9 +326,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
3081 + {
3082 + sigset_t mask;
3083 +
3084 +- if (sizemask != sizeof(sigset_t) ||
3085 +- copy_from_user(&mask, user_mask, sizeof(mask)))
3086 ++ if (sizemask != sizeof(sigset_t))
3087 + return -EINVAL;
3088 ++ if (copy_from_user(&mask, user_mask, sizeof(mask)))
3089 ++ return -EFAULT;
3090 + return do_signalfd4(ufd, &mask, 0);
3091 + }
3092 +
3093 +diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
3094 +index e9f810fc67317..43585850f1546 100644
3095 +--- a/fs/xfs/xfs_sysfs.h
3096 ++++ b/fs/xfs/xfs_sysfs.h
3097 +@@ -32,9 +32,11 @@ xfs_sysfs_init(
3098 + struct xfs_kobj *parent_kobj,
3099 + const char *name)
3100 + {
3101 ++ struct kobject *parent;
3102 ++
3103 ++ parent = parent_kobj ? &parent_kobj->kobject : NULL;
3104 + init_completion(&kobj->complete);
3105 +- return kobject_init_and_add(&kobj->kobject, ktype,
3106 +- &parent_kobj->kobject, "%s", name);
3107 ++ return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
3108 + }
3109 +
3110 + static inline void
3111 +diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
3112 +index d1b9869bc5fa6..af3636a99bf60 100644
3113 +--- a/fs/xfs/xfs_trans_dquot.c
3114 ++++ b/fs/xfs/xfs_trans_dquot.c
3115 +@@ -647,7 +647,7 @@ xfs_trans_dqresv(
3116 + }
3117 + }
3118 + if (ninos > 0) {
3119 +- total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
3120 ++ total_count = dqp->q_res_icount + ninos;
3121 + timer = be32_to_cpu(dqp->q_core.d_itimer);
3122 + warns = be16_to_cpu(dqp->q_core.d_iwarns);
3123 + warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
3124 +diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
3125 +index 8e1f7165162c3..d92d3e729bc7f 100644
3126 +--- a/kernel/events/uprobes.c
3127 ++++ b/kernel/events/uprobes.c
3128 +@@ -211,7 +211,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
3129 + try_to_free_swap(old_page);
3130 + page_vma_mapped_walk_done(&pvmw);
3131 +
3132 +- if (vma->vm_flags & VM_LOCKED)
3133 ++ if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page))
3134 + munlock_vma_page(old_page);
3135 + put_page(old_page);
3136 +
3137 +diff --git a/kernel/relay.c b/kernel/relay.c
3138 +index 4b760ec163426..d3940becf2fc3 100644
3139 +--- a/kernel/relay.c
3140 ++++ b/kernel/relay.c
3141 +@@ -197,6 +197,7 @@ free_buf:
3142 + static void relay_destroy_channel(struct kref *kref)
3143 + {
3144 + struct rchan *chan = container_of(kref, struct rchan, kref);
3145 ++ free_percpu(chan->buf);
3146 + kfree(chan);
3147 + }
3148 +
3149 +diff --git a/mm/khugepaged.c b/mm/khugepaged.c
3150 +index 38874fe112d58..cb17091d0a202 100644
3151 +--- a/mm/khugepaged.c
3152 ++++ b/mm/khugepaged.c
3153 +@@ -400,7 +400,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
3154 +
3155 + static inline int khugepaged_test_exit(struct mm_struct *mm)
3156 + {
3157 +- return atomic_read(&mm->mm_users) == 0;
3158 ++ return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
3159 + }
3160 +
3161 + static bool hugepage_vma_check(struct vm_area_struct *vma,
3162 +@@ -435,7 +435,7 @@ int __khugepaged_enter(struct mm_struct *mm)
3163 + return -ENOMEM;
3164 +
3165 + /* __khugepaged_exit() must not run from under us */
3166 +- VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
3167 ++ VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
3168 + if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
3169 + free_mm_slot(mm_slot);
3170 + return 0;
3171 +@@ -1016,9 +1016,6 @@ static void collapse_huge_page(struct mm_struct *mm,
3172 + * handled by the anon_vma lock + PG_lock.
3173 + */
3174 + down_write(&mm->mmap_sem);
3175 +- result = SCAN_ANY_PROCESS;
3176 +- if (!mmget_still_valid(mm))
3177 +- goto out;
3178 + result = hugepage_vma_revalidate(mm, address, &vma);
3179 + if (result)
3180 + goto out;
3181 +diff --git a/mm/memory.c b/mm/memory.c
3182 +index 22d218bc56c8a..44d848b291b48 100644
3183 +--- a/mm/memory.c
3184 ++++ b/mm/memory.c
3185 +@@ -4237,6 +4237,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
3186 + vmf->flags & FAULT_FLAG_WRITE)) {
3187 + update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
3188 + } else {
3189 ++ /* Skip spurious TLB flush for retried page fault */
3190 ++ if (vmf->flags & FAULT_FLAG_TRIED)
3191 ++ goto unlock;
3192 + /*
3193 + * This is needed only for protection faults but the arch code
3194 + * is not yet telling us if this is a protection fault or not.
3195 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
3196 +index d0c0d9364aa6d..398dd6c90ad0f 100644
3197 +--- a/mm/page_alloc.c
3198 ++++ b/mm/page_alloc.c
3199 +@@ -1308,6 +1308,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
3200 + struct page *page, *tmp;
3201 + LIST_HEAD(head);
3202 +
3203 ++ /*
3204 ++	 * Ensure a proper count is passed; otherwise we would get stuck in
3205 ++	 * the while (list_empty(list)) loop below.
3206 ++ */
3207 ++ count = min(pcp->count, count);
3208 + while (count) {
3209 + struct list_head *list;
3210 +
3211 +@@ -7959,7 +7964,7 @@ int __meminit init_per_zone_wmark_min(void)
3212 +
3213 + return 0;
3214 + }
3215 +-core_initcall(init_per_zone_wmark_min)
3216 ++postcore_initcall(init_per_zone_wmark_min)
3217 +
3218 + /*
3219 + * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
3220 +diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
3221 +index f7587428febdd..bf9fd6ee88fe0 100644
3222 +--- a/net/can/j1939/socket.c
3223 ++++ b/net/can/j1939/socket.c
3224 +@@ -398,6 +398,7 @@ static int j1939_sk_init(struct sock *sk)
3225 + spin_lock_init(&jsk->sk_session_queue_lock);
3226 + INIT_LIST_HEAD(&jsk->sk_session_queue);
3227 + sk->sk_destruct = j1939_sk_sock_destruct;
3228 ++ sk->sk_protocol = CAN_J1939;
3229 +
3230 + return 0;
3231 + }
3232 +@@ -466,6 +467,14 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
3233 + goto out_release_sock;
3234 + }
3235 +
3236 ++ if (!ndev->ml_priv) {
3237 ++ netdev_warn_once(ndev,
3238 ++ "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
3239 ++ dev_put(ndev);
3240 ++ ret = -ENODEV;
3241 ++ goto out_release_sock;
3242 ++ }
3243 ++
3244 + priv = j1939_netdev_start(ndev);
3245 + dev_put(ndev);
3246 + if (IS_ERR(priv)) {
3247 +@@ -553,6 +562,11 @@ static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
3248 + static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
3249 + const struct j1939_sock *jsk, int peer)
3250 + {
3251 ++ /* There are two holes (2 bytes and 3 bytes) to clear to avoid
3252 ++ * leaking kernel information to user space.
3253 ++ */
3254 ++ memset(addr, 0, J1939_MIN_NAMELEN);
3255 ++
3256 + addr->can_family = AF_CAN;
3257 + addr->can_ifindex = jsk->ifindex;
3258 + addr->can_addr.j1939.pgn = jsk->addr.pgn;
3259 +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
3260 +index 9f99af5b0b11e..dbd215cbc53d8 100644
3261 +--- a/net/can/j1939/transport.c
3262 ++++ b/net/can/j1939/transport.c
3263 +@@ -352,17 +352,16 @@ void j1939_session_skb_queue(struct j1939_session *session,
3264 + skb_queue_tail(&session->skb_queue, skb);
3265 + }
3266 +
3267 +-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
3268 ++static struct sk_buff *
3269 ++j1939_session_skb_find_by_offset(struct j1939_session *session,
3270 ++ unsigned int offset_start)
3271 + {
3272 + struct j1939_priv *priv = session->priv;
3273 ++ struct j1939_sk_buff_cb *do_skcb;
3274 + struct sk_buff *skb = NULL;
3275 + struct sk_buff *do_skb;
3276 +- struct j1939_sk_buff_cb *do_skcb;
3277 +- unsigned int offset_start;
3278 + unsigned long flags;
3279 +
3280 +- offset_start = session->pkt.dpo * 7;
3281 +-
3282 + spin_lock_irqsave(&session->skb_queue.lock, flags);
3283 + skb_queue_walk(&session->skb_queue, do_skb) {
3284 + do_skcb = j1939_skb_to_cb(do_skb);
3285 +@@ -382,6 +381,14 @@ static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
3286 + return skb;
3287 + }
3288 +
3289 ++static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
3290 ++{
3291 ++ unsigned int offset_start;
3292 ++
3293 ++ offset_start = session->pkt.dpo * 7;
3294 ++ return j1939_session_skb_find_by_offset(session, offset_start);
3295 ++}
3296 ++
3297 + /* see if we are receiver
3298 + * returns 0 for broadcasts, although we will receive them
3299 + */
3300 +@@ -716,10 +723,12 @@ static int j1939_session_tx_rts(struct j1939_session *session)
3301 + return ret;
3302 +
3303 + session->last_txcmd = dat[0];
3304 +- if (dat[0] == J1939_TP_CMD_BAM)
3305 ++ if (dat[0] == J1939_TP_CMD_BAM) {
3306 + j1939_tp_schedule_txtimer(session, 50);
3307 +-
3308 +- j1939_tp_set_rxtimeout(session, 1250);
3309 ++ j1939_tp_set_rxtimeout(session, 250);
3310 ++ } else {
3311 ++ j1939_tp_set_rxtimeout(session, 1250);
3312 ++ }
3313 +
3314 + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
3315 +
3316 +@@ -766,7 +775,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
3317 + int ret = 0;
3318 + u8 dat[8];
3319 +
3320 +- se_skb = j1939_session_skb_find(session);
3321 ++ se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
3322 + if (!se_skb)
3323 + return -ENOBUFS;
3324 +
3325 +@@ -787,6 +796,18 @@ static int j1939_session_tx_dat(struct j1939_session *session)
3326 + if (len > 7)
3327 + len = 7;
3328 +
3329 ++ if (offset + len > se_skb->len) {
3330 ++ netdev_err_once(priv->ndev,
3331 ++ "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
3332 ++				__func__, session, skcb->offset, se_skb->len, session->pkt.tx);
3333 ++ return -EOVERFLOW;
3334 ++ }
3335 ++
3336 ++ if (!len) {
3337 ++ ret = -ENOBUFS;
3338 ++ break;
3339 ++ }
3340 ++
3341 + memcpy(&dat[1], &tpdat[offset], len);
3342 + ret = j1939_tp_tx_dat(session, dat, len + 1);
3343 + if (ret < 0) {
3344 +@@ -1055,9 +1076,9 @@ static void __j1939_session_cancel(struct j1939_session *session,
3345 + lockdep_assert_held(&session->priv->active_session_list_lock);
3346 +
3347 + session->err = j1939_xtp_abort_to_errno(priv, err);
3348 ++ session->state = J1939_SESSION_WAITING_ABORT;
3349 + /* do not send aborts on incoming broadcasts */
3350 + if (!j1939_cb_is_broadcast(&session->skcb)) {
3351 +- session->state = J1939_SESSION_WAITING_ABORT;
3352 + j1939_xtp_tx_abort(priv, &session->skcb,
3353 + !session->transmission,
3354 + err, session->skcb.addr.pgn);
3355 +@@ -1120,6 +1141,9 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
3356 + * cleanup including propagation of the error to user space.
3357 + */
3358 + break;
3359 ++ case -EOVERFLOW:
3360 ++ j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG);
3361 ++ break;
3362 + case 0:
3363 + session->tx_retry = 0;
3364 + break;
3365 +@@ -1651,8 +1675,12 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
3366 + return;
3367 + }
3368 + session = j1939_xtp_rx_rts_session_new(priv, skb);
3369 +- if (!session)
3370 ++ if (!session) {
3371 ++ if (cmd == J1939_TP_CMD_BAM && j1939_sk_recv_match(priv, skcb))
3372 ++ netdev_info(priv->ndev, "%s: failed to create TP BAM session\n",
3373 ++ __func__);
3374 + return;
3375 ++ }
3376 + } else {
3377 + if (j1939_xtp_rx_rts_session_active(session, skb)) {
3378 + j1939_session_put(session);
3379 +@@ -1661,11 +1689,15 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
3380 + }
3381 + session->last_cmd = cmd;
3382 +
3383 +- j1939_tp_set_rxtimeout(session, 1250);
3384 +-
3385 +- if (cmd != J1939_TP_CMD_BAM && !session->transmission) {
3386 +- j1939_session_txtimer_cancel(session);
3387 +- j1939_tp_schedule_txtimer(session, 0);
3388 ++ if (cmd == J1939_TP_CMD_BAM) {
3389 ++ if (!session->transmission)
3390 ++ j1939_tp_set_rxtimeout(session, 750);
3391 ++ } else {
3392 ++ if (!session->transmission) {
3393 ++ j1939_session_txtimer_cancel(session);
3394 ++ j1939_tp_schedule_txtimer(session, 0);
3395 ++ }
3396 ++ j1939_tp_set_rxtimeout(session, 1250);
3397 + }
3398 +
3399 + j1939_session_put(session);
3400 +@@ -1716,6 +1748,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
3401 + int offset;
3402 + int nbytes;
3403 + bool final = false;
3404 ++ bool remain = false;
3405 + bool do_cts_eoma = false;
3406 + int packet;
3407 +
3408 +@@ -1750,7 +1783,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
3409 + __func__, session);
3410 + goto out_session_cancel;
3411 + }
3412 +- se_skb = j1939_session_skb_find(session);
3413 ++
3414 ++ se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
3415 + if (!se_skb) {
3416 + netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
3417 + session);
3418 +@@ -1777,6 +1811,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
3419 + j1939_cb_is_broadcast(&session->skcb)) {
3420 + if (session->pkt.rx >= session->pkt.total)
3421 + final = true;
3422 ++ else
3423 ++ remain = true;
3424 + } else {
3425 + /* never final, an EOMA must follow */
3426 + if (session->pkt.rx >= session->pkt.last)
3427 +@@ -1784,7 +1820,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
3428 + }
3429 +
3430 + if (final) {
3431 ++ j1939_session_timers_cancel(session);
3432 + j1939_session_completed(session);
3433 ++ } else if (remain) {
3434 ++ if (!session->transmission)
3435 ++ j1939_tp_set_rxtimeout(session, 750);
3436 + } else if (do_cts_eoma) {
3437 + j1939_tp_set_rxtimeout(session, 1250);
3438 + if (!session->transmission)
3439 +@@ -1829,6 +1869,13 @@ static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb)
3440 + else
3441 + j1939_xtp_rx_dat_one(session, skb);
3442 + }
3443 ++
3444 ++ if (j1939_cb_is_broadcast(skcb)) {
3445 ++ session = j1939_session_get_by_addr(priv, &skcb->addr, false,
3446 ++ false);
3447 ++ if (session)
3448 ++ j1939_xtp_rx_dat_one(session, skb);
3449 ++ }
3450 + }
3451 +
3452 + /* j1939 main intf */
3453 +@@ -1920,7 +1967,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
3454 + if (j1939_tp_im_transmitter(skcb))
3455 + j1939_xtp_rx_rts(priv, skb, true);
3456 +
3457 +- if (j1939_tp_im_receiver(skcb))
3458 ++ if (j1939_tp_im_receiver(skcb) || j1939_cb_is_broadcast(skcb))
3459 + j1939_xtp_rx_rts(priv, skb, false);
3460 +
3461 + break;
3462 +@@ -1984,7 +2031,7 @@ int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb)
3463 + {
3464 + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
3465 +
3466 +- if (!j1939_tp_im_involved_anydir(skcb))
3467 ++ if (!j1939_tp_im_involved_anydir(skcb) && !j1939_cb_is_broadcast(skcb))
3468 + return 0;
3469 +
3470 + switch (skcb->addr.pgn) {
3471 +@@ -2017,6 +2064,10 @@ void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb)
3472 + if (!skb->sk)
3473 + return;
3474 +
3475 ++ if (skb->sk->sk_family != AF_CAN ||
3476 ++ skb->sk->sk_protocol != CAN_J1939)
3477 ++ return;
3478 ++
3479 + j1939_session_list_lock(priv);
3480 + session = j1939_session_get_simple(priv, skb);
3481 + j1939_session_list_unlock(priv);
3482 +diff --git a/net/core/filter.c b/net/core/filter.c
3483 +index cebbb6ba9ed92..9c03702600128 100644
3484 +--- a/net/core/filter.c
3485 ++++ b/net/core/filter.c
3486 +@@ -8066,15 +8066,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
3487 + /* Helper macro for adding read access to tcp_sock or sock fields. */
3488 + #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
3489 + do { \
3490 ++ int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \
3491 + BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
3492 + sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
3493 ++ if (si->dst_reg == reg || si->src_reg == reg) \
3494 ++ reg--; \
3495 ++ if (si->dst_reg == reg || si->src_reg == reg) \
3496 ++ reg--; \
3497 ++ if (si->dst_reg == si->src_reg) { \
3498 ++ *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
3499 ++ offsetof(struct bpf_sock_ops_kern, \
3500 ++ temp)); \
3501 ++ fullsock_reg = reg; \
3502 ++ jmp += 2; \
3503 ++ } \
3504 + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
3505 + struct bpf_sock_ops_kern, \
3506 + is_fullsock), \
3507 +- si->dst_reg, si->src_reg, \
3508 ++ fullsock_reg, si->src_reg, \
3509 + offsetof(struct bpf_sock_ops_kern, \
3510 + is_fullsock)); \
3511 +- *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
3512 ++ *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
3513 ++ if (si->dst_reg == si->src_reg) \
3514 ++ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
3515 ++ offsetof(struct bpf_sock_ops_kern, \
3516 ++ temp)); \
3517 + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
3518 + struct bpf_sock_ops_kern, sk),\
3519 + si->dst_reg, si->src_reg, \
3520 +@@ -8083,6 +8099,49 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
3521 + OBJ_FIELD), \
3522 + si->dst_reg, si->dst_reg, \
3523 + offsetof(OBJ, OBJ_FIELD)); \
3524 ++ if (si->dst_reg == si->src_reg) { \
3525 ++ *insn++ = BPF_JMP_A(1); \
3526 ++ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
3527 ++ offsetof(struct bpf_sock_ops_kern, \
3528 ++ temp)); \
3529 ++ } \
3530 ++ } while (0)
3531 ++
3532 ++#define SOCK_OPS_GET_SK() \
3533 ++ do { \
3534 ++ int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
3535 ++ if (si->dst_reg == reg || si->src_reg == reg) \
3536 ++ reg--; \
3537 ++ if (si->dst_reg == reg || si->src_reg == reg) \
3538 ++ reg--; \
3539 ++ if (si->dst_reg == si->src_reg) { \
3540 ++ *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
3541 ++ offsetof(struct bpf_sock_ops_kern, \
3542 ++ temp)); \
3543 ++ fullsock_reg = reg; \
3544 ++ jmp += 2; \
3545 ++ } \
3546 ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
3547 ++ struct bpf_sock_ops_kern, \
3548 ++ is_fullsock), \
3549 ++ fullsock_reg, si->src_reg, \
3550 ++ offsetof(struct bpf_sock_ops_kern, \
3551 ++ is_fullsock)); \
3552 ++ *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
3553 ++ if (si->dst_reg == si->src_reg) \
3554 ++ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
3555 ++ offsetof(struct bpf_sock_ops_kern, \
3556 ++ temp)); \
3557 ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
3558 ++ struct bpf_sock_ops_kern, sk),\
3559 ++ si->dst_reg, si->src_reg, \
3560 ++ offsetof(struct bpf_sock_ops_kern, sk));\
3561 ++ if (si->dst_reg == si->src_reg) { \
3562 ++ *insn++ = BPF_JMP_A(1); \
3563 ++ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
3564 ++ offsetof(struct bpf_sock_ops_kern, \
3565 ++ temp)); \
3566 ++ } \
3567 + } while (0)
3568 +
3569 + #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
3570 +@@ -8369,17 +8428,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
3571 + SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
3572 + break;
3573 + case offsetof(struct bpf_sock_ops, sk):
3574 +- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
3575 +- struct bpf_sock_ops_kern,
3576 +- is_fullsock),
3577 +- si->dst_reg, si->src_reg,
3578 +- offsetof(struct bpf_sock_ops_kern,
3579 +- is_fullsock));
3580 +- *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
3581 +- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
3582 +- struct bpf_sock_ops_kern, sk),
3583 +- si->dst_reg, si->src_reg,
3584 +- offsetof(struct bpf_sock_ops_kern, sk));
3585 ++ SOCK_OPS_GET_SK();
3586 + break;
3587 + }
3588 + return insn - insn_buf;
3589 +diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
3590 +index 07782836fad6e..3c48cdc8935df 100644
3591 +--- a/net/netfilter/nft_exthdr.c
3592 ++++ b/net/netfilter/nft_exthdr.c
3593 +@@ -44,7 +44,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
3594 +
3595 + err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
3596 + if (priv->flags & NFT_EXTHDR_F_PRESENT) {
3597 +- *dest = (err >= 0);
3598 ++ nft_reg_store8(dest, err >= 0);
3599 + return;
3600 + } else if (err < 0) {
3601 + goto err;
3602 +@@ -141,7 +141,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
3603 +
3604 + err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
3605 + if (priv->flags & NFT_EXTHDR_F_PRESENT) {
3606 +- *dest = (err >= 0);
3607 ++ nft_reg_store8(dest, err >= 0);
3608 + return;
3609 + } else if (err < 0) {
3610 + goto err;
3611 +diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
3612 +index efa5fcb5793f7..952b8f1908500 100644
3613 +--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
3614 ++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
3615 +@@ -265,6 +265,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
3616 + {
3617 + struct svc_rdma_recv_ctxt *ctxt;
3618 +
3619 ++ if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
3620 ++ return 0;
3621 + ctxt = svc_rdma_recv_ctxt_get(rdma);
3622 + if (!ctxt)
3623 + return -ENOMEM;
3624 +diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
3625 +index c0ac8f7b5f1ab..1eb076c7eae17 100644
3626 +--- a/scripts/kconfig/qconf.cc
3627 ++++ b/scripts/kconfig/qconf.cc
3628 +@@ -881,40 +881,40 @@ void ConfigList::focusInEvent(QFocusEvent *e)
3629 +
3630 + void ConfigList::contextMenuEvent(QContextMenuEvent *e)
3631 + {
3632 +- if (e->y() <= header()->geometry().bottom()) {
3633 +- if (!headerPopup) {
3634 +- QAction *action;
3635 +-
3636 +- headerPopup = new QMenu(this);
3637 +- action = new QAction("Show Name", this);
3638 +- action->setCheckable(true);
3639 +- connect(action, SIGNAL(toggled(bool)),
3640 +- parent(), SLOT(setShowName(bool)));
3641 +- connect(parent(), SIGNAL(showNameChanged(bool)),
3642 +- action, SLOT(setOn(bool)));
3643 +- action->setChecked(showName);
3644 +- headerPopup->addAction(action);
3645 +- action = new QAction("Show Range", this);
3646 +- action->setCheckable(true);
3647 +- connect(action, SIGNAL(toggled(bool)),
3648 +- parent(), SLOT(setShowRange(bool)));
3649 +- connect(parent(), SIGNAL(showRangeChanged(bool)),
3650 +- action, SLOT(setOn(bool)));
3651 +- action->setChecked(showRange);
3652 +- headerPopup->addAction(action);
3653 +- action = new QAction("Show Data", this);
3654 +- action->setCheckable(true);
3655 +- connect(action, SIGNAL(toggled(bool)),
3656 +- parent(), SLOT(setShowData(bool)));
3657 +- connect(parent(), SIGNAL(showDataChanged(bool)),
3658 +- action, SLOT(setOn(bool)));
3659 +- action->setChecked(showData);
3660 +- headerPopup->addAction(action);
3661 +- }
3662 +- headerPopup->exec(e->globalPos());
3663 +- e->accept();
3664 +- } else
3665 +- e->ignore();
3666 ++ if (!headerPopup) {
3667 ++ QAction *action;
3668 ++
3669 ++ headerPopup = new QMenu(this);
3670 ++ action = new QAction("Show Name", this);
3671 ++ action->setCheckable(true);
3672 ++ connect(action, SIGNAL(toggled(bool)),
3673 ++ parent(), SLOT(setShowName(bool)));
3674 ++ connect(parent(), SIGNAL(showNameChanged(bool)),
3675 ++ action, SLOT(setChecked(bool)));
3676 ++ action->setChecked(showName);
3677 ++ headerPopup->addAction(action);
3678 ++
3679 ++ action = new QAction("Show Range", this);
3680 ++ action->setCheckable(true);
3681 ++ connect(action, SIGNAL(toggled(bool)),
3682 ++ parent(), SLOT(setShowRange(bool)));
3683 ++ connect(parent(), SIGNAL(showRangeChanged(bool)),
3684 ++ action, SLOT(setChecked(bool)));
3685 ++ action->setChecked(showRange);
3686 ++ headerPopup->addAction(action);
3687 ++
3688 ++ action = new QAction("Show Data", this);
3689 ++ action->setCheckable(true);
3690 ++ connect(action, SIGNAL(toggled(bool)),
3691 ++ parent(), SLOT(setShowData(bool)));
3692 ++ connect(parent(), SIGNAL(showDataChanged(bool)),
3693 ++ action, SLOT(setChecked(bool)));
3694 ++ action->setChecked(showData);
3695 ++ headerPopup->addAction(action);
3696 ++ }
3697 ++
3698 ++ headerPopup->exec(e->globalPos());
3699 ++ e->accept();
3700 + }
3701 +
3702 + ConfigView*ConfigView::viewList;
3703 +@@ -1240,7 +1240,7 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
3704 +
3705 + action->setCheckable(true);
3706 + connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
3707 +- connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
3708 ++ connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setChecked(bool)));
3709 + action->setChecked(showDebug());
3710 + popup->addSeparator();
3711 + popup->addAction(action);
3712 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3713 +index 313eecfb91b44..fe6db8b171e41 100644
3714 +--- a/sound/pci/hda/patch_realtek.c
3715 ++++ b/sound/pci/hda/patch_realtek.c
3716 +@@ -7666,6 +7666,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3717 + SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
3718 + SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
3719 + SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
3720 ++ SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
3721 ++ SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
3722 + SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
3723 + SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
3724 + SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
3725 +diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
3726 +index 85bc7ae4d2671..26cf372ccda6f 100644
3727 +--- a/sound/soc/codecs/msm8916-wcd-analog.c
3728 ++++ b/sound/soc/codecs/msm8916-wcd-analog.c
3729 +@@ -19,8 +19,8 @@
3730 +
3731 + #define CDC_D_REVISION1 (0xf000)
3732 + #define CDC_D_PERPH_SUBTYPE (0xf005)
3733 +-#define CDC_D_INT_EN_SET (0x015)
3734 +-#define CDC_D_INT_EN_CLR (0x016)
3735 ++#define CDC_D_INT_EN_SET (0xf015)
3736 ++#define CDC_D_INT_EN_CLR (0xf016)
3737 + #define MBHC_SWITCH_INT BIT(7)
3738 + #define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6)
3739 + #define MBHC_BUTTON_PRESS_DET BIT(5)
3740 +diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3741 +index 82f2b6357778d..a3cb05d925846 100644
3742 +--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3743 ++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3744 +@@ -331,7 +331,7 @@ static int sst_media_open(struct snd_pcm_substream *substream,
3745 +
3746 + ret_val = power_up_sst(stream);
3747 + if (ret_val < 0)
3748 +- return ret_val;
3749 ++ goto out_power_up;
3750 +
3751 + /* Make sure, that the period size is always even */
3752 + snd_pcm_hw_constraint_step(substream->runtime, 0,
3753 +@@ -340,8 +340,9 @@ static int sst_media_open(struct snd_pcm_substream *substream,
3754 + return snd_pcm_hw_constraint_integer(runtime,
3755 + SNDRV_PCM_HW_PARAM_PERIODS);
3756 + out_ops:
3757 +- kfree(stream);
3758 + mutex_unlock(&sst_lock);
3759 ++out_power_up:
3760 ++ kfree(stream);
3761 + return ret_val;
3762 + }
3763 +
3764 +diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
3765 +index 2a5302f1db98a..0168af8492727 100644
3766 +--- a/sound/soc/qcom/qdsp6/q6afe-dai.c
3767 ++++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
3768 +@@ -1150,206 +1150,206 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
3769 + }
3770 +
3771 + static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
3772 +- SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0),
3773 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0),
3774 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0),
3775 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0),
3776 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0),
3777 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0),
3778 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0),
3779 +- SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0),
3780 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0),
3781 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0),
3782 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0),
3783 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0),
3784 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0),
3785 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0),
3786 +- SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0),
3787 ++ SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3788 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3789 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3790 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3791 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3792 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3793 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3794 ++ SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3795 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3796 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3797 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3798 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3799 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3800 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3801 ++ SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3802 + SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
3803 +- 0, 0, 0, 0),
3804 ++ 0, SND_SOC_NOPM, 0, 0),
3805 + SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
3806 +- 0, 0, 0, 0),
3807 ++ 0, SND_SOC_NOPM, 0, 0),
3808 + SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL,
3809 +- 0, 0, 0, 0),
3810 ++ 0, SND_SOC_NOPM, 0, 0),
3811 + SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL,
3812 +- 0, 0, 0, 0),
3813 ++ 0, SND_SOC_NOPM, 0, 0),
3814 + SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL,
3815 +- 0, 0, 0, 0),
3816 ++ 0, SND_SOC_NOPM, 0, 0),
3817 + SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL,
3818 +- 0, 0, 0, 0),
3819 ++ 0, SND_SOC_NOPM, 0, 0),
3820 + SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1",
3821 + "Secondary MI2S Playback SD1",
3822 +- 0, 0, 0, 0),
3823 ++ 0, SND_SOC_NOPM, 0, 0),
3824 + SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL,
3825 +- 0, 0, 0, 0),
3826 ++ 0, SND_SOC_NOPM, 0, 0),
3827 + SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL,
3828 +- 0, 0, 0, 0),
3829 ++ 0, SND_SOC_NOPM, 0, 0),
3830 +
3831 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL,
3832 +- 0, 0, 0, 0),
3833 ++ 0, SND_SOC_NOPM, 0, 0),
3834 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL,
3835 +- 0, 0, 0, 0),
3836 ++ 0, SND_SOC_NOPM, 0, 0),
3837 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL,
3838 +- 0, 0, 0, 0),
3839 ++ 0, SND_SOC_NOPM, 0, 0),
3840 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL,
3841 +- 0, 0, 0, 0),
3842 ++ 0, SND_SOC_NOPM, 0, 0),
3843 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL,
3844 +- 0, 0, 0, 0),
3845 ++ 0, SND_SOC_NOPM, 0, 0),
3846 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL,
3847 +- 0, 0, 0, 0),
3848 ++ 0, SND_SOC_NOPM, 0, 0),
3849 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL,
3850 +- 0, 0, 0, 0),
3851 ++ 0, SND_SOC_NOPM, 0, 0),
3852 + SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL,
3853 +- 0, 0, 0, 0),
3854 ++ 0, SND_SOC_NOPM, 0, 0),
3855 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL,
3856 +- 0, 0, 0, 0),
3857 ++ 0, SND_SOC_NOPM, 0, 0),
3858 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL,
3859 +- 0, 0, 0, 0),
3860 ++ 0, SND_SOC_NOPM, 0, 0),
3861 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL,
3862 +- 0, 0, 0, 0),
3863 ++ 0, SND_SOC_NOPM, 0, 0),
3864 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL,
3865 +- 0, 0, 0, 0),
3866 ++ 0, SND_SOC_NOPM, 0, 0),
3867 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL,
3868 +- 0, 0, 0, 0),
3869 ++ 0, SND_SOC_NOPM, 0, 0),
3870 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL,
3871 +- 0, 0, 0, 0),
3872 ++ 0, SND_SOC_NOPM, 0, 0),
3873 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL,
3874 +- 0, 0, 0, 0),
3875 ++ 0, SND_SOC_NOPM, 0, 0),
3876 + SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL,
3877 +- 0, 0, 0, 0),
3878 ++ 0, SND_SOC_NOPM, 0, 0),
3879 +
3880 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL,
3881 +- 0, 0, 0, 0),
3882 ++ 0, SND_SOC_NOPM, 0, 0),
3883 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL,
3884 +- 0, 0, 0, 0),
3885 ++ 0, SND_SOC_NOPM, 0, 0),
3886 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL,
3887 +- 0, 0, 0, 0),
3888 ++ 0, SND_SOC_NOPM, 0, 0),
3889 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL,
3890 +- 0, 0, 0, 0),
3891 ++ 0, SND_SOC_NOPM, 0, 0),
3892 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL,
3893 +- 0, 0, 0, 0),
3894 ++ 0, SND_SOC_NOPM, 0, 0),
3895 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL,
3896 +- 0, 0, 0, 0),
3897 ++ 0, SND_SOC_NOPM, 0, 0),
3898 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL,
3899 +- 0, 0, 0, 0),
3900 ++ 0, SND_SOC_NOPM, 0, 0),
3901 + SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL,
3902 +- 0, 0, 0, 0),
3903 ++ 0, SND_SOC_NOPM, 0, 0),
3904 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL,
3905 +- 0, 0, 0, 0),
3906 ++ 0, SND_SOC_NOPM, 0, 0),
3907 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL,
3908 +- 0, 0, 0, 0),
3909 ++ 0, SND_SOC_NOPM, 0, 0),
3910 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL,
3911 +- 0, 0, 0, 0),
3912 ++ 0, SND_SOC_NOPM, 0, 0),
3913 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL,
3914 +- 0, 0, 0, 0),
3915 ++ 0, SND_SOC_NOPM, 0, 0),
3916 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL,
3917 +- 0, 0, 0, 0),
3918 ++ 0, SND_SOC_NOPM, 0, 0),
3919 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL,
3920 +- 0, 0, 0, 0),
3921 ++ 0, SND_SOC_NOPM, 0, 0),
3922 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL,
3923 +- 0, 0, 0, 0),
3924 ++ 0, SND_SOC_NOPM, 0, 0),
3925 + SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL,
3926 +- 0, 0, 0, 0),
3927 ++ 0, SND_SOC_NOPM, 0, 0),
3928 +
3929 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL,
3930 +- 0, 0, 0, 0),
3931 ++ 0, SND_SOC_NOPM, 0, 0),
3932 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL,
3933 +- 0, 0, 0, 0),
3934 ++ 0, SND_SOC_NOPM, 0, 0),
3935 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL,
3936 +- 0, 0, 0, 0),
3937 ++ 0, SND_SOC_NOPM, 0, 0),
3938 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL,
3939 +- 0, 0, 0, 0),
3940 ++ 0, SND_SOC_NOPM, 0, 0),
3941 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL,
3942 +- 0, 0, 0, 0),
3943 ++ 0, SND_SOC_NOPM, 0, 0),
3944 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL,
3945 +- 0, 0, 0, 0),
3946 ++ 0, SND_SOC_NOPM, 0, 0),
3947 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL,
3948 +- 0, 0, 0, 0),
3949 ++ 0, SND_SOC_NOPM, 0, 0),
3950 + SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL,
3951 +- 0, 0, 0, 0),
3952 ++ 0, SND_SOC_NOPM, 0, 0),
3953 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL,
3954 +- 0, 0, 0, 0),
3955 ++ 0, SND_SOC_NOPM, 0, 0),
3956 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL,
3957 +- 0, 0, 0, 0),
3958 ++ 0, SND_SOC_NOPM, 0, 0),
3959 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL,
3960 +- 0, 0, 0, 0),
3961 ++ 0, SND_SOC_NOPM, 0, 0),
3962 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL,
3963 +- 0, 0, 0, 0),
3964 ++ 0, SND_SOC_NOPM, 0, 0),
3965 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL,
3966 +- 0, 0, 0, 0),
3967 ++ 0, SND_SOC_NOPM, 0, 0),
3968 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL,
3969 +- 0, 0, 0, 0),
3970 ++ 0, SND_SOC_NOPM, 0, 0),
3971 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL,
3972 +- 0, 0, 0, 0),
3973 ++ 0, SND_SOC_NOPM, 0, 0),
3974 + SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL,
3975 +- 0, 0, 0, 0),
3976 ++ 0, SND_SOC_NOPM, 0, 0),
3977 +
3978 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL,
3979 +- 0, 0, 0, 0),
3980 ++ 0, SND_SOC_NOPM, 0, 0),
3981 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL,
3982 +- 0, 0, 0, 0),
3983 ++ 0, SND_SOC_NOPM, 0, 0),
3984 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL,
3985 +- 0, 0, 0, 0),
3986 ++ 0, SND_SOC_NOPM, 0, 0),
3987 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL,
3988 +- 0, 0, 0, 0),
3989 ++ 0, SND_SOC_NOPM, 0, 0),
3990 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL,
3991 +- 0, 0, 0, 0),
3992 ++ 0, SND_SOC_NOPM, 0, 0),
3993 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL,
3994 +- 0, 0, 0, 0),
3995 ++ 0, SND_SOC_NOPM, 0, 0),
3996 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL,
3997 +- 0, 0, 0, 0),
3998 ++ 0, SND_SOC_NOPM, 0, 0),
3999 + SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL,
4000 +- 0, 0, 0, 0),
4001 ++ 0, SND_SOC_NOPM, 0, 0),
4002 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL,
4003 +- 0, 0, 0, 0),
4004 ++ 0, SND_SOC_NOPM, 0, 0),
4005 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL,
4006 +- 0, 0, 0, 0),
4007 ++ 0, SND_SOC_NOPM, 0, 0),
4008 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL,
4009 +- 0, 0, 0, 0),
4010 ++ 0, SND_SOC_NOPM, 0, 0),
4011 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL,
4012 +- 0, 0, 0, 0),
4013 ++ 0, SND_SOC_NOPM, 0, 0),
4014 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL,
4015 +- 0, 0, 0, 0),
4016 ++ 0, SND_SOC_NOPM, 0, 0),
4017 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL,
4018 +- 0, 0, 0, 0),
4019 ++ 0, SND_SOC_NOPM, 0, 0),
4020 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL,
4021 +- 0, 0, 0, 0),
4022 ++ 0, SND_SOC_NOPM, 0, 0),
4023 + SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL,
4024 +- 0, 0, 0, 0),
4025 ++ 0, SND_SOC_NOPM, 0, 0),
4026 +
4027 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL,
4028 +- 0, 0, 0, 0),
4029 ++ 0, SND_SOC_NOPM, 0, 0),
4030 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL,
4031 +- 0, 0, 0, 0),
4032 ++ 0, SND_SOC_NOPM, 0, 0),
4033 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL,
4034 +- 0, 0, 0, 0),
4035 ++ 0, SND_SOC_NOPM, 0, 0),
4036 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL,
4037 +- 0, 0, 0, 0),
4038 ++ 0, SND_SOC_NOPM, 0, 0),
4039 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL,
4040 +- 0, 0, 0, 0),
4041 ++ 0, SND_SOC_NOPM, 0, 0),
4042 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL,
4043 +- 0, 0, 0, 0),
4044 ++ 0, SND_SOC_NOPM, 0, 0),
4045 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL,
4046 +- 0, 0, 0, 0),
4047 ++ 0, SND_SOC_NOPM, 0, 0),
4048 + SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL,
4049 +- 0, 0, 0, 0),
4050 ++ 0, SND_SOC_NOPM, 0, 0),
4051 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL,
4052 +- 0, 0, 0, 0),
4053 ++ 0, SND_SOC_NOPM, 0, 0),
4054 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL,
4055 +- 0, 0, 0, 0),
4056 ++ 0, SND_SOC_NOPM, 0, 0),
4057 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL,
4058 +- 0, 0, 0, 0),
4059 ++ 0, SND_SOC_NOPM, 0, 0),
4060 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL,
4061 +- 0, 0, 0, 0),
4062 ++ 0, SND_SOC_NOPM, 0, 0),
4063 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL,
4064 +- 0, 0, 0, 0),
4065 ++ 0, SND_SOC_NOPM, 0, 0),
4066 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL,
4067 +- 0, 0, 0, 0),
4068 ++ 0, SND_SOC_NOPM, 0, 0),
4069 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL,
4070 +- 0, 0, 0, 0),
4071 ++ 0, SND_SOC_NOPM, 0, 0),
4072 + SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL,
4073 +- 0, 0, 0, 0),
4074 +- SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, 0, 0, 0),
4075 ++ 0, SND_SOC_NOPM, 0, 0),
4076 ++ SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, SND_SOC_NOPM, 0, 0),
4077 + };
4078 +
4079 + static const struct snd_soc_component_driver q6afe_dai_component = {
4080 +diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
4081 +index 46e50612b92c1..750e6a30444eb 100644
4082 +--- a/sound/soc/qcom/qdsp6/q6routing.c
4083 ++++ b/sound/soc/qcom/qdsp6/q6routing.c
4084 +@@ -973,6 +973,20 @@ static int msm_routing_probe(struct snd_soc_component *c)
4085 + return 0;
4086 + }
4087 +
4088 ++static unsigned int q6routing_reg_read(struct snd_soc_component *component,
4089 ++ unsigned int reg)
4090 ++{
4091 ++ /* default value */
4092 ++ return 0;
4093 ++}
4094 ++
4095 ++static int q6routing_reg_write(struct snd_soc_component *component,
4096 ++ unsigned int reg, unsigned int val)
4097 ++{
4098 ++ /* dummy */
4099 ++ return 0;
4100 ++}
4101 ++
4102 + static const struct snd_soc_component_driver msm_soc_routing_component = {
4103 + .probe = msm_routing_probe,
4104 + .name = DRV_NAME,
4105 +@@ -981,6 +995,8 @@ static const struct snd_soc_component_driver msm_soc_routing_component = {
4106 + .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
4107 + .dapm_routes = intercon,
4108 + .num_dapm_routes = ARRAY_SIZE(intercon),
4109 ++ .read = q6routing_reg_read,
4110 ++ .write = q6routing_reg_write,
4111 + };
4112 +
4113 + static int q6pcm_routing_probe(struct platform_device *pdev)
4114 +diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
4115 +index 5ff951e08c740..52ebe400e9ca4 100644
4116 +--- a/tools/bpf/bpftool/gen.c
4117 ++++ b/tools/bpf/bpftool/gen.c
4118 +@@ -402,7 +402,7 @@ static int do_skeleton(int argc, char **argv)
4119 + { \n\
4120 + struct %1$s *obj; \n\
4121 + \n\
4122 +- obj = (typeof(obj))calloc(1, sizeof(*obj)); \n\
4123 ++ obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
4124 + if (!obj) \n\
4125 + return NULL; \n\
4126 + if (%1$s__create_skeleton(obj)) \n\
4127 +@@ -466,7 +466,7 @@ static int do_skeleton(int argc, char **argv)
4128 + { \n\
4129 + struct bpf_object_skeleton *s; \n\
4130 + \n\
4131 +- s = (typeof(s))calloc(1, sizeof(*s)); \n\
4132 ++ s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
4133 + if (!s) \n\
4134 + return -1; \n\
4135 + obj->skeleton = s; \n\
4136 +@@ -484,7 +484,7 @@ static int do_skeleton(int argc, char **argv)
4137 + /* maps */ \n\
4138 + s->map_cnt = %zu; \n\
4139 + s->map_skel_sz = sizeof(*s->maps); \n\
4140 +- s->maps = (typeof(s->maps))calloc(s->map_cnt, s->map_skel_sz);\n\
4141 ++ s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
4142 + if (!s->maps) \n\
4143 + goto err; \n\
4144 + ",
4145 +@@ -520,7 +520,7 @@ static int do_skeleton(int argc, char **argv)
4146 + /* programs */ \n\
4147 + s->prog_cnt = %zu; \n\
4148 + s->prog_skel_sz = sizeof(*s->progs); \n\
4149 +- s->progs = (typeof(s->progs))calloc(s->prog_cnt, s->prog_skel_sz);\n\
4150 ++ s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
4151 + if (!s->progs) \n\
4152 + goto err; \n\
4153 + ",
4154 +diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
4155 +index 8a637ca7d73a4..05853b0b88318 100644
4156 +--- a/tools/testing/selftests/cgroup/cgroup_util.c
4157 ++++ b/tools/testing/selftests/cgroup/cgroup_util.c
4158 +@@ -106,7 +106,7 @@ int cg_read_strcmp(const char *cgroup, const char *control,
4159 +
4160 + /* Handle the case of comparing against empty string */
4161 + if (!expected)
4162 +- size = 32;
4163 ++ return -1;
4164 + else
4165 + size = strlen(expected) + 1;
4166 +
4167 +diff --git a/tools/testing/selftests/kvm/x86_64/debug_regs.c b/tools/testing/selftests/kvm/x86_64/debug_regs.c
4168 +index 8162c58a1234e..b8d14f9db5f9e 100644
4169 +--- a/tools/testing/selftests/kvm/x86_64/debug_regs.c
4170 ++++ b/tools/testing/selftests/kvm/x86_64/debug_regs.c
4171 +@@ -40,11 +40,11 @@ static void guest_code(void)
4172 +
4173 + /* Single step test, covers 2 basic instructions and 2 emulated */
4174 + asm volatile("ss_start: "
4175 +- "xor %%rax,%%rax\n\t"
4176 ++ "xor %%eax,%%eax\n\t"
4177 + "cpuid\n\t"
4178 + "movl $0x1a0,%%ecx\n\t"
4179 + "rdmsr\n\t"
4180 +- : : : "rax", "ecx");
4181 ++ : : : "eax", "ebx", "ecx", "edx");
4182 +
4183 + /* DR6.BD test */
4184 + asm volatile("bd_start: mov %%dr0, %%rax" : : : "rax");
4185 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
4186 +index 8a9d13e8e904f..b005685a6de42 100644
4187 +--- a/virt/kvm/arm/mmu.c
4188 ++++ b/virt/kvm/arm/mmu.c
4189 +@@ -331,7 +331,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
4190 + * destroying the VM), otherwise another faulting VCPU may come in and mess
4191 + * with things behind our backs.
4192 + */
4193 +-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
4194 ++static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,
4195 ++ bool may_block)
4196 + {
4197 + pgd_t *pgd;
4198 + phys_addr_t addr = start, end = start + size;
4199 +@@ -356,11 +357,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
4200 + * If the range is too large, release the kvm->mmu_lock
4201 + * to prevent starvation and lockup detector warnings.
4202 + */
4203 +- if (next != end)
4204 ++ if (may_block && next != end)
4205 + cond_resched_lock(&kvm->mmu_lock);
4206 + } while (pgd++, addr = next, addr != end);
4207 + }
4208 +
4209 ++static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
4210 ++{
4211 ++ __unmap_stage2_range(kvm, start, size, true);
4212 ++}
4213 ++
4214 + static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
4215 + phys_addr_t addr, phys_addr_t end)
4216 + {
4217 +@@ -2041,18 +2047,21 @@ static int handle_hva_to_gpa(struct kvm *kvm,
4218 +
4219 + static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
4220 + {
4221 +- unmap_stage2_range(kvm, gpa, size);
4222 ++ unsigned flags = *(unsigned *)data;
4223 ++ bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
4224 ++
4225 ++ __unmap_stage2_range(kvm, gpa, size, may_block);
4226 + return 0;
4227 + }
4228 +
4229 + int kvm_unmap_hva_range(struct kvm *kvm,
4230 +- unsigned long start, unsigned long end)
4231 ++ unsigned long start, unsigned long end, unsigned flags)
4232 + {
4233 + if (!kvm->arch.pgd)
4234 + return 0;
4235 +
4236 + trace_kvm_unmap_hva_range(start, end);
4237 +- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
4238 ++ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
4239 + return 0;
4240 + }
4241 +
4242 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4243 +index 77aa91fb08d2b..66b7a9dbb77dc 100644
4244 +--- a/virt/kvm/kvm_main.c
4245 ++++ b/virt/kvm/kvm_main.c
4246 +@@ -428,7 +428,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
4247 + * count is also read inside the mmu_lock critical section.
4248 + */
4249 + kvm->mmu_notifier_count++;
4250 +- need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
4251 ++ need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
4252 ++ range->flags);
4253 + need_tlb_flush |= kvm->tlbs_dirty;
4254 + /* we've to flush the tlb before the pages can be freed */
4255 + if (need_tlb_flush)