From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 26 Aug 2020 11:15:46 +0000
Message-Id: 1598440526.286936af5f7b3c95f620e434ebb58ba5f7d7095d.mpagano@gentoo
1 commit: 286936af5f7b3c95f620e434ebb58ba5f7d7095d
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Aug 26 11:15:26 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Aug 26 11:15:26 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=286936af
7
8 Linux patch 4.19.142
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1141_linux-4.19.142.patch | 2088 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2092 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 1680f0b..4adb8c6 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -603,6 +603,10 @@ Patch: 1140_linux-4.19.141.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.141
23
24 +Patch: 1141_linux-4.19.142.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.142
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1141_linux-4.19.142.patch b/1141_linux-4.19.142.patch
33 new file mode 100644
34 index 0000000..84d26b3
35 --- /dev/null
36 +++ b/1141_linux-4.19.142.patch
37 @@ -0,0 +1,2088 @@
38 +diff --git a/Makefile b/Makefile
39 +index 5b64e11419846..e5e46aecf357f 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 141
47 ++SUBLEVEL = 142
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
52 +index eb09d5aee9106..0bba9e991189d 100644
53 +--- a/arch/alpha/include/asm/io.h
54 ++++ b/arch/alpha/include/asm/io.h
55 +@@ -507,10 +507,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
56 + }
57 + #endif
58 +
59 +-#define ioread16be(p) be16_to_cpu(ioread16(p))
60 +-#define ioread32be(p) be32_to_cpu(ioread32(p))
61 +-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
62 +-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
63 ++#define ioread16be(p) swab16(ioread16(p))
64 ++#define ioread32be(p) swab32(ioread32(p))
65 ++#define iowrite16be(v,p) iowrite16(swab16(v), (p))
66 ++#define iowrite32be(v,p) iowrite32(swab32(v), (p))
67 +
68 + #define inb_p inb
69 + #define inw_p inw
70 +diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
71 +index c9128bb187f9a..471859cbfe0bb 100644
72 +--- a/arch/arm/include/asm/kvm_host.h
73 ++++ b/arch/arm/include/asm/kvm_host.h
74 +@@ -234,7 +234,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
75 +
76 + #define KVM_ARCH_WANT_MMU_NOTIFIER
77 + int kvm_unmap_hva_range(struct kvm *kvm,
78 +- unsigned long start, unsigned long end);
79 ++ unsigned long start, unsigned long end, bool blockable);
80 + void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
81 +
82 + unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
83 +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
84 +index e9afdfcb8403c..5e720742d6479 100644
85 +--- a/arch/arm64/include/asm/kvm_host.h
86 ++++ b/arch/arm64/include/asm/kvm_host.h
87 +@@ -370,7 +370,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
88 +
89 + #define KVM_ARCH_WANT_MMU_NOTIFIER
90 + int kvm_unmap_hva_range(struct kvm *kvm,
91 +- unsigned long start, unsigned long end);
92 ++ unsigned long start, unsigned long end, bool blockable);
93 + void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
94 + int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
95 + int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
96 +diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
97 +index 9138a624c5c81..692f90e7fecc1 100644
98 +--- a/arch/m68k/include/asm/m53xxacr.h
99 ++++ b/arch/m68k/include/asm/m53xxacr.h
100 +@@ -89,9 +89,9 @@
101 + * coherency though in all cases. And for copyback caches we will need
102 + * to push cached data as well.
103 + */
104 +-#define CACHE_INIT CACR_CINVA
105 +-#define CACHE_INVALIDATE CACR_CINVA
106 +-#define CACHE_INVALIDATED CACR_CINVA
107 ++#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC)
108 ++#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA)
109 ++#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
110 +
111 + #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
112 + (0x000f0000) + \
113 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
114 +index 2b3fdfc9e0e77..c254761cb8ad9 100644
115 +--- a/arch/mips/include/asm/kvm_host.h
116 ++++ b/arch/mips/include/asm/kvm_host.h
117 +@@ -936,7 +936,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
118 +
119 + #define KVM_ARCH_WANT_MMU_NOTIFIER
120 + int kvm_unmap_hva_range(struct kvm *kvm,
121 +- unsigned long start, unsigned long end);
122 ++ unsigned long start, unsigned long end, bool blockable);
123 + void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
124 + int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
125 + int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
126 +diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
127 +index d8dcdb3504059..098a7afd4d384 100644
128 +--- a/arch/mips/kvm/mmu.c
129 ++++ b/arch/mips/kvm/mmu.c
130 +@@ -512,7 +512,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
131 + return 1;
132 + }
133 +
134 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
135 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
136 ++ bool blockable)
137 + {
138 + handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
139 +
140 +diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
141 +index 2f95e38f05491..7b54d8412367e 100644
142 +--- a/arch/powerpc/include/asm/kvm_host.h
143 ++++ b/arch/powerpc/include/asm/kvm_host.h
144 +@@ -68,7 +68,8 @@
145 + #define KVM_ARCH_WANT_MMU_NOTIFIER
146 +
147 + extern int kvm_unmap_hva_range(struct kvm *kvm,
148 +- unsigned long start, unsigned long end);
149 ++ unsigned long start, unsigned long end,
150 ++ bool blockable);
151 + extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
152 + extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
153 + extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
154 +diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
155 +index cc05f346e0421..bc9d1321dc730 100644
156 +--- a/arch/powerpc/kvm/book3s.c
157 ++++ b/arch/powerpc/kvm/book3s.c
158 +@@ -812,7 +812,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
159 + kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
160 + }
161 +
162 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
163 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
164 ++ bool blockable)
165 + {
166 + return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
167 + }
168 +diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
169 +index 8f2985e46f6f1..bbb02195dc530 100644
170 +--- a/arch/powerpc/kvm/e500_mmu_host.c
171 ++++ b/arch/powerpc/kvm/e500_mmu_host.c
172 +@@ -737,7 +737,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
173 + return 0;
174 + }
175 +
176 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
177 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
178 ++ bool blockable)
179 + {
180 + /* kvm_unmap_hva flushes everything anyways */
181 + kvm_unmap_hva(kvm, start);
182 +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
183 +index e81a285f3a6ce..e827108680f21 100644
184 +--- a/arch/powerpc/platforms/pseries/ras.c
185 ++++ b/arch/powerpc/platforms/pseries/ras.c
186 +@@ -118,7 +118,6 @@ static void handle_system_shutdown(char event_modifier)
187 + case EPOW_SHUTDOWN_ON_UPS:
188 + pr_emerg("Loss of system power detected. System is running on"
189 + " UPS/battery. Check RTAS error log for details\n");
190 +- orderly_poweroff(true);
191 + break;
192 +
193 + case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
194 +diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
195 +index 65fefbf61e1ca..3ffa2847c110b 100644
196 +--- a/arch/s390/kernel/ptrace.c
197 ++++ b/arch/s390/kernel/ptrace.c
198 +@@ -1286,7 +1286,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
199 + cb->pc == 1 &&
200 + cb->qc == 0 &&
201 + cb->reserved2 == 0 &&
202 +- cb->key == PAGE_DEFAULT_KEY &&
203 + cb->reserved3 == 0 &&
204 + cb->reserved4 == 0 &&
205 + cb->reserved5 == 0 &&
206 +@@ -1350,7 +1349,11 @@ static int s390_runtime_instr_set(struct task_struct *target,
207 + kfree(data);
208 + return -EINVAL;
209 + }
210 +-
211 ++ /*
212 ++ * Override access key in any case, since user space should
213 ++ * not be able to set it, nor should it care about it.
214 ++ */
215 ++ ri_cb.key = PAGE_DEFAULT_KEY >> 4;
216 + preempt_disable();
217 + if (!target->thread.ri_cb)
218 + target->thread.ri_cb = data;
219 +diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
220 +index 125c7f6e87150..1788a5454b6fc 100644
221 +--- a/arch/s390/kernel/runtime_instr.c
222 ++++ b/arch/s390/kernel/runtime_instr.c
223 +@@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
224 + cb->k = 1;
225 + cb->ps = 1;
226 + cb->pc = 1;
227 +- cb->key = PAGE_DEFAULT_KEY;
228 ++ cb->key = PAGE_DEFAULT_KEY >> 4;
229 + cb->v = 1;
230 + }
231 +
232 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
233 +index ce7b3b22ae86b..4876411a072a7 100644
234 +--- a/arch/x86/include/asm/kvm_host.h
235 ++++ b/arch/x86/include/asm/kvm_host.h
236 +@@ -1465,7 +1465,8 @@ asmlinkage void __noreturn kvm_spurious_fault(void);
237 + ____kvm_handle_fault_on_reboot(insn, "")
238 +
239 + #define KVM_ARCH_WANT_MMU_NOTIFIER
240 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
241 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
242 ++ bool blockable);
243 + int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
244 + int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
245 + void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
246 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
247 +index 92ff656e18101..a2ff5c214738a 100644
248 +--- a/arch/x86/kvm/mmu.c
249 ++++ b/arch/x86/kvm/mmu.c
250 +@@ -1956,7 +1956,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
251 + return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
252 + }
253 +
254 +-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
255 ++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
256 ++ bool blockable)
257 + {
258 + return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
259 + }
260 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
261 +index 5b2440e591fc1..430a4bc66f604 100644
262 +--- a/arch/x86/kvm/x86.c
263 ++++ b/arch/x86/kvm/x86.c
264 +@@ -857,7 +857,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
265 + {
266 + unsigned long old_cr4 = kvm_read_cr4(vcpu);
267 + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
268 +- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
269 ++ X86_CR4_SMEP;
270 +
271 + if (kvm_valid_cr4(vcpu, cr4))
272 + return 1;
273 +diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
274 +index 9112d1cb397bb..22da9bfd8a458 100644
275 +--- a/arch/x86/pci/xen.c
276 ++++ b/arch/x86/pci/xen.c
277 +@@ -25,6 +25,7 @@
278 + #include <asm/xen/pci.h>
279 + #include <asm/xen/cpuid.h>
280 + #include <asm/apic.h>
281 ++#include <asm/acpi.h>
282 + #include <asm/i8259.h>
283 +
284 + static int xen_pcifront_enable_irq(struct pci_dev *dev)
285 +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
286 +index 8353ab9bd31bd..c5cf9e77fe862 100644
287 +--- a/drivers/clk/clk.c
288 ++++ b/drivers/clk/clk.c
289 +@@ -40,6 +40,17 @@ static HLIST_HEAD(clk_root_list);
290 + static HLIST_HEAD(clk_orphan_list);
291 + static LIST_HEAD(clk_notifier_list);
292 +
293 ++static struct hlist_head *all_lists[] = {
294 ++ &clk_root_list,
295 ++ &clk_orphan_list,
296 ++ NULL,
297 ++};
298 ++
299 ++static struct hlist_head *orphan_list[] = {
300 ++ &clk_orphan_list,
301 ++ NULL,
302 ++};
303 ++
304 + /*** private data structures ***/
305 +
306 + struct clk_core {
307 +@@ -2618,17 +2629,6 @@ static int inited = 0;
308 + static DEFINE_MUTEX(clk_debug_lock);
309 + static HLIST_HEAD(clk_debug_list);
310 +
311 +-static struct hlist_head *all_lists[] = {
312 +- &clk_root_list,
313 +- &clk_orphan_list,
314 +- NULL,
315 +-};
316 +-
317 +-static struct hlist_head *orphan_list[] = {
318 +- &clk_orphan_list,
319 +- NULL,
320 +-};
321 +-
322 + static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
323 + int level)
324 + {
325 +@@ -3328,6 +3328,34 @@ static const struct clk_ops clk_nodrv_ops = {
326 + .set_parent = clk_nodrv_set_parent,
327 + };
328 +
329 ++static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
330 ++ struct clk_core *target)
331 ++{
332 ++ int i;
333 ++ struct clk_core *child;
334 ++
335 ++ for (i = 0; i < root->num_parents; i++)
336 ++ if (root->parents[i] == target)
337 ++ root->parents[i] = NULL;
338 ++
339 ++ hlist_for_each_entry(child, &root->children, child_node)
340 ++ clk_core_evict_parent_cache_subtree(child, target);
341 ++}
342 ++
343 ++/* Remove this clk from all parent caches */
344 ++static void clk_core_evict_parent_cache(struct clk_core *core)
345 ++{
346 ++ struct hlist_head **lists;
347 ++ struct clk_core *root;
348 ++
349 ++ lockdep_assert_held(&prepare_lock);
350 ++
351 ++ for (lists = all_lists; *lists; lists++)
352 ++ hlist_for_each_entry(root, *lists, child_node)
353 ++ clk_core_evict_parent_cache_subtree(root, core);
354 ++
355 ++}
356 ++
357 + /**
358 + * clk_unregister - unregister a currently registered clock
359 + * @clk: clock to unregister
360 +@@ -3366,6 +3394,8 @@ void clk_unregister(struct clk *clk)
361 + clk_core_set_parent_nolock(child, NULL);
362 + }
363 +
364 ++ clk_core_evict_parent_cache(clk->core);
365 ++
366 + hlist_del_init(&clk->core->child_node);
367 +
368 + if (clk->core->prepare_count)
369 +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
370 +index e7b3d4ed8eff4..99166000ffb77 100644
371 +--- a/drivers/cpufreq/intel_pstate.c
372 ++++ b/drivers/cpufreq/intel_pstate.c
373 +@@ -1431,6 +1431,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
374 +
375 + intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
376 + cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
377 ++ cpu->pstate.turbo_pstate = phy_max;
378 + } else {
379 + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
380 + }
381 +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
382 +index de1bc38ab39fb..a8180f9090fae 100644
383 +--- a/drivers/firmware/efi/efi.c
384 ++++ b/drivers/firmware/efi/efi.c
385 +@@ -359,6 +359,7 @@ static int __init efisubsys_init(void)
386 + efi_kobj = kobject_create_and_add("efi", firmware_kobj);
387 + if (!efi_kobj) {
388 + pr_err("efi: Firmware registration failed.\n");
389 ++ destroy_workqueue(efi_rts_wq);
390 + return -ENOMEM;
391 + }
392 +
393 +@@ -395,6 +396,7 @@ err_unregister:
394 + generic_ops_unregister();
395 + err_put:
396 + kobject_put(efi_kobj);
397 ++ destroy_workqueue(efi_rts_wq);
398 + return error;
399 + }
400 +
401 +diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
402 +index 52a73332befb9..343f869c5277d 100644
403 +--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
404 ++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
405 +@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
406 + */
407 + static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
408 + {
409 ++ if (arg1.value == 0)
410 ++ return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
411 ++
412 + return dc_fixpt_exp(
413 + dc_fixpt_mul(
414 + dc_fixpt_log(arg1),
415 +diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
416 +index 4709f08f39e49..1c1a435d354bc 100644
417 +--- a/drivers/gpu/drm/vgem/vgem_drv.c
418 ++++ b/drivers/gpu/drm/vgem/vgem_drv.c
419 +@@ -219,32 +219,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
420 + return 0;
421 + }
422 +
423 +-static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
424 +- uint32_t handle, uint64_t *offset)
425 +-{
426 +- struct drm_gem_object *obj;
427 +- int ret;
428 +-
429 +- obj = drm_gem_object_lookup(file, handle);
430 +- if (!obj)
431 +- return -ENOENT;
432 +-
433 +- if (!obj->filp) {
434 +- ret = -EINVAL;
435 +- goto unref;
436 +- }
437 +-
438 +- ret = drm_gem_create_mmap_offset(obj);
439 +- if (ret)
440 +- goto unref;
441 +-
442 +- *offset = drm_vma_node_offset_addr(&obj->vma_node);
443 +-unref:
444 +- drm_gem_object_put_unlocked(obj);
445 +-
446 +- return ret;
447 +-}
448 +-
449 + static struct drm_ioctl_desc vgem_ioctls[] = {
450 + DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
451 + DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
452 +@@ -438,7 +412,6 @@ static struct drm_driver vgem_driver = {
453 + .fops = &vgem_driver_fops,
454 +
455 + .dumb_create = vgem_gem_dumb_create,
456 +- .dumb_map_offset = vgem_gem_dumb_map,
457 +
458 + .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
459 + .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
460 +diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
461 +index 589b0d4677d52..f1b666c80f368 100644
462 +--- a/drivers/infiniband/hw/bnxt_re/main.c
463 ++++ b/drivers/infiniband/hw/bnxt_re/main.c
464 +@@ -753,7 +753,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
465 + struct ib_event event;
466 + unsigned int flags;
467 +
468 +- if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
469 ++ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
470 ++ rdma_is_kernel_res(&qp->ib_qp.res)) {
471 + flags = bnxt_re_lock_cqs(qp);
472 + bnxt_qplib_add_flush_qp(&qp->qplib_qp);
473 + bnxt_re_unlock_cqs(qp, flags);
474 +diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
475 +index d3ff1fc09af71..a9040c0fb4c3f 100644
476 +--- a/drivers/input/mouse/psmouse-base.c
477 ++++ b/drivers/input/mouse/psmouse-base.c
478 +@@ -2044,7 +2044,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
479 + {
480 + int type = *((unsigned int *)kp->arg);
481 +
482 +- return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
483 ++ return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
484 + }
485 +
486 + static int __init psmouse_init(void)
487 +diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
488 +index b3dc45b91101d..9b545c7431685 100644
489 +--- a/drivers/media/pci/ttpci/budget-core.c
490 ++++ b/drivers/media/pci/ttpci/budget-core.c
491 +@@ -383,20 +383,25 @@ static int budget_register(struct budget *budget)
492 + ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
493 +
494 + if (ret < 0)
495 +- return ret;
496 ++ goto err_release_dmx;
497 +
498 + budget->mem_frontend.source = DMX_MEMORY_FE;
499 + ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
500 + if (ret < 0)
501 +- return ret;
502 ++ goto err_release_dmx;
503 +
504 + ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
505 + if (ret < 0)
506 +- return ret;
507 ++ goto err_release_dmx;
508 +
509 + dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
510 +
511 + return 0;
512 ++
513 ++err_release_dmx:
514 ++ dvb_dmxdev_release(&budget->dmxdev);
515 ++ dvb_dmx_release(&budget->demux);
516 ++ return ret;
517 + }
518 +
519 + static void budget_unregister(struct budget *budget)
520 +diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
521 +index 89a86c19579b8..50fc71d0cb9f3 100644
522 +--- a/drivers/media/platform/davinci/vpss.c
523 ++++ b/drivers/media/platform/davinci/vpss.c
524 +@@ -514,19 +514,31 @@ static void vpss_exit(void)
525 +
526 + static int __init vpss_init(void)
527 + {
528 ++ int ret;
529 ++
530 + if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
531 + return -EBUSY;
532 +
533 + oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
534 + if (unlikely(!oper_cfg.vpss_regs_base2)) {
535 +- release_mem_region(VPSS_CLK_CTRL, 4);
536 +- return -ENOMEM;
537 ++ ret = -ENOMEM;
538 ++ goto err_ioremap;
539 + }
540 +
541 + writel(VPSS_CLK_CTRL_VENCCLKEN |
542 +- VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
543 ++ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
544 ++
545 ++ ret = platform_driver_register(&vpss_driver);
546 ++ if (ret)
547 ++ goto err_pd_register;
548 ++
549 ++ return 0;
550 +
551 +- return platform_driver_register(&vpss_driver);
552 ++err_pd_register:
553 ++ iounmap(oper_cfg.vpss_regs_base2);
554 ++err_ioremap:
555 ++ release_mem_region(VPSS_CLK_CTRL, 4);
556 ++ return ret;
557 + }
558 + subsys_initcall(vpss_init);
559 + module_exit(vpss_exit);
560 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
561 +index 11429df743067..d32e32e791741 100644
562 +--- a/drivers/net/bonding/bond_main.c
563 ++++ b/drivers/net/bonding/bond_main.c
564 +@@ -2029,7 +2029,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
565 + int ret;
566 +
567 + ret = __bond_release_one(bond_dev, slave_dev, false, true);
568 +- if (ret == 0 && !bond_has_slaves(bond)) {
569 ++ if (ret == 0 && !bond_has_slaves(bond) &&
570 ++ bond_dev->reg_state != NETREG_UNREGISTERING) {
571 + bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
572 + netdev_info(bond_dev, "Destroying bond %s\n",
573 + bond_dev->name);
574 +@@ -2772,6 +2773,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
575 + if (bond_time_in_interval(bond, last_rx, 1)) {
576 + bond_propose_link_state(slave, BOND_LINK_UP);
577 + commit++;
578 ++ } else if (slave->link == BOND_LINK_BACK) {
579 ++ bond_propose_link_state(slave, BOND_LINK_FAIL);
580 ++ commit++;
581 + }
582 + continue;
583 + }
584 +@@ -2882,6 +2886,19 @@ static void bond_ab_arp_commit(struct bonding *bond)
585 +
586 + continue;
587 +
588 ++ case BOND_LINK_FAIL:
589 ++ bond_set_slave_link_state(slave, BOND_LINK_FAIL,
590 ++ BOND_SLAVE_NOTIFY_NOW);
591 ++ bond_set_slave_inactive_flags(slave,
592 ++ BOND_SLAVE_NOTIFY_NOW);
593 ++
594 ++ /* A slave has just been enslaved and has become
595 ++ * the current active slave.
596 ++ */
597 ++ if (rtnl_dereference(bond->curr_active_slave))
598 ++ RCU_INIT_POINTER(bond->current_arp_slave, NULL);
599 ++ continue;
600 ++
601 + default:
602 + netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
603 + slave->link_new_state, slave->dev->name);
604 +@@ -2931,8 +2948,6 @@ static bool bond_ab_arp_probe(struct bonding *bond)
605 + return should_notify_rtnl;
606 + }
607 +
608 +- bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
609 +-
610 + bond_for_each_slave_rcu(bond, slave, iter) {
611 + if (!found && !before && bond_slave_is_up(slave))
612 + before = slave;
613 +@@ -4200,13 +4215,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
614 + return ret;
615 + }
616 +
617 ++static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
618 ++{
619 ++ if (speed == 0 || speed == SPEED_UNKNOWN)
620 ++ speed = slave->speed;
621 ++ else
622 ++ speed = min(speed, slave->speed);
623 ++
624 ++ return speed;
625 ++}
626 ++
627 + static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
628 + struct ethtool_link_ksettings *cmd)
629 + {
630 + struct bonding *bond = netdev_priv(bond_dev);
631 +- unsigned long speed = 0;
632 + struct list_head *iter;
633 + struct slave *slave;
634 ++ u32 speed = 0;
635 +
636 + cmd->base.duplex = DUPLEX_UNKNOWN;
637 + cmd->base.port = PORT_OTHER;
638 +@@ -4218,8 +4243,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
639 + */
640 + bond_for_each_slave(bond, slave, iter) {
641 + if (bond_slave_can_tx(slave)) {
642 +- if (slave->speed != SPEED_UNKNOWN)
643 +- speed += slave->speed;
644 ++ if (slave->speed != SPEED_UNKNOWN) {
645 ++ if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
646 ++ speed = bond_mode_bcast_speed(slave,
647 ++ speed);
648 ++ else
649 ++ speed += slave->speed;
650 ++ }
651 + if (cmd->base.duplex == DUPLEX_UNKNOWN &&
652 + slave->duplex != DUPLEX_UNKNOWN)
653 + cmd->base.duplex = slave->duplex;
654 +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
655 +index 11f3993ab7f30..294be86420b6d 100644
656 +--- a/drivers/net/dsa/b53/b53_common.c
657 ++++ b/drivers/net/dsa/b53/b53_common.c
658 +@@ -1335,6 +1335,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
659 + return ret;
660 +
661 + switch (ret) {
662 ++ case -ETIMEDOUT:
663 ++ return ret;
664 + case -ENOSPC:
665 + dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
666 + addr, vid);
667 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
668 +index 8736718b17359..55cc70ba5b093 100644
669 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
670 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
671 +@@ -2647,16 +2647,14 @@ static void ena_fw_reset_device(struct work_struct *work)
672 + {
673 + struct ena_adapter *adapter =
674 + container_of(work, struct ena_adapter, reset_task);
675 +- struct pci_dev *pdev = adapter->pdev;
676 +
677 +- if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
678 +- dev_err(&pdev->dev,
679 +- "device reset schedule while reset bit is off\n");
680 +- return;
681 +- }
682 + rtnl_lock();
683 +- ena_destroy_device(adapter, false);
684 +- ena_restore_device(adapter);
685 ++
686 ++ if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
687 ++ ena_destroy_device(adapter, false);
688 ++ ena_restore_device(adapter);
689 ++ }
690 ++
691 + rtnl_unlock();
692 + }
693 +
694 +@@ -3392,8 +3390,11 @@ static void ena_remove(struct pci_dev *pdev)
695 + netdev->rx_cpu_rmap = NULL;
696 + }
697 + #endif /* CONFIG_RFS_ACCEL */
698 +- del_timer_sync(&adapter->timer_service);
699 +
700 ++ /* Make sure timer and reset routine won't be called after
701 ++ * freeing device resources.
702 ++ */
703 ++ del_timer_sync(&adapter->timer_service);
704 + cancel_work_sync(&adapter->reset_task);
705 +
706 + unregister_netdev(netdev);
707 +diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
708 +index f402af39da42a..16de0fa92ab74 100644
709 +--- a/drivers/net/ethernet/cortina/gemini.c
710 ++++ b/drivers/net/ethernet/cortina/gemini.c
711 +@@ -2392,7 +2392,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
712 +
713 + dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
714 +
715 +- netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
716 ++ netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM);
717 + if (!netdev) {
718 + dev_err(dev, "Can't allocate ethernet device #%d\n", id);
719 + return -ENOMEM;
720 +@@ -2526,7 +2526,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
721 + }
722 +
723 + port->netdev = NULL;
724 +- free_netdev(netdev);
725 + return ret;
726 + }
727 +
728 +@@ -2535,7 +2534,6 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
729 + struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
730 +
731 + gemini_port_remove(port);
732 +- free_netdev(port->netdev);
733 + return 0;
734 + }
735 +
736 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
737 +index 48c58f93b124b..3b6da228140e3 100644
738 +--- a/drivers/net/ethernet/freescale/fec_main.c
739 ++++ b/drivers/net/ethernet/freescale/fec_main.c
740 +@@ -3659,11 +3659,11 @@ failed_mii_init:
741 + failed_irq:
742 + failed_init:
743 + fec_ptp_stop(pdev);
744 +- if (fep->reg_phy)
745 +- regulator_disable(fep->reg_phy);
746 + failed_reset:
747 + pm_runtime_put_noidle(&pdev->dev);
748 + pm_runtime_disable(&pdev->dev);
749 ++ if (fep->reg_phy)
750 ++ regulator_disable(fep->reg_phy);
751 + failed_regulator:
752 + clk_disable_unprepare(fep->clk_ahb);
753 + failed_clk_ahb:
754 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
755 +index 80e3eec6134ee..a5e5e7e14e6c5 100644
756 +--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
757 ++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
758 +@@ -1206,7 +1206,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
759 + #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
760 + #define I40E_AQC_SET_VSI_DEFAULT 0x08
761 + #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
762 +-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
763 ++#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000
764 + __le16 seid;
765 + #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
766 + __le16 vlan_tag;
767 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
768 +index eb0ae6ab01e26..e75b4c4872c09 100644
769 +--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
770 ++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
771 +@@ -1970,6 +1970,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
772 + return status;
773 + }
774 +
775 ++/**
776 ++ * i40e_is_aq_api_ver_ge
777 ++ * @aq: pointer to AdminQ info containing HW API version to compare
778 ++ * @maj: API major value
779 ++ * @min: API minor value
780 ++ *
781 ++ * Assert whether current HW API version is greater/equal than provided.
782 ++ **/
783 ++static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
784 ++ u16 min)
785 ++{
786 ++ return (aq->api_maj_ver > maj ||
787 ++ (aq->api_maj_ver == maj && aq->api_min_ver >= min));
788 ++}
789 ++
790 + /**
791 + * i40e_aq_add_vsi
792 + * @hw: pointer to the hw struct
793 +@@ -2095,18 +2110,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
794 +
795 + if (set) {
796 + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
797 +- if (rx_only_promisc &&
798 +- (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
799 +- (hw->aq.api_maj_ver > 1)))
800 +- flags |= I40E_AQC_SET_VSI_PROMISC_TX;
801 ++ if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
802 ++ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
803 + }
804 +
805 + cmd->promiscuous_flags = cpu_to_le16(flags);
806 +
807 + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
808 +- if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
809 +- (hw->aq.api_maj_ver > 1))
810 +- cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
811 ++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
812 ++ cmd->valid_flags |=
813 ++ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
814 +
815 + cmd->seid = cpu_to_le16(seid);
816 + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
817 +@@ -2203,11 +2216,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
818 + i40e_fill_default_direct_cmd_desc(&desc,
819 + i40e_aqc_opc_set_vsi_promiscuous_modes);
820 +
821 +- if (enable)
822 ++ if (enable) {
823 + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
824 ++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
825 ++ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
826 ++ }
827 +
828 + cmd->promiscuous_flags = cpu_to_le16(flags);
829 + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
830 ++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
831 ++ cmd->valid_flags |=
832 ++ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
833 + cmd->seid = cpu_to_le16(seid);
834 + cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
835 +
836 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
837 +index a74b01bf581e9..3200c75b9ed2a 100644
838 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
839 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
840 +@@ -14152,6 +14152,9 @@ static void i40e_remove(struct pci_dev *pdev)
841 + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
842 + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
843 +
844 ++ while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
845 ++ usleep_range(1000, 2000);
846 ++
847 + /* no more scheduling of any task */
848 + set_bit(__I40E_SUSPENDED, pf->state);
849 + set_bit(__I40E_DOWN, pf->state);
850 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
851 +index e33cbb793b638..4a5d99ecb89d3 100644
852 +--- a/drivers/net/hyperv/netvsc_drv.c
853 ++++ b/drivers/net/hyperv/netvsc_drv.c
854 +@@ -513,7 +513,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
855 + int rc;
856 +
857 + skb->dev = vf_netdev;
858 +- skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
859 ++ skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
860 +
861 + rc = dev_queue_xmit(skb);
862 + if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
863 +diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
864 +index a1c44d0c85578..30cbe22c57a8e 100644
865 +--- a/drivers/rtc/rtc-goldfish.c
866 ++++ b/drivers/rtc/rtc-goldfish.c
867 +@@ -87,6 +87,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
868 + rtc_alarm64 = rtc_alarm * NSEC_PER_SEC;
869 + writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
870 + writel(rtc_alarm64, base + TIMER_ALARM_LOW);
871 ++ writel(1, base + TIMER_IRQ_ENABLED);
872 + } else {
873 + /*
874 + * if this function was called with enabled=0
875 +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
876 +index 91aa4bfcf8d61..5bb278a604ed2 100644
877 +--- a/drivers/s390/scsi/zfcp_fsf.c
878 ++++ b/drivers/s390/scsi/zfcp_fsf.c
879 +@@ -403,7 +403,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
880 + return;
881 + }
882 +
883 +- del_timer(&req->timer);
884 ++ del_timer_sync(&req->timer);
885 + zfcp_fsf_protstatus_eval(req);
886 + zfcp_fsf_fsfstatus_eval(req);
887 + req->handler(req);
888 +@@ -758,7 +758,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
889 + req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
890 + req->issued = get_tod_clock();
891 + if (zfcp_qdio_send(qdio, &req->qdio_req)) {
892 +- del_timer(&req->timer);
893 ++ del_timer_sync(&req->timer);
894 + /* lookup request again, list might have changed */
895 + zfcp_reqlist_find_rm(adapter->req_list, req_id);
896 + zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
897 +diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
898 +index 8839f509b19ab..78cf5b32bca67 100644
899 +--- a/drivers/scsi/libfc/fc_disc.c
900 ++++ b/drivers/scsi/libfc/fc_disc.c
901 +@@ -593,8 +593,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
902 +
903 + if (PTR_ERR(fp) == -FC_EX_CLOSED)
904 + goto out;
905 +- if (IS_ERR(fp))
906 +- goto redisc;
907 ++ if (IS_ERR(fp)) {
908 ++ mutex_lock(&disc->disc_mutex);
909 ++ fc_disc_restart(disc);
910 ++ mutex_unlock(&disc->disc_mutex);
911 ++ goto out;
912 ++ }
913 +
914 + cp = fc_frame_payload_get(fp, sizeof(*cp));
915 + if (!cp)
916 +@@ -621,7 +625,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
917 + new_rdata->disc_id = disc->disc_id;
918 + fc_rport_login(new_rdata);
919 + }
920 +- goto out;
921 ++ goto free_fp;
922 + }
923 + rdata->disc_id = disc->disc_id;
924 + mutex_unlock(&rdata->rp_mutex);
925 +@@ -638,6 +642,8 @@ redisc:
926 + fc_disc_restart(disc);
927 + mutex_unlock(&disc->disc_mutex);
928 + }
929 ++free_fp:
930 ++ fc_frame_free(fp);
931 + out:
932 + kref_put(&rdata->kref, fc_rport_destroy);
933 + if (!IS_ERR(fp))
934 +diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
935 +index 5d2dfdb41a6ff..758d3a67047df 100644
936 +--- a/drivers/scsi/ufs/ufs_quirks.h
937 ++++ b/drivers/scsi/ufs/ufs_quirks.h
938 +@@ -21,6 +21,7 @@
939 + #define UFS_ANY_VENDOR 0xFFFF
940 + #define UFS_ANY_MODEL "ANY_MODEL"
941 +
942 ++#define UFS_VENDOR_MICRON 0x12C
943 + #define UFS_VENDOR_TOSHIBA 0x198
944 + #define UFS_VENDOR_SAMSUNG 0x1CE
945 + #define UFS_VENDOR_SKHYNIX 0x1AD
946 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
947 +index bd21c9cdf8183..ab628fd37e026 100644
948 +--- a/drivers/scsi/ufs/ufshcd.c
949 ++++ b/drivers/scsi/ufs/ufshcd.c
950 +@@ -218,6 +218,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
951 +
952 + static struct ufs_dev_fix ufs_fixups[] = {
953 + /* UFS cards deviations table */
954 ++ UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
955 ++ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
956 + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
957 + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
958 + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
959 +diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
960 +index 671d078349cc6..0a7fd56c1ed9d 100644
961 +--- a/drivers/spi/Kconfig
962 ++++ b/drivers/spi/Kconfig
963 +@@ -817,4 +817,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
964 +
965 + endif # SPI_SLAVE
966 +
967 ++config SPI_DYNAMIC
968 ++ def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
969 ++
970 + endif # SPI
971 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
972 +index f589d8100e957..92e6b6774d98e 100644
973 +--- a/drivers/spi/spi.c
974 ++++ b/drivers/spi/spi.c
975 +@@ -432,6 +432,12 @@ static LIST_HEAD(spi_controller_list);
976 + */
977 + static DEFINE_MUTEX(board_lock);
978 +
979 ++/*
980 ++ * Prevents addition of devices with same chip select and
981 ++ * addition of devices below an unregistering controller.
982 ++ */
983 ++static DEFINE_MUTEX(spi_add_lock);
984 ++
985 + /**
986 + * spi_alloc_device - Allocate a new SPI device
987 + * @ctlr: Controller to which device is connected
988 +@@ -510,7 +516,6 @@ static int spi_dev_check(struct device *dev, void *data)
989 + */
990 + int spi_add_device(struct spi_device *spi)
991 + {
992 +- static DEFINE_MUTEX(spi_add_lock);
993 + struct spi_controller *ctlr = spi->controller;
994 + struct device *dev = ctlr->dev.parent;
995 + int status;
996 +@@ -538,6 +543,13 @@ int spi_add_device(struct spi_device *spi)
997 + goto done;
998 + }
999 +
1000 ++ /* Controller may unregister concurrently */
1001 ++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
1002 ++ !device_is_registered(&ctlr->dev)) {
1003 ++ status = -ENODEV;
1004 ++ goto done;
1005 ++ }
1006 ++
1007 + if (ctlr->cs_gpios)
1008 + spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
1009 +
1010 +@@ -2306,6 +2318,10 @@ void spi_unregister_controller(struct spi_controller *ctlr)
1011 + struct spi_controller *found;
1012 + int id = ctlr->bus_num;
1013 +
1014 ++ /* Prevent addition of new devices, unregister existing ones */
1015 ++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
1016 ++ mutex_lock(&spi_add_lock);
1017 ++
1018 + device_for_each_child(&ctlr->dev, NULL, __unregister);
1019 +
1020 + /* First make sure that this controller was ever added */
1021 +@@ -2326,6 +2342,9 @@ void spi_unregister_controller(struct spi_controller *ctlr)
1022 + if (found == ctlr)
1023 + idr_remove(&spi_master_idr, id);
1024 + mutex_unlock(&board_lock);
1025 ++
1026 ++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
1027 ++ mutex_unlock(&spi_add_lock);
1028 + }
1029 + EXPORT_SYMBOL_GPL(spi_unregister_controller);
1030 +
1031 +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
1032 +index 8da89925a874d..9c05e820857aa 100644
1033 +--- a/drivers/target/target_core_user.c
1034 ++++ b/drivers/target/target_core_user.c
1035 +@@ -612,7 +612,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
1036 + size = round_up(size+offset, PAGE_SIZE);
1037 +
1038 + while (size) {
1039 +- flush_dcache_page(virt_to_page(start));
1040 ++ flush_dcache_page(vmalloc_to_page(start));
1041 + start += PAGE_SIZE;
1042 + size -= PAGE_SIZE;
1043 + }
1044 +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
1045 +index 6dbdadb936a89..52083b710b87e 100644
1046 +--- a/drivers/vfio/vfio_iommu_type1.c
1047 ++++ b/drivers/vfio/vfio_iommu_type1.c
1048 +@@ -1193,13 +1193,16 @@ static int vfio_bus_type(struct device *dev, void *data)
1049 + static int vfio_iommu_replay(struct vfio_iommu *iommu,
1050 + struct vfio_domain *domain)
1051 + {
1052 +- struct vfio_domain *d;
1053 ++ struct vfio_domain *d = NULL;
1054 + struct rb_node *n;
1055 + unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1056 + int ret;
1057 +
1058 + /* Arbitrarily pick the first domain in the list for lookups */
1059 +- d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
1060 ++ if (!list_empty(&iommu->domain_list))
1061 ++ d = list_first_entry(&iommu->domain_list,
1062 ++ struct vfio_domain, next);
1063 ++
1064 + n = rb_first(&iommu->dma_list);
1065 +
1066 + for (; n; n = rb_next(n)) {
1067 +@@ -1217,6 +1220,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
1068 + phys_addr_t p;
1069 + dma_addr_t i;
1070 +
1071 ++ if (WARN_ON(!d)) { /* mapped w/o a domain?! */
1072 ++ ret = -EINVAL;
1073 ++ goto unwind;
1074 ++ }
1075 ++
1076 + phys = iommu_iova_to_phys(d->domain, iova);
1077 +
1078 + if (WARN_ON(!phys)) {
1079 +@@ -1246,7 +1254,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
1080 + if (npage <= 0) {
1081 + WARN_ON(!npage);
1082 + ret = (int)npage;
1083 +- return ret;
1084 ++ goto unwind;
1085 + }
1086 +
1087 + phys = pfn << PAGE_SHIFT;
1088 +@@ -1255,14 +1263,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
1089 +
1090 + ret = iommu_map(domain->domain, iova, phys,
1091 + size, dma->prot | domain->prot);
1092 +- if (ret)
1093 +- return ret;
1094 ++ if (ret) {
1095 ++ if (!dma->iommu_mapped)
1096 ++ vfio_unpin_pages_remote(dma, iova,
1097 ++ phys >> PAGE_SHIFT,
1098 ++ size >> PAGE_SHIFT,
1099 ++ true);
1100 ++ goto unwind;
1101 ++ }
1102 +
1103 + iova += size;
1104 + }
1105 ++ }
1106 ++
1107 ++ /* All dmas are now mapped, defer to second tree walk for unwind */
1108 ++ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
1109 ++ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
1110 ++
1111 + dma->iommu_mapped = true;
1112 + }
1113 ++
1114 + return 0;
1115 ++
1116 ++unwind:
1117 ++ for (; n; n = rb_prev(n)) {
1118 ++ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
1119 ++ dma_addr_t iova;
1120 ++
1121 ++ if (dma->iommu_mapped) {
1122 ++ iommu_unmap(domain->domain, dma->iova, dma->size);
1123 ++ continue;
1124 ++ }
1125 ++
1126 ++ iova = dma->iova;
1127 ++ while (iova < dma->iova + dma->size) {
1128 ++ phys_addr_t phys, p;
1129 ++ size_t size;
1130 ++ dma_addr_t i;
1131 ++
1132 ++ phys = iommu_iova_to_phys(domain->domain, iova);
1133 ++ if (!phys) {
1134 ++ iova += PAGE_SIZE;
1135 ++ continue;
1136 ++ }
1137 ++
1138 ++ size = PAGE_SIZE;
1139 ++ p = phys + size;
1140 ++ i = iova + size;
1141 ++ while (i < dma->iova + dma->size &&
1142 ++ p == iommu_iova_to_phys(domain->domain, i)) {
1143 ++ size += PAGE_SIZE;
1144 ++ p += PAGE_SIZE;
1145 ++ i += PAGE_SIZE;
1146 ++ }
1147 ++
1148 ++ iommu_unmap(domain->domain, iova, size);
1149 ++ vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
1150 ++ size >> PAGE_SHIFT, true);
1151 ++ }
1152 ++ }
1153 ++
1154 ++ return ret;
1155 + }
1156 +
1157 + /*
1158 +diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
1159 +index cc1006375cacb..f50cc1a7c31a9 100644
1160 +--- a/drivers/video/fbdev/efifb.c
1161 ++++ b/drivers/video/fbdev/efifb.c
1162 +@@ -449,7 +449,7 @@ static int efifb_probe(struct platform_device *dev)
1163 + info->apertures->ranges[0].base = efifb_fix.smem_start;
1164 + info->apertures->ranges[0].size = size_remap;
1165 +
1166 +- if (efi_enabled(EFI_BOOT) &&
1167 ++ if (efi_enabled(EFI_MEMMAP) &&
1168 + !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
1169 + if ((efifb_fix.smem_start + efifb_fix.smem_len) >
1170 + (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
1171 +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
1172 +index 6228b48d1e127..df7980aef927a 100644
1173 +--- a/drivers/virtio/virtio_ring.c
1174 ++++ b/drivers/virtio/virtio_ring.c
1175 +@@ -828,6 +828,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
1176 + {
1177 + struct vring_virtqueue *vq = to_vvq(_vq);
1178 +
1179 ++ if (unlikely(vq->broken))
1180 ++ return false;
1181 ++
1182 + virtio_mb(vq->weak_barriers);
1183 + return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
1184 + }
1185 +diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
1186 +index 5f6b77ea34fb5..128375ff80b8c 100644
1187 +--- a/drivers/xen/preempt.c
1188 ++++ b/drivers/xen/preempt.c
1189 +@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
1190 + asmlinkage __visible void xen_maybe_preempt_hcall(void)
1191 + {
1192 + if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
1193 +- && need_resched())) {
1194 ++ && need_resched() && !preempt_count())) {
1195 + /*
1196 + * Clear flag as we may be rescheduled on a different
1197 + * cpu.
1198 +diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
1199 +index 069273a2483f9..fc6c42eeb659c 100644
1200 +--- a/fs/afs/dynroot.c
1201 ++++ b/fs/afs/dynroot.c
1202 +@@ -299,15 +299,17 @@ void afs_dynroot_depopulate(struct super_block *sb)
1203 + net->dynroot_sb = NULL;
1204 + mutex_unlock(&net->proc_cells_lock);
1205 +
1206 +- inode_lock(root->d_inode);
1207 +-
1208 +- /* Remove all the pins for dirs created for manually added cells */
1209 +- list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
1210 +- if (subdir->d_fsdata) {
1211 +- subdir->d_fsdata = NULL;
1212 +- dput(subdir);
1213 ++ if (root) {
1214 ++ inode_lock(root->d_inode);
1215 ++
1216 ++ /* Remove all the pins for dirs created for manually added cells */
1217 ++ list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
1218 ++ if (subdir->d_fsdata) {
1219 ++ subdir->d_fsdata = NULL;
1220 ++ dput(subdir);
1221 ++ }
1222 + }
1223 +- }
1224 +
1225 +- inode_unlock(root->d_inode);
1226 ++ inode_unlock(root->d_inode);
1227 ++ }
1228 + }
1229 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
1230 +index 15cb96ad15d8c..554727d82d432 100644
1231 +--- a/fs/btrfs/ctree.h
1232 ++++ b/fs/btrfs/ctree.h
1233 +@@ -3271,6 +3271,8 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
1234 + int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
1235 + unsigned long new_flags);
1236 + int btrfs_sync_fs(struct super_block *sb, int wait);
1237 ++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1238 ++ u64 subvol_objectid);
1239 +
1240 + static inline __printf(2, 3) __cold
1241 + void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
1242 +diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
1243 +index 1f3755b3a37ae..665ec85cb09b8 100644
1244 +--- a/fs/btrfs/export.c
1245 ++++ b/fs/btrfs/export.c
1246 +@@ -57,9 +57,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
1247 + return type;
1248 + }
1249 +
1250 +-static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
1251 +- u64 root_objectid, u32 generation,
1252 +- int check_generation)
1253 ++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
1254 ++ u64 root_objectid, u32 generation,
1255 ++ int check_generation)
1256 + {
1257 + struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1258 + struct btrfs_root *root;
1259 +@@ -152,7 +152,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
1260 + return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
1261 + }
1262 +
1263 +-static struct dentry *btrfs_get_parent(struct dentry *child)
1264 ++struct dentry *btrfs_get_parent(struct dentry *child)
1265 + {
1266 + struct inode *dir = d_inode(child);
1267 + struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
1268 +diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
1269 +index 57488ecd7d4ef..f32f4113c976a 100644
1270 +--- a/fs/btrfs/export.h
1271 ++++ b/fs/btrfs/export.h
1272 +@@ -18,4 +18,9 @@ struct btrfs_fid {
1273 + u64 parent_root_objectid;
1274 + } __attribute__ ((packed));
1275 +
1276 ++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
1277 ++ u64 root_objectid, u32 generation,
1278 ++ int check_generation);
1279 ++struct dentry *btrfs_get_parent(struct dentry *child);
1280 ++
1281 + #endif
1282 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1283 +index 1656ef0e959f0..bdfe159a60da6 100644
1284 +--- a/fs/btrfs/inode.c
1285 ++++ b/fs/btrfs/inode.c
1286 +@@ -628,7 +628,21 @@ cont:
1287 + PAGE_SET_WRITEBACK |
1288 + page_error_op |
1289 + PAGE_END_WRITEBACK);
1290 +- goto free_pages_out;
1291 ++
1292 ++ /*
1293 ++ * Ensure we only free the compressed pages if we have
1294 ++ * them allocated, as we can still reach here with
1295 ++ * inode_need_compress() == false.
1296 ++ */
1297 ++ if (pages) {
1298 ++ for (i = 0; i < nr_pages; i++) {
1299 ++ WARN_ON(pages[i]->mapping);
1300 ++ put_page(pages[i]);
1301 ++ }
1302 ++ kfree(pages);
1303 ++ }
1304 ++
1305 ++ return;
1306 + }
1307 + }
1308 +
1309 +@@ -706,13 +720,6 @@ cleanup_and_bail_uncompressed:
1310 + *num_added += 1;
1311 +
1312 + return;
1313 +-
1314 +-free_pages_out:
1315 +- for (i = 0; i < nr_pages; i++) {
1316 +- WARN_ON(pages[i]->mapping);
1317 +- put_page(pages[i]);
1318 +- }
1319 +- kfree(pages);
1320 + }
1321 +
1322 + static void free_async_extent_pages(struct async_extent *async_extent)
1323 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
1324 +index ed539496089f1..4d2810a32b4a9 100644
1325 +--- a/fs/btrfs/super.c
1326 ++++ b/fs/btrfs/super.c
1327 +@@ -1000,8 +1000,8 @@ out:
1328 + return error;
1329 + }
1330 +
1331 +-static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1332 +- u64 subvol_objectid)
1333 ++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1334 ++ u64 subvol_objectid)
1335 + {
1336 + struct btrfs_root *root = fs_info->tree_root;
1337 + struct btrfs_root *fs_root;
1338 +@@ -1282,6 +1282,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1339 + {
1340 + struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
1341 + const char *compress_type;
1342 ++ const char *subvol_name;
1343 +
1344 + if (btrfs_test_opt(info, DEGRADED))
1345 + seq_puts(seq, ",degraded");
1346 +@@ -1366,8 +1367,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1347 + seq_puts(seq, ",ref_verify");
1348 + seq_printf(seq, ",subvolid=%llu",
1349 + BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1350 +- seq_puts(seq, ",subvol=");
1351 +- seq_dentry(seq, dentry, " \t\n\\");
1352 ++ subvol_name = btrfs_get_subvol_name_from_objectid(info,
1353 ++ BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1354 ++ if (!IS_ERR(subvol_name)) {
1355 ++ seq_puts(seq, ",subvol=");
1356 ++ seq_escape(seq, subvol_name, " \t\n\\");
1357 ++ kfree(subvol_name);
1358 ++ }
1359 + return 0;
1360 + }
1361 +
1362 +@@ -1412,8 +1418,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1363 + goto out;
1364 + }
1365 + }
1366 +- subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb),
1367 +- subvol_objectid);
1368 ++ subvol_name = btrfs_get_subvol_name_from_objectid(
1369 ++ btrfs_sb(mnt->mnt_sb), subvol_objectid);
1370 + if (IS_ERR(subvol_name)) {
1371 + root = ERR_CAST(subvol_name);
1372 + subvol_name = NULL;
1373 +diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
1374 +index aefb0169d46d7..afec808a763b1 100644
1375 +--- a/fs/btrfs/sysfs.c
1376 ++++ b/fs/btrfs/sysfs.c
1377 +@@ -10,6 +10,7 @@
1378 + #include <linux/kobject.h>
1379 + #include <linux/bug.h>
1380 + #include <linux/debugfs.h>
1381 ++#include <linux/sched/mm.h>
1382 +
1383 + #include "ctree.h"
1384 + #include "disk-io.h"
1385 +@@ -766,7 +767,9 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
1386 + {
1387 + int error = 0;
1388 + struct btrfs_device *dev;
1389 ++ unsigned int nofs_flag;
1390 +
1391 ++ nofs_flag = memalloc_nofs_save();
1392 + list_for_each_entry(dev, &fs_devices->devices, dev_list) {
1393 + struct hd_struct *disk;
1394 + struct kobject *disk_kobj;
1395 +@@ -785,6 +788,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
1396 + if (error)
1397 + break;
1398 + }
1399 ++ memalloc_nofs_restore(nofs_flag);
1400 +
1401 + return error;
1402 + }
1403 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
1404 +index a2e903203bf9f..0fa14d8b9c64c 100644
1405 +--- a/fs/ceph/mds_client.c
1406 ++++ b/fs/ceph/mds_client.c
1407 +@@ -3682,7 +3682,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
1408 + return -ENOMEM;
1409 + }
1410 +
1411 +- fsc->mdsc = mdsc;
1412 + init_completion(&mdsc->safe_umount_waiters);
1413 + init_waitqueue_head(&mdsc->session_close_wq);
1414 + INIT_LIST_HEAD(&mdsc->waiting_for_map);
1415 +@@ -3723,6 +3722,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
1416 +
1417 + strscpy(mdsc->nodename, utsname()->nodename,
1418 + sizeof(mdsc->nodename));
1419 ++
1420 ++ fsc->mdsc = mdsc;
1421 + return 0;
1422 + }
1423 +
1424 +diff --git a/fs/eventpoll.c b/fs/eventpoll.c
1425 +index 58f48ea0db234..f988ccd064a22 100644
1426 +--- a/fs/eventpoll.c
1427 ++++ b/fs/eventpoll.c
1428 +@@ -1890,9 +1890,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
1429 + * not already there, and calling reverse_path_check()
1430 + * during ep_insert().
1431 + */
1432 +- if (list_empty(&epi->ffd.file->f_tfile_llink))
1433 ++ if (list_empty(&epi->ffd.file->f_tfile_llink)) {
1434 ++ get_file(epi->ffd.file);
1435 + list_add(&epi->ffd.file->f_tfile_llink,
1436 + &tfile_check_list);
1437 ++ }
1438 + }
1439 + }
1440 + mutex_unlock(&ep->mtx);
1441 +@@ -1936,6 +1938,7 @@ static void clear_tfile_check_list(void)
1442 + file = list_first_entry(&tfile_check_list, struct file,
1443 + f_tfile_llink);
1444 + list_del_init(&file->f_tfile_llink);
1445 ++ fput(file);
1446 + }
1447 + INIT_LIST_HEAD(&tfile_check_list);
1448 + }
1449 +@@ -2091,13 +2094,13 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1450 + mutex_lock(&epmutex);
1451 + if (is_file_epoll(tf.file)) {
1452 + error = -ELOOP;
1453 +- if (ep_loop_check(ep, tf.file) != 0) {
1454 +- clear_tfile_check_list();
1455 ++ if (ep_loop_check(ep, tf.file) != 0)
1456 + goto error_tgt_fput;
1457 +- }
1458 +- } else
1459 ++ } else {
1460 ++ get_file(tf.file);
1461 + list_add(&tf.file->f_tfile_llink,
1462 + &tfile_check_list);
1463 ++ }
1464 + mutex_lock_nested(&ep->mtx, 0);
1465 + if (is_file_epoll(tf.file)) {
1466 + tep = tf.file->private_data;
1467 +@@ -2121,8 +2124,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1468 + error = ep_insert(ep, &epds, tf.file, fd, full_check);
1469 + } else
1470 + error = -EEXIST;
1471 +- if (full_check)
1472 +- clear_tfile_check_list();
1473 + break;
1474 + case EPOLL_CTL_DEL:
1475 + if (epi)
1476 +@@ -2145,8 +2146,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1477 + mutex_unlock(&ep->mtx);
1478 +
1479 + error_tgt_fput:
1480 +- if (full_check)
1481 ++ if (full_check) {
1482 ++ clear_tfile_check_list();
1483 + mutex_unlock(&epmutex);
1484 ++ }
1485 +
1486 + fdput(tf);
1487 + error_fput:
1488 +diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
1489 +index d203cc935ff83..552164034d340 100644
1490 +--- a/fs/ext4/block_validity.c
1491 ++++ b/fs/ext4/block_validity.c
1492 +@@ -68,7 +68,7 @@ static int add_system_zone(struct ext4_system_blocks *system_blks,
1493 + ext4_fsblk_t start_blk,
1494 + unsigned int count)
1495 + {
1496 +- struct ext4_system_zone *new_entry = NULL, *entry;
1497 ++ struct ext4_system_zone *new_entry, *entry;
1498 + struct rb_node **n = &system_blks->root.rb_node, *node;
1499 + struct rb_node *parent = NULL, *new_node = NULL;
1500 +
1501 +@@ -79,30 +79,20 @@ static int add_system_zone(struct ext4_system_blocks *system_blks,
1502 + n = &(*n)->rb_left;
1503 + else if (start_blk >= (entry->start_blk + entry->count))
1504 + n = &(*n)->rb_right;
1505 +- else {
1506 +- if (start_blk + count > (entry->start_blk +
1507 +- entry->count))
1508 +- entry->count = (start_blk + count -
1509 +- entry->start_blk);
1510 +- new_node = *n;
1511 +- new_entry = rb_entry(new_node, struct ext4_system_zone,
1512 +- node);
1513 +- break;
1514 +- }
1515 ++ else /* Unexpected overlap of system zones. */
1516 ++ return -EFSCORRUPTED;
1517 + }
1518 +
1519 +- if (!new_entry) {
1520 +- new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
1521 +- GFP_KERNEL);
1522 +- if (!new_entry)
1523 +- return -ENOMEM;
1524 +- new_entry->start_blk = start_blk;
1525 +- new_entry->count = count;
1526 +- new_node = &new_entry->node;
1527 +-
1528 +- rb_link_node(new_node, parent, n);
1529 +- rb_insert_color(new_node, &system_blks->root);
1530 +- }
1531 ++ new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
1532 ++ GFP_KERNEL);
1533 ++ if (!new_entry)
1534 ++ return -ENOMEM;
1535 ++ new_entry->start_blk = start_blk;
1536 ++ new_entry->count = count;
1537 ++ new_node = &new_entry->node;
1538 ++
1539 ++ rb_link_node(new_node, parent, n);
1540 ++ rb_insert_color(new_node, &system_blks->root);
1541 +
1542 + /* Can we merge to the left? */
1543 + node = rb_prev(new_node);
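add_system_zone() used to treat an overlapping insertion as a request to widen the existing entry; the hunk above instead treats any overlap between system zones as on-disk corruption and returns -EFSCORRUPTED before allocating. The condition being detected is the usual half-open interval test, sketched here with hypothetical parameter names:

    /* does [start, start + count) overlap [e_start, e_start + e_count)? */
    static int zones_overlap(unsigned long start, unsigned long count,
                             unsigned long e_start, unsigned long e_count)
    {
        return start < e_start + e_count && e_start < start + count;
    }

In the rb-tree walk this is exactly the "else" case, where the new range is neither entirely left nor entirely right of the current entry.
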
1544 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1545 +index a8f2e3549bb95..186a2dd05bd87 100644
1546 +--- a/fs/ext4/namei.c
1547 ++++ b/fs/ext4/namei.c
1548 +@@ -1309,8 +1309,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
1549 + ext4_match(fname, de)) {
1550 + /* found a match - just to be sure, do
1551 + * a full check */
1552 +- if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
1553 +- bh->b_size, offset))
1554 ++ if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
1555 ++ buf_size, offset))
1556 + return -1;
1557 + *res_dir = de;
1558 + return 1;
1559 +@@ -1732,7 +1732,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1560 + blocksize, hinfo, map);
1561 + map -= count;
1562 + dx_sort_map(map, count);
1563 +- /* Split the existing block in the middle, size-wise */
1564 ++ /* Ensure that neither split block is over half full */
1565 + size = 0;
1566 + move = 0;
1567 + for (i = count-1; i >= 0; i--) {
1568 +@@ -1742,8 +1742,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1569 + size += map[i].size;
1570 + move++;
1571 + }
1572 +- /* map index at which we will split */
1573 +- split = count - move;
1574 ++ /*
1575 ++ * map index at which we will split
1576 ++ *
1577 ++ * If the sum of active entries didn't exceed half the block size, just
1578 ++ * split it in half by count; each resulting block will have at least
1579 ++ * half the space free.
1580 ++ */
1581 ++ if (i > 0)
1582 ++ split = count - move;
1583 ++ else
1584 ++ split = count/2;
1585 ++
1586 + hash2 = map[split].hash;
1587 + continued = hash2 == map[split - 1].hash;
1588 + dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
1589 +@@ -2344,7 +2354,7 @@ int ext4_generic_delete_entry(handle_t *handle,
1590 + de = (struct ext4_dir_entry_2 *)entry_buf;
1591 + while (i < buf_size - csum_size) {
1592 + if (ext4_check_dir_entry(dir, NULL, de, bh,
1593 +- bh->b_data, bh->b_size, i))
1594 ++ entry_buf, buf_size, i))
1595 + return -EFSCORRUPTED;
1596 + if (de == de_del) {
1597 + if (pde)
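Two independent ext4 fixes sit in this file: ext4_search_dir() and ext4_generic_delete_entry() now validate entries against the buffer actually being walked (search_buf/entry_buf, which for inline directories is not bh->b_data), and do_split() picks a saner split point when the live entries all fit in half a block. A rough, simplified sketch of the new split choice (an array of sizes standing in for the map[] of dx_map_entry):

    /* scan entries from the end, stopping once more than half of the next
     * entry would land past the block midpoint; if the scan reaches the
     * first entry, everything fits in half a block, so split evenly */
    static int pick_split(const unsigned int *size, int count,
                          unsigned int blocksize)
    {
        unsigned int sum = 0;
        int i, move = 0;

        for (i = count - 1; i >= 0; i--) {
            if (sum + size[i] / 2 > blocksize / 2)
                break;
            sum += size[i];
            move++;
        }
        return i > 0 ? count - move : count / 2;
    }
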
1598 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
1599 +index a15a22d209090..8a50722bca29e 100644
1600 +--- a/fs/jbd2/journal.c
1601 ++++ b/fs/jbd2/journal.c
1602 +@@ -1370,8 +1370,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
1603 + int ret;
1604 +
1605 + /* Buffer got discarded which means block device got invalidated */
1606 +- if (!buffer_mapped(bh))
1607 ++ if (!buffer_mapped(bh)) {
1608 ++ unlock_buffer(bh);
1609 + return -EIO;
1610 ++ }
1611 +
1612 + trace_jbd2_write_superblock(journal, write_flags);
1613 + if (!(journal->j_flags & JBD2_BARRIER))
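The jbd2 hunk adds the missing unlock_buffer() on the early -EIO return: jbd2_write_superblock() takes the buffer lock before this check, so bailing out without unlocking left the superblock buffer locked forever. A userspace analogue of the rule that every exit path under a lock must release it:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int write_guarded(int mapped)
    {
        pthread_mutex_lock(&lock);
        if (!mapped) {
            pthread_mutex_unlock(&lock);  /* the previously missing release */
            return -1;
        }
        /* ... perform the write ... */
        pthread_mutex_unlock(&lock);
        return 0;
    }
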
1614 +diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
1615 +index f20cff1194bb6..776493713153f 100644
1616 +--- a/fs/jffs2/dir.c
1617 ++++ b/fs/jffs2/dir.c
1618 +@@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
1619 + int ret;
1620 + uint32_t now = JFFS2_NOW();
1621 +
1622 ++ mutex_lock(&f->sem);
1623 + for (fd = f->dents ; fd; fd = fd->next) {
1624 +- if (fd->ino)
1625 ++ if (fd->ino) {
1626 ++ mutex_unlock(&f->sem);
1627 + return -ENOTEMPTY;
1628 ++ }
1629 + }
1630 ++ mutex_unlock(&f->sem);
1631 +
1632 + ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
1633 + dentry->d_name.len, f, now);
1634 +diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
1635 +index f86f51f99aceb..1dcadd22b440d 100644
1636 +--- a/fs/romfs/storage.c
1637 ++++ b/fs/romfs/storage.c
1638 +@@ -221,10 +221,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos,
1639 + size_t limit;
1640 +
1641 + limit = romfs_maxsize(sb);
1642 +- if (pos >= limit)
1643 ++ if (pos >= limit || buflen > limit - pos)
1644 + return -EIO;
1645 +- if (buflen > limit - pos)
1646 +- buflen = limit - pos;
1647 +
1648 + #ifdef CONFIG_ROMFS_ON_MTD
1649 + if (sb->s_mtd)
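Where romfs_dev_read() used to silently shorten an out-of-range read, leaving the caller to treat the uninitialized tail of its buffer as file data, it now fails the whole request. Note the shape of the combined check: with pos < limit already established, writing the length test as buflen > limit - pos avoids the overflow that pos + buflen > limit could hit. A compilable sketch:

    #include <stddef.h>

    /* 1 if [pos, pos + buflen) lies inside [0, limit), else 0 */
    static int window_ok(size_t pos, size_t buflen, size_t limit)
    {
        return pos < limit && buflen <= limit - pos;
    }
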
1650 +diff --git a/fs/signalfd.c b/fs/signalfd.c
1651 +index 4fcd1498acf52..3c40a3bf772ce 100644
1652 +--- a/fs/signalfd.c
1653 ++++ b/fs/signalfd.c
1654 +@@ -313,9 +313,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
1655 + {
1656 + sigset_t mask;
1657 +
1658 +- if (sizemask != sizeof(sigset_t) ||
1659 +- copy_from_user(&mask, user_mask, sizeof(mask)))
1660 ++ if (sizemask != sizeof(sigset_t))
1661 + return -EINVAL;
1662 ++ if (copy_from_user(&mask, user_mask, sizeof(mask)))
1663 ++ return -EFAULT;
1664 + return do_signalfd4(ufd, &mask, flags);
1665 + }
1666 +
1667 +@@ -324,9 +325,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
1668 + {
1669 + sigset_t mask;
1670 +
1671 +- if (sizemask != sizeof(sigset_t) ||
1672 +- copy_from_user(&mask, user_mask, sizeof(mask)))
1673 ++ if (sizemask != sizeof(sigset_t))
1674 + return -EINVAL;
1675 ++ if (copy_from_user(&mask, user_mask, sizeof(mask)))
1676 ++ return -EFAULT;
1677 + return do_signalfd4(ufd, &mask, 0);
1678 + }
1679 +
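The signalfd hunks split one overloaded condition into two so each failure reports its documented errno: a mask size mismatch is -EINVAL, while a faulting user pointer is -EFAULT. A userspace analogue, with memcpy and a NULL check standing in for copy_from_user():

    #include <errno.h>
    #include <string.h>

    static int fetch_mask(void *dst, const void *src, size_t len, size_t expect)
    {
        if (len != expect)
            return -EINVAL;   /* bad size argument */
        if (!src)             /* stand-in for copy_from_user() faulting */
            return -EFAULT;
        memcpy(dst, src, len);
        return 0;
    }
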
1680 +diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
1681 +index e9f810fc67317..43585850f1546 100644
1682 +--- a/fs/xfs/xfs_sysfs.h
1683 ++++ b/fs/xfs/xfs_sysfs.h
1684 +@@ -32,9 +32,11 @@ xfs_sysfs_init(
1685 + struct xfs_kobj *parent_kobj,
1686 + const char *name)
1687 + {
1688 ++ struct kobject *parent;
1689 ++
1690 ++ parent = parent_kobj ? &parent_kobj->kobject : NULL;
1691 + init_completion(&kobj->complete);
1692 +- return kobject_init_and_add(&kobj->kobject, ktype,
1693 +- &parent_kobj->kobject, "%s", name);
1694 ++ return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
1695 + }
1696 +
1697 + static inline void
1698 +diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
1699 +index c23257a26c2b8..b8f05d5909b59 100644
1700 +--- a/fs/xfs/xfs_trans_dquot.c
1701 ++++ b/fs/xfs/xfs_trans_dquot.c
1702 +@@ -657,7 +657,7 @@ xfs_trans_dqresv(
1703 + }
1704 + }
1705 + if (ninos > 0) {
1706 +- total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
1707 ++ total_count = dqp->q_res_icount + ninos;
1708 + timer = be32_to_cpu(dqp->q_core.d_itimer);
1709 + warns = be16_to_cpu(dqp->q_core.d_iwarns);
1710 + warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
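The xfs_trans_dqresv() change bases the inode limit check on q_res_icount, the reserved count that already includes other transactions' uncommitted reservations, rather than the committed on-disk d_icount; otherwise concurrent reservations could each pass the check individually and together overshoot the quota. The arithmetic, as a sketch with hypothetical names:

    /* 1 if reserving ninos more inodes stays within the hard limit
     * (a hardlimit of 0 means unlimited) */
    static int inode_resv_ok(unsigned long long res_icount,
                             unsigned long long ninos,
                             unsigned long long hardlimit)
    {
        return !hardlimit || res_icount + ninos <= hardlimit;
    }
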
1711 +diff --git a/kernel/kthread.c b/kernel/kthread.c
1712 +index 087d18d771b53..b786eda90bb56 100644
1713 +--- a/kernel/kthread.c
1714 ++++ b/kernel/kthread.c
1715 +@@ -190,8 +190,15 @@ static void __kthread_parkme(struct kthread *self)
1716 + if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
1717 + break;
1718 +
1719 ++ /*
1720 ++ * Thread is going to call schedule(), do not preempt it,
1721 ++ * or the caller of kthread_park() may spend more time in
1722 ++ * wait_task_inactive().
1723 ++ */
1724 ++ preempt_disable();
1725 + complete(&self->parked);
1726 +- schedule();
1727 ++ schedule_preempt_disabled();
1728 ++ preempt_enable();
1729 + }
1730 + __set_current_state(TASK_RUNNING);
1731 + }
1732 +@@ -236,8 +243,14 @@ static int kthread(void *_create)
1733 + /* OK, tell user we're spawned, wait for stop or wakeup */
1734 + __set_current_state(TASK_UNINTERRUPTIBLE);
1735 + create->result = current;
1736 ++ /*
1737 ++ * Thread is going to call schedule(), do not preempt it,
1738 ++ * or the creator may spend more time in wait_task_inactive().
1739 ++ */
1740 ++ preempt_disable();
1741 + complete(done);
1742 +- schedule();
1743 ++ schedule_preempt_disabled();
1744 ++ preempt_enable();
1745 +
1746 + ret = -EINTR;
1747 + if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
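Both kthread hunks close the same window: once complete() wakes the parker or creator, the new thread must reach schedule() without being preempted, or wait_task_inactive() on the other side can spin at length on a task that is runnable but still on a CPU. The pattern they apply (kernel context, not a standalone program):

    preempt_disable();
    complete(&self->parked);      /* or complete(done); waker may run now */
    schedule_preempt_disabled();  /* sleep; returns with preemption still off */
    preempt_enable();

schedule_preempt_disabled() expects to be entered with preemption disabled and returns the same way, hence the trailing preempt_enable().
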
1748 +diff --git a/kernel/relay.c b/kernel/relay.c
1749 +index 13c19f39e31e2..735cb208f023b 100644
1750 +--- a/kernel/relay.c
1751 ++++ b/kernel/relay.c
1752 +@@ -197,6 +197,7 @@ free_buf:
1753 + static void relay_destroy_channel(struct kref *kref)
1754 + {
1755 + struct rchan *chan = container_of(kref, struct rchan, kref);
1756 ++ free_percpu(chan->buf);
1757 + kfree(chan);
1758 + }
1759 +
1760 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1761 +index e068c7f75a849..8a5708f31aa07 100644
1762 +--- a/mm/hugetlb.c
1763 ++++ b/mm/hugetlb.c
1764 +@@ -4650,25 +4650,21 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
1765 + void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
1766 + unsigned long *start, unsigned long *end)
1767 + {
1768 +- unsigned long check_addr = *start;
1769 ++ unsigned long a_start, a_end;
1770 +
1771 + if (!(vma->vm_flags & VM_MAYSHARE))
1772 + return;
1773 +
1774 +- for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
1775 +- unsigned long a_start = check_addr & PUD_MASK;
1776 +- unsigned long a_end = a_start + PUD_SIZE;
1777 ++ /* Extend the range to be PUD aligned for a worst case scenario */
1778 ++ a_start = ALIGN_DOWN(*start, PUD_SIZE);
1779 ++ a_end = ALIGN(*end, PUD_SIZE);
1780 +
1781 +- /*
1782 +- * If sharing is possible, adjust start/end if necessary.
1783 +- */
1784 +- if (range_in_vma(vma, a_start, a_end)) {
1785 +- if (a_start < *start)
1786 +- *start = a_start;
1787 +- if (a_end > *end)
1788 +- *end = a_end;
1789 +- }
1790 +- }
1791 ++ /*
1792 ++ * Intersect the range with the vma range, since pmd sharing cannot
1793 ++ * cross vma boundaries anyway
1794 ++ */
1795 ++ *start = max(vma->vm_start, a_start);
1796 ++ *end = min(vma->vm_end, a_end);
1797 + }
1798 +
1799 + /*
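adjust_range_if_pmd_sharing_possible() no longer probes each PUD-sized window with range_in_vma(); it widens the range to PUD alignment in both directions and clamps the result to the VMA, which also handles endpoints that are themselves unaligned. A compilable sketch of the arithmetic (alignment must be a power of two, as PUD_SIZE always is):

    #include <stdint.h>

    #define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))
    #define ALIGN_UP(x, a)   ALIGN_DOWN((x) + (a) - 1, (a))

    static void adjust_range(uint64_t vm_start, uint64_t vm_end,
                             uint64_t *start, uint64_t *end,
                             uint64_t pud_size)
    {
        uint64_t a_start = ALIGN_DOWN(*start, pud_size);
        uint64_t a_end   = ALIGN_UP(*end, pud_size);

        *start = a_start > vm_start ? a_start : vm_start;  /* max() */
        *end   = a_end   < vm_end   ? a_end   : vm_end;    /* min() */
    }
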
1800 +diff --git a/mm/khugepaged.c b/mm/khugepaged.c
1801 +index 483c4573695a9..f37be43f8caeb 100644
1802 +--- a/mm/khugepaged.c
1803 ++++ b/mm/khugepaged.c
1804 +@@ -394,7 +394,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
1805 +
1806 + static inline int khugepaged_test_exit(struct mm_struct *mm)
1807 + {
1808 +- return atomic_read(&mm->mm_users) == 0;
1809 ++ return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
1810 + }
1811 +
1812 + static bool hugepage_vma_check(struct vm_area_struct *vma,
1813 +@@ -427,7 +427,7 @@ int __khugepaged_enter(struct mm_struct *mm)
1814 + return -ENOMEM;
1815 +
1816 + /* __khugepaged_exit() must not run from under us */
1817 +- VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
1818 ++ VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
1819 + if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1820 + free_mm_slot(mm_slot);
1821 + return 0;
1822 +@@ -1005,9 +1005,6 @@ static void collapse_huge_page(struct mm_struct *mm,
1823 + * handled by the anon_vma lock + PG_lock.
1824 + */
1825 + down_write(&mm->mmap_sem);
1826 +- result = SCAN_ANY_PROCESS;
1827 +- if (!mmget_still_valid(mm))
1828 +- goto out;
1829 + result = hugepage_vma_revalidate(mm, address, &vma);
1830 + if (result)
1831 + goto out;
1832 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1833 +index 7181dfe764405..5717ee66c8b38 100644
1834 +--- a/mm/page_alloc.c
1835 ++++ b/mm/page_alloc.c
1836 +@@ -1115,6 +1115,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
1837 + struct page *page, *tmp;
1838 + LIST_HEAD(head);
1839 +
1840 ++ /*
1841 ++ * Ensure a proper count is passed; otherwise we would get stuck in
1842 ++ * the while (list_empty(list)) loop below.
1843 ++ */
1844 ++ count = min(pcp->count, count);
1845 + while (count) {
1846 + struct list_head *list;
1847 +
1848 +@@ -7395,7 +7400,7 @@ int __meminit init_per_zone_wmark_min(void)
1849 +
1850 + return 0;
1851 + }
1852 +-core_initcall(init_per_zone_wmark_min)
1853 ++postcore_initcall(init_per_zone_wmark_min)
1854 +
1855 + /*
1856 + * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
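free_pcppages_bulk() now clamps the requested count to pcp->count up front: if a caller asks to drain more pages than the per-cpu lists actually hold, the inner while (list_empty(list)) scan has nothing left to find and spins forever. (The separate one-liner moves init_per_zone_wmark_min to postcore_initcall, running it one initcall level later during boot.) A toy model of the clamp:

    /* drain no more than is actually queued; min(), as the patch adds */
    static unsigned int to_drain(unsigned int requested, unsigned int queued)
    {
        return requested < queued ? requested : queued;
    }
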
1857 +diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
1858 +index 16c8174658fd1..252495ff9010d 100644
1859 +--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
1860 ++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
1861 +@@ -268,6 +268,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
1862 + {
1863 + struct svc_rdma_recv_ctxt *ctxt;
1864 +
1865 ++ if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
1866 ++ return 0;
1867 + ctxt = svc_rdma_recv_ctxt_get(rdma);
1868 + if (!ctxt)
1869 + return -ENOMEM;
1870 +diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
1871 +index 8f004db6f6034..1ee33d2e15bf8 100644
1872 +--- a/scripts/kconfig/qconf.cc
1873 ++++ b/scripts/kconfig/qconf.cc
1874 +@@ -869,40 +869,40 @@ void ConfigList::focusInEvent(QFocusEvent *e)
1875 +
1876 + void ConfigList::contextMenuEvent(QContextMenuEvent *e)
1877 + {
1878 +- if (e->y() <= header()->geometry().bottom()) {
1879 +- if (!headerPopup) {
1880 +- QAction *action;
1881 +-
1882 +- headerPopup = new QMenu(this);
1883 +- action = new QAction("Show Name", this);
1884 +- action->setCheckable(true);
1885 +- connect(action, SIGNAL(toggled(bool)),
1886 +- parent(), SLOT(setShowName(bool)));
1887 +- connect(parent(), SIGNAL(showNameChanged(bool)),
1888 +- action, SLOT(setOn(bool)));
1889 +- action->setChecked(showName);
1890 +- headerPopup->addAction(action);
1891 +- action = new QAction("Show Range", this);
1892 +- action->setCheckable(true);
1893 +- connect(action, SIGNAL(toggled(bool)),
1894 +- parent(), SLOT(setShowRange(bool)));
1895 +- connect(parent(), SIGNAL(showRangeChanged(bool)),
1896 +- action, SLOT(setOn(bool)));
1897 +- action->setChecked(showRange);
1898 +- headerPopup->addAction(action);
1899 +- action = new QAction("Show Data", this);
1900 +- action->setCheckable(true);
1901 +- connect(action, SIGNAL(toggled(bool)),
1902 +- parent(), SLOT(setShowData(bool)));
1903 +- connect(parent(), SIGNAL(showDataChanged(bool)),
1904 +- action, SLOT(setOn(bool)));
1905 +- action->setChecked(showData);
1906 +- headerPopup->addAction(action);
1907 +- }
1908 +- headerPopup->exec(e->globalPos());
1909 +- e->accept();
1910 +- } else
1911 +- e->ignore();
1912 ++ if (!headerPopup) {
1913 ++ QAction *action;
1914 ++
1915 ++ headerPopup = new QMenu(this);
1916 ++ action = new QAction("Show Name", this);
1917 ++ action->setCheckable(true);
1918 ++ connect(action, SIGNAL(toggled(bool)),
1919 ++ parent(), SLOT(setShowName(bool)));
1920 ++ connect(parent(), SIGNAL(showNameChanged(bool)),
1921 ++ action, SLOT(setChecked(bool)));
1922 ++ action->setChecked(showName);
1923 ++ headerPopup->addAction(action);
1924 ++
1925 ++ action = new QAction("Show Range", this);
1926 ++ action->setCheckable(true);
1927 ++ connect(action, SIGNAL(toggled(bool)),
1928 ++ parent(), SLOT(setShowRange(bool)));
1929 ++ connect(parent(), SIGNAL(showRangeChanged(bool)),
1930 ++ action, SLOT(setChecked(bool)));
1931 ++ action->setChecked(showRange);
1932 ++ headerPopup->addAction(action);
1933 ++
1934 ++ action = new QAction("Show Data", this);
1935 ++ action->setCheckable(true);
1936 ++ connect(action, SIGNAL(toggled(bool)),
1937 ++ parent(), SLOT(setShowData(bool)));
1938 ++ connect(parent(), SIGNAL(showDataChanged(bool)),
1939 ++ action, SLOT(setChecked(bool)));
1940 ++ action->setChecked(showData);
1941 ++ headerPopup->addAction(action);
1942 ++ }
1943 ++
1944 ++ headerPopup->exec(e->globalPos());
1945 ++ e->accept();
1946 + }
1947 +
1948 + ConfigView*ConfigView::viewList;
1949 +@@ -1228,7 +1228,7 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
1950 +
1951 + action->setCheckable(true);
1952 + connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
1953 +- connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
1954 ++ connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setChecked(bool)));
1955 + action->setChecked(showDebug());
1956 + popup->addSeparator();
1957 + popup->addAction(action);
1958 +diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
1959 +index cbdb6d4bb91ef..f4aba065c9257 100644
1960 +--- a/sound/soc/codecs/msm8916-wcd-analog.c
1961 ++++ b/sound/soc/codecs/msm8916-wcd-analog.c
1962 +@@ -16,8 +16,8 @@
1963 +
1964 + #define CDC_D_REVISION1 (0xf000)
1965 + #define CDC_D_PERPH_SUBTYPE (0xf005)
1966 +-#define CDC_D_INT_EN_SET (0x015)
1967 +-#define CDC_D_INT_EN_CLR (0x016)
1968 ++#define CDC_D_INT_EN_SET (0xf015)
1969 ++#define CDC_D_INT_EN_CLR (0xf016)
1970 + #define MBHC_SWITCH_INT BIT(7)
1971 + #define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6)
1972 + #define MBHC_BUTTON_PRESS_DET BIT(5)
1973 +diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
1974 +index 6868e71e3a3f0..0572c3c964506 100644
1975 +--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
1976 ++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
1977 +@@ -339,7 +339,7 @@ static int sst_media_open(struct snd_pcm_substream *substream,
1978 +
1979 + ret_val = power_up_sst(stream);
1980 + if (ret_val < 0)
1981 +- return ret_val;
1982 ++ goto out_power_up;
1983 +
1984 + /* Make sure, that the period size is always even */
1985 + snd_pcm_hw_constraint_step(substream->runtime, 0,
1986 +@@ -348,8 +348,9 @@ static int sst_media_open(struct snd_pcm_substream *substream,
1987 + return snd_pcm_hw_constraint_integer(runtime,
1988 + SNDRV_PCM_HW_PARAM_PERIODS);
1989 + out_ops:
1990 +- kfree(stream);
1991 + mutex_unlock(&sst_lock);
1992 ++out_power_up:
1993 ++ kfree(stream);
1994 + return ret_val;
1995 + }
1996 +
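sst_media_open() previously leaked the stream when power_up_sst() failed, and its cleanup label freed the stream before dropping the lock. The fix reorders the labels so failures that hold sst_lock unlock first and then fall through to the free, while the new out_power_up label serves paths where the lock is already dropped. A compilable sketch of that layered goto-cleanup shape (names and failure flags are illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;

    static int open_stream(int fail_locked, int fail_unlocked)
    {
        char *stream = malloc(64);
        int ret = 0;

        if (!stream)
            return -1;
        pthread_mutex_lock(&lk);
        if (fail_locked) {
            ret = -1;
            goto out_ops;        /* still holding the lock: unlock, then free */
        }
        pthread_mutex_unlock(&lk);
        if (fail_unlocked) {
            ret = -1;
            goto out_power_up;   /* lock already dropped: free only */
        }
        free(stream);            /* toy success path cleans up too */
        return 0;

    out_ops:
        pthread_mutex_unlock(&lk);
    out_power_up:
        free(stream);
        return ret;
    }
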
1997 +diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
1998 +index c6b51571be945..44eee18c658ae 100644
1999 +--- a/sound/soc/qcom/qdsp6/q6routing.c
2000 ++++ b/sound/soc/qcom/qdsp6/q6routing.c
2001 +@@ -968,6 +968,20 @@ static int msm_routing_probe(struct snd_soc_component *c)
2002 + return 0;
2003 + }
2004 +
2005 ++static unsigned int q6routing_reg_read(struct snd_soc_component *component,
2006 ++ unsigned int reg)
2007 ++{
2008 ++ /* default value */
2009 ++ return 0;
2010 ++}
2011 ++
2012 ++static int q6routing_reg_write(struct snd_soc_component *component,
2013 ++ unsigned int reg, unsigned int val)
2014 ++{
2015 ++ /* dummy */
2016 ++ return 0;
2017 ++}
2018 ++
2019 + static const struct snd_soc_component_driver msm_soc_routing_component = {
2020 + .ops = &q6pcm_routing_ops,
2021 + .probe = msm_routing_probe,
2022 +@@ -976,6 +990,8 @@ static const struct snd_soc_component_driver msm_soc_routing_component = {
2023 + .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
2024 + .dapm_routes = intercon,
2025 + .num_dapm_routes = ARRAY_SIZE(intercon),
2026 ++ .read = q6routing_reg_read,
2027 ++ .write = q6routing_reg_write,
2028 + };
2029 +
2030 + static int q6pcm_routing_probe(struct platform_device *pdev)
2031 +diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
2032 +index 60169196b9481..4da4ec2552463 100644
2033 +--- a/tools/perf/util/probe-finder.c
2034 ++++ b/tools/perf/util/probe-finder.c
2035 +@@ -1351,7 +1351,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
2036 + tf.ntevs = 0;
2037 +
2038 + ret = debuginfo__find_probes(dbg, &tf.pf);
2039 +- if (ret < 0) {
2040 ++ if (ret < 0 || tf.ntevs == 0) {
2041 + for (i = 0; i < tf.ntevs; i++)
2042 + clear_probe_trace_event(&tf.tevs[i]);
2043 + zfree(tevs);
2044 +diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
2045 +index 075cb0c730149..90418d79ef676 100644
2046 +--- a/tools/testing/selftests/cgroup/cgroup_util.c
2047 ++++ b/tools/testing/selftests/cgroup/cgroup_util.c
2048 +@@ -95,7 +95,7 @@ int cg_read_strcmp(const char *cgroup, const char *control,
2049 +
2050 + /* Handle the case of comparing against empty string */
2051 + if (!expected)
2052 +- size = 32;
2053 ++ return -1;
2054 + else
2055 + size = strlen(expected) + 1;
2056 +
2057 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
2058 +index a5bc10d30618f..41d6285c3da99 100644
2059 +--- a/virt/kvm/arm/mmu.c
2060 ++++ b/virt/kvm/arm/mmu.c
2061 +@@ -323,7 +323,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
2062 + * destroying the VM), otherwise another faulting VCPU may come in and mess
2063 + * with things behind our backs.
2064 + */
2065 +-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
2066 ++static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,
2067 ++ bool may_block)
2068 + {
2069 + pgd_t *pgd;
2070 + phys_addr_t addr = start, end = start + size;
2071 +@@ -348,11 +349,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
2072 + * If the range is too large, release the kvm->mmu_lock
2073 + * to prevent starvation and lockup detector warnings.
2074 + */
2075 +- if (next != end)
2076 ++ if (may_block && next != end)
2077 + cond_resched_lock(&kvm->mmu_lock);
2078 + } while (pgd++, addr = next, addr != end);
2079 + }
2080 +
2081 ++static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
2082 ++{
2083 ++ __unmap_stage2_range(kvm, start, size, true);
2084 ++}
2085 ++
2086 + static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
2087 + phys_addr_t addr, phys_addr_t end)
2088 + {
2089 +@@ -1820,18 +1826,20 @@ static int handle_hva_to_gpa(struct kvm *kvm,
2090 +
2091 + static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2092 + {
2093 +- unmap_stage2_range(kvm, gpa, size);
2094 ++ bool may_block = *(bool *)data;
2095 ++
2096 ++ __unmap_stage2_range(kvm, gpa, size, may_block);
2097 + return 0;
2098 + }
2099 +
2100 + int kvm_unmap_hva_range(struct kvm *kvm,
2101 +- unsigned long start, unsigned long end)
2102 ++ unsigned long start, unsigned long end, bool blockable)
2103 + {
2104 + if (!kvm->arch.pgd)
2105 + return 0;
2106 +
2107 + trace_kvm_unmap_hva_range(start, end);
2108 +- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
2109 ++ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &blockable);
2110 + return 0;
2111 + }
2112 +
2113 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2114 +index 1218ea663c6d2..2155b52b17eca 100644
2115 +--- a/virt/kvm/kvm_main.c
2116 ++++ b/virt/kvm/kvm_main.c
2117 +@@ -410,7 +410,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
2118 + * count is also read inside the mmu_lock critical section.
2119 + */
2120 + kvm->mmu_notifier_count++;
2121 +- need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
2122 ++ need_tlb_flush = kvm_unmap_hva_range(kvm, start, end, blockable);
2123 + need_tlb_flush |= kvm->tlbs_dirty;
2124 + /* we've to flush the tlb before the pages can be freed */
2125 + if (need_tlb_flush)