
From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 26 Aug 2020 11:14:52
Message-Id: 1598440471.160539b70010e941a701f6a04687797753ffb9f6.mpagano@gentoo
commit: 160539b70010e941a701f6a04687797753ffb9f6
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Aug 26 11:14:31 2020 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Aug 26 11:14:31 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=160539b7

Linux patch 4.14.195

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

0000_README | 4 +
1194_linux-4.14.195.patch | 1484 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1488 insertions(+)

diff --git a/0000_README b/0000_README
index ac7660d..7d4b532 100644
--- a/0000_README
+++ b/0000_README
@@ -819,6 +819,10 @@ Patch: 1193_linux-4.14.194.patch
From: https://www.kernel.org
Desc: Linux 4.14.194

+Patch: 1194_linux-4.14.195.patch
+From: https://www.kernel.org
+Desc: Linux 4.14.195
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1194_linux-4.14.195.patch b/1194_linux-4.14.195.patch
new file mode 100644
index 0000000..b7cb544
--- /dev/null
+++ b/1194_linux-4.14.195.patch
@@ -0,0 +1,1484 @@
+diff --git a/Makefile b/Makefile
+index 8e2a1418c5ae6..a5946969f4fcb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 194
++SUBLEVEL = 195
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
+index d123ff90f7a83..9995bed6e92e2 100644
+--- a/arch/alpha/include/asm/io.h
++++ b/arch/alpha/include/asm/io.h
+@@ -493,10 +493,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
+ }
+ #endif
+
+-#define ioread16be(p) be16_to_cpu(ioread16(p))
+-#define ioread32be(p) be32_to_cpu(ioread32(p))
+-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
+-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
++#define ioread16be(p) swab16(ioread16(p))
++#define ioread32be(p) swab32(ioread32(p))
++#define iowrite16be(v,p) iowrite16(swab16(v), (p))
++#define iowrite32be(v,p) iowrite32(swab32(v), (p))
+
+ #define inb_p inb
+ #define inw_p inw
+diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
+index 9138a624c5c81..692f90e7fecc1 100644
+--- a/arch/m68k/include/asm/m53xxacr.h
++++ b/arch/m68k/include/asm/m53xxacr.h
+@@ -89,9 +89,9 @@
+ * coherency though in all cases. And for copyback caches we will need
+ * to push cached data as well.
+ */
+-#define CACHE_INIT CACR_CINVA
+-#define CACHE_INVALIDATE CACR_CINVA
+-#define CACHE_INVALIDATED CACR_CINVA
++#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC)
++#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA)
++#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
+
+ #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
+ (0x000f0000) + \
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 5fc8a010fdf07..ebe97e5500ee5 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -22,6 +22,7 @@
+ #include <linux/errno.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
++#include <linux/pagemap.h>
+ #include <linux/ptrace.h>
+ #include <linux/mman.h>
+ #include <linux/mm.h>
+@@ -66,15 +67,11 @@ static inline bool notify_page_fault(struct pt_regs *regs)
+ }
+
+ /*
+- * Check whether the instruction at regs->nip is a store using
++ * Check whether the instruction inst is a store using
+ * an update addressing form which will update r1.
+ */
+-static bool store_updates_sp(struct pt_regs *regs)
++static bool store_updates_sp(unsigned int inst)
+ {
+- unsigned int inst;
+-
+- if (get_user(inst, (unsigned int __user *)regs->nip))
+- return false;
+ /* check for 1 in the rA field */
+ if (((inst >> 16) & 0x1f) != 1)
+ return false;
+@@ -227,20 +224,24 @@ static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
+ return is_exec || (address >= TASK_SIZE);
+ }
+
++// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE
++#define SIGFRAME_MAX_SIZE (4096 + 128)
++
+ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
+- struct vm_area_struct *vma,
+- bool store_update_sp)
++ struct vm_area_struct *vma, unsigned int flags,
++ bool *must_retry)
+ {
+ /*
+ * N.B. The POWER/Open ABI allows programs to access up to
+ * 288 bytes below the stack pointer.
+- * The kernel signal delivery code writes up to about 1.5kB
++ * The kernel signal delivery code writes a bit over 4KB
+ * below the stack pointer (r1) before decrementing it.
+ * The exec code can write slightly over 640kB to the stack
+ * before setting the user r1. Thus we allow the stack to
+ * expand to 1MB without further checks.
+ */
+ if (address + 0x100000 < vma->vm_end) {
++ unsigned int __user *nip = (unsigned int __user *)regs->nip;
+ /* get user regs even if this fault is in kernel mode */
+ struct pt_regs *uregs = current->thread.regs;
+ if (uregs == NULL)
+@@ -258,8 +259,22 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
+ * between the last mapped region and the stack will
+ * expand the stack rather than segfaulting.
+ */
+- if (address + 2048 < uregs->gpr[1] && !store_update_sp)
+- return true;
++ if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1])
++ return false;
++
++ if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
++ access_ok(VERIFY_READ, nip, sizeof(*nip))) {
++ unsigned int inst;
++ int res;
++
++ pagefault_disable();
++ res = __get_user_inatomic(inst, nip);
++ pagefault_enable();
++ if (!res)
++ return !store_updates_sp(inst);
++ *must_retry = true;
++ }
++ return true;
+ }
+ return false;
+ }
+@@ -392,7 +407,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
+ int is_user = user_mode(regs);
+ int is_write = page_fault_is_write(error_code);
+ int fault, major = 0;
+- bool store_update_sp = false;
++ bool must_retry = false;
+
+ if (notify_page_fault(regs))
+ return 0;
+@@ -439,9 +454,6 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
+ * can result in fault, which will cause a deadlock when called with
+ * mmap_sem held
+ */
+- if (is_write && is_user)
+- store_update_sp = store_updates_sp(regs);
+-
+ if (is_user)
+ flags |= FAULT_FLAG_USER;
+ if (is_write)
+@@ -488,8 +500,17 @@ retry:
+ return bad_area(regs, address);
+
+ /* The stack is being expanded, check if it's valid */
+- if (unlikely(bad_stack_expansion(regs, address, vma, store_update_sp)))
+- return bad_area(regs, address);
++ if (unlikely(bad_stack_expansion(regs, address, vma, flags,
++ &must_retry))) {
++ if (!must_retry)
++ return bad_area(regs, address);
++
++ up_read(&mm->mmap_sem);
++ if (fault_in_pages_readable((const char __user *)regs->nip,
++ sizeof(unsigned int)))
++ return bad_area_nosemaphore(regs, address);
++ goto retry;
++ }
+
+ /* Try to expand it */
+ if (unlikely(expand_stack(vma, address)))
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 5ec935521204a..8d20d49b252a0 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -115,7 +115,6 @@ static void handle_system_shutdown(char event_modifier)
+ case EPOW_SHUTDOWN_ON_UPS:
+ pr_emerg("Loss of system power detected. System is running on"
+ " UPS/battery. Check RTAS error log for details\n");
+- orderly_poweroff(true);
+ break;
+
+ case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 44b6f23cc851d..4289c519af1be 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -39,6 +39,17 @@ static HLIST_HEAD(clk_root_list);
+ static HLIST_HEAD(clk_orphan_list);
+ static LIST_HEAD(clk_notifier_list);
+
++static struct hlist_head *all_lists[] = {
++ &clk_root_list,
++ &clk_orphan_list,
++ NULL,
++};
++
++static struct hlist_head *orphan_list[] = {
++ &clk_orphan_list,
++ NULL,
++};
++
+ /*** private data structures ***/
+
+ struct clk_core {
+@@ -1993,17 +2004,6 @@ static int inited = 0;
+ static DEFINE_MUTEX(clk_debug_lock);
+ static HLIST_HEAD(clk_debug_list);
+
+-static struct hlist_head *all_lists[] = {
+- &clk_root_list,
+- &clk_orphan_list,
+- NULL,
+-};
+-
+-static struct hlist_head *orphan_list[] = {
+- &clk_orphan_list,
+- NULL,
+-};
+-
+ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
+ int level)
+ {
+@@ -2735,6 +2735,34 @@ static const struct clk_ops clk_nodrv_ops = {
+ .set_parent = clk_nodrv_set_parent,
+ };
+
++static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
++ struct clk_core *target)
++{
++ int i;
++ struct clk_core *child;
++
++ for (i = 0; i < root->num_parents; i++)
++ if (root->parents[i] == target)
++ root->parents[i] = NULL;
++
++ hlist_for_each_entry(child, &root->children, child_node)
++ clk_core_evict_parent_cache_subtree(child, target);
++}
++
++/* Remove this clk from all parent caches */
++static void clk_core_evict_parent_cache(struct clk_core *core)
++{
++ struct hlist_head **lists;
++ struct clk_core *root;
++
++ lockdep_assert_held(&prepare_lock);
++
++ for (lists = all_lists; *lists; lists++)
++ hlist_for_each_entry(root, *lists, child_node)
++ clk_core_evict_parent_cache_subtree(root, core);
++
++}
++
+ /**
+ * clk_unregister - unregister a currently registered clock
+ * @clk: clock to unregister
+@@ -2773,6 +2801,8 @@ void clk_unregister(struct clk *clk)
+ clk_core_set_parent(child, NULL);
+ }
+
++ clk_core_evict_parent_cache(clk->core);
++
+ hlist_del_init(&clk->core->child_node);
+
+ if (clk->core->prepare_count)
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 1aa0b05c8cbdf..5c41dc9aaa46d 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1378,6 +1378,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+
+ intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+ cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
++ cpu->pstate.turbo_pstate = phy_max;
+ } else {
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ }
+diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
+index aa592277d5108..67037eb9a80ee 100644
+--- a/drivers/gpu/drm/vgem/vgem_drv.c
++++ b/drivers/gpu/drm/vgem/vgem_drv.c
+@@ -220,32 +220,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ return 0;
+ }
+
+-static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+- uint32_t handle, uint64_t *offset)
+-{
+- struct drm_gem_object *obj;
+- int ret;
+-
+- obj = drm_gem_object_lookup(file, handle);
+- if (!obj)
+- return -ENOENT;
+-
+- if (!obj->filp) {
+- ret = -EINVAL;
+- goto unref;
+- }
+-
+- ret = drm_gem_create_mmap_offset(obj);
+- if (ret)
+- goto unref;
+-
+- *offset = drm_vma_node_offset_addr(&obj->vma_node);
+-unref:
+- drm_gem_object_put_unlocked(obj);
+-
+- return ret;
+-}
+-
+ static struct drm_ioctl_desc vgem_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+@@ -439,7 +413,6 @@ static struct drm_driver vgem_driver = {
+ .fops = &vgem_driver_fops,
+
+ .dumb_create = vgem_gem_dumb_create,
+- .dumb_map_offset = vgem_gem_dumb_map,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
+index 8ac9e03c05b45..ca8f726dab2e7 100644
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -2012,7 +2012,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
+ {
+ int type = *((unsigned int *)kp->arg);
+
+- return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
++ return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
+ }
+
+ static int __init psmouse_init(void)
+diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
+index 97499b2af7144..20524376b83be 100644
+--- a/drivers/media/pci/ttpci/budget-core.c
++++ b/drivers/media/pci/ttpci/budget-core.c
+@@ -383,20 +383,25 @@ static int budget_register(struct budget *budget)
+ ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
+
+ if (ret < 0)
+- return ret;
++ goto err_release_dmx;
+
+ budget->mem_frontend.source = DMX_MEMORY_FE;
+ ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
+ if (ret < 0)
+- return ret;
++ goto err_release_dmx;
+
+ ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
+ if (ret < 0)
+- return ret;
++ goto err_release_dmx;
+
+ dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
+
+ return 0;
+
++err_release_dmx:
++ dvb_dmxdev_release(&budget->dmxdev);
++ dvb_dmx_release(&budget->demux);
++ return ret;
+ }
+
+ static void budget_unregister(struct budget *budget)
+diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
+index 2ee4cd9e6d80f..d984f45c03149 100644
+--- a/drivers/media/platform/davinci/vpss.c
++++ b/drivers/media/platform/davinci/vpss.c
+@@ -514,19 +514,31 @@ static void vpss_exit(void)
+
+ static int __init vpss_init(void)
+ {
++ int ret;
++
+ if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
+ return -EBUSY;
+
+ oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+ if (unlikely(!oper_cfg.vpss_regs_base2)) {
+- release_mem_region(VPSS_CLK_CTRL, 4);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto err_ioremap;
+ }
+
+ writel(VPSS_CLK_CTRL_VENCCLKEN |
+- VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
++ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
++
++ ret = platform_driver_register(&vpss_driver);
++ if (ret)
++ goto err_pd_register;
++
++ return 0;
+
+- return platform_driver_register(&vpss_driver);
++err_pd_register:
++ iounmap(oper_cfg.vpss_regs_base2);
++err_ioremap:
++ release_mem_region(VPSS_CLK_CTRL, 4);
++ return ret;
+ }
+ subsys_initcall(vpss_init);
+ module_exit(vpss_exit);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1f867e275408e..861d2c0a521a4 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2010,7 +2010,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
+ int ret;
+
+ ret = __bond_release_one(bond_dev, slave_dev, false, true);
+- if (ret == 0 && !bond_has_slaves(bond)) {
++ if (ret == 0 && !bond_has_slaves(bond) &&
++ bond_dev->reg_state != NETREG_UNREGISTERING) {
+ bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ netdev_info(bond_dev, "Destroying bond %s\n",
+ bond_dev->name);
+@@ -2752,6 +2753,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
+ if (bond_time_in_interval(bond, last_rx, 1)) {
+ bond_propose_link_state(slave, BOND_LINK_UP);
+ commit++;
++ } else if (slave->link == BOND_LINK_BACK) {
++ bond_propose_link_state(slave, BOND_LINK_FAIL);
++ commit++;
+ }
+ continue;
+ }
+@@ -2862,6 +2866,19 @@ static void bond_ab_arp_commit(struct bonding *bond)
+
+ continue;
+
++ case BOND_LINK_FAIL:
++ bond_set_slave_link_state(slave, BOND_LINK_FAIL,
++ BOND_SLAVE_NOTIFY_NOW);
++ bond_set_slave_inactive_flags(slave,
++ BOND_SLAVE_NOTIFY_NOW);
++
++ /* A slave has just been enslaved and has become
++ * the current active slave.
++ */
++ if (rtnl_dereference(bond->curr_active_slave))
++ RCU_INIT_POINTER(bond->current_arp_slave, NULL);
++ continue;
++
+ default:
+ netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
+ slave->link_new_state, slave->dev->name);
+@@ -2911,8 +2928,6 @@ static bool bond_ab_arp_probe(struct bonding *bond)
+ return should_notify_rtnl;
+ }
+
+- bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
+-
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (!found && !before && bond_slave_is_up(slave))
+ before = slave;
+@@ -4156,13 +4171,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ return ret;
+ }
+
++static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
++{
++ if (speed == 0 || speed == SPEED_UNKNOWN)
++ speed = slave->speed;
++ else
++ speed = min(speed, slave->speed);
++
++ return speed;
++}
++
+ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
+ struct ethtool_link_ksettings *cmd)
+ {
+ struct bonding *bond = netdev_priv(bond_dev);
+- unsigned long speed = 0;
+ struct list_head *iter;
+ struct slave *slave;
++ u32 speed = 0;
+
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.port = PORT_OTHER;
+@@ -4174,8 +4199,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
+ */
+ bond_for_each_slave(bond, slave, iter) {
+ if (bond_slave_can_tx(slave)) {
+- if (slave->speed != SPEED_UNKNOWN)
+- speed += slave->speed;
++ if (slave->speed != SPEED_UNKNOWN) {
++ if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
++ speed = bond_mode_bcast_speed(slave,
++ speed);
++ else
++ speed += slave->speed;
++ }
+ if (cmd->base.duplex == DUPLEX_UNKNOWN &&
+ slave->duplex != DUPLEX_UNKNOWN)
+ cmd->base.duplex = slave->duplex;
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 274d369151107..5c3fa0be8844e 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1160,6 +1160,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
+ return ret;
+
+ switch (ret) {
++ case -ETIMEDOUT:
++ return ret;
+ case -ENOSPC:
+ dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
+ addr, vid);
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 8ba915cc4c2e4..22f964ef859e5 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3536,11 +3536,11 @@ failed_mii_init:
failed_irq:
failed_init:
fec_ptp_stop(pdev);
+- if (fep->reg_phy)
+- regulator_disable(fep->reg_phy);
failed_reset:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
++ if (fep->reg_phy)
++ regulator_disable(fep->reg_phy);
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
failed_clk_ahb:
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+index 5d5f422cbae55..f82da2b47d9a5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+@@ -1175,7 +1175,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
+ #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+ #define I40E_AQC_SET_VSI_DEFAULT 0x08
+ #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
++#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000
+ __le16 seid;
+ #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index 111426ba5fbce..3fd2dfaf2bd53 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -1914,6 +1914,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ return status;
+ }
+
++/**
++ * i40e_is_aq_api_ver_ge
++ * @aq: pointer to AdminQ info containing HW API version to compare
++ * @maj: API major value
++ * @min: API minor value
++ *
++ * Assert whether current HW API version is greater/equal than provided.
++ **/
++static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
++ u16 min)
++{
++ return (aq->api_maj_ver > maj ||
++ (aq->api_maj_ver == maj && aq->api_min_ver >= min));
++}
++
+ /**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+@@ -2039,18 +2054,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+
+ if (set) {
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+- if (rx_only_promisc &&
+- (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+- (hw->aq.api_maj_ver > 1)))
+- flags |= I40E_AQC_SET_VSI_PROMISC_TX;
++ if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
++ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+ }
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+- if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
+- (hw->aq.api_maj_ver > 1))
+- cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
++ cmd->valid_flags |=
++ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
+
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+@@ -2147,11 +2160,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+- if (enable)
++ if (enable) {
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
++ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
++ }
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
++ cmd->valid_flags |=
++ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index aa2b446d6ad0f..f4475cbf8ce86 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -11822,6 +11822,9 @@ static void i40e_remove(struct pci_dev *pdev)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
+
++ while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
++ usleep_range(1000, 2000);
++
+ /* no more scheduling of any task */
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_DOWN, pf->state);
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 10c3480c2da89..dbc6c9ed1c8f8 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -500,7 +500,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
+ int rc;
+
+ skb->dev = vf_netdev;
+- skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
++ skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
+
+ rc = dev_queue_xmit(skb);
+ if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
+diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
+index a1c44d0c85578..30cbe22c57a8e 100644
+--- a/drivers/rtc/rtc-goldfish.c
++++ b/drivers/rtc/rtc-goldfish.c
+@@ -87,6 +87,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
+ rtc_alarm64 = rtc_alarm * NSEC_PER_SEC;
+ writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
+ writel(rtc_alarm64, base + TIMER_ALARM_LOW);
++ writel(1, base + TIMER_IRQ_ENABLED);
+ } else {
+ /*
+ * if this function was called with enabled=0
+diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
+index 28b50ab2fbb01..62f83cc151b22 100644
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -605,8 +605,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+
+ if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ goto out;
+- if (IS_ERR(fp))
+- goto redisc;
++ if (IS_ERR(fp)) {
++ mutex_lock(&disc->disc_mutex);
++ fc_disc_restart(disc);
++ mutex_unlock(&disc->disc_mutex);
++ goto out;
++ }
+
+ cp = fc_frame_payload_get(fp, sizeof(*cp));
+ if (!cp)
+@@ -633,7 +637,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+ new_rdata->disc_id = disc->disc_id;
+ fc_rport_login(new_rdata);
+ }
+- goto out;
++ goto free_fp;
+ }
+ rdata->disc_id = disc->disc_id;
+ mutex_unlock(&rdata->rp_mutex);
+@@ -650,6 +654,8 @@ redisc:
+ fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
+ }
++free_fp:
++ fc_frame_free(fp);
+ out:
+ kref_put(&rdata->kref, fc_rport_destroy);
+ if (!IS_ERR(fp))
+diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
+index 71f73d1d1ad1f..6c944fbefd40a 100644
+--- a/drivers/scsi/ufs/ufs_quirks.h
++++ b/drivers/scsi/ufs/ufs_quirks.h
+@@ -21,6 +21,7 @@
+ #define UFS_ANY_VENDOR 0xFFFF
+ #define UFS_ANY_MODEL "ANY_MODEL"
+
++#define UFS_VENDOR_MICRON 0x12C
+ #define UFS_VENDOR_TOSHIBA 0x198
+ #define UFS_VENDOR_SAMSUNG 0x1CE
+ #define UFS_VENDOR_SKHYNIX 0x1AD
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 1e2a97a10033b..11e917b44a0f1 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -189,6 +189,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+
+ static struct ufs_dev_fix ufs_fixups[] = {
+ /* UFS cards deviations table */
++ UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
++ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index a75f2a2cf7805..4b6a1629969f3 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -827,4 +827,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
+
+ endif # SPI_SLAVE
+
++config SPI_DYNAMIC
++ def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
++
+ endif # SPI
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 49eee894f51d4..ab6a4f85bcde7 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -428,6 +428,12 @@ static LIST_HEAD(spi_controller_list);
+ */
+ static DEFINE_MUTEX(board_lock);
+
++/*
++ * Prevents addition of devices with same chip select and
++ * addition of devices below an unregistering controller.
++ */
++static DEFINE_MUTEX(spi_add_lock);
++
+ /**
+ * spi_alloc_device - Allocate a new SPI device
+ * @ctlr: Controller to which device is connected
+@@ -506,7 +512,6 @@ static int spi_dev_check(struct device *dev, void *data)
+ */
+ int spi_add_device(struct spi_device *spi)
+ {
+- static DEFINE_MUTEX(spi_add_lock);
+ struct spi_controller *ctlr = spi->controller;
+ struct device *dev = ctlr->dev.parent;
+ int status;
+@@ -534,6 +539,13 @@ int spi_add_device(struct spi_device *spi)
+ goto done;
+ }
+
++ /* Controller may unregister concurrently */
++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
++ !device_is_registered(&ctlr->dev)) {
++ status = -ENODEV;
++ goto done;
++ }
++
+ if (ctlr->cs_gpios)
+ spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
+
+@@ -2265,6 +2277,10 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ struct spi_controller *found;
+ int id = ctlr->bus_num;
+
++ /* Prevent addition of new devices, unregister existing ones */
++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
++ mutex_lock(&spi_add_lock);
++
+ device_for_each_child(&ctlr->dev, NULL, __unregister);
+
+ /* First make sure that this controller was ever added */
+@@ -2285,6 +2301,9 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ if (found == ctlr)
+ idr_remove(&spi_master_idr, id);
+ mutex_unlock(&board_lock);
++
++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
++ mutex_unlock(&spi_add_lock);
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_controller);
+
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 35a3750a6ddd3..f22425501bc16 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -1086,13 +1086,16 @@ static int vfio_bus_type(struct device *dev, void *data)
+ static int vfio_iommu_replay(struct vfio_iommu *iommu,
+ struct vfio_domain *domain)
+ {
+- struct vfio_domain *d;
++ struct vfio_domain *d = NULL;
+ struct rb_node *n;
+ unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ int ret;
+
+ /* Arbitrarily pick the first domain in the list for lookups */
+- d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
++ if (!list_empty(&iommu->domain_list))
++ d = list_first_entry(&iommu->domain_list,
++ struct vfio_domain, next);
++
+ n = rb_first(&iommu->dma_list);
+
+ for (; n; n = rb_next(n)) {
+@@ -1110,6 +1113,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
+ phys_addr_t p;
+ dma_addr_t i;
+
++ if (WARN_ON(!d)) { /* mapped w/o a domain?! */
++ ret = -EINVAL;
++ goto unwind;
++ }
++
+ phys = iommu_iova_to_phys(d->domain, iova);
+
+ if (WARN_ON(!phys)) {
+@@ -1139,7 +1147,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
+ if (npage <= 0) {
+ WARN_ON(!npage);
+ ret = (int)npage;
+- return ret;
++ goto unwind;
+ }
+
+ phys = pfn << PAGE_SHIFT;
+@@ -1148,14 +1156,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
+
+ ret = iommu_map(domain->domain, iova, phys,
+ size, dma->prot | domain->prot);
+- if (ret)
+- return ret;
++ if (ret) {
++ if (!dma->iommu_mapped)
++ vfio_unpin_pages_remote(dma, iova,
++ phys >> PAGE_SHIFT,
++ size >> PAGE_SHIFT,
++ true);
++ goto unwind;
++ }
+
+ iova += size;
+ }
++ }
++
++ /* All dmas are now mapped, defer to second tree walk for unwind */
++ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
++ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
++
+ dma->iommu_mapped = true;
+ }
++
+ return 0;
++
++unwind:
++ for (; n; n = rb_prev(n)) {
++ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
++ dma_addr_t iova;
++
++ if (dma->iommu_mapped) {
++ iommu_unmap(domain->domain, dma->iova, dma->size);
++ continue;
++ }
++
++ iova = dma->iova;
++ while (iova < dma->iova + dma->size) {
++ phys_addr_t phys, p;
++ size_t size;
++ dma_addr_t i;
++
++ phys = iommu_iova_to_phys(domain->domain, iova);
++ if (!phys) {
++ iova += PAGE_SIZE;
++ continue;
++ }
++
++ size = PAGE_SIZE;
++ p = phys + size;
++ i = iova + size;
++ while (i < dma->iova + dma->size &&
++ p == iommu_iova_to_phys(domain->domain, i)) {
++ size += PAGE_SIZE;
++ p += PAGE_SIZE;
++ i += PAGE_SIZE;
++ }
++
++ iommu_unmap(domain->domain, iova, size);
++ vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
++ size >> PAGE_SHIFT, true);
++ }
++ }
++
++ return ret;
+ }
+
+ /*
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index b82bb0b081615..51278f8bd3ab3 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -829,6 +829,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+ {
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
++ if (unlikely(vq->broken))
++ return false;
++
+ virtio_mb(vq->weak_barriers);
+ return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
+ }
+diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
+index 5f6b77ea34fb5..128375ff80b8c 100644
+--- a/drivers/xen/preempt.c
++++ b/drivers/xen/preempt.c
+@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+ asmlinkage __visible void xen_maybe_preempt_hcall(void)
+ {
+ if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
+- && need_resched())) {
++ && need_resched() && !preempt_count())) {
+ /*
+ * Clear flag as we may be rescheduled on a different
+ * cpu.
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 5412b12491cb8..de951987fd23d 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3262,6 +3262,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
+ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ unsigned long new_flags);
+ int btrfs_sync_fs(struct super_block *sb, int wait);
++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
++ u64 subvol_objectid);
+
+ static inline __printf(2, 3)
+ void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
+diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
+index 3aeb5770f8965..b6ce765aa7f33 100644
+--- a/fs/btrfs/export.c
++++ b/fs/btrfs/export.c
+@@ -56,9 +56,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ return type;
+ }
+
+-static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+- u64 root_objectid, u32 generation,
+- int check_generation)
++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
++ u64 root_objectid, u32 generation,
++ int check_generation)
+ {
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ struct btrfs_root *root;
+@@ -151,7 +151,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
+ return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
+ }
+
+-static struct dentry *btrfs_get_parent(struct dentry *child)
++struct dentry *btrfs_get_parent(struct dentry *child)
+ {
+ struct inode *dir = d_inode(child);
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
+diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
+index 91b3908e7c549..15db024621414 100644
+--- a/fs/btrfs/export.h
++++ b/fs/btrfs/export.h
+@@ -17,4 +17,9 @@ struct btrfs_fid {
+ u64 parent_root_objectid;
+ } __attribute__ ((packed));
+
++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
++ u64 root_objectid, u32 generation,
++ int check_generation);
++struct dentry *btrfs_get_parent(struct dentry *child);
++
+ #endif
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 57908ee964a20..17856e92b93d1 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -629,7 +629,21 @@ cont:
+ btrfs_free_reserved_data_space_noquota(inode,
+ start,
+ end - start + 1);
+- goto free_pages_out;
++
++ /*
++ * Ensure we only free the compressed pages if we have
++ * them allocated, as we can still reach here with
++ * inode_need_compress() == false.
++ */
++ if (pages) {
++ for (i = 0; i < nr_pages; i++) {
++ WARN_ON(pages[i]->mapping);
++ put_page(pages[i]);
++ }
++ kfree(pages);
++ }
++
++ return;
+ }
+ }
+
+@@ -708,13 +722,6 @@ cleanup_and_bail_uncompressed:
+ *num_added += 1;
+
+ return;
+-
+-free_pages_out:
+- for (i = 0; i < nr_pages; i++) {
+- WARN_ON(pages[i]->mapping);
+- put_page(pages[i]);
+- }
+- kfree(pages);
+ }
+
+ static void free_async_extent_pages(struct async_extent *async_extent)
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 17a8463ef35c1..eb64d4b159e07 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -939,8 +939,8 @@ out:
+ return error;
+ }
+
+-static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
+- u64 subvol_objectid)
++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
++ u64 subvol_objectid)
+ {
+ struct btrfs_root *root = fs_info->tree_root;
+ struct btrfs_root *fs_root;
+@@ -1221,6 +1221,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
+ {
+ struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
+ char *compress_type;
++ const char *subvol_name;
+
+ if (btrfs_test_opt(info, DEGRADED))
+ seq_puts(seq, ",degraded");
+@@ -1307,8 +1308,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
+ #endif
+ seq_printf(seq, ",subvolid=%llu",
+ BTRFS_I(d_inode(dentry))->root->root_key.objectid);
+- seq_puts(seq, ",subvol=");
+- seq_dentry(seq, dentry, " \t\n\\");
++ subvol_name = btrfs_get_subvol_name_from_objectid(info,
++ BTRFS_I(d_inode(dentry))->root->root_key.objectid);
++ if (!IS_ERR(subvol_name)) {
++ seq_puts(seq, ",subvol=");
++ seq_escape(seq, subvol_name, " \t\n\\");
++ kfree(subvol_name);
++ }
+ return 0;
+ }
+
+@@ -1427,8 +1433,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
+ goto out;
+ }
+ }
+- subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb),
+- subvol_objectid);
++ subvol_name = btrfs_get_subvol_name_from_objectid(
++ btrfs_sb(mnt->mnt_sb), subvol_objectid);
+ if (IS_ERR(subvol_name)) {
+ root = ERR_CAST(subvol_name);
+ subvol_name = NULL;
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index f05341bda1d14..383546ff62f04 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -25,6 +25,7 @@
+ #include <linux/bug.h>
+ #include <linux/genhd.h>
+ #include <linux/debugfs.h>
++#include <linux/sched/mm.h>
+
+ #include "ctree.h"
+ #include "disk-io.h"
+@@ -749,7 +750,9 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
+ {
+ int error = 0;
+ struct btrfs_device *dev;
++ unsigned int nofs_flag;
+
++ nofs_flag = memalloc_nofs_save();
+ list_for_each_entry(dev, &fs_devices->devices, dev_list) {
+ struct hd_struct *disk;
+ struct kobject *disk_kobj;
+@@ -768,6 +771,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
+ if (error)
+ break;
+ }
++ memalloc_nofs_restore(nofs_flag);
+
+ return error;
+ }
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index c291bf61afb9c..00f0902e27e88 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1900,9 +1900,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+ * not already there, and calling reverse_path_check()
+ * during ep_insert().
+ */
+- if (list_empty(&epi->ffd.file->f_tfile_llink))
++ if (list_empty(&epi->ffd.file->f_tfile_llink)) {
++ get_file(epi->ffd.file);
+ list_add(&epi->ffd.file->f_tfile_llink,
+ &tfile_check_list);
++ }
+ }
+ }
+ mutex_unlock(&ep->mtx);
+@@ -1946,6 +1948,7 @@ static void clear_tfile_check_list(void)
+ file = list_first_entry(&tfile_check_list, struct file,
+ f_tfile_llink);
+ list_del_init(&file->f_tfile_llink);
++ fput(file);
+ }
+ INIT_LIST_HEAD(&tfile_check_list);
+ }
+@@ -2096,13 +2099,13 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ mutex_lock(&epmutex);
+ if (is_file_epoll(tf.file)) {
+ error = -ELOOP;
+- if (ep_loop_check(ep, tf.file) != 0) {
+- clear_tfile_check_list();
++ if (ep_loop_check(ep, tf.file) != 0)
+ goto error_tgt_fput;
+- }
+- } else
++ } else {
++ get_file(tf.file);
+ list_add(&tf.file->f_tfile_llink,
+ &tfile_check_list);
++ }
+ mutex_lock_nested(&ep->mtx, 0);
+ if (is_file_epoll(tf.file)) {
+ tep = tf.file->private_data;
+@@ -2126,8 +2129,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ error = ep_insert(ep, &epds, tf.file, fd, full_check);
+ } else
+ error = -EEXIST;
+- if (full_check)
+- clear_tfile_check_list();
+ break;
+ case EPOLL_CTL_DEL:
+ if (epi)
+@@ -2150,8 +2151,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ mutex_unlock(&ep->mtx);
+
+ error_tgt_fput:
+- if (full_check)
++ if (full_check) {
++ clear_tfile_check_list();
+ mutex_unlock(&epmutex);
++ }
+
+ fdput(tf);
+ error_fput:
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 161099f39ab9c..3f999053457b6 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1308,8 +1308,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
+ ext4_match(fname, de)) {
+ /* found a match - just to be sure, do
+ * a full check */
+- if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
+- bh->b_size, offset))
++ if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
++ buf_size, offset))
+ return -1;
+ *res_dir = de;
+ return 1;
+@@ -1741,7 +1741,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+ blocksize, hinfo, map);
+ map -= count;
+ dx_sort_map(map, count);
+- /* Split the existing block in the middle, size-wise */
++ /* Ensure that neither split block is over half full */
+ size = 0;
+ move = 0;
+ for (i = count-1; i >= 0; i--) {
+@@ -1751,8 +1751,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+ size += map[i].size;
+ move++;
+ }
+- /* map index at which we will split */
+- split = count - move;
++ /*
++ * map index at which we will split
++ *
++ * If the sum of active entries didn't exceed half the block size, just
++ * split it in half by count; each resulting block will have at least
++ * half the space free.
++ */
++ if (i > 0)
++ split = count - move;
++ else
++ split = count/2;
++
+ hash2 = map[split].hash;
+ continued = hash2 == map[split - 1].hash;
+ dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
+@@ -2353,7 +2363,7 @@ int ext4_generic_delete_entry(handle_t *handle,
+ de = (struct ext4_dir_entry_2 *)entry_buf;
+ while (i < buf_size - csum_size) {
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+- bh->b_data, bh->b_size, i))
++ entry_buf, buf_size, i))
+ return -EFSCORRUPTED;
+ if (de == de_del) {
+ if (pde)
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 6e054b368b5fe..93a466cf58ba7 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1356,8 +1356,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
+ int ret;
+
+ /* Buffer got discarded which means block device got invalidated */
+- if (!buffer_mapped(bh))
++ if (!buffer_mapped(bh)) {
++ unlock_buffer(bh);
+ return -EIO;
++ }
+
+ trace_jbd2_write_superblock(journal, write_flags);
+ if (!(journal->j_flags & JBD2_BARRIER))
+diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
+index e5a6deb38e1e1..f4a5ec92f5dc7 100644
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
+ int ret;
+ uint32_t now = get_seconds();
+
++ mutex_lock(&f->sem);
+ for (fd = f->dents ; fd; fd = fd->next) {
+- if (fd->ino)
++ if (fd->ino) {
++ mutex_unlock(&f->sem);
+ return -ENOTEMPTY;
++ }
+ }
++ mutex_unlock(&f->sem);
+
+ ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
+ dentry->d_name.len, f, now);
+diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
+index f86f51f99aceb..1dcadd22b440d 100644
+--- a/fs/romfs/storage.c
++++ b/fs/romfs/storage.c
+@@ -221,10 +221,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos,
+ size_t limit;
+
+ limit = romfs_maxsize(sb);
+- if (pos >= limit)
++ if (pos >= limit || buflen > limit - pos)
+ return -EIO;
+- if (buflen > limit - pos)
+- buflen = limit - pos;
+
+ #ifdef CONFIG_ROMFS_ON_MTD
+ if (sb->s_mtd)
+diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
+index d04637181ef21..980c9429abec5 100644
+--- a/fs/xfs/xfs_sysfs.h
++++ b/fs/xfs/xfs_sysfs.h
+@@ -44,9 +44,11 @@ xfs_sysfs_init(
+ struct xfs_kobj *parent_kobj,
+ const char *name)
+ {
++ struct kobject *parent;
++
++ parent = parent_kobj ? &parent_kobj->kobject : NULL;
+ init_completion(&kobj->complete);
+- return kobject_init_and_add(&kobj->kobject, ktype,
+- &parent_kobj->kobject, "%s", name);
++ return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
+ }
+
+ static inline void
+diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
+index c3d547211d160..9c42e50a5cb7e 100644
+--- a/fs/xfs/xfs_trans_dquot.c
++++ b/fs/xfs/xfs_trans_dquot.c
+@@ -669,7 +669,7 @@ xfs_trans_dqresv(
+ }
+ }
+ if (ninos > 0) {
+- total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
++ total_count = dqp->q_res_icount + ninos;
+ timer = be32_to_cpu(dqp->q_core.d_itimer);
+ warns = be16_to_cpu(dqp->q_core.d_iwarns);
+ warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
+diff --git a/kernel/relay.c b/kernel/relay.c
+index b141ce697679f..53c2a1a4b057f 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -196,6 +196,7 @@ free_buf:
+ static void relay_destroy_channel(struct kref *kref)
+ {
+ struct rchan *chan = container_of(kref, struct rchan, kref);
++ free_percpu(chan->buf);
+ kfree(chan);
+ }
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index d6464045d3b97..194125cf2d2b9 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4575,25 +4575,21 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+ {
+- unsigned long check_addr = *start;
++ unsigned long a_start, a_end;
+
+ if (!(vma->vm_flags & VM_MAYSHARE))
+ return;
+
+- for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
+- unsigned long a_start = check_addr & PUD_MASK;
+- unsigned long a_end = a_start + PUD_SIZE;
++ /* Extend the range to be PUD aligned for a worst case scenario */
++ a_start = ALIGN_DOWN(*start, PUD_SIZE);
++ a_end = ALIGN(*end, PUD_SIZE);
+
+- /*
+- * If sharing is possible, adjust start/end if necessary.
+- */
+- if (range_in_vma(vma, a_start, a_end)) {
+- if (a_start < *start)
+- *start = a_start;
+- if (a_end > *end)
+- *end = a_end;
+- }
+- }
++ /*
++ * Intersect the range with the vma range, since pmd sharing won't be
++ * across vma after all
++ */
++ *start = max(vma->vm_start, a_start);
++ *end = min(vma->vm_end, a_end);
+ }
+
+ /*
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 04b4c38d0c184..9dfe364d4c0d1 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -394,7 +394,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
+
+ static inline int khugepaged_test_exit(struct mm_struct *mm)
+ {
+- return atomic_read(&mm->mm_users) == 0;
++ return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
+ }
+
+ int __khugepaged_enter(struct mm_struct *mm)
+@@ -407,7 +407,7 @@ int __khugepaged_enter(struct mm_struct *mm)
+ return -ENOMEM;
+
+ /* __khugepaged_exit() must not run from under us */
+- VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
++ VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
+ if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
+ free_mm_slot(mm_slot);
+ return 0;
+@@ -1006,9 +1006,6 @@ static void collapse_huge_page(struct mm_struct *mm,
+ * handled by the anon_vma lock + PG_lock.
+ */
+ down_write(&mm->mmap_sem);
+- result = SCAN_ANY_PROCESS;
+- if (!mmget_still_valid(mm))
+- goto out;
+ result = hugepage_vma_revalidate(mm, address, &vma);
+ if (result)
+ goto out;
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index e992afe3a58e9..a3958b4fec6cb 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1114,6 +1114,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ spin_lock(&zone->lock);
+ isolated_pageblocks = has_isolate_pageblock(zone);
+
++ /*
++ * Ensure proper count is passed which otherwise would stuck in the
++ * below while (list_empty(list)) loop.
++ */
++ count = min(pcp->count, count);
+ while (count) {
+ struct page *page;
+ struct list_head *list;
+@@ -7018,7 +7023,7 @@ int __meminit init_per_zone_wmark_min(void)
+
+ return 0;
+ }
+-core_initcall(init_per_zone_wmark_min)
++postcore_initcall(init_per_zone_wmark_min)
+
+ /*
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
+index 3633eb30dd135..4f949ad50d6a7 100644
+--- a/sound/soc/codecs/msm8916-wcd-analog.c
++++ b/sound/soc/codecs/msm8916-wcd-analog.c
+@@ -16,8 +16,8 @@
+
+ #define CDC_D_REVISION1 (0xf000)
+ #define CDC_D_PERPH_SUBTYPE (0xf005)
+-#define CDC_D_INT_EN_SET (0x015)
+-#define CDC_D_INT_EN_CLR (0x016)
++#define CDC_D_INT_EN_SET (0xf015)
++#define CDC_D_INT_EN_CLR (0xf016)
+ #define MBHC_SWITCH_INT BIT(7)
+ #define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6)
+ #define MBHC_BUTTON_PRESS_DET BIT(5)
+diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+index 4558c8b930363..3a645fc425cd4 100644
+--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+@@ -339,7 +339,7 @@ static int sst_media_open(struct snd_pcm_substream *substream,
+
+ ret_val = power_up_sst(stream);
+ if (ret_val < 0)
+- return ret_val;
++ goto out_power_up;
+
+ /* Make sure, that the period size is always even */
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+@@ -348,8 +348,9 @@ static int sst_media_open(struct snd_pcm_substream *substream,
+ return snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ out_ops:
+- kfree(stream);
+ mutex_unlock(&sst_lock);
++out_power_up:
++ kfree(stream);
+ return ret_val;
+ }
+
+diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
+index 8f7f9d05f38c0..bfa6d9d215569 100644
+--- a/tools/perf/util/probe-finder.c
++++ b/tools/perf/util/probe-finder.c
+@@ -1354,7 +1354,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
+ tf.ntevs = 0;
+
+ ret = debuginfo__find_probes(dbg, &tf.pf);
+- if (ret < 0) {
++ if (ret < 0 || tf.ntevs == 0) {
+ for (i = 0; i < tf.ntevs; i++)
+ clear_probe_trace_event(&tf.tevs[i]);
+ zfree(tevs);
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index 3814cdad643a5..7fe673248e984 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -307,12 +307,6 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+ next = stage2_pgd_addr_end(addr, end);
+ if (!stage2_pgd_none(*pgd))
+ unmap_stage2_puds(kvm, pgd, addr, next);
+- /*
+- * If the range is too large, release the kvm->mmu_lock
+- * to prevent starvation and lockup detector warnings.
+- */
+- if (next != end)
+- cond_resched_lock(&kvm->mmu_lock);
+ } while (pgd++, addr = next, addr != end);
+ }
+