Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 26 Aug 2020 11:12:54
Message-Id: 1598440356.eb8231c271266cfd53d5404c03a12517c643c7c7.mpagano@gentoo
commit: eb8231c271266cfd53d5404c03a12517c643c7c7
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 26 11:12:36 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 26 11:12:36 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=eb8231c2

Linux patch 4.4.234

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1233_linux-4.4.234.patch | 934 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 938 insertions(+)

diff --git a/0000_README b/0000_README
index 9a38dca..16dd710 100644
--- a/0000_README
+++ b/0000_README
@@ -975,6 +975,10 @@ Patch: 1232_linux-4.4.233.patch
From: http://www.kernel.org
Desc: Linux 4.4.233

+Patch: 1233_linux-4.4.234.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.234
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1233_linux-4.4.234.patch b/1233_linux-4.4.234.patch
new file mode 100644
index 0000000..2762e22
--- /dev/null
+++ b/1233_linux-4.4.234.patch
@@ -0,0 +1,934 @@
+diff --git a/Makefile b/Makefile
+index 8f363a3bcaf81..573b646a19936 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 233
++SUBLEVEL = 234
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
+index ff4049155c840..355aec0867f4d 100644
+--- a/arch/alpha/include/asm/io.h
++++ b/arch/alpha/include/asm/io.h
+@@ -491,10 +491,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
+ }
+ #endif
+
+-#define ioread16be(p) be16_to_cpu(ioread16(p))
+-#define ioread32be(p) be32_to_cpu(ioread32(p))
+-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
+-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
++#define ioread16be(p) swab16(ioread16(p))
++#define ioread32be(p) swab32(ioread32(p))
++#define iowrite16be(v,p) iowrite16(swab16(v), (p))
++#define iowrite32be(v,p) iowrite32(swab32(v), (p))
+
+ #define inb_p inb
+ #define inw_p inw
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index e0267532bd4e0..edd392fdc14bb 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -300,14 +300,6 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+ next = kvm_pgd_addr_end(addr, end);
+ if (!pgd_none(*pgd))
+ unmap_puds(kvm, pgd, addr, next);
+- /*
+- * If we are dealing with a large range in
+- * stage2 table, release the kvm->mmu_lock
+- * to prevent starvation and lockup detector
+- * warnings.
+- */
+- if (kvm && (next != end))
+- cond_resched_lock(&kvm->mmu_lock);
+ } while (pgd++, addr = next, addr != end);
+ }
+
+diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
+index 3177ce8331d69..baee0c77b9818 100644
+--- a/arch/m68k/include/asm/m53xxacr.h
++++ b/arch/m68k/include/asm/m53xxacr.h
+@@ -88,9 +88,9 @@
+ * coherency though in all cases. And for copyback caches we will need
+ * to push cached data as well.
+ */
+-#define CACHE_INIT CACR_CINVA
+-#define CACHE_INVALIDATE CACR_CINVA
+-#define CACHE_INVALIDATED CACR_CINVA
++#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC)
++#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA)
++#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
+
+ #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
+ (0x000f0000) + \
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index d1f860ca03ade..101c202c813c8 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -192,6 +192,9 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
+ return MM_FAULT_CONTINUE;
+ }
+
++// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE
++#define SIGFRAME_MAX_SIZE (4096 + 128)
+
+ /*
+ * For 600- and 800-family processors, the error_code parameter is DSISR
+ * for a data fault, SRR1 for an instruction fault. For 400-family processors
+@@ -341,7 +344,7 @@ retry:
+ /*
+ * N.B. The POWER/Open ABI allows programs to access up to
+ * 288 bytes below the stack pointer.
+- * The kernel signal delivery code writes up to about 1.5kB
++ * The kernel signal delivery code writes up to about 4kB
+ * below the stack pointer (r1) before decrementing it.
+ * The exec code can write slightly over 640kB to the stack
+ * before setting the user r1. Thus we allow the stack to
+@@ -365,7 +368,7 @@ retry:
+ * between the last mapped region and the stack will
+ * expand the stack rather than segfaulting.
+ */
+- if (address + 2048 < uregs->gpr[1] && !store_update_sp)
++ if (address + SIGFRAME_MAX_SIZE < uregs->gpr[1] && !store_update_sp)
+ goto bad_area;
+ }
+ if (expand_stack(vma, address))
+diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
+index 31ca56e593f58..b9dc2ef64ed88 100644
+--- a/drivers/gpu/drm/imx/imx-ldb.c
++++ b/drivers/gpu/drm/imx/imx-ldb.c
+@@ -305,6 +305,7 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
+ {
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
++ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+ int mux, ret;
+
+ /*
+@@ -321,14 +322,14 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
+
+ drm_panel_disable(imx_ldb_ch->panel);
+
+- if (imx_ldb_ch == &ldb->channel[0])
++ if (imx_ldb_ch == &ldb->channel[0] || dual)
+ ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
+- else if (imx_ldb_ch == &ldb->channel[1])
++ if (imx_ldb_ch == &ldb->channel[1] || dual)
+ ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
+
+ regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
+
+- if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
++ if (dual) {
+ clk_disable_unprepare(ldb->clk[0]);
+ clk_disable_unprepare(ldb->clk[1]);
+ }
+diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
+index ad18dab0ac476..5bd9633541b07 100644
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -1911,7 +1911,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
+ {
+ int type = *((unsigned int *)kp->arg);
+
+- return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
++ return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
+ }
+
+ static int __init psmouse_init(void)
+diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
+index e9674b40007c1..6107c469efa07 100644
+--- a/drivers/media/pci/ttpci/budget-core.c
++++ b/drivers/media/pci/ttpci/budget-core.c
+@@ -386,20 +386,25 @@ static int budget_register(struct budget *budget)
+ ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
+
+ if (ret < 0)
+- return ret;
++ goto err_release_dmx;
+
+ budget->mem_frontend.source = DMX_MEMORY_FE;
+ ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
+ if (ret < 0)
+- return ret;
++ goto err_release_dmx;
+
+ ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
+ if (ret < 0)
+- return ret;
++ goto err_release_dmx;
+
+ dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
+
+ return 0;
++
++err_release_dmx:
++ dvb_dmxdev_release(&budget->dmxdev);
++ dvb_dmx_release(&budget->demux);
++ return ret;
+ }
+
+ static void budget_unregister(struct budget *budget)
+diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
+index c2c68988e38ac..9884b34d6f406 100644
+--- a/drivers/media/platform/davinci/vpss.c
++++ b/drivers/media/platform/davinci/vpss.c
+@@ -519,19 +519,31 @@ static void vpss_exit(void)
+
+ static int __init vpss_init(void)
+ {
++ int ret;
++
+ if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
+ return -EBUSY;
+
+ oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+ if (unlikely(!oper_cfg.vpss_regs_base2)) {
+- release_mem_region(VPSS_CLK_CTRL, 4);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto err_ioremap;
+ }
+
+ writel(VPSS_CLK_CTRL_VENCCLKEN |
+- VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
++ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
++
++ ret = platform_driver_register(&vpss_driver);
++ if (ret)
++ goto err_pd_register;
++
++ return 0;
+
+- return platform_driver_register(&vpss_driver);
++err_pd_register:
++ iounmap(oper_cfg.vpss_regs_base2);
++err_ioremap:
++ release_mem_region(VPSS_CLK_CTRL, 4);
++ return ret;
+ }
+ subsys_initcall(vpss_init);
+ module_exit(vpss_exit);
+diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
+index 880a9068ca126..ef06af4e3611d 100644
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -595,8 +595,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+ mutex_lock(&disc->disc_mutex);
+ if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ goto out;
+- if (IS_ERR(fp))
+- goto redisc;
++ if (IS_ERR(fp)) {
++ mutex_lock(&disc->disc_mutex);
++ fc_disc_restart(disc);
++ mutex_unlock(&disc->disc_mutex);
++ goto out;
++ }
+
+ cp = fc_frame_payload_get(fp, sizeof(*cp));
+ if (!cp)
+@@ -621,7 +625,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+ new_rdata->disc_id = disc->disc_id;
+ lport->tt.rport_login(new_rdata);
+ }
+- goto out;
++ goto free_fp;
+ }
+ rdata->disc_id = disc->disc_id;
+ lport->tt.rport_login(rdata);
+@@ -635,6 +639,8 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+ redisc:
+ fc_disc_restart(disc);
+ }
++free_fp:
++ fc_frame_free(fp);
+ out:
+ mutex_unlock(&disc->disc_mutex);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
+diff --git a/drivers/video/fbdev/omap2/dss/dss.c b/drivers/video/fbdev/omap2/dss/dss.c
+index 9200a8668b498..a57c3a5f4bf8b 100644
+--- a/drivers/video/fbdev/omap2/dss/dss.c
++++ b/drivers/video/fbdev/omap2/dss/dss.c
+@@ -843,7 +843,7 @@ static const struct dss_features omap34xx_dss_feats = {
+ };
+
+ static const struct dss_features omap3630_dss_feats = {
+- .fck_div_max = 32,
++ .fck_div_max = 31,
+ .dss_fck_multiplier = 1,
+ .parent_clk_name = "dpll4_ck",
+ .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index a01a41a412693..6b3565feddb21 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -603,6 +603,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+ {
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
++ if (unlikely(vq->broken))
++ return false;
++
+ virtio_mb(vq->weak_barriers);
+ return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
+ }
+diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
+index 2048aad91add8..2b12ef019ae02 100644
+--- a/drivers/watchdog/f71808e_wdt.c
++++ b/drivers/watchdog/f71808e_wdt.c
+@@ -642,9 +642,9 @@ static int __init watchdog_init(int sioaddr)
+ * into the module have been registered yet.
+ */
+ watchdog.sioaddr = sioaddr;
+- watchdog.ident.options = WDIOC_SETTIMEOUT
+- | WDIOF_MAGICCLOSE
+- | WDIOF_KEEPALIVEPING;
++ watchdog.ident.options = WDIOF_MAGICCLOSE
++ | WDIOF_KEEPALIVEPING
++ | WDIOF_CARDRESET;
+
+ snprintf(watchdog.ident.identity,
+ sizeof(watchdog.ident.identity), "%s watchdog",
+diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
+index 5f6b77ea34fb5..128375ff80b8c 100644
+--- a/drivers/xen/preempt.c
++++ b/drivers/xen/preempt.c
+@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+ asmlinkage __visible void xen_maybe_preempt_hcall(void)
+ {
+ if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
+- && need_resched())) {
++ && need_resched() && !preempt_count())) {
+ /*
+ * Clear flag as we may be rescheduled on a different
+ * cpu.
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 0b06d4942da77..8fb9a1e0048be 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -4096,6 +4096,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
+ /* super.c */
+ int btrfs_parse_options(struct btrfs_root *root, char *options);
+ int btrfs_sync_fs(struct super_block *sb, int wait);
++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
++ u64 subvol_objectid);
+
+ #ifdef CONFIG_PRINTK
+ __printf(2, 3)
+diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
+index 2513a7f533342..92f80ed642194 100644
+--- a/fs/btrfs/export.c
++++ b/fs/btrfs/export.c
+@@ -55,9 +55,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ return type;
+ }
+
+-static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+- u64 root_objectid, u32 generation,
+- int check_generation)
+struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+ u64 root_objectid, u32 generation,
+ int check_generation)
+ {
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ struct btrfs_root *root;
+@@ -150,7 +150,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
+ return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
+ }
+
+-static struct dentry *btrfs_get_parent(struct dentry *child)
+struct dentry *btrfs_get_parent(struct dentry *child)
+ {
+ struct inode *dir = d_inode(child);
+ struct btrfs_root *root = BTRFS_I(dir)->root;
+diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
+index 074348a95841f..7a305e5549991 100644
+--- a/fs/btrfs/export.h
++++ b/fs/btrfs/export.h
+@@ -16,4 +16,9 @@ struct btrfs_fid {
+ u64 parent_root_objectid;
+ } __attribute__ ((packed));
+
++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
++ u64 root_objectid, u32 generation,
++ int check_generation);
++struct dentry *btrfs_get_parent(struct dentry *child);
++
+ #endif
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 404051bf5cba9..77e6ce0e1e351 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -843,8 +843,8 @@ out:
+ return error;
+ }
+
+-static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
+- u64 subvol_objectid)
+char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
+ u64 subvol_objectid)
+ {
+ struct btrfs_root *root = fs_info->tree_root;
+ struct btrfs_root *fs_root;
+@@ -1120,6 +1120,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
+ struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
+ struct btrfs_root *root = info->tree_root;
+ char *compress_type;
++ const char *subvol_name;
+
+ if (btrfs_test_opt(root, DEGRADED))
+ seq_puts(seq, ",degraded");
+@@ -1204,8 +1205,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
+ #endif
+ seq_printf(seq, ",subvolid=%llu",
+ BTRFS_I(d_inode(dentry))->root->root_key.objectid);
+- seq_puts(seq, ",subvol=");
+- seq_dentry(seq, dentry, " \t\n\\");
+ subvol_name = btrfs_get_subvol_name_from_objectid(info,
+ BTRFS_I(d_inode(dentry))->root->root_key.objectid);
++ if (!IS_ERR(subvol_name)) {
++ seq_puts(seq, ",subvol=");
++ seq_escape(seq, subvol_name, " \t\n\\");
++ kfree(subvol_name);
++ }
+ return 0;
+ }
+
+@@ -1323,8 +1329,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
+ goto out;
+ }
+ }
+- subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb),
+- subvol_objectid);
++ subvol_name = btrfs_get_subvol_name_from_objectid(
++ btrfs_sb(mnt->mnt_sb), subvol_objectid);
+ if (IS_ERR(subvol_name)) {
+ root = ERR_CAST(subvol_name);
+ subvol_name = NULL;
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 240d9ceb8d0c6..b8959d0d4c723 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1719,9 +1719,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+ * not already there, and calling reverse_path_check()
+ * during ep_insert().
+ */
+- if (list_empty(&epi->ffd.file->f_tfile_llink))
++ if (list_empty(&epi->ffd.file->f_tfile_llink)) {
++ get_file(epi->ffd.file);
+ list_add(&epi->ffd.file->f_tfile_llink,
+ &tfile_check_list);
++ }
+ }
+ }
+ mutex_unlock(&ep->mtx);
+@@ -1765,6 +1767,7 @@ static void clear_tfile_check_list(void)
+ file = list_first_entry(&tfile_check_list, struct file,
+ f_tfile_llink);
+ list_del_init(&file->f_tfile_llink);
++ fput(file);
+ }
+ INIT_LIST_HEAD(&tfile_check_list);
+ }
+@@ -1902,13 +1905,13 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ mutex_lock(&epmutex);
+ if (is_file_epoll(tf.file)) {
+ error = -ELOOP;
+- if (ep_loop_check(ep, tf.file) != 0) {
+- clear_tfile_check_list();
++ if (ep_loop_check(ep, tf.file) != 0)
+ goto error_tgt_fput;
+- }
+- } else
++ } else {
++ get_file(tf.file);
+ list_add(&tf.file->f_tfile_llink,
+ &tfile_check_list);
++ }
+ mutex_lock_nested(&ep->mtx, 0);
+ if (is_file_epoll(tf.file)) {
+ tep = tf.file->private_data;
+@@ -1932,8 +1935,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ error = ep_insert(ep, &epds, tf.file, fd, full_check);
+ } else
+ error = -EEXIST;
+- if (full_check)
+- clear_tfile_check_list();
+ break;
+ case EPOLL_CTL_DEL:
+ if (epi)
+@@ -1954,8 +1955,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ mutex_unlock(&ep->mtx);
+
+ error_tgt_fput:
+- if (full_check)
++ if (full_check) {
++ clear_tfile_check_list();
+ mutex_unlock(&epmutex);
++ }
+
+ fdput(tf);
+ error_fput:
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 566a8b08ccdd6..061b026e464c5 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1226,19 +1226,18 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
+ }
+
+ /*
+- * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure.
++ * Test whether a directory entry matches the filename being searched for.
+ *
+- * `len <= EXT4_NAME_LEN' is guaranteed by caller.
+- * `de != NULL' is guaranteed by caller.
++ * Return: %true if the directory entry matches, otherwise %false.
+ */
+-static inline int ext4_match(struct ext4_filename *fname,
+- struct ext4_dir_entry_2 *de)
++static inline bool ext4_match(const struct ext4_filename *fname,
++ const struct ext4_dir_entry_2 *de)
+ {
+ const void *name = fname_name(fname);
+ u32 len = fname_len(fname);
+
+ if (!de->inode)
+- return 0;
++ return false;
+
+ #ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (unlikely(!name)) {
+@@ -1270,48 +1269,31 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
+ struct ext4_dir_entry_2 * de;
+ char * dlimit;
+ int de_len;
+- int res;
+
+ de = (struct ext4_dir_entry_2 *)search_buf;
+ dlimit = search_buf + buf_size;
+ while ((char *) de < dlimit) {
+ /* this code is executed quadratically often */
+ /* do minimal checking `by hand' */
+- if ((char *) de + de->name_len <= dlimit) {
+- res = ext4_match(fname, de);
+- if (res < 0) {
+- res = -1;
+- goto return_result;
+- }
+- if (res > 0) {
+- /* found a match - just to be sure, do
+- * a full check */
+- if (ext4_check_dir_entry(dir, NULL, de, bh,
+- bh->b_data,
+- bh->b_size, offset)) {
+- res = -1;
+- goto return_result;
+- }
+- *res_dir = de;
+- res = 1;
+- goto return_result;
+- }
+-
++ if ((char *) de + de->name_len <= dlimit &&
++ ext4_match(fname, de)) {
++ /* found a match - just to be sure, do
++ * a full check */
++ if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
++ buf_size, offset))
++ return -1;
++ *res_dir = de;
++ return 1;
+ }
+ /* prevent looping on a bad block */
+ de_len = ext4_rec_len_from_disk(de->rec_len,
+ dir->i_sb->s_blocksize);
+- if (de_len <= 0) {
+- res = -1;
+- goto return_result;
+- }
++ if (de_len <= 0)
++ return -1;
+ offset += de_len;
+ de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
+ }
+-
+- res = 0;
+-return_result:
+- return res;
++ return 0;
+ }
+
+ static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
+@@ -1748,7 +1730,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+ blocksize, hinfo, map);
+ map -= count;
+ dx_sort_map(map, count);
+- /* Split the existing block in the middle, size-wise */
++ /* Ensure that neither split block is over half full */
+ size = 0;
+ move = 0;
+ for (i = count-1; i >= 0; i--) {
+@@ -1758,8 +1740,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+ size += map[i].size;
+ move++;
+ }
+- /* map index at which we will split */
+- split = count - move;
++ /*
++ * map index at which we will split
++ *
++ * If the sum of active entries didn't exceed half the block size, just
++ * split it in half by count; each resulting block will have at least
++ * half the space free.
++ */
++ if (i > 0)
++ split = count - move;
++ else
++ split = count/2;
++
+ hash2 = map[split].hash;
+ continued = hash2 == map[split - 1].hash;
+ dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
+@@ -1824,24 +1816,15 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
+ int nlen, rlen;
+ unsigned int offset = 0;
+ char *top;
+- int res;
+
+ de = (struct ext4_dir_entry_2 *)buf;
+ top = buf + buf_size - reclen;
+ while ((char *) de <= top) {
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+- buf, buf_size, offset)) {
+- res = -EFSCORRUPTED;
+- goto return_result;
+- }
+- /* Provide crypto context and crypto buffer to ext4 match */
+- res = ext4_match(fname, de);
+- if (res < 0)
+- goto return_result;
+- if (res > 0) {
+- res = -EEXIST;
+- goto return_result;
+- }
++ buf, buf_size, offset))
++ return -EFSCORRUPTED;
++ if (ext4_match(fname, de))
++ return -EEXIST;
+ nlen = EXT4_DIR_REC_LEN(de->name_len);
+ rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+ if ((de->inode ? rlen - nlen : rlen) >= reclen)
+@@ -1849,15 +1832,11 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
+ de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
+ offset += rlen;
+ }
+-
+ if ((char *) de > top)
+- res = -ENOSPC;
+- else {
+- *dest_de = de;
+- res = 0;
+- }
+-return_result:
+- return res;
++ return -ENOSPC;
++
++ *dest_de = de;
++ return 0;
+ }
+
+ int ext4_insert_dentry(struct inode *dir,
+@@ -2343,7 +2322,7 @@ int ext4_generic_delete_entry(handle_t *handle,
+ de = (struct ext4_dir_entry_2 *)entry_buf;
+ while (i < buf_size - csum_size) {
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+- bh->b_data, bh->b_size, i))
++ entry_buf, buf_size, i))
+ return -EFSCORRUPTED;
+ if (de == de_del) {
+ if (pde)
+diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
+index e273171696972..7a3368929245d 100644
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -588,10 +588,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
+ int ret;
+ uint32_t now = get_seconds();
+
++ mutex_lock(&f->sem);
+ for (fd = f->dents ; fd; fd = fd->next) {
+- if (fd->ino)
++ if (fd->ino) {
++ mutex_unlock(&f->sem);
+ return -ENOTEMPTY;
++ }
+ }
++ mutex_unlock(&f->sem);
+
+ ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
+ dentry->d_name.len, f, now);
+diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
+index f86f51f99aceb..1dcadd22b440d 100644
+--- a/fs/romfs/storage.c
++++ b/fs/romfs/storage.c
+@@ -221,10 +221,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos,
+ size_t limit;
+
+ limit = romfs_maxsize(sb);
+- if (pos >= limit)
++ if (pos >= limit || buflen > limit - pos)
+ return -EIO;
+- if (buflen > limit - pos)
+- buflen = limit - pos;
+
+ #ifdef CONFIG_ROMFS_ON_MTD
+ if (sb->s_mtd)
+diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
+index be692e59938db..c457b010c623d 100644
+--- a/fs/xfs/xfs_sysfs.h
++++ b/fs/xfs/xfs_sysfs.h
+@@ -44,9 +44,11 @@ xfs_sysfs_init(
+ struct xfs_kobj *parent_kobj,
+ const char *name)
+ {
++ struct kobject *parent;
++
++ parent = parent_kobj ? &parent_kobj->kobject : NULL;
+ init_completion(&kobj->complete);
+- return kobject_init_and_add(&kobj->kobject, ktype,
+- &parent_kobj->kobject, "%s", name);
++ return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
+ }
+
+ static inline void
+diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
+index ce78534a047ee..bb8de2dddabe2 100644
+--- a/fs/xfs/xfs_trans_dquot.c
++++ b/fs/xfs/xfs_trans_dquot.c
+@@ -662,7 +662,7 @@ xfs_trans_dqresv(
+ }
+ }
+ if (ninos > 0) {
+- total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
++ total_count = dqp->q_res_icount + ninos;
+ timer = be32_to_cpu(dqp->q_core.d_itimer);
+ warns = be16_to_cpu(dqp->q_core.d_iwarns);
+ warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 03cf5526e4456..2b17d2fca4299 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1123,6 +1123,10 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
+ * followed by taking the mmap_sem for writing before modifying the
+ * vmas or anything the coredump pretends not to change from under it.
+ *
++ * It also has to be called when mmgrab() is used in the context of
++ * the process, but then the mm_count refcount is transferred outside
++ * the context of the process to run down_write() on that pinned mm.
++ *
+ * NOTE: find_extend_vma() called from GUP context is the only place
+ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * for reading and outside the context of the process, so it is also
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 426a57874964c..31198b32d9122 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -779,6 +779,8 @@ static inline int sk_memalloc_socks(void)
+ {
+ return static_key_false(&memalloc_socks);
+ }
++
++void __receive_sock(struct file *file);
+ #else
+
+ static inline int sk_memalloc_socks(void)
+@@ -786,6 +788,8 @@ static inline int sk_memalloc_socks(void)
+ return 0;
+ }
+
++static inline void __receive_sock(struct file *file)
++{ }
+ #endif
+
+ static inline gfp_t sk_gfp_atomic(const struct sock *sk, gfp_t gfp_mask)
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 465786cd6490e..f38d24bb8a1bc 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2136,7 +2136,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
+
+ static inline int khugepaged_test_exit(struct mm_struct *mm)
+ {
+- return atomic_read(&mm->mm_users) == 0;
++ return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
+ }
+
+ int __khugepaged_enter(struct mm_struct *mm)
+@@ -2149,7 +2149,7 @@ int __khugepaged_enter(struct mm_struct *mm)
+ return -ENOMEM;
+
+ /* __khugepaged_exit() must not run from under us */
+- VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
++ VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
+ if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
+ free_mm_slot(mm_slot);
+ return 0;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 3a1501e854832..baac9a09ec0a1 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4257,6 +4257,7 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+ return false;
+ }
+
++#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
+ /*
+ * Determine if start,end range within vma could be mapped by shared pmd.
+ * If yes, adjust start and end to cover range associated with possible
+@@ -4265,25 +4266,21 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+ {
+- unsigned long check_addr = *start;
++ unsigned long a_start, a_end;
+
+ if (!(vma->vm_flags & VM_MAYSHARE))
+ return;
+
+- for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
+- unsigned long a_start = check_addr & PUD_MASK;
+- unsigned long a_end = a_start + PUD_SIZE;
++ /* Extend the range to be PUD aligned for a worst case scenario */
++ a_start = ALIGN_DOWN(*start, PUD_SIZE);
++ a_end = ALIGN(*end, PUD_SIZE);
+
+- /*
+- * If sharing is possible, adjust start/end if necessary.
+- */
+- if (range_in_vma(vma, a_start, a_end)) {
+- if (a_start < *start)
+- *start = a_start;
+- if (a_end > *end)
+- *end = a_end;
+- }
+- }
++ /*
++ * Intersect the range with the vma range, since pmd sharing won't be
++ * across vma after all
++ */
++ *start = max(vma->vm_start, a_start);
++ *end = min(vma->vm_end, a_end);
+ }
+
+ /*
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index df589416ace6c..14bab5fa1b656 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -843,6 +843,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ if (nr_scanned)
+ __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
+
++ /*
++ * Ensure proper count is passed which otherwise would stuck in the
++ * below while (list_empty(list)) loop.
++ */
++ count = min(pcp->count, count);
+ while (to_free) {
+ struct page *page;
+ struct list_head *list;
+@@ -6285,7 +6290,7 @@ int __meminit init_per_zone_wmark_min(void)
+ setup_per_zone_inactive_ratio();
+ return 0;
+ }
+-core_initcall(init_per_zone_wmark_min)
++postcore_initcall(init_per_zone_wmark_min)
+
+ /*
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+diff --git a/net/compat.c b/net/compat.c
+index d676840104556..20c5e5f215f23 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -284,6 +284,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
+ break;
+ }
+ /* Bump the usage count and install the file. */
++ __receive_sock(fp[i]);
+ fd_install(new_fd, get_file(fp[i]));
+ }
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 120d5058d81ae..82f9a7dbea6fe 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2275,6 +2275,27 @@ int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *
+ }
+ EXPORT_SYMBOL(sock_no_mmap);
+
++/*
++ * When a file is received (via SCM_RIGHTS, etc), we must bump the
++ * various sock-based usage counts.
++ */
++void __receive_sock(struct file *file)
++{
++ struct socket *sock;
++ int error;
++
++ /*
++ * The resulting value of "error" is ignored here since we only
++ * need to take action when the file is a socket and testing
++ * "sock" for NULL is sufficient.
++ */
++ sock = sock_from_file(file, &error);
++ if (sock) {
++ sock_update_netprioidx(sock->sk);
++ sock_update_classid(sock->sk);
++ }
++}
++
+ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
+ {
+ ssize_t res;
+diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+index 1d9dfb92b3b48..edb244331e6e9 100644
+--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+@@ -338,7 +338,7 @@ static int sst_media_open(struct snd_pcm_substream *substream,
+
+ ret_val = power_up_sst(stream);
+ if (ret_val < 0)
+- return ret_val;
++ goto out_power_up;
+
+ /* Make sure, that the period size is always even */
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+@@ -347,8 +347,9 @@ static int sst_media_open(struct snd_pcm_substream *substream,
+ return snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ out_ops:
+- kfree(stream);
+ mutex_unlock(&sst_lock);
++out_power_up:
++ kfree(stream);
+ return ret_val;
+ }
+
+diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
+index c694f10d004cc..1b73537af91db 100644
+--- a/tools/perf/util/probe-finder.c
++++ b/tools/perf/util/probe-finder.c
+@@ -1274,7 +1274,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
+ tf.ntevs = 0;
+
+ ret = debuginfo__find_probes(dbg, &tf.pf);
+- if (ret < 0) {
++ if (ret < 0 || tf.ntevs == 0) {
+ for (i = 0; i < tf.ntevs; i++)
clear_probe_trace_event(&tf.tevs[i]);
zfree(tevs);