Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.13 commit in: /
Date: Sat, 31 Jul 2021 10:28:29
Message-Id: 1627727291.853dad7e1b5cbd0cf31a9ff7de75724bace6cf20.alicef@gentoo
commit:     853dad7e1b5cbd0cf31a9ff7de75724bace6cf20
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Jul 31 10:27:57 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Jul 31 10:28:11 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=853dad7e

Linux patch 5.13.7

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |   4 +
 1006_linux-5.13.7.patch | 781 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 785 insertions(+)
diff --git a/0000_README b/0000_README
index 87d634d..79d1b68 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-5.13.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.13.6

+Patch:  1006_linux-5.13.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.13.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-5.13.7.patch b/1006_linux-5.13.7.patch
new file mode 100644
index 0000000..840c600
--- /dev/null
+++ b/1006_linux-5.13.7.patch
@@ -0,0 +1,781 @@
+diff --git a/Makefile b/Makefile
+index 96967f8951933..614327400aea2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 13
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Opossums on Parade
+
+diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
+index 37bd41ff8dffa..151c0220047dd 100644
+--- a/arch/arm/boot/dts/versatile-ab.dts
++++ b/arch/arm/boot/dts/versatile-ab.dts
+@@ -195,16 +195,15 @@
+ #size-cells = <1>;
+ ranges;
+
+- vic: intc@10140000 {
++ vic: interrupt-controller@10140000 {
+ compatible = "arm,versatile-vic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x10140000 0x1000>;
+- clear-mask = <0xffffffff>;
+ valid-mask = <0xffffffff>;
+ };
+
+- sic: intc@10003000 {
++ sic: interrupt-controller@10003000 {
+ compatible = "arm,versatile-sic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
+index 06a0fdf24026c..e7e751a858d81 100644
+--- a/arch/arm/boot/dts/versatile-pb.dts
++++ b/arch/arm/boot/dts/versatile-pb.dts
+@@ -7,7 +7,7 @@
+
+ amba {
+ /* The Versatile PB is using more SIC IRQ lines than the AB */
+- sic: intc@10003000 {
++ sic: interrupt-controller@10003000 {
+ clear-mask = <0xffffffff>;
+ /*
+ * Valid interrupt lines mask according to
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 74986bf96656f..c9fda6261c6b0 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -47,7 +47,6 @@ enum scmi_error_codes {
+ SCMI_ERR_GENERIC = -8, /* Generic Error */
+ SCMI_ERR_HARDWARE = -9, /* Hardware Error */
+ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+- SCMI_ERR_MAX
+ };
+
+ /* List of all SCMI devices active in system */
+@@ -166,8 +165,10 @@ static const int scmi_linux_errmap[] = {
+
+ static inline int scmi_to_linux_errno(int errno)
+ {
+- if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
+- return scmi_linux_errmap[-errno];
++ int err_idx = -errno;
++
++ if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
++ return scmi_linux_errmap[err_idx];
+ return -EIO;
+ }
+
+@@ -1029,8 +1030,9 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
+ const struct scmi_desc *desc = sinfo->desc;
+
+ /* Pre-allocated messages, no more than what hdr.seq can support */
+- if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
+- dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
++ if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
++ dev_err(dev,
++ "Invalid maximum messages %d, not in range [1 - %lu]\n",
+ desc->max_msg, MSG_TOKEN_MAX);
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
+index 707e5c1528967..ed053fd15c90b 100644
+--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
++++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
+@@ -146,6 +146,9 @@ int ttm_range_man_fini(struct ttm_device *bdev,
+ struct drm_mm *mm = &rman->mm;
+ int ret;
+
++ if (!man)
++ return 0;
++
+ ttm_resource_manager_set_used(man, false);
+
+ ret = ttm_resource_manager_evict_all(bdev, man);
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index fb1c5ae0da39d..d963f25fc7aed 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1562,6 +1562,28 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
+ wmb(); /* ensure the first interrupt sees the initialization */
+ }
+
++/*
++ * Try getting shutdown_lock while setting up IO queues.
++ */
++static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
++{
++ /*
++ * Give up if the lock is being held by nvme_dev_disable.
++ */
++ if (!mutex_trylock(&dev->shutdown_lock))
++ return -ENODEV;
++
++ /*
++ * Controller is in wrong state, fail early.
++ */
++ if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
++ mutex_unlock(&dev->shutdown_lock);
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
+ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
+ {
+ struct nvme_dev *dev = nvmeq->dev;
+@@ -1590,8 +1612,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
+ goto release_cq;
+
+ nvmeq->cq_vector = vector;
+- nvme_init_queue(nvmeq, qid);
+
++ result = nvme_setup_io_queues_trylock(dev);
++ if (result)
++ return result;
++ nvme_init_queue(nvmeq, qid);
+ if (!polled) {
+ result = queue_request_irq(nvmeq);
+ if (result < 0)
+@@ -1599,10 +1624,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
+ }
+
+ set_bit(NVMEQ_ENABLED, &nvmeq->flags);
++ mutex_unlock(&dev->shutdown_lock);
+ return result;
+
+ release_sq:
+ dev->online_queues--;
++ mutex_unlock(&dev->shutdown_lock);
+ adapter_delete_sq(dev, qid);
+ release_cq:
+ adapter_delete_cq(dev, qid);
+@@ -2176,7 +2203,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
+ if (nr_io_queues == 0)
+ return 0;
+
+- clear_bit(NVMEQ_ENABLED, &adminq->flags);
++ /*
++ * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions
++ * from set to unset. If there is a window to it is truely freed,
++ * pci_free_irq_vectors() jumping into this window will crash.
++ * And take lock to avoid racing with pci_free_irq_vectors() in
++ * nvme_dev_disable() path.
++ */
++ result = nvme_setup_io_queues_trylock(dev);
++ if (result)
++ return result;
++ if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
++ pci_free_irq(pdev, 0, adminq);
+
+ if (dev->cmb_use_sqes) {
+ result = nvme_cmb_qdepth(dev, nr_io_queues,
+@@ -2192,14 +2230,17 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
+ result = nvme_remap_bar(dev, size);
+ if (!result)
+ break;
+- if (!--nr_io_queues)
+- return -ENOMEM;
++ if (!--nr_io_queues) {
++ result = -ENOMEM;
++ goto out_unlock;
++ }
+ } while (1);
+ adminq->q_db = dev->dbs;
+
+ retry:
+ /* Deregister the admin queue's interrupt */
+- pci_free_irq(pdev, 0, adminq);
++ if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
++ pci_free_irq(pdev, 0, adminq);
+
+ /*
+ * If we enable msix early due to not intx, disable it again before
+@@ -2208,8 +2249,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
+ pci_free_irq_vectors(pdev);
+
+ result = nvme_setup_irqs(dev, nr_io_queues);
+- if (result <= 0)
+- return -EIO;
++ if (result <= 0) {
++ result = -EIO;
++ goto out_unlock;
++ }
+
+ dev->num_vecs = result;
+ result = max(result - 1, 1);
+@@ -2223,8 +2266,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
+ */
+ result = queue_request_irq(adminq);
+ if (result)
+- return result;
++ goto out_unlock;
+ set_bit(NVMEQ_ENABLED, &adminq->flags);
++ mutex_unlock(&dev->shutdown_lock);
+
+ result = nvme_create_io_queues(dev);
+ if (result || dev->online_queues < 2)
+@@ -2233,6 +2277,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
+ if (dev->online_queues - 1 < dev->max_qid) {
+ nr_io_queues = dev->online_queues - 1;
+ nvme_disable_io_queues(dev);
++ result = nvme_setup_io_queues_trylock(dev);
++ if (result)
++ return result;
+ nvme_suspend_io_queues(dev);
+ goto retry;
+ }
+@@ -2241,6 +2288,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
+ dev->io_queues[HCTX_TYPE_READ],
+ dev->io_queues[HCTX_TYPE_POLL]);
+ return 0;
++out_unlock:
++ mutex_unlock(&dev->shutdown_lock);
++ return result;
+ }
+
+ static void nvme_del_queue_end(struct request *req, blk_status_t error)
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 64cad843ce723..398c941e38974 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -555,8 +555,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ p = buf;
+ while (bytes_left >= sizeof(*p)) {
+ info->speed = le64_to_cpu(p->LinkSpeed);
+- info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
+- info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
++ info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
++ info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
+
+ cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
+ cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
+index 4af318fbda774..ef9498a6e88ac 100644
+--- a/fs/hfs/bfind.c
++++ b/fs/hfs/bfind.c
+@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
+ fd->key = ptr + tree->max_key_len + 2;
+ hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
+ tree->cnid, __builtin_return_address(0));
+- mutex_lock(&tree->tree_lock);
++ switch (tree->cnid) {
++ case HFS_CAT_CNID:
++ mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
++ break;
++ case HFS_EXT_CNID:
++ mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
++ break;
++ case HFS_ATTR_CNID:
++ mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
++ break;
++ default:
++ return -EINVAL;
++ }
+ return 0;
+ }
+
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index b63a4df7327b6..c0a73a6ffb28b 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -15,16 +15,31 @@
+
+ #include "btree.h"
+
+-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
+- int off, int len)
++void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+ {
+ struct page *page;
++ int pagenum;
++ int bytes_read;
++ int bytes_to_read;
++ void *vaddr;
+
+ off += node->page_offset;
+- page = node->page[0];
++ pagenum = off >> PAGE_SHIFT;
++ off &= ~PAGE_MASK; /* compute page offset for the first page */
+
+- memcpy(buf, kmap(page) + off, len);
+- kunmap(page);
++ for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
++ if (pagenum >= node->tree->pages_per_bnode)
++ break;
++ page = node->page[pagenum];
++ bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
++
++ vaddr = kmap_atomic(page);
++ memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
++ kunmap_atomic(vaddr);
++
++ pagenum++;
++ off = 0; /* page offset only applies to the first page */
++ }
+ }
+
+ u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
+diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
+index 4ba45caf59392..0e6baee932453 100644
+--- a/fs/hfs/btree.h
++++ b/fs/hfs/btree.h
+@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
+
+ #define NODE_HASH_SIZE 256
+
++/* B-tree mutex nested subclasses */
++enum hfs_btree_mutex_classes {
++ CATALOG_BTREE_MUTEX,
++ EXTENTS_BTREE_MUTEX,
++ ATTR_BTREE_MUTEX,
++};
++
+ /* A HFS BTree held in memory */
+ struct hfs_btree {
+ struct super_block *sb;
+diff --git a/fs/hfs/super.c b/fs/hfs/super.c
+index 44d07c9e3a7f0..12d9bae393631 100644
+--- a/fs/hfs/super.c
++++ b/fs/hfs/super.c
+@@ -420,14 +420,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+ if (!res) {
+ if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+ res = -EIO;
+- goto bail;
++ goto bail_hfs_find;
+ }
+ hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+ }
+- if (res) {
+- hfs_find_exit(&fd);
+- goto bail_no_root;
+- }
++ if (res)
++ goto bail_hfs_find;
+ res = -EINVAL;
+ root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
+ hfs_find_exit(&fd);
+@@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+ /* everything's okay */
+ return 0;
+
++bail_hfs_find:
++ hfs_find_exit(&fd);
+ bail_no_root:
+ pr_err("get root inode failed\n");
+ bail:
+diff --git a/fs/internal.h b/fs/internal.h
+index 6aeae7ef33803..728f8d70d7f1d 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -61,7 +61,6 @@ extern void __init chrdev_init(void);
+ */
+ extern const struct fs_context_operations legacy_fs_context_ops;
+ extern int parse_monolithic_mount_data(struct fs_context *, void *);
+-extern void fc_drop_locked(struct fs_context *);
+ extern void vfs_clean_context(struct fs_context *fc);
+ extern int finish_clean_context(struct fs_context *fc);
+
+diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
+index dab1b02eba5b7..ce6fb810854fe 100644
+--- a/fs/iomap/seek.c
++++ b/fs/iomap/seek.c
+@@ -35,23 +35,20 @@ loff_t
+ iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+ {
+ loff_t size = i_size_read(inode);
+- loff_t length = size - offset;
+ loff_t ret;
+
+ /* Nothing to be found before or beyond the end of the file. */
+ if (offset < 0 || offset >= size)
+ return -ENXIO;
+
+- while (length > 0) {
+- ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
+- &offset, iomap_seek_hole_actor);
++ while (offset < size) {
++ ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
++ ops, &offset, iomap_seek_hole_actor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ break;
+-
+ offset += ret;
+- length -= ret;
+ }
+
+ return offset;
+@@ -83,27 +80,23 @@ loff_t
+ iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+ {
+ loff_t size = i_size_read(inode);
+- loff_t length = size - offset;
+ loff_t ret;
+
+ /* Nothing to be found before or beyond the end of the file. */
+ if (offset < 0 || offset >= size)
+ return -ENXIO;
+
+- while (length > 0) {
+- ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
+- &offset, iomap_seek_data_actor);
++ while (offset < size) {
++ ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
++ ops, &offset, iomap_seek_data_actor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+- break;
+-
++ return offset;
+ offset += ret;
+- length -= ret;
+ }
+
+- if (length <= 0)
+- return -ENXIO;
+- return offset;
++ /* We've reached the end of the file without finding data */
++ return -ENXIO;
+ }
+ EXPORT_SYMBOL_GPL(iomap_seek_data);
+diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
+index 37e1e8f7f08da..5b44b0195a28a 100644
+--- a/include/linux/fs_context.h
++++ b/include/linux/fs_context.h
+@@ -139,6 +139,7 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ extern int generic_parse_monolithic(struct fs_context *fc, void *data);
+ extern int vfs_get_tree(struct fs_context *fc);
+ extern void put_fs_context(struct fs_context *fc);
++extern void fc_drop_locked(struct fs_context *fc);
+
+ /*
+ * sget() wrappers to be called from the ->get_tree() op.
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index 73af4a64a5999..40296ed976a97 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -38,7 +38,7 @@ static inline bool net_busy_loop_on(void)
+
+ static inline bool sk_can_busy_loop(const struct sock *sk)
+ {
+- return sk->sk_ll_usec && !signal_pending(current);
++ return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
+ }
+
+ bool sk_busy_loop_end(void *p, unsigned long start_time);
+diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
+index 14a0d22c91133..bf23a2ed92da8 100644
+--- a/include/net/sctp/constants.h
++++ b/include/net/sctp/constants.h
+@@ -342,8 +342,7 @@ enum {
+ #define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK
+
+ /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
+- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
+- * 192.88.99.0/24.
++ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
+ * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
+ * addresses.
+ */
+@@ -351,7 +350,6 @@ enum {
+ ((htonl(INADDR_BROADCAST) == a) || \
+ ipv4_is_multicast(a) || \
+ ipv4_is_zeronet(a) || \
+- ipv4_is_test_198(a) || \
+ ipv4_is_anycast_6to4(a))
+
+ /* Flags used for the bind address copy functions. */
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index d189eff4c92ff..583790d2060ce 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -1225,9 +1225,7 @@ int cgroup1_get_tree(struct fs_context *fc)
+ ret = cgroup_do_get_tree(fc);
+
+ if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
+- struct super_block *sb = fc->root->d_sb;
+- dput(fc->root);
+- deactivate_locked_super(sb);
++ fc_drop_locked(fc);
+ ret = 1;
+ }
+
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 350ebf5051f97..fcef5f0c60b8b 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -908,10 +908,9 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
+ in_qs = likely(!t->trc_reader_nesting);
+ }
+
+- // Mark as checked. Because this is called from the grace-period
+- // kthread, also remove the task from the holdout list.
++ // Mark as checked so that the grace-period kthread will
++ // remove it from the holdout list.
+ t->trc_reader_checked = true;
+- trc_del_holdout(t);
+
+ if (in_qs)
+ return true; // Already in quiescent state, done!!!
+@@ -938,7 +937,6 @@ static void trc_wait_for_one_reader(struct task_struct *t,
+ // The current task had better be in a quiescent state.
+ if (t == current) {
+ t->trc_reader_checked = true;
+- trc_del_holdout(t);
+ WARN_ON_ONCE(t->trc_reader_nesting);
+ return;
+ }
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 50142fc08902d..f148eacda55a9 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3676,15 +3676,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
+ unbound_release_work);
+ struct workqueue_struct *wq = pwq->wq;
+ struct worker_pool *pool = pwq->pool;
+- bool is_last;
++ bool is_last = false;
+
+- if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+- return;
++ /*
++ * when @pwq is not linked, it doesn't hold any reference to the
++ * @wq, and @wq is invalid to access.
++ */
++ if (!list_empty(&pwq->pwqs_node)) {
++ if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
++ return;
+
+- mutex_lock(&wq->mutex);
+- list_del_rcu(&pwq->pwqs_node);
+- is_last = list_empty(&wq->pwqs);
+- mutex_unlock(&wq->mutex);
++ mutex_lock(&wq->mutex);
++ list_del_rcu(&pwq->pwqs_node);
++ is_last = list_empty(&wq->pwqs);
++ mutex_unlock(&wq->mutex);
++ }
+
+ mutex_lock(&wq_pool_mutex);
+ put_unbound_pool(pool);
+diff --git a/net/802/garp.c b/net/802/garp.c
+index 400bd857e5f57..f6012f8e59f00 100644
+--- a/net/802/garp.c
++++ b/net/802/garp.c
+@@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
+ kfree(attr);
+ }
+
++static void garp_attr_destroy_all(struct garp_applicant *app)
++{
++ struct rb_node *node, *next;
++ struct garp_attr *attr;
++
++ for (node = rb_first(&app->gid);
++ next = node ? rb_next(node) : NULL, node != NULL;
++ node = next) {
++ attr = rb_entry(node, struct garp_attr, node);
++ garp_attr_destroy(app, attr);
++ }
++}
++
+ static int garp_pdu_init(struct garp_applicant *app)
+ {
+ struct sk_buff *skb;
+@@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
+
+ spin_lock_bh(&app->lock);
+ garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
++ garp_attr_destroy_all(app);
+ garp_pdu_queue(app);
+ spin_unlock_bh(&app->lock);
+
+diff --git a/net/802/mrp.c b/net/802/mrp.c
+index bea6e43d45a0d..35e04cc5390c4 100644
+--- a/net/802/mrp.c
++++ b/net/802/mrp.c
+@@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
+ kfree(attr);
+ }
+
++static void mrp_attr_destroy_all(struct mrp_applicant *app)
++{
++ struct rb_node *node, *next;
++ struct mrp_attr *attr;
++
++ for (node = rb_first(&app->mad);
++ next = node ? rb_next(node) : NULL, node != NULL;
++ node = next) {
++ attr = rb_entry(node, struct mrp_attr, node);
++ mrp_attr_destroy(app, attr);
++ }
++}
++
+ static int mrp_pdu_init(struct mrp_applicant *app)
+ {
+ struct sk_buff *skb;
+@@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+
+ spin_lock_bh(&app->lock);
+ mrp_mad_event(app, MRP_EVENT_TX);
++ mrp_attr_destroy_all(app);
+ mrp_pdu_queue(app);
+ spin_unlock_bh(&app->lock);
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 2003c5ebb4c2e..37d732fe3fcf9 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1172,7 +1172,7 @@ set_sndbuf:
+ if (val < 0)
+ ret = -EINVAL;
+ else
+- sk->sk_ll_usec = val;
++ WRITE_ONCE(sk->sk_ll_usec, val);
+ }
+ break;
+ case SO_PREFER_BUSY_POLL:
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index b7ffb4f227a45..6062ad1d5b510 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -60,10 +60,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+ {
+ struct dst_entry *dst = skb_dst(skb);
+ struct net_device *dev = dst->dev;
++ unsigned int hh_len = LL_RESERVED_SPACE(dev);
++ int delta = hh_len - skb_headroom(skb);
+ const struct in6_addr *nexthop;
+ struct neighbour *neigh;
+ int ret;
+
++ /* Be paranoid, rather than too clever. */
++ if (unlikely(delta > 0) && dev->header_ops) {
++ /* pskb_expand_head() might crash, if skb is shared */
++ if (skb_shared(skb)) {
++ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
++
++ if (likely(nskb)) {
++ if (skb->sk)
++ skb_set_owner_w(nskb, skb->sk);
++ consume_skb(skb);
++ } else {
++ kfree_skb(skb);
++ }
++ skb = nskb;
++ }
++ if (skb &&
++ pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
++ kfree_skb(skb);
++ skb = NULL;
++ }
++ if (!skb) {
++ IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
++ return -ENOMEM;
++ }
++ }
++
+ if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
+ struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 25192b378e2ec..9b444df5e53ee 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -398,7 +398,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
+ retval = SCTP_SCOPE_LINK;
+ } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
+ ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
+- ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
++ ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
++ ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
+ retval = SCTP_SCOPE_PRIVATE;
+ } else {
+ retval = SCTP_SCOPE_GLOBAL;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 5d1192ceb1397..68a9591d0144c 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1522,6 +1522,53 @@ out:
+ return err;
+ }
+
++static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
++{
++ scm->fp = scm_fp_dup(UNIXCB(skb).fp);
++
++ /*
++ * Garbage collection of unix sockets starts by selecting a set of
++ * candidate sockets which have reference only from being in flight
++ * (total_refs == inflight_refs). This condition is checked once during
++ * the candidate collection phase, and candidates are marked as such, so
++ * that non-candidates can later be ignored. While inflight_refs is
++ * protected by unix_gc_lock, total_refs (file count) is not, hence this
++ * is an instantaneous decision.
++ *
++ * Once a candidate, however, the socket must not be reinstalled into a
++ * file descriptor while the garbage collection is in progress.
++ *
++ * If the above conditions are met, then the directed graph of
++ * candidates (*) does not change while unix_gc_lock is held.
++ *
++ * Any operations that changes the file count through file descriptors
++ * (dup, close, sendmsg) does not change the graph since candidates are
++ * not installed in fds.
++ *
++ * Dequeing a candidate via recvmsg would install it into an fd, but
++ * that takes unix_gc_lock to decrement the inflight count, so it's
++ * serialized with garbage collection.
++ *
++ * MSG_PEEK is special in that it does not change the inflight count,
++ * yet does install the socket into an fd. The following lock/unlock
++ * pair is to ensure serialization with garbage collection. It must be
++ * done between incrementing the file count and installing the file into
++ * an fd.
++ *
++ * If garbage collection starts after the barrier provided by the
++ * lock/unlock, then it will see the elevated refcount and not mark this
++ * as a candidate. If a garbage collection is already in progress
++ * before the file count was incremented, then the lock/unlock pair will
++ * ensure that garbage collection is finished before progressing to
++ * installing the fd.
++ *
++ * (*) A -> B where B is on the queue of A or B is on the queue of C
++ * which is on the queue of listening socket A.
++ */
++ spin_lock(&unix_gc_lock);
++ spin_unlock(&unix_gc_lock);
++}
++
+ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
+ {
+ int err = 0;
+@@ -2171,7 +2218,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ sk_peek_offset_fwd(sk, size);
+
+ if (UNIXCB(skb).fp)
+- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
++ unix_peek_fds(&scm, skb);
+ err = (flags & MSG_TRUNC) ? skb->len - skip : size;
+
+@@ -2414,7 +2461,7 @@ unlock:
+ /* It is questionable, see note in unix_dgram_recvmsg.
+ */
+ if (UNIXCB(skb).fp)
+- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
++ unix_peek_fds(&scm, skb);
+
+ sk_peek_offset_fwd(sk, chunk);
+