Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Sat, 31 Jul 2021 10:32:29
Message-Id: 1627727525.25000b5d68e5dc24a837c747662c5058e18afb90.alicef@gentoo
commit: 25000b5d68e5dc24a837c747662c5058e18afb90
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Jul 31 10:31:52 2021 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Jul 31 10:32:05 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=25000b5d

linux patch 5.4.137

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

0000_README | 4 +
1136_linux-5.4.137.patch | 692 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 696 insertions(+)

diff --git a/0000_README b/0000_README
index a760b64..577b47e 100644
--- a/0000_README
+++ b/0000_README
@@ -587,6 +587,10 @@ Patch: 1135_linux-5.4.136.patch
From: http://www.kernel.org
Desc: Linux 5.4.136

+Patch: 1136_linux-5.4.137.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.137
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1136_linux-5.4.137.patch b/1136_linux-5.4.137.patch
new file mode 100644
index 0000000..535b258
--- /dev/null
+++ b/1136_linux-5.4.137.patch
@@ -0,0 +1,692 @@
+diff --git a/Makefile b/Makefile
+index 1c565572bfb24..7cd8862d854ed 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 136
++SUBLEVEL = 137
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
+index 37bd41ff8dffa..151c0220047dd 100644
+--- a/arch/arm/boot/dts/versatile-ab.dts
++++ b/arch/arm/boot/dts/versatile-ab.dts
+@@ -195,16 +195,15 @@
+ #size-cells = <1>;
+ ranges;
+
+- vic: intc@10140000 {
++ vic: interrupt-controller@10140000 {
+ compatible = "arm,versatile-vic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x10140000 0x1000>;
+- clear-mask = <0xffffffff>;
+ valid-mask = <0xffffffff>;
+ };
+
+- sic: intc@10003000 {
++ sic: interrupt-controller@10003000 {
+ compatible = "arm,versatile-sic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
+index 06a0fdf24026c..e7e751a858d81 100644
+--- a/arch/arm/boot/dts/versatile-pb.dts
++++ b/arch/arm/boot/dts/versatile-pb.dts
+@@ -7,7 +7,7 @@
+
+ amba {
+ /* The Versatile PB is using more SIC IRQ lines than the AB */
+- sic: intc@10003000 {
++ sic: interrupt-controller@10003000 {
+ clear-mask = <0xffffffff>;
+ /*
+ * Valid interrupt lines mask according to
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 377157656a8b6..5d35b9656b67d 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -475,8 +475,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
+
+ if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
+ queue:
+- if (has_error && !is_protmode(vcpu))
+- has_error = false;
+ if (reinject) {
+ /*
+ * On vmentry, vcpu->arch.exception.pending is only
+@@ -7592,6 +7590,13 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+ kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
+ }
+
++static void kvm_inject_exception(struct kvm_vcpu *vcpu)
++{
++ if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
++ vcpu->arch.exception.error_code = false;
++ kvm_x86_ops->queue_exception(vcpu);
++}
++
+ static int inject_pending_event(struct kvm_vcpu *vcpu)
+ {
+ int r;
+@@ -7599,7 +7604,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
+ /* try to reinject previous events if any */
+
+ if (vcpu->arch.exception.injected)
+- kvm_x86_ops->queue_exception(vcpu);
++ kvm_inject_exception(vcpu);
+ /*
+ * Do not inject an NMI or interrupt if there is a pending
+ * exception. Exceptions and interrupts are recognized at
+@@ -7665,7 +7670,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
+ }
+ }
+
+- kvm_x86_ops->queue_exception(vcpu);
++ kvm_inject_exception(vcpu);
+ }
+
+ /* Don't consider new event if we re-injected an event */
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 7b6903bad4085..48e6e2b489241 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -54,7 +54,6 @@ enum scmi_error_codes {
+ SCMI_ERR_GENERIC = -8, /* Generic Error */
+ SCMI_ERR_HARDWARE = -9, /* Hardware Error */
+ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+- SCMI_ERR_MAX
+ };
+
+ /* List of all SCMI devices active in system */
+@@ -176,8 +175,10 @@ static const int scmi_linux_errmap[] = {
+
+ static inline int scmi_to_linux_errno(int errno)
+ {
+- if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
+- return scmi_linux_errmap[-errno];
++ int err_idx = -errno;
++
++ if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
++ return scmi_linux_errmap[err_idx];
+ return -EIO;
+ }
+
+@@ -693,8 +694,9 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
+ struct scmi_xfers_info *info = &sinfo->tx_minfo;
+
+ /* Pre-allocated messages, no more than what hdr.seq can support */
+- if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
+- dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
++ if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
++ dev_err(dev,
++ "Invalid maximum messages %d, not in range [1 - %lu]\n",
+ desc->max_msg, MSG_TOKEN_MAX);
+ return -EINVAL;
+ }
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index bf6b4f71dc58f..defee1d208d22 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -498,8 +498,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ p = buf;
+ while (bytes_left >= sizeof(*p)) {
+ info->speed = le64_to_cpu(p->LinkSpeed);
+- info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
+- info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
++ info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
++ info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
+
+ cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
+ cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
+index 4af318fbda774..ef9498a6e88ac 100644
+--- a/fs/hfs/bfind.c
++++ b/fs/hfs/bfind.c
+@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
+ fd->key = ptr + tree->max_key_len + 2;
+ hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
+ tree->cnid, __builtin_return_address(0));
+- mutex_lock(&tree->tree_lock);
++ switch (tree->cnid) {
++ case HFS_CAT_CNID:
++ mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
++ break;
++ case HFS_EXT_CNID:
++ mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
++ break;
++ case HFS_ATTR_CNID:
++ mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
++ break;
++ default:
++ return -EINVAL;
++ }
+ return 0;
+ }
+
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index b63a4df7327b6..c0a73a6ffb28b 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -15,16 +15,31 @@
+
+ #include "btree.h"
+
+-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
+- int off, int len)
++void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+ {
+ struct page *page;
++ int pagenum;
++ int bytes_read;
++ int bytes_to_read;
++ void *vaddr;
+
+ off += node->page_offset;
+- page = node->page[0];
++ pagenum = off >> PAGE_SHIFT;
++ off &= ~PAGE_MASK; /* compute page offset for the first page */
+
+- memcpy(buf, kmap(page) + off, len);
+- kunmap(page);
++ for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
++ if (pagenum >= node->tree->pages_per_bnode)
++ break;
++ page = node->page[pagenum];
++ bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
++
++ vaddr = kmap_atomic(page);
++ memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
++ kunmap_atomic(vaddr);
++
++ pagenum++;
++ off = 0; /* page offset only applies to the first page */
++ }
+ }
+
+ u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
+diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
+index dcc2aab1b2c43..25ac9a8bb57a7 100644
+--- a/fs/hfs/btree.h
++++ b/fs/hfs/btree.h
+@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
+
+ #define NODE_HASH_SIZE 256
+
++/* B-tree mutex nested subclasses */
++enum hfs_btree_mutex_classes {
++ CATALOG_BTREE_MUTEX,
++ EXTENTS_BTREE_MUTEX,
++ ATTR_BTREE_MUTEX,
++};
++
+ /* A HFS BTree held in memory */
+ struct hfs_btree {
+ struct super_block *sb;
+diff --git a/fs/hfs/super.c b/fs/hfs/super.c
+index c33324686d89e..bcf820ce0e02e 100644
+--- a/fs/hfs/super.c
++++ b/fs/hfs/super.c
+@@ -421,14 +421,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+ if (!res) {
+ if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+ res = -EIO;
+- goto bail;
++ goto bail_hfs_find;
+ }
+ hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+ }
+- if (res) {
+- hfs_find_exit(&fd);
+- goto bail_no_root;
+- }
++ if (res)
++ goto bail_hfs_find;
+ res = -EINVAL;
+ root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
+ hfs_find_exit(&fd);
+@@ -444,6 +442,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+ /* everything's okay */
+ return 0;
+
++bail_hfs_find:
++ hfs_find_exit(&fd);
+ bail_no_root:
+ pr_err("get root inode failed\n");
+ bail:
+diff --git a/fs/internal.h b/fs/internal.h
+index 7651e8b8ef136..61aed95f83d1e 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -52,7 +52,6 @@ extern void __init chrdev_init(void);
+ */
+ extern const struct fs_context_operations legacy_fs_context_ops;
+ extern int parse_monolithic_mount_data(struct fs_context *, void *);
+-extern void fc_drop_locked(struct fs_context *);
+ extern void vfs_clean_context(struct fs_context *fc);
+ extern int finish_clean_context(struct fs_context *fc);
+
+diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
+index c04bad4b2b43f..10c4c1e80124f 100644
+--- a/fs/iomap/seek.c
++++ b/fs/iomap/seek.c
+@@ -140,23 +140,20 @@ loff_t
+ iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+ {
+ loff_t size = i_size_read(inode);
+- loff_t length = size - offset;
+ loff_t ret;
+
+ /* Nothing to be found before or beyond the end of the file. */
+ if (offset < 0 || offset >= size)
+ return -ENXIO;
+
+- while (length > 0) {
+- ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
+- &offset, iomap_seek_hole_actor);
++ while (offset < size) {
++ ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
++ ops, &offset, iomap_seek_hole_actor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ break;
+-
+ offset += ret;
+- length -= ret;
+ }
+
+ return offset;
+@@ -186,27 +183,23 @@ loff_t
+ iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+ {
+ loff_t size = i_size_read(inode);
+- loff_t length = size - offset;
+ loff_t ret;
+
+ /* Nothing to be found before or beyond the end of the file. */
+ if (offset < 0 || offset >= size)
+ return -ENXIO;
+
+- while (length > 0) {
+- ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
+- &offset, iomap_seek_data_actor);
++ while (offset < size) {
++ ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
++ ops, &offset, iomap_seek_data_actor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+- break;
+-
++ return offset;
+ offset += ret;
+- length -= ret;
+ }
+
+- if (length <= 0)
+- return -ENXIO;
+- return offset;
++ /* We've reached the end of the file without finding data */
++ return -ENXIO;
+ }
+ EXPORT_SYMBOL_GPL(iomap_seek_data);
+diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
+index e5c14e2c53d35..ba8a58754340d 100644
+--- a/include/linux/fs_context.h
++++ b/include/linux/fs_context.h
+@@ -134,6 +134,7 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ extern int generic_parse_monolithic(struct fs_context *fc, void *data);
+ extern int vfs_get_tree(struct fs_context *fc);
+ extern void put_fs_context(struct fs_context *fc);
++extern void fc_drop_locked(struct fs_context *fc);
+
+ /*
+ * sget() wrappers to be called from the ->get_tree() op.
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index 86e028388badc..9899b9af7f22f 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -36,7 +36,7 @@ static inline bool net_busy_loop_on(void)
+
+ static inline bool sk_can_busy_loop(const struct sock *sk)
+ {
+- return sk->sk_ll_usec && !signal_pending(current);
++ return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
+ }
+
+ bool sk_busy_loop_end(void *p, unsigned long start_time);
+diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
+index 06e1deeef4640..8c6b04f9f6cbe 100644
+--- a/include/net/sctp/constants.h
++++ b/include/net/sctp/constants.h
+@@ -328,8 +328,7 @@ enum {
+ #define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK
+
+ /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
+- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
+- * 192.88.99.0/24.
++ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
+ * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
+ * addresses.
+ */
+@@ -337,7 +336,6 @@ enum {
+ ((htonl(INADDR_BROADCAST) == a) || \
+ ipv4_is_multicast(a) || \
+ ipv4_is_zeronet(a) || \
+- ipv4_is_test_198(a) || \
+ ipv4_is_anycast_6to4(a))
+
+ /* Flags used for the bind address copy functions. */
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index 9329f725d22b6..2d0ef613ca070 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -1228,9 +1228,7 @@ int cgroup1_get_tree(struct fs_context *fc)
+ ret = cgroup_do_get_tree(fc);
+
+ if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
+- struct super_block *sb = fc->root->d_sb;
+- dput(fc->root);
+- deactivate_locked_super(sb);
++ fc_drop_locked(fc);
+ ret = 1;
+ }
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 8f41499d8257d..6aeb53b4e19f8 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3660,15 +3660,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
+ unbound_release_work);
+ struct workqueue_struct *wq = pwq->wq;
+ struct worker_pool *pool = pwq->pool;
+- bool is_last;
++ bool is_last = false;
+
+- if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+- return;
++ /*
++ * when @pwq is not linked, it doesn't hold any reference to the
++ * @wq, and @wq is invalid to access.
++ */
++ if (!list_empty(&pwq->pwqs_node)) {
++ if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
++ return;
+
+- mutex_lock(&wq->mutex);
+- list_del_rcu(&pwq->pwqs_node);
+- is_last = list_empty(&wq->pwqs);
+- mutex_unlock(&wq->mutex);
++ mutex_lock(&wq->mutex);
++ list_del_rcu(&pwq->pwqs_node);
++ is_last = list_empty(&wq->pwqs);
++ mutex_unlock(&wq->mutex);
++ }
+
+ mutex_lock(&wq_pool_mutex);
+ put_unbound_pool(pool);
+diff --git a/net/802/garp.c b/net/802/garp.c
+index 400bd857e5f57..f6012f8e59f00 100644
+--- a/net/802/garp.c
++++ b/net/802/garp.c
+@@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
+ kfree(attr);
+ }
+
++static void garp_attr_destroy_all(struct garp_applicant *app)
++{
++ struct rb_node *node, *next;
++ struct garp_attr *attr;
++
++ for (node = rb_first(&app->gid);
++ next = node ? rb_next(node) : NULL, node != NULL;
++ node = next) {
++ attr = rb_entry(node, struct garp_attr, node);
++ garp_attr_destroy(app, attr);
++ }
++}
++
+ static int garp_pdu_init(struct garp_applicant *app)
+ {
+ struct sk_buff *skb;
+@@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
+
+ spin_lock_bh(&app->lock);
+ garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
++ garp_attr_destroy_all(app);
+ garp_pdu_queue(app);
+ spin_unlock_bh(&app->lock);
+
+diff --git a/net/802/mrp.c b/net/802/mrp.c
+index 2cfdfbfbb2edb..5b804dbe2d08f 100644
+--- a/net/802/mrp.c
++++ b/net/802/mrp.c
+@@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
+ kfree(attr);
+ }
+
++static void mrp_attr_destroy_all(struct mrp_applicant *app)
++{
++ struct rb_node *node, *next;
++ struct mrp_attr *attr;
++
++ for (node = rb_first(&app->mad);
++ next = node ? rb_next(node) : NULL, node != NULL;
++ node = next) {
++ attr = rb_entry(node, struct mrp_attr, node);
++ mrp_attr_destroy(app, attr);
++ }
++}
++
+ static int mrp_pdu_init(struct mrp_applicant *app)
+ {
+ struct sk_buff *skb;
+@@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+
+ spin_lock_bh(&app->lock);
+ mrp_mad_event(app, MRP_EVENT_TX);
++ mrp_attr_destroy_all(app);
+ mrp_pdu_queue(app);
+ spin_unlock_bh(&app->lock);
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 68f84fac63e0b..452883b28abab 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1098,7 +1098,7 @@ set_rcvbuf:
+ if (val < 0)
+ ret = -EINVAL;
+ else
+- sk->sk_ll_usec = val;
++ WRITE_ONCE(sk->sk_ll_usec, val);
+ }
+ break;
+ #endif
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 33444d9856819..fc913f09606db 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -59,10 +59,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+ {
+ struct dst_entry *dst = skb_dst(skb);
+ struct net_device *dev = dst->dev;
++ unsigned int hh_len = LL_RESERVED_SPACE(dev);
++ int delta = hh_len - skb_headroom(skb);
+ const struct in6_addr *nexthop;
+ struct neighbour *neigh;
+ int ret;
+
++ /* Be paranoid, rather than too clever. */
++ if (unlikely(delta > 0) && dev->header_ops) {
++ /* pskb_expand_head() might crash, if skb is shared */
++ if (skb_shared(skb)) {
++ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
++
++ if (likely(nskb)) {
++ if (skb->sk)
++ skb_set_owner_w(nskb, skb->sk);
++ consume_skb(skb);
++ } else {
++ kfree_skb(skb);
++ }
++ skb = nskb;
++ }
++ if (skb &&
++ pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
++ kfree_skb(skb);
++ skb = NULL;
++ }
++ if (!skb) {
++ IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
++ return -ENOMEM;
++ }
++ }
++
+ if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
+ struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 7f8702abc7bfe..bb370a7948f42 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -397,7 +397,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
+ retval = SCTP_SCOPE_LINK;
+ } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
+ ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
+- ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
++ ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
++ ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
+ retval = SCTP_SCOPE_PRIVATE;
+ } else {
+ retval = SCTP_SCOPE_GLOBAL;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 9f96826eb3ba0..52ee3a9bb7093 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1512,6 +1512,53 @@ out:
+ return err;
+ }
+
++static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
++{
++ scm->fp = scm_fp_dup(UNIXCB(skb).fp);
++
++ /*
++ * Garbage collection of unix sockets starts by selecting a set of
++ * candidate sockets which have reference only from being in flight
++ * (total_refs == inflight_refs). This condition is checked once during
++ * the candidate collection phase, and candidates are marked as such, so
++ * that non-candidates can later be ignored. While inflight_refs is
++ * protected by unix_gc_lock, total_refs (file count) is not, hence this
++ * is an instantaneous decision.
++ *
++ * Once a candidate, however, the socket must not be reinstalled into a
++ * file descriptor while the garbage collection is in progress.
++ *
++ * If the above conditions are met, then the directed graph of
++ * candidates (*) does not change while unix_gc_lock is held.
++ *
++ * Any operations that changes the file count through file descriptors
++ * (dup, close, sendmsg) does not change the graph since candidates are
++ * not installed in fds.
++ *
++ * Dequeing a candidate via recvmsg would install it into an fd, but
++ * that takes unix_gc_lock to decrement the inflight count, so it's
++ * serialized with garbage collection.
++ *
++ * MSG_PEEK is special in that it does not change the inflight count,
++ * yet does install the socket into an fd. The following lock/unlock
++ * pair is to ensure serialization with garbage collection. It must be
++ * done between incrementing the file count and installing the file into
++ * an fd.
++ *
++ * If garbage collection starts after the barrier provided by the
++ * lock/unlock, then it will see the elevated refcount and not mark this
++ * as a candidate. If a garbage collection is already in progress
++ * before the file count was incremented, then the lock/unlock pair will
++ * ensure that garbage collection is finished before progressing to
++ * installing the fd.
++ *
++ * (*) A -> B where B is on the queue of A or B is on the queue of C
++ * which is on the queue of listening socket A.
++ */
++ spin_lock(&unix_gc_lock);
++ spin_unlock(&unix_gc_lock);
++}
++
+ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
+ {
+ int err = 0;
+@@ -2137,7 +2184,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ sk_peek_offset_fwd(sk, size);
+
+ if (UNIXCB(skb).fp)
+- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
++ unix_peek_fds(&scm, skb);
+ }
+ err = (flags & MSG_TRUNC) ? skb->len - skip : size;
+
+@@ -2378,7 +2425,7 @@ unlock:
+ /* It is questionable, see note in unix_dgram_recvmsg.
+ */
+ if (UNIXCB(skb).fp)
+- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
++ unix_peek_fds(&scm, skb);
+
+ sk_peek_offset_fwd(sk, chunk);
+
+diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
+index 812fc97bb1a97..add0ef37ba967 100644
+--- a/tools/scripts/Makefile.include
++++ b/tools/scripts/Makefile.include
+@@ -39,8 +39,6 @@ EXTRA_WARNINGS += -Wundef
+ EXTRA_WARNINGS += -Wwrite-strings
+ EXTRA_WARNINGS += -Wformat
+
+-CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
+-
+ # Makefiles suck: This macro sets a default value of $(2) for the
+ # variable named by $(1), unless the variable has been set by
+ # environment or command line. This is necessary for CC and AR
+@@ -52,12 +50,22 @@ define allow-override
+ $(eval $(1) = $(2)))
+ endef
+
++ifneq ($(LLVM),)
++$(call allow-override,CC,clang)
++$(call allow-override,AR,llvm-ar)
++$(call allow-override,LD,ld.lld)
++$(call allow-override,CXX,clang++)
++$(call allow-override,STRIP,llvm-strip)
++else
+ # Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
+ $(call allow-override,CC,$(CROSS_COMPILE)gcc)
+ $(call allow-override,AR,$(CROSS_COMPILE)ar)
+ $(call allow-override,LD,$(CROSS_COMPILE)ld)
+ $(call allow-override,CXX,$(CROSS_COMPILE)g++)
+ $(call allow-override,STRIP,$(CROSS_COMPILE)strip)
++endif
++
++CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
+
+ ifneq ($(LLVM),)
+ HOSTAR ?= llvm-ar
+diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
+index 17ac167823a6d..9ba7feffe344b 100644
+--- a/tools/testing/selftests/vm/userfaultfd.c
++++ b/tools/testing/selftests/vm/userfaultfd.c
+@@ -141,7 +141,7 @@ static void anon_allocate_area(void **alloc_area)
+ {
+ *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+- if (*alloc_area == MAP_FAILED)
++ if (*alloc_area == MAP_FAILED) {
+ fprintf(stderr, "mmap of anonymous memory failed");
+ *alloc_area = NULL;
+ }