From: Alice Ferrazzi <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Sat, 31 Jul 2021 10:30:37
Message-Id: 1627727415.fffb3b021d5afffa2a7eba74d2e5fc5d114650ac.alicef@gentoo
commit: fffb3b021d5afffa2a7eba74d2e5fc5d114650ac
Author: Alice Ferrazzi <alicef@gentoo.org>
AuthorDate: Sat Jul 31 10:29:25 2021 +0000
Commit: Alice Ferrazzi <alicef@gentoo.org>
CommitDate: Sat Jul 31 10:30:15 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fffb3b02

Linux patch 5.10.55

Signed-off-by: Alice Ferrazzi <alicef@gentoo.org>

0000_README | 4 +
1054_linux-5.10.55.patch | 730 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 734 insertions(+)

diff --git a/0000_README b/0000_README
index e70652a..f1cdfa7 100644
--- a/0000_README
+++ b/0000_README
@@ -259,6 +259,10 @@ Patch: 1053_linux-5.10.54.patch
From: http://www.kernel.org
Desc: Linux 5.10.54

+Patch: 1054_linux-5.10.55.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.55
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1054_linux-5.10.55.patch b/1054_linux-5.10.55.patch
new file mode 100644
index 0000000..25c76f1
--- /dev/null
+++ b/1054_linux-5.10.55.patch
@@ -0,0 +1,730 @@
+diff --git a/Makefile b/Makefile
+index eb01d3028b020..7fb6405f3b60f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 54
++SUBLEVEL = 55
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
+index 37bd41ff8dffa..151c0220047dd 100644
+--- a/arch/arm/boot/dts/versatile-ab.dts
++++ b/arch/arm/boot/dts/versatile-ab.dts
+@@ -195,16 +195,15 @@
+ #size-cells = <1>;
+ ranges;
+
+- vic: intc@10140000 {
++ vic: interrupt-controller@10140000 {
+ compatible = "arm,versatile-vic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x10140000 0x1000>;
+- clear-mask = <0xffffffff>;
+ valid-mask = <0xffffffff>;
+ };
+
+- sic: intc@10003000 {
++ sic: interrupt-controller@10003000 {
+ compatible = "arm,versatile-sic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
+index 06a0fdf24026c..e7e751a858d81 100644
+--- a/arch/arm/boot/dts/versatile-pb.dts
++++ b/arch/arm/boot/dts/versatile-pb.dts
+@@ -7,7 +7,7 @@
+
+ amba {
+ /* The Versatile PB is using more SIC IRQ lines than the AB */
+- sic: intc@10003000 {
++ sic: interrupt-controller@10003000 {
+ clear-mask = <0xffffffff>;
+ /*
+ * Valid interrupt lines mask according to
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 800914e9e12b9..3ad6f77ea1c45 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -541,8 +541,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
+
+ if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
+ queue:
+- if (has_error && !is_protmode(vcpu))
+- has_error = false;
+ if (reinject) {
+ /*
+ * On vmentry, vcpu->arch.exception.pending is only
+@@ -8265,6 +8263,13 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+ kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr);
+ }
+
++static void kvm_inject_exception(struct kvm_vcpu *vcpu)
++{
++ if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
++ vcpu->arch.exception.error_code = false;
++ kvm_x86_ops.queue_exception(vcpu);
++}
++
+ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
+ {
+ int r;
+@@ -8273,7 +8278,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
+ /* try to reinject previous events if any */
+
+ if (vcpu->arch.exception.injected) {
+- kvm_x86_ops.queue_exception(vcpu);
++ kvm_inject_exception(vcpu);
+ can_inject = false;
+ }
+ /*
+@@ -8336,7 +8341,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
+ }
+ }
+
+- kvm_x86_ops.queue_exception(vcpu);
++ kvm_inject_exception(vcpu);
+ can_inject = false;
+ }
+
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index af4560dab6b40..8c9663258d5d4 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -43,7 +43,6 @@ enum scmi_error_codes {
+ SCMI_ERR_GENERIC = -8, /* Generic Error */
+ SCMI_ERR_HARDWARE = -9, /* Hardware Error */
+ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+- SCMI_ERR_MAX
+ };
+
+ /* List of all SCMI devices active in system */
+@@ -118,8 +117,10 @@ static const int scmi_linux_errmap[] = {
+
+ static inline int scmi_to_linux_errno(int errno)
+ {
+- if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
+- return scmi_linux_errmap[-errno];
++ int err_idx = -errno;
++
++ if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
++ return scmi_linux_errmap[err_idx];
+ return -EIO;
+ }
+
+@@ -614,8 +615,9 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
+ const struct scmi_desc *desc = sinfo->desc;
+
+ /* Pre-allocated messages, no more than what hdr.seq can support */
+- if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
+- dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
++ if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
++ dev_err(dev,
++ "Invalid maximum messages %d, not in range [1 - %lu]\n",
+ desc->max_msg, MSG_TOKEN_MAX);
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
+index 1da0e277c5111..ce9d127edbb5d 100644
+--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
++++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
+@@ -147,6 +147,9 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev,
+ struct drm_mm *mm = &rman->mm;
+ int ret;
+
++ if (!man)
++ return 0;
++
+ ttm_resource_manager_set_used(man, false);
+
+ ret = ttm_resource_manager_force_list_clean(bdev, man);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index b0b06eb86edfb..81e0877237770 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -497,8 +497,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ p = buf;
+ while (bytes_left >= sizeof(*p)) {
+ info->speed = le64_to_cpu(p->LinkSpeed);
+- info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
+- info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
++ info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
++ info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
+
+ cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
+ cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
+index 4af318fbda774..ef9498a6e88ac 100644
+--- a/fs/hfs/bfind.c
++++ b/fs/hfs/bfind.c
+@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
+ fd->key = ptr + tree->max_key_len + 2;
+ hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
+ tree->cnid, __builtin_return_address(0));
+- mutex_lock(&tree->tree_lock);
++ switch (tree->cnid) {
++ case HFS_CAT_CNID:
++ mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
++ break;
++ case HFS_EXT_CNID:
++ mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
++ break;
++ case HFS_ATTR_CNID:
++ mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
++ break;
++ default:
++ return -EINVAL;
++ }
+ return 0;
+ }
+
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index b63a4df7327b6..c0a73a6ffb28b 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -15,16 +15,31 @@
+
+ #include "btree.h"
+
+-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
+- int off, int len)
++void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+ {
+ struct page *page;
++ int pagenum;
++ int bytes_read;
++ int bytes_to_read;
++ void *vaddr;
+
+ off += node->page_offset;
+- page = node->page[0];
++ pagenum = off >> PAGE_SHIFT;
++ off &= ~PAGE_MASK; /* compute page offset for the first page */
+
+- memcpy(buf, kmap(page) + off, len);
+- kunmap(page);
++ for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
++ if (pagenum >= node->tree->pages_per_bnode)
++ break;
++ page = node->page[pagenum];
++ bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
++
++ vaddr = kmap_atomic(page);
++ memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
++ kunmap_atomic(vaddr);
++
++ pagenum++;
++ off = 0; /* page offset only applies to the first page */
++ }
+ }
+
+ u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
+diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
+index 4ba45caf59392..0e6baee932453 100644
+--- a/fs/hfs/btree.h
++++ b/fs/hfs/btree.h
+@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
+
+ #define NODE_HASH_SIZE 256
+
++/* B-tree mutex nested subclasses */
++enum hfs_btree_mutex_classes {
++ CATALOG_BTREE_MUTEX,
++ EXTENTS_BTREE_MUTEX,
++ ATTR_BTREE_MUTEX,
++};
++
+ /* A HFS BTree held in memory */
+ struct hfs_btree {
+ struct super_block *sb;
+diff --git a/fs/hfs/super.c b/fs/hfs/super.c
+index 44d07c9e3a7f0..12d9bae393631 100644
+--- a/fs/hfs/super.c
++++ b/fs/hfs/super.c
+@@ -420,14 +420,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+ if (!res) {
+ if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+ res = -EIO;
+- goto bail;
++ goto bail_hfs_find;
+ }
+ hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+ }
+- if (res) {
+- hfs_find_exit(&fd);
+- goto bail_no_root;
+- }
++ if (res)
++ goto bail_hfs_find;
+ res = -EINVAL;
+ root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
+ hfs_find_exit(&fd);
+@@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+ /* everything's okay */
+ return 0;
+
++bail_hfs_find:
++ hfs_find_exit(&fd);
+ bail_no_root:
+ pr_err("get root inode failed\n");
+ bail:
+diff --git a/fs/internal.h b/fs/internal.h
+index a7cd0f64faa4a..5155f6ce95c79 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -64,7 +64,6 @@ extern void __init chrdev_init(void);
+ */
+ extern const struct fs_context_operations legacy_fs_context_ops;
+ extern int parse_monolithic_mount_data(struct fs_context *, void *);
+-extern void fc_drop_locked(struct fs_context *);
+ extern void vfs_clean_context(struct fs_context *fc);
+ extern int finish_clean_context(struct fs_context *fc);
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 07f08c424d17b..525b44140d7a3 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -6266,7 +6266,6 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
+ if (prev) {
+ io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
+ io_put_req_deferred(prev, 1);
+- io_put_req_deferred(req, 1);
+ } else {
+ io_cqring_add_event(req, -ETIME, 0);
+ io_put_req_deferred(req, 1);
+diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
+index 107ee80c35683..220c306167f79 100644
+--- a/fs/iomap/seek.c
++++ b/fs/iomap/seek.c
+@@ -140,23 +140,20 @@ loff_t
+ iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+ {
+ loff_t size = i_size_read(inode);
+- loff_t length = size - offset;
+ loff_t ret;
+
+ /* Nothing to be found before or beyond the end of the file. */
+ if (offset < 0 || offset >= size)
+ return -ENXIO;
+
+- while (length > 0) {
+- ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
+- &offset, iomap_seek_hole_actor);
++ while (offset < size) {
++ ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
++ ops, &offset, iomap_seek_hole_actor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ break;
+-
+ offset += ret;
+- length -= ret;
+ }
+
+ return offset;
+@@ -186,27 +183,23 @@ loff_t
+ iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+ {
+ loff_t size = i_size_read(inode);
+- loff_t length = size - offset;
+ loff_t ret;
+
+ /* Nothing to be found before or beyond the end of the file. */
+ if (offset < 0 || offset >= size)
+ return -ENXIO;
+
+- while (length > 0) {
+- ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
+- &offset, iomap_seek_data_actor);
++ while (offset < size) {
++ ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
++ ops, &offset, iomap_seek_data_actor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+- break;
+-
++ return offset;
+ offset += ret;
+- length -= ret;
+ }
+
+- if (length <= 0)
+- return -ENXIO;
+- return offset;
++ /* We've reached the end of the file without finding data */
++ return -ENXIO;
+ }
+ EXPORT_SYMBOL_GPL(iomap_seek_data);
+diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
+index 37e1e8f7f08da..5b44b0195a28a 100644
+--- a/include/linux/fs_context.h
++++ b/include/linux/fs_context.h
+@@ -139,6 +139,7 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ extern int generic_parse_monolithic(struct fs_context *fc, void *data);
+ extern int vfs_get_tree(struct fs_context *fc);
+ extern void put_fs_context(struct fs_context *fc);
++extern void fc_drop_locked(struct fs_context *fc);
+
+ /*
+ * sget() wrappers to be called from the ->get_tree() op.
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index b001fa91c14ea..716b7c5f6fdd9 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -36,7 +36,7 @@ static inline bool net_busy_loop_on(void)
+
+ static inline bool sk_can_busy_loop(const struct sock *sk)
+ {
+- return sk->sk_ll_usec && !signal_pending(current);
++ return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
+ }
+
+ bool sk_busy_loop_end(void *p, unsigned long start_time);
+diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
+index 122d9e2d8dfde..1ad049ac2add4 100644
+--- a/include/net/sctp/constants.h
++++ b/include/net/sctp/constants.h
+@@ -340,8 +340,7 @@ enum {
+ #define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK
+
+ /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
+- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
+- * 192.88.99.0/24.
++ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
+ * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
+ * addresses.
+ */
+@@ -349,7 +348,6 @@ enum {
+ ((htonl(INADDR_BROADCAST) == a) || \
+ ipv4_is_multicast(a) || \
+ ipv4_is_zeronet(a) || \
+- ipv4_is_test_198(a) || \
+ ipv4_is_anycast_6to4(a))
+
+ /* Flags used for the bind address copy functions. */
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index 04eb28f7735fb..7f71b54c06c5f 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -1225,9 +1225,7 @@ int cgroup1_get_tree(struct fs_context *fc)
+ ret = cgroup_do_get_tree(fc);
+
+ if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
+- struct super_block *sb = fc->root->d_sb;
+- dput(fc->root);
+- deactivate_locked_super(sb);
++ fc_drop_locked(fc);
+ ret = 1;
+ }
+
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 73bbe792fe1e8..b338f514ee5aa 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -879,10 +879,9 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
+ in_qs = likely(!t->trc_reader_nesting);
+ }
+
+- // Mark as checked. Because this is called from the grace-period
+- // kthread, also remove the task from the holdout list.
++ // Mark as checked so that the grace-period kthread will
++ // remove it from the holdout list.
+ t->trc_reader_checked = true;
+- trc_del_holdout(t);
+
+ if (in_qs)
+ return true; // Already in quiescent state, done!!!
+@@ -909,7 +908,6 @@ static void trc_wait_for_one_reader(struct task_struct *t,
+ // The current task had better be in a quiescent state.
+ if (t == current) {
+ t->trc_reader_checked = true;
+- trc_del_holdout(t);
+ WARN_ON_ONCE(t->trc_reader_nesting);
+ return;
+ }
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index b23f7d1044be7..51d19fc71e616 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3670,15 +3670,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
+ unbound_release_work);
+ struct workqueue_struct *wq = pwq->wq;
+ struct worker_pool *pool = pwq->pool;
+- bool is_last;
++ bool is_last = false;
+
+- if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+- return;
++ /*
++ * when @pwq is not linked, it doesn't hold any reference to the
++ * @wq, and @wq is invalid to access.
++ */
++ if (!list_empty(&pwq->pwqs_node)) {
++ if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
++ return;
+
+- mutex_lock(&wq->mutex);
+- list_del_rcu(&pwq->pwqs_node);
+- is_last = list_empty(&wq->pwqs);
+- mutex_unlock(&wq->mutex);
++ mutex_lock(&wq->mutex);
++ list_del_rcu(&pwq->pwqs_node);
++ is_last = list_empty(&wq->pwqs);
++ mutex_unlock(&wq->mutex);
++ }
+
+ mutex_lock(&wq_pool_mutex);
+ put_unbound_pool(pool);
+diff --git a/net/802/garp.c b/net/802/garp.c
+index 400bd857e5f57..f6012f8e59f00 100644
+--- a/net/802/garp.c
++++ b/net/802/garp.c
+@@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
+ kfree(attr);
+ }
+
++static void garp_attr_destroy_all(struct garp_applicant *app)
++{
++ struct rb_node *node, *next;
++ struct garp_attr *attr;
++
++ for (node = rb_first(&app->gid);
++ next = node ? rb_next(node) : NULL, node != NULL;
++ node = next) {
++ attr = rb_entry(node, struct garp_attr, node);
++ garp_attr_destroy(app, attr);
++ }
++}
++
+ static int garp_pdu_init(struct garp_applicant *app)
+ {
+ struct sk_buff *skb;
+@@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
+
+ spin_lock_bh(&app->lock);
+ garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
++ garp_attr_destroy_all(app);
+ garp_pdu_queue(app);
+ spin_unlock_bh(&app->lock);
+
+diff --git a/net/802/mrp.c b/net/802/mrp.c
+index bea6e43d45a0d..35e04cc5390c4 100644
+--- a/net/802/mrp.c
++++ b/net/802/mrp.c
+@@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
+ kfree(attr);
+ }
+
++static void mrp_attr_destroy_all(struct mrp_applicant *app)
++{
++ struct rb_node *node, *next;
++ struct mrp_attr *attr;
++
++ for (node = rb_first(&app->mad);
++ next = node ? rb_next(node) : NULL, node != NULL;
++ node = next) {
++ attr = rb_entry(node, struct mrp_attr, node);
++ mrp_attr_destroy(app, attr);
++ }
++}
++
+ static int mrp_pdu_init(struct mrp_applicant *app)
+ {
+ struct sk_buff *skb;
+@@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+
+ spin_lock_bh(&app->lock);
+ mrp_mad_event(app, MRP_EVENT_TX);
++ mrp_attr_destroy_all(app);
+ mrp_pdu_queue(app);
+ spin_unlock_bh(&app->lock);
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 7de51ea15cdfc..d638c5361ed29 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1164,7 +1164,7 @@ set_sndbuf:
+ if (val < 0)
+ ret = -EINVAL;
+ else
+- sk->sk_ll_usec = val;
++ WRITE_ONCE(sk->sk_ll_usec, val);
+ }
+ break;
+ #endif
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 341d0c7acc8bf..72a673a43a754 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -60,10 +60,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+ {
+ struct dst_entry *dst = skb_dst(skb);
+ struct net_device *dev = dst->dev;
++ unsigned int hh_len = LL_RESERVED_SPACE(dev);
++ int delta = hh_len - skb_headroom(skb);
+ const struct in6_addr *nexthop;
+ struct neighbour *neigh;
+ int ret;
+
++ /* Be paranoid, rather than too clever. */
++ if (unlikely(delta > 0) && dev->header_ops) {
++ /* pskb_expand_head() might crash, if skb is shared */
++ if (skb_shared(skb)) {
++ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
++
++ if (likely(nskb)) {
++ if (skb->sk)
++ skb_set_owner_w(nskb, skb->sk);
++ consume_skb(skb);
++ } else {
++ kfree_skb(skb);
++ }
++ skb = nskb;
++ }
++ if (skb &&
++ pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
++ kfree_skb(skb);
++ skb = NULL;
++ }
++ if (!skb) {
++ IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
++ return -ENOMEM;
++ }
++ }
++
+ if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
+ struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 47fb87ce489fc..940f1e257a90a 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -397,7 +397,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
+ retval = SCTP_SCOPE_LINK;
+ } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
+ ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
+- ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
++ ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
++ ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
+ retval = SCTP_SCOPE_PRIVATE;
+ } else {
+ retval = SCTP_SCOPE_GLOBAL;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 39be4b52329b5..37ffa7725cee2 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1521,6 +1521,53 @@ out:
+ return err;
+ }
+
++static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
++{
++ scm->fp = scm_fp_dup(UNIXCB(skb).fp);
++
++ /*
++ * Garbage collection of unix sockets starts by selecting a set of
++ * candidate sockets which have reference only from being in flight
++ * (total_refs == inflight_refs). This condition is checked once during
++ * the candidate collection phase, and candidates are marked as such, so
++ * that non-candidates can later be ignored. While inflight_refs is
++ * protected by unix_gc_lock, total_refs (file count) is not, hence this
++ * is an instantaneous decision.
++ *
++ * Once a candidate, however, the socket must not be reinstalled into a
++ * file descriptor while the garbage collection is in progress.
++ *
++ * If the above conditions are met, then the directed graph of
++ * candidates (*) does not change while unix_gc_lock is held.
++ *
++ * Any operations that changes the file count through file descriptors
++ * (dup, close, sendmsg) does not change the graph since candidates are
++ * not installed in fds.
++ *
++ * Dequeing a candidate via recvmsg would install it into an fd, but
++ * that takes unix_gc_lock to decrement the inflight count, so it's
++ * serialized with garbage collection.
++ *
++ * MSG_PEEK is special in that it does not change the inflight count,
++ * yet does install the socket into an fd. The following lock/unlock
++ * pair is to ensure serialization with garbage collection. It must be
++ * done between incrementing the file count and installing the file into
++ * an fd.
++ *
++ * If garbage collection starts after the barrier provided by the
++ * lock/unlock, then it will see the elevated refcount and not mark this
++ * as a candidate. If a garbage collection is already in progress
++ * before the file count was incremented, then the lock/unlock pair will
++ * ensure that garbage collection is finished before progressing to
++ * installing the fd.
++ *
++ * (*) A -> B where B is on the queue of A or B is on the queue of C
++ * which is on the queue of listening socket A.
++ */
++ spin_lock(&unix_gc_lock);
++ spin_unlock(&unix_gc_lock);
++}
++
+ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
+ {
+ int err = 0;
+@@ -2170,7 +2217,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ sk_peek_offset_fwd(sk, size);
+
+ if (UNIXCB(skb).fp)
+- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
++ unix_peek_fds(&scm, skb);
+ }
+ err = (flags & MSG_TRUNC) ? skb->len - skip : size;
+
+@@ -2413,7 +2460,7 @@ unlock:
+ /* It is questionable, see note in unix_dgram_recvmsg.
+ */
+ if (UNIXCB(skb).fp)
+- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
++ unix_peek_fds(&scm, skb);
+
+ sk_peek_offset_fwd(sk, chunk);
+
+diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
+index 1358e89cdf7d6..a3f99ef6b11ba 100644
+--- a/tools/scripts/Makefile.include
++++ b/tools/scripts/Makefile.include
+@@ -39,8 +39,6 @@ EXTRA_WARNINGS += -Wundef
+ EXTRA_WARNINGS += -Wwrite-strings
+ EXTRA_WARNINGS += -Wformat
+
+-CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
+-
+ # Makefiles suck: This macro sets a default value of $(2) for the
+ # variable named by $(1), unless the variable has been set by
+ # environment or command line. This is necessary for CC and AR
+@@ -52,12 +50,22 @@ define allow-override
+ $(eval $(1) = $(2)))
+ endef
+
++ifneq ($(LLVM),)
++$(call allow-override,CC,clang)
++$(call allow-override,AR,llvm-ar)
++$(call allow-override,LD,ld.lld)
++$(call allow-override,CXX,clang++)
++$(call allow-override,STRIP,llvm-strip)
++else
+ # Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
+ $(call allow-override,CC,$(CROSS_COMPILE)gcc)
+ $(call allow-override,AR,$(CROSS_COMPILE)ar)
+ $(call allow-override,LD,$(CROSS_COMPILE)ld)
+ $(call allow-override,CXX,$(CROSS_COMPILE)g++)
+ $(call allow-override,STRIP,$(CROSS_COMPILE)strip)
++endif
++
++CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
+
+ ifneq ($(LLVM),)
+ HOSTAR ?= llvm-ar