From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sat, 31 Jul 2021 10:34:30
Message-Id: 1627727649.af04b07db818faf519780e2e77f637a6419c5ab6.alicef@gentoo
commit: af04b07db818faf519780e2e77f637a6419c5ab6
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Jul 31 10:33:59 2021 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Jul 31 10:34:09 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=af04b07d

Linux patch 4.19.200

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

0000_README | 4 +
1199_linux-4.19.200.patch | 893 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 897 insertions(+)

diff --git a/0000_README b/0000_README
index f1619e0..58e7859 100644
--- a/0000_README
+++ b/0000_README
@@ -835,6 +835,10 @@ Patch: 1198_linux-4.19.199.patch
From: https://www.kernel.org
Desc: Linux 4.19.199

+Patch: 1199_linux-4.19.200.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.200
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1199_linux-4.19.200.patch b/1199_linux-4.19.200.patch
new file mode 100644
index 0000000..da6c8c4
--- /dev/null
+++ b/1199_linux-4.19.200.patch
@@ -0,0 +1,893 @@
+diff --git a/Makefile b/Makefile
+index f3ad63a089a18..a4ea351c4e5d6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 199
++SUBLEVEL = 200
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
+index 6f4f60ba5429c..990b7ef1800e4 100644
+--- a/arch/arm/boot/dts/versatile-ab.dts
++++ b/arch/arm/boot/dts/versatile-ab.dts
+@@ -192,16 +192,15 @@
+ #size-cells = <1>;
+ ranges;
+
+- vic: intc@10140000 {
++ vic: interrupt-controller@10140000 {
+ compatible = "arm,versatile-vic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x10140000 0x1000>;
+- clear-mask = <0xffffffff>;
+ valid-mask = <0xffffffff>;
+ };
+
+- sic: intc@10003000 {
++ sic: interrupt-controller@10003000 {
+ compatible = "arm,versatile-sic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
+index 06a0fdf24026c..e7e751a858d81 100644
+--- a/arch/arm/boot/dts/versatile-pb.dts
++++ b/arch/arm/boot/dts/versatile-pb.dts
+@@ -7,7 +7,7 @@
+
+ amba {
+ /* The Versatile PB is using more SIC IRQ lines than the AB */
+- sic: intc@10003000 {
++ sic: interrupt-controller@10003000 {
+ clear-mask = <0xffffffff>;
+ /*
+ * Valid interrupt lines mask according to
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 43fb4e296d8de..9cfc669b4a243 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -416,8 +416,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
+
+ if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
+ queue:
+- if (has_error && !is_protmode(vcpu))
+- has_error = false;
+ if (reinject) {
+ /*
+ * On vmentry, vcpu->arch.exception.pending is only
+@@ -7114,6 +7112,13 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+ kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
+ }
+
++static void kvm_inject_exception(struct kvm_vcpu *vcpu)
++{
++ if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
++ vcpu->arch.exception.error_code = false;
++ kvm_x86_ops->queue_exception(vcpu);
++}
++
+ static int inject_pending_event(struct kvm_vcpu *vcpu)
+ {
+ int r;
+@@ -7121,7 +7126,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
+ /* try to reinject previous events if any */
+
+ if (vcpu->arch.exception.injected)
+- kvm_x86_ops->queue_exception(vcpu);
++ kvm_inject_exception(vcpu);
+ /*
+ * Do not inject an NMI or interrupt if there is a pending
+ * exception. Exceptions and interrupts are recognized at
+@@ -7175,7 +7180,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
+ kvm_update_dr7(vcpu);
+ }
+
+- kvm_x86_ops->queue_exception(vcpu);
++ kvm_inject_exception(vcpu);
+ }
+
+ /* Don't consider new event if we re-injected an event */
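
The x86.c hunks above move the "drop the error code outside protected mode" check from queueing time (kvm_multiple_exception) to injection time (the new kvm_inject_exception helper), because the guest can switch CPU modes between queueing an exception and injecting it. A minimal userspace sketch of that ordering, with illustrative names rather than KVM's real structures:

/* Toy model of the ordering fix: the protected-mode check must run at
 * injection time, not queueing time, since the mode can change in
 * between. Names are illustrative, not KVM's. cc -o inject inject.c */
#include <stdbool.h>
#include <stdio.h>

struct vcpu {
	bool protmode;          /* is the guest in protected mode now? */
	bool has_error_code;    /* does the queued exception carry one? */
	unsigned int error_code;
};

/* Real-mode exceptions never deliver an error code, so drop it here,
 * using the mode at the moment of injection. */
static void inject_exception(struct vcpu *v)
{
	if (v->has_error_code && !v->protmode)
		v->has_error_code = false;
	printf("inject: error code %s\n",
	       v->has_error_code ? "delivered" : "suppressed");
}

int main(void)
{
	struct vcpu v = { .protmode = true, .has_error_code = true,
			  .error_code = 0x10 };

	/* Exception queued while in protected mode... */
	v.protmode = false;     /* ...guest drops to real mode... */
	inject_exception(&v);   /* ...so the check must run here. */
	return 0;
}
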
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index effc4c17e0fb9..af5139eb96b5d 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -48,7 +48,6 @@ enum scmi_error_codes {
+ SCMI_ERR_GENERIC = -8, /* Generic Error */
+ SCMI_ERR_HARDWARE = -9, /* Hardware Error */
+ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+- SCMI_ERR_MAX
+ };
+
+ /* List of all SCMI devices active in system */
+@@ -168,8 +167,10 @@ static const int scmi_linux_errmap[] = {
+
+ static inline int scmi_to_linux_errno(int errno)
+ {
+- if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
+- return scmi_linux_errmap[-errno];
++ int err_idx = -errno;
++
++ if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
++ return scmi_linux_errmap[err_idx];
+ return -EIO;
+ }
+
+@@ -628,8 +629,9 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
+ struct scmi_xfers_info *info = &sinfo->minfo;
+
+ /* Pre-allocated messages, no more than what hdr.seq can support */
+- if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
+- dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
++ if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
++ dev_err(dev,
++ "Invalid maximum messages %d, not in range [1 - %lu]\n",
+ desc->max_msg, MSG_TOKEN_MAX);
+ return -EINVAL;
+ }
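
Note why the sentinel in the scmi_to_linux_errno() hunk had to go: SCMI_ERR_MAX carried no explicit value, so after SCMI_ERR_PROTOCOL = -10 it auto-incremented to -9, and the old range check misclassified the last valid codes. The fix negates once and bounds-checks the index against the table size. A standalone sketch of the fixed shape (table contents here are placeholders, not the real SCMI map):

/* Bounds-checked status-to-errno mapping, per the patch's shape.
 * Table entries are placeholders. cc -o errmap errmap.c */
#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Firmware status codes are 0 or negative, like SCMI's. */
static const int linux_errmap[] = {
	0,		/* 0: success */
	-EINVAL,	/* -1: placeholder */
	-EACCES,	/* -2: placeholder */
	-ENOENT,	/* -3: placeholder */
};

static int to_linux_errno(int status)
{
	int idx = -status;

	/* One negation, then a single in-bounds check: no sentinel
	 * needed, and idx == 0 (success) maps cleanly too. */
	if (idx >= 0 && idx < (int)ARRAY_SIZE(linux_errmap))
		return linux_errmap[idx];
	return -EIO;
}

int main(void)
{
	printf("%d %d %d\n",
	       to_linux_errno(0),	/* 0 */
	       to_linux_errno(-3),	/* -ENOENT */
	       to_linux_errno(-99));	/* -EIO: out of range */
	return 0;
}

The same shape applies wherever firmware status codes index a fixed table: derive the index once, then make ARRAY_SIZE the single source of truth for the bound.
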
+diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
+index 714a97f913199..ae9be792693bf 100644
+--- a/drivers/iio/dac/ds4424.c
++++ b/drivers/iio/dac/ds4424.c
+@@ -236,12 +236,6 @@ static int ds4424_probe(struct i2c_client *client,
+ indio_dev->dev.of_node = client->dev.of_node;
+ indio_dev->dev.parent = &client->dev;
+
+- if (!client->dev.of_node) {
+- dev_err(&client->dev,
+- "Not found DT.\n");
+- return -ENODEV;
+- }
+-
+ data->vcc_reg = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(data->vcc_reg)) {
+ dev_err(&client->dev,
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 5a14f518cd979..61955a7c838b4 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -386,8 +386,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ p = buf;
+ while (bytes_left >= sizeof(*p)) {
+ info->speed = le64_to_cpu(p->LinkSpeed);
+- info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
+- info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
++ info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
++ info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
+
+ cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
+ cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
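
The smb2ops.c hunk adds "? 1 : 0" because rdma_capable and rss_capable are single-bit fields: assigning them a masked value whose flag bit sits above bit 0 keeps only bit 0 and silently stores 0. (This assumes one-bit bitfields, which is what the idiom implies.) A self-contained demonstration:

/* Why the "? 1 : 0" matters: assigning a masked flag straight into a
 * one-bit field keeps only bit 0. cc -o bitflag bitflag.c */
#include <stdio.h>

#define RSS_CAPABLE  0x01
#define RDMA_CAPABLE 0x02

struct iface_info {
	unsigned int rdma_capable : 1;
	unsigned int rss_capable : 1;
};

int main(void)
{
	unsigned int capability = RDMA_CAPABLE;	/* bit 1 set, bit 0 clear */
	struct iface_info a = { 0 }, b = { 0 };

	a.rdma_capable = capability & RDMA_CAPABLE;		/* 2 mod 2 == 0 */
	b.rdma_capable = (capability & RDMA_CAPABLE) ? 1 : 0;	/* 1, as intended */

	printf("buggy=%d fixed=%d\n", a.rdma_capable, b.rdma_capable);
	return 0;
}
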
+diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
+index 4af318fbda774..ef9498a6e88ac 100644
+--- a/fs/hfs/bfind.c
++++ b/fs/hfs/bfind.c
+@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
+ fd->key = ptr + tree->max_key_len + 2;
+ hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
+ tree->cnid, __builtin_return_address(0));
+- mutex_lock(&tree->tree_lock);
++ switch (tree->cnid) {
++ case HFS_CAT_CNID:
++ mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
++ break;
++ case HFS_EXT_CNID:
++ mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
++ break;
++ case HFS_ATTR_CNID:
++ mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
++ break;
++ default:
++ return -EINVAL;
++ }
+ return 0;
+ }
+
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index b63a4df7327b6..c0a73a6ffb28b 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -15,16 +15,31 @@
+
+ #include "btree.h"
+
+-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
+- int off, int len)
++void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+ {
+ struct page *page;
++ int pagenum;
++ int bytes_read;
++ int bytes_to_read;
++ void *vaddr;
+
+ off += node->page_offset;
+- page = node->page[0];
++ pagenum = off >> PAGE_SHIFT;
++ off &= ~PAGE_MASK; /* compute page offset for the first page */
+
+- memcpy(buf, kmap(page) + off, len);
+- kunmap(page);
++ for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
++ if (pagenum >= node->tree->pages_per_bnode)
++ break;
++ page = node->page[pagenum];
++ bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
++
++ vaddr = kmap_atomic(page);
++ memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
++ kunmap_atomic(vaddr);
++
++ pagenum++;
++ off = 0; /* page offset only applies to the first page */
++ }
+ }
+
+ u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
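
The bnode.c rewrite above is the substance of the HFS fix: the old code mapped only node->page[0], so a read whose offset plus length crossed a page boundary ran past the mapping. The new loop walks the node's pages and clamps each copy to what remains in the current page. The same arithmetic in plain userspace C, with toy page sizes so the boundary crossing is visible:

/* Userspace sketch of the page-spanning copy loop; only the offset
 * arithmetic is modeled. cc -o pgcopy pgcopy.c */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 16		/* tiny "pages" so the demo wraps */
#define NPAGES    4

static char pages[NPAGES][PAGE_SIZE];

static void node_read(char *buf, int off, int len)
{
	int pagenum = off / PAGE_SIZE;
	int done, chunk;

	off %= PAGE_SIZE;	/* offset applies to the first page only */
	for (done = 0; done < len; done += chunk) {
		if (pagenum >= NPAGES)
			break;	/* never run past the node's pages */
		chunk = len - done;
		if (chunk > PAGE_SIZE - off)
			chunk = PAGE_SIZE - off;
		memcpy(buf + done, pages[pagenum] + off, chunk);
		pagenum++;
		off = 0;
	}
}

int main(void)
{
	char buf[24] = { 0 };

	memcpy(pages[0], "0123456789abcdef", PAGE_SIZE);
	memcpy(pages[1], "ghijklmnopqrstuv", PAGE_SIZE);
	node_read(buf, 10, 12);		/* crosses the page boundary */
	printf("%.12s\n", buf);		/* abcdefghijkl */
	return 0;
}

Here node_read(buf, 10, 12) takes six bytes from the first page and six from the second, exactly the case the old single-kmap code corrupted.
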
+diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
+index dcc2aab1b2c43..25ac9a8bb57a7 100644
+--- a/fs/hfs/btree.h
++++ b/fs/hfs/btree.h
+@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
+
+ #define NODE_HASH_SIZE 256
+
++/* B-tree mutex nested subclasses */
++enum hfs_btree_mutex_classes {
++ CATALOG_BTREE_MUTEX,
++ EXTENTS_BTREE_MUTEX,
++ ATTR_BTREE_MUTEX,
++};
++
+ /* A HFS BTree held in memory */
+ struct hfs_btree {
+ struct super_block *sb;
+diff --git a/fs/hfs/super.c b/fs/hfs/super.c
+index 173876782f73f..77b6f35a4aa93 100644
+--- a/fs/hfs/super.c
++++ b/fs/hfs/super.c
+@@ -427,14 +427,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+ if (!res) {
+ if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+ res = -EIO;
+- goto bail;
++ goto bail_hfs_find;
+ }
+ hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+ }
+- if (res) {
+- hfs_find_exit(&fd);
+- goto bail_no_root;
+- }
++ if (res)
++ goto bail_hfs_find;
+ res = -EINVAL;
+ root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
+ hfs_find_exit(&fd);
+@@ -450,6 +448,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+ /* everything's okay */
+ return 0;
+
++bail_hfs_find:
++ hfs_find_exit(&fd);
+ bail_no_root:
+ pr_err("get root inode failed\n");
+ bail:
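
The super.c change layers a new bail_hfs_find label above bail_no_root so that every failure after hfs_find_init() runs hfs_find_exit() exactly once; the old -EIO path jumped to bail without it. The layered-exit idiom, sketched in standalone C with illustrative names:

/* Each label undoes exactly the state acquired before the jump to it,
 * so no path leaks or double-frees. cc -o bail bail.c */
#include <stdio.h>
#include <stdlib.h>

static int fill(void)
{
	char *find_data, *root;

	find_data = malloc(32);
	if (!find_data)
		goto bail;		/* nothing to undo yet */

	root = malloc(32);
	if (!root)
		goto bail_find;		/* undo find_data only */

	free(root);
	free(find_data);
	return 0;

bail_find:
	free(find_data);		/* mirrors hfs_find_exit(&fd) */
bail:
	fprintf(stderr, "get root inode failed\n");
	return -1;
}

int main(void)
{
	return fill() ? EXIT_FAILURE : EXIT_SUCCESS;
}
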
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index a5ba41b3b8673..7ec1cdb66be8d 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -10,6 +10,7 @@
+
+ void unix_inflight(struct user_struct *user, struct file *fp);
+ void unix_notinflight(struct user_struct *user, struct file *fp);
++void unix_destruct_scm(struct sk_buff *skb);
+ void unix_gc(void);
+ void wait_for_unix_gc(void);
+ struct sock *unix_get_socket(struct file *filp);
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index cf8f792743ec2..c76a5e9894dac 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -48,7 +48,7 @@ static inline bool net_busy_loop_on(void)
+
+ static inline bool sk_can_busy_loop(const struct sock *sk)
+ {
+- return sk->sk_ll_usec && !signal_pending(current);
++ return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
+ }
+
+ bool sk_busy_loop_end(void *p, unsigned long start_time);
+diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
+index 48d74674d5e95..bc22e44ffcdf7 100644
+--- a/include/net/sctp/constants.h
++++ b/include/net/sctp/constants.h
+@@ -348,8 +348,7 @@ enum {
+ #define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK
+
+ /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
+- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
+- * 192.88.99.0/24.
++ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
+ * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
+ * addresses.
+ */
+@@ -357,7 +356,6 @@ enum {
+ ((htonl(INADDR_BROADCAST) == a) || \
+ ipv4_is_multicast(a) || \
+ ipv4_is_zeronet(a) || \
+- ipv4_is_test_198(a) || \
+ ipv4_is_anycast_6to4(a))
+
+ /* Flags used for the bind address copy functions. */
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index f278e2f584fd2..1573d1bf63007 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3498,15 +3498,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
+ unbound_release_work);
+ struct workqueue_struct *wq = pwq->wq;
+ struct worker_pool *pool = pwq->pool;
+- bool is_last;
++ bool is_last = false;
+
+- if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+- return;
++ /*
++ * when @pwq is not linked, it doesn't hold any reference to the
++ * @wq, and @wq is invalid to access.
++ */
++ if (!list_empty(&pwq->pwqs_node)) {
++ if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
++ return;
+
+- mutex_lock(&wq->mutex);
+- list_del_rcu(&pwq->pwqs_node);
+- is_last = list_empty(&wq->pwqs);
+- mutex_unlock(&wq->mutex);
++ mutex_lock(&wq->mutex);
++ list_del_rcu(&pwq->pwqs_node);
++ is_last = list_empty(&wq->pwqs);
++ mutex_unlock(&wq->mutex);
++ }
+
+ mutex_lock(&wq_pool_mutex);
+ put_unbound_pool(pool);
+diff --git a/net/802/garp.c b/net/802/garp.c
+index 7f50d47470bd4..8e19f51833d6f 100644
+--- a/net/802/garp.c
++++ b/net/802/garp.c
+@@ -206,6 +206,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
+ kfree(attr);
+ }
+
++static void garp_attr_destroy_all(struct garp_applicant *app)
++{
++ struct rb_node *node, *next;
++ struct garp_attr *attr;
++
++ for (node = rb_first(&app->gid);
++ next = node ? rb_next(node) : NULL, node != NULL;
++ node = next) {
++ attr = rb_entry(node, struct garp_attr, node);
++ garp_attr_destroy(app, attr);
++ }
++}
++
+ static int garp_pdu_init(struct garp_applicant *app)
+ {
+ struct sk_buff *skb;
+@@ -612,6 +625,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
+
+ spin_lock_bh(&app->lock);
+ garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
++ garp_attr_destroy_all(app);
+ garp_pdu_queue(app);
+ spin_unlock_bh(&app->lock);
+
+diff --git a/net/802/mrp.c b/net/802/mrp.c
+index a808dd5bbb27a..32f87d458f054 100644
+--- a/net/802/mrp.c
++++ b/net/802/mrp.c
+@@ -295,6 +295,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
+ kfree(attr);
+ }
+
++static void mrp_attr_destroy_all(struct mrp_applicant *app)
++{
++ struct rb_node *node, *next;
++ struct mrp_attr *attr;
++
++ for (node = rb_first(&app->mad);
++ next = node ? rb_next(node) : NULL, node != NULL;
++ node = next) {
++ attr = rb_entry(node, struct mrp_attr, node);
++ mrp_attr_destroy(app, attr);
++ }
++}
++
+ static int mrp_pdu_init(struct mrp_applicant *app)
+ {
+ struct sk_buff *skb;
+@@ -898,6 +911,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+
+ spin_lock_bh(&app->lock);
+ mrp_mad_event(app, MRP_EVENT_TX);
++ mrp_attr_destroy_all(app);
+ mrp_pdu_queue(app);
+ spin_unlock_bh(&app->lock);
+
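
garp_attr_destroy_all() and mrp_attr_destroy_all() above share one pattern: the loop fetches rb_next(node) before destroying node, because destroy unlinks and frees it. The same save-the-successor teardown, demonstrated on a plain linked list since the kernel's rbtree API is not available in userspace:

/* Teardown that grabs the successor before freeing the current node.
 * cc -o teardown teardown.c */
#include <stdio.h>
#include <stdlib.h>

struct attr {
	int id;
	struct attr *next;
};

static void attr_destroy(struct attr **head, struct attr *a)
{
	/* unlink (the kernel code does rb_erase()) then free */
	struct attr **p = head;

	while (*p != a)
		p = &(*p)->next;
	*p = a->next;
	free(a);
}

static void attr_destroy_all(struct attr **head)
{
	struct attr *node, *next;

	for (node = *head; node != NULL; node = next) {
		next = node->next;	/* grab successor first */
		attr_destroy(head, node);
	}
}

int main(void)
{
	struct attr *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct attr *a = malloc(sizeof(*a));
		a->id = i;
		a->next = head;
		head = a;
	}
	attr_destroy_all(&head);
	printf("list empty: %s\n", head ? "no" : "yes");
	return 0;
}
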
+diff --git a/net/Makefile b/net/Makefile
+index bdaf53925acd5..449fc0b221f83 100644
+--- a/net/Makefile
++++ b/net/Makefile
+@@ -18,7 +18,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
+ obj-$(CONFIG_INET) += ipv4/
+ obj-$(CONFIG_TLS) += tls/
+ obj-$(CONFIG_XFRM) += xfrm/
+-obj-$(CONFIG_UNIX) += unix/
++obj-$(CONFIG_UNIX_SCM) += unix/
+ obj-$(CONFIG_NET) += ipv6/
+ obj-$(CONFIG_BPFILTER) += bpfilter/
+ obj-$(CONFIG_PACKET) += packet/
+diff --git a/net/core/sock.c b/net/core/sock.c
+index e6cbe137cb6fc..956af38aa0d6e 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -989,7 +989,7 @@ set_rcvbuf:
+ if (val < 0)
+ ret = -EINVAL;
+ else
+- sk->sk_ll_usec = val;
++ WRITE_ONCE(sk->sk_ll_usec, val);
+ }
+ break;
+ #endif
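
This sock.c hunk is the writer half of the busy_poll.h change earlier in the patch: sk_ll_usec is written by setsockopt(SO_BUSY_POLL) and read locklessly by the busy-poll path, so both sides gain ONCE annotations to rule out load/store tearing and compiler re-fetching. A rough userspace analogue of that contract using C11 relaxed atomics:

/* READ_ONCE/WRITE_ONCE pairing, approximated with relaxed atomics:
 * the value is exchanged tear-free, with no ordering implied beyond
 * that. cc -std=c11 -pthread -o once once.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int ll_usec;	/* plays sk->sk_ll_usec */

static void *writer(void *arg)
{
	/* setsockopt(SO_BUSY_POLL) side: one plain store, no reader lock */
	atomic_store_explicit(&ll_usec, 50, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);

	/* busy-poll side: lockless read; relaxed suffices because only
	 * the value matters, not ordering against other data */
	unsigned int v = atomic_load_explicit(&ll_usec, memory_order_relaxed);
	printf("busy poll %s (usec=%u)\n", v ? "on" : "off", v);

	pthread_join(&t, NULL);
	return 0;
}
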
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index dd51256582556..7207a9769f1a9 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -412,7 +412,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
+ retval = SCTP_SCOPE_LINK;
+ } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
+ ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
+- ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
++ ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
++ ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
+ retval = SCTP_SCOPE_PRIVATE;
+ } else {
+ retval = SCTP_SCOPE_GLOBAL;
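
Paired with the constants.h hunk earlier, this moves the RFC 2544 benchmarking block 198.18.0.0/15 out of SCTP's unusable set and into SCTP_SCOPE_PRIVATE. A standalone version of the membership test (the helper mirrors the /15 match that ipv4_is_test_198() performs; the rest is demo scaffolding):

/* 198.18.0.0/15 membership check. cc -o test198 test198.c */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_test_198(uint32_t addr_be)
{
	/* 198.18.0.0/15 => network 0xc6120000, mask 255.254.0.0 */
	return (addr_be & htonl(0xfffe0000)) == htonl(0xc6120000);
}

int main(void)
{
	const char *samples[] = { "198.18.0.1", "198.19.255.254",
				  "198.20.0.1", "192.168.1.1" };

	for (unsigned int i = 0; i < 4; i++) {
		struct in_addr a;

		inet_pton(AF_INET, samples[i], &a);
		printf("%-15s %s\n", samples[i],
		       is_test_198(a.s_addr) ? "test-198 (private scope)"
					     : "other");
	}
	return 0;
}
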
+diff --git a/net/unix/Kconfig b/net/unix/Kconfig
+index 8b31ab85d050f..3b9e450656a4d 100644
+--- a/net/unix/Kconfig
++++ b/net/unix/Kconfig
+@@ -19,6 +19,11 @@ config UNIX
+
+ Say Y unless you know what you are doing.
+
++config UNIX_SCM
++ bool
++ depends on UNIX
++ default y
++
+ config UNIX_DIAG
+ tristate "UNIX: socket monitoring interface"
+ depends on UNIX
+diff --git a/net/unix/Makefile b/net/unix/Makefile
+index ffd0a275c3a79..54e58cc4f9450 100644
+--- a/net/unix/Makefile
++++ b/net/unix/Makefile
+@@ -10,3 +10,5 @@ unix-$(CONFIG_SYSCTL) += sysctl_net_unix.o
+
+ obj-$(CONFIG_UNIX_DIAG) += unix_diag.o
+ unix_diag-y := diag.o
++
++obj-$(CONFIG_UNIX_SCM) += scm.o
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 53fe5ada5a83a..98c253afa0db2 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -119,6 +119,8 @@
+ #include <linux/freezer.h>
+ #include <linux/file.h>
+
++#include "scm.h"
++
+ struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
+ EXPORT_SYMBOL_GPL(unix_socket_table);
+ DEFINE_SPINLOCK(unix_table_lock);
+@@ -1515,65 +1517,51 @@ out:
+ return err;
+ }
+
+-static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+-{
+- int i;
+-
+- scm->fp = UNIXCB(skb).fp;
+- UNIXCB(skb).fp = NULL;
+-
+- for (i = scm->fp->count-1; i >= 0; i--)
+- unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+-}
+-
+-static void unix_destruct_scm(struct sk_buff *skb)
+-{
+- struct scm_cookie scm;
+- memset(&scm, 0, sizeof(scm));
+- scm.pid = UNIXCB(skb).pid;
+- if (UNIXCB(skb).fp)
+- unix_detach_fds(&scm, skb);
+-
+- /* Alas, it calls VFS */
+- /* So fscking what? fput() had been SMP-safe since the last Summer */
+- scm_destroy(&scm);
+- sock_wfree(skb);
+-}
+-
+-/*
+- * The "user->unix_inflight" variable is protected by the garbage
+- * collection lock, and we just read it locklessly here. If you go
+- * over the limit, there might be a tiny race in actually noticing
+- * it across threads. Tough.
+- */
+-static inline bool too_many_unix_fds(struct task_struct *p)
+-{
+- struct user_struct *user = current_user();
+-
+- if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+- return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+- return false;
+-}
+-
+-static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
++static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ {
+- int i;
+-
+- if (too_many_unix_fds(current))
+- return -ETOOMANYREFS;
++ scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+ /*
+- * Need to duplicate file references for the sake of garbage
+- * collection. Otherwise a socket in the fps might become a
+- * candidate for GC while the skb is not yet queued.
++ * Garbage collection of unix sockets starts by selecting a set of
++ * candidate sockets which have reference only from being in flight
++ * (total_refs == inflight_refs). This condition is checked once during
++ * the candidate collection phase, and candidates are marked as such, so
++ * that non-candidates can later be ignored. While inflight_refs is
++ * protected by unix_gc_lock, total_refs (file count) is not, hence this
++ * is an instantaneous decision.
++ *
++ * Once a candidate, however, the socket must not be reinstalled into a
++ * file descriptor while the garbage collection is in progress.
++ *
++ * If the above conditions are met, then the directed graph of
++ * candidates (*) does not change while unix_gc_lock is held.
++ *
++ * Any operations that changes the file count through file descriptors
++ * (dup, close, sendmsg) does not change the graph since candidates are
++ * not installed in fds.
++ *
++ * Dequeing a candidate via recvmsg would install it into an fd, but
++ * that takes unix_gc_lock to decrement the inflight count, so it's
++ * serialized with garbage collection.
++ *
++ * MSG_PEEK is special in that it does not change the inflight count,
++ * yet does install the socket into an fd. The following lock/unlock
++ * pair is to ensure serialization with garbage collection. It must be
++ * done between incrementing the file count and installing the file into
++ * an fd.
++ *
++ * If garbage collection starts after the barrier provided by the
++ * lock/unlock, then it will see the elevated refcount and not mark this
++ * as a candidate. If a garbage collection is already in progress
++ * before the file count was incremented, then the lock/unlock pair will
++ * ensure that garbage collection is finished before progressing to
++ * installing the fd.
++ *
++ * (*) A -> B where B is on the queue of A or B is on the queue of C
++ * which is on the queue of listening socket A.
+ */
+- UNIXCB(skb).fp = scm_fp_dup(scm->fp);
+- if (!UNIXCB(skb).fp)
+- return -ENOMEM;
+-
+- for (i = scm->fp->count - 1; i >= 0; i--)
+- unix_inflight(scm->fp->user, scm->fp->fp[i]);
+- return 0;
++ spin_lock(&unix_gc_lock);
++ spin_unlock(&unix_gc_lock);
+ }
+
+ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
+@@ -2201,7 +2189,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ sk_peek_offset_fwd(sk, size);
+
+ if (UNIXCB(skb).fp)
+- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
++ unix_peek_fds(&scm, skb);
+ }
+ err = (flags & MSG_TRUNC) ? skb->len - skip : size;
+
+@@ -2442,7 +2430,7 @@ unlock:
+ /* It is questionable, see note in unix_dgram_recvmsg.
+ */
+ if (UNIXCB(skb).fp)
+- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
++ unix_peek_fds(&scm, skb);
+
+ sk_peek_offset_fwd(sk, chunk);
+
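
The key piece of the af_unix.c change is the empty lock/unlock pair that unix_peek_fds() ends with: acquiring unix_gc_lock and releasing it immediately cannot complete while a collection cycle holds the lock, which is exactly the serialization the long comment describes. The same pattern in a runnable pthread sketch (names and timing are illustrative):

/* An empty lock/unlock pair as a barrier against an in-flight critical
 * section. cc -pthread -o barrier barrier.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

static void *gc_thread(void *arg)
{
	pthread_mutex_lock(&gc_lock);
	puts("gc: collecting...");
	sleep(1);			/* a collection in progress */
	puts("gc: done");
	pthread_mutex_unlock(&gc_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, gc_thread, NULL);
	usleep(100 * 1000);		/* let the GC take the lock */

	/* The pattern from the patch: acquire + release, no body.
	 * This line only returns once any in-flight collection ends. */
	pthread_mutex_lock(&gc_lock);
	pthread_mutex_unlock(&gc_lock);

	puts("peek: safe to install the fd now");
	pthread_join(&t, NULL);
	return 0;
}
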
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index c36757e728442..8bbe1b8e4ff7f 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -86,77 +86,13 @@
+ #include <net/scm.h>
+ #include <net/tcp_states.h>
+
++#include "scm.h"
++
+ /* Internal data structures and random procedures: */
+
+-static LIST_HEAD(gc_inflight_list);
+ static LIST_HEAD(gc_candidates);
+-static DEFINE_SPINLOCK(unix_gc_lock);
+ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
+
+-unsigned int unix_tot_inflight;
+-
+-struct sock *unix_get_socket(struct file *filp)
+-{
+- struct sock *u_sock = NULL;
+- struct inode *inode = file_inode(filp);
+-
+- /* Socket ? */
+- if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
+- struct socket *sock = SOCKET_I(inode);
+- struct sock *s = sock->sk;
+-
+- /* PF_UNIX ? */
+- if (s && sock->ops && sock->ops->family == PF_UNIX)
+- u_sock = s;
+- }
+- return u_sock;
+-}
+-
+-/* Keep the number of times in flight count for the file
+- * descriptor if it is for an AF_UNIX socket.
+- */
+-
+-void unix_inflight(struct user_struct *user, struct file *fp)
+-{
+- struct sock *s = unix_get_socket(fp);
+-
+- spin_lock(&unix_gc_lock);
+-
+- if (s) {
+- struct unix_sock *u = unix_sk(s);
+-
+- if (atomic_long_inc_return(&u->inflight) == 1) {
+- BUG_ON(!list_empty(&u->link));
+- list_add_tail(&u->link, &gc_inflight_list);
+- } else {
+- BUG_ON(list_empty(&u->link));
+- }
+- unix_tot_inflight++;
+- }
+- user->unix_inflight++;
+- spin_unlock(&unix_gc_lock);
+-}
+-
+-void unix_notinflight(struct user_struct *user, struct file *fp)
+-{
+- struct sock *s = unix_get_socket(fp);
+-
+- spin_lock(&unix_gc_lock);
+-
+- if (s) {
+- struct unix_sock *u = unix_sk(s);
+-
+- BUG_ON(!atomic_long_read(&u->inflight));
+- BUG_ON(list_empty(&u->link));
+-
+- if (atomic_long_dec_and_test(&u->inflight))
+- list_del_init(&u->link);
+- unix_tot_inflight--;
+- }
+- user->unix_inflight--;
+- spin_unlock(&unix_gc_lock);
+-}
+-
+ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
+ struct sk_buff_head *hitlist)
+ {
+diff --git a/net/unix/scm.c b/net/unix/scm.c
+new file mode 100644
+index 0000000000000..83413ade79838
+--- /dev/null
++++ b/net/unix/scm.c
+@@ -0,0 +1,148 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/socket.h>
++#include <linux/net.h>
++#include <linux/fs.h>
++#include <net/af_unix.h>
++#include <net/scm.h>
++#include <linux/init.h>
++
++#include "scm.h"
++
++unsigned int unix_tot_inflight;
++EXPORT_SYMBOL(unix_tot_inflight);
++
++LIST_HEAD(gc_inflight_list);
++EXPORT_SYMBOL(gc_inflight_list);
++
++DEFINE_SPINLOCK(unix_gc_lock);
++EXPORT_SYMBOL(unix_gc_lock);
++
++struct sock *unix_get_socket(struct file *filp)
++{
++ struct sock *u_sock = NULL;
++ struct inode *inode = file_inode(filp);
++
++ /* Socket ? */
++ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
++ struct socket *sock = SOCKET_I(inode);
++ struct sock *s = sock->sk;
++
++ /* PF_UNIX ? */
++ if (s && sock->ops && sock->ops->family == PF_UNIX)
++ u_sock = s;
++ }
++ return u_sock;
++}
++EXPORT_SYMBOL(unix_get_socket);
++
++/* Keep the number of times in flight count for the file
++ * descriptor if it is for an AF_UNIX socket.
++ */
++void unix_inflight(struct user_struct *user, struct file *fp)
++{
++ struct sock *s = unix_get_socket(fp);
++
++ spin_lock(&unix_gc_lock);
++
++ if (s) {
++ struct unix_sock *u = unix_sk(s);
++
++ if (atomic_long_inc_return(&u->inflight) == 1) {
++ BUG_ON(!list_empty(&u->link));
++ list_add_tail(&u->link, &gc_inflight_list);
++ } else {
++ BUG_ON(list_empty(&u->link));
++ }
++ unix_tot_inflight++;
++ }
++ user->unix_inflight++;
++ spin_unlock(&unix_gc_lock);
++}
++
++void unix_notinflight(struct user_struct *user, struct file *fp)
++{
++ struct sock *s = unix_get_socket(fp);
++
++ spin_lock(&unix_gc_lock);
++
++ if (s) {
++ struct unix_sock *u = unix_sk(s);
++
++ BUG_ON(!atomic_long_read(&u->inflight));
++ BUG_ON(list_empty(&u->link));
++
++ if (atomic_long_dec_and_test(&u->inflight))
++ list_del_init(&u->link);
++ unix_tot_inflight--;
++ }
++ user->unix_inflight--;
++ spin_unlock(&unix_gc_lock);
++}
++
++/*
++ * The "user->unix_inflight" variable is protected by the garbage
++ * collection lock, and we just read it locklessly here. If you go
++ * over the limit, there might be a tiny race in actually noticing
++ * it across threads. Tough.
++ */
++static inline bool too_many_unix_fds(struct task_struct *p)
++{
++ struct user_struct *user = current_user();
++
++ if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
++ return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
++ return false;
++}
++
++int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
++{
++ int i;
++
++ if (too_many_unix_fds(current))
++ return -ETOOMANYREFS;
++
++ /*
++ * Need to duplicate file references for the sake of garbage
++ * collection. Otherwise a socket in the fps might become a
++ * candidate for GC while the skb is not yet queued.
++ */
++ UNIXCB(skb).fp = scm_fp_dup(scm->fp);
++ if (!UNIXCB(skb).fp)
++ return -ENOMEM;
++
++ for (i = scm->fp->count - 1; i >= 0; i--)
++ unix_inflight(scm->fp->user, scm->fp->fp[i]);
++ return 0;
++}
++EXPORT_SYMBOL(unix_attach_fds);
++
++void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
++{
++ int i;
++
++ scm->fp = UNIXCB(skb).fp;
++ UNIXCB(skb).fp = NULL;
++
++ for (i = scm->fp->count-1; i >= 0; i--)
++ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
++}
++EXPORT_SYMBOL(unix_detach_fds);
++
++void unix_destruct_scm(struct sk_buff *skb)
++{
++ struct scm_cookie scm;
++
++ memset(&scm, 0, sizeof(scm));
++ scm.pid = UNIXCB(skb).pid;
++ if (UNIXCB(skb).fp)
++ unix_detach_fds(&scm, skb);
++
++ /* Alas, it calls VFS */
++ /* So fscking what? fput() had been SMP-safe since the last Summer */
++ scm_destroy(&scm);
++ sock_wfree(skb);
++}
++EXPORT_SYMBOL(unix_destruct_scm);
+diff --git a/net/unix/scm.h b/net/unix/scm.h
+new file mode 100644
+index 0000000000000..5a255a477f160
+--- /dev/null
++++ b/net/unix/scm.h
+@@ -0,0 +1,10 @@
++#ifndef NET_UNIX_SCM_H
++#define NET_UNIX_SCM_H
++
++extern struct list_head gc_inflight_list;
++extern spinlock_t unix_gc_lock;
++
++int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb);
++void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb);
++
++#endif
+diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
+index 16d42b2de424e..1963440f67251 100644
+--- a/tools/testing/selftests/vm/userfaultfd.c
++++ b/tools/testing/selftests/vm/userfaultfd.c
+@@ -131,7 +131,7 @@ static void anon_allocate_area(void **alloc_area)
+ {
+ *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+- if (*alloc_area == MAP_FAILED)
++ if (*alloc_area == MAP_FAILED) {
+ fprintf(stderr, "mmap of anonymous memory failed");
+ *alloc_area = NULL;
+ }