Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.7 commit in: /
Date: Fri, 31 Jul 2020 18:07:58
Message-Id: 1596218859.3019bd9ccad7fa58df721cc1831b0444e4fb1d3b.mpagano@gentoo
commit: 3019bd9ccad7fa58df721cc1831b0444e4fb1d3b
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 31 18:07:39 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul 31 18:07:39 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3019bd9c

Linux patch 5.7.12

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1011_linux-5.7.12.patch | 784 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 788 insertions(+)

diff --git a/0000_README b/0000_README
index 6409a51..21eff3a 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-5.7.11.patch
From: http://www.kernel.org
Desc: Linux 5.7.11

+Patch: 1011_linux-5.7.12.patch
+From: http://www.kernel.org
+Desc: Linux 5.7.12
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-5.7.12.patch b/1011_linux-5.7.12.patch
new file mode 100644
index 0000000..bd95a59
--- /dev/null
+++ b/1011_linux-5.7.12.patch
@@ -0,0 +1,784 @@
+diff --git a/Makefile b/Makefile
+index 12777a95833f..401d58b35e61 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 7
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index e16afa27700d..f58baff2be0a 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -227,6 +227,9 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
+ if (*ppos < 0 || !count)
+ return -EINVAL;
+
++ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
++ count = PAGE_SIZE << (MAX_ORDER - 1);
++
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+@@ -371,6 +374,9 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
+ if (*ppos < 0 || !count)
+ return -EINVAL;
+
++ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
++ count = PAGE_SIZE << (MAX_ORDER - 1);
++
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
+index 69773d228ec1..84640a0c13f3 100644
+--- a/drivers/net/wan/x25_asy.c
++++ b/drivers/net/wan/x25_asy.c
+@@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl)
+ netif_wake_queue(sl->dev);
+ }
+
+-/* Send one completely decapsulated IP datagram to the IP layer. */
++/* Send an LAPB frame to the LAPB module to process. */
+
+ static void x25_asy_bump(struct x25_asy *sl)
+ {
+@@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl)
+ count = sl->rcount;
+ dev->stats.rx_bytes += count;
+
+- skb = dev_alloc_skb(count+1);
++ skb = dev_alloc_skb(count);
+ if (skb == NULL) {
+ netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
+ dev->stats.rx_dropped++;
+ return;
+ }
+- skb_push(skb, 1); /* LAPB internal control */
+ skb_put_data(skb, sl->rbuff, count);
+ skb->protocol = x25_type_trans(skb, sl->dev);
+ err = lapb_data_received(skb->dev, skb);
+@@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl)
+ kfree_skb(skb);
+ printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
+ } else {
+- netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+ }
+@@ -356,12 +354,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
+ */
+
+ /*
+- * Called when I frame data arrives. We did the work above - throw it
+- * at the net layer.
++ * Called when I frame data arrive. We add a pseudo header for upper
++ * layers and pass it to upper layers.
+ */
+
+ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
+ {
++ if (skb_cow(skb, 1)) {
++ kfree_skb(skb);
++ return NET_RX_DROP;
++ }
++ skb_push(skb, 1);
++ skb->data[0] = X25_IFACE_DATA;
++
++ skb->protocol = x25_type_trans(skb, dev);
++
+ return netif_rx(skb);
+ }
+
+@@ -657,7 +664,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
+ switch (s) {
+ case X25_END:
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+- sl->rcount > 2)
++ sl->rcount >= 2)
+ x25_asy_bump(sl);
+ clear_bit(SLF_ESCAPE, &sl->flags);
+ sl->rcount = 0;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 51be3a20ade1..d0d3efaaa4d4 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -581,6 +581,7 @@ enum {
+
+ struct async_poll {
+ struct io_poll_iocb poll;
++ struct io_poll_iocb *double_poll;
+ struct io_wq_work work;
+ };
+
+@@ -4220,9 +4221,9 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
+ return false;
+ }
+
+-static void io_poll_remove_double(struct io_kiocb *req)
++static void io_poll_remove_double(struct io_kiocb *req, void *data)
+ {
+- struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
++ struct io_poll_iocb *poll = data;
+
+ lockdep_assert_held(&req->ctx->completion_lock);
+
+@@ -4242,7 +4243,7 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
+ {
+ struct io_ring_ctx *ctx = req->ctx;
+
+- io_poll_remove_double(req);
++ io_poll_remove_double(req, req->io);
+ req->poll.done = true;
+ io_cqring_fill_event(req, error ? error : mangle_poll(mask));
+ io_commit_cqring(ctx);
+@@ -4285,21 +4286,21 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
+ int sync, void *key)
+ {
+ struct io_kiocb *req = wait->private;
+- struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
++ struct io_poll_iocb *poll = req->apoll->double_poll;
+ __poll_t mask = key_to_poll(key);
+
+ /* for instances that support it check for an event match first: */
+ if (mask && !(mask & poll->events))
+ return 0;
+
+- if (req->poll.head) {
++ if (poll && poll->head) {
+ bool done;
+
+- spin_lock(&req->poll.head->lock);
+- done = list_empty(&req->poll.wait.entry);
++ spin_lock(&poll->head->lock);
++ done = list_empty(&poll->wait.entry);
+ if (!done)
+- list_del_init(&req->poll.wait.entry);
+- spin_unlock(&req->poll.head->lock);
++ list_del_init(&poll->wait.entry);
++ spin_unlock(&poll->head->lock);
+ if (!done)
+ __io_async_wake(req, poll, mask, io_poll_task_func);
+ }
+@@ -4319,7 +4320,8 @@ static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
+ }
+
+ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
+- struct wait_queue_head *head)
++ struct wait_queue_head *head,
++ struct io_poll_iocb **poll_ptr)
+ {
+ struct io_kiocb *req = pt->req;
+
+@@ -4330,7 +4332,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
+ */
+ if (unlikely(poll->head)) {
+ /* already have a 2nd entry, fail a third attempt */
+- if (req->io) {
++ if (*poll_ptr) {
+ pt->error = -EINVAL;
+ return;
+ }
+@@ -4342,7 +4344,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
+ io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
+ refcount_inc(&req->refs);
+ poll->wait.private = req;
+- req->io = (void *) poll;
++ *poll_ptr = poll;
+ }
+
+ pt->error = 0;
+@@ -4354,8 +4356,9 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
+ struct poll_table_struct *p)
+ {
+ struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
++ struct async_poll *apoll = pt->req->apoll;
+
+- __io_queue_proc(&pt->req->apoll->poll, pt, head);
++ __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
+ }
+
+ static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
+@@ -4409,6 +4412,7 @@ static void io_async_task_func(struct callback_head *cb)
+ memcpy(&req->work, &apoll->work, sizeof(req->work));
+
+ if (canceled) {
++ kfree(apoll->double_poll);
+ kfree(apoll);
+ io_cqring_ev_posted(ctx);
+ end_req:
+@@ -4426,6 +4430,7 @@ end_req:
+ __io_queue_sqe(req, NULL);
+ mutex_unlock(&ctx->uring_lock);
+
++ kfree(apoll->double_poll);
+ kfree(apoll);
+ }
+
+@@ -4497,7 +4502,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
+ struct async_poll *apoll;
+ struct io_poll_table ipt;
+ __poll_t mask, ret;
+- bool had_io;
+
+ if (!req->file || !file_can_poll(req->file))
+ return false;
+@@ -4509,10 +4513,10 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
+ apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+ if (unlikely(!apoll))
+ return false;
++ apoll->double_poll = NULL;
+
+ req->flags |= REQ_F_POLLED;
+ memcpy(&apoll->work, &req->work, sizeof(req->work));
+- had_io = req->io != NULL;
+
+ get_task_struct(current);
+ req->task = current;
+@@ -4531,12 +4535,10 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
+ ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
+ io_async_wake);
+ if (ret) {
+- ipt.error = 0;
+- /* only remove double add if we did it here */
+- if (!had_io)
+- io_poll_remove_double(req);
++ io_poll_remove_double(req, apoll->double_poll);
+ spin_unlock_irq(&ctx->completion_lock);
+ memcpy(&req->work, &apoll->work, sizeof(req->work));
++ kfree(apoll->double_poll);
+ kfree(apoll);
+ return false;
+ }
+@@ -4567,11 +4569,13 @@ static bool io_poll_remove_one(struct io_kiocb *req)
+ bool do_complete;
+
+ if (req->opcode == IORING_OP_POLL_ADD) {
+- io_poll_remove_double(req);
++ io_poll_remove_double(req, req->io);
+ do_complete = __io_poll_remove_one(req, &req->poll);
+ } else {
+ struct async_poll *apoll = req->apoll;
+
++ io_poll_remove_double(req, apoll->double_poll);
++
+ /* non-poll requests have submit ref still */
+ do_complete = __io_poll_remove_one(req, &apoll->poll);
+ if (do_complete) {
+@@ -4582,6 +4586,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
+ * final reference.
+ */
+ memcpy(&req->work, &apoll->work, sizeof(req->work));
++ kfree(apoll->double_poll);
+ kfree(apoll);
+ }
+ }
+@@ -4682,7 +4687,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
+ {
+ struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
+
+- __io_queue_proc(&pt->req->poll, pt, head);
++ __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->io);
+ }
+
+ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index 4f8159e90ce1..0bba582e83ca 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -217,6 +217,8 @@ struct tcp_sock {
+ } rack;
+ u16 advmss; /* Advertised MSS */
+ u8 compressed_ack;
++ u8 tlp_retrans:1, /* TLP is a retransmission */
++ unused:7;
+ u32 chrono_start; /* Start time in jiffies of a TCP chrono */
+ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
+ u8 chrono_type:2, /* current chronograph type */
+@@ -239,7 +241,7 @@ struct tcp_sock {
+ save_syn:1, /* Save headers of SYN packet */
+ is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
+ syn_smc:1; /* SYN includes SMC */
+- u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
++ u32 tlp_high_seq; /* snd_nxt at the time of TLP */
+
+ u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
+ u64 tcp_wstamp_ns; /* departure time for next sent data packet */
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index fd91cd34f25e..dec3f35467c9 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1187,7 +1187,10 @@ static int __must_check ax25_connect(struct socket *sock,
+ if (addr_len > sizeof(struct sockaddr_ax25) &&
+ fsa->fsa_ax25.sax25_ndigis != 0) {
+ /* Valid number of digipeaters ? */
+- if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) {
++ if (fsa->fsa_ax25.sax25_ndigis < 1 ||
++ fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS ||
++ addr_len < sizeof(struct sockaddr_ax25) +
++ sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) {
+ err = -EINVAL;
+ goto out_release;
+ }
+@@ -1507,7 +1510,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;
+
+ /* Valid number of digipeaters ? */
+- if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) {
++ if (usax->sax25_ndigis < 1 ||
++ usax->sax25_ndigis > AX25_MAX_DIGIS ||
++ addr_len < sizeof(struct sockaddr_ax25) +
++ sizeof(ax25_address) * usax->sax25_ndigis) {
+ err = -EINVAL;
+ goto out;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index c9ee5d80d5ea..c1c2688a955c 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5504,7 +5504,7 @@ static void flush_backlog(struct work_struct *work)
+ skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+ if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+ __skb_unlink(skb, &sd->input_pkt_queue);
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ input_queue_head_incr(sd);
+ }
+ }
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 4773ad6ec111..f67f5ca39d63 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1077,7 +1077,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
+ trans_timeout = queue->trans_timeout;
+ spin_unlock_irq(&queue->_xmit_lock);
+
+- return sprintf(buf, "%lu", trans_timeout);
++ return sprintf(buf, fmt_ulong, trans_timeout);
+ }
+
+ static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 709ebbf8ab5b..78345e39e54a 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3337,7 +3337,8 @@ replay:
+ */
+ if (err < 0) {
+ /* If device is not registered at all, free it now */
+- if (dev->reg_state == NETREG_UNINITIALIZED)
++ if (dev->reg_state == NETREG_UNINITIALIZED ||
++ dev->reg_state == NETREG_UNREGISTERED)
+ free_netdev(dev);
+ goto out;
+ }
+diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
+index adcb3aea576d..bbdd3c7b6cb5 100644
+--- a/net/core/sock_reuseport.c
++++ b/net/core/sock_reuseport.c
+@@ -101,6 +101,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
+ more_reuse->prog = reuse->prog;
+ more_reuse->reuseport_id = reuse->reuseport_id;
+ more_reuse->bind_inany = reuse->bind_inany;
++ more_reuse->has_conns = reuse->has_conns;
+
+ memcpy(more_reuse->socks, reuse->socks,
+ reuse->num_socks * sizeof(struct sock *));
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 31c58e00d25b..32ac66a8c657 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3506,10 +3506,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
+ }
+ }
+
+-/* This routine deals with acks during a TLP episode.
+- * We mark the end of a TLP episode on receiving TLP dupack or when
+- * ack is after tlp_high_seq.
+- * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
++/* This routine deals with acks during a TLP episode and ends an episode by
++ * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack
+ */
+ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
+ {
+@@ -3518,7 +3516,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
+ if (before(ack, tp->tlp_high_seq))
+ return;
+
+- if (flag & FLAG_DSACKING_ACK) {
++ if (!tp->tlp_retrans) {
++ /* TLP of new data has been acknowledged */
++ tp->tlp_high_seq = 0;
++ } else if (flag & FLAG_DSACKING_ACK) {
+ /* This DSACK means original and TLP probe arrived; no loss */
+ tp->tlp_high_seq = 0;
+ } else if (after(ack, tp->tlp_high_seq)) {
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index bee2f9b8b8a1..b1c2484b4314 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2625,6 +2625,11 @@ void tcp_send_loss_probe(struct sock *sk)
+ int pcount;
+ int mss = tcp_current_mss(sk);
+
++ /* At most one outstanding TLP */
++ if (tp->tlp_high_seq)
++ goto rearm_timer;
++
++ tp->tlp_retrans = 0;
+ skb = tcp_send_head(sk);
+ if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
+ pcount = tp->packets_out;
+@@ -2642,10 +2647,6 @@ void tcp_send_loss_probe(struct sock *sk)
+ return;
+ }
+
+- /* At most one outstanding TLP retransmission. */
+- if (tp->tlp_high_seq)
+- goto rearm_timer;
+-
+ if (skb_still_in_host_queue(sk, skb))
+ goto rearm_timer;
+
+@@ -2667,10 +2668,12 @@ void tcp_send_loss_probe(struct sock *sk)
+ if (__tcp_retransmit_skb(sk, skb, 1))
+ goto rearm_timer;
+
++ tp->tlp_retrans = 1;
++
++probe_sent:
+ /* Record snd_nxt for loss detection. */
+ tp->tlp_high_seq = tp->snd_nxt;
+
+-probe_sent:
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+ /* Reset s.t. tcp_rearm_rto will restart timer from now */
+ inet_csk(sk)->icsk_pending = 0;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 32564b350823..6ffef9861fa9 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -413,7 +413,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ struct udp_hslot *hslot2,
+ struct sk_buff *skb)
+ {
+- struct sock *sk, *result;
++ struct sock *sk, *result, *reuseport_result;
+ int score, badness;
+ u32 hash = 0;
+
+@@ -423,17 +423,20 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ score = compute_score(sk, net, saddr, sport,
+ daddr, hnum, dif, sdif);
+ if (score > badness) {
++ reuseport_result = NULL;
++
+ if (sk->sk_reuseport &&
+ sk->sk_state != TCP_ESTABLISHED) {
+ hash = udp_ehashfn(net, daddr, hnum,
+ saddr, sport);
+- result = reuseport_select_sock(sk, hash, skb,
+- sizeof(struct udphdr));
+- if (result && !reuseport_has_conns(sk, false))
+- return result;
++ reuseport_result = reuseport_select_sock(sk, hash, skb,
++ sizeof(struct udphdr));
++ if (reuseport_result && !reuseport_has_conns(sk, false))
++ return reuseport_result;
+ }
++
++ result = reuseport_result ? : sk;
+ badness = score;
+- result = sk;
+ }
+ }
+ return result;
+@@ -2048,7 +2051,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ /*
+ * UDP-Lite specific tests, ignored on UDP sockets
+ */
+- if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
++ if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+
+ /*
+ * MIB statistics other than incrementing the error count are
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 6532bde82b40..3a57fb9ce049 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1562,17 +1562,18 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
+ static int __net_init ip6gre_init_net(struct net *net)
+ {
+ struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
++ struct net_device *ndev;
+ int err;
+
+ if (!net_has_fallback_tunnels(net))
+ return 0;
+- ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
+- NET_NAME_UNKNOWN,
+- ip6gre_tunnel_setup);
+- if (!ign->fb_tunnel_dev) {
++ ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
++ NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
++ if (!ndev) {
+ err = -ENOMEM;
+ goto err_alloc_dev;
+ }
++ ign->fb_tunnel_dev = ndev;
+ dev_net_set(ign->fb_tunnel_dev, net);
+ /* FB netdevice is special: we have one, and only one per netns.
+ * Allowing to move it to another netns is clearly unsafe.
+@@ -1592,7 +1593,7 @@ static int __net_init ip6gre_init_net(struct net *net)
+ return 0;
+
+ err_reg_dev:
+- free_netdev(ign->fb_tunnel_dev);
++ free_netdev(ndev);
+ err_alloc_dev:
+ return err;
+ }
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 7d4151747340..a8d74f44056a 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -148,7 +148,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ int dif, int sdif, struct udp_hslot *hslot2,
+ struct sk_buff *skb)
+ {
+- struct sock *sk, *result;
++ struct sock *sk, *result, *reuseport_result;
+ int score, badness;
+ u32 hash = 0;
+
+@@ -158,17 +158,20 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ score = compute_score(sk, net, saddr, sport,
+ daddr, hnum, dif, sdif);
+ if (score > badness) {
++ reuseport_result = NULL;
++
+ if (sk->sk_reuseport &&
+ sk->sk_state != TCP_ESTABLISHED) {
+ hash = udp6_ehashfn(net, daddr, hnum,
+ saddr, sport);
+
+- result = reuseport_select_sock(sk, hash, skb,
+- sizeof(struct udphdr));
+- if (result && !reuseport_has_conns(sk, false))
+- return result;
++ reuseport_result = reuseport_select_sock(sk, hash, skb,
++ sizeof(struct udphdr));
++ if (reuseport_result && !reuseport_has_conns(sk, false))
++ return reuseport_result;
+ }
+- result = sk;
++
++ result = reuseport_result ? : sk;
+ badness = score;
+ }
+ }
+@@ -643,7 +646,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ /*
+ * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
+ */
+- if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
++ if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+
+ if (up->pcrlen == 0) { /* full coverage was set */
+ net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 24a8c3c6da0d..300a104b9a0f 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -1180,6 +1180,7 @@ static int qrtr_release(struct socket *sock)
+ sk->sk_state_change(sk);
+
+ sock_set_flag(sk, SOCK_DEAD);
++ sock_orphan(sk);
+ sock->sk = NULL;
+
+ if (!sock_flag(sk, SOCK_ZAPPED))
+diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
+index 8578c39ec839..6896a33ef842 100644
+--- a/net/rxrpc/recvmsg.c
++++ b/net/rxrpc/recvmsg.c
+@@ -464,7 +464,7 @@ try_again:
+ list_empty(&rx->recvmsg_q) &&
+ rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
+ release_sock(&rx->sk);
+- return -ENODATA;
++ return -EAGAIN;
+ }
+
+ if (list_empty(&rx->recvmsg_q)) {
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 5e9c43d4a314..49d03c8c64da 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -306,7 +306,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
+ /* this should be in poll */
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
++ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ return -EPIPE;
+
+ more = msg->msg_flags & MSG_MORE;
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index 6a114f80e54b..e191f2728389 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -671,9 +671,10 @@ static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
+ }
+
+ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+- u8 family, u16 zone)
++ u8 family, u16 zone, bool *defrag)
+ {
+ enum ip_conntrack_info ctinfo;
++ struct qdisc_skb_cb cb;
+ struct nf_conn *ct;
+ int err = 0;
+ bool frag;
+@@ -691,6 +692,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+ return err;
+
+ skb_get(skb);
++ cb = *qdisc_skb_cb(skb);
+
+ if (family == NFPROTO_IPV4) {
+ enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
+@@ -701,6 +703,9 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+ local_bh_enable();
+ if (err && err != -EINPROGRESS)
+ goto out_free;
++
++ if (!err)
++ *defrag = true;
+ } else { /* NFPROTO_IPV6 */
+ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
+@@ -709,12 +714,16 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+ err = nf_ct_frag6_gather(net, skb, user);
+ if (err && err != -EINPROGRESS)
+ goto out_free;
++
++ if (!err)
++ *defrag = true;
+ #else
+ err = -EOPNOTSUPP;
+ goto out_free;
+ #endif
+ }
+
++ *qdisc_skb_cb(skb) = cb;
+ skb_clear_hash(skb);
+ skb->ignore_df = 1;
+ return err;
+@@ -912,6 +921,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ int nh_ofs, err, retval;
+ struct tcf_ct_params *p;
+ bool skip_add = false;
++ bool defrag = false;
+ struct nf_conn *ct;
+ u8 family;
+
+@@ -942,7 +952,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ */
+ nh_ofs = skb_network_offset(skb);
+ skb_pull_rcsum(skb, nh_ofs);
+- err = tcf_ct_handle_fragments(net, skb, family, p->zone);
++ err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
+ if (err == -EINPROGRESS) {
+ retval = TC_ACT_STOLEN;
+ goto out;
+@@ -1010,6 +1020,8 @@ out_push:
+
+ out:
+ tcf_action_update_bstats(&c->common, skb);
++ if (defrag)
++ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ return retval;
+
+ drop:
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index 67f7e71f9129..bda2536dd740 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -22,17 +22,11 @@
+ #include <net/sctp/sm.h>
+ #include <net/sctp/stream_sched.h>
+
+-/* Migrates chunks from stream queues to new stream queues if needed,
+- * but not across associations. Also, removes those chunks to streams
+- * higher than the new max.
+- */
+-static void sctp_stream_outq_migrate(struct sctp_stream *stream,
+- struct sctp_stream *new, __u16 outcnt)
++static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
+ {
+ struct sctp_association *asoc;
+ struct sctp_chunk *ch, *temp;
+ struct sctp_outq *outq;
+- int i;
+
+ asoc = container_of(stream, struct sctp_association, stream);
+ outq = &asoc->outqueue;
+@@ -56,6 +50,19 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
+
+ sctp_chunk_free(ch);
+ }
++}
++
++/* Migrates chunks from stream queues to new stream queues if needed,
++ * but not across associations. Also, removes those chunks to streams
++ * higher than the new max.
++ */
++static void sctp_stream_outq_migrate(struct sctp_stream *stream,
++ struct sctp_stream *new, __u16 outcnt)
++{
++ int i;
++
++ if (stream->outcnt > outcnt)
++ sctp_stream_shrink_out(stream, outcnt);
+
+ if (new) {
+ /* Here we actually move the old ext stuff into the new
+@@ -1037,11 +1044,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
+ nums = ntohs(addstrm->number_of_streams);
+ number = stream->outcnt - nums;
+
+- if (result == SCTP_STRRESET_PERFORMED)
++ if (result == SCTP_STRRESET_PERFORMED) {
+ for (i = number; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
+- else
++ } else {
++ sctp_stream_shrink_out(stream, number);
+ stream->outcnt = number;
++ }
+
+ *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
+ 0, nums, GFP_ATOMIC);
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index d4675e922a8f..e18369201a15 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -813,11 +813,11 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
+ state |= l->bc_rcvlink->rcv_unacked;
+ state |= l->rcv_unacked;
+ state |= !skb_queue_empty(&l->transmq);
+- state |= !skb_queue_empty(&l->deferdq);
+ probe = mstate->probing;
+ probe |= l->silent_intv_cnt;
+ if (probe || mstate->monitoring)
+ l->silent_intv_cnt++;
++ probe |= !skb_queue_empty(&l->deferdq);
+ if (l->snd_nxt == l->checkpoint) {
+ tipc_link_update_cwin(l, 0, 0);
+ probe = true;