From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Fri, 31 Jul 2020 18:04:48
Message-Id: 1596218663.57c4d4d1115957730e411ae485e62623e0e71c04.mpagano@gentoo
commit:     57c4d4d1115957730e411ae485e62623e0e71c04
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 31 18:04:23 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul 31 18:04:23 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=57c4d4d1115957730e411ae485e62623e0e71c04

Linux patch 5.4.55

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1054_linux-5.4.55.patch | 534 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 538 insertions(+)

diff --git a/0000_README b/0000_README
index fb63537..3aec8f2 100644
--- a/0000_README
+++ b/0000_README
@@ -259,6 +259,10 @@ Patch:  1053_linux-5.4.54.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.4.54
 
+Patch:  1054_linux-5.4.55.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.4.55
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
 
diff --git a/1054_linux-5.4.55.patch b/1054_linux-5.4.55.patch
new file mode 100644
index 0000000..52c0d78
--- /dev/null
+++ b/1054_linux-5.4.55.patch
@@ -0,0 +1,534 @@
+diff --git a/Makefile b/Makefile
+index ea711f30de29..072fe0eaa740 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 54
++SUBLEVEL = 55
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index 0bd9b291bb29..92f0960e9014 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -1073,6 +1073,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m,
+ 		break;
+ 	}
+
++	if (!next_ws)
++		print_wakeup_source_stats(m, &deleted_ws);
++
+ 	return next_ws;
+ }
+
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index e16afa27700d..f58baff2be0a 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -227,6 +227,9 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
+ 	if (*ppos < 0 || !count)
+ 		return -EINVAL;
+
++	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
++		count = PAGE_SIZE << (MAX_ORDER - 1);
++
+ 	buf = kmalloc(count, GFP_KERNEL);
+ 	if (!buf)
+ 		return -ENOMEM;
+@@ -371,6 +374,9 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
+ 	if (*ppos < 0 || !count)
+ 		return -EINVAL;
+
++	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
++		count = PAGE_SIZE << (MAX_ORDER - 1);
++
+ 	buf = kmalloc(count, GFP_KERNEL);
+ 	if (!buf)
+ 		return -ENOMEM;
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index cd9d08695cc1..00c4beb760c3 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -2802,7 +2802,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
+ 	}
+
+ 	/* Do this here, so we can be verbose early */
+-	SET_NETDEV_DEV(net_dev, dev->parent);
++	SET_NETDEV_DEV(net_dev, dev);
+ 	dev_set_drvdata(dev, net_dev);
+
+ 	priv = netdev_priv(net_dev);
+diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
+index 914be5847386..cdcc380b4c26 100644
+--- a/drivers/net/wan/x25_asy.c
++++ b/drivers/net/wan/x25_asy.c
+@@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl)
+ 	netif_wake_queue(sl->dev);
+ }
+
+-/* Send one completely decapsulated IP datagram to the IP layer. */
++/* Send an LAPB frame to the LAPB module to process. */
+
+ static void x25_asy_bump(struct x25_asy *sl)
+ {
+@@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl)
+ 	count = sl->rcount;
+ 	dev->stats.rx_bytes += count;
+
+-	skb = dev_alloc_skb(count+1);
++	skb = dev_alloc_skb(count);
+ 	if (skb == NULL) {
+ 		netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
+ 		dev->stats.rx_dropped++;
+ 		return;
+ 	}
+-	skb_push(skb, 1); /* LAPB internal control */
+ 	skb_put_data(skb, sl->rbuff, count);
+ 	skb->protocol = x25_type_trans(skb, sl->dev);
+ 	err = lapb_data_received(skb->dev, skb);
+@@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl)
+ 		kfree_skb(skb);
+ 		printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
+ 	} else {
+-		netif_rx(skb);
+ 		dev->stats.rx_packets++;
+ 	}
+ }
+@@ -356,12 +354,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
+  */
+
+ /*
+- * Called when I frame data arrives. We did the work above - throw it
+- * at the net layer.
++ * Called when I frame data arrive. We add a pseudo header for upper
++ * layers and pass it to upper layers.
+  */
+
+ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
+ {
++	if (skb_cow(skb, 1)) {
++		kfree_skb(skb);
++		return NET_RX_DROP;
++	}
++	skb_push(skb, 1);
++	skb->data[0] = X25_IFACE_DATA;
++
++	skb->protocol = x25_type_trans(skb, dev);
++
+ 	return netif_rx(skb);
+ }
+
+@@ -657,7 +664,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
+ 	switch (s) {
+ 	case X25_END:
+ 		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+-		    sl->rcount > 2)
++		    sl->rcount >= 2)
+ 			x25_asy_bump(sl);
+ 		clear_bit(SLF_ESCAPE, &sl->flags);
+ 		sl->rcount = 0;
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index 668e25a76d69..358deb4ff830 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -216,6 +216,8 @@ struct tcp_sock {
+ 	} rack;
+ 	u16 advmss; /* Advertised MSS */
+ 	u8 compressed_ack;
++	u8 tlp_retrans:1, /* TLP is a retransmission */
++		unused_1:7;
+ 	u32 chrono_start; /* Start time in jiffies of a TCP chrono */
+ 	u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
+ 	u8 chrono_type:2, /* current chronograph type */
+@@ -238,7 +240,7 @@ struct tcp_sock {
+ 		save_syn:1, /* Save headers of SYN packet */
+ 		is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
+ 		syn_smc:1; /* SYN includes SMC */
+-	u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
++	u32 tlp_high_seq; /* snd_nxt at the time of TLP */
+
+ 	u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
+ 	u64 tcp_wstamp_ns; /* departure time for next sent data packet */
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index e5a3dc28116d..2fdb1b573e8c 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1187,7 +1187,10 @@ static int __must_check ax25_connect(struct socket *sock,
+ 	if (addr_len > sizeof(struct sockaddr_ax25) &&
+ 	    fsa->fsa_ax25.sax25_ndigis != 0) {
+ 		/* Valid number of digipeaters ? */
+-		if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) {
++		if (fsa->fsa_ax25.sax25_ndigis < 1 ||
++		    fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS ||
++		    addr_len < sizeof(struct sockaddr_ax25) +
++		    sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) {
+ 			err = -EINVAL;
+ 			goto out_release;
+ 		}
+@@ -1507,7 +1510,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 		struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;
+
+ 		/* Valid number of digipeaters ? */
+-		if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) {
++		if (usax->sax25_ndigis < 1 ||
++		    usax->sax25_ndigis > AX25_MAX_DIGIS ||
++		    addr_len < sizeof(struct sockaddr_ax25) +
++		    sizeof(ax25_address) * usax->sax25_ndigis) {
+ 			err = -EINVAL;
+ 			goto out;
+ 		}
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 727965565d31..25858f1f67cf 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5229,7 +5229,7 @@ static void flush_backlog(struct work_struct *work)
+ 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+ 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+ 			__skb_unlink(skb, &sd->input_pkt_queue);
+-			kfree_skb(skb);
++			dev_kfree_skb_irq(skb);
+ 			input_queue_head_incr(sd);
+ 		}
+ 	}
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 4c826b8bf9b1..2ebf9b252779 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1036,7 +1036,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
+ 	trans_timeout = queue->trans_timeout;
+ 	spin_unlock_irq(&queue->_xmit_lock);
+
+-	return sprintf(buf, "%lu", trans_timeout);
++	return sprintf(buf, fmt_ulong, trans_timeout);
+ }
+
+ static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 944acb1a9f29..b0c06a063776 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3231,7 +3231,8 @@ replay:
+ 		 */
+ 		if (err < 0) {
+ 			/* If device is not registered at all, free it now */
+-			if (dev->reg_state == NETREG_UNINITIALIZED)
++			if (dev->reg_state == NETREG_UNINITIALIZED ||
++			    dev->reg_state == NETREG_UNREGISTERED)
+ 				free_netdev(dev);
+ 			goto out;
+ 		}
+diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
+index f3ceec93f392..40829111fe00 100644
+--- a/net/core/sock_reuseport.c
++++ b/net/core/sock_reuseport.c
+@@ -112,6 +112,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
+ 	more_reuse->prog = reuse->prog;
+ 	more_reuse->reuseport_id = reuse->reuseport_id;
+ 	more_reuse->bind_inany = reuse->bind_inany;
++	more_reuse->has_conns = reuse->has_conns;
+
+ 	memcpy(more_reuse->socks, reuse->socks,
+ 	       reuse->num_socks * sizeof(struct sock *));
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 6d331307beca..5040f7ca37ec 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3505,10 +3505,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
+ 	}
+ }
+
+-/* This routine deals with acks during a TLP episode.
+- * We mark the end of a TLP episode on receiving TLP dupack or when
+- * ack is after tlp_high_seq.
+- * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
++/* This routine deals with acks during a TLP episode and ends an episode by
++ * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack
+  */
+ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
+ {
+@@ -3517,7 +3515,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
+ 	if (before(ack, tp->tlp_high_seq))
+ 		return;
+
+-	if (flag & FLAG_DSACKING_ACK) {
++	if (!tp->tlp_retrans) {
++		/* TLP of new data has been acknowledged */
++		tp->tlp_high_seq = 0;
++	} else if (flag & FLAG_DSACKING_ACK) {
+ 		/* This DSACK means original and TLP probe arrived; no loss */
+ 		tp->tlp_high_seq = 0;
+ 	} else if (after(ack, tp->tlp_high_seq)) {
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 5dc7485c4076..4407193bd702 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2564,6 +2564,11 @@ void tcp_send_loss_probe(struct sock *sk)
+ 	int pcount;
+ 	int mss = tcp_current_mss(sk);
+
++	/* At most one outstanding TLP */
++	if (tp->tlp_high_seq)
++		goto rearm_timer;
++
++	tp->tlp_retrans = 0;
+ 	skb = tcp_send_head(sk);
+ 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
+ 		pcount = tp->packets_out;
+@@ -2581,10 +2586,6 @@ void tcp_send_loss_probe(struct sock *sk)
+ 		return;
+ 	}
+
+-	/* At most one outstanding TLP retransmission. */
+-	if (tp->tlp_high_seq)
+-		goto rearm_timer;
+-
+ 	if (skb_still_in_host_queue(sk, skb))
+ 		goto rearm_timer;
+
+@@ -2606,10 +2607,12 @@ void tcp_send_loss_probe(struct sock *sk)
+ 	if (__tcp_retransmit_skb(sk, skb, 1))
+ 		goto rearm_timer;
+
++	tp->tlp_retrans = 1;
++
++probe_sent:
+ 	/* Record snd_nxt for loss detection. */
+ 	tp->tlp_high_seq = tp->snd_nxt;
+
+-probe_sent:
+ 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+ 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
+ 	inet_csk(sk)->icsk_pending = 0;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index f3b7cb725c1b..5d016bbdf16e 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -413,7 +413,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ 				     struct udp_hslot *hslot2,
+ 				     struct sk_buff *skb)
+ {
+-	struct sock *sk, *result;
++	struct sock *sk, *result, *reuseport_result;
+ 	int score, badness;
+ 	u32 hash = 0;
+
+@@ -423,17 +423,20 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ 		score = compute_score(sk, net, saddr, sport,
+ 				      daddr, hnum, dif, sdif);
+ 		if (score > badness) {
++			reuseport_result = NULL;
++
+ 			if (sk->sk_reuseport &&
+ 			    sk->sk_state != TCP_ESTABLISHED) {
+ 				hash = udp_ehashfn(net, daddr, hnum,
+ 						   saddr, sport);
+-				result = reuseport_select_sock(sk, hash, skb,
+-							sizeof(struct udphdr));
+-				if (result && !reuseport_has_conns(sk, false))
+-					return result;
++				reuseport_result = reuseport_select_sock(sk, hash, skb,
++									 sizeof(struct udphdr));
++				if (reuseport_result && !reuseport_has_conns(sk, false))
++					return reuseport_result;
+ 			}
++
++			result = reuseport_result ? : sk;
+ 			badness = score;
+-			result = sk;
+ 		}
+ 	}
+ 	return result;
+@@ -2045,7 +2048,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 	/*
+ 	 * UDP-Lite specific tests, ignored on UDP sockets
+ 	 */
+-	if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
++	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+
+ 		/*
+ 		 * MIB statistics other than incrementing the error count are
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 04d76f043e18..44876509d215 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1560,17 +1560,18 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
+ static int __net_init ip6gre_init_net(struct net *net)
+ {
+ 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
++	struct net_device *ndev;
+ 	int err;
+
+ 	if (!net_has_fallback_tunnels(net))
+ 		return 0;
+-	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
+-					  NET_NAME_UNKNOWN,
+-					  ip6gre_tunnel_setup);
+-	if (!ign->fb_tunnel_dev) {
++	ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
++			    NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
++	if (!ndev) {
+ 		err = -ENOMEM;
+ 		goto err_alloc_dev;
+ 	}
++	ign->fb_tunnel_dev = ndev;
+ 	dev_net_set(ign->fb_tunnel_dev, net);
+ 	/* FB netdevice is special: we have one, and only one per netns.
+ 	 * Allowing to move it to another netns is clearly unsafe.
+@@ -1590,7 +1591,7 @@ static int __net_init ip6gre_init_net(struct net *net)
+ 	return 0;
+
+ err_reg_dev:
+-	free_netdev(ign->fb_tunnel_dev);
++	free_netdev(ndev);
+ err_alloc_dev:
+ 	return err;
+ }
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 9fec580c968e..6762430280f5 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -148,7 +148,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ 		int dif, int sdif, struct udp_hslot *hslot2,
+ 		struct sk_buff *skb)
+ {
+-	struct sock *sk, *result;
++	struct sock *sk, *result, *reuseport_result;
+ 	int score, badness;
+ 	u32 hash = 0;
+
+@@ -158,17 +158,20 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ 		score = compute_score(sk, net, saddr, sport,
+ 				      daddr, hnum, dif, sdif);
+ 		if (score > badness) {
++			reuseport_result = NULL;
++
+ 			if (sk->sk_reuseport &&
+ 			    sk->sk_state != TCP_ESTABLISHED) {
+ 				hash = udp6_ehashfn(net, daddr, hnum,
+ 						    saddr, sport);
+
+-				result = reuseport_select_sock(sk, hash, skb,
+-							sizeof(struct udphdr));
+-				if (result && !reuseport_has_conns(sk, false))
+-					return result;
++				reuseport_result = reuseport_select_sock(sk, hash, skb,
++									 sizeof(struct udphdr));
++				if (reuseport_result && !reuseport_has_conns(sk, false))
++					return reuseport_result;
+ 			}
+-			result = sk;
++
++			result = reuseport_result ? : sk;
+ 			badness = score;
+ 		}
+ 	}
+@@ -643,7 +646,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 	/*
+ 	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
+ 	 */
+-	if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
++	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+
+ 		if (up->pcrlen == 0) { /* full coverage was set */
+ 			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 14c101e104ce..1ce1e710d025 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -1004,6 +1004,7 @@ static int qrtr_release(struct socket *sock)
+ 		sk->sk_state_change(sk);
+
+ 	sock_set_flag(sk, SOCK_DEAD);
++	sock_orphan(sk);
+ 	sock->sk = NULL;
+
+ 	if (!sock_flag(sk, SOCK_ZAPPED))
+diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
+index 8578c39ec839..6896a33ef842 100644
+--- a/net/rxrpc/recvmsg.c
++++ b/net/rxrpc/recvmsg.c
+@@ -464,7 +464,7 @@ try_again:
+ 	    list_empty(&rx->recvmsg_q) &&
+ 	    rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
+ 		release_sock(&rx->sk);
+-		return -ENODATA;
++		return -EAGAIN;
+ 	}
+
+ 	if (list_empty(&rx->recvmsg_q)) {
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 5e9c43d4a314..49d03c8c64da 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -306,7 +306,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
+ 	/* this should be in poll */
+ 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+-	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
++	if (sk->sk_shutdown & SEND_SHUTDOWN)
+ 		return -EPIPE;
+
+ 	more = msg->msg_flags & MSG_MORE;
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index c1a100d2fed3..e13cbd5c0193 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -22,17 +22,11 @@
+ #include <net/sctp/sm.h>
+ #include <net/sctp/stream_sched.h>
+
+-/* Migrates chunks from stream queues to new stream queues if needed,
+- * but not across associations. Also, removes those chunks to streams
+- * higher than the new max.
+- */
+-static void sctp_stream_outq_migrate(struct sctp_stream *stream,
+-				     struct sctp_stream *new, __u16 outcnt)
++static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
+ {
+ 	struct sctp_association *asoc;
+ 	struct sctp_chunk *ch, *temp;
+ 	struct sctp_outq *outq;
+-	int i;
+
+ 	asoc = container_of(stream, struct sctp_association, stream);
+ 	outq = &asoc->outqueue;
+@@ -56,6 +50,19 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
+
+ 		sctp_chunk_free(ch);
+ 	}
++}
++
++/* Migrates chunks from stream queues to new stream queues if needed,
++ * but not across associations. Also, removes those chunks to streams
++ * higher than the new max.
++ */
++static void sctp_stream_outq_migrate(struct sctp_stream *stream,
++				     struct sctp_stream *new, __u16 outcnt)
++{
++	int i;
++
++	if (stream->outcnt > outcnt)
++		sctp_stream_shrink_out(stream, outcnt);
+
+ 	if (new) {
+ 		/* Here we actually move the old ext stuff into the new
+@@ -1038,11 +1045,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
+ 	nums = ntohs(addstrm->number_of_streams);
+ 	number = stream->outcnt - nums;
+
+-	if (result == SCTP_STRRESET_PERFORMED)
++	if (result == SCTP_STRRESET_PERFORMED) {
+ 		for (i = number; i < stream->outcnt; i++)
+ 			SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
+-	else
++	} else {
++		sctp_stream_shrink_out(stream, number);
+ 		stream->outcnt = number;
++	}
+
+ 	*evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
+ 			0, nums, GFP_ATOMIC);