
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Fri, 31 Jul 2020 18:00:40
Message-Id: 1596218416.4be1d21e5760af6acabcd96eb6f872b1e6ceb10e.mpagano@gentoo
commit: 4be1d21e5760af6acabcd96eb6f872b1e6ceb10e
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 31 18:00:16 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul 31 18:00:16 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4be1d21e

Linux patch 4.19.136

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README               |   4 +
1135_linux-4.19.136.patch | 507 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 511 insertions(+)

diff --git a/0000_README b/0000_README
index ea4b4c9..b50ea6d 100644
--- a/0000_README
+++ b/0000_README
@@ -579,6 +579,10 @@ Patch: 1134_linux-4.19.135.patch
From: https://www.kernel.org
Desc: Linux 4.19.135

+Patch: 1135_linux-4.19.136.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.136
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1135_linux-4.19.136.patch b/1135_linux-4.19.136.patch
new file mode 100644
index 0000000..3e3d251
--- /dev/null
+++ b/1135_linux-4.19.136.patch
@@ -0,0 +1,507 @@
+diff --git a/Makefile b/Makefile
+index 1253143f3f6f..a76c159bb605 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 135
++SUBLEVEL = 136
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 056e34ce1edd..182b1908edec 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -209,6 +209,9 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
+ if (*ppos < 0 || !count)
+ return -EINVAL;
+
++ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
++ count = PAGE_SIZE << (MAX_ORDER - 1);
++
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+@@ -357,6 +360,9 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
+ if (*ppos < 0 || !count)
+ return -EINVAL;
+
++ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
++ count = PAGE_SIZE << (MAX_ORDER - 1);
++
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
+index 4f25c2d8fff0..6fe9695a5f18 100644
+--- a/drivers/net/wan/x25_asy.c
++++ b/drivers/net/wan/x25_asy.c
+@@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl)
+ netif_wake_queue(sl->dev);
+ }
+
+-/* Send one completely decapsulated IP datagram to the IP layer. */
++/* Send an LAPB frame to the LAPB module to process. */
+
+ static void x25_asy_bump(struct x25_asy *sl)
+ {
+@@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl)
+ count = sl->rcount;
+ dev->stats.rx_bytes += count;
+
+- skb = dev_alloc_skb(count+1);
++ skb = dev_alloc_skb(count);
+ if (skb == NULL) {
+ netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
+ dev->stats.rx_dropped++;
+ return;
+ }
+- skb_push(skb, 1); /* LAPB internal control */
+ skb_put_data(skb, sl->rbuff, count);
+ skb->protocol = x25_type_trans(skb, sl->dev);
+ err = lapb_data_received(skb->dev, skb);
+@@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl)
+ kfree_skb(skb);
+ printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
+ } else {
+- netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+ }
+@@ -356,12 +354,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
+ */
+
+ /*
+- * Called when I frame data arrives. We did the work above - throw it
+- * at the net layer.
++ * Called when I frame data arrive. We add a pseudo header for upper
++ * layers and pass it to upper layers.
+ */
+
+ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
+ {
++ if (skb_cow(skb, 1)) {
++ kfree_skb(skb);
++ return NET_RX_DROP;
++ }
++ skb_push(skb, 1);
++ skb->data[0] = X25_IFACE_DATA;
++
++ skb->protocol = x25_type_trans(skb, dev);
++
+ return netif_rx(skb);
+ }
+
+@@ -657,7 +664,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
+ switch (s) {
+ case X25_END:
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+- sl->rcount > 2)
++ sl->rcount >= 2)
+ x25_asy_bump(sl);
+ clear_bit(SLF_ESCAPE, &sl->flags);
+ sl->rcount = 0;
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index 4374196b98ea..1192f1e76015 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -225,6 +225,8 @@ struct tcp_sock {
+ } rack;
+ u16 advmss; /* Advertised MSS */
+ u8 compressed_ack;
++ u8 tlp_retrans:1, /* TLP is a retransmission */
++ unused_1:7;
+ u32 chrono_start; /* Start time in jiffies of a TCP chrono */
+ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
+ u8 chrono_type:2, /* current chronograph type */
+@@ -247,7 +249,7 @@ struct tcp_sock {
+ save_syn:1, /* Save headers of SYN packet */
+ is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
+ syn_smc:1; /* SYN includes SMC */
+- u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
++ u32 tlp_high_seq; /* snd_nxt at the time of TLP */
+
+ /* RTT measurement */
+ u64 tcp_mstamp; /* most recent packet received/sent */
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 5c7a513bbaaa..a45db78eaf00 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1190,7 +1190,10 @@ static int __must_check ax25_connect(struct socket *sock,
+ if (addr_len > sizeof(struct sockaddr_ax25) &&
+ fsa->fsa_ax25.sax25_ndigis != 0) {
+ /* Valid number of digipeaters ? */
+- if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) {
++ if (fsa->fsa_ax25.sax25_ndigis < 1 ||
++ fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS ||
++ addr_len < sizeof(struct sockaddr_ax25) +
++ sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) {
+ err = -EINVAL;
+ goto out_release;
+ }
+@@ -1510,7 +1513,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;
+
+ /* Valid number of digipeaters ? */
+- if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) {
++ if (usax->sax25_ndigis < 1 ||
++ usax->sax25_ndigis > AX25_MAX_DIGIS ||
++ addr_len < sizeof(struct sockaddr_ax25) +
++ sizeof(ax25_address) * usax->sax25_ndigis) {
+ err = -EINVAL;
+ goto out;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4b1053057ca6..42ba150fa18d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5252,7 +5252,7 @@ static void flush_backlog(struct work_struct *work)
+ skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+ if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+ __skb_unlink(skb, &sd->input_pkt_queue);
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ input_queue_head_incr(sd);
+ }
+ }
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 7614a4f42bfc..001d7f07e780 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1045,7 +1045,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
+ trans_timeout = queue->trans_timeout;
+ spin_unlock_irq(&queue->_xmit_lock);
+
+- return sprintf(buf, "%lu", trans_timeout);
++ return sprintf(buf, fmt_ulong, trans_timeout);
+ }
+
+ static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index f51973f458e4..935053ee7765 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3146,7 +3146,8 @@ replay:
+ */
+ if (err < 0) {
+ /* If device is not registered at all, free it now */
+- if (dev->reg_state == NETREG_UNINITIALIZED)
++ if (dev->reg_state == NETREG_UNINITIALIZED ||
++ dev->reg_state == NETREG_UNREGISTERED)
+ free_netdev(dev);
+ goto out;
+ }
+diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
+index fd38cf1d2b02..9c85ef2b7e1d 100644
+--- a/net/core/sock_reuseport.c
++++ b/net/core/sock_reuseport.c
+@@ -112,6 +112,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
+ more_reuse->prog = reuse->prog;
+ more_reuse->reuseport_id = reuse->reuseport_id;
+ more_reuse->bind_inany = reuse->bind_inany;
++ more_reuse->has_conns = reuse->has_conns;
+
+ memcpy(more_reuse->socks, reuse->socks,
+ reuse->num_socks * sizeof(struct sock *));
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 2896840618fa..9813d62de631 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3489,10 +3489,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
+ }
+ }
+
+-/* This routine deals with acks during a TLP episode.
+- * We mark the end of a TLP episode on receiving TLP dupack or when
+- * ack is after tlp_high_seq.
+- * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
++/* This routine deals with acks during a TLP episode and ends an episode by
++ * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack
+ */
+ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
+ {
+@@ -3501,7 +3499,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
+ if (before(ack, tp->tlp_high_seq))
+ return;
+
+- if (flag & FLAG_DSACKING_ACK) {
++ if (!tp->tlp_retrans) {
++ /* TLP of new data has been acknowledged */
++ tp->tlp_high_seq = 0;
++ } else if (flag & FLAG_DSACKING_ACK) {
+ /* This DSACK means original and TLP probe arrived; no loss */
+ tp->tlp_high_seq = 0;
+ } else if (after(ack, tp->tlp_high_seq)) {
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index b4d0d0aa6b38..74fb211e0ea6 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2495,6 +2495,11 @@ void tcp_send_loss_probe(struct sock *sk)
+ int pcount;
+ int mss = tcp_current_mss(sk);
+
++ /* At most one outstanding TLP */
++ if (tp->tlp_high_seq)
++ goto rearm_timer;
++
++ tp->tlp_retrans = 0;
+ skb = tcp_send_head(sk);
+ if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
+ pcount = tp->packets_out;
+@@ -2512,10 +2517,6 @@ void tcp_send_loss_probe(struct sock *sk)
+ return;
+ }
+
+- /* At most one outstanding TLP retransmission. */
+- if (tp->tlp_high_seq)
+- goto rearm_timer;
+-
+ if (skb_still_in_host_queue(sk, skb))
+ goto rearm_timer;
+
+@@ -2537,10 +2538,12 @@ void tcp_send_loss_probe(struct sock *sk)
+ if (__tcp_retransmit_skb(sk, skb, 1))
+ goto rearm_timer;
+
++ tp->tlp_retrans = 1;
++
++probe_sent:
+ /* Record snd_nxt for loss detection. */
+ tp->tlp_high_seq = tp->snd_nxt;
+
+-probe_sent:
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+ /* Reset s.t. tcp_rearm_rto will restart timer from now */
+ inet_csk(sk)->icsk_pending = 0;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 0ef04cda1b27..2aacf2b34834 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -433,7 +433,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ struct udp_hslot *hslot2,
+ struct sk_buff *skb)
+ {
+- struct sock *sk, *result;
++ struct sock *sk, *result, *reuseport_result;
+ int score, badness;
+ u32 hash = 0;
+
+@@ -443,17 +443,20 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ score = compute_score(sk, net, saddr, sport,
+ daddr, hnum, dif, sdif, exact_dif);
+ if (score > badness) {
++ reuseport_result = NULL;
++
+ if (sk->sk_reuseport &&
+ sk->sk_state != TCP_ESTABLISHED) {
+ hash = udp_ehashfn(net, daddr, hnum,
+ saddr, sport);
+- result = reuseport_select_sock(sk, hash, skb,
+- sizeof(struct udphdr));
+- if (result && !reuseport_has_conns(sk, false))
+- return result;
++ reuseport_result = reuseport_select_sock(sk, hash, skb,
++ sizeof(struct udphdr));
++ if (reuseport_result && !reuseport_has_conns(sk, false))
++ return reuseport_result;
+ }
++
++ result = reuseport_result ? : sk;
+ badness = score;
+- result = sk;
+ }
+ }
+ return result;
+@@ -1986,7 +1989,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ /*
+ * UDP-Lite specific tests, ignored on UDP sockets
+ */
+- if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
++ if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+
+ /*
+ * MIB statistics other than incrementing the error count are
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index f5144573c45c..7cc9cd83ecb5 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1580,17 +1580,18 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
+ static int __net_init ip6gre_init_net(struct net *net)
+ {
+ struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
++ struct net_device *ndev;
+ int err;
+
+ if (!net_has_fallback_tunnels(net))
+ return 0;
+- ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
+- NET_NAME_UNKNOWN,
+- ip6gre_tunnel_setup);
+- if (!ign->fb_tunnel_dev) {
++ ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
++ NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
++ if (!ndev) {
+ err = -ENOMEM;
+ goto err_alloc_dev;
+ }
++ ign->fb_tunnel_dev = ndev;
+ dev_net_set(ign->fb_tunnel_dev, net);
+ /* FB netdevice is special: we have one, and only one per netns.
+ * Allowing to move it to another netns is clearly unsafe.
+@@ -1610,7 +1611,7 @@ static int __net_init ip6gre_init_net(struct net *net)
+ return 0;
+
+ err_reg_dev:
+- free_netdev(ign->fb_tunnel_dev);
++ free_netdev(ndev);
+ err_alloc_dev:
+ return err;
+ }
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 1979922bcf67..6799ad462be3 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -167,7 +167,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ int dif, int sdif, bool exact_dif,
+ struct udp_hslot *hslot2, struct sk_buff *skb)
+ {
+- struct sock *sk, *result;
++ struct sock *sk, *result, *reuseport_result;
+ int score, badness;
+ u32 hash = 0;
+
+@@ -177,17 +177,20 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ score = compute_score(sk, net, saddr, sport,
+ daddr, hnum, dif, sdif, exact_dif);
+ if (score > badness) {
++ reuseport_result = NULL;
++
+ if (sk->sk_reuseport &&
+ sk->sk_state != TCP_ESTABLISHED) {
+ hash = udp6_ehashfn(net, daddr, hnum,
+ saddr, sport);
+
+- result = reuseport_select_sock(sk, hash, skb,
+- sizeof(struct udphdr));
+- if (result && !reuseport_has_conns(sk, false))
+- return result;
++ reuseport_result = reuseport_select_sock(sk, hash, skb,
++ sizeof(struct udphdr));
++ if (reuseport_result && !reuseport_has_conns(sk, false))
++ return reuseport_result;
+ }
+- result = sk;
++
++ result = reuseport_result ? : sk;
+ badness = score;
+ }
+ }
+@@ -606,7 +609,7 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ /*
+ * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
+ */
+- if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
++ if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+
+ if (up->pcrlen == 0) { /* full coverage was set */
+ net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 0baffc9666e6..b5671966fa03 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -1013,6 +1013,7 @@ static int qrtr_release(struct socket *sock)
+ sk->sk_state_change(sk);
+
+ sock_set_flag(sk, SOCK_DEAD);
++ sock_orphan(sk);
+ sock->sk = NULL;
+
+ if (!sock_flag(sk, SOCK_ZAPPED))
+diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
+index 0374b0623c8b..6e9d977f0797 100644
+--- a/net/rxrpc/recvmsg.c
++++ b/net/rxrpc/recvmsg.c
+@@ -453,7 +453,7 @@ try_again:
+ list_empty(&rx->recvmsg_q) &&
+ rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
+ release_sock(&rx->sk);
+- return -ENODATA;
++ return -EAGAIN;
+ }
+
+ if (list_empty(&rx->recvmsg_q)) {
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 250d3dae8af4..caee7632c257 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -278,7 +278,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
+ /* this should be in poll */
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
++ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ return -EPIPE;
+
+ more = msg->msg_flags & MSG_MORE;
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index 87061a4bb44b..516bc48be5bc 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -97,17 +97,11 @@ static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
+ return index;
+ }
+
+-/* Migrates chunks from stream queues to new stream queues if needed,
+- * but not across associations. Also, removes those chunks to streams
+- * higher than the new max.
+- */
+-static void sctp_stream_outq_migrate(struct sctp_stream *stream,
+- struct sctp_stream *new, __u16 outcnt)
++static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
+ {
+ struct sctp_association *asoc;
+ struct sctp_chunk *ch, *temp;
+ struct sctp_outq *outq;
+- int i;
+
+ asoc = container_of(stream, struct sctp_association, stream);
+ outq = &asoc->outqueue;
+@@ -131,6 +125,19 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
+
+ sctp_chunk_free(ch);
+ }
++}
++
++/* Migrates chunks from stream queues to new stream queues if needed,
++ * but not across associations. Also, removes those chunks to streams
++ * higher than the new max.
++ */
++static void sctp_stream_outq_migrate(struct sctp_stream *stream,
++ struct sctp_stream *new, __u16 outcnt)
++{
++ int i;
++
++ if (stream->outcnt > outcnt)
++ sctp_stream_shrink_out(stream, outcnt);
+
+ if (new) {
+ /* Here we actually move the old ext stuff into the new
+@@ -1136,11 +1143,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
+ nums = ntohs(addstrm->number_of_streams);
+ number = stream->outcnt - nums;
+
+- if (result == SCTP_STRRESET_PERFORMED)
++ if (result == SCTP_STRRESET_PERFORMED) {
+ for (i = number; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
+- else
++ } else {
++ sctp_stream_shrink_out(stream, number);
+ stream->outcnt = number;
++ }
+
+ *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
+ 0, nums, GFP_ATOMIC);