Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Sat, 31 Mar 2018 22:18:51
Message-Id: 1522534720.3dd51bb7cb6180ad0c4c2e6f1ff12ec6f1a45a42.mpagano@gentoo
commit: 3dd51bb7cb6180ad0c4c2e6f1ff12ec6f1a45a42
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 31 22:18:40 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 31 22:18:40 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3dd51bb7

Linux patch 4.14.32

0000_README | 4 +
1031_linux-4.14.32.patch | 1401 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1405 insertions(+)

diff --git a/0000_README b/0000_README
index 74f23dc..54facf6 100644
--- a/0000_README
+++ b/0000_README
@@ -167,6 +167,10 @@ Patch: 1030_linux-4.14.31.patch
From: http://www.kernel.org
Desc: Linux 4.14.31

+Patch: 1031_linux-4.14.32.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.32
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1031_linux-4.14.32.patch b/1031_linux-4.14.32.patch
new file mode 100644
index 0000000..f72eb49
--- /dev/null
+++ b/1031_linux-4.14.32.patch
@@ -0,0 +1,1401 @@
+diff --git a/Makefile b/Makefile
+index 99e31da48422..c4c681b53ff0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 31
++SUBLEVEL = 32
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
+index c6163874e4e7..c770ca37c9b2 100644
+--- a/drivers/net/ethernet/arc/emac_rockchip.c
++++ b/drivers/net/ethernet/arc/emac_rockchip.c
+@@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
+ /* Optional regulator for PHY */
+ priv->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(priv->regulator)) {
+- if (PTR_ERR(priv->regulator) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
++ err = -EPROBE_DEFER;
++ goto out_clk_disable;
++ }
+ dev_err(dev, "no regulator found\n");
+ priv->regulator = NULL;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index eb441e5e2cd8..1e856e8b9a92 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
+ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+ {
+- unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct net_device *ndev = priv->netdev;
++ unsigned int txbds_processed = 0;
+ struct bcm_sysport_cb *cb;
++ unsigned int txbds_ready;
++ unsigned int c_index;
+ u32 hw_ind;
+
+ /* Clear status before servicing to reduce spurious interrupts */
+@@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ /* Compute how many descriptors have been processed since last call */
+ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
+ c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
+- ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
+-
+- last_c_index = ring->c_index;
+- num_tx_cbs = ring->size;
+-
+- c_index &= (num_tx_cbs - 1);
+-
+- if (c_index >= last_c_index)
+- last_tx_cn = c_index - last_c_index;
+- else
+- last_tx_cn = num_tx_cbs - last_c_index + c_index;
++ txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
+
+ netif_dbg(priv, tx_done, ndev,
+- "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
+- ring->index, c_index, last_tx_cn, last_c_index);
++ "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
++ ring->index, ring->c_index, c_index, txbds_ready);
+
+- while (last_tx_cn-- > 0) {
+- cb = ring->cbs + last_c_index;
++ while (txbds_processed < txbds_ready) {
++ cb = &ring->cbs[ring->clean_index];
+ bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
+
+ ring->desc_count++;
+- last_c_index++;
+- last_c_index &= (num_tx_cbs - 1);
++ txbds_processed++;
++
++ if (likely(ring->clean_index < ring->size - 1))
++ ring->clean_index++;
++ else
++ ring->clean_index = 0;
+ }
+
+ u64_stats_update_begin(&priv->syncp);
+@@ -1406,6 +1402,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
+ netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+ ring->index = index;
+ ring->size = size;
++ ring->clean_index = 0;
+ ring->alloc_size = ring->size;
+ ring->desc_cpu = p;
+ ring->desc_count = ring->size;
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
+index 82e401df199e..a2006f5fc26f 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.h
++++ b/drivers/net/ethernet/broadcom/bcmsysport.h
+@@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring {
+ unsigned int desc_count; /* Number of descriptors */
+ unsigned int curr_desc; /* Current descriptor */
+ unsigned int c_index; /* Last consumer index */
+- unsigned int p_index; /* Current producer index */
++ unsigned int clean_index; /* Current clean index */
+ struct bcm_sysport_cb *cbs; /* Transmit control blocks */
+ struct dma_desc *desc_cpu; /* CPU view of the descriptor */
+ struct bcm_sysport_priv *priv; /* private context backpointer */
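The bcmsysport rewrite above replaces the branchy wraparound computation with a single masked subtraction. A minimal standalone sketch of that arithmetic, assuming a 16-bit hardware consumer index; the mask value and sample indices here are illustrative, not taken from the SYSTEMPORT hardware:

/* Shows why (c_index - old_c_index) & MASK counts processed
 * descriptors even when the hardware index wraps around. */
#include <stdio.h>

#define INDEX_MASK 0xffffu /* assumed 16-bit consumer index space */

static unsigned int txbds_ready(unsigned int hw_c_index,
                                unsigned int old_c_index)
{
        return (hw_c_index - old_c_index) & INDEX_MASK;
}

int main(void)
{
        /* no wrap: 120 - 100 = 20 descriptors completed */
        printf("%u\n", txbds_ready(120, 100));
        /* wrap: index ran from 0xfffa past 0xffff to 0x0004,
         * i.e. 10 descriptors completed */
        printf("%u\n", txbds_ready(0x0004, 0xfffa));
        return 0;
}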
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index 42258060f142..4f6e9d3470d5 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -2022,7 +2022,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
+ }
+
+ if (unlikely(err < 0)) {
+- percpu_stats->tx_errors++;
+ percpu_stats->tx_fifo_errors++;
+ return err;
+ }
+@@ -2292,7 +2291,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
+ vaddr = phys_to_virt(addr);
+ prefetch(vaddr + qm_fd_get_offset(fd));
+
+- fd_format = qm_fd_get_format(fd);
+ /* The only FD types that we may receive are contig and S/G */
+ WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
+
+@@ -2325,8 +2323,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
+
+ skb_len = skb->len;
+
+- if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
++ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
++ percpu_stats->rx_dropped++;
+ return qman_cb_dqrr_consume;
++ }
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += skb_len;
+@@ -2860,7 +2860,7 @@ static int dpaa_remove(struct platform_device *pdev)
+ struct device *dev;
+ int err;
+
+- dev = &pdev->dev;
++ dev = pdev->dev.parent;
+ net_dev = dev_get_drvdata(dev);
+
+ priv = netdev_priv(net_dev);
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 311539c6625f..eb2ea231c7ca 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3565,6 +3565,8 @@ fec_drv_remove(struct platform_device *pdev)
+ fec_enet_mii_remove(fep);
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
++ pm_runtime_put(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ of_node_put(fep->phy_node);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+index 93728c694e6d..0a9adc5962fb 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+@@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
+
+ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
+ MLXSW_SP_CPU_PORT_SB_CM,
++ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
++ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
++ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
++ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
++ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_CPU_PORT_SB_CM,
+- MLXSW_SP_CPU_PORT_SB_CM,
+- MLXSW_SP_CPU_PORT_SB_CM,
+- MLXSW_SP_CPU_PORT_SB_CM,
+- MLXSW_SP_CPU_PORT_SB_CM,
+- MLXSW_SP_CPU_PORT_SB_CM,
+- MLXSW_SP_SB_CM(10000, 0, 0),
++ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+index 9d989c96278c..e41f28602535 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+@@ -1663,6 +1663,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
+ iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
+
+ if (eth_type == ETH_P_IP) {
++ if (iph->protocol != IPPROTO_TCP) {
++ DP_NOTICE(p_hwfn,
++ "Unexpected ip protocol on ll2 %x\n",
++ iph->protocol);
++ return -EINVAL;
++ }
++
+ cm_info->local_ip[0] = ntohl(iph->daddr);
+ cm_info->remote_ip[0] = ntohl(iph->saddr);
+ cm_info->ip_version = TCP_IPV4;
+@@ -1671,6 +1678,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
+ *payload_len = ntohs(iph->tot_len) - ip_hlen;
+ } else if (eth_type == ETH_P_IPV6) {
+ ip6h = (struct ipv6hdr *)iph;
++
++ if (ip6h->nexthdr != IPPROTO_TCP) {
++ DP_NOTICE(p_hwfn,
++ "Unexpected ip protocol on ll2 %x\n",
++ iph->protocol);
++ return -EINVAL;
++ }
++
+ for (i = 0; i < 4; i++) {
+ cm_info->local_ip[i] =
+ ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index e5ee9f274a71..6eab2c632c75 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -2066,8 +2066,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
+ link_params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+- qede_rdma_dev_event_open(edev);
+-
+ edev->state = QEDE_STATE_OPEN;
+
+ DP_INFO(edev, "Ending successfully qede load\n");
+@@ -2168,12 +2166,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
+ DP_NOTICE(edev, "Link is up\n");
+ netif_tx_start_all_queues(edev->ndev);
+ netif_carrier_on(edev->ndev);
++ qede_rdma_dev_event_open(edev);
+ }
+ } else {
+ if (netif_carrier_ok(edev->ndev)) {
+ DP_NOTICE(edev, "Link is down\n");
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
++ qede_rdma_dev_event_close(edev);
+ }
+ }
+ }
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 14b646b3b084..a5bb7b19040e 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -996,7 +996,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
+ /* set speed_in input in case RMII mode is used in 100Mbps */
+ if (phy->speed == 100)
+ mac_control |= BIT(15);
+- else if (phy->speed == 10)
++ /* in band mode only works in 10Mbps RGMII mode */
++ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
+ mac_control |= BIT(18); /* In Band mode */
+
+ if (priv->rx_pause)
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 176fc0906bfe..0f35597553f4 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -1037,7 +1037,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
+ lowerdev_features &= (features | ~NETIF_F_LRO);
+ features = netdev_increment_features(lowerdev_features, features, mask);
+ features |= ALWAYS_ON_FEATURES;
+- features &= ~NETIF_F_NETNS_LOCAL;
++ features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES);
+
+ return features;
+ }
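The macvlan hunk swaps clearing one known-bad flag for clamping to an explicit whitelist, so any future unsupported lower-device feature is masked off as well. A toy sketch of the difference, with made-up bit values rather than the real NETIF_F_* flags:

#include <stdio.h>

#define F_SG    (1u << 0)
#define F_CSUM  (1u << 1)
#define F_NETNS (1u << 2)  /* must never be inherited */
#define F_OTHER (1u << 3)  /* likewise unsupported here */

#define ALWAYS_ON (F_SG)
#define SUPPORTED (F_SG | F_CSUM)

int main(void)
{
        unsigned int lower = F_SG | F_CSUM | F_NETNS | F_OTHER;

        /* old approach: only F_NETNS removed, F_OTHER still leaks */
        printf("%#x\n", lower & ~F_NETNS);
        /* new approach: whitelist keeps only what we can honour */
        printf("%#x\n", lower & (ALWAYS_ON | SUPPORTED));
        return 0;
}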
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 39de77a8bb63..dba6d17ad885 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -614,6 +614,91 @@ static void phy_error(struct phy_device *phydev)
+ phy_trigger_machine(phydev, false);
+ }
+
++/**
++ * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
++ * @phydev: target phy_device struct
++ */
++static int phy_disable_interrupts(struct phy_device *phydev)
++{
++ int err;
++
++ /* Disable PHY interrupts */
++ err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
++ if (err)
++ goto phy_err;
++
++ /* Clear the interrupt */
++ err = phy_clear_interrupt(phydev);
++ if (err)
++ goto phy_err;
++
++ return 0;
++
++phy_err:
++ phy_error(phydev);
++
++ return err;
++}
++
++/**
++ * phy_change - Called by the phy_interrupt to handle PHY changes
++ * @phydev: phy_device struct that interrupted
++ */
++static irqreturn_t phy_change(struct phy_device *phydev)
++{
++ if (phy_interrupt_is_valid(phydev)) {
++ if (phydev->drv->did_interrupt &&
++ !phydev->drv->did_interrupt(phydev))
++ goto ignore;
++
++ if (phy_disable_interrupts(phydev))
++ goto phy_err;
++ }
++
++ mutex_lock(&phydev->lock);
++ if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
++ phydev->state = PHY_CHANGELINK;
++ mutex_unlock(&phydev->lock);
++
++ if (phy_interrupt_is_valid(phydev)) {
++ atomic_dec(&phydev->irq_disable);
++ enable_irq(phydev->irq);
++
++ /* Reenable interrupts */
++ if (PHY_HALTED != phydev->state &&
++ phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
++ goto irq_enable_err;
++ }
++
++ /* reschedule state queue work to run as soon as possible */
++ phy_trigger_machine(phydev, true);
++ return IRQ_HANDLED;
++
++ignore:
++ atomic_dec(&phydev->irq_disable);
++ enable_irq(phydev->irq);
++ return IRQ_NONE;
++
++irq_enable_err:
++ disable_irq(phydev->irq);
++ atomic_inc(&phydev->irq_disable);
++phy_err:
++ phy_error(phydev);
++ return IRQ_NONE;
++}
++
++/**
++ * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
++ * @work: work_struct that describes the work to be done
++ */
++void phy_change_work(struct work_struct *work)
++{
++ struct phy_device *phydev =
++ container_of(work, struct phy_device, phy_queue);
++
++ phy_change(phydev);
++}
++
+ /**
+ * phy_interrupt - PHY interrupt handler
+ * @irq: interrupt line
+@@ -632,9 +717,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
+ disable_irq_nosync(irq);
+ atomic_inc(&phydev->irq_disable);
+
+- phy_change(phydev);
+-
+- return IRQ_HANDLED;
++ return phy_change(phydev);
+ }
+
+ /**
+@@ -651,32 +734,6 @@ static int phy_enable_interrupts(struct phy_device *phydev)
+ return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+ }
+
+-/**
+- * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
+- * @phydev: target phy_device struct
+- */
+-static int phy_disable_interrupts(struct phy_device *phydev)
+-{
+- int err;
+-
+- /* Disable PHY interrupts */
+- err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
+- if (err)
+- goto phy_err;
+-
+- /* Clear the interrupt */
+- err = phy_clear_interrupt(phydev);
+- if (err)
+- goto phy_err;
+-
+- return 0;
+-
+-phy_err:
+- phy_error(phydev);
+-
+- return err;
+-}
+-
+ /**
+ * phy_start_interrupts - request and enable interrupts for a PHY device
+ * @phydev: target phy_device struct
+@@ -727,64 +784,6 @@ int phy_stop_interrupts(struct phy_device *phydev)
+ }
+ EXPORT_SYMBOL(phy_stop_interrupts);
+
+-/**
+- * phy_change - Called by the phy_interrupt to handle PHY changes
+- * @phydev: phy_device struct that interrupted
+- */
+-void phy_change(struct phy_device *phydev)
+-{
+- if (phy_interrupt_is_valid(phydev)) {
+- if (phydev->drv->did_interrupt &&
+- !phydev->drv->did_interrupt(phydev))
+- goto ignore;
+-
+- if (phy_disable_interrupts(phydev))
+- goto phy_err;
+- }
+-
+- mutex_lock(&phydev->lock);
+- if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
+- phydev->state = PHY_CHANGELINK;
+- mutex_unlock(&phydev->lock);
+-
+- if (phy_interrupt_is_valid(phydev)) {
+- atomic_dec(&phydev->irq_disable);
+- enable_irq(phydev->irq);
+-
+- /* Reenable interrupts */
+- if (PHY_HALTED != phydev->state &&
+- phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
+- goto irq_enable_err;
+- }
+-
+- /* reschedule state queue work to run as soon as possible */
+- phy_trigger_machine(phydev, true);
+- return;
+-
+-ignore:
+- atomic_dec(&phydev->irq_disable);
+- enable_irq(phydev->irq);
+- return;
+-
+-irq_enable_err:
+- disable_irq(phydev->irq);
+- atomic_inc(&phydev->irq_disable);
+-phy_err:
+- phy_error(phydev);
+-}
+-
+-/**
+- * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
+- * @work: work_struct that describes the work to be done
+- */
+-void phy_change_work(struct work_struct *work)
+-{
+- struct phy_device *phydev =
+- container_of(work, struct phy_device, phy_queue);
+-
+- phy_change(phydev);
+-}
+-
+ /**
+ * phy_stop - Bring down the PHY link, and stop checking the status
+ * @phydev: target phy_device struct
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index d312b314825e..a1e7ea4d4b16 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -999,10 +999,17 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
+ "attached_dev");
+ if (!err) {
+- err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj,
+- "phydev");
+- if (err)
+- goto error;
++ err = sysfs_create_link_nowarn(&dev->dev.kobj,
++ &phydev->mdio.dev.kobj,
++ "phydev");
++ if (err) {
++ dev_err(&dev->dev, "could not add device link to %s err %d\n",
++ kobject_name(&phydev->mdio.dev.kobj),
++ err);
++ /* non-fatal - some net drivers can use one netdevice
++ * with more then one phy
++ */
++ }
+
+ phydev->sysfs_links = true;
+ }
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 38cd2e8fae23..34b24d7e1e2f 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -256,7 +256,7 @@ struct ppp_net {
+ /* Prototypes. */
+ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+ struct file *file, unsigned int cmd, unsigned long arg);
+-static void ppp_xmit_process(struct ppp *ppp);
++static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
+ static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
+ static void ppp_push(struct ppp *ppp);
+ static void ppp_channel_push(struct channel *pch);
+@@ -512,13 +512,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
+ goto out;
+ }
+
+- skb_queue_tail(&pf->xq, skb);
+-
+ switch (pf->kind) {
+ case INTERFACE:
+- ppp_xmit_process(PF_TO_PPP(pf));
++ ppp_xmit_process(PF_TO_PPP(pf), skb);
+ break;
+ case CHANNEL:
++ skb_queue_tail(&pf->xq, skb);
+ ppp_channel_push(PF_TO_CHANNEL(pf));
+ break;
+ }
+@@ -1264,8 +1263,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ put_unaligned_be16(proto, pp);
+
+ skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
+- skb_queue_tail(&ppp->file.xq, skb);
+- ppp_xmit_process(ppp);
++ ppp_xmit_process(ppp, skb);
++
+ return NETDEV_TX_OK;
+
+ outf:
+@@ -1417,13 +1416,14 @@ static void ppp_setup(struct net_device *dev)
+ */
+
+ /* Called to do any work queued up on the transmit side that can now be done */
+-static void __ppp_xmit_process(struct ppp *ppp)
++static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
+ {
+- struct sk_buff *skb;
+-
+ ppp_xmit_lock(ppp);
+ if (!ppp->closing) {
+ ppp_push(ppp);
++
++ if (skb)
++ skb_queue_tail(&ppp->file.xq, skb);
+ while (!ppp->xmit_pending &&
+ (skb = skb_dequeue(&ppp->file.xq)))
+ ppp_send_frame(ppp, skb);
+@@ -1437,7 +1437,7 @@ static void __ppp_xmit_process(struct ppp *ppp)
+ ppp_xmit_unlock(ppp);
+ }
+
+-static void ppp_xmit_process(struct ppp *ppp)
++static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
+ {
+ local_bh_disable();
+
+@@ -1445,7 +1445,7 @@ static void ppp_xmit_process(struct ppp *ppp)
+ goto err;
+
+ (*this_cpu_ptr(ppp->xmit_recursion))++;
+- __ppp_xmit_process(ppp);
++ __ppp_xmit_process(ppp, skb);
+ (*this_cpu_ptr(ppp->xmit_recursion))--;
+
+ local_bh_enable();
+@@ -1455,6 +1455,8 @@ static void ppp_xmit_process(struct ppp *ppp)
+ err:
+ local_bh_enable();
+
++ kfree_skb(skb);
++
+ if (net_ratelimit())
+ netdev_err(ppp->dev, "recursion detected\n");
+ }
+@@ -1939,7 +1941,7 @@ static void __ppp_channel_push(struct channel *pch)
+ if (skb_queue_empty(&pch->file.xq)) {
+ ppp = pch->ppp;
+ if (ppp)
+- __ppp_xmit_process(ppp);
++ __ppp_xmit_process(ppp, NULL);
+ }
+ }
+
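The ppp rework hands the skb down as an argument and only queues it once the per-CPU recursion guard has passed and the transmit lock is held, so the recursion error path can free the packet instead of leaving it queued. A rough single-threaded sketch of that shape; illustrative types, no real locking, not the PPP code:

#include <stdio.h>
#include <stdlib.h>

struct pkt { struct pkt *next; int id; };

static struct pkt *xq_head, *xq_tail;  /* transmit queue */
static int xmit_recursion;             /* per-CPU counter in the kernel */

static void xq_enqueue(struct pkt *p)
{
        p->next = NULL;
        if (xq_tail)
                xq_tail->next = p;
        else
                xq_head = p;
        xq_tail = p;
}

static struct pkt *xq_dequeue(void)
{
        struct pkt *p = xq_head;

        if (p) {
                xq_head = p->next;
                if (!xq_head)
                        xq_tail = NULL;
        }
        return p;
}

/* caller holds the "xmit lock" and has passed the recursion check */
static void __xmit_process(struct pkt *p)
{
        if (p)                          /* queue only now, under the lock */
                xq_enqueue(p);
        while ((p = xq_dequeue())) {
                printf("sending packet %d\n", p->id);
                free(p);
        }
}

static void xmit_process(struct pkt *p)
{
        if (xmit_recursion) {           /* recursion: free, don't queue */
                free(p);
                fprintf(stderr, "recursion detected\n");
                return;
        }
        xmit_recursion++;
        __xmit_process(p);
        xmit_recursion--;
}

int main(void)
{
        struct pkt *p = malloc(sizeof(*p));

        p->id = 1;
        xmit_process(p);
        return 0;
}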
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index ae53e899259f..23cd41c82210 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2394,7 +2394,7 @@ static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
+ if (!nlh) {
+ err = __send_and_alloc_skb(&skb, team, portid, send_func);
+ if (err)
+- goto errout;
++ return err;
+ goto send_done;
+ }
+
+@@ -2680,7 +2680,7 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
+ if (!nlh) {
+ err = __send_and_alloc_skb(&skb, team, portid, send_func);
+ if (err)
+- goto errout;
++ return err;
+ goto send_done;
+ }
+
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 145b57762d8f..939b5b5e97ef 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -526,8 +526,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
+ queue == card->qdio.no_in_queues - 1;
+ }
+
+-
+-static int qeth_issue_next_read(struct qeth_card *card)
++static int __qeth_issue_next_read(struct qeth_card *card)
+ {
+ int rc;
+ struct qeth_cmd_buffer *iob;
+@@ -558,6 +557,17 @@ static int qeth_issue_next_read(struct qeth_card *card)
+ return rc;
+ }
+
++static int qeth_issue_next_read(struct qeth_card *card)
++{
++ int ret;
++
++ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
++ ret = __qeth_issue_next_read(card);
++ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
++
++ return ret;
++}
++
+ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
+ {
+ struct qeth_reply *reply;
+@@ -961,7 +971,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ card->thread_running_mask &= ~thread;
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+- wake_up(&card->wait_q);
++ wake_up_all(&card->wait_q);
+ }
+ EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
+
+@@ -1165,6 +1175,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ }
+ rc = qeth_get_problem(cdev, irb);
+ if (rc) {
++ card->read_or_write_problem = 1;
+ qeth_clear_ipacmd_list(card);
+ qeth_schedule_recovery(card);
+ goto out;
+@@ -1183,7 +1194,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ return;
+ if (channel == &card->read &&
+ channel->state == CH_STATE_UP)
+- qeth_issue_next_read(card);
++ __qeth_issue_next_read(card);
+
+ iob = channel->iob;
+ index = channel->buf_no;
+@@ -5061,8 +5072,6 @@ static void qeth_core_free_card(struct qeth_card *card)
+ QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ qeth_clean_channel(&card->read);
+ qeth_clean_channel(&card->write);
+- if (card->dev)
+- free_netdev(card->dev);
+ qeth_free_qdio_buffers(card);
+ unregister_service_level(&card->qeth_service_level);
+ kfree(card);
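The qeth change above is the usual double-underscore locking split: __qeth_issue_next_read() now expects the ccwdev lock to be held (the interrupt handler already runs with it), while the plain qeth_issue_next_read() wrapper takes the lock itself. A pthread stand-in for that pattern, with illustrative names rather than the s390 API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

/* __issue_next_read: caller must already hold dev_lock, the way the
 * driver's IRQ handler already holds the ccwdev lock. */
static int __issue_next_read(void)
{
        puts("issuing next read buffer");
        return 0;
}

/* issue_next_read: process-context wrapper that takes the lock. */
static int issue_next_read(void)
{
        int ret;

        pthread_mutex_lock(&dev_lock);
        ret = __issue_next_read();
        pthread_mutex_unlock(&dev_lock);
        return ret;
}

int main(void)
{
        return issue_next_read();
}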
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index 5a973ebcb13c..521293b1f4fa 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -935,8 +935,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
+ qeth_l2_set_offline(cgdev);
+
+ if (card->dev) {
+- netif_napi_del(&card->napi);
+ unregister_netdev(card->dev);
++ free_netdev(card->dev);
+ card->dev = NULL;
+ }
+ return;
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index 96576e729222..1c62cbbaa66f 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -3046,8 +3046,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
+ qeth_l3_set_offline(cgdev);
+
+ if (card->dev) {
+- netif_napi_del(&card->napi);
+ unregister_netdev(card->dev);
++ free_netdev(card->dev);
+ card->dev = NULL;
+ }
+
+diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
+index 18eefc3f1abe..0c6065dba48a 100644
+--- a/drivers/soc/fsl/qbman/qman.c
++++ b/drivers/soc/fsl/qbman/qman.c
+@@ -2414,39 +2414,21 @@ struct cgr_comp {
+ struct completion completion;
+ };
+
+-static int qman_delete_cgr_thread(void *p)
++static void qman_delete_cgr_smp_call(void *p)
+ {
+- struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
+- int ret;
+-
+- ret = qman_delete_cgr(cgr_comp->cgr);
+- complete(&cgr_comp->completion);
+-
+- return ret;
++ qman_delete_cgr((struct qman_cgr *)p);
+ }
+
+ void qman_delete_cgr_safe(struct qman_cgr *cgr)
+ {
+- struct task_struct *thread;
+- struct cgr_comp cgr_comp;
+-
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+- init_completion(&cgr_comp.completion);
+- cgr_comp.cgr = cgr;
+- thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
+- "cgr_del");
+-
+- if (IS_ERR(thread))
+- goto out;
+-
+- kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
+- wake_up_process(thread);
+- wait_for_completion(&cgr_comp.completion);
++ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
++ qman_delete_cgr_smp_call, cgr, true);
+ preempt_enable();
+ return;
+ }
+-out:
++
+ qman_delete_cgr(cgr);
+ preempt_enable();
+ }
+diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
+index aecb15f84557..808f018fa976 100644
+--- a/fs/sysfs/symlink.c
++++ b/fs/sysfs/symlink.c
+@@ -107,6 +107,7 @@ int sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target,
+ {
+ return sysfs_do_create_link(kobj, target, name, 0);
+ }
++EXPORT_SYMBOL_GPL(sysfs_create_link_nowarn);
+
+ /**
+ * sysfs_delete_link - remove symlink in object's directory.
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 1dff0a478b45..4e8f77504a57 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -696,13 +696,13 @@ struct sock_cgroup_data {
+ * updaters and return part of the previous pointer as the prioidx or
+ * classid. Such races are short-lived and the result isn't critical.
+ */
+-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
++static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
+ {
+ /* fallback to 1 which is always the ID of the root cgroup */
+ return (skcd->is_data & 1) ? skcd->prioidx : 1;
+ }
+
+-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
++static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
+ {
+ /* fallback to 0 which is the unconfigured default classid */
+ return (skcd->is_data & 1) ? skcd->classid : 0;
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 600076e1ce84..dca9e926b88f 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -895,7 +895,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
+ int phy_drivers_register(struct phy_driver *new_driver, int n,
+ struct module *owner);
+ void phy_state_machine(struct work_struct *work);
+-void phy_change(struct phy_device *phydev);
+ void phy_change_work(struct work_struct *work);
+ void phy_mac_interrupt(struct phy_device *phydev, int new_link);
+ void phy_start_machine(struct phy_device *phydev);
+diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
+index 361c08e35dbc..7fd514f36e74 100644
+--- a/include/linux/rhashtable.h
++++ b/include/linux/rhashtable.h
+@@ -750,8 +750,10 @@ static inline void *__rhashtable_insert_fast(
+ if (!key ||
+ (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+- rhashtable_compare(&arg, rht_obj(ht, head))))
++ rhashtable_compare(&arg, rht_obj(ht, head)))) {
++ pprev = &head->next;
+ continue;
++ }
+
+ data = rht_obj(ht, head);
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 6073e8bae025..f59acacaa265 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -723,6 +723,16 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
+ *to_free = skb;
+ }
+
++static inline void __qdisc_drop_all(struct sk_buff *skb,
++ struct sk_buff **to_free)
++{
++ if (skb->prev)
++ skb->prev->next = *to_free;
++ else
++ skb->next = *to_free;
++ *to_free = skb;
++}
++
+ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
+ struct qdisc_skb_head *qh,
+ struct sk_buff **to_free)
+@@ -843,6 +853,15 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
+ return NET_XMIT_DROP;
+ }
+
++static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
++ struct sk_buff **to_free)
++{
++ __qdisc_drop_all(skb, to_free);
++ qdisc_qstats_drop(sch);
++
++ return NET_XMIT_DROP;
++}
++
+ /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
+ long it will take to send a packet given its size.
+ */
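In __qdisc_drop_all(), netem links GSO segments through ->next and, as used here, the head's ->prev points at the last segment, so the whole chain can be spliced onto the to_free list in O(1). A toy model of the splice; illustrative struct, not sk_buff:

#include <stdio.h>
#include <stddef.h>

struct pkt {
        struct pkt *next;  /* next segment in the chain */
        struct pkt *prev;  /* on the head: tail of the chain, else NULL */
        int id;
};

static void drop_all(struct pkt *skb, struct pkt **to_free)
{
        if (skb->prev)                 /* chain: hook the tail ... */
                skb->prev->next = *to_free;
        else                           /* single packet */
                skb->next = *to_free;
        *to_free = skb;                /* ... and put the head first */
}

int main(void)
{
        struct pkt c = { NULL, NULL, 3 };
        struct pkt b = { &c, NULL, 2 };
        struct pkt a = { &b, &c, 1 };  /* head: prev points at tail */
        struct pkt *to_free = NULL;

        drop_all(&a, &to_free);
        for (struct pkt *p = to_free; p; p = p->next)
                printf("freeing %d\n", p->id);  /* 1, 2, 3 */
        return 0;
}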
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 0a13574134b8..d323d4fa742c 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1600,6 +1600,11 @@ enum tcp_chrono {
+ void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
+ void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
+
++static inline void tcp_init_send_head(struct sock *sk)
++{
++ sk->sk_send_head = NULL;
++}
++
+ /* write queue abstraction */
+ static inline void tcp_write_queue_purge(struct sock *sk)
+ {
+@@ -1610,6 +1615,7 @@ static inline void tcp_write_queue_purge(struct sock *sk)
+ sk_wmem_free_skb(sk, skb);
+ sk_mem_reclaim(sk);
+ tcp_clear_all_retrans_hints(tcp_sk(sk));
++ tcp_init_send_head(sk);
+ }
+
+ static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
+@@ -1672,11 +1678,6 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
+ tcp_sk(sk)->highest_sack = NULL;
+ }
+
+-static inline void tcp_init_send_head(struct sock *sk)
+-{
+- sk->sk_send_head = NULL;
+-}
+-
+ static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+ {
+ __skb_queue_tail(&sk->sk_write_queue, skb);
+diff --git a/lib/rhashtable.c b/lib/rhashtable.c
+index ddd7dde87c3c..b734ce731a7a 100644
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -537,8 +537,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
+ if (!key ||
+ (ht->p.obj_cmpfn ?
+ ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
+- rhashtable_compare(&arg, rht_obj(ht, head)))) {
++ rhashtable_compare(&arg, rht_obj(ht, head)))) {
++ pprev = &head->next;
+ continue;
++ }
+
+ if (!ht->rhlist)
+ return rht_obj(ht, head);
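Both rhashtable hunks add the same one-liner: when a pointer-to-pointer walk skips a non-matching node, pprev must advance with it, or a later unlink through pprev detaches the wrong part of the chain. A toy illustration of why the added pprev = &head->next matters; illustrative list code, not the rhashtable API:

#include <stdio.h>
#include <stddef.h>

struct node {
        struct node *next;
        int key;
};

/* unlink the node with the given key, returning it (or NULL) */
static struct node *unlink_key(struct node **head, int key)
{
        struct node **pprev = head;
        struct node *n;

        for (n = *head; n; n = n->next) {
                if (n->key != key) {
                        pprev = &n->next;  /* the line the fix adds */
                        continue;
                }
                *pprev = n->next;          /* correct predecessor link */
                return n;
        }
        return NULL;
}

int main(void)
{
        struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
        struct node *head = &a;

        unlink_key(&head, 2);
        for (struct node *p = head; p; p = p->next)
                printf("%d ", p->key);     /* prints: 1 3 */
        printf("\n");
        return 0;
}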
+diff --git a/net/core/dev.c b/net/core/dev.c
+index c75ef9d8105a..387af3415385 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3224,15 +3224,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+ #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
+ static void skb_update_prio(struct sk_buff *skb)
+ {
+- struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
++ const struct netprio_map *map;
++ const struct sock *sk;
++ unsigned int prioidx;
+
+- if (!skb->priority && skb->sk && map) {
+- unsigned int prioidx =
+- sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
++ if (skb->priority)
++ return;
++ map = rcu_dereference_bh(skb->dev->priomap);
++ if (!map)
++ return;
++ sk = skb_to_full_sk(skb);
++ if (!sk)
++ return;
+
+- if (prioidx < map->priomap_len)
+- skb->priority = map->priomap[prioidx];
+- }
++ prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
++
++ if (prioidx < map->priomap_len)
++ skb->priority = map->priomap[prioidx];
+ }
+ #else
+ #define skb_update_prio(skb)
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+index 7d430c1d9c3e..5ba973311025 100644
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -1776,7 +1776,7 @@ static int devlink_dpipe_tables_fill(struct genl_info *info,
+ if (!nlh) {
+ err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+ if (err)
+- goto err_skb_send_alloc;
++ return err;
+ goto send_done;
+ }
+
+@@ -1785,7 +1785,6 @@ static int devlink_dpipe_tables_fill(struct genl_info *info,
+ nla_put_failure:
+ err = -EMSGSIZE;
+ err_table_put:
+-err_skb_send_alloc:
+ genlmsg_cancel(skb, hdr);
+ nlmsg_free(skb);
+ return err;
+@@ -2051,7 +2050,7 @@ static int devlink_dpipe_entries_fill(struct genl_info *info,
+ table->counters_enabled,
+ &dump_ctx);
+ if (err)
+- goto err_entries_dump;
++ return err;
+
+ send_done:
+ nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
+@@ -2059,16 +2058,10 @@ static int devlink_dpipe_entries_fill(struct genl_info *info,
+ if (!nlh) {
+ err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
+ if (err)
+- goto err_skb_send_alloc;
++ return err;
+ goto send_done;
+ }
+ return genlmsg_reply(dump_ctx.skb, info);
+-
+-err_entries_dump:
+-err_skb_send_alloc:
+- genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr);
+- nlmsg_free(dump_ctx.skb);
+- return err;
+ }
+
+ static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
+@@ -2207,7 +2200,7 @@ static int devlink_dpipe_headers_fill(struct genl_info *info,
+ if (!nlh) {
+ err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+ if (err)
+- goto err_skb_send_alloc;
++ return err;
+ goto send_done;
+ }
+ return genlmsg_reply(skb, info);
+@@ -2215,7 +2208,6 @@ static int devlink_dpipe_headers_fill(struct genl_info *info,
+ nla_put_failure:
+ err = -EMSGSIZE;
+ err_table_put:
+-err_skb_send_alloc:
+ genlmsg_cancel(skb, hdr);
+ nlmsg_free(skb);
+ return err;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index cc811add68c6..564beb7e6d1c 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4171,7 +4171,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+
+ skb_queue_tail(&sk->sk_error_queue, skb);
+ if (!sock_flag(sk, SOCK_DEAD))
+- sk->sk_data_ready(sk);
++ sk->sk_error_report(sk);
+ return 0;
+ }
+ EXPORT_SYMBOL(sock_queue_err_skb);
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 9d43c1f40274..ff3b058cf58c 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -789,6 +789,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ if (skb == NULL)
+ goto out_release;
+
++ if (sk->sk_state == DCCP_CLOSED) {
++ rc = -ENOTCONN;
++ goto out_discard;
++ }
++
+ skb_reserve(skb, sk->sk_prot->max_header);
+ rc = memcpy_from_msg(skb_put(skb, len), msg, len);
+ if (rc != 0)
+diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
+index 974765b7d92a..e9f0489e4229 100644
+--- a/net/ieee802154/6lowpan/core.c
++++ b/net/ieee802154/6lowpan/core.c
+@@ -206,9 +206,13 @@ static inline void lowpan_netlink_fini(void)
+ static int lowpan_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+ {
+- struct net_device *wdev = netdev_notifier_info_to_dev(ptr);
++ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
++ struct wpan_dev *wpan_dev;
+
+- if (wdev->type != ARPHRD_IEEE802154)
++ if (ndev->type != ARPHRD_IEEE802154)
++ return NOTIFY_DONE;
++ wpan_dev = ndev->ieee802154_ptr;
++ if (!wpan_dev)
+ return NOTIFY_DONE;
+
+ switch (event) {
+@@ -217,8 +221,8 @@ static int lowpan_device_event(struct notifier_block *unused,
+ * also delete possible lowpan interfaces which belongs
+ * to the wpan interface.
+ */
+- if (wdev->ieee802154_ptr->lowpan_dev)
+- lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL);
++ if (wpan_dev->lowpan_dev)
++ lowpan_dellink(wpan_dev->lowpan_dev, NULL);
+ break;
+ default:
+ return NOTIFY_DONE;
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index af74d0433453..e691705f0a85 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -119,6 +119,9 @@ static void inet_frag_secret_rebuild(struct inet_frags *f)
+
+ static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
+ {
++ if (!hlist_unhashed(&q->list_evictor))
++ return false;
++
+ return q->net->low_thresh == 0 ||
+ frag_mem_limit(q->net) >= q->net->low_thresh;
+ }
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index f56aab54e0c8..1e70ed5244ea 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -258,7 +258,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
+ src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+ if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
+ return -EINVAL;
+- ipc->oif = src_info->ipi6_ifindex;
++ if (src_info->ipi6_ifindex)
++ ipc->oif = src_info->ipi6_ifindex;
+ ipc->addr = src_info->ipi6_addr.s6_addr32[3];
+ continue;
+ }
+@@ -288,7 +289,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
+ return -EINVAL;
+ info = (struct in_pktinfo *)CMSG_DATA(cmsg);
+- ipc->oif = info->ipi_ifindex;
++ if (info->ipi_ifindex)
++ ipc->oif = info->ipi_ifindex;
+ ipc->addr = info->ipi_spec_dst.s_addr;
+ break;
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index fe11128d7df4..38b9a6276a9d 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3445,6 +3445,7 @@ int tcp_abort(struct sock *sk, int err)
+
+ bh_unlock_sock(sk);
+ local_bh_enable();
++ tcp_write_queue_purge(sk);
+ release_sock(sk);
+ return 0;
+ }
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 14ac7df95380..a845b7692c1b 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -36,6 +36,7 @@ static void tcp_write_err(struct sock *sk)
+ sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
+ sk->sk_error_report(sk);
+
++ tcp_write_queue_purge(sk);
+ tcp_done(sk);
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
+ }
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index a1f918713006..287112da3c06 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -146,10 +146,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
+ struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
+ struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+- struct in6_addr *daddr;
++ struct in6_addr *daddr, old_daddr;
++ __be32 fl6_flowlabel = 0;
++ __be32 old_fl6_flowlabel;
++ __be16 old_dport;
+ int addr_type;
+ int err;
+- __be32 fl6_flowlabel = 0;
+
+ if (usin->sin6_family == AF_INET) {
+ if (__ipv6_only_sock(sk))
+@@ -239,9 +241,13 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
+ }
+ }
+
++ /* save the current peer information before updating it */
++ old_daddr = sk->sk_v6_daddr;
++ old_fl6_flowlabel = np->flow_label;
++ old_dport = inet->inet_dport;
++
+ sk->sk_v6_daddr = *daddr;
+ np->flow_label = fl6_flowlabel;
+-
+ inet->inet_dport = usin->sin6_port;
+
+ /*
+@@ -251,11 +257,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
+
+ err = ip6_datagram_dst_update(sk, true);
+ if (err) {
+- /* Reset daddr and dport so that udp_v6_early_demux()
+- * fails to find this socket
++ /* Restore the socket peer info, to keep it consistent with
++ * the old socket state
+ */
+- memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
+- inet->inet_dport = 0;
++ sk->sk_v6_daddr = old_daddr;
++ np->flow_label = old_fl6_flowlabel;
++ inet->inet_dport = old_dport;
+ goto out;
+ }
+
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 2a937c8d19e9..dd28005efb97 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1546,7 +1546,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
+ *(opt++) = (rd_len >> 3);
+ opt += 6;
+
+- memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8);
++ skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
++ rd_len - 8);
+ }
+
+ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index bd6cc688bd19..7a78dcfda68a 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -93,7 +93,8 @@ static void set_tun_src(struct net *net, struct net_device *dev,
+ /* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
+ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+ {
+- struct net *net = dev_net(skb_dst(skb)->dev);
++ struct dst_entry *dst = skb_dst(skb);
++ struct net *net = dev_net(dst->dev);
+ struct ipv6hdr *hdr, *inner_hdr;
+ struct ipv6_sr_hdr *isrh;
+ int hdrlen, tot_len, err;
+@@ -134,7 +135,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+ isrh->nexthdr = proto;
+
+ hdr->daddr = isrh->segments[isrh->first_segment];
+- set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr);
++ set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr);
+
+ #ifdef CONFIG_IPV6_SEG6_HMAC
+ if (sr_has_hmac(isrh)) {
+@@ -418,7 +419,7 @@ static int seg6_build_state(struct nlattr *nla,
+
+ slwt = seg6_lwt_lwtunnel(newts);
+
+- err = dst_cache_init(&slwt->cache, GFP_KERNEL);
++ err = dst_cache_init(&slwt->cache, GFP_ATOMIC);
+ if (err) {
+ kfree(newts);
+ return err;
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 148533169b1d..ca98276c2709 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -2433,9 +2433,11 @@ static int afiucv_iucv_init(void)
+ af_iucv_dev->driver = &af_iucv_driver;
+ err = device_register(af_iucv_dev);
+ if (err)
+- goto out_driver;
++ goto out_iucv_dev;
+ return 0;
+
++out_iucv_dev:
++ put_device(af_iucv_dev);
+ out_driver:
+ driver_unregister(&af_iucv_driver);
+ out_iucv:
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 58d53b907d53..9db49805b7be 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1381,24 +1381,32 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
+ .parse_msg = kcm_parse_func_strparser,
+ .read_sock_done = kcm_read_sock_done,
+ };
+- int err;
++ int err = 0;
+
+ csk = csock->sk;
+ if (!csk)
+ return -EINVAL;
+
++ lock_sock(csk);
++
+ /* Only allow TCP sockets to be attached for now */
+ if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
+- csk->sk_protocol != IPPROTO_TCP)
+- return -EOPNOTSUPP;
++ csk->sk_protocol != IPPROTO_TCP) {
++ err = -EOPNOTSUPP;
++ goto out;
++ }
+
+ /* Don't allow listeners or closed sockets */
+- if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
+- return -EOPNOTSUPP;
++ if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
++ err = -EOPNOTSUPP;
++ goto out;
++ }
+
+ psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
+- if (!psock)
+- return -ENOMEM;
++ if (!psock) {
++ err = -ENOMEM;
++ goto out;
++ }
+
+ psock->mux = mux;
+ psock->sk = csk;
+@@ -1407,7 +1415,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
+ err = strp_init(&psock->strp, csk, &cb);
+ if (err) {
+ kmem_cache_free(kcm_psockp, psock);
+- return err;
++ goto out;
+ }
+
+ write_lock_bh(&csk->sk_callback_lock);
+@@ -1419,7 +1427,8 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
+ write_unlock_bh(&csk->sk_callback_lock);
+ strp_done(&psock->strp);
+ kmem_cache_free(kcm_psockp, psock);
+- return -EALREADY;
++ err = -EALREADY;
++ goto out;
+ }
+
+ psock->save_data_ready = csk->sk_data_ready;
+@@ -1455,7 +1464,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
+ /* Schedule RX work in case there are already bytes queued */
+ strp_check_rcv(&psock->strp);
+
+- return 0;
++out:
++ release_sock(csk);
++
++ return err;
+ }
+
+ static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
+@@ -1507,6 +1519,7 @@ static void kcm_unattach(struct kcm_psock *psock)
+
+ if (WARN_ON(psock->rx_kcm)) {
+ write_unlock_bh(&csk->sk_callback_lock);
++ release_sock(csk);
+ return;
+ }
+
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index af22aa8ae35b..490d7360222e 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1562,9 +1562,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
+ encap = cfg->encap;
+
+ /* Quick sanity checks */
++ err = -EPROTONOSUPPORT;
++ if (sk->sk_type != SOCK_DGRAM) {
++ pr_debug("tunl %hu: fd %d wrong socket type\n",
++ tunnel_id, fd);
++ goto err;
++ }
+ switch (encap) {
+ case L2TP_ENCAPTYPE_UDP:
+- err = -EPROTONOSUPPORT;
+ if (sk->sk_protocol != IPPROTO_UDP) {
+ pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
+@@ -1572,7 +1577,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
+ }
+ break;
+ case L2TP_ENCAPTYPE_IP:
+- err = -EPROTONOSUPPORT;
+ if (sk->sk_protocol != IPPROTO_L2TP) {
+ pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 6f02499ef007..b9ce82c9440f 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1106,7 +1106,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
+- goto error;
++ return err;
+ return delivered ? 0 : -ESRCH;
+ error:
+ kfree_skb(skb);
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index 30c96274c638..22bf1a376b91 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -153,6 +153,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
+ metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
+ break;
+ default:
++ ret = -EINVAL;
+ goto err_out;
+ }
+
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index b1266e75ca43..8c8df75dbead 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -513,7 +513,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ }
+
+ if (unlikely(sch->q.qlen >= sch->limit))
+- return qdisc_drop(skb, sch, to_free);
++ return qdisc_drop_all(skb, sch, to_free);
+
+ qdisc_qstats_backlog_inc(sch, skb);
+