
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.15 commit in: /
Date: Sat, 31 Mar 2018 22:20:17
Message-Id: 1522534790.b2dfe994a979d5ace0f18e467c1e82bfa4d3ab30.mpagano@gentoo
1 commit: b2dfe994a979d5ace0f18e467c1e82bfa4d3ab30
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Mar 31 22:19:50 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat Mar 31 22:19:50 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b2dfe994
7
8 Linux patch 4.15.15
9
10 0000_README | 4 +
11 1014_linux-4.15.15.patch | 1683 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 1687 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index f4d8a80..f1a4ce6 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -99,6 +99,10 @@ Patch: 1013_linux-4.15.14.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.15.14
21
22 +Patch: 1014_linux-4.15.15.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.15.15
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1014_linux-4.15.15.patch b/1014_linux-4.15.15.patch
31 new file mode 100644
32 index 0000000..ab1089f
33 --- /dev/null
34 +++ b/1014_linux-4.15.15.patch
35 @@ -0,0 +1,1683 @@
36 +diff --git a/Makefile b/Makefile
37 +index a5e561900daf..20c9b7bfeed4 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,7 +1,7 @@
41 + # SPDX-License-Identifier: GPL-2.0
42 + VERSION = 4
43 + PATCHLEVEL = 15
44 +-SUBLEVEL = 14
45 ++SUBLEVEL = 15
46 + EXTRAVERSION =
47 + NAME = Fearless Coyote
48 +
49 +diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
50 +index 16f9bee992fe..0f6576802607 100644
51 +--- a/drivers/net/ethernet/arc/emac_rockchip.c
52 ++++ b/drivers/net/ethernet/arc/emac_rockchip.c
53 +@@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
54 + /* Optional regulator for PHY */
55 + priv->regulator = devm_regulator_get_optional(dev, "phy");
56 + if (IS_ERR(priv->regulator)) {
57 +- if (PTR_ERR(priv->regulator) == -EPROBE_DEFER)
58 +- return -EPROBE_DEFER;
59 ++ if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
60 ++ err = -EPROBE_DEFER;
61 ++ goto out_clk_disable;
62 ++ }
63 + dev_err(dev, "no regulator found\n");
64 + priv->regulator = NULL;
65 + }
66 +diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
67 +index 087f01b4dc3a..f239ef2e6f23 100644
68 +--- a/drivers/net/ethernet/broadcom/bcmsysport.c
69 ++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
70 +@@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
71 + static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
72 + struct bcm_sysport_tx_ring *ring)
73 + {
74 +- unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
75 + unsigned int pkts_compl = 0, bytes_compl = 0;
76 + struct net_device *ndev = priv->netdev;
77 ++ unsigned int txbds_processed = 0;
78 + struct bcm_sysport_cb *cb;
79 ++ unsigned int txbds_ready;
80 ++ unsigned int c_index;
81 + u32 hw_ind;
82 +
83 + /* Clear status before servicing to reduce spurious interrupts */
84 +@@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
85 + /* Compute how many descriptors have been processed since last call */
86 + hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
87 + c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
88 +- ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
89 +-
90 +- last_c_index = ring->c_index;
91 +- num_tx_cbs = ring->size;
92 +-
93 +- c_index &= (num_tx_cbs - 1);
94 +-
95 +- if (c_index >= last_c_index)
96 +- last_tx_cn = c_index - last_c_index;
97 +- else
98 +- last_tx_cn = num_tx_cbs - last_c_index + c_index;
99 ++ txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
100 +
101 + netif_dbg(priv, tx_done, ndev,
102 +- "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
103 +- ring->index, c_index, last_tx_cn, last_c_index);
104 ++ "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
105 ++ ring->index, ring->c_index, c_index, txbds_ready);
106 +
107 +- while (last_tx_cn-- > 0) {
108 +- cb = ring->cbs + last_c_index;
109 ++ while (txbds_processed < txbds_ready) {
110 ++ cb = &ring->cbs[ring->clean_index];
111 + bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
112 +
113 + ring->desc_count++;
114 +- last_c_index++;
115 +- last_c_index &= (num_tx_cbs - 1);
116 ++ txbds_processed++;
117 ++
118 ++ if (likely(ring->clean_index < ring->size - 1))
119 ++ ring->clean_index++;
120 ++ else
121 ++ ring->clean_index = 0;
122 + }
123 +
124 + u64_stats_update_begin(&priv->syncp);
125 +@@ -1406,6 +1402,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
126 + netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
127 + ring->index = index;
128 + ring->size = size;
129 ++ ring->clean_index = 0;
130 + ring->alloc_size = ring->size;
131 + ring->desc_cpu = p;
132 + ring->desc_count = ring->size;
133 +diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
134 +index f5a984c1c986..19c91c76e327 100644
135 +--- a/drivers/net/ethernet/broadcom/bcmsysport.h
136 ++++ b/drivers/net/ethernet/broadcom/bcmsysport.h
137 +@@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring {
138 + unsigned int desc_count; /* Number of descriptors */
139 + unsigned int curr_desc; /* Current descriptor */
140 + unsigned int c_index; /* Last consumer index */
141 +- unsigned int p_index; /* Current producer index */
142 ++ unsigned int clean_index; /* Current clean index */
143 + struct bcm_sysport_cb *cbs; /* Transmit control blocks */
144 + struct dma_desc *desc_cpu; /* CPU view of the descriptor */
145 + struct bcm_sysport_priv *priv; /* private context backpointer */
146 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
147 +index 7caa8da48421..e4ec32a9ca15 100644
148 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
149 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
150 +@@ -2008,7 +2008,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
151 + }
152 +
153 + if (unlikely(err < 0)) {
154 +- percpu_stats->tx_errors++;
155 + percpu_stats->tx_fifo_errors++;
156 + return err;
157 + }
158 +@@ -2278,7 +2277,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
159 + vaddr = phys_to_virt(addr);
160 + prefetch(vaddr + qm_fd_get_offset(fd));
161 +
162 +- fd_format = qm_fd_get_format(fd);
163 + /* The only FD types that we may receive are contig and S/G */
164 + WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
165 +
166 +@@ -2311,8 +2309,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
167 +
168 + skb_len = skb->len;
169 +
170 +- if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
171 ++ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
172 ++ percpu_stats->rx_dropped++;
173 + return qman_cb_dqrr_consume;
174 ++ }
175 +
176 + percpu_stats->rx_packets++;
177 + percpu_stats->rx_bytes += skb_len;
178 +@@ -2860,7 +2860,7 @@ static int dpaa_remove(struct platform_device *pdev)
179 + struct device *dev;
180 + int err;
181 +
182 +- dev = &pdev->dev;
183 ++ dev = pdev->dev.parent;
184 + net_dev = dev_get_drvdata(dev);
185 +
186 + priv = netdev_priv(net_dev);
187 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
188 +index a74300a4459c..febadd39e29a 100644
189 +--- a/drivers/net/ethernet/freescale/fec_main.c
190 ++++ b/drivers/net/ethernet/freescale/fec_main.c
191 +@@ -3578,6 +3578,8 @@ fec_drv_remove(struct platform_device *pdev)
192 + fec_enet_mii_remove(fep);
193 + if (fep->reg_phy)
194 + regulator_disable(fep->reg_phy);
195 ++ pm_runtime_put(&pdev->dev);
196 ++ pm_runtime_disable(&pdev->dev);
197 + if (of_phy_is_fixed_link(np))
198 + of_phy_deregister_fixed_link(np);
199 + of_node_put(fep->phy_node);
200 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
201 +index 93728c694e6d..0a9adc5962fb 100644
202 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
203 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
204 +@@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
205 +
206 + static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
207 + MLXSW_SP_CPU_PORT_SB_CM,
208 ++ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
209 ++ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
210 ++ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
211 ++ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
212 ++ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
213 + MLXSW_SP_CPU_PORT_SB_CM,
214 +- MLXSW_SP_CPU_PORT_SB_CM,
215 +- MLXSW_SP_CPU_PORT_SB_CM,
216 +- MLXSW_SP_CPU_PORT_SB_CM,
217 +- MLXSW_SP_CPU_PORT_SB_CM,
218 +- MLXSW_SP_CPU_PORT_SB_CM,
219 +- MLXSW_SP_SB_CM(10000, 0, 0),
220 ++ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
221 + MLXSW_SP_CPU_PORT_SB_CM,
222 + MLXSW_SP_CPU_PORT_SB_CM,
223 + MLXSW_SP_CPU_PORT_SB_CM,
224 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
225 +index 409041eab189..fba7f5c34b85 100644
226 +--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
227 ++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
228 +@@ -1681,6 +1681,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
229 + iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
230 +
231 + if (eth_type == ETH_P_IP) {
232 ++ if (iph->protocol != IPPROTO_TCP) {
233 ++ DP_NOTICE(p_hwfn,
234 ++ "Unexpected ip protocol on ll2 %x\n",
235 ++ iph->protocol);
236 ++ return -EINVAL;
237 ++ }
238 ++
239 + cm_info->local_ip[0] = ntohl(iph->daddr);
240 + cm_info->remote_ip[0] = ntohl(iph->saddr);
241 + cm_info->ip_version = TCP_IPV4;
242 +@@ -1689,6 +1696,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
243 + *payload_len = ntohs(iph->tot_len) - ip_hlen;
244 + } else if (eth_type == ETH_P_IPV6) {
245 + ip6h = (struct ipv6hdr *)iph;
246 ++
247 ++ if (ip6h->nexthdr != IPPROTO_TCP) {
248 ++ DP_NOTICE(p_hwfn,
249 ++ "Unexpected ip protocol on ll2 %x\n",
250 ++ iph->protocol);
251 ++ return -EINVAL;
252 ++ }
253 ++
254 + for (i = 0; i < 4; i++) {
255 + cm_info->local_ip[i] =
256 + ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
257 +@@ -1906,8 +1921,8 @@ qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
258 + /* Missing lower byte is now available */
259 + mpa_len = fpdu->fpdu_length | *mpa_data;
260 + fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
261 +- fpdu->mpa_frag_len = fpdu->fpdu_length;
262 + /* one byte of hdr */
263 ++ fpdu->mpa_frag_len = 1;
264 + fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
265 + DP_VERBOSE(p_hwfn,
266 + QED_MSG_RDMA,
267 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
268 +index 8f9b3eb82137..cdcccecfc24a 100644
269 +--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
270 ++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
271 +@@ -2066,8 +2066,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
272 + link_params.link_up = true;
273 + edev->ops->common->set_link(edev->cdev, &link_params);
274 +
275 +- qede_rdma_dev_event_open(edev);
276 +-
277 + edev->state = QEDE_STATE_OPEN;
278 +
279 + DP_INFO(edev, "Ending successfully qede load\n");
280 +@@ -2168,12 +2166,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
281 + DP_NOTICE(edev, "Link is up\n");
282 + netif_tx_start_all_queues(edev->ndev);
283 + netif_carrier_on(edev->ndev);
284 ++ qede_rdma_dev_event_open(edev);
285 + }
286 + } else {
287 + if (netif_carrier_ok(edev->ndev)) {
288 + DP_NOTICE(edev, "Link is down\n");
289 + netif_tx_disable(edev->ndev);
290 + netif_carrier_off(edev->ndev);
291 ++ qede_rdma_dev_event_close(edev);
292 + }
293 + }
294 + }
295 +diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
296 +index a1ffc3ed77f9..c08d74cd1fd2 100644
297 +--- a/drivers/net/ethernet/ti/cpsw.c
298 ++++ b/drivers/net/ethernet/ti/cpsw.c
299 +@@ -996,7 +996,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
300 + /* set speed_in input in case RMII mode is used in 100Mbps */
301 + if (phy->speed == 100)
302 + mac_control |= BIT(15);
303 +- else if (phy->speed == 10)
304 ++ /* in band mode only works in 10Mbps RGMII mode */
305 ++ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
306 + mac_control |= BIT(18); /* In Band mode */
307 +
308 + if (priv->rx_pause)
309 +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
310 +index a0f2be81d52e..4884f6149b0a 100644
311 +--- a/drivers/net/macvlan.c
312 ++++ b/drivers/net/macvlan.c
313 +@@ -1036,7 +1036,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
314 + lowerdev_features &= (features | ~NETIF_F_LRO);
315 + features = netdev_increment_features(lowerdev_features, features, mask);
316 + features |= ALWAYS_ON_FEATURES;
317 +- features &= ~NETIF_F_NETNS_LOCAL;
318 ++ features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES);
319 +
320 + return features;
321 + }
322 +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
323 +index 39de77a8bb63..dba6d17ad885 100644
324 +--- a/drivers/net/phy/phy.c
325 ++++ b/drivers/net/phy/phy.c
326 +@@ -614,6 +614,91 @@ static void phy_error(struct phy_device *phydev)
327 + phy_trigger_machine(phydev, false);
328 + }
329 +
330 ++/**
331 ++ * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
332 ++ * @phydev: target phy_device struct
333 ++ */
334 ++static int phy_disable_interrupts(struct phy_device *phydev)
335 ++{
336 ++ int err;
337 ++
338 ++ /* Disable PHY interrupts */
339 ++ err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
340 ++ if (err)
341 ++ goto phy_err;
342 ++
343 ++ /* Clear the interrupt */
344 ++ err = phy_clear_interrupt(phydev);
345 ++ if (err)
346 ++ goto phy_err;
347 ++
348 ++ return 0;
349 ++
350 ++phy_err:
351 ++ phy_error(phydev);
352 ++
353 ++ return err;
354 ++}
355 ++
356 ++/**
357 ++ * phy_change - Called by the phy_interrupt to handle PHY changes
358 ++ * @phydev: phy_device struct that interrupted
359 ++ */
360 ++static irqreturn_t phy_change(struct phy_device *phydev)
361 ++{
362 ++ if (phy_interrupt_is_valid(phydev)) {
363 ++ if (phydev->drv->did_interrupt &&
364 ++ !phydev->drv->did_interrupt(phydev))
365 ++ goto ignore;
366 ++
367 ++ if (phy_disable_interrupts(phydev))
368 ++ goto phy_err;
369 ++ }
370 ++
371 ++ mutex_lock(&phydev->lock);
372 ++ if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
373 ++ phydev->state = PHY_CHANGELINK;
374 ++ mutex_unlock(&phydev->lock);
375 ++
376 ++ if (phy_interrupt_is_valid(phydev)) {
377 ++ atomic_dec(&phydev->irq_disable);
378 ++ enable_irq(phydev->irq);
379 ++
380 ++ /* Reenable interrupts */
381 ++ if (PHY_HALTED != phydev->state &&
382 ++ phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
383 ++ goto irq_enable_err;
384 ++ }
385 ++
386 ++ /* reschedule state queue work to run as soon as possible */
387 ++ phy_trigger_machine(phydev, true);
388 ++ return IRQ_HANDLED;
389 ++
390 ++ignore:
391 ++ atomic_dec(&phydev->irq_disable);
392 ++ enable_irq(phydev->irq);
393 ++ return IRQ_NONE;
394 ++
395 ++irq_enable_err:
396 ++ disable_irq(phydev->irq);
397 ++ atomic_inc(&phydev->irq_disable);
398 ++phy_err:
399 ++ phy_error(phydev);
400 ++ return IRQ_NONE;
401 ++}
402 ++
403 ++/**
404 ++ * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
405 ++ * @work: work_struct that describes the work to be done
406 ++ */
407 ++void phy_change_work(struct work_struct *work)
408 ++{
409 ++ struct phy_device *phydev =
410 ++ container_of(work, struct phy_device, phy_queue);
411 ++
412 ++ phy_change(phydev);
413 ++}
414 ++
415 + /**
416 + * phy_interrupt - PHY interrupt handler
417 + * @irq: interrupt line
418 +@@ -632,9 +717,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
419 + disable_irq_nosync(irq);
420 + atomic_inc(&phydev->irq_disable);
421 +
422 +- phy_change(phydev);
423 +-
424 +- return IRQ_HANDLED;
425 ++ return phy_change(phydev);
426 + }
427 +
428 + /**
429 +@@ -651,32 +734,6 @@ static int phy_enable_interrupts(struct phy_device *phydev)
430 + return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
431 + }
432 +
433 +-/**
434 +- * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
435 +- * @phydev: target phy_device struct
436 +- */
437 +-static int phy_disable_interrupts(struct phy_device *phydev)
438 +-{
439 +- int err;
440 +-
441 +- /* Disable PHY interrupts */
442 +- err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
443 +- if (err)
444 +- goto phy_err;
445 +-
446 +- /* Clear the interrupt */
447 +- err = phy_clear_interrupt(phydev);
448 +- if (err)
449 +- goto phy_err;
450 +-
451 +- return 0;
452 +-
453 +-phy_err:
454 +- phy_error(phydev);
455 +-
456 +- return err;
457 +-}
458 +-
459 + /**
460 + * phy_start_interrupts - request and enable interrupts for a PHY device
461 + * @phydev: target phy_device struct
462 +@@ -727,64 +784,6 @@ int phy_stop_interrupts(struct phy_device *phydev)
463 + }
464 + EXPORT_SYMBOL(phy_stop_interrupts);
465 +
466 +-/**
467 +- * phy_change - Called by the phy_interrupt to handle PHY changes
468 +- * @phydev: phy_device struct that interrupted
469 +- */
470 +-void phy_change(struct phy_device *phydev)
471 +-{
472 +- if (phy_interrupt_is_valid(phydev)) {
473 +- if (phydev->drv->did_interrupt &&
474 +- !phydev->drv->did_interrupt(phydev))
475 +- goto ignore;
476 +-
477 +- if (phy_disable_interrupts(phydev))
478 +- goto phy_err;
479 +- }
480 +-
481 +- mutex_lock(&phydev->lock);
482 +- if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
483 +- phydev->state = PHY_CHANGELINK;
484 +- mutex_unlock(&phydev->lock);
485 +-
486 +- if (phy_interrupt_is_valid(phydev)) {
487 +- atomic_dec(&phydev->irq_disable);
488 +- enable_irq(phydev->irq);
489 +-
490 +- /* Reenable interrupts */
491 +- if (PHY_HALTED != phydev->state &&
492 +- phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
493 +- goto irq_enable_err;
494 +- }
495 +-
496 +- /* reschedule state queue work to run as soon as possible */
497 +- phy_trigger_machine(phydev, true);
498 +- return;
499 +-
500 +-ignore:
501 +- atomic_dec(&phydev->irq_disable);
502 +- enable_irq(phydev->irq);
503 +- return;
504 +-
505 +-irq_enable_err:
506 +- disable_irq(phydev->irq);
507 +- atomic_inc(&phydev->irq_disable);
508 +-phy_err:
509 +- phy_error(phydev);
510 +-}
511 +-
512 +-/**
513 +- * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
514 +- * @work: work_struct that describes the work to be done
515 +- */
516 +-void phy_change_work(struct work_struct *work)
517 +-{
518 +- struct phy_device *phydev =
519 +- container_of(work, struct phy_device, phy_queue);
520 +-
521 +- phy_change(phydev);
522 +-}
523 +-
524 + /**
525 + * phy_stop - Bring down the PHY link, and stop checking the status
526 + * @phydev: target phy_device struct
527 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
528 +index d312b314825e..a1e7ea4d4b16 100644
529 +--- a/drivers/net/phy/phy_device.c
530 ++++ b/drivers/net/phy/phy_device.c
531 +@@ -999,10 +999,17 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
532 + err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
533 + "attached_dev");
534 + if (!err) {
535 +- err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj,
536 +- "phydev");
537 +- if (err)
538 +- goto error;
539 ++ err = sysfs_create_link_nowarn(&dev->dev.kobj,
540 ++ &phydev->mdio.dev.kobj,
541 ++ "phydev");
542 ++ if (err) {
543 ++ dev_err(&dev->dev, "could not add device link to %s err %d\n",
544 ++ kobject_name(&phydev->mdio.dev.kobj),
545 ++ err);
546 ++ /* non-fatal - some net drivers can use one netdevice
547 ++ * with more then one phy
548 ++ */
549 ++ }
550 +
551 + phydev->sysfs_links = true;
552 + }
553 +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
554 +index 9f79f9274c50..d37183aec313 100644
555 +--- a/drivers/net/ppp/ppp_generic.c
556 ++++ b/drivers/net/ppp/ppp_generic.c
557 +@@ -257,7 +257,7 @@ struct ppp_net {
558 + /* Prototypes. */
559 + static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
560 + struct file *file, unsigned int cmd, unsigned long arg);
561 +-static void ppp_xmit_process(struct ppp *ppp);
562 ++static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
563 + static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
564 + static void ppp_push(struct ppp *ppp);
565 + static void ppp_channel_push(struct channel *pch);
566 +@@ -513,13 +513,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
567 + goto out;
568 + }
569 +
570 +- skb_queue_tail(&pf->xq, skb);
571 +-
572 + switch (pf->kind) {
573 + case INTERFACE:
574 +- ppp_xmit_process(PF_TO_PPP(pf));
575 ++ ppp_xmit_process(PF_TO_PPP(pf), skb);
576 + break;
577 + case CHANNEL:
578 ++ skb_queue_tail(&pf->xq, skb);
579 + ppp_channel_push(PF_TO_CHANNEL(pf));
580 + break;
581 + }
582 +@@ -1267,8 +1266,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
583 + put_unaligned_be16(proto, pp);
584 +
585 + skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
586 +- skb_queue_tail(&ppp->file.xq, skb);
587 +- ppp_xmit_process(ppp);
588 ++ ppp_xmit_process(ppp, skb);
589 ++
590 + return NETDEV_TX_OK;
591 +
592 + outf:
593 +@@ -1420,13 +1419,14 @@ static void ppp_setup(struct net_device *dev)
594 + */
595 +
596 + /* Called to do any work queued up on the transmit side that can now be done */
597 +-static void __ppp_xmit_process(struct ppp *ppp)
598 ++static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
599 + {
600 +- struct sk_buff *skb;
601 +-
602 + ppp_xmit_lock(ppp);
603 + if (!ppp->closing) {
604 + ppp_push(ppp);
605 ++
606 ++ if (skb)
607 ++ skb_queue_tail(&ppp->file.xq, skb);
608 + while (!ppp->xmit_pending &&
609 + (skb = skb_dequeue(&ppp->file.xq)))
610 + ppp_send_frame(ppp, skb);
611 +@@ -1440,7 +1440,7 @@ static void __ppp_xmit_process(struct ppp *ppp)
612 + ppp_xmit_unlock(ppp);
613 + }
614 +
615 +-static void ppp_xmit_process(struct ppp *ppp)
616 ++static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
617 + {
618 + local_bh_disable();
619 +
620 +@@ -1448,7 +1448,7 @@ static void ppp_xmit_process(struct ppp *ppp)
621 + goto err;
622 +
623 + (*this_cpu_ptr(ppp->xmit_recursion))++;
624 +- __ppp_xmit_process(ppp);
625 ++ __ppp_xmit_process(ppp, skb);
626 + (*this_cpu_ptr(ppp->xmit_recursion))--;
627 +
628 + local_bh_enable();
629 +@@ -1458,6 +1458,8 @@ static void ppp_xmit_process(struct ppp *ppp)
630 + err:
631 + local_bh_enable();
632 +
633 ++ kfree_skb(skb);
634 ++
635 + if (net_ratelimit())
636 + netdev_err(ppp->dev, "recursion detected\n");
637 + }
638 +@@ -1942,7 +1944,7 @@ static void __ppp_channel_push(struct channel *pch)
639 + if (skb_queue_empty(&pch->file.xq)) {
640 + ppp = pch->ppp;
641 + if (ppp)
642 +- __ppp_xmit_process(ppp);
643 ++ __ppp_xmit_process(ppp, NULL);
644 + }
645 + }
646 +
647 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
648 +index a468439969df..56c701b73c12 100644
649 +--- a/drivers/net/team/team.c
650 ++++ b/drivers/net/team/team.c
651 +@@ -2395,7 +2395,7 @@ static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
652 + if (!nlh) {
653 + err = __send_and_alloc_skb(&skb, team, portid, send_func);
654 + if (err)
655 +- goto errout;
656 ++ return err;
657 + goto send_done;
658 + }
659 +
660 +@@ -2681,7 +2681,7 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
661 + if (!nlh) {
662 + err = __send_and_alloc_skb(&skb, team, portid, send_func);
663 + if (err)
664 +- goto errout;
665 ++ return err;
666 + goto send_done;
667 + }
668 +
669 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
670 +index 61e9d0bca197..eeabbcf7a4e2 100644
671 +--- a/drivers/s390/net/qeth_core_main.c
672 ++++ b/drivers/s390/net/qeth_core_main.c
673 +@@ -526,8 +526,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
674 + queue == card->qdio.no_in_queues - 1;
675 + }
676 +
677 +-
678 +-static int qeth_issue_next_read(struct qeth_card *card)
679 ++static int __qeth_issue_next_read(struct qeth_card *card)
680 + {
681 + int rc;
682 + struct qeth_cmd_buffer *iob;
683 +@@ -558,6 +557,17 @@ static int qeth_issue_next_read(struct qeth_card *card)
684 + return rc;
685 + }
686 +
687 ++static int qeth_issue_next_read(struct qeth_card *card)
688 ++{
689 ++ int ret;
690 ++
691 ++ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
692 ++ ret = __qeth_issue_next_read(card);
693 ++ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
694 ++
695 ++ return ret;
696 ++}
697 ++
698 + static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
699 + {
700 + struct qeth_reply *reply;
701 +@@ -961,7 +971,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
702 + spin_lock_irqsave(&card->thread_mask_lock, flags);
703 + card->thread_running_mask &= ~thread;
704 + spin_unlock_irqrestore(&card->thread_mask_lock, flags);
705 +- wake_up(&card->wait_q);
706 ++ wake_up_all(&card->wait_q);
707 + }
708 + EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
709 +
710 +@@ -1165,6 +1175,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
711 + }
712 + rc = qeth_get_problem(cdev, irb);
713 + if (rc) {
714 ++ card->read_or_write_problem = 1;
715 + qeth_clear_ipacmd_list(card);
716 + qeth_schedule_recovery(card);
717 + goto out;
718 +@@ -1183,7 +1194,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
719 + return;
720 + if (channel == &card->read &&
721 + channel->state == CH_STATE_UP)
722 +- qeth_issue_next_read(card);
723 ++ __qeth_issue_next_read(card);
724 +
725 + iob = channel->iob;
726 + index = channel->buf_no;
727 +@@ -5022,8 +5033,6 @@ static void qeth_core_free_card(struct qeth_card *card)
728 + QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
729 + qeth_clean_channel(&card->read);
730 + qeth_clean_channel(&card->write);
731 +- if (card->dev)
732 +- free_netdev(card->dev);
733 + qeth_free_qdio_buffers(card);
734 + unregister_service_level(&card->qeth_service_level);
735 + kfree(card);
736 +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
737 +index 5863ea170ff2..42d56b3bed82 100644
738 +--- a/drivers/s390/net/qeth_l2_main.c
739 ++++ b/drivers/s390/net/qeth_l2_main.c
740 +@@ -933,8 +933,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
741 + qeth_l2_set_offline(cgdev);
742 +
743 + if (card->dev) {
744 +- netif_napi_del(&card->napi);
745 + unregister_netdev(card->dev);
746 ++ free_netdev(card->dev);
747 + card->dev = NULL;
748 + }
749 + return;
750 +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
751 +index 33131c594627..5287eab5c600 100644
752 +--- a/drivers/s390/net/qeth_l3_main.c
753 ++++ b/drivers/s390/net/qeth_l3_main.c
754 +@@ -3042,8 +3042,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
755 + qeth_l3_set_offline(cgdev);
756 +
757 + if (card->dev) {
758 +- netif_napi_del(&card->napi);
759 + unregister_netdev(card->dev);
760 ++ free_netdev(card->dev);
761 + card->dev = NULL;
762 + }
763 +
764 +diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
765 +index e4f5bb056fd2..ba3cfa8e279b 100644
766 +--- a/drivers/soc/fsl/qbman/qman.c
767 ++++ b/drivers/soc/fsl/qbman/qman.c
768 +@@ -2443,39 +2443,21 @@ struct cgr_comp {
769 + struct completion completion;
770 + };
771 +
772 +-static int qman_delete_cgr_thread(void *p)
773 ++static void qman_delete_cgr_smp_call(void *p)
774 + {
775 +- struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
776 +- int ret;
777 +-
778 +- ret = qman_delete_cgr(cgr_comp->cgr);
779 +- complete(&cgr_comp->completion);
780 +-
781 +- return ret;
782 ++ qman_delete_cgr((struct qman_cgr *)p);
783 + }
784 +
785 + void qman_delete_cgr_safe(struct qman_cgr *cgr)
786 + {
787 +- struct task_struct *thread;
788 +- struct cgr_comp cgr_comp;
789 +-
790 + preempt_disable();
791 + if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
792 +- init_completion(&cgr_comp.completion);
793 +- cgr_comp.cgr = cgr;
794 +- thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
795 +- "cgr_del");
796 +-
797 +- if (IS_ERR(thread))
798 +- goto out;
799 +-
800 +- kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
801 +- wake_up_process(thread);
802 +- wait_for_completion(&cgr_comp.completion);
803 ++ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
804 ++ qman_delete_cgr_smp_call, cgr, true);
805 + preempt_enable();
806 + return;
807 + }
808 +-out:
809 ++
810 + qman_delete_cgr(cgr);
811 + preempt_enable();
812 + }
813 +diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
814 +index aecb15f84557..808f018fa976 100644
815 +--- a/fs/sysfs/symlink.c
816 ++++ b/fs/sysfs/symlink.c
817 +@@ -107,6 +107,7 @@ int sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target,
818 + {
819 + return sysfs_do_create_link(kobj, target, name, 0);
820 + }
821 ++EXPORT_SYMBOL_GPL(sysfs_create_link_nowarn);
822 +
823 + /**
824 + * sysfs_delete_link - remove symlink in object's directory.
825 +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
826 +index 8b7fd8eeccee..cb8a9ce149de 100644
827 +--- a/include/linux/cgroup-defs.h
828 ++++ b/include/linux/cgroup-defs.h
829 +@@ -755,13 +755,13 @@ struct sock_cgroup_data {
830 + * updaters and return part of the previous pointer as the prioidx or
831 + * classid. Such races are short-lived and the result isn't critical.
832 + */
833 +-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
834 ++static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
835 + {
836 + /* fallback to 1 which is always the ID of the root cgroup */
837 + return (skcd->is_data & 1) ? skcd->prioidx : 1;
838 + }
839 +
840 +-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
841 ++static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
842 + {
843 + /* fallback to 0 which is the unconfigured default classid */
844 + return (skcd->is_data & 1) ? skcd->classid : 0;
845 +diff --git a/include/linux/phy.h b/include/linux/phy.h
846 +index 123cd703741d..ea0cbd6d9556 100644
847 +--- a/include/linux/phy.h
848 ++++ b/include/linux/phy.h
849 +@@ -897,7 +897,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
850 + int phy_drivers_register(struct phy_driver *new_driver, int n,
851 + struct module *owner);
852 + void phy_state_machine(struct work_struct *work);
853 +-void phy_change(struct phy_device *phydev);
854 + void phy_change_work(struct work_struct *work);
855 + void phy_mac_interrupt(struct phy_device *phydev, int new_link);
856 + void phy_start_machine(struct phy_device *phydev);
857 +diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
858 +index 361c08e35dbc..7fd514f36e74 100644
859 +--- a/include/linux/rhashtable.h
860 ++++ b/include/linux/rhashtable.h
861 +@@ -750,8 +750,10 @@ static inline void *__rhashtable_insert_fast(
862 + if (!key ||
863 + (params.obj_cmpfn ?
864 + params.obj_cmpfn(&arg, rht_obj(ht, head)) :
865 +- rhashtable_compare(&arg, rht_obj(ht, head))))
866 ++ rhashtable_compare(&arg, rht_obj(ht, head)))) {
867 ++ pprev = &head->next;
868 + continue;
869 ++ }
870 +
871 + data = rht_obj(ht, head);
872 +
873 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
874 +index d6ec5a5a6782..d794aebb3157 100644
875 +--- a/include/net/sch_generic.h
876 ++++ b/include/net/sch_generic.h
877 +@@ -735,6 +735,16 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
878 + *to_free = skb;
879 + }
880 +
881 ++static inline void __qdisc_drop_all(struct sk_buff *skb,
882 ++ struct sk_buff **to_free)
883 ++{
884 ++ if (skb->prev)
885 ++ skb->prev->next = *to_free;
886 ++ else
887 ++ skb->next = *to_free;
888 ++ *to_free = skb;
889 ++}
890 ++
891 + static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
892 + struct qdisc_skb_head *qh,
893 + struct sk_buff **to_free)
894 +@@ -855,6 +865,15 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
895 + return NET_XMIT_DROP;
896 + }
897 +
898 ++static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
899 ++ struct sk_buff **to_free)
900 ++{
901 ++ __qdisc_drop_all(skb, to_free);
902 ++ qdisc_qstats_drop(sch);
903 ++
904 ++ return NET_XMIT_DROP;
905 ++}
906 ++
907 + /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
908 + long it will take to send a packet given its size.
909 + */
910 +diff --git a/lib/rhashtable.c b/lib/rhashtable.c
911 +index ddd7dde87c3c..b734ce731a7a 100644
912 +--- a/lib/rhashtable.c
913 ++++ b/lib/rhashtable.c
914 +@@ -537,8 +537,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
915 + if (!key ||
916 + (ht->p.obj_cmpfn ?
917 + ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
918 +- rhashtable_compare(&arg, rht_obj(ht, head))))
919 ++ rhashtable_compare(&arg, rht_obj(ht, head)))) {
920 ++ pprev = &head->next;
921 + continue;
922 ++ }
923 +
924 + if (!ht->rhlist)
925 + return rht_obj(ht, head);
926 +diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
927 +index 8e83cbdc049c..6f2e3dc44a80 100644
928 +--- a/lib/test_rhashtable.c
929 ++++ b/lib/test_rhashtable.c
930 +@@ -79,6 +79,21 @@ struct thread_data {
931 + struct test_obj *objs;
932 + };
933 +
934 ++static u32 my_hashfn(const void *data, u32 len, u32 seed)
935 ++{
936 ++ const struct test_obj_rhl *obj = data;
937 ++
938 ++ return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
939 ++}
940 ++
941 ++static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
942 ++{
943 ++ const struct test_obj_rhl *test_obj = obj;
944 ++ const struct test_obj_val *val = arg->key;
945 ++
946 ++ return test_obj->value.id - val->id;
947 ++}
948 ++
949 + static struct rhashtable_params test_rht_params = {
950 + .head_offset = offsetof(struct test_obj, node),
951 + .key_offset = offsetof(struct test_obj, value),
952 +@@ -87,6 +102,17 @@ static struct rhashtable_params test_rht_params = {
953 + .nulls_base = (3U << RHT_BASE_SHIFT),
954 + };
955 +
956 ++static struct rhashtable_params test_rht_params_dup = {
957 ++ .head_offset = offsetof(struct test_obj_rhl, list_node),
958 ++ .key_offset = offsetof(struct test_obj_rhl, value),
959 ++ .key_len = sizeof(struct test_obj_val),
960 ++ .hashfn = jhash,
961 ++ .obj_hashfn = my_hashfn,
962 ++ .obj_cmpfn = my_cmpfn,
963 ++ .nelem_hint = 128,
964 ++ .automatic_shrinking = false,
965 ++};
966 ++
967 + static struct semaphore prestart_sem;
968 + static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
969 +
970 +@@ -469,6 +495,112 @@ static int __init test_rhashtable_max(struct test_obj *array,
971 + return err;
972 + }
973 +
974 ++static unsigned int __init print_ht(struct rhltable *rhlt)
975 ++{
976 ++ struct rhashtable *ht;
977 ++ const struct bucket_table *tbl;
978 ++ char buff[512] = "";
979 ++ unsigned int i, cnt = 0;
980 ++
981 ++ ht = &rhlt->ht;
982 ++ tbl = rht_dereference(ht->tbl, ht);
983 ++ for (i = 0; i < tbl->size; i++) {
984 ++ struct rhash_head *pos, *next;
985 ++ struct test_obj_rhl *p;
986 ++
987 ++ pos = rht_dereference(tbl->buckets[i], ht);
988 ++ next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
989 ++
990 ++ if (!rht_is_a_nulls(pos)) {
991 ++ sprintf(buff, "%s\nbucket[%d] -> ", buff, i);
992 ++ }
993 ++
994 ++ while (!rht_is_a_nulls(pos)) {
995 ++ struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
996 ++ sprintf(buff, "%s[[", buff);
997 ++ do {
998 ++ pos = &list->rhead;
999 ++ list = rht_dereference(list->next, ht);
1000 ++ p = rht_obj(ht, pos);
1001 ++
1002 ++ sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid,
1003 ++ list? ", " : " ");
1004 ++ cnt++;
1005 ++ } while (list);
1006 ++
1007 ++ pos = next,
1008 ++ next = !rht_is_a_nulls(pos) ?
1009 ++ rht_dereference(pos->next, ht) : NULL;
1010 ++
1011 ++ sprintf(buff, "%s]]%s", buff, !rht_is_a_nulls(pos) ? " -> " : "");
1012 ++ }
1013 ++ }
1014 ++ printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
1015 ++
1016 ++ return cnt;
1017 ++}
1018 ++
1019 ++static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
1020 ++ int cnt, bool slow)
1021 ++{
1022 ++ struct rhltable rhlt;
1023 ++ unsigned int i, ret;
1024 ++ const char *key;
1025 ++ int err = 0;
1026 ++
1027 ++ err = rhltable_init(&rhlt, &test_rht_params_dup);
1028 ++ if (WARN_ON(err))
1029 ++ return err;
1030 ++
1031 ++ for (i = 0; i < cnt; i++) {
1032 ++ rhl_test_objects[i].value.tid = i;
1033 ++ key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
1034 ++ key += test_rht_params_dup.key_offset;
1035 ++
1036 ++ if (slow) {
1037 ++ err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
1038 ++ &rhl_test_objects[i].list_node.rhead));
1039 ++ if (err == -EAGAIN)
1040 ++ err = 0;
1041 ++ } else
1042 ++ err = rhltable_insert(&rhlt,
1043 ++ &rhl_test_objects[i].list_node,
1044 ++ test_rht_params_dup);
1045 ++ if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
1046 ++ goto skip_print;
1047 ++ }
1048 ++
1049 ++ ret = print_ht(&rhlt);
1050 ++ WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
1051 ++
1052 ++skip_print:
1053 ++ rhltable_destroy(&rhlt);
1054 ++
1055 ++ return 0;
1056 ++}
1057 ++
1058 ++static int __init test_insert_duplicates_run(void)
1059 ++{
1060 ++ struct test_obj_rhl rhl_test_objects[3] = {};
1061 ++
1062 ++ pr_info("test inserting duplicates\n");
1063 ++
1064 ++ /* two different values that map to same bucket */
1065 ++ rhl_test_objects[0].value.id = 1;
1066 ++ rhl_test_objects[1].value.id = 21;
1067 ++
1068 ++ /* and another duplicate with same as [0] value
1069 ++ * which will be second on the bucket list */
1070 ++ rhl_test_objects[2].value.id = rhl_test_objects[0].value.id;
1071 ++
1072 ++ test_insert_dup(rhl_test_objects, 2, false);
1073 ++ test_insert_dup(rhl_test_objects, 3, false);
1074 ++ test_insert_dup(rhl_test_objects, 2, true);
1075 ++ test_insert_dup(rhl_test_objects, 3, true);
1076 ++
1077 ++ return 0;
1078 ++}
1079 ++
1080 + static int thread_lookup_test(struct thread_data *tdata)
1081 + {
1082 + unsigned int entries = tdata->entries;
1083 +@@ -617,6 +749,8 @@ static int __init test_rht_init(void)
1084 + do_div(total_time, runs);
1085 + pr_info("Average test time: %llu\n", total_time);
1086 +
1087 ++ test_insert_duplicates_run();
1088 ++
1089 + if (!tcount)
1090 + return 0;
1091 +
1092 +diff --git a/net/core/dev.c b/net/core/dev.c
1093 +index a2a89acd0de8..f3fbd10a0632 100644
1094 +--- a/net/core/dev.c
1095 ++++ b/net/core/dev.c
1096 +@@ -3247,15 +3247,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1097 + #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1098 + static void skb_update_prio(struct sk_buff *skb)
1099 + {
1100 +- struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
1101 ++ const struct netprio_map *map;
1102 ++ const struct sock *sk;
1103 ++ unsigned int prioidx;
1104 +
1105 +- if (!skb->priority && skb->sk && map) {
1106 +- unsigned int prioidx =
1107 +- sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
1108 ++ if (skb->priority)
1109 ++ return;
1110 ++ map = rcu_dereference_bh(skb->dev->priomap);
1111 ++ if (!map)
1112 ++ return;
1113 ++ sk = skb_to_full_sk(skb);
1114 ++ if (!sk)
1115 ++ return;
1116 +
1117 +- if (prioidx < map->priomap_len)
1118 +- skb->priority = map->priomap[prioidx];
1119 +- }
1120 ++ prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
1121 ++
1122 ++ if (prioidx < map->priomap_len)
1123 ++ skb->priority = map->priomap[prioidx];
1124 + }
1125 + #else
1126 + #define skb_update_prio(skb)
1127 +diff --git a/net/core/devlink.c b/net/core/devlink.c
1128 +index 7d430c1d9c3e..5ba973311025 100644
1129 +--- a/net/core/devlink.c
1130 ++++ b/net/core/devlink.c
1131 +@@ -1776,7 +1776,7 @@ static int devlink_dpipe_tables_fill(struct genl_info *info,
1132 + if (!nlh) {
1133 + err = devlink_dpipe_send_and_alloc_skb(&skb, info);
1134 + if (err)
1135 +- goto err_skb_send_alloc;
1136 ++ return err;
1137 + goto send_done;
1138 + }
1139 +
1140 +@@ -1785,7 +1785,6 @@ static int devlink_dpipe_tables_fill(struct genl_info *info,
1141 + nla_put_failure:
1142 + err = -EMSGSIZE;
1143 + err_table_put:
1144 +-err_skb_send_alloc:
1145 + genlmsg_cancel(skb, hdr);
1146 + nlmsg_free(skb);
1147 + return err;
1148 +@@ -2051,7 +2050,7 @@ static int devlink_dpipe_entries_fill(struct genl_info *info,
1149 + table->counters_enabled,
1150 + &dump_ctx);
1151 + if (err)
1152 +- goto err_entries_dump;
1153 ++ return err;
1154 +
1155 + send_done:
1156 + nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
1157 +@@ -2059,16 +2058,10 @@ static int devlink_dpipe_entries_fill(struct genl_info *info,
1158 + if (!nlh) {
1159 + err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
1160 + if (err)
1161 +- goto err_skb_send_alloc;
1162 ++ return err;
1163 + goto send_done;
1164 + }
1165 + return genlmsg_reply(dump_ctx.skb, info);
1166 +-
1167 +-err_entries_dump:
1168 +-err_skb_send_alloc:
1169 +- genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr);
1170 +- nlmsg_free(dump_ctx.skb);
1171 +- return err;
1172 + }
1173 +
1174 + static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
1175 +@@ -2207,7 +2200,7 @@ static int devlink_dpipe_headers_fill(struct genl_info *info,
1176 + if (!nlh) {
1177 + err = devlink_dpipe_send_and_alloc_skb(&skb, info);
1178 + if (err)
1179 +- goto err_skb_send_alloc;
1180 ++ return err;
1181 + goto send_done;
1182 + }
1183 + return genlmsg_reply(skb, info);
1184 +@@ -2215,7 +2208,6 @@ static int devlink_dpipe_headers_fill(struct genl_info *info,
1185 + nla_put_failure:
1186 + err = -EMSGSIZE;
1187 + err_table_put:
1188 +-err_skb_send_alloc:
1189 + genlmsg_cancel(skb, hdr);
1190 + nlmsg_free(skb);
1191 + return err;
1192 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1193 +index 08f574081315..3538ba8771e9 100644
1194 +--- a/net/core/skbuff.c
1195 ++++ b/net/core/skbuff.c
1196 +@@ -4173,7 +4173,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
1197 +
1198 + skb_queue_tail(&sk->sk_error_queue, skb);
1199 + if (!sock_flag(sk, SOCK_DEAD))
1200 +- sk->sk_data_ready(sk);
1201 ++ sk->sk_error_report(sk);
1202 + return 0;
1203 + }
1204 + EXPORT_SYMBOL(sock_queue_err_skb);
1205 +diff --git a/net/dccp/proto.c b/net/dccp/proto.c
1206 +index 9d43c1f40274..ff3b058cf58c 100644
1207 +--- a/net/dccp/proto.c
1208 ++++ b/net/dccp/proto.c
1209 +@@ -789,6 +789,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1210 + if (skb == NULL)
1211 + goto out_release;
1212 +
1213 ++ if (sk->sk_state == DCCP_CLOSED) {
1214 ++ rc = -ENOTCONN;
1215 ++ goto out_discard;
1216 ++ }
1217 ++
1218 + skb_reserve(skb, sk->sk_prot->max_header);
1219 + rc = memcpy_from_msg(skb_put(skb, len), msg, len);
1220 + if (rc != 0)
1221 +diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
1222 +index 84611d7fcfa2..3c9cee268b8a 100644
1223 +--- a/net/dsa/legacy.c
1224 ++++ b/net/dsa/legacy.c
1225 +@@ -194,7 +194,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds,
1226 + ds->ports[i].dn = cd->port_dn[i];
1227 + ds->ports[i].cpu_dp = dst->cpu_dp;
1228 +
1229 +- if (dsa_is_user_port(ds, i))
1230 ++ if (!dsa_is_user_port(ds, i))
1231 + continue;
1232 +
1233 + ret = dsa_slave_create(&ds->ports[i]);
1234 +diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
1235 +index 974765b7d92a..e9f0489e4229 100644
1236 +--- a/net/ieee802154/6lowpan/core.c
1237 ++++ b/net/ieee802154/6lowpan/core.c
1238 +@@ -206,9 +206,13 @@ static inline void lowpan_netlink_fini(void)
1239 + static int lowpan_device_event(struct notifier_block *unused,
1240 + unsigned long event, void *ptr)
1241 + {
1242 +- struct net_device *wdev = netdev_notifier_info_to_dev(ptr);
1243 ++ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1244 ++ struct wpan_dev *wpan_dev;
1245 +
1246 +- if (wdev->type != ARPHRD_IEEE802154)
1247 ++ if (ndev->type != ARPHRD_IEEE802154)
1248 ++ return NOTIFY_DONE;
1249 ++ wpan_dev = ndev->ieee802154_ptr;
1250 ++ if (!wpan_dev)
1251 + return NOTIFY_DONE;
1252 +
1253 + switch (event) {
1254 +@@ -217,8 +221,8 @@ static int lowpan_device_event(struct notifier_block *unused,
1255 + * also delete possible lowpan interfaces which belongs
1256 + * to the wpan interface.
1257 + */
1258 +- if (wdev->ieee802154_ptr->lowpan_dev)
1259 +- lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL);
1260 ++ if (wpan_dev->lowpan_dev)
1261 ++ lowpan_dellink(wpan_dev->lowpan_dev, NULL);
1262 + break;
1263 + default:
1264 + return NOTIFY_DONE;
1265 +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
1266 +index 26a3d0315728..e8ec28999f5c 100644
1267 +--- a/net/ipv4/inet_fragment.c
1268 ++++ b/net/ipv4/inet_fragment.c
1269 +@@ -119,6 +119,9 @@ static void inet_frag_secret_rebuild(struct inet_frags *f)
1270 +
1271 + static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
1272 + {
1273 ++ if (!hlist_unhashed(&q->list_evictor))
1274 ++ return false;
1275 ++
1276 + return q->net->low_thresh == 0 ||
1277 + frag_mem_limit(q->net) >= q->net->low_thresh;
1278 + }
1279 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
1280 +index f56aab54e0c8..1e70ed5244ea 100644
1281 +--- a/net/ipv4/ip_sockglue.c
1282 ++++ b/net/ipv4/ip_sockglue.c
1283 +@@ -258,7 +258,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
1284 + src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
1285 + if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
1286 + return -EINVAL;
1287 +- ipc->oif = src_info->ipi6_ifindex;
1288 ++ if (src_info->ipi6_ifindex)
1289 ++ ipc->oif = src_info->ipi6_ifindex;
1290 + ipc->addr = src_info->ipi6_addr.s6_addr32[3];
1291 + continue;
1292 + }
1293 +@@ -288,7 +289,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
1294 + if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
1295 + return -EINVAL;
1296 + info = (struct in_pktinfo *)CMSG_DATA(cmsg);
1297 +- ipc->oif = info->ipi_ifindex;
1298 ++ if (info->ipi_ifindex)
1299 ++ ipc->oif = info->ipi_ifindex;
1300 + ipc->addr = info->ipi_spec_dst.s_addr;
1301 + break;
1302 + }
1303 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1304 +index c821f5d68720..2eb91b97a062 100644
1305 +--- a/net/ipv4/tcp.c
1306 ++++ b/net/ipv4/tcp.c
1307 +@@ -3542,6 +3542,7 @@ int tcp_abort(struct sock *sk, int err)
1308 +
1309 + bh_unlock_sock(sk);
1310 + local_bh_enable();
1311 ++ tcp_write_queue_purge(sk);
1312 + release_sock(sk);
1313 + return 0;
1314 + }
1315 +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
1316 +index 388158c9d9f6..c721140a7d79 100644
1317 +--- a/net/ipv4/tcp_timer.c
1318 ++++ b/net/ipv4/tcp_timer.c
1319 +@@ -34,6 +34,7 @@ static void tcp_write_err(struct sock *sk)
1320 + sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
1321 + sk->sk_error_report(sk);
1322 +
1323 ++ tcp_write_queue_purge(sk);
1324 + tcp_done(sk);
1325 + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
1326 + }
1327 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
1328 +index a1f918713006..287112da3c06 100644
1329 +--- a/net/ipv6/datagram.c
1330 ++++ b/net/ipv6/datagram.c
1331 +@@ -146,10 +146,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
1332 + struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1333 + struct inet_sock *inet = inet_sk(sk);
1334 + struct ipv6_pinfo *np = inet6_sk(sk);
1335 +- struct in6_addr *daddr;
1336 ++ struct in6_addr *daddr, old_daddr;
1337 ++ __be32 fl6_flowlabel = 0;
1338 ++ __be32 old_fl6_flowlabel;
1339 ++ __be16 old_dport;
1340 + int addr_type;
1341 + int err;
1342 +- __be32 fl6_flowlabel = 0;
1343 +
1344 + if (usin->sin6_family == AF_INET) {
1345 + if (__ipv6_only_sock(sk))
1346 +@@ -239,9 +241,13 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
1347 + }
1348 + }
1349 +
1350 ++ /* save the current peer information before updating it */
1351 ++ old_daddr = sk->sk_v6_daddr;
1352 ++ old_fl6_flowlabel = np->flow_label;
1353 ++ old_dport = inet->inet_dport;
1354 ++
1355 + sk->sk_v6_daddr = *daddr;
1356 + np->flow_label = fl6_flowlabel;
1357 +-
1358 + inet->inet_dport = usin->sin6_port;
1359 +
1360 + /*
1361 +@@ -251,11 +257,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
1362 +
1363 + err = ip6_datagram_dst_update(sk, true);
1364 + if (err) {
1365 +- /* Reset daddr and dport so that udp_v6_early_demux()
1366 +- * fails to find this socket
1367 ++ /* Restore the socket peer info, to keep it consistent with
1368 ++ * the old socket state
1369 + */
1370 +- memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
1371 +- inet->inet_dport = 0;
1372 ++ sk->sk_v6_daddr = old_daddr;
1373 ++ np->flow_label = old_fl6_flowlabel;
1374 ++ inet->inet_dport = old_dport;
1375 + goto out;
1376 + }
1377 +
1378 +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
1379 +index f61a5b613b52..ba5e04c6ae17 100644
1380 +--- a/net/ipv6/ndisc.c
1381 ++++ b/net/ipv6/ndisc.c
1382 +@@ -1554,7 +1554,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
1383 + *(opt++) = (rd_len >> 3);
1384 + opt += 6;
1385 +
1386 +- memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8);
1387 ++ skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
1388 ++ rd_len - 8);
1389 + }
1390 +
1391 + void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1392 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1393 +index a560fb1d0230..08a2a65d3304 100644
1394 +--- a/net/ipv6/route.c
1395 ++++ b/net/ipv6/route.c
1396 +@@ -1510,7 +1510,30 @@ static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
1397 + }
1398 + }
1399 +
1400 +-static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
1401 ++static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1402 ++ struct rt6_info *rt, int mtu)
1403 ++{
1404 ++ /* If the new MTU is lower than the route PMTU, this new MTU will be the
1405 ++ * lowest MTU in the path: always allow updating the route PMTU to
1406 ++ * reflect PMTU decreases.
1407 ++ *
1408 ++ * If the new MTU is higher, and the route PMTU is equal to the local
1409 ++ * MTU, this means the old MTU is the lowest in the path, so allow
1410 ++ * updating it: if other nodes now have lower MTUs, PMTU discovery will
1411 ++ * handle this.
1412 ++ */
1413 ++
1414 ++ if (dst_mtu(&rt->dst) >= mtu)
1415 ++ return true;
1416 ++
1417 ++ if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
1418 ++ return true;
1419 ++
1420 ++ return false;
1421 ++}
1422 ++
1423 ++static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
1424 ++ struct rt6_info *rt, int mtu)
1425 + {
1426 + struct rt6_exception_bucket *bucket;
1427 + struct rt6_exception *rt6_ex;
1428 +@@ -1519,20 +1542,22 @@ static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
1429 + bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1430 + lockdep_is_held(&rt6_exception_lock));
1431 +
1432 +- if (bucket) {
1433 +- for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1434 +- hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1435 +- struct rt6_info *entry = rt6_ex->rt6i;
1436 +- /* For RTF_CACHE with rt6i_pmtu == 0
1437 +- * (i.e. a redirected route),
1438 +- * the metrics of its rt->dst.from has already
1439 +- * been updated.
1440 +- */
1441 +- if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu)
1442 +- entry->rt6i_pmtu = mtu;
1443 +- }
1444 +- bucket++;
1445 ++ if (!bucket)
1446 ++ return;
1447 ++
1448 ++ for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1449 ++ hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1450 ++ struct rt6_info *entry = rt6_ex->rt6i;
1451 ++
1452 ++ /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
1453 ++ * route), the metrics of its rt->dst.from have already
1454 ++ * been updated.
1455 ++ */
1456 ++ if (entry->rt6i_pmtu &&
1457 ++ rt6_mtu_change_route_allowed(idev, entry, mtu))
1458 ++ entry->rt6i_pmtu = mtu;
1459 + }
1460 ++ bucket++;
1461 + }
1462 + }
1463 +
1464 +@@ -3521,25 +3546,13 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1465 + Since RFC 1981 doesn't include administrative MTU increase
1466 + update PMTU increase is a MUST. (i.e. jumbo frame)
1467 + */
1468 +- /*
1469 +- If new MTU is less than route PMTU, this new MTU will be the
1470 +- lowest MTU in the path, update the route PMTU to reflect PMTU
1471 +- decreases; if new MTU is greater than route PMTU, and the
1472 +- old MTU is the lowest MTU in the path, update the route PMTU
1473 +- to reflect the increase. In this case if the other nodes' MTU
1474 +- also have the lowest MTU, TOO BIG MESSAGE will be lead to
1475 +- PMTU discovery.
1476 +- */
1477 + if (rt->dst.dev == arg->dev &&
1478 +- dst_metric_raw(&rt->dst, RTAX_MTU) &&
1479 + !dst_metric_locked(&rt->dst, RTAX_MTU)) {
1480 + spin_lock_bh(&rt6_exception_lock);
1481 +- if (dst_mtu(&rt->dst) >= arg->mtu ||
1482 +- (dst_mtu(&rt->dst) < arg->mtu &&
1483 +- dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
1484 ++ if (dst_metric_raw(&rt->dst, RTAX_MTU) &&
1485 ++ rt6_mtu_change_route_allowed(idev, rt, arg->mtu))
1486 + dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
1487 +- }
1488 +- rt6_exceptions_update_pmtu(rt, arg->mtu);
1489 ++ rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
1490 + spin_unlock_bh(&rt6_exception_lock);
1491 + }
1492 + return 0;
1493 +diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
1494 +index bd6cc688bd19..7a78dcfda68a 100644
1495 +--- a/net/ipv6/seg6_iptunnel.c
1496 ++++ b/net/ipv6/seg6_iptunnel.c
1497 +@@ -93,7 +93,8 @@ static void set_tun_src(struct net *net, struct net_device *dev,
1498 + /* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
1499 + int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
1500 + {
1501 +- struct net *net = dev_net(skb_dst(skb)->dev);
1502 ++ struct dst_entry *dst = skb_dst(skb);
1503 ++ struct net *net = dev_net(dst->dev);
1504 + struct ipv6hdr *hdr, *inner_hdr;
1505 + struct ipv6_sr_hdr *isrh;
1506 + int hdrlen, tot_len, err;
1507 +@@ -134,7 +135,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
1508 + isrh->nexthdr = proto;
1509 +
1510 + hdr->daddr = isrh->segments[isrh->first_segment];
1511 +- set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr);
1512 ++ set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr);
1513 +
1514 + #ifdef CONFIG_IPV6_SEG6_HMAC
1515 + if (sr_has_hmac(isrh)) {
1516 +@@ -418,7 +419,7 @@ static int seg6_build_state(struct nlattr *nla,
1517 +
1518 + slwt = seg6_lwt_lwtunnel(newts);
1519 +
1520 +- err = dst_cache_init(&slwt->cache, GFP_KERNEL);
1521 ++ err = dst_cache_init(&slwt->cache, GFP_ATOMIC);
1522 + if (err) {
1523 + kfree(newts);
1524 + return err;
1525 +diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
1526 +index 148533169b1d..ca98276c2709 100644
1527 +--- a/net/iucv/af_iucv.c
1528 ++++ b/net/iucv/af_iucv.c
1529 +@@ -2433,9 +2433,11 @@ static int afiucv_iucv_init(void)
1530 + af_iucv_dev->driver = &af_iucv_driver;
1531 + err = device_register(af_iucv_dev);
1532 + if (err)
1533 +- goto out_driver;
1534 ++ goto out_iucv_dev;
1535 + return 0;
1536 +
1537 ++out_iucv_dev:
1538 ++ put_device(af_iucv_dev);
1539 + out_driver:
1540 + driver_unregister(&af_iucv_driver);
1541 + out_iucv:
1542 +diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
1543 +index 4a8d407f8902..3f15ffd356da 100644
1544 +--- a/net/kcm/kcmsock.c
1545 ++++ b/net/kcm/kcmsock.c
1546 +@@ -1381,24 +1381,32 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1547 + .parse_msg = kcm_parse_func_strparser,
1548 + .read_sock_done = kcm_read_sock_done,
1549 + };
1550 +- int err;
1551 ++ int err = 0;
1552 +
1553 + csk = csock->sk;
1554 + if (!csk)
1555 + return -EINVAL;
1556 +
1557 ++ lock_sock(csk);
1558 ++
1559 + /* Only allow TCP sockets to be attached for now */
1560 + if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
1561 +- csk->sk_protocol != IPPROTO_TCP)
1562 +- return -EOPNOTSUPP;
1563 ++ csk->sk_protocol != IPPROTO_TCP) {
1564 ++ err = -EOPNOTSUPP;
1565 ++ goto out;
1566 ++ }
1567 +
1568 + /* Don't allow listeners or closed sockets */
1569 +- if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
1570 +- return -EOPNOTSUPP;
1571 ++ if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
1572 ++ err = -EOPNOTSUPP;
1573 ++ goto out;
1574 ++ }
1575 +
1576 + psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1577 +- if (!psock)
1578 +- return -ENOMEM;
1579 ++ if (!psock) {
1580 ++ err = -ENOMEM;
1581 ++ goto out;
1582 ++ }
1583 +
1584 + psock->mux = mux;
1585 + psock->sk = csk;
1586 +@@ -1407,7 +1415,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1587 + err = strp_init(&psock->strp, csk, &cb);
1588 + if (err) {
1589 + kmem_cache_free(kcm_psockp, psock);
1590 +- return err;
1591 ++ goto out;
1592 + }
1593 +
1594 + write_lock_bh(&csk->sk_callback_lock);
1595 +@@ -1419,7 +1427,8 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1596 + write_unlock_bh(&csk->sk_callback_lock);
1597 + strp_done(&psock->strp);
1598 + kmem_cache_free(kcm_psockp, psock);
1599 +- return -EALREADY;
1600 ++ err = -EALREADY;
1601 ++ goto out;
1602 + }
1603 +
1604 + psock->save_data_ready = csk->sk_data_ready;
1605 +@@ -1455,7 +1464,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1606 + /* Schedule RX work in case there are already bytes queued */
1607 + strp_check_rcv(&psock->strp);
1608 +
1609 +- return 0;
1610 ++out:
1611 ++ release_sock(csk);
1612 ++
1613 ++ return err;
1614 + }
1615 +
1616 + static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1617 +@@ -1507,6 +1519,7 @@ static void kcm_unattach(struct kcm_psock *psock)
1618 +
1619 + if (WARN_ON(psock->rx_kcm)) {
1620 + write_unlock_bh(&csk->sk_callback_lock);
1621 ++ release_sock(csk);
1622 + return;
1623 + }
1624 +
1625 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
1626 +index 861b67c34191..e8b26afeb194 100644
1627 +--- a/net/l2tp/l2tp_core.c
1628 ++++ b/net/l2tp/l2tp_core.c
1629 +@@ -1466,9 +1466,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1630 + encap = cfg->encap;
1631 +
1632 + /* Quick sanity checks */
1633 ++ err = -EPROTONOSUPPORT;
1634 ++ if (sk->sk_type != SOCK_DGRAM) {
1635 ++ pr_debug("tunl %hu: fd %d wrong socket type\n",
1636 ++ tunnel_id, fd);
1637 ++ goto err;
1638 ++ }
1639 + switch (encap) {
1640 + case L2TP_ENCAPTYPE_UDP:
1641 +- err = -EPROTONOSUPPORT;
1642 + if (sk->sk_protocol != IPPROTO_UDP) {
1643 + pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1644 + tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
1645 +@@ -1476,7 +1481,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1646 + }
1647 + break;
1648 + case L2TP_ENCAPTYPE_IP:
1649 +- err = -EPROTONOSUPPORT;
1650 + if (sk->sk_protocol != IPPROTO_L2TP) {
1651 + pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1652 + tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
1653 +diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
1654 +index 6f02499ef007..b9ce82c9440f 100644
1655 +--- a/net/netlink/genetlink.c
1656 ++++ b/net/netlink/genetlink.c
1657 +@@ -1106,7 +1106,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1658 + if (!err)
1659 + delivered = true;
1660 + else if (err != -ESRCH)
1661 +- goto error;
1662 ++ return err;
1663 + return delivered ? 0 : -ESRCH;
1664 + error:
1665 + kfree_skb(skb);
1666 +diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
1667 +index 3fbfc78991ac..0d961f09d0c7 100644
1668 +--- a/net/openvswitch/meter.c
1669 ++++ b/net/openvswitch/meter.c
1670 +@@ -242,14 +242,20 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
1671 +
1672 + band->type = nla_get_u32(attr[OVS_BAND_ATTR_TYPE]);
1673 + band->rate = nla_get_u32(attr[OVS_BAND_ATTR_RATE]);
1674 ++ if (band->rate == 0) {
1675 ++ err = -EINVAL;
1676 ++ goto exit_free_meter;
1677 ++ }
1678 ++
1679 + band->burst_size = nla_get_u32(attr[OVS_BAND_ATTR_BURST]);
1680 + /* Figure out max delta_t that is enough to fill any bucket.
1681 + * Keep max_delta_t size to the bucket units:
1682 + * pkts => 1/1000 packets, kilobits => bits.
1683 ++ *
1684 ++ * Start with a full bucket.
1685 + */
1686 +- band_max_delta_t = (band->burst_size + band->rate) * 1000;
1687 +- /* Start with a full bucket. */
1688 +- band->bucket = band_max_delta_t;
1689 ++ band->bucket = (band->burst_size + band->rate) * 1000;
1690 ++ band_max_delta_t = band->bucket / band->rate;
1691 + if (band_max_delta_t > meter->max_delta_t)
1692 + meter->max_delta_t = band_max_delta_t;
1693 + band++;
1694 +diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
1695 +index 30c96274c638..22bf1a376b91 100644
1696 +--- a/net/sched/act_tunnel_key.c
1697 ++++ b/net/sched/act_tunnel_key.c
1698 +@@ -153,6 +153,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
1699 + metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
1700 + break;
1701 + default:
1702 ++ ret = -EINVAL;
1703 + goto err_out;
1704 + }
1705 +
1706 +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
1707 +index dd70924cbcdf..2aeca57f9bd0 100644
1708 +--- a/net/sched/sch_netem.c
1709 ++++ b/net/sched/sch_netem.c
1710 +@@ -509,7 +509,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1711 + }
1712 +
1713 + if (unlikely(sch->q.qlen >= sch->limit))
1714 +- return qdisc_drop(skb, sch, to_free);
1715 ++ return qdisc_drop_all(skb, sch, to_free);
1716 +
1717 + qdisc_qstats_backlog_inc(sch, skb);
1718 +