Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Sat, 31 Mar 2018 22:17:38
Message-Id: 1522534643.c32a77ad28a18bac5e964c1f7c1b54798f58be08.mpagano@gentoo
commit: c32a77ad28a18bac5e964c1f7c1b54798f58be08
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 31 22:17:23 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 31 22:17:23 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c32a77ad

Linux patch 4.9.92

0000_README | 4 +
1091_linux-4.9.92.patch | 895 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 899 insertions(+)

diff --git a/0000_README b/0000_README
index 9dbda35..7083c5f 100644
--- a/0000_README
+++ b/0000_README
@@ -407,6 +407,10 @@ Patch: 1090_linux-4.9.91.patch
From: http://www.kernel.org
Desc: Linux 4.9.91

+Patch: 1091_linux-4.9.92.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.92
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1091_linux-4.9.92.patch b/1091_linux-4.9.92.patch
new file mode 100644
index 0000000..6861f22
--- /dev/null
+++ b/1091_linux-4.9.92.patch
@@ -0,0 +1,895 @@
+diff --git a/Makefile b/Makefile
+index db3d37e18723..3ab3b8203bf6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 91
++SUBLEVEL = 92
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
+index c6163874e4e7..c770ca37c9b2 100644
+--- a/drivers/net/ethernet/arc/emac_rockchip.c
++++ b/drivers/net/ethernet/arc/emac_rockchip.c
+@@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
+ /* Optional regulator for PHY */
+ priv->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(priv->regulator)) {
+- if (PTR_ERR(priv->regulator) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
++ err = -EPROBE_DEFER;
++ goto out_clk_disable;
++ }
+ dev_err(dev, "no regulator found\n");
+ priv->regulator = NULL;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 744ed6ddaf37..91fbba58d033 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -707,37 +707,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+ {
+ struct net_device *ndev = priv->netdev;
+- unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
++ unsigned int txbds_processed = 0;
+ struct bcm_sysport_cb *cb;
++ unsigned int txbds_ready;
++ unsigned int c_index;
+ u32 hw_ind;
+
+ /* Compute how many descriptors have been processed since last call */
+ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
+ c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
+- ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
+-
+- last_c_index = ring->c_index;
+- num_tx_cbs = ring->size;
+-
+- c_index &= (num_tx_cbs - 1);
+-
+- if (c_index >= last_c_index)
+- last_tx_cn = c_index - last_c_index;
+- else
+- last_tx_cn = num_tx_cbs - last_c_index + c_index;
++ txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
+
+ netif_dbg(priv, tx_done, ndev,
+- "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
+- ring->index, c_index, last_tx_cn, last_c_index);
++ "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
++ ring->index, ring->c_index, c_index, txbds_ready);
+
+- while (last_tx_cn-- > 0) {
+- cb = ring->cbs + last_c_index;
++ while (txbds_processed < txbds_ready) {
++ cb = &ring->cbs[ring->clean_index];
+ bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+
+ ring->desc_count++;
+- last_c_index++;
+- last_c_index &= (num_tx_cbs - 1);
++ txbds_processed++;
++
++ if (likely(ring->clean_index < ring->size - 1))
++ ring->clean_index++;
++ else
++ ring->clean_index = 0;
+ }
+
+ ring->c_index = c_index;
+@@ -1207,6 +1203,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
+ netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+ ring->index = index;
+ ring->size = size;
++ ring->clean_index = 0;
+ ring->alloc_size = ring->size;
+ ring->desc_cpu = p;
+ ring->desc_count = ring->size;
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
+index 1c82e3da69a7..07b0aaa98de0 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.h
++++ b/drivers/net/ethernet/broadcom/bcmsysport.h
+@@ -638,7 +638,7 @@ struct bcm_sysport_tx_ring {
+ unsigned int desc_count; /* Number of descriptors */
+ unsigned int curr_desc; /* Current descriptor */
+ unsigned int c_index; /* Last consumer index */
+- unsigned int p_index; /* Current producer index */
++ unsigned int clean_index; /* Current clean index */
+ struct bcm_sysport_cb *cbs; /* Transmit control blocks */
+ struct dma_desc *desc_cpu; /* CPU view of the descriptor */
+ struct bcm_sysport_priv *priv; /* private context backpointer */
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index dd6e07c748f5..05e5b38e4891 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3533,6 +3533,8 @@ fec_drv_remove(struct platform_device *pdev)
+ fec_enet_mii_remove(fep);
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
++ pm_runtime_put(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ of_node_put(fep->phy_node);
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index a79e0a1100aa..111e1aab7d83 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -299,9 +299,9 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
+ mtu);
+ }
+
+-int hns_nic_net_xmit_hw(struct net_device *ndev,
+- struct sk_buff *skb,
+- struct hns_nic_ring_data *ring_data)
++netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
++ struct sk_buff *skb,
++ struct hns_nic_ring_data *ring_data)
+ {
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_ring *ring = ring_data->ring;
+@@ -360,6 +360,10 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
+ dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
+ netdev_tx_sent_queue(dev_queue, skb->len);
+
++ netif_trans_update(ndev);
++ ndev->stats.tx_bytes += skb->len;
++ ndev->stats.tx_packets++;
++
+ wmb(); /* commit all data before submit */
+ assert(skb->queue_mapping < priv->ae_handle->q_num);
+ hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
+@@ -1408,17 +1412,11 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+ {
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+- int ret;
+
+ assert(skb->queue_mapping < ndev->ae_handle->q_num);
+- ret = hns_nic_net_xmit_hw(ndev, skb,
+- &tx_ring_data(priv, skb->queue_mapping));
+- if (ret == NETDEV_TX_OK) {
+- netif_trans_update(ndev);
+- ndev->stats.tx_bytes += skb->len;
+- ndev->stats.tx_packets++;
+- }
+- return (netdev_tx_t)ret;
++
++ return hns_nic_net_xmit_hw(ndev, skb,
++ &tx_ring_data(priv, skb->queue_mapping));
+ }
+
+ static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+index 5b412de350aa..7bc6a6ecd666 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+@@ -91,8 +91,8 @@ void hns_ethtool_set_ops(struct net_device *ndev);
+ void hns_nic_net_reset(struct net_device *ndev);
+ void hns_nic_net_reinit(struct net_device *netdev);
+ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h);
+-int hns_nic_net_xmit_hw(struct net_device *ndev,
+- struct sk_buff *skb,
+- struct hns_nic_ring_data *ring_data);
++netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
++ struct sk_buff *skb,
++ struct hns_nic_ring_data *ring_data);
+
+ #endif /**__HNS_ENET_H */
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 3f1971d485f3..2bd1282735b0 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -901,7 +901,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
+ /* set speed_in input in case RMII mode is used in 100Mbps */
+ if (phy->speed == 100)
+ mac_control |= BIT(15);
+- else if (phy->speed == 10)
++ /* in band mode only works in 10Mbps RGMII mode */
++ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
+ mac_control |= BIT(18); /* In Band mode */
+
+ if (priv->rx_pause)
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 114457921890..1e4969d90f1a 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -255,7 +255,7 @@ struct ppp_net {
+ /* Prototypes. */
+ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+ struct file *file, unsigned int cmd, unsigned long arg);
+-static void ppp_xmit_process(struct ppp *ppp);
++static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
+ static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
+ static void ppp_push(struct ppp *ppp);
+ static void ppp_channel_push(struct channel *pch);
+@@ -511,13 +511,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
+ goto out;
+ }
+
+- skb_queue_tail(&pf->xq, skb);
+-
+ switch (pf->kind) {
+ case INTERFACE:
+- ppp_xmit_process(PF_TO_PPP(pf));
++ ppp_xmit_process(PF_TO_PPP(pf), skb);
+ break;
+ case CHANNEL:
++ skb_queue_tail(&pf->xq, skb);
+ ppp_channel_push(PF_TO_CHANNEL(pf));
+ break;
+ }
+@@ -1261,8 +1260,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ put_unaligned_be16(proto, pp);
+
+ skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
+- skb_queue_tail(&ppp->file.xq, skb);
+- ppp_xmit_process(ppp);
++ ppp_xmit_process(ppp, skb);
++
+ return NETDEV_TX_OK;
+
+ outf:
+@@ -1416,13 +1415,14 @@ static void ppp_setup(struct net_device *dev)
+ */
+
+ /* Called to do any work queued up on the transmit side that can now be done */
+-static void __ppp_xmit_process(struct ppp *ppp)
++static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
+ {
+- struct sk_buff *skb;
+-
+ ppp_xmit_lock(ppp);
+ if (!ppp->closing) {
+ ppp_push(ppp);
++
++ if (skb)
++ skb_queue_tail(&ppp->file.xq, skb);
+ while (!ppp->xmit_pending &&
+ (skb = skb_dequeue(&ppp->file.xq)))
+ ppp_send_frame(ppp, skb);
+@@ -1436,7 +1436,7 @@ static void __ppp_xmit_process(struct ppp *ppp)
+ ppp_xmit_unlock(ppp);
+ }
+
+-static void ppp_xmit_process(struct ppp *ppp)
++static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
+ {
+ local_bh_disable();
+
+@@ -1444,7 +1444,7 @@ static void ppp_xmit_process(struct ppp *ppp)
+ goto err;
+
+ (*this_cpu_ptr(ppp->xmit_recursion))++;
+- __ppp_xmit_process(ppp);
++ __ppp_xmit_process(ppp, skb);
+ (*this_cpu_ptr(ppp->xmit_recursion))--;
+
+ local_bh_enable();
+@@ -1454,6 +1454,8 @@ static void ppp_xmit_process(struct ppp *ppp)
+ err:
+ local_bh_enable();
+
++ kfree_skb(skb);
++
+ if (net_ratelimit())
+ netdev_err(ppp->dev, "recursion detected\n");
+ }
+@@ -1938,7 +1940,7 @@ static void __ppp_channel_push(struct channel *pch)
+ if (skb_queue_empty(&pch->file.xq)) {
+ ppp = pch->ppp;
+ if (ppp)
+- __ppp_xmit_process(ppp);
++ __ppp_xmit_process(ppp, NULL);
+ }
+ }
+
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 26681707fc7a..a0a9c9d39f01 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2403,7 +2403,7 @@ static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
+ if (!nlh) {
+ err = __send_and_alloc_skb(&skb, team, portid, send_func);
+ if (err)
+- goto errout;
++ return err;
+ goto send_done;
+ }
+
+@@ -2688,7 +2688,7 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
+ if (!nlh) {
+ err = __send_and_alloc_skb(&skb, team, portid, send_func);
+ if (err)
+- goto errout;
++ return err;
+ goto send_done;
+ }
+
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index cc28dda322b5..283416aefa56 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -522,8 +522,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
+ queue == card->qdio.no_in_queues - 1;
+ }
+
+-
+-static int qeth_issue_next_read(struct qeth_card *card)
++static int __qeth_issue_next_read(struct qeth_card *card)
+ {
+ int rc;
+ struct qeth_cmd_buffer *iob;
+@@ -554,6 +553,17 @@ static int qeth_issue_next_read(struct qeth_card *card)
+ return rc;
+ }
+
++static int qeth_issue_next_read(struct qeth_card *card)
++{
++ int ret;
++
++ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
++ ret = __qeth_issue_next_read(card);
++ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
++
++ return ret;
++}
++
+ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
+ {
+ struct qeth_reply *reply;
+@@ -957,7 +967,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ card->thread_running_mask &= ~thread;
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+- wake_up(&card->wait_q);
++ wake_up_all(&card->wait_q);
+ }
+ EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
+
+@@ -1161,6 +1171,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ }
+ rc = qeth_get_problem(cdev, irb);
+ if (rc) {
++ card->read_or_write_problem = 1;
+ qeth_clear_ipacmd_list(card);
+ qeth_schedule_recovery(card);
+ goto out;
+@@ -1179,7 +1190,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ return;
+ if (channel == &card->read &&
+ channel->state == CH_STATE_UP)
+- qeth_issue_next_read(card);
++ __qeth_issue_next_read(card);
+
+ iob = channel->iob;
+ index = channel->buf_no;
+@@ -4989,8 +5000,6 @@ static void qeth_core_free_card(struct qeth_card *card)
+ QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ qeth_clean_channel(&card->read);
+ qeth_clean_channel(&card->write);
+- if (card->dev)
+- free_netdev(card->dev);
+ qeth_free_qdio_buffers(card);
+ unregister_service_level(&card->qeth_service_level);
+ kfree(card);
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index 5082dfeacb95..e94e9579914e 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -1057,8 +1057,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
+ qeth_l2_set_offline(cgdev);
+
+ if (card->dev) {
+- netif_napi_del(&card->napi);
+ unregister_netdev(card->dev);
++ free_netdev(card->dev);
+ card->dev = NULL;
+ }
+ return;
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index a668e6b71a29..4ca161bdc696 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -3192,8 +3192,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
+ qeth_l3_set_offline(cgdev);
+
+ if (card->dev) {
+- netif_napi_del(&card->napi);
+ unregister_netdev(card->dev);
++ free_netdev(card->dev);
+ card->dev = NULL;
+ }
+
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 7592ac8514d2..f61b37109e5c 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -2064,11 +2064,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+ if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
+ resp->done = 2; /* guard against other readers */
+- break;
++ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
++ return resp;
+ }
+ }
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+- return resp;
++ return NULL;
+ }
+
+ /* always adds to end of list */
+diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
+index 119054bc922b..2caacd9d2526 100644
+--- a/drivers/soc/fsl/qbman/qman.c
++++ b/drivers/soc/fsl/qbman/qman.c
+@@ -2429,39 +2429,21 @@ struct cgr_comp {
+ struct completion completion;
+ };
+
+-static int qman_delete_cgr_thread(void *p)
++static void qman_delete_cgr_smp_call(void *p)
+ {
+- struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
+- int ret;
+-
+- ret = qman_delete_cgr(cgr_comp->cgr);
+- complete(&cgr_comp->completion);
+-
+- return ret;
++ qman_delete_cgr((struct qman_cgr *)p);
+ }
+
+ void qman_delete_cgr_safe(struct qman_cgr *cgr)
+ {
+- struct task_struct *thread;
+- struct cgr_comp cgr_comp;
+-
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+- init_completion(&cgr_comp.completion);
+- cgr_comp.cgr = cgr;
+- thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
+- "cgr_del");
+-
+- if (IS_ERR(thread))
+- goto out;
+-
+- kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
+- wake_up_process(thread);
+- wait_for_completion(&cgr_comp.completion);
++ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
++ qman_delete_cgr_smp_call, cgr, true);
+ preempt_enable();
+ return;
+ }
+-out:
++
+ qman_delete_cgr(cgr);
+ preempt_enable();
+ }
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 6fb1c34cf805..1619a3213af5 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -609,13 +609,13 @@ struct sock_cgroup_data {
+ * updaters and return part of the previous pointer as the prioidx or
+ * classid. Such races are short-lived and the result isn't critical.
+ */
+-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
++static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
+ {
+ /* fallback to 1 which is always the ID of the root cgroup */
+ return (skcd->is_data & 1) ? skcd->prioidx : 1;
+ }
+
+-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
++static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
+ {
+ /* fallback to 0 which is the unconfigured default classid */
+ return (skcd->is_data & 1) ? skcd->classid : 0;
+diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
+index 5c132d3188be..85d1ffc90285 100644
+--- a/include/linux/rhashtable.h
++++ b/include/linux/rhashtable.h
+@@ -706,8 +706,10 @@ static inline void *__rhashtable_insert_fast(
+ if (!key ||
+ (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+- rhashtable_compare(&arg, rht_obj(ht, head))))
++ rhashtable_compare(&arg, rht_obj(ht, head)))) {
++ pprev = &head->next;
+ continue;
++ }
+
+ data = rht_obj(ht, head);
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index f18fc1a0321f..538f3c4458b0 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -675,6 +675,16 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
+ *to_free = skb;
+ }
+
++static inline void __qdisc_drop_all(struct sk_buff *skb,
++ struct sk_buff **to_free)
++{
++ if (skb->prev)
++ skb->prev->next = *to_free;
++ else
++ skb->next = *to_free;
++ *to_free = skb;
++}
++
+ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
+ struct qdisc_skb_head *qh,
+ struct sk_buff **to_free)
+@@ -795,6 +805,15 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
+ return NET_XMIT_DROP;
+ }
+
++static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
++ struct sk_buff **to_free)
++{
++ __qdisc_drop_all(skb, to_free);
++ qdisc_qstats_drop(sch);
++
++ return NET_XMIT_DROP;
++}
++
+ /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
+ long it will take to send a packet given its size.
+ */
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index ed18aa4dceab..ea41820ab12e 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1210,10 +1210,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ * set the trigger type must match. Also all must
+ * agree on ONESHOT.
+ */
+- unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);
+-
+ if (!((old->flags & new->flags) & IRQF_SHARED) ||
+- (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
++ ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
+ ((old->flags ^ new->flags) & IRQF_ONESHOT))
+ goto mismatch;
+
+diff --git a/lib/rhashtable.c b/lib/rhashtable.c
+index 32d0ad058380..895961c53385 100644
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -448,8 +448,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
+ if (!key ||
+ (ht->p.obj_cmpfn ?
+ ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
+- rhashtable_compare(&arg, rht_obj(ht, head))))
++ rhashtable_compare(&arg, rht_obj(ht, head)))) {
++ pprev = &head->next;
+ continue;
++ }
+
+ if (!ht->rhlist)
+ return rht_obj(ht, head);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 272f84ad16e0..07d2c93c9636 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3179,15 +3179,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+ #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
+ static void skb_update_prio(struct sk_buff *skb)
+ {
+- struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
++ const struct netprio_map *map;
++ const struct sock *sk;
++ unsigned int prioidx;
+
+- if (!skb->priority && skb->sk && map) {
+- unsigned int prioidx =
+- sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
++ if (skb->priority)
++ return;
++ map = rcu_dereference_bh(skb->dev->priomap);
++ if (!map)
++ return;
++ sk = skb_to_full_sk(skb);
++ if (!sk)
++ return;
+
+- if (prioidx < map->priomap_len)
+- skb->priority = map->priomap[prioidx];
+- }
++ prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
++
++ if (prioidx < map->priomap_len)
++ skb->priority = map->priomap[prioidx];
+ }
+ #else
+ #define skb_update_prio(skb)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index a64515583bc1..c5ac9f48f058 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3717,7 +3717,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+
+ skb_queue_tail(&sk->sk_error_queue, skb);
+ if (!sock_flag(sk, SOCK_DEAD))
+- sk->sk_data_ready(sk);
++ sk->sk_error_report(sk);
+ return 0;
+ }
+ EXPORT_SYMBOL(sock_queue_err_skb);
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 9d43c1f40274..ff3b058cf58c 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -789,6 +789,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ if (skb == NULL)
+ goto out_release;
+
++ if (sk->sk_state == DCCP_CLOSED) {
++ rc = -ENOTCONN;
++ goto out_discard;
++ }
++
+ skb_reserve(skb, sk->sk_prot->max_header);
+ rc = memcpy_from_msg(skb_put(skb, len), msg, len);
+ if (rc != 0)
+diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
+index d7efbf0dad20..83af5339e582 100644
+--- a/net/ieee802154/6lowpan/core.c
++++ b/net/ieee802154/6lowpan/core.c
+@@ -204,9 +204,13 @@ static inline void lowpan_netlink_fini(void)
+ static int lowpan_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+ {
+- struct net_device *wdev = netdev_notifier_info_to_dev(ptr);
++ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
++ struct wpan_dev *wpan_dev;
+
+- if (wdev->type != ARPHRD_IEEE802154)
++ if (ndev->type != ARPHRD_IEEE802154)
++ return NOTIFY_DONE;
++ wpan_dev = ndev->ieee802154_ptr;
++ if (!wpan_dev)
+ return NOTIFY_DONE;
+
+ switch (event) {
+@@ -215,8 +219,8 @@ static int lowpan_device_event(struct notifier_block *unused,
+ * also delete possible lowpan interfaces which belongs
+ * to the wpan interface.
+ */
+- if (wdev->ieee802154_ptr->lowpan_dev)
+- lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL);
++ if (wpan_dev->lowpan_dev)
++ lowpan_dellink(wpan_dev->lowpan_dev, NULL);
+ break;
+ default:
+ return NOTIFY_DONE;
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index 631c0d0d7cf8..8effac0f2219 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -119,6 +119,9 @@ static void inet_frag_secret_rebuild(struct inet_frags *f)
+
+ static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
+ {
++ if (!hlist_unhashed(&q->list_evictor))
++ return false;
++
+ return q->net->low_thresh == 0 ||
+ frag_mem_limit(q->net) >= q->net->low_thresh;
+ }
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index fd1e6b8562e0..5ddd64995e73 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -242,7 +242,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
+ src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+ if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
+ return -EINVAL;
+- ipc->oif = src_info->ipi6_ifindex;
++ if (src_info->ipi6_ifindex)
++ ipc->oif = src_info->ipi6_ifindex;
+ ipc->addr = src_info->ipi6_addr.s6_addr32[3];
+ continue;
+ }
+@@ -272,7 +273,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
+ return -EINVAL;
+ info = (struct in_pktinfo *)CMSG_DATA(cmsg);
+- ipc->oif = info->ipi_ifindex;
++ if (info->ipi_ifindex)
++ ipc->oif = info->ipi_ifindex;
+ ipc->addr = info->ipi_spec_dst.s_addr;
+ break;
+ }
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 41c22cb33424..3fe80e104b58 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1516,7 +1516,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
+ *(opt++) = (rd_len >> 3);
+ opt += 6;
+
+- memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8);
++ skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
++ rd_len - 8);
+ }
+
+ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 91cbbf1c3f82..c2dfc32eb9f2 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -2418,9 +2418,11 @@ static int afiucv_iucv_init(void)
+ af_iucv_dev->driver = &af_iucv_driver;
+ err = device_register(af_iucv_dev);
+ if (err)
+- goto out_driver;
++ goto out_iucv_dev;
+ return 0;
+
++out_iucv_dev:
++ put_device(af_iucv_dev);
+ out_driver:
+ driver_unregister(&af_iucv_driver);
+ out_iucv:
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 179cd9b1b1f4..63e6d08388ab 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1375,24 +1375,32 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
+ struct list_head *head;
+ int index = 0;
+ struct strp_callbacks cb;
+- int err;
++ int err = 0;
+
+ csk = csock->sk;
+ if (!csk)
+ return -EINVAL;
+
++ lock_sock(csk);
++
+ /* Only allow TCP sockets to be attached for now */
+ if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
+- csk->sk_protocol != IPPROTO_TCP)
+- return -EOPNOTSUPP;
++ csk->sk_protocol != IPPROTO_TCP) {
++ err = -EOPNOTSUPP;
++ goto out;
++ }
+
+ /* Don't allow listeners or closed sockets */
+- if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
+- return -EOPNOTSUPP;
++ if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
++ err = -EOPNOTSUPP;
++ goto out;
++ }
+
+ psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
+- if (!psock)
+- return -ENOMEM;
++ if (!psock) {
++ err = -ENOMEM;
++ goto out;
++ }
+
+ psock->mux = mux;
+ psock->sk = csk;
+@@ -1406,7 +1414,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
+ err = strp_init(&psock->strp, csk, &cb);
+ if (err) {
+ kmem_cache_free(kcm_psockp, psock);
+- return err;
++ goto out;
+ }
+
+ write_lock_bh(&csk->sk_callback_lock);
+@@ -1418,7 +1426,8 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
+ write_unlock_bh(&csk->sk_callback_lock);
+ strp_done(&psock->strp);
+ kmem_cache_free(kcm_psockp, psock);
+- return -EALREADY;
++ err = -EALREADY;
++ goto out;
+ }
+
+ psock->save_data_ready = csk->sk_data_ready;
+@@ -1454,7 +1463,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
+ /* Schedule RX work in case there are already bytes queued */
+ strp_check_rcv(&psock->strp);
+
+- return 0;
++out:
++ release_sock(csk);
++
++ return err;
+ }
+
+ static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
+@@ -1506,6 +1518,7 @@ static void kcm_unattach(struct kcm_psock *psock)
+
+ if (WARN_ON(psock->rx_kcm)) {
+ write_unlock_bh(&csk->sk_callback_lock);
++ release_sock(csk);
+ return;
+ }
+
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index cfc4dd8997e5..ead98e8e0b1f 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1612,9 +1612,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
+ encap = cfg->encap;
+
+ /* Quick sanity checks */
++ err = -EPROTONOSUPPORT;
++ if (sk->sk_type != SOCK_DGRAM) {
++ pr_debug("tunl %hu: fd %d wrong socket type\n",
++ tunnel_id, fd);
++ goto err;
++ }
+ switch (encap) {
+ case L2TP_ENCAPTYPE_UDP:
+- err = -EPROTONOSUPPORT;
+ if (sk->sk_protocol != IPPROTO_UDP) {
+ pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
+@@ -1622,7 +1627,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
+ }
+ break;
+ case L2TP_ENCAPTYPE_IP:
+- err = -EPROTONOSUPPORT;
+ if (sk->sk_protocol != IPPROTO_L2TP) {
+ pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 11702016c900..9192a6143523 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1128,7 +1128,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
+- goto error;
++ return err;
+ return delivered ? 0 : -ESRCH;
+ error:
+ kfree_skb(skb);
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index af47bdf2f483..b6e3abe505ac 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -141,6 +141,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
+ metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
+ break;
+ default:
++ ret = -EINVAL;
+ goto err_out;
+ }
+
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index c73d58872cf8..e899d9eb76cb 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -513,7 +513,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ }
+
+ if (unlikely(sch->q.qlen >= sch->limit))
+- return qdisc_drop(skb, sch, to_free);
++ return qdisc_drop_all(skb, sch, to_free);
+
+ qdisc_qstats_backlog_inc(sch, skb);
+