Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.8 commit in: /
Date: Sat, 26 Sep 2020 21:50:28
Message-Id: 1601157007.63e318c66258ba6be277fb558bc364ef2f2c126f.mpagano@gentoo
commit:     63e318c66258ba6be277fb558bc364ef2f2c126f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 26 21:50:07 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 26 21:50:07 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=63e318c6

Linux patch 5.8.12

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1011_linux-5.8.12.patch | 2440 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2444 insertions(+)
diff --git a/0000_README b/0000_README
index d438f0f..51cee27 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-5.8.11.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.8.11

+Patch:  1011_linux-5.8.12.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.8.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

32 diff --git a/1011_linux-5.8.12.patch b/1011_linux-5.8.12.patch
33 new file mode 100644
34 index 0000000..ac579a3
35 --- /dev/null
36 +++ b/1011_linux-5.8.12.patch
37 @@ -0,0 +1,2440 @@
38 +diff --git a/Makefile b/Makefile
39 +index 0b025b3a56401..d0d40c628dc34 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 8
46 +-SUBLEVEL = 11
47 ++SUBLEVEL = 12
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
52 +index 7c17b0f705ec3..87db588bcdd6b 100644
53 +--- a/drivers/net/dsa/microchip/ksz8795.c
54 ++++ b/drivers/net/dsa/microchip/ksz8795.c
55 +@@ -1269,7 +1269,7 @@ static int ksz8795_switch_init(struct ksz_device *dev)
56 + }
57 +
58 + /* set the real number of ports */
59 +- dev->ds->num_ports = dev->port_cnt;
60 ++ dev->ds->num_ports = dev->port_cnt + 1;
61 +
62 + return 0;
63 + }
64 +diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
65 +index 1368816abaed1..99cdb2f18fa2f 100644
66 +--- a/drivers/net/dsa/rtl8366.c
67 ++++ b/drivers/net/dsa/rtl8366.c
68 +@@ -452,13 +452,19 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
69 + return ret;
70 +
71 + if (vid == vlanmc.vid) {
72 +- /* clear VLAN member configurations */
73 +- vlanmc.vid = 0;
74 +- vlanmc.priority = 0;
75 +- vlanmc.member = 0;
76 +- vlanmc.untag = 0;
77 +- vlanmc.fid = 0;
78 +-
79 ++ /* Remove this port from the VLAN */
80 ++ vlanmc.member &= ~BIT(port);
81 ++ vlanmc.untag &= ~BIT(port);
82 ++ /*
83 ++ * If no ports are members of this VLAN
84 ++ * anymore then clear the whole member
85 ++ * config so it can be reused.
86 ++ */
87 ++ if (!vlanmc.member && vlanmc.untag) {
88 ++ vlanmc.vid = 0;
89 ++ vlanmc.priority = 0;
90 ++ vlanmc.fid = 0;
91 ++ }
92 + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
93 + if (ret) {
94 + dev_err(smi->dev,
95 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
96 +index cd5c7a1412c6d..dd07db656a5c3 100644
97 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
98 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
99 +@@ -4198,7 +4198,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
100 + u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
101 + u16 dst = BNXT_HWRM_CHNL_CHIMP;
102 +
103 +- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
104 ++ if (BNXT_NO_FW_ACCESS(bp))
105 + return -EBUSY;
106 +
107 + if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
108 +@@ -5530,7 +5530,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
109 + struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
110 + u16 error_code;
111 +
112 +- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
113 ++ if (BNXT_NO_FW_ACCESS(bp))
114 + return 0;
115 +
116 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
117 +@@ -7502,7 +7502,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
118 +
119 + if (set_tpa)
120 + tpa_flags = bp->flags & BNXT_FLAG_TPA;
121 +- else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
122 ++ else if (BNXT_NO_FW_ACCESS(bp))
123 + return 0;
124 + for (i = 0; i < bp->nr_vnics; i++) {
125 + rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
126 +@@ -8993,18 +8993,16 @@ static ssize_t bnxt_show_temp(struct device *dev,
127 + struct hwrm_temp_monitor_query_output *resp;
128 + struct bnxt *bp = dev_get_drvdata(dev);
129 + u32 len = 0;
130 ++ int rc;
131 +
132 + resp = bp->hwrm_cmd_resp_addr;
133 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
134 + mutex_lock(&bp->hwrm_cmd_lock);
135 +- if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
136 ++ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
137 ++ if (!rc)
138 + len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
139 + mutex_unlock(&bp->hwrm_cmd_lock);
140 +-
141 +- if (len)
142 +- return len;
143 +-
144 +- return sprintf(buf, "unknown\n");
145 ++ return rc ?: len;
146 + }
147 + static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
148 +
149 +@@ -9024,7 +9022,16 @@ static void bnxt_hwmon_close(struct bnxt *bp)
150 +
151 + static void bnxt_hwmon_open(struct bnxt *bp)
152 + {
153 ++ struct hwrm_temp_monitor_query_input req = {0};
154 + struct pci_dev *pdev = bp->pdev;
155 ++ int rc;
156 ++
157 ++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
158 ++ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
159 ++ if (rc == -EACCES || rc == -EOPNOTSUPP) {
160 ++ bnxt_hwmon_close(bp);
161 ++ return;
162 ++ }
163 +
164 + if (bp->hwmon_dev)
165 + return;
166 +@@ -11498,6 +11505,10 @@ static void bnxt_remove_one(struct pci_dev *pdev)
167 + if (BNXT_PF(bp))
168 + bnxt_sriov_disable(bp);
169 +
170 ++ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
171 ++ bnxt_cancel_sp_work(bp);
172 ++ bp->sp_event = 0;
173 ++
174 + bnxt_dl_fw_reporters_destroy(bp, true);
175 + if (BNXT_PF(bp))
176 + devlink_port_type_clear(&bp->dl_port);
177 +@@ -11505,9 +11516,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
178 + unregister_netdev(dev);
179 + bnxt_dl_unregister(bp);
180 + bnxt_shutdown_tc(bp);
181 +- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
182 +- bnxt_cancel_sp_work(bp);
183 +- bp->sp_event = 0;
184 +
185 + bnxt_clear_int_mode(bp);
186 + bnxt_hwrm_func_drv_unrgtr(bp);
187 +@@ -11806,7 +11814,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
188 + static void bnxt_vpd_read_info(struct bnxt *bp)
189 + {
190 + struct pci_dev *pdev = bp->pdev;
191 +- int i, len, pos, ro_size;
192 ++ int i, len, pos, ro_size, size;
193 + ssize_t vpd_size;
194 + u8 *vpd_data;
195 +
196 +@@ -11841,7 +11849,8 @@ static void bnxt_vpd_read_info(struct bnxt *bp)
197 + if (len + pos > vpd_size)
198 + goto read_sn;
199 +
200 +- strlcpy(bp->board_partno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
201 ++ size = min(len, BNXT_VPD_FLD_LEN - 1);
202 ++ memcpy(bp->board_partno, &vpd_data[pos], size);
203 +
204 + read_sn:
205 + pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
206 +@@ -11854,7 +11863,8 @@ read_sn:
207 + if (len + pos > vpd_size)
208 + goto exit;
209 +
210 +- strlcpy(bp->board_serialno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
211 ++ size = min(len, BNXT_VPD_FLD_LEN - 1);
212 ++ memcpy(bp->board_serialno, &vpd_data[pos], size);
213 + exit:
214 + kfree(vpd_data);
215 + }
216 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
217 +index 78e2fd63ac3d5..440b43c8068f1 100644
218 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
219 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
220 +@@ -1673,6 +1673,10 @@ struct bnxt {
221 + #define BNXT_STATE_FW_FATAL_COND 6
222 + #define BNXT_STATE_DRV_REGISTERED 7
223 +
224 ++#define BNXT_NO_FW_ACCESS(bp) \
225 ++ (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
226 ++ pci_channel_offline((bp)->pdev))
227 ++
228 + struct bnxt_irq *irq_tbl;
229 + int total_irqs;
230 + u8 mac_addr[ETH_ALEN];
231 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
232 +index bc2c76fa54cad..f6e236a7bf18d 100644
233 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
234 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
235 +@@ -1735,9 +1735,12 @@ static int bnxt_set_pauseparam(struct net_device *dev,
236 + if (!BNXT_PHY_CFG_ABLE(bp))
237 + return -EOPNOTSUPP;
238 +
239 ++ mutex_lock(&bp->link_lock);
240 + if (epause->autoneg) {
241 +- if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
242 +- return -EINVAL;
243 ++ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
244 ++ rc = -EINVAL;
245 ++ goto pause_exit;
246 ++ }
247 +
248 + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
249 + if (bp->hwrm_spec_code >= 0x10201)
250 +@@ -1758,11 +1761,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,
251 + if (epause->tx_pause)
252 + link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
253 +
254 +- if (netif_running(dev)) {
255 +- mutex_lock(&bp->link_lock);
256 ++ if (netif_running(dev))
257 + rc = bnxt_hwrm_set_pause(bp);
258 +- mutex_unlock(&bp->link_lock);
259 +- }
260 ++
261 ++pause_exit:
262 ++ mutex_unlock(&bp->link_lock);
263 + return rc;
264 + }
265 +
266 +@@ -2499,8 +2502,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
267 + struct bnxt *bp = netdev_priv(dev);
268 + struct ethtool_eee *eee = &bp->eee;
269 + struct bnxt_link_info *link_info = &bp->link_info;
270 +- u32 advertising =
271 +- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
272 ++ u32 advertising;
273 + int rc = 0;
274 +
275 + if (!BNXT_PHY_CFG_ABLE(bp))
276 +@@ -2509,19 +2511,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
277 + if (!(bp->flags & BNXT_FLAG_EEE_CAP))
278 + return -EOPNOTSUPP;
279 +
280 ++ mutex_lock(&bp->link_lock);
281 ++ advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
282 + if (!edata->eee_enabled)
283 + goto eee_ok;
284 +
285 + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
286 + netdev_warn(dev, "EEE requires autoneg\n");
287 +- return -EINVAL;
288 ++ rc = -EINVAL;
289 ++ goto eee_exit;
290 + }
291 + if (edata->tx_lpi_enabled) {
292 + if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
293 + edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
294 + netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
295 + bp->lpi_tmr_lo, bp->lpi_tmr_hi);
296 +- return -EINVAL;
297 ++ rc = -EINVAL;
298 ++ goto eee_exit;
299 + } else if (!bp->lpi_tmr_hi) {
300 + edata->tx_lpi_timer = eee->tx_lpi_timer;
301 + }
302 +@@ -2531,7 +2537,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
303 + } else if (edata->advertised & ~advertising) {
304 + netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
305 + edata->advertised, advertising);
306 +- return -EINVAL;
307 ++ rc = -EINVAL;
308 ++ goto eee_exit;
309 + }
310 +
311 + eee->advertised = edata->advertised;
312 +@@ -2543,6 +2550,8 @@ eee_ok:
313 + if (netif_running(dev))
314 + rc = bnxt_hwrm_set_link_setting(bp, false, true);
315 +
316 ++eee_exit:
317 ++ mutex_unlock(&bp->link_lock);
318 + return rc;
319 + }
320 +
321 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
322 +index 4b1b5928b1043..55347bcea2285 100644
323 +--- a/drivers/net/ethernet/cadence/macb_main.c
324 ++++ b/drivers/net/ethernet/cadence/macb_main.c
325 +@@ -647,8 +647,7 @@ static void macb_mac_link_up(struct phylink_config *config,
326 + ctrl |= GEM_BIT(GBE);
327 + }
328 +
329 +- /* We do not support MLO_PAUSE_RX yet */
330 +- if (tx_pause)
331 ++ if (rx_pause)
332 + ctrl |= MACB_BIT(PAE);
333 +
334 + macb_set_tx_clk(bp->tx_clk, speed, ndev);
335 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
336 +index d02d346629b36..ff0d82e2535da 100644
337 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
338 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
339 +@@ -1906,13 +1906,16 @@ out:
340 + static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
341 + struct filter_entry *f)
342 + {
343 +- if (f->fs.hitcnts)
344 ++ if (f->fs.hitcnts) {
345 + set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
346 +- TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) |
347 ++ TCB_TIMESTAMP_V(TCB_TIMESTAMP_M),
348 ++ TCB_TIMESTAMP_V(0ULL),
349 ++ 1);
350 ++ set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W,
351 + TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
352 +- TCB_TIMESTAMP_V(0ULL) |
353 + TCB_RTT_TS_RECENT_AGE_V(0ULL),
354 + 1);
355 ++ }
356 +
357 + if (f->fs.newdmac)
358 + set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
359 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
360 +index b1a073eea60b2..a020e84906813 100644
361 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
362 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
363 +@@ -229,7 +229,7 @@ void cxgb4_free_mps_ref_entries(struct adapter *adap)
364 + {
365 + struct mps_entries_ref *mps_entry, *tmp;
366 +
367 +- if (!list_empty(&adap->mps_ref))
368 ++ if (list_empty(&adap->mps_ref))
369 + return;
370 +
371 + spin_lock(&adap->mps_ref_lock);
372 +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
373 +index e0f5a81d8620d..7fe39a155b329 100644
374 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
375 ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
376 +@@ -45,6 +45,8 @@
377 +
378 + #define MGMT_MSG_TIMEOUT 5000
379 +
380 ++#define SET_FUNC_PORT_MBOX_TIMEOUT 30000
381 ++
382 + #define SET_FUNC_PORT_MGMT_TIMEOUT 25000
383 +
384 + #define mgmt_to_pfhwdev(pf_mgmt) \
385 +@@ -358,16 +360,20 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
386 + return -EINVAL;
387 + }
388 +
389 +- if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
390 +- timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
391 ++ if (HINIC_IS_VF(hwif)) {
392 ++ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
393 ++ timeout = SET_FUNC_PORT_MBOX_TIMEOUT;
394 +
395 +- if (HINIC_IS_VF(hwif))
396 + return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in,
397 +- in_size, buf_out, out_size, 0);
398 +- else
399 ++ in_size, buf_out, out_size, timeout);
400 ++ } else {
401 ++ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
402 ++ timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
403 ++
404 + return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
405 + buf_out, out_size, MGMT_DIRECT_SEND,
406 + MSG_NOT_RESP, timeout);
407 ++ }
408 + }
409 +
410 + static void recv_mgmt_msg_work_handler(struct work_struct *work)
411 +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
412 +index e9e6f4c9309a1..c9d884049fd04 100644
413 +--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
414 ++++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
415 +@@ -168,6 +168,24 @@ err_init_txq:
416 + return err;
417 + }
418 +
419 ++static void enable_txqs_napi(struct hinic_dev *nic_dev)
420 ++{
421 ++ int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
422 ++ int i;
423 ++
424 ++ for (i = 0; i < num_txqs; i++)
425 ++ napi_enable(&nic_dev->txqs[i].napi);
426 ++}
427 ++
428 ++static void disable_txqs_napi(struct hinic_dev *nic_dev)
429 ++{
430 ++ int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
431 ++ int i;
432 ++
433 ++ for (i = 0; i < num_txqs; i++)
434 ++ napi_disable(&nic_dev->txqs[i].napi);
435 ++}
436 ++
437 + /**
438 + * free_txqs - Free the Logical Tx Queues of specific NIC device
439 + * @nic_dev: the specific NIC device
440 +@@ -394,6 +412,8 @@ int hinic_open(struct net_device *netdev)
441 + goto err_create_txqs;
442 + }
443 +
444 ++ enable_txqs_napi(nic_dev);
445 ++
446 + err = create_rxqs(nic_dev);
447 + if (err) {
448 + netif_err(nic_dev, drv, netdev,
449 +@@ -475,6 +495,7 @@ err_port_state:
450 + }
451 +
452 + err_create_rxqs:
453 ++ disable_txqs_napi(nic_dev);
454 + free_txqs(nic_dev);
455 +
456 + err_create_txqs:
457 +@@ -488,6 +509,9 @@ int hinic_close(struct net_device *netdev)
458 + struct hinic_dev *nic_dev = netdev_priv(netdev);
459 + unsigned int flags;
460 +
461 ++ /* Disable txq napi firstly to aviod rewaking txq in free_tx_poll */
462 ++ disable_txqs_napi(nic_dev);
463 ++
464 + down(&nic_dev->mgmt_lock);
465 +
466 + flags = nic_dev->flags;
467 +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
468 +index 4c66a0bc1b283..789aa278851e3 100644
469 +--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
470 ++++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
471 +@@ -684,18 +684,6 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
472 + return budget;
473 + }
474 +
475 +-static void tx_napi_add(struct hinic_txq *txq, int weight)
476 +-{
477 +- netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
478 +- napi_enable(&txq->napi);
479 +-}
480 +-
481 +-static void tx_napi_del(struct hinic_txq *txq)
482 +-{
483 +- napi_disable(&txq->napi);
484 +- netif_napi_del(&txq->napi);
485 +-}
486 +-
487 + static irqreturn_t tx_irq(int irq, void *data)
488 + {
489 + struct hinic_txq *txq = data;
490 +@@ -724,7 +712,7 @@ static int tx_request_irq(struct hinic_txq *txq)
491 + struct hinic_sq *sq = txq->sq;
492 + int err;
493 +
494 +- tx_napi_add(txq, nic_dev->tx_weight);
495 ++ netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight);
496 +
497 + hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
498 + TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
499 +@@ -734,7 +722,7 @@ static int tx_request_irq(struct hinic_txq *txq)
500 + err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
501 + if (err) {
502 + dev_err(&pdev->dev, "Failed to request Tx irq\n");
503 +- tx_napi_del(txq);
504 ++ netif_napi_del(&txq->napi);
505 + return err;
506 + }
507 +
508 +@@ -746,7 +734,7 @@ static void tx_free_irq(struct hinic_txq *txq)
509 + struct hinic_sq *sq = txq->sq;
510 +
511 + free_irq(sq->irq, txq);
512 +- tx_napi_del(txq);
513 ++ netif_napi_del(&txq->napi);
514 + }
515 +
516 + /**
517 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
518 +index 5afb3c9c52d20..1b702a43a5d01 100644
519 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
520 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
521 +@@ -479,6 +479,9 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
522 + int i, j, rc;
523 + u64 *size_array;
524 +
525 ++ if (!adapter->rx_pool)
526 ++ return -1;
527 ++
528 + size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
529 + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
530 +
531 +@@ -649,6 +652,9 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
532 + int tx_scrqs;
533 + int i, rc;
534 +
535 ++ if (!adapter->tx_pool)
536 ++ return -1;
537 ++
538 + tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
539 + for (i = 0; i < tx_scrqs; i++) {
540 + rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
541 +@@ -2011,7 +2017,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
542 + adapter->req_rx_add_entries_per_subcrq !=
543 + old_num_rx_slots ||
544 + adapter->req_tx_entries_per_subcrq !=
545 +- old_num_tx_slots) {
546 ++ old_num_tx_slots ||
547 ++ !adapter->rx_pool ||
548 ++ !adapter->tso_pool ||
549 ++ !adapter->tx_pool) {
550 + release_rx_pools(adapter);
551 + release_tx_pools(adapter);
552 + release_napi(adapter);
553 +@@ -2023,12 +2032,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
554 +
555 + } else {
556 + rc = reset_tx_pools(adapter);
557 +- if (rc)
558 ++ if (rc) {
559 ++ netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
560 ++ rc);
561 + goto out;
562 ++ }
563 +
564 + rc = reset_rx_pools(adapter);
565 +- if (rc)
566 ++ if (rc) {
567 ++ netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
568 ++ rc);
569 + goto out;
570 ++ }
571 + }
572 + ibmvnic_disable_irqs(adapter);
573 + }
574 +diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
575 +index 1645e4e7ebdbb..635ff3a5dcfb3 100644
576 +--- a/drivers/net/ethernet/lantiq_xrx200.c
577 ++++ b/drivers/net/ethernet/lantiq_xrx200.c
578 +@@ -230,8 +230,8 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
579 + }
580 +
581 + if (rx < budget) {
582 +- napi_complete(&ch->napi);
583 +- ltq_dma_enable_irq(&ch->dma);
584 ++ if (napi_complete_done(&ch->napi, rx))
585 ++ ltq_dma_enable_irq(&ch->dma);
586 + }
587 +
588 + return rx;
589 +@@ -268,9 +268,12 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
590 + net_dev->stats.tx_bytes += bytes;
591 + netdev_completed_queue(ch->priv->net_dev, pkts, bytes);
592 +
593 ++ if (netif_queue_stopped(net_dev))
594 ++ netif_wake_queue(net_dev);
595 ++
596 + if (pkts < budget) {
597 +- napi_complete(&ch->napi);
598 +- ltq_dma_enable_irq(&ch->dma);
599 ++ if (napi_complete_done(&ch->napi, pkts))
600 ++ ltq_dma_enable_irq(&ch->dma);
601 + }
602 +
603 + return pkts;
604 +@@ -342,10 +345,12 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
605 + {
606 + struct xrx200_chan *ch = ptr;
607 +
608 +- ltq_dma_disable_irq(&ch->dma);
609 +- ltq_dma_ack_irq(&ch->dma);
610 ++ if (napi_schedule_prep(&ch->napi)) {
611 ++ __napi_schedule(&ch->napi);
612 ++ ltq_dma_disable_irq(&ch->dma);
613 ++ }
614 +
615 +- napi_schedule(&ch->napi);
616 ++ ltq_dma_ack_irq(&ch->dma);
617 +
618 + return IRQ_HANDLED;
619 + }
620 +@@ -499,7 +504,7 @@ static int xrx200_probe(struct platform_device *pdev)
621 +
622 + /* setup NAPI */
623 + netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
624 +- netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
625 ++ netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
626 +
627 + platform_set_drvdata(pdev, priv);
628 +
629 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
630 +index 842db20493df6..76b23ba7a4687 100644
631 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
632 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
633 +@@ -604,7 +604,7 @@ struct mlx5e_rq {
634 + struct dim dim; /* Dynamic Interrupt Moderation */
635 +
636 + /* XDP */
637 +- struct bpf_prog *xdp_prog;
638 ++ struct bpf_prog __rcu *xdp_prog;
639 + struct mlx5e_xdpsq *xdpsq;
640 + DECLARE_BITMAP(flags, 8);
641 + struct page_pool *page_pool;
642 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
643 +index c9d308e919655..75ed820b0ad72 100644
644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
645 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
646 +@@ -121,7 +121,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
647 + bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
648 + u32 *len, struct xdp_buff *xdp)
649 + {
650 +- struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
651 ++ struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
652 + u32 act;
653 + int err;
654 +
655 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
656 +index a33a1f762c70d..40db27bf790bb 100644
657 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
658 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
659 +@@ -31,7 +31,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
660 + {
661 + struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
662 + u32 cqe_bcnt32 = cqe_bcnt;
663 +- bool consumed;
664 +
665 + /* Check packet size. Note LRO doesn't use linear SKB */
666 + if (unlikely(cqe_bcnt > rq->hw_mtu)) {
667 +@@ -51,10 +50,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
668 + xsk_buff_dma_sync_for_cpu(xdp);
669 + prefetch(xdp->data);
670 +
671 +- rcu_read_lock();
672 +- consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp);
673 +- rcu_read_unlock();
674 +-
675 + /* Possible flows:
676 + * - XDP_REDIRECT to XSKMAP:
677 + * The page is owned by the userspace from now.
678 +@@ -70,7 +65,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
679 + * allocated first from the Reuse Ring, so it has enough space.
680 + */
681 +
682 +- if (likely(consumed)) {
683 ++ if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) {
684 + if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
685 + __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
686 + return NULL; /* page/packet was consumed by XDP */
687 +@@ -88,7 +83,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
688 + u32 cqe_bcnt)
689 + {
690 + struct xdp_buff *xdp = wi->di->xsk;
691 +- bool consumed;
692 +
693 + /* wi->offset is not used in this function, because xdp->data and the
694 + * DMA address point directly to the necessary place. Furthermore, the
695 +@@ -107,11 +101,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
696 + return NULL;
697 + }
698 +
699 +- rcu_read_lock();
700 +- consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp);
701 +- rcu_read_unlock();
702 +-
703 +- if (likely(consumed))
704 ++ if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp)))
705 + return NULL; /* page/packet was consumed by XDP */
706 +
707 + /* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
708 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
709 +index 2c80205dc939d..3081cd74d651b 100644
710 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
711 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
712 +@@ -143,8 +143,7 @@ err_free_cparam:
713 + void mlx5e_close_xsk(struct mlx5e_channel *c)
714 + {
715 + clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
716 +- napi_synchronize(&c->napi);
717 +- synchronize_rcu(); /* Sync with the XSK wakeup. */
718 ++ synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */
719 +
720 + mlx5e_close_rq(&c->xskrq);
721 + mlx5e_close_cq(&c->xskrq.cq);
722 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
723 +index 01468ec274466..b949b9a7538b0 100644
724 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
725 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
726 +@@ -35,7 +35,6 @@
727 + #include <net/sock.h>
728 +
729 + #include "en.h"
730 +-#include "accel/tls.h"
731 + #include "fpga/sdk.h"
732 + #include "en_accel/tls.h"
733 +
734 +@@ -51,9 +50,14 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
735 +
736 + #define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
737 +
738 ++static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
739 ++{
740 ++ return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
741 ++}
742 ++
743 + int mlx5e_tls_get_count(struct mlx5e_priv *priv)
744 + {
745 +- if (!priv->tls)
746 ++ if (!is_tls_atomic_stats(priv))
747 + return 0;
748 +
749 + return NUM_TLS_SW_COUNTERS;
750 +@@ -63,7 +67,7 @@ int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
751 + {
752 + unsigned int i, idx = 0;
753 +
754 +- if (!priv->tls)
755 ++ if (!is_tls_atomic_stats(priv))
756 + return 0;
757 +
758 + for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
759 +@@ -77,7 +81,7 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
760 + {
761 + int i, idx = 0;
762 +
763 +- if (!priv->tls)
764 ++ if (!is_tls_atomic_stats(priv))
765 + return 0;
766 +
767 + for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
768 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
769 +index 3b892ec301b4a..cccf65fc116ee 100644
770 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
771 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
772 +@@ -401,7 +401,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
773 +
774 + if (params->xdp_prog)
775 + bpf_prog_inc(params->xdp_prog);
776 +- rq->xdp_prog = params->xdp_prog;
777 ++ RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
778 +
779 + rq_xdp_ix = rq->ix;
780 + if (xsk)
781 +@@ -410,7 +410,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
782 + if (err < 0)
783 + goto err_rq_wq_destroy;
784 +
785 +- rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
786 ++ rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
787 + rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
788 + pool_size = 1 << params->log_rq_mtu_frames;
789 +
790 +@@ -605,8 +605,8 @@ err_free:
791 + }
792 +
793 + err_rq_wq_destroy:
794 +- if (rq->xdp_prog)
795 +- bpf_prog_put(rq->xdp_prog);
796 ++ if (params->xdp_prog)
797 ++ bpf_prog_put(params->xdp_prog);
798 + xdp_rxq_info_unreg(&rq->xdp_rxq);
799 + page_pool_destroy(rq->page_pool);
800 + mlx5_wq_destroy(&rq->wq_ctrl);
801 +@@ -616,10 +616,16 @@ err_rq_wq_destroy:
802 +
803 + static void mlx5e_free_rq(struct mlx5e_rq *rq)
804 + {
805 ++ struct mlx5e_channel *c = rq->channel;
806 ++ struct bpf_prog *old_prog = NULL;
807 + int i;
808 +
809 +- if (rq->xdp_prog)
810 +- bpf_prog_put(rq->xdp_prog);
811 ++ /* drop_rq has neither channel nor xdp_prog. */
812 ++ if (c)
813 ++ old_prog = rcu_dereference_protected(rq->xdp_prog,
814 ++ lockdep_is_held(&c->priv->state_lock));
815 ++ if (old_prog)
816 ++ bpf_prog_put(old_prog);
817 +
818 + switch (rq->wq_type) {
819 + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
820 +@@ -905,7 +911,7 @@ void mlx5e_activate_rq(struct mlx5e_rq *rq)
821 + void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
822 + {
823 + clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
824 +- napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
825 ++ synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
826 + }
827 +
828 + void mlx5e_close_rq(struct mlx5e_rq *rq)
829 +@@ -1350,12 +1356,10 @@ void mlx5e_tx_disable_queue(struct netdev_queue *txq)
830 +
831 + static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
832 + {
833 +- struct mlx5e_channel *c = sq->channel;
834 + struct mlx5_wq_cyc *wq = &sq->wq;
835 +
836 + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
837 +- /* prevent netif_tx_wake_queue */
838 +- napi_synchronize(&c->napi);
839 ++ synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
840 +
841 + mlx5e_tx_disable_queue(sq->txq);
842 +
843 +@@ -1430,10 +1434,8 @@ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
844 +
845 + void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
846 + {
847 +- struct mlx5e_channel *c = icosq->channel;
848 +-
849 + clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
850 +- napi_synchronize(&c->napi);
851 ++ synchronize_rcu(); /* Sync with NAPI. */
852 + }
853 +
854 + void mlx5e_close_icosq(struct mlx5e_icosq *sq)
855 +@@ -1511,7 +1513,7 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
856 + struct mlx5e_channel *c = sq->channel;
857 +
858 + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
859 +- napi_synchronize(&c->napi);
860 ++ synchronize_rcu(); /* Sync with NAPI. */
861 +
862 + mlx5e_destroy_sq(c->mdev, sq->sqn);
863 + mlx5e_free_xdpsq_descs(sq);
864 +@@ -4423,6 +4425,16 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
865 + return 0;
866 + }
867 +
868 ++static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
869 ++{
870 ++ struct bpf_prog *old_prog;
871 ++
872 ++ old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
873 ++ lockdep_is_held(&rq->channel->priv->state_lock));
874 ++ if (old_prog)
875 ++ bpf_prog_put(old_prog);
876 ++}
877 ++
878 + static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
879 + {
880 + struct mlx5e_priv *priv = netdev_priv(netdev);
881 +@@ -4481,29 +4493,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
882 + */
883 + for (i = 0; i < priv->channels.num; i++) {
884 + struct mlx5e_channel *c = priv->channels.c[i];
885 +- bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
886 +-
887 +- clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
888 +- if (xsk_open)
889 +- clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
890 +- napi_synchronize(&c->napi);
891 +- /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
892 +-
893 +- old_prog = xchg(&c->rq.xdp_prog, prog);
894 +- if (old_prog)
895 +- bpf_prog_put(old_prog);
896 +-
897 +- if (xsk_open) {
898 +- old_prog = xchg(&c->xskrq.xdp_prog, prog);
899 +- if (old_prog)
900 +- bpf_prog_put(old_prog);
901 +- }
902 +
903 +- set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
904 +- if (xsk_open)
905 +- set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
906 +- /* napi_schedule in case we have missed anything */
907 +- napi_schedule(&c->napi);
908 ++ mlx5e_rq_replace_xdp_prog(&c->rq, prog);
909 ++ if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
910 ++ mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
911 + }
912 +
913 + unlock:
914 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
915 +index dbb1c63239672..409fecbcc5d2b 100644
916 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
917 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
918 +@@ -1072,7 +1072,6 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
919 + struct xdp_buff xdp;
920 + struct sk_buff *skb;
921 + void *va, *data;
922 +- bool consumed;
923 + u32 frag_size;
924 +
925 + va = page_address(di->page) + wi->offset;
926 +@@ -1084,11 +1083,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
927 + prefetchw(va); /* xdp_frame data area */
928 + prefetch(data);
929 +
930 +- rcu_read_lock();
931 + mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
932 +- consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp);
933 +- rcu_read_unlock();
934 +- if (consumed)
935 ++ if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
936 + return NULL; /* page/packet was consumed by XDP */
937 +
938 + rx_headroom = xdp.data - xdp.data_hard_start;
939 +@@ -1369,7 +1365,6 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
940 + struct sk_buff *skb;
941 + void *va, *data;
942 + u32 frag_size;
943 +- bool consumed;
944 +
945 + /* Check packet size. Note LRO doesn't use linear SKB */
946 + if (unlikely(cqe_bcnt > rq->hw_mtu)) {
947 +@@ -1386,11 +1381,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
948 + prefetchw(va); /* xdp_frame data area */
949 + prefetch(data);
950 +
951 +- rcu_read_lock();
952 + mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
953 +- consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp);
954 +- rcu_read_unlock();
955 +- if (consumed) {
956 ++ if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
957 + if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
958 + __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
959 + return NULL; /* page/packet was consumed by XDP */
960 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
961 +index fcedb5bdca9e5..7da1e7462f64e 100644
962 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
963 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
964 +@@ -1399,11 +1399,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
965 +
966 + mlx5e_put_flow_tunnel_id(flow);
967 +
968 +- if (flow_flag_test(flow, NOT_READY)) {
969 ++ if (flow_flag_test(flow, NOT_READY))
970 + remove_unready_flow(flow);
971 +- kvfree(attr->parse_attr);
972 +- return;
973 +- }
974 +
975 + if (mlx5e_is_offloaded_flow(flow)) {
976 + if (flow_flag_test(flow, SLOW))
977 +@@ -2734,6 +2731,22 @@ static struct mlx5_fields fields[] = {
978 + OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
979 + };
980 +
981 ++static unsigned long mask_to_le(unsigned long mask, int size)
982 ++{
983 ++ __be32 mask_be32;
984 ++ __be16 mask_be16;
985 ++
986 ++ if (size == 32) {
987 ++ mask_be32 = (__force __be32)(mask);
988 ++ mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
989 ++ } else if (size == 16) {
990 ++ mask_be32 = (__force __be32)(mask);
991 ++ mask_be16 = *(__be16 *)&mask_be32;
992 ++ mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
993 ++ }
994 ++
995 ++ return mask;
996 ++}
997 + static int offload_pedit_fields(struct mlx5e_priv *priv,
998 + int namespace,
999 + struct pedit_headers_action *hdrs,
1000 +@@ -2747,9 +2760,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
1001 + u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
1002 + struct mlx5e_tc_mod_hdr_acts *mod_acts;
1003 + struct mlx5_fields *f;
1004 +- unsigned long mask;
1005 +- __be32 mask_be32;
1006 +- __be16 mask_be16;
1007 ++ unsigned long mask, field_mask;
1008 + int err;
1009 + u8 cmd;
1010 +
1011 +@@ -2815,14 +2826,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
1012 + if (skip)
1013 + continue;
1014 +
1015 +- if (f->field_bsize == 32) {
1016 +- mask_be32 = (__force __be32)(mask);
1017 +- mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
1018 +- } else if (f->field_bsize == 16) {
1019 +- mask_be32 = (__force __be32)(mask);
1020 +- mask_be16 = *(__be16 *)&mask_be32;
1021 +- mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
1022 +- }
1023 ++ mask = mask_to_le(mask, f->field_bsize);
1024 +
1025 + first = find_first_bit(&mask, f->field_bsize);
1026 + next_z = find_next_zero_bit(&mask, f->field_bsize, first);
1027 +@@ -2853,9 +2857,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
1028 + if (cmd == MLX5_ACTION_TYPE_SET) {
1029 + int start;
1030 +
1031 ++ field_mask = mask_to_le(f->field_mask, f->field_bsize);
1032 ++
1033 + /* if field is bit sized it can start not from first bit */
1034 +- start = find_first_bit((unsigned long *)&f->field_mask,
1035 +- f->field_bsize);
1036 ++ start = find_first_bit(&field_mask, f->field_bsize);
1037 +
1038 + MLX5_SET(set_action_in, action, offset, first - start);
1039 + /* length is num of bits to be written, zero means length of 32 */
1040 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
1041 +index 8480278f2ee20..954a2f0513d67 100644
1042 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
1043 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
1044 +@@ -121,13 +121,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
1045 + struct mlx5e_xdpsq *xsksq = &c->xsksq;
1046 + struct mlx5e_rq *xskrq = &c->xskrq;
1047 + struct mlx5e_rq *rq = &c->rq;
1048 +- bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
1049 + bool aff_change = false;
1050 + bool busy_xsk = false;
1051 + bool busy = false;
1052 + int work_done = 0;
1053 ++ bool xsk_open;
1054 + int i;
1055 +
1056 ++ rcu_read_lock();
1057 ++
1058 ++ xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
1059 ++
1060 + ch_stats->poll++;
1061 +
1062 + for (i = 0; i < c->num_tc; i++)
1063 +@@ -167,8 +171,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
1064 + busy |= busy_xsk;
1065 +
1066 + if (busy) {
1067 +- if (likely(mlx5e_channel_no_affinity_change(c)))
1068 +- return budget;
1069 ++ if (likely(mlx5e_channel_no_affinity_change(c))) {
1070 ++ work_done = budget;
1071 ++ goto out;
1072 ++ }
1073 + ch_stats->aff_change++;
1074 + aff_change = true;
1075 + if (budget && work_done == budget)
1076 +@@ -176,7 +182,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
1077 + }
1078 +
1079 + if (unlikely(!napi_complete_done(napi, work_done)))
1080 +- return work_done;
1081 ++ goto out;
1082 +
1083 + ch_stats->arm++;
1084 +
1085 +@@ -203,6 +209,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
1086 + ch_stats->force_irq++;
1087 + }
1088 +
1089 ++out:
1090 ++ rcu_read_unlock();
1091 ++
1092 + return work_done;
1093 + }
1094 +
1095 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1096 +index ed75353c56b85..f16610feab88d 100644
1097 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1098 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1099 +@@ -1219,35 +1219,37 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
1100 + }
1101 + esw->fdb_table.offloads.send_to_vport_grp = g;
1102 +
1103 +- /* create peer esw miss group */
1104 +- memset(flow_group_in, 0, inlen);
1105 ++ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
1106 ++ /* create peer esw miss group */
1107 ++ memset(flow_group_in, 0, inlen);
1108 +
1109 +- esw_set_flow_group_source_port(esw, flow_group_in);
1110 ++ esw_set_flow_group_source_port(esw, flow_group_in);
1111 +
1112 +- if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1113 +- match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1114 +- flow_group_in,
1115 +- match_criteria);
1116 ++ if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1117 ++ match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1118 ++ flow_group_in,
1119 ++ match_criteria);
1120 +
1121 +- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1122 +- misc_parameters.source_eswitch_owner_vhca_id);
1123 ++ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1124 ++ misc_parameters.source_eswitch_owner_vhca_id);
1125 +
1126 +- MLX5_SET(create_flow_group_in, flow_group_in,
1127 +- source_eswitch_owner_vhca_id_valid, 1);
1128 +- }
1129 ++ MLX5_SET(create_flow_group_in, flow_group_in,
1130 ++ source_eswitch_owner_vhca_id_valid, 1);
1131 ++ }
1132 +
1133 +- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1134 +- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1135 +- ix + esw->total_vports - 1);
1136 +- ix += esw->total_vports;
1137 ++ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1138 ++ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1139 ++ ix + esw->total_vports - 1);
1140 ++ ix += esw->total_vports;
1141 +
1142 +- g = mlx5_create_flow_group(fdb, flow_group_in);
1143 +- if (IS_ERR(g)) {
1144 +- err = PTR_ERR(g);
1145 +- esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
1146 +- goto peer_miss_err;
1147 ++ g = mlx5_create_flow_group(fdb, flow_group_in);
1148 ++ if (IS_ERR(g)) {
1149 ++ err = PTR_ERR(g);
1150 ++ esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
1151 ++ goto peer_miss_err;
1152 ++ }
1153 ++ esw->fdb_table.offloads.peer_miss_grp = g;
1154 + }
1155 +- esw->fdb_table.offloads.peer_miss_grp = g;
1156 +
1157 + /* create miss group */
1158 + memset(flow_group_in, 0, inlen);
1159 +@@ -1282,7 +1284,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
1160 + miss_rule_err:
1161 + mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1162 + miss_err:
1163 +- mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1164 ++ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1165 ++ mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1166 + peer_miss_err:
1167 + mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1168 + send_vport_err:
1169 +@@ -1306,7 +1309,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
1170 + mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1171 + mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1172 + mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1173 +- mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1174 ++ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1175 ++ mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1176 + mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1177 +
1178 + mlx5_esw_chains_destroy(esw);
1179 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1180 +index 2e5f7efb82a88..1f96f9efa3c18 100644
1181 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1182 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1183 +@@ -655,7 +655,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
1184 + fte->action = *flow_act;
1185 + fte->flow_context = spec->flow_context;
1186 +
1187 +- tree_init_node(&fte->node, NULL, del_sw_fte);
1188 ++ tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
1189 +
1190 + return fte;
1191 + }
1192 +@@ -1792,7 +1792,6 @@ skip_search:
1193 + up_write_ref_node(&g->node, false);
1194 + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1195 + up_write_ref_node(&fte->node, false);
1196 +- tree_put_node(&fte->node, false);
1197 + return rule;
1198 + }
1199 + rule = ERR_PTR(-ENOENT);
1200 +@@ -1891,7 +1890,6 @@ search_again_locked:
1201 + up_write_ref_node(&g->node, false);
1202 + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1203 + up_write_ref_node(&fte->node, false);
1204 +- tree_put_node(&fte->node, false);
1205 + tree_put_node(&g->node, false);
1206 + return rule;
1207 +
1208 +@@ -2001,7 +1999,9 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
1209 + up_write_ref_node(&fte->node, false);
1210 + } else {
1211 + del_hw_fte(&fte->node);
1212 +- up_write(&fte->node.lock);
1213 ++ /* Avoid double call to del_hw_fte */
1214 ++ fte->node.del_hw_func = NULL;
1215 ++ up_write_ref_node(&fte->node, false);
1216 + tree_put_node(&fte->node, false);
1217 + }
1218 + kfree(handle);
1219 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1220 +index 6eb9fb9a18145..9c9ae33d84ce9 100644
1221 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1222 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1223 +@@ -829,8 +829,8 @@ nfp_port_get_fecparam(struct net_device *netdev,
1224 + struct nfp_eth_table_port *eth_port;
1225 + struct nfp_port *port;
1226 +
1227 +- param->active_fec = ETHTOOL_FEC_NONE_BIT;
1228 +- param->fec = ETHTOOL_FEC_NONE_BIT;
1229 ++ param->active_fec = ETHTOOL_FEC_NONE;
1230 ++ param->fec = ETHTOOL_FEC_NONE;
1231 +
1232 + port = nfp_port_from_netdev(netdev);
1233 + eth_port = nfp_port_get_eth_port(port);
1234 +diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
1235 +index 8ed78577cdedf..15672d0a4de69 100644
1236 +--- a/drivers/net/ethernet/ti/cpsw_new.c
1237 ++++ b/drivers/net/ethernet/ti/cpsw_new.c
1238 +@@ -17,6 +17,7 @@
1239 + #include <linux/phy.h>
1240 + #include <linux/phy/phy.h>
1241 + #include <linux/delay.h>
1242 ++#include <linux/pinctrl/consumer.h>
1243 + #include <linux/pm_runtime.h>
1244 + #include <linux/gpio/consumer.h>
1245 + #include <linux/of.h>
1246 +@@ -2070,9 +2071,61 @@ static int cpsw_remove(struct platform_device *pdev)
1247 + return 0;
1248 + }
1249 +
1250 ++static int __maybe_unused cpsw_suspend(struct device *dev)
1251 ++{
1252 ++ struct cpsw_common *cpsw = dev_get_drvdata(dev);
1253 ++ int i;
1254 ++
1255 ++ rtnl_lock();
1256 ++
1257 ++ for (i = 0; i < cpsw->data.slaves; i++) {
1258 ++ struct net_device *ndev = cpsw->slaves[i].ndev;
1259 ++
1260 ++ if (!(ndev && netif_running(ndev)))
1261 ++ continue;
1262 ++
1263 ++ cpsw_ndo_stop(ndev);
1264 ++ }
1265 ++
1266 ++ rtnl_unlock();
1267 ++
1268 ++ /* Select sleep pin state */
1269 ++ pinctrl_pm_select_sleep_state(dev);
1270 ++
1271 ++ return 0;
1272 ++}
1273 ++
1274 ++static int __maybe_unused cpsw_resume(struct device *dev)
1275 ++{
1276 ++ struct cpsw_common *cpsw = dev_get_drvdata(dev);
1277 ++ int i;
1278 ++
1279 ++ /* Select default pin state */
1280 ++ pinctrl_pm_select_default_state(dev);
1281 ++
1282 ++ /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
1283 ++ rtnl_lock();
1284 ++
1285 ++ for (i = 0; i < cpsw->data.slaves; i++) {
1286 ++ struct net_device *ndev = cpsw->slaves[i].ndev;
1287 ++
1288 ++ if (!(ndev && netif_running(ndev)))
1289 ++ continue;
1290 ++
1291 ++ cpsw_ndo_open(ndev);
1292 ++ }
1293 ++
1294 ++ rtnl_unlock();
1295 ++
1296 ++ return 0;
1297 ++}
1298 ++
1299 ++static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
1300 ++
1301 + static struct platform_driver cpsw_driver = {
1302 + .driver = {
1303 + .name = "cpsw-switch",
1304 ++ .pm = &cpsw_pm_ops,
1305 + .of_match_table = cpsw_of_mtable,
1306 + },
1307 + .probe = cpsw_probe,
1308 +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
1309 +index dec52b763d508..deede92b17fc7 100644
1310 +--- a/drivers/net/geneve.c
1311 ++++ b/drivers/net/geneve.c
1312 +@@ -773,7 +773,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
1313 + struct net_device *dev,
1314 + struct geneve_sock *gs4,
1315 + struct flowi4 *fl4,
1316 +- const struct ip_tunnel_info *info)
1317 ++ const struct ip_tunnel_info *info,
1318 ++ __be16 dport, __be16 sport)
1319 + {
1320 + bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
1321 + struct geneve_dev *geneve = netdev_priv(dev);
1322 +@@ -789,6 +790,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
1323 + fl4->flowi4_proto = IPPROTO_UDP;
1324 + fl4->daddr = info->key.u.ipv4.dst;
1325 + fl4->saddr = info->key.u.ipv4.src;
1326 ++ fl4->fl4_dport = dport;
1327 ++ fl4->fl4_sport = sport;
1328 +
1329 + tos = info->key.tos;
1330 + if ((tos == 1) && !geneve->collect_md) {
1331 +@@ -823,7 +826,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
1332 + struct net_device *dev,
1333 + struct geneve_sock *gs6,
1334 + struct flowi6 *fl6,
1335 +- const struct ip_tunnel_info *info)
1336 ++ const struct ip_tunnel_info *info,
1337 ++ __be16 dport, __be16 sport)
1338 + {
1339 + bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
1340 + struct geneve_dev *geneve = netdev_priv(dev);
1341 +@@ -839,6 +843,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
1342 + fl6->flowi6_proto = IPPROTO_UDP;
1343 + fl6->daddr = info->key.u.ipv6.dst;
1344 + fl6->saddr = info->key.u.ipv6.src;
1345 ++ fl6->fl6_dport = dport;
1346 ++ fl6->fl6_sport = sport;
1347 ++
1348 + prio = info->key.tos;
1349 + if ((prio == 1) && !geneve->collect_md) {
1350 + prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
1351 +@@ -885,14 +892,15 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1352 + __be16 sport;
1353 + int err;
1354 +
1355 +- rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
1356 ++ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
1357 ++ rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
1358 ++ geneve->info.key.tp_dst, sport);
1359 + if (IS_ERR(rt))
1360 + return PTR_ERR(rt);
1361 +
1362 + skb_tunnel_check_pmtu(skb, &rt->dst,
1363 + GENEVE_IPV4_HLEN + info->options_len);
1364 +
1365 +- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
1366 + if (geneve->collect_md) {
1367 + tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
1368 + ttl = key->ttl;
1369 +@@ -947,13 +955,14 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1370 + __be16 sport;
1371 + int err;
1372 +
1373 +- dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
1374 ++ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
1375 ++ dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
1376 ++ geneve->info.key.tp_dst, sport);
1377 + if (IS_ERR(dst))
1378 + return PTR_ERR(dst);
1379 +
1380 + skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
1381 +
1382 +- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
1383 + if (geneve->collect_md) {
1384 + prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
1385 + ttl = key->ttl;
1386 +@@ -1034,13 +1043,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1387 + {
1388 + struct ip_tunnel_info *info = skb_tunnel_info(skb);
1389 + struct geneve_dev *geneve = netdev_priv(dev);
1390 ++ __be16 sport;
1391 +
1392 + if (ip_tunnel_info_af(info) == AF_INET) {
1393 + struct rtable *rt;
1394 + struct flowi4 fl4;
1395 ++
1396 + struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
1397 ++ sport = udp_flow_src_port(geneve->net, skb,
1398 ++ 1, USHRT_MAX, true);
1399 +
1400 +- rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
1401 ++ rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
1402 ++ geneve->info.key.tp_dst, sport);
1403 + if (IS_ERR(rt))
1404 + return PTR_ERR(rt);
1405 +
1406 +@@ -1050,9 +1064,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1407 + } else if (ip_tunnel_info_af(info) == AF_INET6) {
1408 + struct dst_entry *dst;
1409 + struct flowi6 fl6;
1410 ++
1411 + struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
1412 ++ sport = udp_flow_src_port(geneve->net, skb,
1413 ++ 1, USHRT_MAX, true);
1414 +
1415 +- dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
1416 ++ dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
1417 ++ geneve->info.key.tp_dst, sport);
1418 + if (IS_ERR(dst))
1419 + return PTR_ERR(dst);
1420 +
1421 +@@ -1063,8 +1081,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1422 + return -EINVAL;
1423 + }
1424 +
1425 +- info->key.tp_src = udp_flow_src_port(geneve->net, skb,
1426 +- 1, USHRT_MAX, true);
1427 ++ info->key.tp_src = sport;
1428 + info->key.tp_dst = geneve->info.key.tp_dst;
1429 + return 0;
1430 + }
1431 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1432 +index 47159b31e6b39..8309194b351a9 100644
1433 +--- a/drivers/net/hyperv/netvsc_drv.c
1434 ++++ b/drivers/net/hyperv/netvsc_drv.c
1435 +@@ -2544,8 +2544,8 @@ static int netvsc_remove(struct hv_device *dev)
1436 + static int netvsc_suspend(struct hv_device *dev)
1437 + {
1438 + struct net_device_context *ndev_ctx;
1439 +- struct net_device *vf_netdev, *net;
1440 + struct netvsc_device *nvdev;
1441 ++ struct net_device *net;
1442 + int ret;
1443 +
1444 + net = hv_get_drvdata(dev);
1445 +@@ -2561,10 +2561,6 @@ static int netvsc_suspend(struct hv_device *dev)
1446 + goto out;
1447 + }
1448 +
1449 +- vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
1450 +- if (vf_netdev)
1451 +- netvsc_unregister_vf(vf_netdev);
1452 +-
1453 + /* Save the current config info */
1454 + ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
1455 +
1456 +@@ -2580,6 +2576,7 @@ static int netvsc_resume(struct hv_device *dev)
1457 + struct net_device *net = hv_get_drvdata(dev);
1458 + struct net_device_context *net_device_ctx;
1459 + struct netvsc_device_info *device_info;
1460 ++ struct net_device *vf_netdev;
1461 + int ret;
1462 +
1463 + rtnl_lock();
1464 +@@ -2592,6 +2589,15 @@ static int netvsc_resume(struct hv_device *dev)
1465 + netvsc_devinfo_put(device_info);
1466 + net_device_ctx->saved_netvsc_dev_info = NULL;
1467 +
1468 ++ /* A NIC driver (e.g. mlx5) may keep the VF network interface across
1469 ++ * hibernation, but here the data path is implicitly switched to the
1470 ++ * netvsc NIC since the vmbus channel is closed and re-opened, so
1471 ++ * netvsc_vf_changed() must be used to switch the data path to the VF.
1472 ++ */
1473 ++ vf_netdev = rtnl_dereference(net_device_ctx->vf_netdev);
1474 ++ if (vf_netdev && netvsc_vf_changed(vf_netdev) != NOTIFY_OK)
1475 ++ ret = -EINVAL;
1476 ++
1477 + rtnl_unlock();
1478 +
1479 + return ret;
1480 +diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
1481 +index 9df2a3e78c989..d08c626b2baa6 100644
1482 +--- a/drivers/net/ipa/ipa_table.c
1483 ++++ b/drivers/net/ipa/ipa_table.c
1484 +@@ -521,7 +521,7 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
1485 + val = ioread32(endpoint->ipa->reg_virt + offset);
1486 +
1487 + /* Zero all filter-related fields, preserving the rest */
1488 +- u32_replace_bits(val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
1489 ++ u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
1490 +
1491 + iowrite32(val, endpoint->ipa->reg_virt + offset);
1492 + }
1493 +@@ -572,7 +572,7 @@ static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
1494 + val = ioread32(ipa->reg_virt + offset);
1495 +
1496 + /* Zero all route-related fields, preserving the rest */
1497 +- u32_replace_bits(val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
1498 ++ u32p_replace_bits(&val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
1499 +
1500 + iowrite32(val, ipa->reg_virt + offset);
1501 + }
1502 +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
1503 +index 56cfae9504727..f5620f91dbf3a 100644
1504 +--- a/drivers/net/phy/phy.c
1505 ++++ b/drivers/net/phy/phy.c
1506 +@@ -948,7 +948,7 @@ void phy_stop(struct phy_device *phydev)
1507 + {
1508 + struct net_device *dev = phydev->attached_dev;
1509 +
1510 +- if (!phy_is_started(phydev)) {
1511 ++ if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) {
1512 + WARN(1, "called from state %s\n",
1513 + phy_state_to_str(phydev->state));
1514 + return;
1515 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
1516 +index 98369430a3be5..067910d242ab3 100644
1517 +--- a/drivers/net/phy/phy_device.c
1518 ++++ b/drivers/net/phy/phy_device.c
1519 +@@ -1092,10 +1092,6 @@ int phy_init_hw(struct phy_device *phydev)
1520 + if (ret < 0)
1521 + return ret;
1522 +
1523 +- ret = phy_disable_interrupts(phydev);
1524 +- if (ret)
1525 +- return ret;
1526 +-
1527 + if (phydev->drv->config_init)
1528 + ret = phydev->drv->config_init(phydev);
1529 +
1530 +@@ -1372,6 +1368,10 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
1531 + if (err)
1532 + goto error;
1533 +
1534 ++ err = phy_disable_interrupts(phydev);
1535 ++ if (err)
1536 ++ return err;
1537 ++
1538 + phy_resume(phydev);
1539 + phy_led_triggers_register(phydev);
1540 +
1541 +@@ -1631,7 +1631,8 @@ void phy_detach(struct phy_device *phydev)
1542 +
1543 + phy_led_triggers_unregister(phydev);
1544 +
1545 +- module_put(phydev->mdio.dev.driver->owner);
1546 ++ if (phydev->mdio.dev.driver)
1547 ++ module_put(phydev->mdio.dev.driver->owner);
1548 +
1549 + /* If the device had no specific driver before (i.e. - it
1550 + * was using the generic driver), we unbind the device
1551 +diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
1552 +index 48ced3912576c..16f33d1ffbfb9 100644
1553 +--- a/drivers/net/wan/hdlc_ppp.c
1554 ++++ b/drivers/net/wan/hdlc_ppp.c
1555 +@@ -383,11 +383,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
1556 + }
1557 +
1558 + for (opt = data; len; len -= opt[1], opt += opt[1]) {
1559 +- if (len < 2 || len < opt[1]) {
1560 +- dev->stats.rx_errors++;
1561 +- kfree(out);
1562 +- return; /* bad packet, drop silently */
1563 +- }
1564 ++ if (len < 2 || opt[1] < 2 || len < opt[1])
1565 ++ goto err_out;
1566 +
1567 + if (pid == PID_LCP)
1568 + switch (opt[0]) {
1569 +@@ -395,6 +392,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
1570 + continue; /* MRU always OK and > 1500 bytes? */
1571 +
1572 + case LCP_OPTION_ACCM: /* async control character map */
1573 ++ if (opt[1] < sizeof(valid_accm))
1574 ++ goto err_out;
1575 + if (!memcmp(opt, valid_accm,
1576 + sizeof(valid_accm)))
1577 + continue;
1578 +@@ -406,6 +405,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
1579 + }
1580 + break;
1581 + case LCP_OPTION_MAGIC:
1582 ++ if (len < 6)
1583 ++ goto err_out;
1584 + if (opt[1] != 6 || (!opt[2] && !opt[3] &&
1585 + !opt[4] && !opt[5]))
1586 + break; /* reject invalid magic number */
1587 +@@ -424,6 +425,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
1588 + ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);
1589 +
1590 + kfree(out);
1591 ++ return;
1592 ++
1593 ++err_out:
1594 ++ dev->stats.rx_errors++;
1595 ++ kfree(out);
1596 + }
1597 +
1598 + static int ppp_rx(struct sk_buff *skb)
1599 +diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
1600 +index 201a22681945f..27cb5045bed2d 100644
1601 +--- a/drivers/net/wireguard/noise.c
1602 ++++ b/drivers/net/wireguard/noise.c
1603 +@@ -87,15 +87,12 @@ static void handshake_zero(struct noise_handshake *handshake)
1604 +
1605 + void wg_noise_handshake_clear(struct noise_handshake *handshake)
1606 + {
1607 ++ down_write(&handshake->lock);
1608 + wg_index_hashtable_remove(
1609 + handshake->entry.peer->device->index_hashtable,
1610 + &handshake->entry);
1611 +- down_write(&handshake->lock);
1612 + handshake_zero(handshake);
1613 + up_write(&handshake->lock);
1614 +- wg_index_hashtable_remove(
1615 +- handshake->entry.peer->device->index_hashtable,
1616 +- &handshake->entry);
1617 + }
1618 +
1619 + static struct noise_keypair *keypair_create(struct wg_peer *peer)
1620 +diff --git a/drivers/net/wireguard/peerlookup.c b/drivers/net/wireguard/peerlookup.c
1621 +index e4deb331476b3..f2783aa7a88f1 100644
1622 +--- a/drivers/net/wireguard/peerlookup.c
1623 ++++ b/drivers/net/wireguard/peerlookup.c
1624 +@@ -167,9 +167,13 @@ bool wg_index_hashtable_replace(struct index_hashtable *table,
1625 + struct index_hashtable_entry *old,
1626 + struct index_hashtable_entry *new)
1627 + {
1628 +- if (unlikely(hlist_unhashed(&old->index_hash)))
1629 +- return false;
1630 ++ bool ret;
1631 ++
1632 + spin_lock_bh(&table->lock);
1633 ++ ret = !hlist_unhashed(&old->index_hash);
1634 ++ if (unlikely(!ret))
1635 ++ goto out;
1636 ++
1637 + new->index = old->index;
1638 + hlist_replace_rcu(&old->index_hash, &new->index_hash);
1639 +
1640 +@@ -180,8 +184,9 @@ bool wg_index_hashtable_replace(struct index_hashtable *table,
1641 + * simply gets dropped, which isn't terrible.
1642 + */
1643 + INIT_HLIST_NODE(&old->index_hash);
1644 ++out:
1645 + spin_unlock_bh(&table->lock);
1646 +- return true;
1647 ++ return ret;
1648 + }
1649 +
1650 + void wg_index_hashtable_remove(struct index_hashtable *table,
1651 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1652 +index 0c0377fc00c2a..1119463cf2425 100644
1653 +--- a/include/linux/skbuff.h
1654 ++++ b/include/linux/skbuff.h
1655 +@@ -3208,8 +3208,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
1656 + * is untouched. Otherwise it is extended. Returns zero on
1657 + * success. The skb is freed on error if @free_on_error is true.
1658 + */
1659 +-static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
1660 +- bool free_on_error)
1661 ++static inline int __must_check __skb_put_padto(struct sk_buff *skb,
1662 ++ unsigned int len,
1663 ++ bool free_on_error)
1664 + {
1665 + unsigned int size = skb->len;
1666 +
1667 +@@ -3232,7 +3233,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
1668 + * is untouched. Otherwise it is extended. Returns zero on
1669 + * success. The skb is freed on error.
1670 + */
1671 +-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
1672 ++static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
1673 + {
1674 + return __skb_put_padto(skb, len, true);
1675 + }
1676 +diff --git a/include/net/flow.h b/include/net/flow.h
1677 +index a50fb77a0b279..d058e63fb59a3 100644
1678 +--- a/include/net/flow.h
1679 ++++ b/include/net/flow.h
1680 +@@ -116,6 +116,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
1681 + fl4->saddr = saddr;
1682 + fl4->fl4_dport = dport;
1683 + fl4->fl4_sport = sport;
1684 ++ fl4->flowi4_multipath_hash = 0;
1685 + }
1686 +
1687 + /* Reset some input parameters after previous lookup */
1688 +diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
1689 +index fb42c90348d3b..f3c5d9d2f82d2 100644
1690 +--- a/include/net/sctp/structs.h
1691 ++++ b/include/net/sctp/structs.h
1692 +@@ -226,12 +226,14 @@ struct sctp_sock {
1693 + data_ready_signalled:1;
1694 +
1695 + atomic_t pd_mode;
1696 ++
1697 ++ /* Fields after this point will be skipped on copies, like on accept
1698 ++ * and peeloff operations
1699 ++ */
1700 ++
1701 + /* Receive to here while partial delivery is in effect. */
1702 + struct sk_buff_head pd_lobby;
1703 +
1704 +- /* These must be the last fields, as they will skipped on copies,
1705 +- * like on accept and peeloff operations
1706 +- */
1707 + struct list_head auto_asconf_list;
1708 + int do_auto_asconf;
1709 + };
1710 +diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
1711 +index f9092c71225fd..61c94cefa8436 100644
1712 +--- a/net/bridge/br_vlan.c
1713 ++++ b/net/bridge/br_vlan.c
1714 +@@ -1288,11 +1288,13 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
1715 + }
1716 + }
1717 +
1718 +-static int __br_vlan_get_pvid(const struct net_device *dev,
1719 +- struct net_bridge_port *p, u16 *p_pvid)
1720 ++int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1721 + {
1722 + struct net_bridge_vlan_group *vg;
1723 ++ struct net_bridge_port *p;
1724 +
1725 ++ ASSERT_RTNL();
1726 ++ p = br_port_get_check_rtnl(dev);
1727 + if (p)
1728 + vg = nbp_vlan_group(p);
1729 + else if (netif_is_bridge_master(dev))
1730 +@@ -1303,18 +1305,23 @@ static int __br_vlan_get_pvid(const struct net_device *dev,
1731 + *p_pvid = br_get_pvid(vg);
1732 + return 0;
1733 + }
1734 +-
1735 +-int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1736 +-{
1737 +- ASSERT_RTNL();
1738 +-
1739 +- return __br_vlan_get_pvid(dev, br_port_get_check_rtnl(dev), p_pvid);
1740 +-}
1741 + EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1742 +
1743 + int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
1744 + {
1745 +- return __br_vlan_get_pvid(dev, br_port_get_check_rcu(dev), p_pvid);
1746 ++ struct net_bridge_vlan_group *vg;
1747 ++ struct net_bridge_port *p;
1748 ++
1749 ++ p = br_port_get_check_rcu(dev);
1750 ++ if (p)
1751 ++ vg = nbp_vlan_group_rcu(p);
1752 ++ else if (netif_is_bridge_master(dev))
1753 ++ vg = br_vlan_group_rcu(netdev_priv(dev));
1754 ++ else
1755 ++ return -EINVAL;
1756 ++
1757 ++ *p_pvid = br_get_pvid(vg);
1758 ++ return 0;
1759 + }
1760 + EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
1761 +
1762 +diff --git a/net/core/dev.c b/net/core/dev.c
1763 +index 5bd0b550893fb..181b13e02bdc0 100644
1764 +--- a/net/core/dev.c
1765 ++++ b/net/core/dev.c
1766 +@@ -8641,7 +8641,7 @@ int dev_get_port_parent_id(struct net_device *dev,
1767 + if (!first.id_len)
1768 + first = *ppid;
1769 + else if (memcmp(&first, ppid, sizeof(*ppid)))
1770 +- return -ENODATA;
1771 ++ return -EOPNOTSUPP;
1772 + }
1773 +
1774 + return err;
1775 +diff --git a/net/core/filter.c b/net/core/filter.c
1776 +index a69e79327c29e..d13ea1642b974 100644
1777 +--- a/net/core/filter.c
1778 ++++ b/net/core/filter.c
1779 +@@ -4774,6 +4774,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
1780 + fl4.saddr = params->ipv4_src;
1781 + fl4.fl4_sport = params->sport;
1782 + fl4.fl4_dport = params->dport;
1783 ++ fl4.flowi4_multipath_hash = 0;
1784 +
1785 + if (flags & BPF_FIB_LOOKUP_DIRECT) {
1786 + u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
1787 +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
1788 +index dcd61aca343ec..944ab214e5ae8 100644
1789 +--- a/net/core/net_namespace.c
1790 ++++ b/net/core/net_namespace.c
1791 +@@ -251,10 +251,10 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
1792 + if (refcount_read(&net->count) == 0)
1793 + return NETNSA_NSID_NOT_ASSIGNED;
1794 +
1795 +- spin_lock(&net->nsid_lock);
1796 ++ spin_lock_bh(&net->nsid_lock);
1797 + id = __peernet2id(net, peer);
1798 + if (id >= 0) {
1799 +- spin_unlock(&net->nsid_lock);
1800 ++ spin_unlock_bh(&net->nsid_lock);
1801 + return id;
1802 + }
1803 +
1804 +@@ -264,12 +264,12 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
1805 + * just been idr_remove()'d from there in cleanup_net().
1806 + */
1807 + if (!maybe_get_net(peer)) {
1808 +- spin_unlock(&net->nsid_lock);
1809 ++ spin_unlock_bh(&net->nsid_lock);
1810 + return NETNSA_NSID_NOT_ASSIGNED;
1811 + }
1812 +
1813 + id = alloc_netid(net, peer, -1);
1814 +- spin_unlock(&net->nsid_lock);
1815 ++ spin_unlock_bh(&net->nsid_lock);
1816 +
1817 + put_net(peer);
1818 + if (id < 0)
1819 +@@ -534,20 +534,20 @@ static void unhash_nsid(struct net *net, struct net *last)
1820 + for_each_net(tmp) {
1821 + int id;
1822 +
1823 +- spin_lock(&tmp->nsid_lock);
1824 ++ spin_lock_bh(&tmp->nsid_lock);
1825 + id = __peernet2id(tmp, net);
1826 + if (id >= 0)
1827 + idr_remove(&tmp->netns_ids, id);
1828 +- spin_unlock(&tmp->nsid_lock);
1829 ++ spin_unlock_bh(&tmp->nsid_lock);
1830 + if (id >= 0)
1831 + rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
1832 + GFP_KERNEL);
1833 + if (tmp == last)
1834 + break;
1835 + }
1836 +- spin_lock(&net->nsid_lock);
1837 ++ spin_lock_bh(&net->nsid_lock);
1838 + idr_destroy(&net->netns_ids);
1839 +- spin_unlock(&net->nsid_lock);
1840 ++ spin_unlock_bh(&net->nsid_lock);
1841 + }
1842 +
1843 + static LLIST_HEAD(cleanup_list);
1844 +@@ -760,9 +760,9 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
1845 + return PTR_ERR(peer);
1846 + }
1847 +
1848 +- spin_lock(&net->nsid_lock);
1849 ++ spin_lock_bh(&net->nsid_lock);
1850 + if (__peernet2id(net, peer) >= 0) {
1851 +- spin_unlock(&net->nsid_lock);
1852 ++ spin_unlock_bh(&net->nsid_lock);
1853 + err = -EEXIST;
1854 + NL_SET_BAD_ATTR(extack, nla);
1855 + NL_SET_ERR_MSG(extack,
1856 +@@ -771,7 +771,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
1857 + }
1858 +
1859 + err = alloc_netid(net, peer, nsid);
1860 +- spin_unlock(&net->nsid_lock);
1861 ++ spin_unlock_bh(&net->nsid_lock);
1862 + if (err >= 0) {
1863 + rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
1864 + nlh, GFP_KERNEL);
1865 +diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
1866 +index d2a4553bcf39d..0fd1c2aa13615 100644
1867 +--- a/net/dcb/dcbnl.c
1868 ++++ b/net/dcb/dcbnl.c
1869 +@@ -1426,6 +1426,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
1870 + {
1871 + const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1872 + struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1873 ++ int prio;
1874 + int err;
1875 +
1876 + if (!ops)
1877 +@@ -1475,6 +1476,13 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
1878 + struct dcbnl_buffer *buffer =
1879 + nla_data(ieee[DCB_ATTR_DCB_BUFFER]);
1880 +
1881 ++ for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
1882 ++ if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
1883 ++ err = -EINVAL;
1884 ++ goto err;
1885 ++ }
1886 ++ }
1887 ++
1888 + err = ops->dcbnl_setbuffer(netdev, buffer);
1889 + if (err)
1890 + goto err;
1891 +diff --git a/net/dsa/slave.c b/net/dsa/slave.c
1892 +index 4c7f086a047b1..3f7be8c64c504 100644
1893 +--- a/net/dsa/slave.c
1894 ++++ b/net/dsa/slave.c
1895 +@@ -1801,15 +1801,27 @@ int dsa_slave_create(struct dsa_port *port)
1896 +
1897 + dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
1898 +
1899 +- ret = register_netdev(slave_dev);
1900 ++ rtnl_lock();
1901 ++
1902 ++ ret = register_netdevice(slave_dev);
1903 + if (ret) {
1904 + netdev_err(master, "error %d registering interface %s\n",
1905 + ret, slave_dev->name);
1906 ++ rtnl_unlock();
1907 + goto out_phy;
1908 + }
1909 +
1910 ++ ret = netdev_upper_dev_link(master, slave_dev, NULL);
1911 ++
1912 ++ rtnl_unlock();
1913 ++
1914 ++ if (ret)
1915 ++ goto out_unregister;
1916 ++
1917 + return 0;
1918 +
1919 ++out_unregister:
1920 ++ unregister_netdev(slave_dev);
1921 + out_phy:
1922 + rtnl_lock();
1923 + phylink_disconnect_phy(p->dp->pl);
1924 +@@ -1826,16 +1838,18 @@ out_free:
1925 +
1926 + void dsa_slave_destroy(struct net_device *slave_dev)
1927 + {
1928 ++ struct net_device *master = dsa_slave_to_master(slave_dev);
1929 + struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1930 + struct dsa_slave_priv *p = netdev_priv(slave_dev);
1931 +
1932 + netif_carrier_off(slave_dev);
1933 + rtnl_lock();
1934 ++ netdev_upper_dev_unlink(master, slave_dev);
1935 ++ unregister_netdevice(slave_dev);
1936 + phylink_disconnect_phy(dp->pl);
1937 + rtnl_unlock();
1938 +
1939 + dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
1940 +- unregister_netdev(slave_dev);
1941 + phylink_destroy(dp->pl);
1942 + gro_cells_destroy(&p->gcells);
1943 + free_percpu(p->stats64);
1944 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
1945 +index 41079490a1181..86a23e4a6a50f 100644
1946 +--- a/net/ipv4/fib_frontend.c
1947 ++++ b/net/ipv4/fib_frontend.c
1948 +@@ -362,6 +362,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
1949 + fl4.flowi4_tun_key.tun_id = 0;
1950 + fl4.flowi4_flags = 0;
1951 + fl4.flowi4_uid = sock_net_uid(net, NULL);
1952 ++ fl4.flowi4_multipath_hash = 0;
1953 +
1954 + no_addr = idev->ifa_list == NULL;
1955 +
1956 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
1957 +index 17206677d5033..f09a188397165 100644
1958 +--- a/net/ipv4/ip_output.c
1959 ++++ b/net/ipv4/ip_output.c
1960 +@@ -74,6 +74,7 @@
1961 + #include <net/icmp.h>
1962 + #include <net/checksum.h>
1963 + #include <net/inetpeer.h>
1964 ++#include <net/inet_ecn.h>
1965 + #include <net/lwtunnel.h>
1966 + #include <linux/bpf-cgroup.h>
1967 + #include <linux/igmp.h>
1968 +@@ -1697,7 +1698,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
1969 + if (IS_ERR(rt))
1970 + return;
1971 +
1972 +- inet_sk(sk)->tos = arg->tos;
1973 ++ inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
1974 +
1975 + sk->sk_protocol = ip_hdr(skb)->protocol;
1976 + sk->sk_bound_dev_if = arg->bound_dev_if;
1977 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1978 +index a01efa062f6bc..37f1288894747 100644
1979 +--- a/net/ipv4/route.c
1980 ++++ b/net/ipv4/route.c
1981 +@@ -786,8 +786,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
1982 + neigh_event_send(n, NULL);
1983 + } else {
1984 + if (fib_lookup(net, fl4, &res, 0) == 0) {
1985 +- struct fib_nh_common *nhc = FIB_RES_NHC(res);
1986 ++ struct fib_nh_common *nhc;
1987 +
1988 ++ fib_select_path(net, &res, fl4, skb);
1989 ++ nhc = FIB_RES_NHC(res);
1990 + update_or_create_fnhe(nhc, fl4->daddr, new_gw,
1991 + 0, false,
1992 + jiffies + ip_rt_gc_timeout);
1993 +@@ -1013,6 +1015,7 @@ out: kfree_skb(skb);
1994 + static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1995 + {
1996 + struct dst_entry *dst = &rt->dst;
1997 ++ struct net *net = dev_net(dst->dev);
1998 + u32 old_mtu = ipv4_mtu(dst);
1999 + struct fib_result res;
2000 + bool lock = false;
2001 +@@ -1033,9 +1036,11 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
2002 + return;
2003 +
2004 + rcu_read_lock();
2005 +- if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
2006 +- struct fib_nh_common *nhc = FIB_RES_NHC(res);
2007 ++ if (fib_lookup(net, fl4, &res, 0) == 0) {
2008 ++ struct fib_nh_common *nhc;
2009 +
2010 ++ fib_select_path(net, &res, fl4, NULL);
2011 ++ nhc = FIB_RES_NHC(res);
2012 + update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
2013 + jiffies + ip_rt_mtu_expires);
2014 + }
2015 +@@ -2142,6 +2147,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2016 + fl4.daddr = daddr;
2017 + fl4.saddr = saddr;
2018 + fl4.flowi4_uid = sock_net_uid(net, NULL);
2019 ++ fl4.flowi4_multipath_hash = 0;
2020 +
2021 + if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2022 + flkeys = &_flkeys;
2023 +@@ -2662,8 +2668,6 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2024 + fib_select_path(net, res, fl4, skb);
2025 +
2026 + dev_out = FIB_RES_DEV(*res);
2027 +- fl4->flowi4_oif = dev_out->ifindex;
2028 +-
2029 +
2030 + make_route:
2031 + rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2032 +diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
2033 +index f4f19e89af5ed..9d66af9e4c7fe 100644
2034 +--- a/net/ipv6/Kconfig
2035 ++++ b/net/ipv6/Kconfig
2036 +@@ -303,6 +303,7 @@ config IPV6_SEG6_LWTUNNEL
2037 + config IPV6_SEG6_HMAC
2038 + bool "IPv6: Segment Routing HMAC support"
2039 + depends on IPV6
2040 ++ select CRYPTO
2041 + select CRYPTO_HMAC
2042 + select CRYPTO_SHA1
2043 + select CRYPTO_SHA256
2044 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2045 +index 49ee89bbcba0c..3c32dcb5fd8e2 100644
2046 +--- a/net/ipv6/ip6_fib.c
2047 ++++ b/net/ipv6/ip6_fib.c
2048 +@@ -1992,14 +1992,19 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
2049 + /* Need to own table->tb6_lock */
2050 + int fib6_del(struct fib6_info *rt, struct nl_info *info)
2051 + {
2052 +- struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
2053 +- lockdep_is_held(&rt->fib6_table->tb6_lock));
2054 +- struct fib6_table *table = rt->fib6_table;
2055 + struct net *net = info->nl_net;
2056 + struct fib6_info __rcu **rtp;
2057 + struct fib6_info __rcu **rtp_next;
2058 ++ struct fib6_table *table;
2059 ++ struct fib6_node *fn;
2060 ++
2061 ++ if (rt == net->ipv6.fib6_null_entry)
2062 ++ return -ENOENT;
2063 +
2064 +- if (!fn || rt == net->ipv6.fib6_null_entry)
2065 ++ table = rt->fib6_table;
2066 ++ fn = rcu_dereference_protected(rt->fib6_node,
2067 ++ lockdep_is_held(&table->tb6_lock));
2068 ++ if (!fn)
2069 + return -ENOENT;
2070 +
2071 + WARN_ON(!(fn->fn_flags & RTN_RTINFO));
2072 +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
2073 +index 85ab4559f0577..0f77e24a5152e 100644
2074 +--- a/net/qrtr/qrtr.c
2075 ++++ b/net/qrtr/qrtr.c
2076 +@@ -332,8 +332,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
2077 + {
2078 + struct qrtr_hdr_v1 *hdr;
2079 + size_t len = skb->len;
2080 +- int rc = -ENODEV;
2081 +- int confirm_rx;
2082 ++ int rc, confirm_rx;
2083 +
2084 + confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type);
2085 + if (confirm_rx < 0) {
2086 +@@ -357,15 +356,17 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
2087 + hdr->size = cpu_to_le32(len);
2088 + hdr->confirm_rx = !!confirm_rx;
2089 +
2090 +- skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
2091 +-
2092 +- mutex_lock(&node->ep_lock);
2093 +- if (node->ep)
2094 +- rc = node->ep->xmit(node->ep, skb);
2095 +- else
2096 +- kfree_skb(skb);
2097 +- mutex_unlock(&node->ep_lock);
2098 ++ rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
2099 +
2100 ++ if (!rc) {
2101 ++ mutex_lock(&node->ep_lock);
2102 ++ rc = -ENODEV;
2103 ++ if (node->ep)
2104 ++ rc = node->ep->xmit(node->ep, skb);
2105 ++ else
2106 ++ kfree_skb(skb);
2107 ++ mutex_unlock(&node->ep_lock);
2108 ++ }
2109 + /* Need to ensure that a subsequent message carries the otherwise lost
2110 + * confirm_rx flag if we dropped this one */
2111 + if (rc && confirm_rx)
2112 +diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
2113 +index c1fcd85719d6a..5c568757643b2 100644
2114 +--- a/net/sched/act_ife.c
2115 ++++ b/net/sched/act_ife.c
2116 +@@ -436,6 +436,25 @@ static void tcf_ife_cleanup(struct tc_action *a)
2117 + kfree_rcu(p, rcu);
2118 + }
2119 +
2120 ++static int load_metalist(struct nlattr **tb, bool rtnl_held)
2121 ++{
2122 ++ int i;
2123 ++
2124 ++ for (i = 1; i < max_metacnt; i++) {
2125 ++ if (tb[i]) {
2126 ++ void *val = nla_data(tb[i]);
2127 ++ int len = nla_len(tb[i]);
2128 ++ int rc;
2129 ++
2130 ++ rc = load_metaops_and_vet(i, val, len, rtnl_held);
2131 ++ if (rc != 0)
2132 ++ return rc;
2133 ++ }
2134 ++ }
2135 ++
2136 ++ return 0;
2137 ++}
2138 ++
2139 + static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
2140 + bool exists, bool rtnl_held)
2141 + {
2142 +@@ -449,10 +468,6 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
2143 + val = nla_data(tb[i]);
2144 + len = nla_len(tb[i]);
2145 +
2146 +- rc = load_metaops_and_vet(i, val, len, rtnl_held);
2147 +- if (rc != 0)
2148 +- return rc;
2149 +-
2150 + rc = add_metainfo(ife, i, val, len, exists);
2151 + if (rc)
2152 + return rc;
2153 +@@ -509,6 +524,21 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
2154 + if (!p)
2155 + return -ENOMEM;
2156 +
2157 ++ if (tb[TCA_IFE_METALST]) {
2158 ++ err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
2159 ++ tb[TCA_IFE_METALST], NULL,
2160 ++ NULL);
2161 ++ if (err) {
2162 ++ kfree(p);
2163 ++ return err;
2164 ++ }
2165 ++ err = load_metalist(tb2, rtnl_held);
2166 ++ if (err) {
2167 ++ kfree(p);
2168 ++ return err;
2169 ++ }
2170 ++ }
2171 ++
2172 + index = parm->index;
2173 + err = tcf_idr_check_alloc(tn, &index, a, bind);
2174 + if (err < 0) {
2175 +@@ -570,15 +600,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
2176 + }
2177 +
2178 + if (tb[TCA_IFE_METALST]) {
2179 +- err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
2180 +- tb[TCA_IFE_METALST], NULL,
2181 +- NULL);
2182 +- if (err)
2183 +- goto metadata_parse_err;
2184 + err = populate_metalist(ife, tb2, exists, rtnl_held);
2185 + if (err)
2186 + goto metadata_parse_err;
2187 +-
2188 + } else {
2189 + /* if no passed metadata allow list or passed allow-all
2190 + * then here we process by adding as many supported metadatum
2191 +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
2192 +index e30bd969fc485..5fe145d97f52e 100644
2193 +--- a/net/sched/cls_flower.c
2194 ++++ b/net/sched/cls_flower.c
2195 +@@ -1215,6 +1215,7 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
2196 + }
2197 + if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
2198 + nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
2199 ++ memset(&md->u, 0x00, sizeof(md->u));
2200 + md->u.index = nla_get_be32(nla);
2201 + }
2202 + } else if (md->version == 2) {
2203 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
2204 +index 265a61d011dfa..54c417244642a 100644
2205 +--- a/net/sched/sch_generic.c
2206 ++++ b/net/sched/sch_generic.c
2207 +@@ -1131,24 +1131,10 @@ EXPORT_SYMBOL(dev_activate);
2208 +
2209 + static void qdisc_deactivate(struct Qdisc *qdisc)
2210 + {
2211 +- bool nolock = qdisc->flags & TCQ_F_NOLOCK;
2212 +-
2213 + if (qdisc->flags & TCQ_F_BUILTIN)
2214 + return;
2215 +- if (test_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state))
2216 +- return;
2217 +-
2218 +- if (nolock)
2219 +- spin_lock_bh(&qdisc->seqlock);
2220 +- spin_lock_bh(qdisc_lock(qdisc));
2221 +
2222 + set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
2223 +-
2224 +- qdisc_reset(qdisc);
2225 +-
2226 +- spin_unlock_bh(qdisc_lock(qdisc));
2227 +- if (nolock)
2228 +- spin_unlock_bh(&qdisc->seqlock);
2229 + }
2230 +
2231 + static void dev_deactivate_queue(struct net_device *dev,
2232 +@@ -1165,6 +1151,30 @@ static void dev_deactivate_queue(struct net_device *dev,
2233 + }
2234 + }
2235 +
2236 ++static void dev_reset_queue(struct net_device *dev,
2237 ++ struct netdev_queue *dev_queue,
2238 ++ void *_unused)
2239 ++{
2240 ++ struct Qdisc *qdisc;
2241 ++ bool nolock;
2242 ++
2243 ++ qdisc = dev_queue->qdisc_sleeping;
2244 ++ if (!qdisc)
2245 ++ return;
2246 ++
2247 ++ nolock = qdisc->flags & TCQ_F_NOLOCK;
2248 ++
2249 ++ if (nolock)
2250 ++ spin_lock_bh(&qdisc->seqlock);
2251 ++ spin_lock_bh(qdisc_lock(qdisc));
2252 ++
2253 ++ qdisc_reset(qdisc);
2254 ++
2255 ++ spin_unlock_bh(qdisc_lock(qdisc));
2256 ++ if (nolock)
2257 ++ spin_unlock_bh(&qdisc->seqlock);
2258 ++}
2259 ++
2260 + static bool some_qdisc_is_busy(struct net_device *dev)
2261 + {
2262 + unsigned int i;
2263 +@@ -1213,12 +1223,20 @@ void dev_deactivate_many(struct list_head *head)
2264 + dev_watchdog_down(dev);
2265 + }
2266 +
2267 +- /* Wait for outstanding qdisc-less dev_queue_xmit calls.
2268 ++ /* Wait for outstanding qdisc-less dev_queue_xmit calls or
2269 ++ * outstanding qdisc enqueuing calls.
2270 + * This is avoided if all devices are in dismantle phase :
2271 + * Caller will call synchronize_net() for us
2272 + */
2273 + synchronize_net();
2274 +
2275 ++ list_for_each_entry(dev, head, close_list) {
2276 ++ netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
2277 ++
2278 ++ if (dev_ingress_queue(dev))
2279 ++ dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
2280 ++ }
2281 ++
2282 + /* Wait for outstanding qdisc_run calls. */
2283 + list_for_each_entry(dev, head, close_list) {
2284 + while (some_qdisc_is_busy(dev)) {
2285 +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
2286 +index 6a5086e586efb..2b797a71e9bda 100644
2287 +--- a/net/sched/sch_taprio.c
2288 ++++ b/net/sched/sch_taprio.c
2289 +@@ -777,9 +777,11 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
2290 + [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
2291 + };
2292 +
2293 +-static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
2294 ++static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
2295 ++ struct sched_entry *entry,
2296 + struct netlink_ext_ack *extack)
2297 + {
2298 ++ int min_duration = length_to_duration(q, ETH_ZLEN);
2299 + u32 interval = 0;
2300 +
2301 + if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
2302 +@@ -794,7 +796,10 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
2303 + interval = nla_get_u32(
2304 + tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
2305 +
2306 +- if (interval == 0) {
2307 ++ /* The interval should allow at least the minimum ethernet
2308 ++ * frame to go out.
2309 ++ */
2310 ++ if (interval < min_duration) {
2311 + NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
2312 + return -EINVAL;
2313 + }
2314 +@@ -804,8 +809,9 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
2315 + return 0;
2316 + }
2317 +
2318 +-static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
2319 +- int index, struct netlink_ext_ack *extack)
2320 ++static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
2321 ++ struct sched_entry *entry, int index,
2322 ++ struct netlink_ext_ack *extack)
2323 + {
2324 + struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
2325 + int err;
2326 +@@ -819,10 +825,10 @@ static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
2327 +
2328 + entry->index = index;
2329 +
2330 +- return fill_sched_entry(tb, entry, extack);
2331 ++ return fill_sched_entry(q, tb, entry, extack);
2332 + }
2333 +
2334 +-static int parse_sched_list(struct nlattr *list,
2335 ++static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
2336 + struct sched_gate_list *sched,
2337 + struct netlink_ext_ack *extack)
2338 + {
2339 +@@ -847,7 +853,7 @@ static int parse_sched_list(struct nlattr *list,
2340 + return -ENOMEM;
2341 + }
2342 +
2343 +- err = parse_sched_entry(n, entry, i, extack);
2344 ++ err = parse_sched_entry(q, n, entry, i, extack);
2345 + if (err < 0) {
2346 + kfree(entry);
2347 + return err;
2348 +@@ -862,7 +868,7 @@ static int parse_sched_list(struct nlattr *list,
2349 + return i;
2350 + }
2351 +
2352 +-static int parse_taprio_schedule(struct nlattr **tb,
2353 ++static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
2354 + struct sched_gate_list *new,
2355 + struct netlink_ext_ack *extack)
2356 + {
2357 +@@ -883,8 +889,8 @@ static int parse_taprio_schedule(struct nlattr **tb,
2358 + new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
2359 +
2360 + if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
2361 +- err = parse_sched_list(
2362 +- tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
2363 ++ err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
2364 ++ new, extack);
2365 + if (err < 0)
2366 + return err;
2367 +
2368 +@@ -1474,7 +1480,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
2369 + goto free_sched;
2370 + }
2371 +
2372 +- err = parse_taprio_schedule(tb, new_admin, extack);
2373 ++ err = parse_taprio_schedule(q, tb, new_admin, extack);
2374 + if (err < 0)
2375 + goto free_sched;
2376 +
2377 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2378 +index fa20e945700e0..102aee4f7dfde 100644
2379 +--- a/net/sctp/socket.c
2380 ++++ b/net/sctp/socket.c
2381 +@@ -9457,13 +9457,10 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
2382 + static inline void sctp_copy_descendant(struct sock *sk_to,
2383 + const struct sock *sk_from)
2384 + {
2385 +- int ancestor_size = sizeof(struct inet_sock) +
2386 +- sizeof(struct sctp_sock) -
2387 +- offsetof(struct sctp_sock, pd_lobby);
2388 +-
2389 +- if (sk_from->sk_family == PF_INET6)
2390 +- ancestor_size += sizeof(struct ipv6_pinfo);
2391 ++ size_t ancestor_size = sizeof(struct inet_sock);
2392 +
2393 ++ ancestor_size += sk_from->sk_prot->obj_size;
2394 ++ ancestor_size -= offsetof(struct sctp_sock, pd_lobby);
2395 + __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
2396 + }
2397 +
2398 +diff --git a/net/tipc/group.c b/net/tipc/group.c
2399 +index 89257e2a980de..f53871baa42eb 100644
2400 +--- a/net/tipc/group.c
2401 ++++ b/net/tipc/group.c
2402 +@@ -273,8 +273,8 @@ static struct tipc_member *tipc_group_find_node(struct tipc_group *grp,
2403 + return NULL;
2404 + }
2405 +
2406 +-static void tipc_group_add_to_tree(struct tipc_group *grp,
2407 +- struct tipc_member *m)
2408 ++static int tipc_group_add_to_tree(struct tipc_group *grp,
2409 ++ struct tipc_member *m)
2410 + {
2411 + u64 nkey, key = (u64)m->node << 32 | m->port;
2412 + struct rb_node **n, *parent = NULL;
2413 +@@ -291,10 +291,11 @@ static void tipc_group_add_to_tree(struct tipc_group *grp,
2414 + else if (key > nkey)
2415 + n = &(*n)->rb_right;
2416 + else
2417 +- return;
2418 ++ return -EEXIST;
2419 + }
2420 + rb_link_node(&m->tree_node, parent, n);
2421 + rb_insert_color(&m->tree_node, &grp->members);
2422 ++ return 0;
2423 + }
2424 +
2425 + static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
2426 +@@ -302,6 +303,7 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
2427 + u32 instance, int state)
2428 + {
2429 + struct tipc_member *m;
2430 ++ int ret;
2431 +
2432 + m = kzalloc(sizeof(*m), GFP_ATOMIC);
2433 + if (!m)
2434 +@@ -314,8 +316,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
2435 + m->port = port;
2436 + m->instance = instance;
2437 + m->bc_acked = grp->bc_snd_nxt - 1;
2438 ++ ret = tipc_group_add_to_tree(grp, m);
2439 ++ if (ret < 0) {
2440 ++ kfree(m);
2441 ++ return NULL;
2442 ++ }
2443 + grp->member_cnt++;
2444 +- tipc_group_add_to_tree(grp, m);
2445 + tipc_nlist_add(&grp->dests, m->node);
2446 + m->state = state;
2447 + return m;
2448 +diff --git a/net/tipc/msg.c b/net/tipc/msg.c
2449 +index 01b64869a1739..2776a41e0dece 100644
2450 +--- a/net/tipc/msg.c
2451 ++++ b/net/tipc/msg.c
2452 +@@ -150,7 +150,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
2453 + if (fragid == FIRST_FRAGMENT) {
2454 + if (unlikely(head))
2455 + goto err;
2456 +- if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
2457 ++ frag = skb_unshare(frag, GFP_ATOMIC);
2458 ++ if (unlikely(!frag))
2459 + goto err;
2460 + head = *headbuf = frag;
2461 + *buf = NULL;
2462 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
2463 +index 79cc84393f932..59c9e592b0a25 100644
2464 +--- a/net/tipc/socket.c
2465 ++++ b/net/tipc/socket.c
2466 +@@ -2773,10 +2773,7 @@ static int tipc_shutdown(struct socket *sock, int how)
2467 +
2468 + trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2469 + __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2470 +- if (tipc_sk_type_connectionless(sk))
2471 +- sk->sk_shutdown = SHUTDOWN_MASK;
2472 +- else
2473 +- sk->sk_shutdown = SEND_SHUTDOWN;
2474 ++ sk->sk_shutdown = SHUTDOWN_MASK;
2475 +
2476 + if (sk->sk_state == TIPC_DISCONNECTING) {
2477 + /* Discard any unreceived messages */