commit: 39ab397fef7a1a3b1656abfef80a92a913c61a99
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Sat Sep 26 21:59:11 2020 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Sat Sep 26 21:59:11 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=39ab397f

Linux patch 5.4.68

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README             |    4 +
 1067_linux-5.4.68.patch | 1672 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1676 insertions(+)

diff --git a/0000_README b/0000_README
index e42eb75..83f469d 100644
--- a/0000_README
+++ b/0000_README
@@ -311,6 +311,10 @@ Patch: 1066_linux-5.4.67.patch
 From: http://www.kernel.org
 Desc: Linux 5.4.67
 
+Patch: 1067_linux-5.4.68.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.68
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1067_linux-5.4.68.patch b/1067_linux-5.4.68.patch
new file mode 100644
index 0000000..9e3a07e
--- /dev/null
+++ b/1067_linux-5.4.68.patch
@@ -0,0 +1,1672 @@
+diff --git a/Makefile b/Makefile
+index d2e46ca4c955b..acb2499d9b053 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 67
++SUBLEVEL = 68
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
+index 390568afee9fd..fc0160e8ed334 100644
+--- a/drivers/iommu/Kconfig
++++ b/drivers/iommu/Kconfig
+@@ -138,7 +138,7 @@ config AMD_IOMMU
+ select PCI_PASID
+ select IOMMU_API
+ select IOMMU_IOVA
+- depends on X86_64 && PCI && ACPI
++ depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
+ ---help---
+ With this option you can enable support for AMD IOMMU hardware in
+ your system. An IOMMU is a hardware component which provides
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index fa91d856a43ee..7b724f7b27a99 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -3873,6 +3873,7 @@ out:
+ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
+ struct amd_ir_data *data)
+ {
++ bool ret;
+ struct irq_remap_table *table;
+ struct amd_iommu *iommu;
+ unsigned long flags;
+@@ -3890,10 +3891,18 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
+ 
+ entry = (struct irte_ga *)table->table;
+ entry = &entry[index];
+- entry->lo.fields_remap.valid = 0;
+- entry->hi.val = irte->hi.val;
+- entry->lo.val = irte->lo.val;
+- entry->lo.fields_remap.valid = 1;
++
++ ret = cmpxchg_double(&entry->lo.val, &entry->hi.val,
++ entry->lo.val, entry->hi.val,
++ irte->lo.val, irte->hi.val);
++ /*
++ * We use cmpxchg16 to atomically update the 128-bit IRTE,
++ * and it cannot be updated by the hardware or other processors
++ * behind us, so the return value of cmpxchg16 should be the
++ * same as the old value.
++ */
++ WARN_ON(!ret);
++
+ if (data)
+ data->ref = entry;
+ 
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 135ae5222cf35..31d7e2d4f3040 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -1522,7 +1522,14 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+ iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+ else
+ iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+- if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
++
++ /*
++ * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports.
++ * GAM also requires GA mode. Therefore, we need to
++ * check cmpxchg16b support before enabling it.
++ */
++ if (!boot_cpu_has(X86_FEATURE_CX16) ||
++ ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ break;
+ case 0x11:
+@@ -1531,8 +1538,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+ iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+ else
+ iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+- if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
++
++ /*
++ * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports.
++ * XT, GAM also requires GA mode. Therefore, we need to
++ * check cmpxchg16b support before enabling them.
++ */
++ if (!boot_cpu_has(X86_FEATURE_CX16) ||
++ ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
++ break;
++ }
++
+ /*
+ * Note: Since iommu_update_intcapxt() leverages
+ * the IOMMU MMIO access to MSI capability block registers
+diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
+index 1368816abaed1..99cdb2f18fa2f 100644
+--- a/drivers/net/dsa/rtl8366.c
++++ b/drivers/net/dsa/rtl8366.c
+@@ -452,13 +452,19 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
+ return ret;
+ 
+ if (vid == vlanmc.vid) {
+- /* clear VLAN member configurations */
+- vlanmc.vid = 0;
+- vlanmc.priority = 0;
+- vlanmc.member = 0;
+- vlanmc.untag = 0;
+- vlanmc.fid = 0;
+-
++ /* Remove this port from the VLAN */
++ vlanmc.member &= ~BIT(port);
++ vlanmc.untag &= ~BIT(port);
++ /*
++ * If no ports are members of this VLAN
++ * anymore then clear the whole member
++ * config so it can be reused.
++ */
++ if (!vlanmc.member && vlanmc.untag) {
++ vlanmc.vid = 0;
++ vlanmc.priority = 0;
++ vlanmc.fid = 0;
++ }
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret) {
+ dev_err(smi->dev,
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 4030020f92be5..4f4fd80762610 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4204,7 +4204,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
+ u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
+ u16 dst = BNXT_HWRM_CHNL_CHIMP;
+ 
+- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
++ if (BNXT_NO_FW_ACCESS(bp))
+ return -EBUSY;
+ 
+ if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
+@@ -5539,7 +5539,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
+ struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 error_code;
+ 
+- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
++ if (BNXT_NO_FW_ACCESS(bp))
+ return 0;
+ 
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
+@@ -7454,7 +7454,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
+ 
+ if (set_tpa)
+ tpa_flags = bp->flags & BNXT_FLAG_TPA;
+- else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
++ else if (BNXT_NO_FW_ACCESS(bp))
+ return 0;
+ for (i = 0; i < bp->nr_vnics; i++) {
+ rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+@@ -8939,18 +8939,16 @@ static ssize_t bnxt_show_temp(struct device *dev,
+ struct hwrm_temp_monitor_query_output *resp;
+ struct bnxt *bp = dev_get_drvdata(dev);
+ u32 len = 0;
++ int rc;
+ 
+ resp = bp->hwrm_cmd_resp_addr;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+- if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
++ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++ if (!rc)
+ len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
+ mutex_unlock(&bp->hwrm_cmd_lock);
+-
+- if (len)
+- return len;
+-
+- return sprintf(buf, "unknown\n");
++ return rc ?: len;
+ }
+ static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
+ 
+@@ -8970,7 +8968,16 @@ static void bnxt_hwmon_close(struct bnxt *bp)
+ 
+ static void bnxt_hwmon_open(struct bnxt *bp)
+ {
++ struct hwrm_temp_monitor_query_input req = {0};
+ struct pci_dev *pdev = bp->pdev;
++ int rc;
++
++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
++ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++ if (rc == -EACCES || rc == -EOPNOTSUPP) {
++ bnxt_hwmon_close(bp);
++ return;
++ }
+ 
+ if (bp->hwmon_dev)
+ return;
+@@ -11385,14 +11392,15 @@ static void bnxt_remove_one(struct pci_dev *pdev)
+ if (BNXT_PF(bp))
+ bnxt_sriov_disable(bp);
+ 
++ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
++ bnxt_cancel_sp_work(bp);
++ bp->sp_event = 0;
++
+ bnxt_dl_fw_reporters_destroy(bp, true);
+ pci_disable_pcie_error_reporting(pdev);
+ unregister_netdev(dev);
+ bnxt_dl_unregister(bp);
+ bnxt_shutdown_tc(bp);
+- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+- bnxt_cancel_sp_work(bp);
+- bp->sp_event = 0;
+ 
+ bnxt_clear_int_mode(bp);
+ bnxt_hwrm_func_drv_unrgtr(bp);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index a61a5873ab0a7..d2dd852d27da9 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1628,6 +1628,10 @@ struct bnxt {
+ #define BNXT_STATE_ABORT_ERR 5
+ #define BNXT_STATE_FW_FATAL_COND 6
+ 
++#define BNXT_NO_FW_ACCESS(bp) \
++ (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
++ pci_channel_offline((bp)->pdev))
++
+ struct bnxt_irq *irq_tbl;
+ int total_irqs;
+ u8 mac_addr[ETH_ALEN];
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index fd01bcc8e28d4..1d15ff08f176b 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -1665,9 +1665,12 @@ static int bnxt_set_pauseparam(struct net_device *dev,
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
+ 
++ mutex_lock(&bp->link_lock);
+ if (epause->autoneg) {
+- if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
+- return -EINVAL;
++ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
++ rc = -EINVAL;
++ goto pause_exit;
++ }
+ 
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ if (bp->hwrm_spec_code >= 0x10201)
+@@ -1688,11 +1691,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,
+ if (epause->tx_pause)
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
+ 
+- if (netif_running(dev)) {
+- mutex_lock(&bp->link_lock);
++ if (netif_running(dev))
+ rc = bnxt_hwrm_set_pause(bp);
+- mutex_unlock(&bp->link_lock);
+- }
++
++pause_exit:
++ mutex_unlock(&bp->link_lock);
+ return rc;
+ }
+ 
+@@ -2397,8 +2400,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+ struct bnxt *bp = netdev_priv(dev);
+ struct ethtool_eee *eee = &bp->eee;
+ struct bnxt_link_info *link_info = &bp->link_info;
+- u32 advertising =
+- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
++ u32 advertising;
+ int rc = 0;
+ 
+ if (!BNXT_SINGLE_PF(bp))
+@@ -2407,19 +2409,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+ if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ return -EOPNOTSUPP;
+ 
++ mutex_lock(&bp->link_lock);
++ advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ if (!edata->eee_enabled)
+ goto eee_ok;
+ 
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ netdev_warn(dev, "EEE requires autoneg\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto eee_exit;
+ }
+ if (edata->tx_lpi_enabled) {
+ if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
+ edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
+ netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
+ bp->lpi_tmr_lo, bp->lpi_tmr_hi);
+- return -EINVAL;
++ rc = -EINVAL;
++ goto eee_exit;
+ } else if (!bp->lpi_tmr_hi) {
+ edata->tx_lpi_timer = eee->tx_lpi_timer;
+ }
+@@ -2429,7 +2435,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+ } else if (edata->advertised & ~advertising) {
+ netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
+ edata->advertised, advertising);
+- return -EINVAL;
++ rc = -EINVAL;
++ goto eee_exit;
+ }
+ 
+ eee->advertised = edata->advertised;
+@@ -2441,6 +2448,8 @@ eee_ok:
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_link_setting(bp, false, true);
+ 
++eee_exit:
++ mutex_unlock(&bp->link_lock);
+ return rc;
+ }
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index f459313357c78..137ff00605d94 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -1617,13 +1617,16 @@ out:
+ static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
+ struct filter_entry *f)
+ {
+- if (f->fs.hitcnts)
++ if (f->fs.hitcnts) {
+ set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
+- TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) |
++ TCB_TIMESTAMP_V(TCB_TIMESTAMP_M),
++ TCB_TIMESTAMP_V(0ULL),
++ 1);
++ set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W,
+ TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
+- TCB_TIMESTAMP_V(0ULL) |
+ TCB_RTT_TS_RECENT_AGE_V(0ULL),
+ 1);
++ }
+ 
+ if (f->fs.newdmac)
+ set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
+index b1a073eea60b2..a020e84906813 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
+@@ -229,7 +229,7 @@ void cxgb4_free_mps_ref_entries(struct adapter *adap)
+ {
+ struct mps_entries_ref *mps_entry, *tmp;
+ 
+- if (!list_empty(&adap->mps_ref))
++ if (list_empty(&adap->mps_ref))
+ return;
+ 
+ spin_lock(&adap->mps_ref_lock);
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 2d20a48f0ba0a..5329af2337a91 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -416,6 +416,9 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
+ int i, j, rc;
+ u64 *size_array;
+ 
++ if (!adapter->rx_pool)
++ return -1;
++
+ size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ 
+@@ -586,6 +589,9 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
+ int tx_scrqs;
+ int i, rc;
+ 
++ if (!adapter->tx_pool)
++ return -1;
++
+ tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ for (i = 0; i < tx_scrqs; i++) {
+ rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
+@@ -1918,7 +1924,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ adapter->req_rx_add_entries_per_subcrq !=
+ old_num_rx_slots ||
+ adapter->req_tx_entries_per_subcrq !=
+- old_num_tx_slots) {
++ old_num_tx_slots ||
++ !adapter->rx_pool ||
++ !adapter->tso_pool ||
++ !adapter->tx_pool) {
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
+ release_napi(adapter);
+@@ -1930,12 +1939,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ 
+ } else {
+ rc = reset_tx_pools(adapter);
+- if (rc)
++ if (rc) {
++ netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
++ rc);
+ goto out;
++ }
+ 
+ rc = reset_rx_pools(adapter);
+- if (rc)
++ if (rc) {
++ netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
++ rc);
+ goto out;
++ }
+ }
+ ibmvnic_disable_irqs(adapter);
+ }
+diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
+index 900affbdcc0e4..96948276b2bc3 100644
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -230,8 +230,8 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
+ }
+ 
+ if (rx < budget) {
+- napi_complete(&ch->napi);
+- ltq_dma_enable_irq(&ch->dma);
++ if (napi_complete_done(&ch->napi, rx))
++ ltq_dma_enable_irq(&ch->dma);
+ }
+ 
+ return rx;
+@@ -268,9 +268,12 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
+ net_dev->stats.tx_bytes += bytes;
+ netdev_completed_queue(ch->priv->net_dev, pkts, bytes);
+ 
++ if (netif_queue_stopped(net_dev))
++ netif_wake_queue(net_dev);
++
+ if (pkts < budget) {
+- napi_complete(&ch->napi);
+- ltq_dma_enable_irq(&ch->dma);
++ if (napi_complete_done(&ch->napi, pkts))
++ ltq_dma_enable_irq(&ch->dma);
+ }
+ 
+ return pkts;
+@@ -341,10 +344,12 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
+ {
+ struct xrx200_chan *ch = ptr;
+ 
+- ltq_dma_disable_irq(&ch->dma);
+- ltq_dma_ack_irq(&ch->dma);
++ if (napi_schedule_prep(&ch->napi)) {
++ __napi_schedule(&ch->napi);
++ ltq_dma_disable_irq(&ch->dma);
++ }
+ 
+- napi_schedule(&ch->napi);
++ ltq_dma_ack_irq(&ch->dma);
+ 
+ return IRQ_HANDLED;
+ }
+@@ -498,7 +503,7 @@ static int xrx200_probe(struct platform_device *pdev)
+ 
+ /* setup NAPI */
+ netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
+- netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
++ netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
+ 
+ platform_set_drvdata(pdev, priv);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+index 01468ec274466..b949b9a7538b0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+@@ -35,7 +35,6 @@
+ #include <net/sock.h>
+ 
+ #include "en.h"
+-#include "accel/tls.h"
+ #include "fpga/sdk.h"
+ #include "en_accel/tls.h"
+ 
+@@ -51,9 +50,14 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
+ 
+ #define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
+ 
++static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
++{
++ return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
++}
++
+ int mlx5e_tls_get_count(struct mlx5e_priv *priv)
+ {
+- if (!priv->tls)
++ if (!is_tls_atomic_stats(priv))
+ return 0;
+ 
+ return NUM_TLS_SW_COUNTERS;
+@@ -63,7 +67,7 @@ int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+ {
+ unsigned int i, idx = 0;
+ 
+- if (!priv->tls)
++ if (!is_tls_atomic_stats(priv))
+ return 0;
+ 
+ for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+@@ -77,7 +81,7 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
+ {
+ int i, idx = 0;
+ 
+- if (!priv->tls)
++ if (!is_tls_atomic_stats(priv))
+ return 0;
+ 
+ for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 5acfdea3a75a8..7cc80dc4e6d89 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -1143,35 +1143,37 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
+ }
+ esw->fdb_table.offloads.send_to_vport_grp = g;
+ 
+- /* create peer esw miss group */
+- memset(flow_group_in, 0, inlen);
++ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
++ /* create peer esw miss group */
++ memset(flow_group_in, 0, inlen);
+ 
+- esw_set_flow_group_source_port(esw, flow_group_in);
++ esw_set_flow_group_source_port(esw, flow_group_in);
+ 
+- if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+- match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+- flow_group_in,
+- match_criteria);
++ if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
++ match_criteria = MLX5_ADDR_OF(create_flow_group_in,
++ flow_group_in,
++ match_criteria);
+ 
+- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+- misc_parameters.source_eswitch_owner_vhca_id);
++ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
++ misc_parameters.source_eswitch_owner_vhca_id);
+ 
+- MLX5_SET(create_flow_group_in, flow_group_in,
+- source_eswitch_owner_vhca_id_valid, 1);
+- }
++ MLX5_SET(create_flow_group_in, flow_group_in,
++ source_eswitch_owner_vhca_id_valid, 1);
++ }
+ 
+- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+- ix + esw->total_vports - 1);
+- ix += esw->total_vports;
++ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
++ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
++ ix + esw->total_vports - 1);
++ ix += esw->total_vports;
+ 
+- g = mlx5_create_flow_group(fdb, flow_group_in);
+- if (IS_ERR(g)) {
+- err = PTR_ERR(g);
+- esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
+- goto peer_miss_err;
++ g = mlx5_create_flow_group(fdb, flow_group_in);
++ if (IS_ERR(g)) {
++ err = PTR_ERR(g);
++ esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
++ goto peer_miss_err;
++ }
++ esw->fdb_table.offloads.peer_miss_grp = g;
+ }
+- esw->fdb_table.offloads.peer_miss_grp = g;
+ 
+ /* create miss group */
+ memset(flow_group_in, 0, inlen);
+@@ -1206,7 +1208,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
+ miss_rule_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+ miss_err:
+- mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
++ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
++ mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+ peer_miss_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+ send_vport_err:
+@@ -1229,7 +1232,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
+ mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
+ mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+- mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
++ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
++ mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+ 
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index b66e5b6eecd99..9ac2f52187ea4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -629,7 +629,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
+ fte->action = *flow_act;
+ fte->flow_context = spec->flow_context;
+ 
+- tree_init_node(&fte->node, NULL, del_sw_fte);
++ tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
+ 
+ return fte;
+ }
+@@ -1737,7 +1737,6 @@ skip_search:
+ up_write_ref_node(&g->node, false);
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
+ up_write_ref_node(&fte->node, false);
+- tree_put_node(&fte->node, false);
+ return rule;
+ }
+ rule = ERR_PTR(-ENOENT);
+@@ -1837,7 +1836,6 @@ search_again_locked:
+ up_write_ref_node(&g->node, false);
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
+ up_write_ref_node(&fte->node, false);
+- tree_put_node(&fte->node, false);
+ tree_put_node(&g->node, false);
+ return rule;
+ 
+@@ -1930,7 +1928,9 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
+ up_write_ref_node(&fte->node, false);
+ } else {
+ del_hw_fte(&fte->node);
+- up_write(&fte->node.lock);
++ /* Avoid double call to del_hw_fte */
++ fte->node.del_hw_func = NULL;
++ up_write_ref_node(&fte->node, false);
+ tree_put_node(&fte->node, false);
+ }
+ kfree(handle);
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+index 1b840ee473396..17b91ed39369c 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+@@ -731,8 +731,8 @@ nfp_port_get_fecparam(struct net_device *netdev,
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ 
+- param->active_fec = ETHTOOL_FEC_NONE_BIT;
+- param->fec = ETHTOOL_FEC_NONE_BIT;
++ param->active_fec = ETHTOOL_FEC_NONE;
++ param->fec = ETHTOOL_FEC_NONE;
+ 
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index adfdf6260b269..fcb7a6b4cc02a 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -773,7 +773,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
+ struct net_device *dev,
+ struct geneve_sock *gs4,
+ struct flowi4 *fl4,
+- const struct ip_tunnel_info *info)
++ const struct ip_tunnel_info *info,
++ __be16 dport, __be16 sport)
+ {
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ struct geneve_dev *geneve = netdev_priv(dev);
+@@ -789,6 +790,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
+ fl4->flowi4_proto = IPPROTO_UDP;
+ fl4->daddr = info->key.u.ipv4.dst;
+ fl4->saddr = info->key.u.ipv4.src;
++ fl4->fl4_dport = dport;
++ fl4->fl4_sport = sport;
+ 
+ tos = info->key.tos;
+ if ((tos == 1) && !geneve->collect_md) {
+@@ -823,7 +826,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
+ struct net_device *dev,
+ struct geneve_sock *gs6,
+ struct flowi6 *fl6,
+- const struct ip_tunnel_info *info)
++ const struct ip_tunnel_info *info,
++ __be16 dport, __be16 sport)
+ {
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ struct geneve_dev *geneve = netdev_priv(dev);
+@@ -839,6 +843,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
+ fl6->flowi6_proto = IPPROTO_UDP;
+ fl6->daddr = info->key.u.ipv6.dst;
+ fl6->saddr = info->key.u.ipv6.src;
++ fl6->fl6_dport = dport;
++ fl6->fl6_sport = sport;
++
+ prio = info->key.tos;
+ if ((prio == 1) && !geneve->collect_md) {
+ prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
+@@ -885,14 +892,15 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+ 
+- rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
++ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
++ rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
++ geneve->info.key.tp_dst, sport);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+ 
+ skb_tunnel_check_pmtu(skb, &rt->dst,
+ GENEVE_IPV4_HLEN + info->options_len);
+ 
+- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ if (geneve->collect_md) {
+ tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
+ ttl = key->ttl;
+@@ -947,13 +955,14 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+ 
+- dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
++ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
++ dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
++ geneve->info.key.tp_dst, sport);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+ 
+ skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
+ 
+- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ if (geneve->collect_md) {
+ prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
+ ttl = key->ttl;
+@@ -1034,13 +1043,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ struct geneve_dev *geneve = netdev_priv(dev);
++ __be16 sport;
+ 
+ if (ip_tunnel_info_af(info) == AF_INET) {
+ struct rtable *rt;
+ struct flowi4 fl4;
++
+ struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
++ sport = udp_flow_src_port(geneve->net, skb,
++ 1, USHRT_MAX, true);
+ 
+- rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
++ rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
++ geneve->info.key.tp_dst, sport);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+ 
+@@ -1050,9 +1064,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+ } else if (ip_tunnel_info_af(info) == AF_INET6) {
+ struct dst_entry *dst;
+ struct flowi6 fl6;
++
+ struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
++ sport = udp_flow_src_port(geneve->net, skb,
++ 1, USHRT_MAX, true);
+ 
+- dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
++ dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
++ geneve->info.key.tp_dst, sport);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+ 
+@@ -1063,8 +1081,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+ return -EINVAL;
+ }
+ 
+- info->key.tp_src = udp_flow_src_port(geneve->net, skb,
+- 1, USHRT_MAX, true);
++ info->key.tp_src = sport;
+ info->key.tp_dst = geneve->info.key.tp_dst;
+ return 0;
+ }
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 54e5d4f9622cd..b718b11607fcd 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -834,7 +834,7 @@ EXPORT_SYMBOL(phy_free_interrupt);
+ */
+ void phy_stop(struct phy_device *phydev)
+ {
+- if (!phy_is_started(phydev)) {
++ if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) {
+ WARN(1, "called from state %s\n",
+ phy_state_to_str(phydev->state));
+ return;
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 110924d627449..9d0a306f05623 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1421,7 +1421,8 @@ void phy_detach(struct phy_device *phydev)
+ 
+ phy_led_triggers_unregister(phydev);
+ 
+- module_put(phydev->mdio.dev.driver->owner);
++ if (phydev->mdio.dev.driver)
++ module_put(phydev->mdio.dev.driver->owner);
+ 
+ /* If the device had no specific driver before (i.e. - it
+ * was using the generic driver), we unbind the device
+diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
+index 48ced3912576c..16f33d1ffbfb9 100644
+--- a/drivers/net/wan/hdlc_ppp.c
++++ b/drivers/net/wan/hdlc_ppp.c
+@@ -383,11 +383,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
+ }
+ 
+ for (opt = data; len; len -= opt[1], opt += opt[1]) {
+- if (len < 2 || len < opt[1]) {
+- dev->stats.rx_errors++;
+- kfree(out);
+- return; /* bad packet, drop silently */
+- }
++ if (len < 2 || opt[1] < 2 || len < opt[1])
++ goto err_out;
+ 
+ if (pid == PID_LCP)
+ switch (opt[0]) {
+@@ -395,6 +392,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
+ continue; /* MRU always OK and > 1500 bytes? */
+ 
+ case LCP_OPTION_ACCM: /* async control character map */
++ if (opt[1] < sizeof(valid_accm))
++ goto err_out;
+ if (!memcmp(opt, valid_accm,
+ sizeof(valid_accm)))
+ continue;
+@@ -406,6 +405,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
+ }
+ break;
+ case LCP_OPTION_MAGIC:
++ if (len < 6)
++ goto err_out;
+ if (opt[1] != 6 || (!opt[2] && !opt[3] &&
+ !opt[4] && !opt[5]))
+ break; /* reject invalid magic number */
+@@ -424,6 +425,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
+ ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);
+ 
+ kfree(out);
++ return;
++
++err_out:
++ dev->stats.rx_errors++;
++ kfree(out);
+ }
+ 
+ static int ppp_rx(struct sk_buff *skb)
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 955e1370f033d..a62889c8bed7a 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -3185,8 +3185,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error if @free_on_error is true.
+ */
+-static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
+- bool free_on_error)
++static inline int __must_check __skb_put_padto(struct sk_buff *skb,
++ unsigned int len,
++ bool free_on_error)
+ {
+ unsigned int size = skb->len;
+ 
+@@ -3209,7 +3210,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error.
+ */
+-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
++static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
+ {
+ return __skb_put_padto(skb, len, true);
+ }
+diff --git a/include/net/flow.h b/include/net/flow.h
+index a50fb77a0b279..d058e63fb59a3 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -116,6 +116,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
+ fl4->saddr = saddr;
+ fl4->fl4_dport = dport;
+ fl4->fl4_sport = sport;
++ fl4->flowi4_multipath_hash = 0;
+ }
+ 
+ /* Reset some input parameters after previous lookup */
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 2b6f3f13d5bcb..3e8f87a3c52fa 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -224,12 +224,14 @@ struct sctp_sock {
+ data_ready_signalled:1;
+ 
+ atomic_t pd_mode;
++
++ /* Fields after this point will be skipped on copies, like on accept
++ * and peeloff operations
++ */
++
+ /* Receive to here while partial delivery is in effect. */
+ struct sk_buff_head pd_lobby;
+ 
+- /* These must be the last fields, as they will skipped on copies,
+- * like on accept and peeloff operations
+- */
+ struct list_head auto_asconf_list;
+ int do_auto_asconf;
+ };
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index bbff4bccb885d..5646f291eb705 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2088,6 +2088,9 @@ static void kill_kprobe(struct kprobe *p)
+ {
+ struct kprobe *kp;
+ 
++ if (WARN_ON_ONCE(kprobe_gone(p)))
++ return;
++
+ p->flags |= KPROBE_FLAG_GONE;
+ if (kprobe_aggrprobe(p)) {
+ /*
+@@ -2270,7 +2273,10 @@ static int kprobes_module_callback(struct notifier_block *nb,
+ mutex_lock(&kprobe_mutex);
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ head = &kprobe_table[i];
+- hlist_for_each_entry_rcu(p, head, hlist)
++ hlist_for_each_entry_rcu(p, head, hlist) {
++ if (kprobe_gone(p))
++ continue;
++
+ if (within_module_init((unsigned long)p->addr, mod) ||
+ (checkcore &&
+ within_module_core((unsigned long)p->addr, mod))) {
+@@ -2287,6 +2293,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
+ */
+ kill_kprobe(p);
+ }
++ }
+ }
+ mutex_unlock(&kprobe_mutex);
+ return NOTIFY_DONE;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index da9040a6838f8..873de55d93fb2 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2174,7 +2174,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ put_page(page);
+ add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
+ return;
+- } else if (is_huge_zero_pmd(*pmd)) {
++ } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+ /*
+ * FIXME: Do we want to invalidate secondary mmu by calling
+ * mmu_notifier_invalidate_range() see comments below inside
+@@ -2262,27 +2262,33 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ pte = pte_offset_map(&_pmd, addr);
+ BUG_ON(!pte_none(*pte));
+ set_pte_at(mm, addr, pte, entry);
+- atomic_inc(&page[i]._mapcount);
+- pte_unmap(pte);
+- }
+-
+- /*
+- * Set PG_double_map before dropping compound_mapcount to avoid
+- * false-negative page_mapped().
+- */
+- if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
+- for (i = 0; i < HPAGE_PMD_NR; i++)
++ if (!pmd_migration)
+ atomic_inc(&page[i]._mapcount);
++ pte_unmap(pte);
+ }
+ 
+- if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
+- /* Last compound_mapcount is gone. */
+- __dec_node_page_state(page, NR_ANON_THPS);
+- if (TestClearPageDoubleMap(page)) {
+- /* No need in mapcount reference anymore */
++ if (!pmd_migration) {
++ /*
++ * Set PG_double_map before dropping compound_mapcount to avoid
++ * false-negative page_mapped().
++ */
++ if (compound_mapcount(page) > 1 &&
++ !TestSetPageDoubleMap(page)) {
+ for (i = 0; i < HPAGE_PMD_NR; i++)
+- atomic_dec(&page[i]._mapcount);
++ atomic_inc(&page[i]._mapcount);
++ }
++
++ lock_page_memcg(page);
++ if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
++ /* Last compound_mapcount is gone. */
++ __dec_lruvec_page_state(page, NR_ANON_THPS);
++ if (TestClearPageDoubleMap(page)) {
++ /* No need in mapcount reference anymore */
++ for (i = 0; i < HPAGE_PMD_NR; i++)
++ atomic_dec(&page[i]._mapcount);
++ }
+ }
++ unlock_page_memcg(page);
+ }
+ 
+ smp_wmb(); /* make pte visible before pmd */
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 7fde5f904c8d3..6db9176d8c63e 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2775,6 +2775,14 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+ unsigned long reclaimed;
+ unsigned long scanned;
+ 
++ /*
++ * This loop can become CPU-bound when target memcgs
++ * aren't eligible for reclaim - either because they
++ * don't have any reclaimable pages, or because their
++ * memory is explicitly protected. Avoid soft lockups.
++ */
++ cond_resched();
++
+ switch (mem_cgroup_protected(root, memcg)) {
+ case MEMCG_PROT_MIN:
+ /*
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index bb98984cd27d0..48413b5eb61fc 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -1229,11 +1229,13 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
+ }
+ }
+ 
+-static int __br_vlan_get_pvid(const struct net_device *dev,
+- struct net_bridge_port *p, u16 *p_pvid)
++int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
+ {
+ struct net_bridge_vlan_group *vg;
++ struct net_bridge_port *p;
+ 
++ ASSERT_RTNL();
++ p = br_port_get_check_rtnl(dev);
+ if (p)
+ vg = nbp_vlan_group(p);
+ else if (netif_is_bridge_master(dev))
+@@ -1244,18 +1246,23 @@ static int __br_vlan_get_pvid(const struct net_device *dev,
+ *p_pvid = br_get_pvid(vg);
+ return 0;
+ }
+-
+-int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
+-{
+- ASSERT_RTNL();
+-
+- return __br_vlan_get_pvid(dev, br_port_get_check_rtnl(dev), p_pvid);
+-}
+ EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
+ 
+ int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
+ {
+- return __br_vlan_get_pvid(dev, br_port_get_check_rcu(dev), p_pvid);
++ struct net_bridge_vlan_group *vg;
++ struct net_bridge_port *p;
++
++ p = br_port_get_check_rcu(dev);
++ if (p)
++ vg = nbp_vlan_group_rcu(p);
++ else if (netif_is_bridge_master(dev))
++ vg = br_vlan_group_rcu(netdev_priv(dev));
++ else
++ return -EINVAL;
++
++ *p_pvid = br_get_pvid(vg);
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index cdc1c3a144e1f..20c7fd7b8b4bc 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -8241,7 +8241,7 @@ int dev_get_port_parent_id(struct net_device *dev,
+ if (!first.id_len)
+ first = *ppid;
+ else if (memcmp(&first, ppid, sizeof(*ppid)))
+- return -ENODATA;
++ return -EOPNOTSUPP;
+ }
+ 
+ return err;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 5c490d473df1d..cf2a68513bfd5 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4650,6 +4650,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ fl4.saddr = params->ipv4_src;
+ fl4.fl4_sport = params->sport;
+ fl4.fl4_dport = params->dport;
++ fl4.flowi4_multipath_hash = 0;
+ 
+ if (flags & BPF_FIB_LOOKUP_DIRECT) {
+ u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
+diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
+index d2a4553bcf39d..0fd1c2aa13615 100644
+--- a/net/dcb/dcbnl.c
++++ b/net/dcb/dcbnl.c
+@@ -1426,6 +1426,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
+ {
+ const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+ struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
++ int prio;
+ int err;
+ 
+ if (!ops)
+@@ -1475,6 +1476,13 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
+ struct dcbnl_buffer *buffer =
+ nla_data(ieee[DCB_ATTR_DCB_BUFFER]);
+ 
++ for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
++ if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
++ err = -EINVAL;
++ goto err;
++ }
++ }
++
+ err = ops->dcbnl_setbuffer(netdev, buffer);
+ if (err)
+ goto err;
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 2b0521feadaa7..0a8220d30c992 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -372,6 +372,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+ fl4.flowi4_tun_key.tun_id = 0;
+ fl4.flowi4_flags = 0;
+ fl4.flowi4_uid = sock_net_uid(net, NULL);
++ fl4.flowi4_multipath_hash = 0;
+ 
+ no_addr = idev->ifa_list == NULL;
+ 
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index b36c4a3159e52..079dcf9f0c56d 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -74,6 +74,7 @@
+ #include <net/icmp.h>
+ #include <net/checksum.h>
+ #include <net/inetpeer.h>
++#include <net/inet_ecn.h>
+ #include <net/lwtunnel.h>
+ #include <linux/bpf-cgroup.h>
+ #include <linux/igmp.h>
+@@ -1699,7 +1700,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+ if (IS_ERR(rt))
+ return;
+ 
+- inet_sk(sk)->tos = arg->tos;
++ inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
+ 
+ sk->sk_protocol = ip_hdr(skb)->protocol;
+ sk->sk_bound_dev_if = arg->bound_dev_if;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index b3a8d32f7d8df..aa77f989ba817 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -785,8 +785,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+ neigh_event_send(n, NULL);
+ } else {
+ if (fib_lookup(net, fl4, &res, 0) == 0) {
+- struct fib_nh_common *nhc = FIB_RES_NHC(res);
++ struct fib_nh_common *nhc;
+ 
++ fib_select_path(net, &res, fl4, skb);
++ nhc = FIB_RES_NHC(res);
+ update_or_create_fnhe(nhc, fl4->daddr, new_gw,
+ 0, false,
+ jiffies + ip_rt_gc_timeout);
+@@ -1012,6 +1014,7 @@ out: kfree_skb(skb);
+ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ {
+ struct dst_entry *dst = &rt->dst;
++ struct net *net = dev_net(dst->dev);
+ u32 old_mtu = ipv4_mtu(dst);
+ struct fib_result res;
+ bool lock = false;
+@@ -1032,9 +1035,11 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ return;
+ 
+ rcu_read_lock();
+- if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
+- struct fib_nh_common *nhc = FIB_RES_NHC(res);
++ if (fib_lookup(net, fl4, &res, 0) == 0) {
++ struct fib_nh_common *nhc;
+ 
++ fib_select_path(net, &res, fl4, NULL);
++ nhc = FIB_RES_NHC(res);
+ update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
+ jiffies + ip_rt_mtu_expires);
+ }
+@@ -2104,6 +2109,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ fl4.daddr = daddr;
+ fl4.saddr = saddr;
+ fl4.flowi4_uid = sock_net_uid(net, NULL);
++ fl4.flowi4_multipath_hash = 0;
+ 
+ if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
+ flkeys = &_flkeys;
+@@ -2625,8 +2631,6 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
+ fib_select_path(net, res, fl4, skb);
+ 
+ dev_out = FIB_RES_DEV(*res);
+- fl4->flowi4_oif = dev_out->ifindex;
+-
+ 
+ make_route:
+ rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
+diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
+index ae1344e4cec54..dce14470b15ac 100644
+--- a/net/ipv6/Kconfig
++++ b/net/ipv6/Kconfig
+@@ -289,6 +289,7 @@ config IPV6_SEG6_LWTUNNEL
+ config IPV6_SEG6_HMAC
+ bool "IPv6: Segment Routing HMAC support"
+ depends on IPV6
++ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 7a0c877ca306c..96d80e50bf35b 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1896,14 +1896,19 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
+ /* Need to own table->tb6_lock */
+ int fib6_del(struct fib6_info *rt, struct nl_info *info)
+ {
+- struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
+- lockdep_is_held(&rt->fib6_table->tb6_lock));
+- struct fib6_table *table = rt->fib6_table;
+ struct net *net = info->nl_net;
+ struct fib6_info __rcu **rtp;
+ struct fib6_info __rcu **rtp_next;
++ struct fib6_table *table;
++ struct fib6_node *fn;
++
++ if (rt == net->ipv6.fib6_null_entry)
++ return -ENOENT;
+ 
+- if (!fn || rt == net->ipv6.fib6_null_entry)
++ table = rt->fib6_table;
++ fn = rcu_dereference_protected(rt->fib6_node,
++ lockdep_is_held(&table->tb6_lock));
++ if (!fn)
+ return -ENOENT;
+ 
+ WARN_ON(!(fn->fn_flags & RTN_RTINFO));
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 979c579afc63b..a915bc86620af 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1849,6 +1849,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
+ if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
+ struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
+ 
++ if ((xfilter->sadb_x_filter_splen >=
++ (sizeof(xfrm_address_t) << 3)) ||
++ (xfilter->sadb_x_filter_dplen >=
++ (sizeof(xfrm_address_t) << 3))) {
++ mutex_unlock(&pfk->dump_lock);
++ return -EINVAL;
++ }
+ filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+ if (filter == NULL) {
+ mutex_unlock(&pfk->dump_lock);
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index a699e318b9a01..d6d2736ec9273 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -178,7 +178,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
+ {
+ struct qrtr_hdr_v1 *hdr;
+ size_t len = skb->len;
+- int rc = -ENODEV;
++ int rc;
+ 
+ hdr = skb_push(skb, sizeof(*hdr));
+ hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
+@@ -196,15 +196,17 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
+ hdr->size = cpu_to_le32(len);
+ hdr->confirm_rx = 0;
+ 
+- skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
+-
+- mutex_lock(&node->ep_lock);
+- if (node->ep)
+- rc = node->ep->xmit(node->ep, skb);
+- else
+- kfree_skb(skb);
+- mutex_unlock(&node->ep_lock);
++ rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
+ 
++ if (!rc) {
++ mutex_lock(&node->ep_lock);
++ rc = -ENODEV;
++ if (node->ep)
++ rc = node->ep->xmit(node->ep, skb);
++ else
++ kfree_skb(skb);
++ mutex_unlock(&node->ep_lock);
++ }
+ return rc;
+ }
+ 
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c |
1361 |
+index a0cfb4793c93f..778371bac93e2 100644 |
1362 |
+--- a/net/sched/act_ife.c |
1363 |
++++ b/net/sched/act_ife.c |
1364 |
+@@ -436,6 +436,25 @@ static void tcf_ife_cleanup(struct tc_action *a) |
1365 |
+ kfree_rcu(p, rcu); |
1366 |
+ } |
1367 |
+ |
1368 |
++static int load_metalist(struct nlattr **tb, bool rtnl_held) |
1369 |
++{ |
1370 |
++ int i; |
1371 |
++ |
1372 |
++ for (i = 1; i < max_metacnt; i++) { |
1373 |
++ if (tb[i]) { |
1374 |
++ void *val = nla_data(tb[i]); |
1375 |
++ int len = nla_len(tb[i]); |
1376 |
++ int rc; |
1377 |
++ |
1378 |
++ rc = load_metaops_and_vet(i, val, len, rtnl_held); |
++ if (rc != 0) |
++ return rc; |
++ } |
++ } |
++ |
++ return 0; |
++} |
++ |
+ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, |
+ bool exists, bool rtnl_held) |
+ { |
+@@ -449,10 +468,6 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, |
+ val = nla_data(tb[i]); |
+ len = nla_len(tb[i]); |
+ |
+- rc = load_metaops_and_vet(i, val, len, rtnl_held); |
+- if (rc != 0) |
+- return rc; |
+- |
+ rc = add_metainfo(ife, i, val, len, exists); |
+ if (rc) |
+ return rc; |
+@@ -508,6 +523,21 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, |
+ if (!p) |
+ return -ENOMEM; |
+ |
++ if (tb[TCA_IFE_METALST]) { |
++ err = nla_parse_nested_deprecated(tb2, IFE_META_MAX, |
++ tb[TCA_IFE_METALST], NULL, |
++ NULL); |
++ if (err) { |
++ kfree(p); |
++ return err; |
++ } |
++ err = load_metalist(tb2, rtnl_held); |
++ if (err) { |
++ kfree(p); |
++ return err; |
++ } |
++ } |
++ |
+ index = parm->index; |
+ err = tcf_idr_check_alloc(tn, &index, a, bind); |
+ if (err < 0) { |
+@@ -569,15 +599,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, |
+ } |
+ |
+ if (tb[TCA_IFE_METALST]) { |
+- err = nla_parse_nested_deprecated(tb2, IFE_META_MAX, |
+- tb[TCA_IFE_METALST], NULL, |
+- NULL); |
+- if (err) |
+- goto metadata_parse_err; |
+ err = populate_metalist(ife, tb2, exists, rtnl_held); |
+ if (err) |
+ goto metadata_parse_err; |
+- |
+ } else { |
+ /* if no passed metadata allow list or passed allow-all |
+ * then here we process by adding as many supported metadatum |
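The act_ife change above splits validation out of populate_metalist() into the new load_metalist() and runs it before tcf_idr_check_alloc(), so a malformed TLV list fails before any action state exists. The general validate-everything-then-allocate shape, as a standalone sketch with invented attr/vet helpers:

    #include <errno.h>
    #include <stdlib.h>

    struct attr { int type; int len; };

    static int vet(const struct attr *a)
    {
        return (a->len >= 0 && a->len <= 64) ? 0 : -EINVAL;
    }

    static int init_action(const struct attr *attrs, int n, void **state)
    {
        /* Pass 1: vet every attribute before allocating anything,
         * so failure needs no teardown. */
        for (int i = 0; i < n; i++) {
            int rc = vet(&attrs[i]);
            if (rc)
                return rc;
        }
        /* Pass 2: commit; only allocation errors remain. */
        *state = calloc(1, 64);
        return *state ? 0 : -ENOMEM;
    }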
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c |
+index 896c9037155a5..0e275e11f5115 100644 |
+--- a/net/sched/sch_generic.c |
++++ b/net/sched/sch_generic.c |
+@@ -1126,27 +1126,36 @@ static void dev_deactivate_queue(struct net_device *dev, |
+ struct netdev_queue *dev_queue, |
+ void *_qdisc_default) |
+ { |
+- struct Qdisc *qdisc_default = _qdisc_default; |
+- struct Qdisc *qdisc; |
++ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc); |
+ |
+- qdisc = rtnl_dereference(dev_queue->qdisc); |
+ if (qdisc) { |
+- bool nolock = qdisc->flags & TCQ_F_NOLOCK; |
+- |
+- if (nolock) |
+- spin_lock_bh(&qdisc->seqlock); |
+- spin_lock_bh(qdisc_lock(qdisc)); |
+- |
+ if (!(qdisc->flags & TCQ_F_BUILTIN)) |
+ set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); |
++ } |
++} |
+ |
+- rcu_assign_pointer(dev_queue->qdisc, qdisc_default); |
+- qdisc_reset(qdisc); |
++static void dev_reset_queue(struct net_device *dev, |
++ struct netdev_queue *dev_queue, |
++ void *_unused) |
++{ |
++ struct Qdisc *qdisc; |
++ bool nolock; |
+ |
+- spin_unlock_bh(qdisc_lock(qdisc)); |
+- if (nolock) |
+- spin_unlock_bh(&qdisc->seqlock); |
+- } |
++ qdisc = dev_queue->qdisc_sleeping; |
++ if (!qdisc) |
++ return; |
++ |
++ nolock = qdisc->flags & TCQ_F_NOLOCK; |
++ |
++ if (nolock) |
++ spin_lock_bh(&qdisc->seqlock); |
++ spin_lock_bh(qdisc_lock(qdisc)); |
++ |
++ qdisc_reset(qdisc); |
++ |
++ spin_unlock_bh(qdisc_lock(qdisc)); |
++ if (nolock) |
++ spin_unlock_bh(&qdisc->seqlock); |
+ } |
+ |
+ static bool some_qdisc_is_busy(struct net_device *dev) |
+@@ -1207,12 +1216,20 @@ void dev_deactivate_many(struct list_head *head) |
+ dev_watchdog_down(dev); |
+ } |
+ |
+- /* Wait for outstanding qdisc-less dev_queue_xmit calls. |
++ /* Wait for outstanding qdisc-less dev_queue_xmit calls or |
++ * outstanding qdisc enqueuing calls. |
+ * This is avoided if all devices are in dismantle phase : |
+ * Caller will call synchronize_net() for us |
+ */ |
+ synchronize_net(); |
+ |
++ list_for_each_entry(dev, head, close_list) { |
++ netdev_for_each_tx_queue(dev, dev_reset_queue, NULL); |
++ |
++ if (dev_ingress_queue(dev)) |
++ dev_reset_queue(dev, dev_ingress_queue(dev), NULL); |
++ } |
++ |
+ /* Wait for outstanding qdisc_run calls. */ |
+ list_for_each_entry(dev, head, close_list) { |
+ while (some_qdisc_is_busy(dev)) |
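The sch_generic rework deactivates in two passes: dev_deactivate_queue() now only marks each qdisc __QDISC_STATE_DEACTIVATED, and the reset moves into the new dev_reset_queue(), which runs only after synchronize_net() has waited out concurrent lockless enqueuers. A much-simplified sketch of that mark, wait, then reset ordering; wait_for_senders() is an empty stand-in for synchronize_net():

    #include <stdatomic.h>

    struct queue { atomic_bool active; int backlog; };

    static void wait_for_senders(void) { /* stand-in for synchronize_net() */ }

    static void deactivate(struct queue *qs, int n)
    {
        /* Pass 1: publish "deactivated" on every queue... */
        for (int i = 0; i < n; i++)
            atomic_store(&qs[i].active, false);

        /* ...wait until senders that saw active == true have drained... */
        wait_for_senders();

        /* Pass 2: only now is it safe to reset per-queue state. */
        for (int i = 0; i < n; i++)
            qs[i].backlog = 0;
    }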
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c |
+index 6a5086e586efb..2b797a71e9bda 100644 |
+--- a/net/sched/sch_taprio.c |
++++ b/net/sched/sch_taprio.c |
+@@ -777,9 +777,11 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { |
+ [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 }, |
+ }; |
+ |
+-static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, |
++static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb, |
++ struct sched_entry *entry, |
+ struct netlink_ext_ack *extack) |
+ { |
++ int min_duration = length_to_duration(q, ETH_ZLEN); |
+ u32 interval = 0; |
+ |
+ if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD]) |
+@@ -794,7 +796,10 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, |
+ interval = nla_get_u32( |
+ tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]); |
+ |
+- if (interval == 0) { |
++ /* The interval should allow at least the minimum ethernet |
++ * frame to go out. |
++ */ |
++ if (interval < min_duration) { |
+ NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry"); |
+ return -EINVAL; |
+ } |
+@@ -804,8 +809,9 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, |
+ return 0; |
+ } |
+ |
+-static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry, |
+- int index, struct netlink_ext_ack *extack) |
++static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n, |
++ struct sched_entry *entry, int index, |
++ struct netlink_ext_ack *extack) |
+ { |
+ struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { }; |
+ int err; |
+@@ -819,10 +825,10 @@ static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry, |
+ |
+ entry->index = index; |
+ |
+- return fill_sched_entry(tb, entry, extack); |
++ return fill_sched_entry(q, tb, entry, extack); |
+ } |
+ |
+-static int parse_sched_list(struct nlattr *list, |
++static int parse_sched_list(struct taprio_sched *q, struct nlattr *list, |
+ struct sched_gate_list *sched, |
+ struct netlink_ext_ack *extack) |
+ { |
+@@ -847,7 +853,7 @@ static int parse_sched_list(struct nlattr *list, |
+ return -ENOMEM; |
+ } |
+ |
+- err = parse_sched_entry(n, entry, i, extack); |
++ err = parse_sched_entry(q, n, entry, i, extack); |
+ if (err < 0) { |
+ kfree(entry); |
+ return err; |
+@@ -862,7 +868,7 @@ static int parse_sched_list(struct nlattr *list, |
+ return i; |
+ } |
+ |
+-static int parse_taprio_schedule(struct nlattr **tb, |
++static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb, |
+ struct sched_gate_list *new, |
+ struct netlink_ext_ack *extack) |
+ { |
+@@ -883,8 +889,8 @@ static int parse_taprio_schedule(struct nlattr **tb, |
+ new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]); |
+ |
+ if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]) |
+- err = parse_sched_list( |
+- tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack); |
++ err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], |
++ new, extack); |
+ if (err < 0) |
+ return err; |
+ |
+@@ -1474,7 +1480,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, |
+ goto free_sched; |
+ } |
+ |
+- err = parse_taprio_schedule(tb, new_admin, extack); |
++ err = parse_taprio_schedule(q, tb, new_admin, extack); |
+ if (err < 0) |
+ goto free_sched; |
+ |
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c |
+index 3a11212bb4c0e..1fcc13f6073ef 100644 |
+--- a/net/sctp/socket.c |
++++ b/net/sctp/socket.c |
+@@ -9337,13 +9337,10 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, |
+ static inline void sctp_copy_descendant(struct sock *sk_to, |
+ const struct sock *sk_from) |
+ { |
+- int ancestor_size = sizeof(struct inet_sock) + |
+- sizeof(struct sctp_sock) - |
+- offsetof(struct sctp_sock, pd_lobby); |
+- |
+- if (sk_from->sk_family == PF_INET6) |
+- ancestor_size += sizeof(struct ipv6_pinfo); |
++ size_t ancestor_size = sizeof(struct inet_sock); |
+ |
++ ancestor_size += sk_from->sk_prot->obj_size; |
++ ancestor_size -= offsetof(struct sctp_sock, pd_lobby); |
+ __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); |
+ } |
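The sctp hunk derives the copy size from sk_from->sk_prot->obj_size, the socket's real allocation size, instead of hand-summing per-family sizeof() terms. A loose standalone analogue of sizing a tail copy from the object size and an offsetof() boundary; the struct names here are invented, not the kernel's layout:

    #include <stddef.h>
    #include <string.h>

    struct sock_base { long a, b; };
    struct sctp_like {
        struct sock_base base;
        long pd_lobby;          /* first field that must be copied */
        long tail[4];
    };

    /* Copy everything from pd_lobby to the end of the object, with
     * the length derived from obj_size rather than per-family sums. */
    static void copy_descendant(struct sctp_like *to,
                                const struct sctp_like *from,
                                size_t obj_size)
    {
        size_t off = offsetof(struct sctp_like, pd_lobby);

        memcpy((char *)to + off, (const char *)from + off, obj_size - off);
    }

A caller passes the runtime object size, e.g. copy_descendant(&a, &b, sizeof(struct sctp_like)), so the arithmetic cannot drift from the real layout.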
+ |
+diff --git a/net/tipc/group.c b/net/tipc/group.c |
+index 89257e2a980de..f53871baa42eb 100644 |
+--- a/net/tipc/group.c |
++++ b/net/tipc/group.c |
+@@ -273,8 +273,8 @@ static struct tipc_member *tipc_group_find_node(struct tipc_group *grp, |
+ return NULL; |
+ } |
+ |
+-static void tipc_group_add_to_tree(struct tipc_group *grp, |
+- struct tipc_member *m) |
++static int tipc_group_add_to_tree(struct tipc_group *grp, |
++ struct tipc_member *m) |
+ { |
+ u64 nkey, key = (u64)m->node << 32 | m->port; |
+ struct rb_node **n, *parent = NULL; |
+@@ -291,10 +291,11 @@ static void tipc_group_add_to_tree(struct tipc_group *grp, |
+ else if (key > nkey) |
+ n = &(*n)->rb_right; |
+ else |
+- return; |
++ return -EEXIST; |
+ } |
+ rb_link_node(&m->tree_node, parent, n); |
+ rb_insert_color(&m->tree_node, &grp->members); |
++ return 0; |
+ } |
+ |
+ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, |
+@@ -302,6 +303,7 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, |
+ u32 instance, int state) |
+ { |
+ struct tipc_member *m; |
++ int ret; |
+ |
+ m = kzalloc(sizeof(*m), GFP_ATOMIC); |
+ if (!m) |
+@@ -314,8 +316,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, |
+ m->port = port; |
+ m->instance = instance; |
+ m->bc_acked = grp->bc_snd_nxt - 1; |
++ ret = tipc_group_add_to_tree(grp, m); |
++ if (ret < 0) { |
++ kfree(m); |
++ return NULL; |
++ } |
+ grp->member_cnt++; |
+- tipc_group_add_to_tree(grp, m); |
+ tipc_nlist_add(&grp->dests, m->node); |
+ m->state = state; |
+ return m; |
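tipc_group_add_to_tree() used to return silently when the key already existed, leaving the duplicate member allocated but never linked; it now reports -EEXIST so tipc_group_create_member() can free it. The same insert-or-fail pattern on a plain binary search tree, as a compilable sketch:

    #include <errno.h>
    #include <stdlib.h>

    struct member { unsigned long long key; struct member *l, *r; };

    static int tree_add(struct member **root, struct member *m)
    {
        struct member **n = root;

        while (*n) {
            if (m->key < (*n)->key)
                n = &(*n)->l;
            else if (m->key > (*n)->key)
                n = &(*n)->r;
            else
                return -EEXIST;         /* refuse duplicates loudly */
        }
        m->l = m->r = NULL;
        *n = m;
        return 0;
    }

    static struct member *create_member(struct member **root,
                                        unsigned long long key)
    {
        struct member *m = calloc(1, sizeof(*m));

        if (!m)
            return NULL;
        m->key = key;
        if (tree_add(root, m) < 0) {
            free(m);                    /* duplicate: don't leak it */
            return NULL;
        }
        return m;
    }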
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c |
+index 922d262e153ff..ee4b2261e7957 100644 |
+--- a/net/tipc/msg.c |
++++ b/net/tipc/msg.c |
+@@ -140,7 +140,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) |
+ if (fragid == FIRST_FRAGMENT) { |
+ if (unlikely(head)) |
+ goto err; |
+- if (unlikely(skb_unclone(frag, GFP_ATOMIC))) |
++ frag = skb_unshare(frag, GFP_ATOMIC); |
++ if (unlikely(!frag)) |
+ goto err; |
+ head = *headbuf = frag; |
+ *buf = NULL; |
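tipc_buf_append() switches the first fragment from skb_unclone() to skb_unshare(): later fragments are appended into this buffer's data area, so a shared data copy is not enough, and skb_unshare() may hand back a different pointer (or NULL), hence the reassignment and NULL test. A refcounted make-it-private sketch in the same spirit; struct sbuf and unshare() are stand-ins:

    #include <stdlib.h>
    #include <string.h>

    struct sbuf { int refs; size_t len; unsigned char *data; };

    /* Return a buffer the caller may write to: the original if it is
     * already exclusive, else a private copy; NULL on allocation failure. */
    static struct sbuf *unshare(struct sbuf *b)
    {
        if (b->refs == 1)
            return b;

        struct sbuf *priv = malloc(sizeof(*priv));
        if (!priv)
            return NULL;
        priv->data = malloc(b->len);
        if (!priv->data) {
            free(priv);
            return NULL;
        }
        memcpy(priv->data, b->data, b->len);
        priv->len = b->len;
        priv->refs = 1;
        b->refs--;              /* drop our reference on the shared copy */
        return priv;
    }

Callers mirror the hunk: frag = unshare(frag); if (!frag) goto err;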
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c |
+index 5318bb6611abc..959155c3a1608 100644 |
+--- a/net/tipc/socket.c |
++++ b/net/tipc/socket.c |
+@@ -2616,10 +2616,7 @@ static int tipc_shutdown(struct socket *sock, int how) |
+ |
+ trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " "); |
+ __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); |
+- if (tipc_sk_type_connectionless(sk)) |
+- sk->sk_shutdown = SHUTDOWN_MASK; |
+- else |
+- sk->sk_shutdown = SEND_SHUTDOWN; |
++ sk->sk_shutdown = SHUTDOWN_MASK; |
+ |
+ if (sk->sk_state == TIPC_DISCONNECTING) { |
+ /* Discard any unreceived messages */ |