From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Fri, 23 Nov 2018 12:42:49
Message-Id: 1542976943.dbdea736f818d9e94ece21b82d2ec584871eb772.mpagano@gentoo
1 commit: dbdea736f818d9e94ece21b82d2ec584871eb772
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Nov 23 12:42:23 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Nov 23 12:42:23 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dbdea736
7
8 proj/linux-patches: Linux patch 4.19.4
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1003_linux-4.19.4.patch | 1733 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1737 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 6ce85c3..f74e5e3 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -55,6 +55,10 @@ Patch: 1002_linux-4.19.3.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.19.3
23
24 +Patch: 1003_linux-4.19.4.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.19.4
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1003_linux-4.19.4.patch b/1003_linux-4.19.4.patch
33 new file mode 100644
34 index 0000000..6ff628e
35 --- /dev/null
36 +++ b/1003_linux-4.19.4.patch
37 @@ -0,0 +1,1733 @@
38 +diff --git a/Makefile b/Makefile
39 +index e4064fa16f11..1f3c7adeea63 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 3
47 ++SUBLEVEL = 4
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
52 +index 53eb14a65610..40bdaea97fe7 100644
53 +--- a/arch/x86/kernel/cpu/bugs.c
54 ++++ b/arch/x86/kernel/cpu/bugs.c
55 +@@ -35,10 +35,12 @@ static void __init spectre_v2_select_mitigation(void);
56 + static void __init ssb_select_mitigation(void);
57 + static void __init l1tf_select_mitigation(void);
58 +
59 +-/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
60 +-u64 x86_spec_ctrl_base;
61 ++/*
62 ++ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
63 ++ * writes to SPEC_CTRL contain whatever reserved bits have been set.
64 ++ */
65 ++u64 __ro_after_init x86_spec_ctrl_base;
66 + EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
67 +-static DEFINE_MUTEX(spec_ctrl_mutex);
68 +
69 + /*
70 + * The vendor and possibly platform specific bits which can be modified in
71 +@@ -323,46 +325,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
72 + return cmd;
73 + }
74 +
75 +-static bool stibp_needed(void)
76 +-{
77 +- if (spectre_v2_enabled == SPECTRE_V2_NONE)
78 +- return false;
79 +-
80 +- if (!boot_cpu_has(X86_FEATURE_STIBP))
81 +- return false;
82 +-
83 +- return true;
84 +-}
85 +-
86 +-static void update_stibp_msr(void *info)
87 +-{
88 +- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
89 +-}
90 +-
91 +-void arch_smt_update(void)
92 +-{
93 +- u64 mask;
94 +-
95 +- if (!stibp_needed())
96 +- return;
97 +-
98 +- mutex_lock(&spec_ctrl_mutex);
99 +- mask = x86_spec_ctrl_base;
100 +- if (cpu_smt_control == CPU_SMT_ENABLED)
101 +- mask |= SPEC_CTRL_STIBP;
102 +- else
103 +- mask &= ~SPEC_CTRL_STIBP;
104 +-
105 +- if (mask != x86_spec_ctrl_base) {
106 +- pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
107 +- cpu_smt_control == CPU_SMT_ENABLED ?
108 +- "Enabling" : "Disabling");
109 +- x86_spec_ctrl_base = mask;
110 +- on_each_cpu(update_stibp_msr, NULL, 1);
111 +- }
112 +- mutex_unlock(&spec_ctrl_mutex);
113 +-}
114 +-
115 + static void __init spectre_v2_select_mitigation(void)
116 + {
117 + enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
118 +@@ -462,9 +424,6 @@ specv2_set_mode:
119 + setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
120 + pr_info("Enabling Restricted Speculation for firmware calls\n");
121 + }
122 +-
123 +- /* Enable STIBP if appropriate */
124 +- arch_smt_update();
125 + }
126 +
127 + #undef pr_fmt
128 +@@ -855,8 +814,6 @@ static ssize_t l1tf_show_state(char *buf)
129 + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
130 + char *buf, unsigned int bug)
131 + {
132 +- int ret;
133 +-
134 + if (!boot_cpu_has_bug(bug))
135 + return sprintf(buf, "Not affected\n");
136 +
137 +@@ -874,12 +831,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
138 + return sprintf(buf, "Mitigation: __user pointer sanitization\n");
139 +
140 + case X86_BUG_SPECTRE_V2:
141 +- ret = sprintf(buf, "%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
142 ++ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
143 + boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
144 + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
145 +- (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
146 + spectre_v2_module_string());
147 +- return ret;
148 +
149 + case X86_BUG_SPEC_STORE_BYPASS:
150 + return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
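
The bugs.c hunks above back out the early cross-HT STIBP logic and restore the
comment explaining why x86_spec_ctrl_base is read once at boot: every later
write to the MSR must carry whatever reserved bits the firmware left set. A
minimal userspace sketch of that read-once/preserve pattern, with a plain
variable standing in for the MSR and illustrative bit names:

#include <stdint.h>
#include <stdio.h>

#define CTRL_IBRS   (1u << 0)            /* bits the code manages */
#define CTRL_STIBP  (1u << 1)
#define KNOWN_MASK  (CTRL_IBRS | CTRL_STIBP)

static uint32_t hw_reg = 0xA0;           /* firmware left reserved bits set */
static uint32_t ctrl_base;               /* captured once at "boot" */

static void ctrl_init(void)
{
	ctrl_base = hw_reg & ~KNOWN_MASK;    /* remember the reserved bits */
}

static void ctrl_write(uint32_t bits)
{
	hw_reg = ctrl_base | (bits & KNOWN_MASK); /* reserved bits preserved */
}

int main(void)
{
	ctrl_init();
	ctrl_write(CTRL_IBRS);
	printf("reg = 0x%x\n", (unsigned)hw_reg); /* 0xA1: 0xA0 kept, IBRS set */
	return 0;
}
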
151 +diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
152 +index 54e0ca6ed730..86b6464b4525 100644
153 +--- a/drivers/net/dsa/microchip/ksz_common.c
154 ++++ b/drivers/net/dsa/microchip/ksz_common.c
155 +@@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_device *dev)
156 + {
157 + int i;
158 +
159 +- mutex_init(&dev->reg_mutex);
160 +- mutex_init(&dev->stats_mutex);
161 +- mutex_init(&dev->alu_mutex);
162 +- mutex_init(&dev->vlan_mutex);
163 +-
164 + dev->ds->ops = &ksz_switch_ops;
165 +
166 + for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
167 +@@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev)
168 + if (dev->pdata)
169 + dev->chip_id = dev->pdata->chip_id;
170 +
171 ++ mutex_init(&dev->reg_mutex);
172 ++ mutex_init(&dev->stats_mutex);
173 ++ mutex_init(&dev->alu_mutex);
174 ++ mutex_init(&dev->vlan_mutex);
175 ++
176 + if (ksz_switch_detect(dev))
177 + return -EINVAL;
178 +
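
The ksz_common.c hunk above moves the mutex_init() calls from
ksz_switch_init() into ksz_switch_register(), so the locks exist before
ksz_switch_detect() can take them. The rule it enforces — initialize a lock
ahead of the first function that may acquire it — in a small pthread sketch
(stand-in names, not the driver's API):

#include <pthread.h>
#include <stdio.h>

struct dev {
	pthread_mutex_t reg_mutex;
	int chip_id;
};

static int detect(struct dev *d)         /* locks internally, like detect */
{
	pthread_mutex_lock(&d->reg_mutex);
	d->chip_id = 0x9477;                 /* pretend register read */
	pthread_mutex_unlock(&d->reg_mutex);
	return 0;
}

static int dev_register(struct dev *d)
{
	pthread_mutex_init(&d->reg_mutex, NULL); /* init first... */
	return detect(d);                        /* ...then code that locks */
}

int main(void)
{
	struct dev d;
	int ret = dev_register(&d);

	printf("detect=%d chip=0x%x\n", ret, d.chip_id);
	return 0;
}
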
179 +diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
180 +index d721ccf7d8be..38e399e0f30e 100644
181 +--- a/drivers/net/dsa/mv88e6xxx/global1.c
182 ++++ b/drivers/net/dsa/mv88e6xxx/global1.c
183 +@@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
184 + if (err)
185 + return err;
186 +
187 ++ /* Keep the histogram mode bits */
188 ++ val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
189 + val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
190 +
191 + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
192 +diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
193 +index c57238fce863..7b6859e4924e 100644
194 +--- a/drivers/net/ethernet/broadcom/bcmsysport.c
195 ++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
196 +@@ -1897,9 +1897,6 @@ static void bcm_sysport_netif_start(struct net_device *dev)
197 + intrl2_1_mask_clear(priv, 0xffffffff);
198 + else
199 + intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
200 +-
201 +- /* Last call before we start the real business */
202 +- netif_tx_start_all_queues(dev);
203 + }
204 +
205 + static void rbuf_init(struct bcm_sysport_priv *priv)
206 +@@ -2045,6 +2042,8 @@ static int bcm_sysport_open(struct net_device *dev)
207 +
208 + bcm_sysport_netif_start(dev);
209 +
210 ++ netif_tx_start_all_queues(dev);
211 ++
212 + return 0;
213 +
214 + out_clear_rx_int:
215 +@@ -2068,7 +2067,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
216 + struct bcm_sysport_priv *priv = netdev_priv(dev);
217 +
218 + /* stop all software from updating hardware */
219 +- netif_tx_stop_all_queues(dev);
220 ++ netif_tx_disable(dev);
221 + napi_disable(&priv->napi);
222 + cancel_work_sync(&priv->dim.dim.work);
223 + phy_stop(dev->phydev);
224 +@@ -2654,12 +2653,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d)
225 + if (!netif_running(dev))
226 + return 0;
227 +
228 ++ netif_device_detach(dev);
229 ++
230 + bcm_sysport_netif_stop(dev);
231 +
232 + phy_suspend(dev->phydev);
233 +
234 +- netif_device_detach(dev);
235 +-
236 + /* Disable UniMAC RX */
237 + umac_enable_set(priv, CMD_RX_EN, 0);
238 +
239 +@@ -2743,8 +2742,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
240 + goto out_free_rx_ring;
241 + }
242 +
243 +- netif_device_attach(dev);
244 +-
245 + /* RX pipe enable */
246 + topctrl_writel(priv, 0, RX_FLUSH_CNTL);
247 +
248 +@@ -2789,6 +2786,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
249 +
250 + bcm_sysport_netif_start(dev);
251 +
252 ++ netif_device_attach(dev);
253 ++
254 + return 0;
255 +
256 + out_free_rx_ring:
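
The bcmsysport.c changes above reorder teardown: the device is detached on
suspend before the transmit path is stopped, and netif_tx_disable() — which
takes the tx queue locks and so waits out in-flight transmits — replaces
netif_tx_stop_all_queues(). A rough userspace analogue of that "mark gone
first, then drain" ordering, using C11 atomics in place of the netdev
helpers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_bool present = true;
static atomic_int  in_flight;

static int xmit(void)                    /* hot path: the stack sending */
{
	if (!atomic_load(&present))
		return -1;                       /* detached: reject new work */
	atomic_fetch_add(&in_flight, 1);
	/* ... hand the frame to DMA ... */
	atomic_fetch_sub(&in_flight, 1);
	return 0;
}

static void suspend(void)
{
	atomic_store(&present, false);       /* 1: no new senders */
	while (atomic_load(&in_flight))      /* 2: drain, as netif_tx_disable */
		;                                /*    waits on the queue locks */
	/* 3: now safe to stop DMA and clocks */
}

int main(void)
{
	xmit();
	suspend();
	puts(xmit() ? "rejected" : "sent");  /* prints "rejected" */
	return 0;
}
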
257 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
258 +index 20c1681bb1af..2d6f090bf644 100644
259 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
260 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
261 +@@ -2855,7 +2855,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
262 +
263 + umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
264 +
265 +- netif_tx_start_all_queues(dev);
266 + bcmgenet_enable_tx_napi(priv);
267 +
268 + /* Monitor link interrupts now */
269 +@@ -2937,6 +2936,8 @@ static int bcmgenet_open(struct net_device *dev)
270 +
271 + bcmgenet_netif_start(dev);
272 +
273 ++ netif_tx_start_all_queues(dev);
274 ++
275 + return 0;
276 +
277 + err_irq1:
278 +@@ -2958,7 +2959,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
279 + struct bcmgenet_priv *priv = netdev_priv(dev);
280 +
281 + bcmgenet_disable_tx_napi(priv);
282 +- netif_tx_stop_all_queues(dev);
283 ++ netif_tx_disable(dev);
284 +
285 + /* Disable MAC receive */
286 + umac_enable_set(priv, CMD_RX_EN, false);
287 +@@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct device *d)
288 + if (!netif_running(dev))
289 + return 0;
290 +
291 ++ netif_device_detach(dev);
292 ++
293 + bcmgenet_netif_stop(dev);
294 +
295 + if (!device_may_wakeup(d))
296 + phy_suspend(dev->phydev);
297 +
298 +- netif_device_detach(dev);
299 +-
300 + /* Prepare the device for Wake-on-LAN and switch to the slow clock */
301 + if (device_may_wakeup(d) && priv->wolopts) {
302 + ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
303 +@@ -3700,8 +3701,6 @@ static int bcmgenet_resume(struct device *d)
304 + /* Always enable ring 16 - descriptor ring */
305 + bcmgenet_enable_dma(priv, dma_ctrl);
306 +
307 +- netif_device_attach(dev);
308 +-
309 + if (!device_may_wakeup(d))
310 + phy_resume(dev->phydev);
311 +
312 +@@ -3710,6 +3709,8 @@ static int bcmgenet_resume(struct device *d)
313 +
314 + bcmgenet_netif_start(dev);
315 +
316 ++ netif_device_attach(dev);
317 ++
318 + return 0;
319 +
320 + out_clk_disable:
321 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
322 +index e6f28c7942ab..a12962702611 100644
323 +--- a/drivers/net/ethernet/broadcom/tg3.c
324 ++++ b/drivers/net/ethernet/broadcom/tg3.c
325 +@@ -12426,6 +12426,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
326 + {
327 + struct tg3 *tp = netdev_priv(dev);
328 + int i, irq_sync = 0, err = 0;
329 ++ bool reset_phy = false;
330 +
331 + if ((ering->rx_pending > tp->rx_std_ring_mask) ||
332 + (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
333 +@@ -12457,7 +12458,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
334 +
335 + if (netif_running(dev)) {
336 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
337 +- err = tg3_restart_hw(tp, false);
338 ++ /* Reset PHY to avoid PHY lock up */
339 ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
340 ++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
341 ++ tg3_asic_rev(tp) == ASIC_REV_5720)
342 ++ reset_phy = true;
343 ++
344 ++ err = tg3_restart_hw(tp, reset_phy);
345 + if (!err)
346 + tg3_netif_start(tp);
347 + }
348 +@@ -12491,6 +12498,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
349 + {
350 + struct tg3 *tp = netdev_priv(dev);
351 + int err = 0;
352 ++ bool reset_phy = false;
353 +
354 + if (tp->link_config.autoneg == AUTONEG_ENABLE)
355 + tg3_warn_mgmt_link_flap(tp);
356 +@@ -12581,7 +12589,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
357 +
358 + if (netif_running(dev)) {
359 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
360 +- err = tg3_restart_hw(tp, false);
361 ++ /* Reset PHY to avoid PHY lock up */
362 ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
363 ++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
364 ++ tg3_asic_rev(tp) == ASIC_REV_5720)
365 ++ reset_phy = true;
366 ++
367 ++ err = tg3_restart_hw(tp, reset_phy);
368 + if (!err)
369 + tg3_netif_start(tp);
370 + }
371 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
372 +index 699ef942b615..7661064c815b 100644
373 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
374 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
375 +@@ -1545,7 +1545,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
376 + tx_crq.v1.sge_len = cpu_to_be32(skb->len);
377 + tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
378 +
379 +- if (adapter->vlan_header_insertion) {
380 ++ if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
381 + tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
382 + tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
383 + }
384 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
385 +index 0f189f873859..16ceeb1b2c9d 100644
386 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
387 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
388 +@@ -566,6 +566,7 @@ struct mlx5e_rq {
389 +
390 + unsigned long state;
391 + int ix;
392 ++ unsigned int hw_mtu;
393 +
394 + struct net_dim dim; /* Dynamic Interrupt Moderation */
395 +
396 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
397 +index 24e3b564964f..12e1682f940b 100644
398 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
399 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
400 +@@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
401 +
402 + eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
403 + *speed = mlx5e_port_ptys2speed(eth_proto_oper);
404 +- if (!(*speed)) {
405 +- mlx5_core_warn(mdev, "cannot get port speed\n");
406 ++ if (!(*speed))
407 + err = -EINVAL;
408 +- }
409 +
410 + return err;
411 + }
412 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
413 +index c047da8752da..eac245a93f91 100644
414 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
415 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
416 +@@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
417 + int err;
418 +
419 + err = mlx5e_port_linkspeed(priv->mdev, &speed);
420 +- if (err)
421 ++ if (err) {
422 ++ mlx5_core_warn(priv->mdev, "cannot get port speed\n");
423 + return 0;
424 ++ }
425 +
426 + xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
427 +
428 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
429 +index f291d1bf1558..faa84b45e20a 100644
430 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
431 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
432 +@@ -492,6 +492,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
433 + rq->channel = c;
434 + rq->ix = c->ix;
435 + rq->mdev = mdev;
436 ++ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
437 + rq->stats = &c->priv->channel_stats[c->ix].rq;
438 +
439 + rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
440 +@@ -1610,13 +1611,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
441 + int err;
442 + u32 i;
443 +
444 ++ err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
445 ++ if (err)
446 ++ return err;
447 ++
448 + err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
449 + &cq->wq_ctrl);
450 + if (err)
451 + return err;
452 +
453 +- mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
454 +-
455 + mcq->cqe_sz = 64;
456 + mcq->set_ci_db = cq->wq_ctrl.db.db;
457 + mcq->arm_db = cq->wq_ctrl.db.db + 1;
458 +@@ -1674,6 +1677,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
459 + int eqn;
460 + int err;
461 +
462 ++ err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
463 ++ if (err)
464 ++ return err;
465 ++
466 + inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
467 + sizeof(u64) * cq->wq_ctrl.buf.npages;
468 + in = kvzalloc(inlen, GFP_KERNEL);
469 +@@ -1687,8 +1694,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
470 + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
471 + (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
472 +
473 +- mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
474 +-
475 + MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
476 + MLX5_SET(cqc, cqc, c_eqn, eqn);
477 + MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
478 +@@ -1908,6 +1913,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
479 + int err;
480 + int eqn;
481 +
482 ++ err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
483 ++ if (err)
484 ++ return err;
485 ++
486 + c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
487 + if (!c)
488 + return -ENOMEM;
489 +@@ -1924,7 +1933,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
490 + c->xdp = !!params->xdp_prog;
491 + c->stats = &priv->channel_stats[ix].ch;
492 +
493 +- mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
494 + c->irq_desc = irq_to_desc(irq);
495 +
496 + netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
497 +@@ -3566,6 +3574,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
498 + return 0;
499 + }
500 +
501 ++#ifdef CONFIG_MLX5_ESWITCH
502 + static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
503 + {
504 + struct mlx5e_priv *priv = netdev_priv(netdev);
505 +@@ -3578,6 +3587,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
506 +
507 + return 0;
508 + }
509 ++#endif
510 +
511 + static int set_feature_rx_all(struct net_device *netdev, bool enable)
512 + {
513 +@@ -3676,7 +3686,9 @@ static int mlx5e_set_features(struct net_device *netdev,
514 + err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
515 + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
516 + set_feature_cvlan_filter);
517 ++#ifdef CONFIG_MLX5_ESWITCH
518 + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
519 ++#endif
520 + err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
521 + err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
522 + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
523 +@@ -3747,10 +3759,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
524 + }
525 +
526 + if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
527 ++ bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
528 + u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
529 + u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
530 +
531 +- reset = reset && (ppw_old != ppw_new);
532 ++ reset = reset && (is_linear || (ppw_old != ppw_new));
533 + }
534 +
535 + if (!reset) {
536 +@@ -4685,7 +4698,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
537 + FT_CAP(modify_root) &&
538 + FT_CAP(identified_miss_table_mode) &&
539 + FT_CAP(flow_table_modify)) {
540 ++#ifdef CONFIG_MLX5_ESWITCH
541 + netdev->hw_features |= NETIF_F_HW_TC;
542 ++#endif
543 + #ifdef CONFIG_MLX5_EN_ARFS
544 + netdev->hw_features |= NETIF_F_NTUPLE;
545 + #endif
546 +@@ -4958,11 +4973,21 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
547 + {
548 + struct mlx5_core_dev *mdev = priv->mdev;
549 + const struct mlx5e_profile *profile;
550 ++ int max_nch;
551 + int err;
552 +
553 + profile = priv->profile;
554 + clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
555 +
556 ++ /* max number of channels may have changed */
557 ++ max_nch = mlx5e_get_max_num_channels(priv->mdev);
558 ++ if (priv->channels.params.num_channels > max_nch) {
559 ++ mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
560 ++ priv->channels.params.num_channels = max_nch;
561 ++ mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
562 ++ MLX5E_INDIR_RQT_SIZE, max_nch);
563 ++ }
564 ++
565 + err = profile->init_tx(priv);
566 + if (err)
567 + goto out;
568 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
569 +index a144146b769c..d543a5cff049 100644
570 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
571 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
572 +@@ -1064,6 +1064,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
573 + u32 frag_size;
574 + bool consumed;
575 +
576 ++ /* Check packet size. Note LRO doesn't use linear SKB */
577 ++ if (unlikely(cqe_bcnt > rq->hw_mtu)) {
578 ++ rq->stats->oversize_pkts_sw_drop++;
579 ++ return NULL;
580 ++ }
581 ++
582 + va = page_address(di->page) + head_offset;
583 + data = va + rx_headroom;
584 + frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
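
The en_rx.c hunk above adds a sanity check before building a linear SKB: if
the completion reports more bytes than the MTU captured at ring setup
(rq->hw_mtu, added in en.h earlier in this patch), the packet is counted and
dropped rather than trusted. A compact sketch of the same guard with
illustrative names:

#include <stdint.h>
#include <stdio.h>

struct rq {
	unsigned int  hw_mtu;                /* frame size set at ring init */
	unsigned long oversize_drops;
};

static int rx_linear(struct rq *rq, uint32_t cqe_bcnt)
{
	if (cqe_bcnt > rq->hw_mtu) {         /* hw claims too many bytes */
		rq->oversize_drops++;            /* count it, build nothing */
		return -1;
	}
	return 0;                            /* safe to wrap in a linear skb */
}

int main(void)
{
	struct rq rq = { .hw_mtu = 1514 };
	int ok = rx_linear(&rq, 1514);
	int bad = rx_linear(&rq, 2000);

	printf("1514:%d 2000:%d drops:%lu\n", ok, bad, rq.oversize_drops);
	return 0;
}
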
585 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
586 +index 35ded91203f5..4382ef85488c 100644
587 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
588 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
589 +@@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
590 + return 1;
591 + }
592 +
593 +-#ifdef CONFIG_INET
594 +-/* loopback test */
595 +-#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
596 +-static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
597 +-#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
598 +-
599 + struct mlx5ehdr {
600 + __be32 version;
601 + __be64 magic;
602 +- char text[ETH_GSTRING_LEN];
603 + };
604 +
605 ++#ifdef CONFIG_INET
606 ++/* loopback test */
607 ++#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
608 ++ sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
609 ++#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
610 ++
611 + static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
612 + {
613 + struct sk_buff *skb = NULL;
614 +@@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
615 + struct ethhdr *ethh;
616 + struct udphdr *udph;
617 + struct iphdr *iph;
618 +- int datalen, iplen;
619 +-
620 +- datalen = MLX5E_TEST_PKT_SIZE -
621 +- (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
622 ++ int iplen;
623 +
624 + skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
625 + if (!skb) {
626 +@@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
627 + /* Fill UDP header */
628 + udph->source = htons(9);
629 + udph->dest = htons(9); /* Discard Protocol */
630 +- udph->len = htons(datalen + sizeof(struct udphdr));
631 ++ udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
632 + udph->check = 0;
633 +
634 + /* Fill IP header */
635 +@@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
636 + iph->ttl = 32;
637 + iph->version = 4;
638 + iph->protocol = IPPROTO_UDP;
639 +- iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
640 ++ iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
641 ++ sizeof(struct mlx5ehdr);
642 + iph->tot_len = htons(iplen);
643 + iph->frag_off = 0;
644 + iph->saddr = 0;
645 +@@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
646 + mlxh = skb_put(skb, sizeof(*mlxh));
647 + mlxh->version = 0;
648 + mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
649 +- strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
650 +- datalen -= sizeof(*mlxh);
651 +- skb_put_zero(skb, datalen);
652 +
653 + skb->csum = 0;
654 + skb->ip_summed = CHECKSUM_PARTIAL;
655 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
656 +index 6839481f7697..d57d51c4e658 100644
657 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
658 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
659 +@@ -82,6 +82,7 @@ static const struct counter_desc sw_stats_desc[] = {
660 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
661 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
662 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
663 ++ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
664 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
665 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
666 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
667 +@@ -158,6 +159,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
668 + s->rx_wqe_err += rq_stats->wqe_err;
669 + s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
670 + s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
671 ++ s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
672 + s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
673 + s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
674 + s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
675 +@@ -1148,6 +1150,7 @@ static const struct counter_desc rq_stats_desc[] = {
676 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
677 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
678 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
679 ++ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
680 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
681 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
682 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
683 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
684 +index a4c035aedd46..c1064af9d54c 100644
685 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
686 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
687 +@@ -95,6 +95,7 @@ struct mlx5e_sw_stats {
688 + u64 rx_wqe_err;
689 + u64 rx_mpwqe_filler_cqes;
690 + u64 rx_mpwqe_filler_strides;
691 ++ u64 rx_oversize_pkts_sw_drop;
692 + u64 rx_buff_alloc_err;
693 + u64 rx_cqe_compress_blks;
694 + u64 rx_cqe_compress_pkts;
695 +@@ -190,6 +191,7 @@ struct mlx5e_rq_stats {
696 + u64 wqe_err;
697 + u64 mpwqe_filler_cqes;
698 + u64 mpwqe_filler_strides;
699 ++ u64 oversize_pkts_sw_drop;
700 + u64 buff_alloc_err;
701 + u64 cqe_compress_blks;
702 + u64 cqe_compress_pkts;
703 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
704 +index 85796727093e..3092c59c0dc7 100644
705 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
706 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
707 +@@ -1310,31 +1310,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
708 + inner_headers);
709 + }
710 +
711 +- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
712 +- struct flow_dissector_key_eth_addrs *key =
713 ++ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
714 ++ struct flow_dissector_key_basic *key =
715 + skb_flow_dissector_target(f->dissector,
716 +- FLOW_DISSECTOR_KEY_ETH_ADDRS,
717 ++ FLOW_DISSECTOR_KEY_BASIC,
718 + f->key);
719 +- struct flow_dissector_key_eth_addrs *mask =
720 ++ struct flow_dissector_key_basic *mask =
721 + skb_flow_dissector_target(f->dissector,
722 +- FLOW_DISSECTOR_KEY_ETH_ADDRS,
723 ++ FLOW_DISSECTOR_KEY_BASIC,
724 + f->mask);
725 ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
726 ++ ntohs(mask->n_proto));
727 ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
728 ++ ntohs(key->n_proto));
729 +
730 +- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
731 +- dmac_47_16),
732 +- mask->dst);
733 +- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
734 +- dmac_47_16),
735 +- key->dst);
736 +-
737 +- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
738 +- smac_47_16),
739 +- mask->src);
740 +- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
741 +- smac_47_16),
742 +- key->src);
743 +-
744 +- if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
745 ++ if (mask->n_proto)
746 + *match_level = MLX5_MATCH_L2;
747 + }
748 +
749 +@@ -1368,9 +1358,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
750 +
751 + *match_level = MLX5_MATCH_L2;
752 + }
753 +- } else {
754 ++ } else if (*match_level != MLX5_MATCH_NONE) {
755 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
756 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
757 ++ *match_level = MLX5_MATCH_L2;
758 + }
759 +
760 + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
761 +@@ -1408,21 +1399,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
762 + }
763 + }
764 +
765 +- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
766 +- struct flow_dissector_key_basic *key =
767 ++ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
768 ++ struct flow_dissector_key_eth_addrs *key =
769 + skb_flow_dissector_target(f->dissector,
770 +- FLOW_DISSECTOR_KEY_BASIC,
771 ++ FLOW_DISSECTOR_KEY_ETH_ADDRS,
772 + f->key);
773 +- struct flow_dissector_key_basic *mask =
774 ++ struct flow_dissector_key_eth_addrs *mask =
775 + skb_flow_dissector_target(f->dissector,
776 +- FLOW_DISSECTOR_KEY_BASIC,
777 ++ FLOW_DISSECTOR_KEY_ETH_ADDRS,
778 + f->mask);
779 +- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
780 +- ntohs(mask->n_proto));
781 +- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
782 +- ntohs(key->n_proto));
783 +
784 +- if (mask->n_proto)
785 ++ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
786 ++ dmac_47_16),
787 ++ mask->dst);
788 ++ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
789 ++ dmac_47_16),
790 ++ key->dst);
791 ++
792 ++ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
793 ++ smac_47_16),
794 ++ mask->src);
795 ++ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
796 ++ smac_47_16),
797 ++ key->src);
798 ++
799 ++ if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
800 + *match_level = MLX5_MATCH_L2;
801 + }
802 +
803 +@@ -1449,10 +1450,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
804 +
805 + /* the HW doesn't need L3 inline to match on frag=no */
806 + if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
807 +- *match_level = MLX5_INLINE_MODE_L2;
808 ++ *match_level = MLX5_MATCH_L2;
809 + /* *** L2 attributes parsing up to here *** */
810 + else
811 +- *match_level = MLX5_INLINE_MODE_IP;
812 ++ *match_level = MLX5_MATCH_L3;
813 + }
814 + }
815 +
816 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
817 +index b8ee9101c506..b5a8769a5bfd 100644
818 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
819 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
820 +@@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule {
821 + };
822 +
823 + static const struct rhashtable_params rhash_sa = {
824 +- .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
825 +- .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
826 ++ /* Keep out "cmd" field from the key as it's
827 ++ * value is not constant during the lifetime
828 ++ * of the key object.
829 ++ */
830 ++ .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
831 ++ FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
832 ++ .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
833 ++ FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
834 + .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
835 + .automatic_shrinking = true,
836 + .min_size = 1,
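
The rhash_sa change above shrinks the hash key so it no longer covers the
hw_sa "cmd" word, whose value changes over the object's lifetime and must
therefore never feed the hash. The offset/length arithmetic — shift the key
start past the mutable field and shorten it by the same amount — in a
standalone sketch (field layout assumed, as the hunk's math implies cmd
leads the struct):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hw_sa {
	uint32_t cmd;                        /* mutable: add vs. delete */
	uint8_t  key[16];                    /* immutable SA identity */
	uint32_t spi;
};

struct sa_ctx {
	struct hw_sa hw_sa;
	/* hash node, flow context, ... */
};

/* key covers hw_sa minus its leading cmd word */
#define SA_KEY_OFFSET (offsetof(struct sa_ctx, hw_sa) + sizeof(uint32_t))
#define SA_KEY_LEN    (sizeof(struct hw_sa) - sizeof(uint32_t))

int main(void)
{
	printf("key offset %zu, key len %zu (cmd excluded)\n",
	       (size_t)SA_KEY_OFFSET, (size_t)SA_KEY_LEN);
	return 0;
}
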
837 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
838 +index e3797a44e074..5b7fe8264144 100644
839 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
840 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
841 +@@ -502,9 +502,9 @@ static int mlx5i_close(struct net_device *netdev)
842 +
843 + netif_carrier_off(epriv->netdev);
844 + mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
845 +- mlx5i_uninit_underlay_qp(epriv);
846 + mlx5e_deactivate_priv_channels(epriv);
847 + mlx5e_close_channels(&epriv->channels);
848 ++ mlx5i_uninit_underlay_qp(epriv);
849 + unlock:
850 + mutex_unlock(&epriv->state_lock);
851 + return 0;
852 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
853 +index 30bb2c533cec..ada644d90029 100644
854 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
855 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
856 +@@ -3519,7 +3519,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
857 + burst_size = 7;
858 + break;
859 + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
860 +- is_bytes = true;
861 + rate = 4 * 1024;
862 + burst_size = 4;
863 + break;
864 +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
865 +index 0afc3d335d56..d11c16aeb19a 100644
866 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
867 ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
868 +@@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
869 + struct net_device *real_dev,
870 + struct rmnet_endpoint *ep)
871 + {
872 +- struct rmnet_priv *priv;
873 ++ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
874 + int rc;
875 +
876 + if (ep->egress_dev)
877 +@@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
878 + rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
879 + rmnet_dev->hw_features |= NETIF_F_SG;
880 +
881 ++ priv->real_dev = real_dev;
882 ++
883 + rc = register_netdevice(rmnet_dev);
884 + if (!rc) {
885 + ep->egress_dev = rmnet_dev;
886 +@@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
887 +
888 + rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
889 +
890 +- priv = netdev_priv(rmnet_dev);
891 + priv->mux_id = id;
892 +- priv->real_dev = real_dev;
893 +
894 + netdev_dbg(rmnet_dev, "rmnet dev created\n");
895 + }
896 +diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
897 +index 33265747bf39..0fbcedcdf6e2 100644
898 +--- a/drivers/net/phy/mdio-gpio.c
899 ++++ b/drivers/net/phy/mdio-gpio.c
900 +@@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
901 + * assume the pin serves as pull-up. If direction is
902 + * output, the default value is high.
903 + */
904 +- gpiod_set_value(bitbang->mdo, 1);
905 ++ gpiod_set_value_cansleep(bitbang->mdo, 1);
906 + return;
907 + }
908 +
909 +@@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
910 + struct mdio_gpio_info *bitbang =
911 + container_of(ctrl, struct mdio_gpio_info, ctrl);
912 +
913 +- return gpiod_get_value(bitbang->mdio);
914 ++ return gpiod_get_value_cansleep(bitbang->mdio);
915 + }
916 +
917 + static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
918 +@@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
919 + container_of(ctrl, struct mdio_gpio_info, ctrl);
920 +
921 + if (bitbang->mdo)
922 +- gpiod_set_value(bitbang->mdo, what);
923 ++ gpiod_set_value_cansleep(bitbang->mdo, what);
924 + else
925 +- gpiod_set_value(bitbang->mdio, what);
926 ++ gpiod_set_value_cansleep(bitbang->mdio, what);
927 + }
928 +
929 + static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
930 +@@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
931 + struct mdio_gpio_info *bitbang =
932 + container_of(ctrl, struct mdio_gpio_info, ctrl);
933 +
934 +- gpiod_set_value(bitbang->mdc, what);
935 ++ gpiod_set_value_cansleep(bitbang->mdc, what);
936 + }
937 +
938 + static const struct mdiobb_ops mdio_gpio_ops = {
939 +diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
940 +index 7fc8508b5231..271e8adc39f1 100644
941 +--- a/drivers/net/phy/realtek.c
942 ++++ b/drivers/net/phy/realtek.c
943 +@@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[] = {
944 + .flags = PHY_HAS_INTERRUPT,
945 + }, {
946 + .phy_id = 0x001cc816,
947 +- .name = "RTL8201F 10/100Mbps Ethernet",
948 ++ .name = "RTL8201F Fast Ethernet",
949 + .phy_id_mask = 0x001fffff,
950 + .features = PHY_BASIC_FEATURES,
951 + .flags = PHY_HAS_INTERRUPT,
952 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
953 +index c52207beef88..573620771154 100644
954 +--- a/drivers/net/tun.c
955 ++++ b/drivers/net/tun.c
956 +@@ -1527,6 +1527,7 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
957 +
958 + if (!rx_batched || (!more && skb_queue_empty(queue))) {
959 + local_bh_disable();
960 ++ skb_record_rx_queue(skb, tfile->queue_index);
961 + netif_receive_skb(skb);
962 + local_bh_enable();
963 + return;
964 +@@ -1546,8 +1547,11 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
965 + struct sk_buff *nskb;
966 +
967 + local_bh_disable();
968 +- while ((nskb = __skb_dequeue(&process_queue)))
969 ++ while ((nskb = __skb_dequeue(&process_queue))) {
970 ++ skb_record_rx_queue(nskb, tfile->queue_index);
971 + netif_receive_skb(nskb);
972 ++ }
973 ++ skb_record_rx_queue(skb, tfile->queue_index);
974 + netif_receive_skb(skb);
975 + local_bh_enable();
976 + }
977 +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
978 +index 262e7a3c23cb..f2d01cb6f958 100644
979 +--- a/drivers/net/usb/smsc95xx.c
980 ++++ b/drivers/net/usb/smsc95xx.c
981 +@@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
982 + dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
983 + dev->net->flags |= IFF_MULTICAST;
984 + dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
985 ++ dev->net->min_mtu = ETH_MIN_MTU;
986 ++ dev->net->max_mtu = ETH_DATA_LEN;
987 + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
988 +
989 + pdata->dev = dev;
990 +@@ -1598,6 +1600,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
991 + return ret;
992 + }
993 +
994 ++ cancel_delayed_work_sync(&pdata->carrier_check);
995 ++
996 + if (pdata->suspend_flags) {
997 + netdev_warn(dev->net, "error during last resume\n");
998 + pdata->suspend_flags = 0;
999 +@@ -1840,6 +1844,11 @@ done:
1000 + */
1001 + if (ret && PMSG_IS_AUTO(message))
1002 + usbnet_resume(intf);
1003 ++
1004 ++ if (ret)
1005 ++ schedule_delayed_work(&pdata->carrier_check,
1006 ++ CARRIER_CHECK_DELAY);
1007 ++
1008 + return ret;
1009 + }
1010 +
1011 +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
1012 +index 8c2caa370e0f..ab9242e51d9e 100644
1013 +--- a/include/net/sctp/sctp.h
1014 ++++ b/include/net/sctp/sctp.h
1015 +@@ -608,4 +608,16 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
1016 + SCTP_DEFAULT_MINSEGMENT));
1017 + }
1018 +
1019 ++static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
1020 ++{
1021 ++ __u32 pmtu = sctp_dst_mtu(t->dst);
1022 ++
1023 ++ if (t->pathmtu == pmtu)
1024 ++ return true;
1025 ++
1026 ++ t->pathmtu = pmtu;
1027 ++
1028 ++ return false;
1029 ++}
1030 ++
1031 + #endif /* __net_sctp_h__ */
1032 +diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
1033 +index 34dd3d497f2c..c81feb373d3e 100644
1034 +--- a/include/uapi/linux/sctp.h
1035 ++++ b/include/uapi/linux/sctp.h
1036 +@@ -568,6 +568,8 @@ struct sctp_assoc_reset_event {
1037 +
1038 + #define SCTP_ASSOC_CHANGE_DENIED 0x0004
1039 + #define SCTP_ASSOC_CHANGE_FAILED 0x0008
1040 ++#define SCTP_STREAM_CHANGE_DENIED SCTP_ASSOC_CHANGE_DENIED
1041 ++#define SCTP_STREAM_CHANGE_FAILED SCTP_ASSOC_CHANGE_FAILED
1042 + struct sctp_stream_change_event {
1043 + __u16 strchange_type;
1044 + __u16 strchange_flags;
1045 +@@ -1151,6 +1153,7 @@ struct sctp_add_streams {
1046 + /* SCTP Stream schedulers */
1047 + enum sctp_sched_type {
1048 + SCTP_SS_FCFS,
1049 ++ SCTP_SS_DEFAULT = SCTP_SS_FCFS,
1050 + SCTP_SS_PRIO,
1051 + SCTP_SS_RR,
1052 + SCTP_SS_MAX = SCTP_SS_RR
1053 +diff --git a/kernel/cpu.c b/kernel/cpu.c
1054 +index 3adecda21444..0097acec1c71 100644
1055 +--- a/kernel/cpu.c
1056 ++++ b/kernel/cpu.c
1057 +@@ -2026,12 +2026,6 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
1058 + kobject_uevent(&dev->kobj, KOBJ_ONLINE);
1059 + }
1060 +
1061 +-/*
1062 +- * Architectures that need SMT-specific errata handling during SMT hotplug
1063 +- * should override this.
1064 +- */
1065 +-void __weak arch_smt_update(void) { };
1066 +-
1067 + static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
1068 + {
1069 + int cpu, ret = 0;
1070 +@@ -2058,10 +2052,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
1071 + */
1072 + cpuhp_offline_cpu_device(cpu);
1073 + }
1074 +- if (!ret) {
1075 ++ if (!ret)
1076 + cpu_smt_control = ctrlval;
1077 +- arch_smt_update();
1078 +- }
1079 + cpu_maps_update_done();
1080 + return ret;
1081 + }
1082 +@@ -2072,7 +2064,6 @@ static int cpuhp_smt_enable(void)
1083 +
1084 + cpu_maps_update_begin();
1085 + cpu_smt_control = CPU_SMT_ENABLED;
1086 +- arch_smt_update();
1087 + for_each_present_cpu(cpu) {
1088 + /* Skip online CPUs and CPUs on offline nodes */
1089 + if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
1090 +diff --git a/net/core/dev.c b/net/core/dev.c
1091 +index e16ba3625400..097c02101450 100644
1092 +--- a/net/core/dev.c
1093 ++++ b/net/core/dev.c
1094 +@@ -5630,6 +5630,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
1095 + skb->vlan_tci = 0;
1096 + skb->dev = napi->dev;
1097 + skb->skb_iif = 0;
1098 ++
1099 ++ /* eth_type_trans() assumes pkt_type is PACKET_HOST */
1100 ++ skb->pkt_type = PACKET_HOST;
1101 ++
1102 + skb->encapsulation = 0;
1103 + skb_shinfo(skb)->gso_type = 0;
1104 + skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
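
The dev.c hunk above is a recycle-reset fix: napi_reuse_skb() hands an skb
back for another receive, so every field the previous pass may have written
— here pkt_type, which eth_type_trans() assumes starts at PACKET_HOST — has
to go back to its default. The general rule in a tiny sketch with a stand-in
buffer object:

#include <stdio.h>
#include <string.h>

enum pkt_type { PACKET_HOST, PACKET_OTHERHOST };

struct buf {
	enum pkt_type pkt_type;
	unsigned int  vlan_tci;
	char          data[64];
};

static void buf_reuse(struct buf *b)     /* like napi_reuse_skb() */
{
	b->vlan_tci = 0;
	b->pkt_type = PACKET_HOST;           /* the reset this patch adds */
	memset(b->data, 0, sizeof(b->data));
}

int main(void)
{
	struct buf b = { .pkt_type = PACKET_OTHERHOST, .vlan_tci = 5 };

	buf_reuse(&b);
	printf("pkt_type=%d vlan=%u\n", b.pkt_type, b.vlan_tci); /* 0 0 */
	return 0;
}
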
1105 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
1106 +index ce9eeeb7c024..415b95f76b66 100644
1107 +--- a/net/core/flow_dissector.c
1108 ++++ b/net/core/flow_dissector.c
1109 +@@ -1026,8 +1026,8 @@ ip_proto_again:
1110 + break;
1111 + }
1112 +
1113 +- if (dissector_uses_key(flow_dissector,
1114 +- FLOW_DISSECTOR_KEY_PORTS)) {
1115 ++ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
1116 ++ !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
1117 + key_ports = skb_flow_dissector_target(flow_dissector,
1118 + FLOW_DISSECTOR_KEY_PORTS,
1119 + target_container);
1120 +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
1121 +index bcb11f3a27c0..760a9e52e02b 100644
1122 +--- a/net/ipv4/inet_fragment.c
1123 ++++ b/net/ipv4/inet_fragment.c
1124 +@@ -178,21 +178,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
1125 + }
1126 +
1127 + static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
1128 +- void *arg)
1129 ++ void *arg,
1130 ++ struct inet_frag_queue **prev)
1131 + {
1132 + struct inet_frags *f = nf->f;
1133 + struct inet_frag_queue *q;
1134 +- int err;
1135 +
1136 + q = inet_frag_alloc(nf, f, arg);
1137 +- if (!q)
1138 ++ if (!q) {
1139 ++ *prev = ERR_PTR(-ENOMEM);
1140 + return NULL;
1141 +-
1142 ++ }
1143 + mod_timer(&q->timer, jiffies + nf->timeout);
1144 +
1145 +- err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
1146 +- f->rhash_params);
1147 +- if (err < 0) {
1148 ++ *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
1149 ++ &q->node, f->rhash_params);
1150 ++ if (*prev) {
1151 + q->flags |= INET_FRAG_COMPLETE;
1152 + inet_frag_kill(q);
1153 + inet_frag_destroy(q);
1154 +@@ -204,22 +205,22 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
1155 + /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
1156 + struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
1157 + {
1158 +- struct inet_frag_queue *fq;
1159 ++ struct inet_frag_queue *fq = NULL, *prev;
1160 +
1161 + if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
1162 + return NULL;
1163 +
1164 + rcu_read_lock();
1165 +
1166 +- fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
1167 +- if (fq) {
1168 ++ prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
1169 ++ if (!prev)
1170 ++ fq = inet_frag_create(nf, key, &prev);
1171 ++ if (prev && !IS_ERR(prev)) {
1172 ++ fq = prev;
1173 + if (!refcount_inc_not_zero(&fq->refcnt))
1174 + fq = NULL;
1175 +- rcu_read_unlock();
1176 +- return fq;
1177 + }
1178 + rcu_read_unlock();
1179 +-
1180 +- return inet_frag_create(nf, key);
1181 ++ return fq;
1182 + }
1183 + EXPORT_SYMBOL(inet_frag_find);
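
The inet_fragment.c rework above closes a lookup/insert race: instead of
looking up, dropping out of the read path, and inserting (two CPUs can both
miss and both allocate), the insert itself reports any entry that won the
race, and the loser destroys its copy and takes a reference on the winner.
A simplified find-or-create under one mutex, which models the atomicity
rhashtable_lookup_get_insert_key() provides:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int key;
	int refcnt;
	struct entry *next;
};

static struct entry *table;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *find_or_create(int key)
{
	struct entry *e, *n;

	pthread_mutex_lock(&table_lock);
	for (e = table; e; e = e->next)
		if (e->key == key)
			goto found;                  /* someone beat us to it */
	n = calloc(1, sizeof(*n));
	if (n) {
		n->key = key;
		n->refcnt = 1;
		n->next = table;
		table = n;
	}
	e = n;
	goto out;
found:
	e->refcnt++;                         /* reuse the winner's entry */
out:
	pthread_mutex_unlock(&table_lock);
	return e;
}

int main(void)
{
	struct entry *a = find_or_create(7);
	struct entry *b = find_or_create(7);

	printf("same entry: %s, refcnt %d\n", a == b ? "yes" : "no", b->refcnt);
	return 0;
}
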
1184 +diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
1185 +index dde671e97829..c248e0dccbe1 100644
1186 +--- a/net/ipv4/ip_tunnel_core.c
1187 ++++ b/net/ipv4/ip_tunnel_core.c
1188 +@@ -80,7 +80,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
1189 +
1190 + iph->version = 4;
1191 + iph->ihl = sizeof(struct iphdr) >> 2;
1192 +- iph->frag_off = df;
1193 ++ iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
1194 + iph->protocol = proto;
1195 + iph->tos = tos;
1196 + iph->daddr = dst;
1197 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1198 +index 47e08c1b5bc3..72898cbef43d 100644
1199 +--- a/net/ipv4/tcp_input.c
1200 ++++ b/net/ipv4/tcp_input.c
1201 +@@ -4371,6 +4371,7 @@ static bool tcp_try_coalesce(struct sock *sk,
1202 + if (TCP_SKB_CB(from)->has_rxtstamp) {
1203 + TCP_SKB_CB(to)->has_rxtstamp = true;
1204 + to->tstamp = from->tstamp;
1205 ++ skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
1206 + }
1207 +
1208 + return true;
1209 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1210 +index 1cf00d857fc1..a33681dc4796 100644
1211 +--- a/net/ipv6/route.c
1212 ++++ b/net/ipv6/route.c
1213 +@@ -2263,8 +2263,7 @@ static void ip6_link_failure(struct sk_buff *skb)
1214 + if (rt) {
1215 + rcu_read_lock();
1216 + if (rt->rt6i_flags & RTF_CACHE) {
1217 +- if (dst_hold_safe(&rt->dst))
1218 +- rt6_remove_exception_rt(rt);
1219 ++ rt6_remove_exception_rt(rt);
1220 + } else {
1221 + struct fib6_info *from;
1222 + struct fib6_node *fn;
1223 +@@ -2392,10 +2391,13 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1224 +
1225 + void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1226 + {
1227 ++ int oif = sk->sk_bound_dev_if;
1228 + struct dst_entry *dst;
1229 +
1230 +- ip6_update_pmtu(skb, sock_net(sk), mtu,
1231 +- sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
1232 ++ if (!oif && skb->dev)
1233 ++ oif = l3mdev_master_ifindex(skb->dev);
1234 ++
1235 ++ ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
1236 +
1237 + dst = __sk_dst_get(sk);
1238 + if (!dst || !dst->obsolete ||
1239 +@@ -3266,8 +3268,8 @@ static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
1240 + if (cfg->fc_flags & RTF_GATEWAY &&
1241 + !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1242 + goto out;
1243 +- if (dst_hold_safe(&rt->dst))
1244 +- rc = rt6_remove_exception_rt(rt);
1245 ++
1246 ++ rc = rt6_remove_exception_rt(rt);
1247 + out:
1248 + return rc;
1249 + }
1250 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
1251 +index 82cdf9020b53..26f1d435696a 100644
1252 +--- a/net/l2tp/l2tp_core.c
1253 ++++ b/net/l2tp/l2tp_core.c
1254 +@@ -1490,12 +1490,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1255 + goto err_sock;
1256 + }
1257 +
1258 +- sk = sock->sk;
1259 +-
1260 +- sock_hold(sk);
1261 +- tunnel->sock = sk;
1262 + tunnel->l2tp_net = net;
1263 +-
1264 + pn = l2tp_pernet(net);
1265 +
1266 + spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1267 +@@ -1510,6 +1505,10 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1268 + list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1269 + spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1270 +
1271 ++ sk = sock->sk;
1272 ++ sock_hold(sk);
1273 ++ tunnel->sock = sk;
1274 ++
1275 + if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1276 + struct udp_tunnel_sock_cfg udp_cfg = {
1277 + .sk_user_data = tunnel,
1278 +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
1279 +index a6e6cae82c30..03e0fc8c183f 100644
1280 +--- a/net/rxrpc/ar-internal.h
1281 ++++ b/net/rxrpc/ar-internal.h
1282 +@@ -611,6 +611,7 @@ struct rxrpc_call {
1283 + * not hard-ACK'd packet follows this.
1284 + */
1285 + rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
1286 ++ u16 tx_backoff; /* Delay to insert due to Tx failure */
1287 +
1288 + /* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
1289 + * is fixed, we keep these numbers in terms of segments (ie. DATA
1290 +diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
1291 +index 8e7434e92097..468efc3660c0 100644
1292 +--- a/net/rxrpc/call_event.c
1293 ++++ b/net/rxrpc/call_event.c
1294 +@@ -123,6 +123,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
1295 + else
1296 + ack_at = expiry;
1297 +
1298 ++ ack_at += READ_ONCE(call->tx_backoff);
1299 + ack_at += now;
1300 + if (time_before(ack_at, call->ack_at)) {
1301 + WRITE_ONCE(call->ack_at, ack_at);
1302 +@@ -311,6 +312,7 @@ void rxrpc_process_call(struct work_struct *work)
1303 + container_of(work, struct rxrpc_call, processor);
1304 + rxrpc_serial_t *send_ack;
1305 + unsigned long now, next, t;
1306 ++ unsigned int iterations = 0;
1307 +
1308 + rxrpc_see_call(call);
1309 +
1310 +@@ -319,6 +321,11 @@ void rxrpc_process_call(struct work_struct *work)
1311 + call->debug_id, rxrpc_call_states[call->state], call->events);
1312 +
1313 + recheck_state:
1314 ++ /* Limit the number of times we do this before returning to the manager */
1315 ++ iterations++;
1316 ++ if (iterations > 5)
1317 ++ goto requeue;
1318 ++
1319 + if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
1320 + rxrpc_send_abort_packet(call);
1321 + goto recheck_state;
1322 +@@ -447,13 +454,16 @@ recheck_state:
1323 + rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
1324 +
1325 + /* other events may have been raised since we started checking */
1326 +- if (call->events && call->state < RXRPC_CALL_COMPLETE) {
1327 +- __rxrpc_queue_call(call);
1328 +- goto out;
1329 +- }
1330 ++ if (call->events && call->state < RXRPC_CALL_COMPLETE)
1331 ++ goto requeue;
1332 +
1333 + out_put:
1334 + rxrpc_put_call(call, rxrpc_call_put);
1335 + out:
1336 + _leave("");
1337 ++ return;
1338 ++
1339 ++requeue:
1340 ++ __rxrpc_queue_call(call);
1341 ++ goto out;
1342 + }
1343 +diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
1344 +index a141ee3ab812..345dc1c5fe72 100644
1345 +--- a/net/rxrpc/output.c
1346 ++++ b/net/rxrpc/output.c
1347 +@@ -34,6 +34,21 @@ struct rxrpc_abort_buffer {
1348 +
1349 + static const char rxrpc_keepalive_string[] = "";
1350 +
1351 ++/*
1352 ++ * Increase Tx backoff on transmission failure and clear it on success.
1353 ++ */
1354 ++static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
1355 ++{
1356 ++ if (ret < 0) {
1357 ++ u16 tx_backoff = READ_ONCE(call->tx_backoff);
1358 ++
1359 ++ if (tx_backoff < HZ)
1360 ++ WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
1361 ++ } else {
1362 ++ WRITE_ONCE(call->tx_backoff, 0);
1363 ++ }
1364 ++}
1365 ++
1366 + /*
1367 + * Arrange for a keepalive ping a certain time after we last transmitted. This
1368 + * lets the far side know we're still interested in this call and helps keep
1369 +@@ -210,6 +225,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
1370 + else
1371 + trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
1372 + rxrpc_tx_point_call_ack);
1373 ++ rxrpc_tx_backoff(call, ret);
1374 +
1375 + if (call->state < RXRPC_CALL_COMPLETE) {
1376 + if (ret < 0) {
1377 +@@ -218,7 +234,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
1378 + rxrpc_propose_ACK(call, pkt->ack.reason,
1379 + ntohs(pkt->ack.maxSkew),
1380 + ntohl(pkt->ack.serial),
1381 +- true, true,
1382 ++ false, true,
1383 + rxrpc_propose_ack_retry_tx);
1384 + } else {
1385 + spin_lock_bh(&call->lock);
1386 +@@ -300,7 +316,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
1387 + else
1388 + trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
1389 + rxrpc_tx_point_call_abort);
1390 +-
1391 ++ rxrpc_tx_backoff(call, ret);
1392 +
1393 + rxrpc_put_connection(conn);
1394 + return ret;
1395 +@@ -411,6 +427,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
1396 + else
1397 + trace_rxrpc_tx_packet(call->debug_id, &whdr,
1398 + rxrpc_tx_point_call_data_nofrag);
1399 ++ rxrpc_tx_backoff(call, ret);
1400 + if (ret == -EMSGSIZE)
1401 + goto send_fragmentable;
1402 +
1403 +@@ -445,9 +462,18 @@ done:
1404 + rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
1405 + rxrpc_timer_set_for_normal);
1406 + }
1407 +- }
1408 +
1409 +- rxrpc_set_keepalive(call);
1410 ++ rxrpc_set_keepalive(call);
1411 ++ } else {
1412 ++ /* Cancel the call if the initial transmission fails,
1413 ++ * particularly if that's due to network routing issues that
1414 ++ * aren't going away anytime soon. The layer above can arrange
1415 ++ * the retransmission.
1416 ++ */
1417 ++ if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
1418 ++ rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
1419 ++ RX_USER_ABORT, ret);
1420 ++ }
1421 +
1422 + _leave(" = %d [%u]", ret, call->peer->maxdata);
1423 + return ret;
1424 +@@ -506,6 +532,7 @@ send_fragmentable:
1425 + else
1426 + trace_rxrpc_tx_packet(call->debug_id, &whdr,
1427 + rxrpc_tx_point_call_data_frag);
1428 ++ rxrpc_tx_backoff(call, ret);
1429 +
1430 + up_write(&conn->params.local->defrag_sem);
1431 + goto done;
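
The output.c additions above give rxrpc a bounded, additive transmit
backoff: every send failure adds one jiffy of extra ACK/resend delay up to
HZ, and any success clears it. The state machine is small enough to run
standalone (plain variables in place of READ_ONCE/WRITE_ONCE, HZ assumed to
be 100):

#include <stdio.h>

#define HZ 100

static unsigned short tx_backoff;        /* like call->tx_backoff */

static void rxrpc_tx_backoff(int ret)
{
	if (ret < 0) {
		if (tx_backoff < HZ)
			tx_backoff++;                /* additive, capped at 1s */
	} else {
		tx_backoff = 0;                  /* success clears the penalty */
	}
}

int main(void)
{
	for (int i = 0; i < 150; i++)
		rxrpc_tx_backoff(-1);
	printf("after 150 failures: %u\n", tx_backoff); /* 100: capped */
	rxrpc_tx_backoff(0);
	printf("after one success:  %u\n", tx_backoff); /* 0 */
	return 0;
}
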
1432 +diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
1433 +index ad99a99f11f6..ca535a8585bc 100644
1434 +--- a/net/sched/act_pedit.c
1435 ++++ b/net/sched/act_pedit.c
1436 +@@ -201,7 +201,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
1437 + goto out_release;
1438 + }
1439 + } else {
1440 +- return err;
1441 ++ ret = err;
1442 ++ goto out_free;
1443 + }
1444 +
1445 + p = to_pedit(*a);
1446 +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
1447 +index 6fd9bdd93796..7fade7107f95 100644
1448 +--- a/net/sched/cls_flower.c
1449 ++++ b/net/sched/cls_flower.c
1450 +@@ -709,11 +709,23 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1451 + struct netlink_ext_ack *extack)
1452 + {
1453 + const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1454 +- int option_len, key_depth, msk_depth = 0;
1455 ++ int err, option_len, key_depth, msk_depth = 0;
1456 ++
1457 ++ err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
1458 ++ TCA_FLOWER_KEY_ENC_OPTS_MAX,
1459 ++ enc_opts_policy, extack);
1460 ++ if (err)
1461 ++ return err;
1462 +
1463 + nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1464 +
1465 + if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1466 ++ err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1467 ++ TCA_FLOWER_KEY_ENC_OPTS_MAX,
1468 ++ enc_opts_policy, extack);
1469 ++ if (err)
1470 ++ return err;
1471 ++
1472 + nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1473 + msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1474 + }
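
The cls_flower hunk above adds the missing nla_validate_nested() calls so
the nested TCA_FLOWER_KEY_ENC_OPTS attributes are length-checked before
anything walks them. The underlying rule — never iterate TLVs whose lengths
haven't been verified against the buffer — in a generic sketch (plain
structs, not the netlink API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint8_t type;
	uint8_t len;                         /* payload bytes that follow */
};

static int tlv_validate(const uint8_t *buf, size_t n)
{
	size_t off = 0;

	while (off + sizeof(struct tlv) <= n) {
		struct tlv h;

		memcpy(&h, buf + off, sizeof(h));
		if (off + sizeof(h) + h.len > n)
			return -1;                   /* attribute overruns buffer */
		off += sizeof(h) + h.len;
	}
	return off == n ? 0 : -1;            /* trailing garbage is an error */
}

int main(void)
{
	uint8_t ok[]  = { 1, 2, 0xAA, 0xBB };
	uint8_t bad[] = { 1, 200, 0xAA };    /* claims 200 payload bytes */

	printf("ok: %d, bad: %d\n", tlv_validate(ok, sizeof(ok)),
	       tlv_validate(bad, sizeof(bad))); /* 0, -1 */
	return 0;
}
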
1475 +diff --git a/net/sctp/output.c b/net/sctp/output.c
1476 +index 67939ad99c01..08601223b0bf 100644
1477 +--- a/net/sctp/output.c
1478 ++++ b/net/sctp/output.c
1479 +@@ -118,6 +118,9 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
1480 + sctp_transport_route(tp, NULL, sp);
1481 + if (asoc->param_flags & SPP_PMTUD_ENABLE)
1482 + sctp_assoc_sync_pmtu(asoc);
1483 ++ } else if (!sctp_transport_pmtu_check(tp)) {
1484 ++ if (asoc->param_flags & SPP_PMTUD_ENABLE)
1485 ++ sctp_assoc_sync_pmtu(asoc);
1486 + }
1487 +
1488 + if (asoc->pmtu_pending) {
1489 +diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
1490 +index 42191ed9902b..7bb8e5603298 100644
1491 +--- a/net/sctp/outqueue.c
1492 ++++ b/net/sctp/outqueue.c
1493 +@@ -212,7 +212,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
1494 + INIT_LIST_HEAD(&q->retransmit);
1495 + INIT_LIST_HEAD(&q->sacked);
1496 + INIT_LIST_HEAD(&q->abandoned);
1497 +- sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
1498 ++ sctp_sched_set_sched(asoc, SCTP_SS_DEFAULT);
1499 + }
1500 +
1501 + /* Free the outqueue structure and any related pending chunks.
1502 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1503 +index c1693e28aed4..876393cf5ed6 100644
1504 +--- a/net/sctp/socket.c
1505 ++++ b/net/sctp/socket.c
1506 +@@ -3958,32 +3958,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
1507 + unsigned int optlen)
1508 + {
1509 + struct sctp_assoc_value params;
1510 +- struct sctp_association *asoc;
1511 +- int retval = -EINVAL;
1512 +
1513 + if (optlen != sizeof(params))
1514 +- goto out;
1515 +-
1516 +- if (copy_from_user(&params, optval, optlen)) {
1517 +- retval = -EFAULT;
1518 +- goto out;
1519 +- }
1520 +-
1521 +- asoc = sctp_id2assoc(sk, params.assoc_id);
1522 +- if (asoc) {
1523 +- asoc->prsctp_enable = !!params.assoc_value;
1524 +- } else if (!params.assoc_id) {
1525 +- struct sctp_sock *sp = sctp_sk(sk);
1526 ++ return -EINVAL;
1527 +
1528 +- sp->ep->prsctp_enable = !!params.assoc_value;
1529 +- } else {
1530 +- goto out;
1531 +- }
1532 ++ if (copy_from_user(&params, optval, optlen))
1533 ++ return -EFAULT;
1534 +
1535 +- retval = 0;
1536 ++ sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
1537 +
1538 +-out:
1539 +- return retval;
1540 ++ return 0;
1541 + }
1542 +
1543 + static int sctp_setsockopt_default_prinfo(struct sock *sk,
1544 +diff --git a/net/sctp/stream.c b/net/sctp/stream.c
1545 +index ffb940d3b57c..3892e7630f3a 100644
1546 +--- a/net/sctp/stream.c
1547 ++++ b/net/sctp/stream.c
1548 +@@ -535,7 +535,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
1549 + goto out;
1550 + }
1551 +
1552 +- stream->incnt = incnt;
1553 + stream->outcnt = outcnt;
1554 +
1555 + asoc->strreset_outstanding = !!out + !!in;
1556 +diff --git a/net/tipc/discover.c b/net/tipc/discover.c
1557 +index 2830709957bd..c138d68e8a69 100644
1558 +--- a/net/tipc/discover.c
1559 ++++ b/net/tipc/discover.c
1560 +@@ -166,7 +166,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
1561 +
1562 + /* Apply trial address if we just left trial period */
1563 + if (!trial && !self) {
1564 +- tipc_net_finalize(net, tn->trial_addr);
1565 ++ tipc_sched_net_finalize(net, tn->trial_addr);
1566 ++ msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
1567 + msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
1568 + }
1569 +
1570 +@@ -300,14 +301,12 @@ static void tipc_disc_timeout(struct timer_list *t)
1571 + goto exit;
1572 + }
1573 +
1574 +- /* Trial period over ? */
1575 +- if (!time_before(jiffies, tn->addr_trial_end)) {
1576 +- /* Did we just leave it ? */
1577 +- if (!tipc_own_addr(net))
1578 +- tipc_net_finalize(net, tn->trial_addr);
1579 +-
1580 +- msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
1581 +- msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
1582 ++ /* Did we just leave trial period ? */
1583 ++ if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
1584 ++ mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
1585 ++ spin_unlock_bh(&d->lock);
1586 ++ tipc_sched_net_finalize(net, tn->trial_addr);
1587 ++ return;
1588 + }
1589 +
1590 + /* Adjust timeout interval according to discovery phase */
1591 +@@ -319,6 +318,8 @@ static void tipc_disc_timeout(struct timer_list *t)
1592 + d->timer_intv = TIPC_DISC_SLOW;
1593 + else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST)
1594 + d->timer_intv = TIPC_DISC_FAST;
1595 ++ msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
1596 ++ msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
1597 + }
1598 +
1599 + mod_timer(&d->timer, jiffies + d->timer_intv);
1600 +diff --git a/net/tipc/link.c b/net/tipc/link.c
1601 +index 201c3b5bc96b..836727e363c4 100644
1602 +--- a/net/tipc/link.c
1603 ++++ b/net/tipc/link.c
1604 +@@ -1594,14 +1594,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1605 + if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1606 + l->priority = peers_prio;
1607 +
1608 +- /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1609 +- if (msg_peer_stopping(hdr))
1610 ++ /* If peer is going down we want full re-establish cycle */
1611 ++ if (msg_peer_stopping(hdr)) {
1612 + rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1613 +- else if ((mtyp == RESET_MSG) || !link_is_up(l))
1614 ++ break;
1615 ++ }
1616 ++ /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1617 ++ if (mtyp == RESET_MSG || !link_is_up(l))
1618 + rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1619 +
1620 + /* ACTIVATE_MSG takes up link if it was already locally reset */
1621 +- if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1622 ++ if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
1623 + rc = TIPC_LINK_UP_EVT;
1624 +
1625 + l->peer_session = msg_session(hdr);
1626 +diff --git a/net/tipc/net.c b/net/tipc/net.c
1627 +index 62199cf5a56c..f076edb74338 100644
1628 +--- a/net/tipc/net.c
1629 ++++ b/net/tipc/net.c
1630 +@@ -104,6 +104,14 @@
1631 + * - A local spin_lock protecting the queue of subscriber events.
1632 + */
1633 +
1634 ++struct tipc_net_work {
1635 ++ struct work_struct work;
1636 ++ struct net *net;
1637 ++ u32 addr;
1638 ++};
1639 ++
1640 ++static void tipc_net_finalize(struct net *net, u32 addr);
1641 ++
1642 + int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
1643 + {
1644 + if (tipc_own_id(net)) {
1645 +@@ -119,17 +127,38 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
1646 + return 0;
1647 + }
1648 +
1649 +-void tipc_net_finalize(struct net *net, u32 addr)
1650 ++static void tipc_net_finalize(struct net *net, u32 addr)
1651 + {
1652 + struct tipc_net *tn = tipc_net(net);
1653 +
1654 +- if (!cmpxchg(&tn->node_addr, 0, addr)) {
1655 +- tipc_set_node_addr(net, addr);
1656 +- tipc_named_reinit(net);
1657 +- tipc_sk_reinit(net);
1658 +- tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
1659 +- TIPC_CLUSTER_SCOPE, 0, addr);
1660 +- }
1661 ++ if (cmpxchg(&tn->node_addr, 0, addr))
1662 ++ return;
1663 ++ tipc_set_node_addr(net, addr);
1664 ++ tipc_named_reinit(net);
1665 ++ tipc_sk_reinit(net);
1666 ++ tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
1667 ++ TIPC_CLUSTER_SCOPE, 0, addr);
1668 ++}
1669 ++
1670 ++static void tipc_net_finalize_work(struct work_struct *work)
1671 ++{
1672 ++ struct tipc_net_work *fwork;
1673 ++
1674 ++ fwork = container_of(work, struct tipc_net_work, work);
1675 ++ tipc_net_finalize(fwork->net, fwork->addr);
1676 ++ kfree(fwork);
1677 ++}
1678 ++
1679 ++void tipc_sched_net_finalize(struct net *net, u32 addr)
1680 ++{
1681 ++ struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
1682 ++
1683 ++ if (!fwork)
1684 ++ return;
1685 ++ INIT_WORK(&fwork->work, tipc_net_finalize_work);
1686 ++ fwork->net = net;
1687 ++ fwork->addr = addr;
1688 ++ schedule_work(&fwork->work);
1689 + }
1690 +
1691 + void tipc_net_stop(struct net *net)
1692 +diff --git a/net/tipc/net.h b/net/tipc/net.h
1693 +index 09ad02b50bb1..b7f2e364eb99 100644
1694 +--- a/net/tipc/net.h
1695 ++++ b/net/tipc/net.h
1696 +@@ -42,7 +42,7 @@
1697 + extern const struct nla_policy tipc_nl_net_policy[];
1698 +
1699 + int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
1700 +-void tipc_net_finalize(struct net *net, u32 addr);
1701 ++void tipc_sched_net_finalize(struct net *net, u32 addr);
1702 + void tipc_net_stop(struct net *net);
1703 + int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
1704 + int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
1705 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
1706 +index 0bf8ad486c5e..366ce0bf2658 100644
1707 +--- a/net/tipc/socket.c
1708 ++++ b/net/tipc/socket.c
1709 +@@ -1548,16 +1548,17 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1710 + /**
1711 + * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1712 + * @m: descriptor for message info
1713 +- * @msg: received message header
1714 ++ * @skb: received message buffer
1715 + * @tsk: TIPC port associated with message
1716 + *
1717 + * Note: Ancillary data is not captured if not requested by receiver.
1718 + *
1719 + * Returns 0 if successful, otherwise errno
1720 + */
1721 +-static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1722 ++static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1723 + struct tipc_sock *tsk)
1724 + {
1725 ++ struct tipc_msg *msg;
1726 + u32 anc_data[3];
1727 + u32 err;
1728 + u32 dest_type;
1729 +@@ -1566,6 +1567,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1730 +
1731 + if (likely(m->msg_controllen == 0))
1732 + return 0;
1733 ++ msg = buf_msg(skb);
1734 +
1735 + /* Optionally capture errored message object(s) */
1736 + err = msg ? msg_errcode(msg) : 0;
1737 +@@ -1576,6 +1578,9 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1738 + if (res)
1739 + return res;
1740 + if (anc_data[1]) {
1741 ++ if (skb_linearize(skb))
1742 ++ return -ENOMEM;
1743 ++ msg = buf_msg(skb);
1744 + res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1745 + msg_data(msg));
1746 + if (res)
1747 +@@ -1737,9 +1742,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1748 +
1749 + /* Collect msg meta data, including error code and rejected data */
1750 + tipc_sk_set_orig_addr(m, skb);
1751 +- rc = tipc_sk_anc_data_recv(m, hdr, tsk);
1752 ++ rc = tipc_sk_anc_data_recv(m, skb, tsk);
1753 + if (unlikely(rc))
1754 + goto exit;
1755 ++ hdr = buf_msg(skb);
1756 +
1757 + /* Capture data if non-error msg, otherwise just set return value */
1758 + if (likely(!err)) {
1759 +@@ -1849,9 +1855,10 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1760 + /* Collect msg meta data, incl. error code and rejected data */
1761 + if (!copied) {
1762 + tipc_sk_set_orig_addr(m, skb);
1763 +- rc = tipc_sk_anc_data_recv(m, hdr, tsk);
1764 ++ rc = tipc_sk_anc_data_recv(m, skb, tsk);
1765 + if (rc)
1766 + break;
1767 ++ hdr = buf_msg(skb);
1768 + }
1769 +
1770 + /* Copy data if msg ok, otherwise return error/partial data */