From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Sat, 05 Feb 2022 12:12:41
Message-Id: 1644063146.6a96e27b480549045257da0f3a2e6b813025b8bb.mpagano@gentoo
commit: 6a96e27b480549045257da0f3a2e6b813025b8bb
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb 5 12:12:26 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Feb 5 12:12:26 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6a96e27b

Linux patch 5.15.20

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1019_linux-5.15.20.patch | 985 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 989 insertions(+)

diff --git a/0000_README b/0000_README
index e7523966..a24b369c 100644
--- a/0000_README
+++ b/0000_README
@@ -119,6 +119,10 @@ Patch: 1018_linux-5.15.19.patch
From: http://www.kernel.org
Desc: Linux 5.15.19

+Patch: 1019_linux-5.15.20.patch
+From: http://www.kernel.org
+Desc: Linux 5.15.20
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1019_linux-5.15.20.patch b/1019_linux-5.15.20.patch
new file mode 100644
index 00000000..6a505802
--- /dev/null
+++ b/1019_linux-5.15.20.patch
@@ -0,0 +1,985 @@
+diff --git a/Makefile b/Makefile
+index 463d46a9e6171..3643400c15d8c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 19
++SUBLEVEL = 20
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 5580267fb3624..2d532c0fe8191 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -1738,18 +1738,18 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+ u32 val;
+ int ret;
+
+- ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
+- if (ret)
+- return ret;
++ if (enable) {
++ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
++ if (ret)
++ return ret;
+
+- val = HDMI_READ(HDMI_CEC_CNTRL_5);
+- val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET |
+- VC4_HDMI_CEC_CNT_TO_4700_US_MASK |
+- VC4_HDMI_CEC_CNT_TO_4500_US_MASK);
+- val |= ((4700 / usecs) << VC4_HDMI_CEC_CNT_TO_4700_US_SHIFT) |
+- ((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT);
++ val = HDMI_READ(HDMI_CEC_CNTRL_5);
++ val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET |
++ VC4_HDMI_CEC_CNT_TO_4700_US_MASK |
++ VC4_HDMI_CEC_CNT_TO_4500_US_MASK);
++ val |= ((4700 / usecs) << VC4_HDMI_CEC_CNT_TO_4700_US_SHIFT) |
++ ((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT);
+
+- if (enable) {
+ HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
+ VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
+ HDMI_WRITE(HDMI_CEC_CNTRL_5, val);
+@@ -1777,7 +1777,10 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
+ HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
+ VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
++
++ pm_runtime_put(&vc4_hdmi->pdev->dev);
+ }
++
+ return 0;
+ }
+
+@@ -1888,8 +1891,6 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
+ if (ret < 0)
+ goto err_remove_handlers;
+
+- pm_runtime_put(&vc4_hdmi->pdev->dev);
+-
+ return 0;
+
+ err_remove_handlers:
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 17a585adfb49c..e6883d52d230c 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
+ if (!channel->tx_ring)
+ break;
+
++ /* Deactivate the Tx timer */
+ del_timer_sync(&channel->tx_timer);
++ channel->tx_timer_active = 0;
+ }
+ }
+
+@@ -2555,6 +2557,14 @@ read_again:
+ buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
+ len += buf2_len;
+
++ if (buf2_len > rdata->rx.buf.dma_len) {
++ /* Hardware inconsistency within the descriptors
++ * that has resulted in a length underflow.
++ */
++ error = 1;
++ goto skip_data;
++ }
++
+ if (!skb) {
+ skb = xgbe_create_skb(pdata, napi, rdata,
+ buf1_len);
+@@ -2584,8 +2594,10 @@ skip_data:
+ if (!last || context_next)
+ goto read_again;
+
+- if (!skb)
++ if (!skb || error) {
++ dev_kfree_skb(skb);
+ goto next_packet;
++ }
+
+ /* Be sure we don't exceed the configured MTU */
+ max_len = netdev->mtu + ETH_HLEN;
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index ebcb2a30add09..276a0aa1ed4ab 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -6346,7 +6346,8 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
+ u32 mac_data;
+ u16 phy_data;
+
+- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
++ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
++ hw->mac.type >= e1000_pch_adp) {
+ /* Request ME configure the device for S0ix */
+ mac_data = er32(H2ME);
+ mac_data |= E1000_H2ME_START_DPG;
+@@ -6495,7 +6496,8 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ u16 phy_data;
+ u32 i = 0;
+
+- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
++ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
++ hw->mac.type >= e1000_pch_adp) {
+ /* Request ME unconfigure the device from S0ix */
+ mac_data = er32(H2ME);
+ mac_data &= ~E1000_H2ME_START_DPG;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 389df4d86ab4c..56a3a6d1dbe41 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -144,6 +144,7 @@ enum i40e_state_t {
+ __I40E_VIRTCHNL_OP_PENDING,
+ __I40E_RECOVERY_MODE,
+ __I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
++ __I40E_IN_REMOVE,
+ __I40E_VFS_RELEASING,
+ /* This must be last as it determines the size of the BITMAP */
+ __I40E_STATE_SIZE__,
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 20c8c0231e2c4..063ded36b902e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -5372,7 +5372,15 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
+ /* There is no need to reset BW when mqprio mode is on. */
+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ return 0;
+- if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
++
++ if (!vsi->mqprio_qopt.qopt.hw) {
++ if (pf->flags & I40E_FLAG_DCB_ENABLED)
++ goto skip_reset;
++
++ if (IS_ENABLED(CONFIG_I40E_DCB) &&
++ i40e_dcb_hw_get_num_tc(&pf->hw) == 1)
++ goto skip_reset;
++
+ ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+@@ -5380,6 +5388,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
+ vsi->seid);
+ return ret;
+ }
++
++skip_reset:
+ memset(&bw_data, 0, sizeof(bw_data));
+ bw_data.tc_valid_bits = enabled_tc;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+@@ -10853,6 +10863,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired)
+ {
+ int ret;
++
++ if (test_bit(__I40E_IN_REMOVE, pf->state))
++ return;
+ /* Now we wait for GRST to settle out.
+ * We don't have to delete the VEBs or VSIs from the hw switch
+ * because the reset will make them disappear.
+@@ -12212,6 +12225,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+
+ vsi->req_queue_pairs = queue_count;
+ i40e_prep_for_reset(pf);
++ if (test_bit(__I40E_IN_REMOVE, pf->state))
++ return pf->alloc_rss_size;
+
+ pf->alloc_rss_size = new_rss_size;
+
+@@ -13038,6 +13053,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+ if (need_reset)
+ i40e_prep_for_reset(pf);
+
++ /* VSI shall be deleted in a moment, just return EINVAL */
++ if (test_bit(__I40E_IN_REMOVE, pf->state))
++ return -EINVAL;
++
+ old_prog = xchg(&vsi->xdp_prog, prog);
+
+ if (need_reset) {
+@@ -15928,8 +15947,13 @@ static void i40e_remove(struct pci_dev *pdev)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
+
+- while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
++ /* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
++ * flags, once they are set, i40e_rebuild should not be called as
++ * i40e_prep_for_reset always returns early.
++ */
++ while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ usleep_range(1000, 2000);
++ set_bit(__I40E_IN_REMOVE, pf->state);
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
+@@ -16128,6 +16152,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
+ {
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
++ if (test_bit(__I40E_IN_REMOVE, pf->state))
++ return;
++
+ i40e_reset_and_rebuild(pf, false, false);
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+index e8a8d78e3e4d5..965838893432d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+@@ -553,7 +553,8 @@ static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
+
+ static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
+ {
+- *max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
++ /* Hardware treats 0 as "unlimited", set at least 1. */
++ *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
+
+ qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
+ ceil, *max_average_bw);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+index 9c076aa20306a..b6f5c1bcdbcd4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+@@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
+
+ static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
+ {
+- struct mlx5e_rep_priv *rpriv;
+- struct mlx5e_priv *priv;
+-
+- /* A given netdev is not a representor or not a slave of LAG configuration */
+- if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
+- return false;
+-
+- priv = netdev_priv(netdev);
+- rpriv = priv->ppriv;
+-
+- /* Egress acl forward to vport is supported only non-uplink representor */
+- return rpriv->rep->vport != MLX5_VPORT_UPLINK;
++ return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev);
+ }
+
+ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
+@@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
+ u16 fwd_vport_num;
+ int err;
+
+- if (!mlx5e_rep_is_lag_netdev(netdev))
+- return;
+-
+ info = ptr;
+ lag_info = info->lower_state_info;
+ /* This is not an event of a representor becoming active slave */
+@@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
+ struct net_device *lag_dev;
+ struct mlx5e_priv *priv;
+
+- if (!mlx5e_rep_is_lag_netdev(netdev))
+- return;
+-
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+ lag_dev = info->upper_dev;
+@@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+ {
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
++ struct mlx5e_rep_priv *rpriv;
++ struct mlx5e_rep_bond *bond;
++ struct mlx5e_priv *priv;
++
++ if (!mlx5e_rep_is_lag_netdev(netdev))
++ return NOTIFY_DONE;
++
++ bond = container_of(nb, struct mlx5e_rep_bond, nb);
++ priv = netdev_priv(netdev);
++ rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH);
++ /* Verify VF representor is on the same device of the bond handling the netevent. */
++ if (rpriv->uplink_priv.bond != bond)
++ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGELOWERSTATE:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+index c6d2f8c78db71..48dc121b2cb4c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+@@ -491,7 +491,7 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
+ }
+
+ br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
+- err = register_netdevice_notifier(&br_offloads->netdev_nb);
++ err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
+ if (err) {
+ esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
+ err);
+@@ -509,7 +509,9 @@ err_register_swdev_blk:
+ err_register_swdev:
+ destroy_workqueue(br_offloads->wq);
+ err_alloc_wq:
++ rtnl_lock();
+ mlx5_esw_bridge_cleanup(esw);
++ rtnl_unlock();
+ }
+
+ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
+@@ -524,7 +526,7 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
+ return;
+
+ cancel_delayed_work_sync(&br_offloads->update_work);
+- unregister_netdevice_notifier(&br_offloads->netdev_nb);
++ unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
+ unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
+ unregister_switchdev_notifier(&br_offloads->nb);
+ destroy_workqueue(br_offloads->wq);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+index 2db9573a3fe69..b56fea142c246 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+@@ -157,11 +157,20 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
+ /* Tunnel mode */
+ if (mode == XFRM_MODE_TUNNEL) {
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ if (xo->proto == IPPROTO_IPV6)
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+- if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
++
++ switch (xo->inner_ipproto) {
++ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
++ fallthrough;
++ case IPPROTO_TCP:
++ /* IP | ESP | IP | [TCP | UDP] */
++ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
++ break;
++ default:
++ break;
++ }
+ return;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+index 7e221038df8d5..317d76b97c42a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+@@ -1385,6 +1385,8 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
+ {
+ struct mlx5_esw_bridge_offloads *br_offloads;
+
++ ASSERT_RTNL();
++
+ br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
+ if (!br_offloads)
+ return ERR_PTR(-ENOMEM);
+@@ -1401,6 +1403,8 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
+ {
+ struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
+
++ ASSERT_RTNL();
++
+ if (!br_offloads)
+ return;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
+index 3401188e0a602..51ac24e6ec3c3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
+@@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template,
+ __field(unsigned int, used)
+ ),
+ TP_fast_assign(
+- strncpy(__entry->dev_name,
++ strscpy(__entry->dev_name,
+ netdev_name(fdb->dev),
+ IFNAMSIZ);
+ memcpy(__entry->addr, fdb->key.addr, ETH_ALEN);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index 106b50e42b464..a45c6f25add16 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -131,7 +131,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+- del_timer(&fw_reset->timer);
++ del_timer_sync(&fw_reset->timer);
+ }
+
+ static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+index d5e47630e2849..df58cba37930a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+@@ -121,12 +121,13 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
+
+ u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
+ {
+- if (!mlx5_chains_prios_supported(chains))
+- return 1;
+-
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX;
+
++ if (!chains->dev->priv.eswitch ||
++ chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS)
++ return 1;
++
+ /* We should get here only for eswitch case */
+ return FDB_TC_MAX_PRIO;
+ }
+@@ -211,7 +212,7 @@ static int
+ create_chain_restore(struct fs_chain *chain)
+ {
+ struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
+- char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
++ u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_fs_chains *chains = chain->chains;
+ enum mlx5e_tc_attr_to_reg chain_to_reg;
+ struct mlx5_modify_hdr *mod_hdr;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index 1ef2b6a848c10..7b16a1188aabb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -406,23 +406,24 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+
+ switch (module_id) {
+ case MLX5_MODULE_ID_SFP:
+- mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
++ mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
+ break;
+ case MLX5_MODULE_ID_QSFP:
+ case MLX5_MODULE_ID_QSFP_PLUS:
+ case MLX5_MODULE_ID_QSFP28:
+- mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
++ mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
+ break;
+ default:
+ mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+ return -EINVAL;
+ }
+
+- if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH)
++ if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
+ /* Cross pages read, read until offset 256 in low page */
+- size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
++ size = MLX5_EEPROM_PAGE_LENGTH - offset;
+
+ query.size = size;
++ query.offset = offset;
+
+ return mlx5_query_mcia(dev, &query, data);
+ }
+diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
+index c8f90cb1ee8f3..87e42db1b61e6 100644
+--- a/drivers/net/ipa/ipa_endpoint.c
++++ b/drivers/net/ipa/ipa_endpoint.c
+@@ -1069,21 +1069,33 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
+ u32 backlog;
+ int delta;
+
+- if (!endpoint->replenish_enabled) {
++ if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
+ if (add_one)
+ atomic_inc(&endpoint->replenish_saved);
+ return;
+ }
+
++ /* If already active, just update the backlog */
++ if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
++ if (add_one)
++ atomic_inc(&endpoint->replenish_backlog);
++ return;
++ }
++
+ while (atomic_dec_not_zero(&endpoint->replenish_backlog))
+ if (ipa_endpoint_replenish_one(endpoint))
+ goto try_again_later;
++
++ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
++
+ if (add_one)
+ atomic_inc(&endpoint->replenish_backlog);
+
+ return;
+
+ try_again_later:
++ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
++
+ /* The last one didn't succeed, so fix the backlog */
+ delta = add_one ? 2 : 1;
+ backlog = atomic_add_return(delta, &endpoint->replenish_backlog);
+@@ -1106,7 +1118,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
+ u32 max_backlog;
+ u32 saved;
+
+- endpoint->replenish_enabled = true;
++ set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
+ while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
+ atomic_add(saved, &endpoint->replenish_backlog);
+
+@@ -1120,7 +1132,7 @@ static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
+ {
+ u32 backlog;
+
+- endpoint->replenish_enabled = false;
++ clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
+ while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
+ atomic_add(backlog, &endpoint->replenish_saved);
+ }
+@@ -1665,7 +1677,8 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
+ /* RX transactions require a single TRE, so the maximum
+ * backlog is the same as the maximum outstanding TREs.
+ */
+- endpoint->replenish_enabled = false;
++ clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
++ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
+ atomic_set(&endpoint->replenish_saved,
+ gsi_channel_tre_max(gsi, endpoint->channel_id));
+ atomic_set(&endpoint->replenish_backlog, 0);
+diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
+index 0a859d10312dc..0313cdc607de3 100644
+--- a/drivers/net/ipa/ipa_endpoint.h
++++ b/drivers/net/ipa/ipa_endpoint.h
+@@ -40,6 +40,19 @@ enum ipa_endpoint_name {
+
+ #define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
+
++/**
++ * enum ipa_replenish_flag: RX buffer replenish flags
++ *
++ * @IPA_REPLENISH_ENABLED: Whether receive buffer replenishing is enabled
++ * @IPA_REPLENISH_ACTIVE: Whether replenishing is underway
++ * @IPA_REPLENISH_COUNT: Number of defined replenish flags
++ */
++enum ipa_replenish_flag {
++ IPA_REPLENISH_ENABLED,
++ IPA_REPLENISH_ACTIVE,
++ IPA_REPLENISH_COUNT, /* Number of flags (must be last) */
++};
++
+ /**
+ * struct ipa_endpoint - IPA endpoint information
+ * @ipa: IPA pointer
+@@ -51,7 +64,7 @@ enum ipa_endpoint_name {
+ * @trans_tre_max: Maximum number of TRE descriptors per transaction
+ * @evt_ring_id: GSI event ring used by the endpoint
+ * @netdev: Network device pointer, if endpoint uses one
+- * @replenish_enabled: Whether receive buffer replenishing is enabled
++ * @replenish_flags: Replenishing state flags
+ * @replenish_ready: Number of replenish transactions without doorbell
+ * @replenish_saved: Replenish requests held while disabled
+ * @replenish_backlog: Number of buffers needed to fill hardware queue
+@@ -72,7 +85,7 @@ struct ipa_endpoint {
+ struct net_device *netdev;
+
+ /* Receive buffer replenishing for RX endpoints */
+- bool replenish_enabled;
++ DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
+ u32 replenish_ready;
+ atomic_t replenish_saved;
+ atomic_t replenish_backlog;
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 06e2181e58108..d56e276e4d805 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -121,7 +121,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
+ if (tx_buf == NULL)
+ goto free_rx_urb;
+
+- rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
++ rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
+ GFP_KERNEL, &rx_urb->transfer_dma);
+ if (rx_buf == NULL)
+ goto free_tx_buf;
+@@ -146,7 +146,7 @@ error_nomem:
+
+ static void ipheth_free_urbs(struct ipheth_device *iphone)
+ {
+- usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
++ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf,
+ iphone->rx_urb->transfer_dma);
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
+ iphone->tx_urb->transfer_dma);
+@@ -317,7 +317,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
+
+ usb_fill_bulk_urb(dev->rx_urb, udev,
+ usb_rcvbulkpipe(udev, dev->bulk_in),
+- dev->rx_buf, IPHETH_BUF_SIZE,
++ dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
+ ipheth_rcvbulk_callback,
+ dev);
+ dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 3a46227e2c73e..b0692f33b03ae 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -642,6 +642,8 @@ read_status:
+ */
+ if (ctrl->power_fault_detected)
+ status &= ~PCI_EXP_SLTSTA_PFD;
++ else if (status & PCI_EXP_SLTSTA_PFD)
++ ctrl->power_fault_detected = true;
+
+ events |= status;
+ if (!events) {
+@@ -651,7 +653,7 @@ read_status:
+ }
+
+ if (status) {
+- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
++ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status);
+
+ /*
+ * In MSI mode, all event bits must be zero before the port
+@@ -725,8 +727,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
+ }
+
+ /* Check Power Fault Detected */
+- if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
+- ctrl->power_fault_detected = 1;
++ if (events & PCI_EXP_SLTSTA_PFD) {
+ ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
+ pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
+ PCI_EXP_SLTCTL_ATTN_IND_ON);
+diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
+index cb3a7512c33ec..0a22a2faf5522 100644
+--- a/fs/lockd/svcsubs.c
++++ b/fs/lockd/svcsubs.c
+@@ -179,19 +179,21 @@ nlm_delete_file(struct nlm_file *file)
+ static int nlm_unlock_files(struct nlm_file *file)
+ {
+ struct file_lock lock;
+- struct file *f;
+
++ locks_init_lock(&lock);
+ lock.fl_type = F_UNLCK;
+ lock.fl_start = 0;
+ lock.fl_end = OFFSET_MAX;
+- for (f = file->f_file[0]; f <= file->f_file[1]; f++) {
+- if (f && vfs_lock_file(f, F_SETLK, &lock, NULL) < 0) {
+- pr_warn("lockd: unlock failure in %s:%d\n",
+- __FILE__, __LINE__);
+- return 1;
+- }
+- }
++ if (file->f_file[O_RDONLY] &&
++ vfs_lock_file(file->f_file[O_RDONLY], F_SETLK, &lock, NULL))
++ goto out_err;
++ if (file->f_file[O_WRONLY] &&
++ vfs_lock_file(file->f_file[O_WRONLY], F_SETLK, &lock, NULL))
++ goto out_err;
+ return 0;
++out_err:
++ pr_warn("lockd: unlock failure in %s:%d\n", __FILE__, __LINE__);
++ return 1;
+ }
+
+ /*
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 6facdf476255d..84ec851211d91 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -611,9 +611,6 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ if (fanotify_is_perm_event(event->mask))
+ FANOTIFY_PERM(event)->fd = fd;
+
+- if (f)
+- fd_install(fd, f);
+-
+ if (info_mode) {
+ ret = copy_info_records_to_user(event, info, info_mode, pidfd,
+ buf, count);
+@@ -621,6 +618,9 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ goto out_close_fd;
+ }
+
++ if (f)
++ fd_install(fd, f);
++
+ return metadata.event_len;
+
+ out_close_fd:
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index b193d08a3dc36..e040970408d4f 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -145,7 +145,7 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old,
+ if (err == -ENOTTY || err == -EINVAL)
+ return 0;
+ pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n",
+- old, err);
++ old->dentry, err);
+ return err;
+ }
+
+@@ -157,7 +157,9 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old,
+ */
+ if (oldfa.flags & OVL_PROT_FS_FLAGS_MASK) {
+ err = ovl_set_protattr(inode, new->dentry, &oldfa);
+- if (err)
++ if (err == -EPERM)
++ pr_warn_once("copying fileattr: no xattr on upper\n");
++ else if (err)
+ return err;
+ }
+
+@@ -167,8 +169,16 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old,
+
+ err = ovl_real_fileattr_get(new, &newfa);
+ if (err) {
++ /*
++ * Returning an error if upper doesn't support fileattr will
++ * result in a regression, so revert to the old behavior.
++ */
++ if (err == -ENOTTY || err == -EINVAL) {
++ pr_warn_once("copying fileattr: no support on upper\n");
++ return 0;
++ }
+ pr_warn("failed to retrieve upper fileattr (%pd2, err=%i)\n",
+- new, err);
++ new->dentry, err);
+ return err;
+ }
+
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index 9537443de22dd..c59aa2c7749b2 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -552,6 +552,14 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
+
+ BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
+
++ /*
++ * Release agent gets called with all capabilities,
++ * require capabilities to set release agent.
++ */
++ if ((of->file->f_cred->user_ns != &init_user_ns) ||
++ !capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
+ cgrp = cgroup_kn_lock_live(of->kn, false);
+ if (!cgrp)
+ return -ENODEV;
+@@ -963,6 +971,12 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
+ /* Specifying two release agents is forbidden */
+ if (ctx->release_agent)
+ return invalfc(fc, "release_agent respecified");
++ /*
++ * Release agent gets called with all capabilities,
++ * require capabilities to set release agent.
++ */
++ if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
++ return invalfc(fc, "Setting release_agent not allowed");
+ ctx->release_agent = param->string;
+ param->string = NULL;
+ break;
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 2a9695ccb65f5..1d9d3e4d4cbc0 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1597,8 +1597,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ * Make sure that subparts_cpus is a subset of cpus_allowed.
+ */
+ if (cs->nr_subparts_cpus) {
+- cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus,
+- cs->cpus_allowed);
++ cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed);
+ cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
+ }
+ spin_unlock_irq(&callback_lock);
+diff --git a/mm/gup.c b/mm/gup.c
+index 886d6148d3d03..52f08e3177e9f 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -124,8 +124,8 @@ static inline struct page *try_get_compound_head(struct page *page, int refs)
+ * considered failure, and furthermore, a likely bug in the caller, so a warning
+ * is also emitted.
+ */
+-struct page *try_grab_compound_head(struct page *page,
+- int refs, unsigned int flags)
++__maybe_unused struct page *try_grab_compound_head(struct page *page,
++ int refs, unsigned int flags)
+ {
+ if (flags & FOLL_GET)
+ return try_get_compound_head(page, refs);
+@@ -208,10 +208,35 @@ static void put_compound_head(struct page *page, int refs, unsigned int flags)
+ */
+ bool __must_check try_grab_page(struct page *page, unsigned int flags)
+ {
+- if (!(flags & (FOLL_GET | FOLL_PIN)))
+- return true;
++ WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
+
+- return try_grab_compound_head(page, 1, flags);
++ if (flags & FOLL_GET)
++ return try_get_page(page);
++ else if (flags & FOLL_PIN) {
++ int refs = 1;
++
++ page = compound_head(page);
++
++ if (WARN_ON_ONCE(page_ref_count(page) <= 0))
++ return false;
++
++ if (hpage_pincount_available(page))
++ hpage_pincount_add(page, 1);
++ else
++ refs = GUP_PIN_COUNTING_BIAS;
++
++ /*
++ * Similar to try_grab_compound_head(): even if using the
++ * hpage_pincount_add/_sub() routines, be sure to
++ * *also* increment the normal page refcount field at least
++ * once, so that the page really is pinned.
++ */
++ page_ref_add(page, refs);
++
++ mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
++ }
++
++ return true;
+ }
+
+ /**
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 8ccce85562a1d..198cc8b74dc3e 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3254,8 +3254,8 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
+ unsigned char name_assign_type = NET_NAME_USER;
+ struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
+- const struct rtnl_link_ops *m_ops = NULL;
+- struct net_device *master_dev = NULL;
++ const struct rtnl_link_ops *m_ops;
++ struct net_device *master_dev;
+ struct net *net = sock_net(skb->sk);
+ const struct rtnl_link_ops *ops;
+ struct nlattr *tb[IFLA_MAX + 1];
+@@ -3293,6 +3293,8 @@ replay:
+ else
+ dev = NULL;
+
++ master_dev = NULL;
++ m_ops = NULL;
+ if (dev) {
+ master_dev = netdev_master_upper_dev_get(dev);
+ if (master_dev)
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index f3b6239674361..509f577869d4e 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1652,6 +1652,8 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+ (mss != tcp_skb_seglen(skb)))
+ goto out;
+
++ if (!tcp_skb_can_collapse(prev, skb))
++ goto out;
+ len = skb->len;
+ pcount = tcp_skb_pcount(skb);
+ if (tcp_skb_shift(prev, skb, pcount, len))
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 1a138e8d32d66..e00c38f242c32 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1753,7 +1753,10 @@ static int fanout_add(struct sock *sk, struct fanout_args *args)
+ err = -ENOSPC;
+ if (refcount_read(&match->sk_ref) < match->max_num_members) {
+ __dev_remove_pack(&po->prot_hook);
+- po->fanout = match;
++
++ /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
++ WRITE_ONCE(po->fanout, match);
++
+ po->rollover = rollover;
+ rollover = NULL;
+ refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
+@@ -3906,7 +3909,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+ }
+ case PACKET_FANOUT_DATA:
+ {
+- if (!po->fanout)
++ /* Paired with the WRITE_ONCE() in fanout_add() */
++ if (!READ_ONCE(po->fanout))
+ return -EINVAL;
+
+ return fanout_set_data(po, optval, optlen);
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index cc9409aa755eb..56dba8519d7c3 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1945,9 +1945,9 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ bool prio_allocate;
+ u32 parent;
+ u32 chain_index;
+- struct Qdisc *q = NULL;
++ struct Qdisc *q;
+ struct tcf_chain_info chain_info;
+- struct tcf_chain *chain = NULL;
++ struct tcf_chain *chain;
+ struct tcf_block *block;
+ struct tcf_proto *tp;
+ unsigned long cl;
+@@ -1976,6 +1976,8 @@ replay:
+ tp = NULL;
+ cl = 0;
+ block = NULL;
++ q = NULL;
++ chain = NULL;
+ flags = 0;
+
+ if (prio == 0) {
+@@ -2798,8 +2800,8 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
+ struct tcmsg *t;
+ u32 parent;
+ u32 chain_index;
+- struct Qdisc *q = NULL;
+- struct tcf_chain *chain = NULL;
++ struct Qdisc *q;
++ struct tcf_chain *chain;
+ struct tcf_block *block;
+ unsigned long cl;
+ int err;
+@@ -2809,6 +2811,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
+ return -EPERM;
+
+ replay:
++ q = NULL;
+ err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
+ rtm_tca_policy, extack);
+ if (err < 0)
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 586af88194e56..3e9d3df9c45cb 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -75,6 +75,7 @@ init()
+
+ # let $ns2 reach any $ns1 address from any interface
+ ip -net "$ns2" route add default via 10.0.$i.1 dev ns2eth$i metric 10$i
++ ip -net "$ns2" route add default via dead:beef:$i::1 dev ns2eth$i metric 10$i
+ done
+ }
+
+@@ -1383,7 +1384,7 @@ ipv6_tests()
+ reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+- ip netns exec $ns2 ./pm_nl_ctl add dead:beef:3::2 flags subflow
++ ip netns exec $ns2 ./pm_nl_ctl add dead:beef:3::2 dev ns2eth3 flags subflow
+ run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
+ chk_join_nr "single subflow IPv6" 1 1 1
+
+@@ -1418,7 +1419,7 @@ ipv6_tests()
+ ip netns exec $ns1 ./pm_nl_ctl limits 0 2
+ ip netns exec $ns1 ./pm_nl_ctl add dead:beef:2::1 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl limits 1 2
+- ip netns exec $ns2 ./pm_nl_ctl add dead:beef:3::2 flags subflow
++ ip netns exec $ns2 ./pm_nl_ctl add dead:beef:3::2 dev ns2eth3 flags subflow
+ run_tests $ns1 $ns2 dead:beef:1::1 0 -1 -1 slow
+ chk_join_nr "remove subflow and signal IPv6" 2 2 2
+ chk_add_nr 1 1