From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.16 commit in: /
Date: Mon, 11 Jun 2018 21:48:25
Message-Id: 1528753677.373445e16ce72726343d67d7fca5de454a00a4c3.mpagano@gentoo
commit: 373445e16ce72726343d67d7fca5de454a00a4c3
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Mon Jun 11 21:47:57 2018 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Mon Jun 11 21:47:57 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=373445e1

Linux patch 4.16.15

0000_README | 4 +
1014_linux-4.16.15.patch | 1544 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1548 insertions(+)

diff --git a/0000_README b/0000_README
index 5691b91..d817caf 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch: 1013_linux-4.16.14.patch
From: http://www.kernel.org
Desc: Linux 4.16.14

+Patch: 1014_linux-4.16.15.patch
+From: http://www.kernel.org
+Desc: Linux 4.16.15
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-4.16.15.patch b/1014_linux-4.16.15.patch
new file mode 100644
index 0000000..6820a0f
--- /dev/null
+++ b/1014_linux-4.16.15.patch
@@ -0,0 +1,1544 @@
+diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
+index 2a3278d5cf35..fa951b820b25 100644
+--- a/Documentation/networking/netdev-FAQ.txt
++++ b/Documentation/networking/netdev-FAQ.txt
+@@ -179,6 +179,15 @@ A: No. See above answer. In short, if you think it really belongs in
+ dash marker line as described in Documentation/process/submitting-patches.rst to
+ temporarily embed that information into the patch that you send.
+
++Q: Are all networking bug fixes backported to all stable releases?
++
++A: Due to capacity, Dave could only take care of the backports for the last
++ 2 stable releases. For earlier stable releases, each stable branch maintainer
++ is supposed to take care of them. If you find any patch is missing from an
++ earlier stable branch, please notify stable@vger.kernel.org with either a
++ commit ID or a formal patch backported, and CC Dave and other relevant
++ networking developers.
++
+ Q: Someone said that the comment style and coding convention is different
+ for the networking content. Is this true?
+
+diff --git a/Makefile b/Makefile
+index a043442e442f..e45c66b27241 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+index e394799979a6..6d9b9453707c 100644
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
+ return -ENOMEM;
+
+ filp->private_data = priv;
++ filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+ priv->filp = filp;
+ priv->pid = get_pid(task_pid(current));
+ priv->minor = minor;
+diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
+index 944a7f338099..1b25d8bc153a 100644
+--- a/drivers/isdn/hardware/eicon/diva.c
++++ b/drivers/isdn/hardware/eicon/diva.c
+@@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void)
+ ** Receive and process command from user mode utility
+ */
+ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
+- int length,
++ int length, void *mptr,
+ divas_xdi_copy_from_user_fn_t cp_fn)
+ {
+- diva_xdi_um_cfg_cmd_t msg;
++ diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
+ diva_os_xdi_adapter_t *a = NULL;
+ diva_os_spin_lock_magic_t old_irql;
+ struct list_head *tmp;
+@@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
+ length, sizeof(diva_xdi_um_cfg_cmd_t)))
+ return NULL;
+ }
+- if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) {
++ if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
+ DBG_ERR(("A: A(?) open, write error"))
+ return NULL;
+ }
+ diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
+ list_for_each(tmp, &adapter_queue) {
+ a = list_entry(tmp, diva_os_xdi_adapter_t, link);
+- if (a->controller == (int)msg.adapter)
++ if (a->controller == (int)msg->adapter)
+ break;
+ a = NULL;
+ }
+ diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
+
+ if (!a) {
+- DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter))
++ DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
+ }
+
+ return (a);
+@@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)
+
+ int
+ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
+- int length, divas_xdi_copy_from_user_fn_t cp_fn)
++ int length, void *mptr,
++ divas_xdi_copy_from_user_fn_t cp_fn)
+ {
++ diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
+ diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
+ void *data;
+
+@@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
+ return (-2);
+ }
+
+- length = (*cp_fn) (os_handle, data, src, length);
++ if (msg) {
++ *(diva_xdi_um_cfg_cmd_t *)data = *msg;
++ length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
++ src + sizeof(*msg), length - sizeof(*msg));
++ } else {
++ length = (*cp_fn) (os_handle, data, src, length);
++ }
+ if (length > 0) {
+ if ((*(a->interface.cmd_proc))
+ (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
+diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
+index b067032093a8..1ad76650fbf9 100644
+--- a/drivers/isdn/hardware/eicon/diva.h
++++ b/drivers/isdn/hardware/eicon/diva.h
+@@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
+ int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
+
+ int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
+- int length, divas_xdi_copy_from_user_fn_t cp_fn);
++ int length, void *msg,
++ divas_xdi_copy_from_user_fn_t cp_fn);
+
+ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
+- int length,
++ int length, void *msg,
+ divas_xdi_copy_from_user_fn_t cp_fn);
+
+ void diva_xdi_close_adapter(void *adapter, void *os_handle);
+diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
+index b9980e84f9db..b6a3950b2564 100644
+--- a/drivers/isdn/hardware/eicon/divasmain.c
++++ b/drivers/isdn/hardware/eicon/divasmain.c
+@@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
+ static ssize_t divas_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+ {
++ diva_xdi_um_cfg_cmd_t msg;
+ int ret = -EINVAL;
+
+ if (!file->private_data) {
+ file->private_data = diva_xdi_open_adapter(file, buf,
+- count,
++ count, &msg,
+ xdi_copy_from_user);
+- }
+- if (!file->private_data) {
+- return (-ENODEV);
++ if (!file->private_data)
++ return (-ENODEV);
++ ret = diva_xdi_write(file->private_data, file,
++ buf, count, &msg, xdi_copy_from_user);
++ } else {
++ ret = diva_xdi_write(file->private_data, file,
++ buf, count, NULL, xdi_copy_from_user);
+ }
+
+- ret = diva_xdi_write(file->private_data, file,
+- buf, count, xdi_copy_from_user);
+ switch (ret) {
+ case -1: /* Message should be removed from rx mailbox first */
+ ret = -EBUSY;
+@@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
+ static ssize_t divas_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+ {
++ diva_xdi_um_cfg_cmd_t msg;
+ int ret = -EINVAL;
+
+ if (!file->private_data) {
+ file->private_data = diva_xdi_open_adapter(file, buf,
+- count,
++ count, &msg,
+ xdi_copy_from_user);
+ }
+ if (!file->private_data) {
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 63e02a54d537..06e8e7a81994 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -684,7 +684,8 @@ static int b53_switch_reset(struct b53_device *dev)
+ * still use this driver as a library and need to perform the reset
+ * earlier.
+ */
+- if (dev->chip_id == BCM58XX_DEVICE_ID) {
++ if (dev->chip_id == BCM58XX_DEVICE_ID ||
++ dev->chip_id == BCM583XX_DEVICE_ID) {
+ b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+ reg |= SW_RST | EN_SW_RST | EN_CH_RST;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
+@@ -1867,6 +1868,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
++ {
++ .chip_id = BCM583XX_DEVICE_ID,
++ .dev_name = "BCM583xx/11360",
++ .vlans = 4096,
++ .enabled_ports = 0x103,
++ .arl_entries = 4,
++ .cpu_port = B53_CPU_PORT,
++ .vta_regs = B53_VTA_REGS,
++ .duplex_reg = B53_DUPLEX_STAT_GE,
++ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
++ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
++ },
+ {
+ .chip_id = BCM7445_DEVICE_ID,
+ .dev_name = "BCM7445",
+diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
+index d954cf36ecd8..f91acda37572 100644
+--- a/drivers/net/dsa/b53/b53_priv.h
++++ b/drivers/net/dsa/b53/b53_priv.h
+@@ -61,6 +61,7 @@ enum {
+ BCM53018_DEVICE_ID = 0x53018,
+ BCM53019_DEVICE_ID = 0x53019,
+ BCM58XX_DEVICE_ID = 0x5800,
++ BCM583XX_DEVICE_ID = 0x58300,
+ BCM7445_DEVICE_ID = 0x7445,
+ BCM7278_DEVICE_ID = 0x7278,
+ };
+@@ -180,6 +181,7 @@ static inline int is5301x(struct b53_device *dev)
+ static inline int is58xx(struct b53_device *dev)
+ {
+ return dev->chip_id == BCM58XX_DEVICE_ID ||
++ dev->chip_id == BCM583XX_DEVICE_ID ||
+ dev->chip_id == BCM7445_DEVICE_ID ||
+ dev->chip_id == BCM7278_DEVICE_ID;
+ }
+diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
+index c37ffd1b6833..8247481eaa06 100644
+--- a/drivers/net/dsa/b53/b53_srab.c
++++ b/drivers/net/dsa/b53/b53_srab.c
+@@ -364,7 +364,7 @@ static const struct of_device_id b53_srab_of_match[] = {
+ { .compatible = "brcm,bcm53018-srab" },
+ { .compatible = "brcm,bcm53019-srab" },
+ { .compatible = "brcm,bcm5301x-srab" },
+- { .compatible = "brcm,bcm11360-srab", .data = (void *)BCM58XX_DEVICE_ID },
++ { .compatible = "brcm,bcm11360-srab", .data = (void *)BCM583XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID },
+@@ -372,7 +372,7 @@ static const struct of_device_id b53_srab_of_match[] = {
+ { .compatible = "brcm,bcm58623-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID },
+- { .compatible = "brcm,cygnus-srab", .data = (void *)BCM58XX_DEVICE_ID },
++ { .compatible = "brcm,cygnus-srab", .data = (void *)BCM583XX_DEVICE_ID },
+ { .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { /* sentinel */ },
+ };
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+index 7dd83d0ef0a0..22243c480a05 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+@@ -588,7 +588,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
+ * slots for the highest priority.
+ */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
+- NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
++ NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /* Mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index b91109d967fa..3179599dd797 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -2704,11 +2704,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ pci_set_master(pdev);
+
+ /* Query PCI controller on system for DMA addressing
+- * limitation for the device. Try 64-bit first, and
++ * limitation for the device. Try 47-bit first, and
+ * fail to 32-bit.
+ */
+
+- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
++ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+@@ -2722,10 +2722,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ goto err_out_release_regions;
+ }
+ } else {
+- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
++ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
+ if (err) {
+ dev_err(dev, "Unable to obtain %u-bit DMA "
+- "for consistent allocations, aborting\n", 64);
++ "for consistent allocations, aborting\n", 47);
+ goto err_out_release_regions;
+ }
+ using_dac = 1;
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 5774fb6f8aa0..4d764c3ee155 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -3309,7 +3309,9 @@ void be_detect_error(struct be_adapter *adapter)
+ if ((val & POST_STAGE_FAT_LOG_START)
+ != POST_STAGE_FAT_LOG_START &&
+ (val & POST_STAGE_ARMFW_UE)
+- != POST_STAGE_ARMFW_UE)
++ != POST_STAGE_ARMFW_UE &&
++ (val & POST_STAGE_RECOVERABLE_ERR)
++ != POST_STAGE_RECOVERABLE_ERR)
+ return;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
+index 3aaf4bad6c5a..427e7a31862c 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
++++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
+@@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ struct mlx4_qp *qp;
+
+- spin_lock(&qp_table->lock);
++ spin_lock_irq(&qp_table->lock);
+
+ qp = __mlx4_qp_lookup(dev, qpn);
+
+- spin_unlock(&qp_table->lock);
++ spin_unlock_irq(&qp_table->lock);
+ return qp;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index e5c3ab46a24a..f63b317f7b32 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -635,6 +635,45 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
+ return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+ }
+
++static __be32 mlx5e_get_fcs(struct sk_buff *skb)
++{
++ int last_frag_sz, bytes_in_prev, nr_frags;
++ u8 *fcs_p1, *fcs_p2;
++ skb_frag_t *last_frag;
++ __be32 fcs_bytes;
++
++ if (!skb_is_nonlinear(skb))
++ return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
++
++ nr_frags = skb_shinfo(skb)->nr_frags;
++ last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
++ last_frag_sz = skb_frag_size(last_frag);
++
++ /* If all FCS data is in last frag */
++ if (last_frag_sz >= ETH_FCS_LEN)
++ return *(__be32 *)(skb_frag_address(last_frag) +
++ last_frag_sz - ETH_FCS_LEN);
++
++ fcs_p2 = (u8 *)skb_frag_address(last_frag);
++ bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
++
++ /* Find where the other part of the FCS is - Linear or another frag */
++ if (nr_frags == 1) {
++ fcs_p1 = skb_tail_pointer(skb);
++ } else {
++ skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
++
++ fcs_p1 = skb_frag_address(prev_frag) +
++ skb_frag_size(prev_frag);
++ }
++ fcs_p1 -= bytes_in_prev;
++
++ memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
++ memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
++
++ return fcs_bytes;
++}
++
+ static inline void mlx5e_handle_csum(struct net_device *netdev,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq,
+@@ -663,6 +702,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
+ skb->csum = csum_partial(skb->data + ETH_HLEN,
+ network_depth - ETH_HLEN,
+ skb->csum);
++ if (unlikely(netdev->features & NETIF_F_RXFCS))
++ skb->csum = csum_add(skb->csum,
++ (__force __wsum)mlx5e_get_fcs(skb));
+ rq->stats.csum_complete++;
+ return;
+ }
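
The mlx5e_get_fcs() helper added above recovers the 4-byte Ethernet FCS even when it straddles the last two fragments of a non-linear skb. A minimal user-space C sketch of the same two-memcpy reassembly, with hypothetical buffer names (illustration only, not part of the patch):

/* Illustration only: reassemble a 4-byte trailer that may be split
 * across the last two buffers of a packet, mirroring mlx5e_get_fcs().
 * Buffer names are hypothetical. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define FCS_LEN 4

static uint32_t get_split_fcs(const uint8_t *prev, size_t prev_len,
                              const uint8_t *last, size_t last_len)
{
	uint32_t fcs;
	size_t bytes_in_prev;

	if (last_len >= FCS_LEN) {
		/* The whole FCS sits at the tail of the last buffer. */
		memcpy(&fcs, last + last_len - FCS_LEN, FCS_LEN);
		return fcs;
	}

	/* Leading bytes come from the tail of the previous buffer,
	 * the remainder from the start of the last one. */
	bytes_in_prev = FCS_LEN - last_len;
	memcpy(&fcs, prev + prev_len - bytes_in_prev, bytes_in_prev);
	memcpy((uint8_t *)&fcs + bytes_in_prev, last, last_len);
	return fcs;
}

int main(void)
{
	uint8_t prev[] = { 0x00, 0x11, 0xde, 0xad };	/* fragment ending in 0xde 0xad */
	uint8_t last[] = { 0xbe, 0xef };		/* FCS continues here */
	uint32_t fcs = get_split_fcs(prev, sizeof(prev), last, sizeof(last));
	uint8_t *p = (uint8_t *)&fcs;

	printf("fcs bytes: %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}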
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index bf400c75fcc8..c54762729bdf 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -4870,6 +4870,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
+ "spectrum: Can not put a VLAN on an OVS port");
+ return -EINVAL;
+ }
++ if (is_vlan_dev(upper_dev) &&
++ vlan_dev_vlan_id(upper_dev) == 1) {
++ NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
++ return -EINVAL;
++ }
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+index 00f41c145d4d..820b226d6ff8 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+@@ -77,7 +77,7 @@
+ #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
+
+ /* ILT entry structure */
+-#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
++#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
+ #define ILT_ENTRY_PHY_ADDR_SHIFT 0
+ #define ILT_ENTRY_VALID_MASK 0x1ULL
+ #define ILT_ENTRY_VALID_SHIFT 52
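
The one-line qed change above widens ILT_ENTRY_PHY_ADDR_MASK from the literal 0x000FFFFFFFFFFF (44 set bits) to (~0ULL >> 12) (52 set bits), consistent with the valid bit living at shift 52. A quick stand-alone check of those widths (illustration only; assumes a GCC/Clang-style popcount builtin):

/* Illustration only: compare the two mask literals from the hunk above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t old_mask = 0x000FFFFFFFFFFFULL;	/* removed line */
	uint64_t new_mask = ~0ULL >> 12;		/* added line */

	printf("old mask: %d set bits (0x%llx)\n",
	       __builtin_popcountll(old_mask), (unsigned long long)old_mask);
	printf("new mask: %d set bits (0x%llx)\n",
	       __builtin_popcountll(new_mask), (unsigned long long)new_mask);
	/* Prints 44 vs 52: the old literal silently truncated any ILT
	 * physical address that used bits 44-51. */
	return 0;
}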
+diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
+index f4c0b02ddad8..59fbf74dcada 100644
+--- a/drivers/net/ethernet/socionext/netsec.c
++++ b/drivers/net/ethernet/socionext/netsec.c
+@@ -1674,8 +1674,8 @@ static int netsec_probe(struct platform_device *pdev)
+ if (ret)
+ goto unreg_napi;
+
+- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+- dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
++ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
++ dev_warn(&pdev->dev, "Failed to set DMA mask\n");
+
+ ret = register_netdev(ndev);
+ if (ret) {
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index abceea802ea1..38828ab77eb9 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -1873,7 +1873,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ if (IS_ERR(priv->txchan)) {
+ dev_err(&pdev->dev, "error initializing tx dma channel\n");
+ rc = PTR_ERR(priv->txchan);
+- goto no_cpdma_chan;
++ goto err_free_dma;
+ }
+
+ priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
+@@ -1881,14 +1881,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ if (IS_ERR(priv->rxchan)) {
+ dev_err(&pdev->dev, "error initializing rx dma channel\n");
+ rc = PTR_ERR(priv->rxchan);
+- goto no_cpdma_chan;
++ goto err_free_txchan;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "error getting irq res\n");
+ rc = -ENOENT;
+- goto no_cpdma_chan;
++ goto err_free_rxchan;
+ }
+ ndev->irq = res->start;
+
+@@ -1914,7 +1914,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ pm_runtime_put_noidle(&pdev->dev);
+ dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, rc);
+- goto no_cpdma_chan;
++ goto err_napi_del;
+ }
+
+ /* register the network device */
+@@ -1924,7 +1924,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ dev_err(&pdev->dev, "error in register_netdev\n");
+ rc = -ENODEV;
+ pm_runtime_put(&pdev->dev);
+- goto no_cpdma_chan;
++ goto err_napi_del;
+ }
+
+
+@@ -1937,11 +1937,13 @@ static int davinci_emac_probe(struct platform_device *pdev)
+
+ return 0;
+
+-no_cpdma_chan:
+- if (priv->txchan)
+- cpdma_chan_destroy(priv->txchan);
+- if (priv->rxchan)
+- cpdma_chan_destroy(priv->rxchan);
++err_napi_del:
++ netif_napi_del(&priv->napi);
++err_free_rxchan:
++ cpdma_chan_destroy(priv->rxchan);
++err_free_txchan:
++ cpdma_chan_destroy(priv->txchan);
++err_free_dma:
+ cpdma_ctlr_destroy(priv->dma);
+ no_pdata:
+ if (of_phy_is_fixed_link(np))
+diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
+index 6838129839ca..e757b09f1889 100644
+--- a/drivers/net/phy/bcm-cygnus.c
++++ b/drivers/net/phy/bcm-cygnus.c
+@@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
+ return rc;
+
+ /* make rcal=100, since rdb default is 000 */
+- rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10);
++ rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
+ if (rc < 0)
+ return rc;
+
+ /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
+- rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10);
++ rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
+ if (rc < 0)
+ return rc;
+
+ /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
+- rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00);
++ rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);
+
+ return 0;
+ }
+diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
+index 5ad130c3da43..d5e0833d69b9 100644
+--- a/drivers/net/phy/bcm-phy-lib.c
++++ b/drivers/net/phy/bcm-phy-lib.c
+@@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
+ /* The register must be written to both the Shadow Register Select and
+ * the Shadow Read Register Selector
+ */
+- phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum |
++ phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
+ regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
+ return phy_read(phydev, MII_BCM54XX_AUX_CTL);
+ }
+diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
+index 7c73808cbbde..81cceaa412fe 100644
+--- a/drivers/net/phy/bcm-phy-lib.h
++++ b/drivers/net/phy/bcm-phy-lib.h
+@@ -14,11 +14,18 @@
+ #ifndef _LINUX_BCM_PHY_LIB_H
+ #define _LINUX_BCM_PHY_LIB_H
+
++#include <linux/brcmphy.h>
+ #include <linux/phy.h>
+
+ int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
+ int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
+
++static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
++ u16 reg, u16 val)
++{
++ return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
++}
++
+ int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
+ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
+
+diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
+index 421feb8f92fe..90eb3e12a4f8 100644
+--- a/drivers/net/phy/bcm7xxx.c
++++ b/drivers/net/phy/bcm7xxx.c
+@@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv {
+ static void r_rc_cal_reset(struct phy_device *phydev)
+ {
+ /* Reset R_CAL/RC_CAL Engine */
+- bcm_phy_write_exp(phydev, 0x00b0, 0x0010);
++ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
+
+ /* Disable Reset R_AL/RC_CAL Engine */
+- bcm_phy_write_exp(phydev, 0x00b0, 0x0000);
++ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
+ }
+
+ static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 3175f7410baf..8f3863cd0094 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1004,7 +1004,8 @@ static void team_port_disable(struct team *team,
+ static void __team_compute_features(struct team *team)
+ {
+ struct team_port *port;
+- u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
++ netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
++ NETIF_F_ALL_FOR_ALL;
+ netdev_features_t enc_features = TEAM_ENC_FEATURES;
+ unsigned short max_hard_header_len = ETH_HLEN;
+ unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index ffae19714ffd..24e645c86ae7 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1632,7 +1632,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ else
+ *skb_xdp = 0;
+
+- preempt_disable();
++ local_bh_disable();
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(tun->xdp_prog);
+ if (xdp_prog && !*skb_xdp) {
+@@ -1657,7 +1657,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ if (err)
+ goto err_redirect;
+ rcu_read_unlock();
+- preempt_enable();
++ local_bh_enable();
+ return NULL;
+ case XDP_TX:
+ xdp_xmit = true;
+@@ -1679,7 +1679,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ skb = build_skb(buf, buflen);
+ if (!skb) {
+ rcu_read_unlock();
+- preempt_enable();
++ local_bh_enable();
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -1692,12 +1692,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ skb->dev = tun->dev;
+ generic_xdp_tx(skb, xdp_prog);
+ rcu_read_unlock();
+- preempt_enable();
++ local_bh_enable();
+ return NULL;
+ }
+
+ rcu_read_unlock();
+- preempt_enable();
++ local_bh_enable();
+
+ return skb;
+
+@@ -1705,7 +1705,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ put_page(alloc_frag->page);
+ err_xdp:
+ rcu_read_unlock();
+- preempt_enable();
++ local_bh_enable();
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ return NULL;
+ }
+@@ -1901,16 +1901,19 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ struct bpf_prog *xdp_prog;
+ int ret;
+
++ local_bh_disable();
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(tun->xdp_prog);
+ if (xdp_prog) {
+ ret = do_xdp_generic(xdp_prog, skb);
+ if (ret != XDP_PASS) {
+ rcu_read_unlock();
++ local_bh_enable();
+ return total_len;
+ }
+ }
+ rcu_read_unlock();
++ local_bh_enable();
+ }
+
+ rcu_read_lock();
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index 7220cd620717..0362acd5cdca 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -609,7 +609,7 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = {
+ */
+ static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = {
+ .description = "CDC MBIM",
+- .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
++ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
+ .bind = cdc_mbim_bind,
+ .unbind = cdc_mbim_unbind,
+ .manage_power = cdc_mbim_manage_power,
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 16b0c7db431b..8911e3466e61 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -707,6 +707,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ void *data;
+ u32 act;
+
++ /* Transient failure which in theory could occur if
++ * in-flight packets from before XDP was enabled reach
++ * the receive path after XDP is loaded.
++ */
++ if (unlikely(hdr->hdr.gso_type))
++ goto err_xdp;
++
+ /* This happens when rx buffer size is underestimated
+ * or headroom is not enough because of the buffer
+ * was refilled before XDP is set. This should only
+@@ -727,14 +734,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ xdp_page = page;
+ }
+
+- /* Transient failure which in theory could occur if
+- * in-flight packets from before XDP was enabled reach
+- * the receive path after XDP is loaded. In practice I
+- * was not able to create this condition.
+- */
+- if (unlikely(hdr->hdr.gso_type))
+- goto err_xdp;
+-
+ /* Allow consuming headroom but reserve enough space to push
+ * the descriptor on if we get an XDP_TX return code.
+ */
+@@ -775,7 +774,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ }
+ *xdp_xmit = true;
+ if (unlikely(xdp_page != page))
+- goto err_xdp;
++ put_page(page);
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_REDIRECT:
+@@ -787,7 +786,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ }
+ *xdp_xmit = true;
+ if (unlikely(xdp_page != page))
+- goto err_xdp;
++ put_page(page);
+ rcu_read_unlock();
+ goto xdp_xmit;
+ default:
+@@ -875,7 +874,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ rcu_read_unlock();
+ err_skb:
+ put_page(page);
+- while (--num_buf) {
++ while (num_buf-- > 1) {
+ buf = virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!buf)) {
+ pr_debug("%s: rx error: %d buffers missing\n",
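
In the virtio_net hunk just above, the buffer-drain loop changes from while (--num_buf) to while (num_buf-- > 1). Both forms release num_buf - 1 outstanding buffers when num_buf >= 1, but with the gso_type check moved earlier in this patch the loop can now be reached with num_buf == 0, where the pre-decrement form underflows and keeps looping. A small demonstration (illustration only; the demo uses int, and the underflow argument holds for unsigned counters too):

/* Illustration only: iteration counts of the old and new loop forms. */
#include <stdio.h>

static int drain_old(int num_buf)
{
	int iters = 0;

	while (--num_buf) {
		if (++iters > 5)	/* guard so the broken case terminates */
			break;
	}
	return iters;
}

static int drain_new(int num_buf)
{
	int iters = 0;

	while (num_buf-- > 1)
		++iters;
	return iters;
}

int main(void)
{
	int n;

	for (n = 0; n <= 3; n++)
		printf("num_buf=%d old=%d new=%d\n",
		       n, drain_old(n), drain_new(n));
	/* For n >= 1 both forms drain n - 1 buffers; for n == 0 the old
	 * form sees --num_buf == -1 (truthy) and spins, the new form exits. */
	return 0;
}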
+diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
+index cb694d2a1228..e826933f71da 100644
+--- a/drivers/pci/host/pci-hyperv.c
++++ b/drivers/pci/host/pci-hyperv.c
+@@ -556,6 +556,26 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev,
+ static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+ static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+
++/*
++ * There is no good way to get notified from vmbus_onoffer_rescind(),
++ * so let's use polling here, since this is not a hot path.
++ */
++static int wait_for_response(struct hv_device *hdev,
++ struct completion *comp)
++{
++ while (true) {
++ if (hdev->channel->rescind) {
++ dev_warn_once(&hdev->device, "The device is gone.\n");
++ return -ENODEV;
++ }
++
++ if (wait_for_completion_timeout(comp, HZ / 10))
++ break;
++ }
++
++ return 0;
++}
++
+ /**
+ * devfn_to_wslot() - Convert from Linux PCI slot to Windows
+ * @devfn: The Linux representation of PCI slot
+@@ -1568,7 +1588,8 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
+ if (ret)
+ goto error;
+
+- wait_for_completion(&comp_pkt.host_event);
++ if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
++ goto error;
+
+ hpdev->desc = *desc;
+ refcount_set(&hpdev->refs, 1);
+@@ -2061,15 +2082,16 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
+ sizeof(struct pci_version_request),
+ (unsigned long)pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
++ if (!ret)
++ ret = wait_for_response(hdev, &comp_pkt.host_event);
++
+ if (ret) {
+ dev_err(&hdev->device,
+- "PCI Pass-through VSP failed sending version reqquest: %#x",
++ "PCI Pass-through VSP failed to request version: %d",
+ ret);
+ goto exit;
+ }
+
+- wait_for_completion(&comp_pkt.host_event);
+-
+ if (comp_pkt.completion_status >= 0) {
+ pci_protocol_version = pci_protocol_versions[i];
+ dev_info(&hdev->device,
+@@ -2278,11 +2300,12 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
+ ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
+ (unsigned long)pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
++ if (!ret)
++ ret = wait_for_response(hdev, &comp_pkt.host_event);
++
+ if (ret)
+ goto exit;
+
+- wait_for_completion(&comp_pkt.host_event);
+-
+ if (comp_pkt.completion_status < 0) {
+ dev_err(&hdev->device,
+ "PCI Pass-through VSP failed D0 Entry with status %x\n",
+@@ -2322,11 +2345,10 @@ static int hv_pci_query_relations(struct hv_device *hdev)
+
+ ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
+ 0, VM_PKT_DATA_INBAND, 0);
+- if (ret)
+- return ret;
++ if (!ret)
++ ret = wait_for_response(hdev, &comp);
+
+- wait_for_completion(&comp);
+- return 0;
++ return ret;
+ }
+
+ /**
+@@ -2396,11 +2418,11 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
+ size_res, (unsigned long)pkt,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
++ if (!ret)
++ ret = wait_for_response(hdev, &comp_pkt.host_event);
+ if (ret)
+ break;
+
+- wait_for_completion(&comp_pkt.host_event);
+-
+ if (comp_pkt.completion_status < 0) {
+ ret = -EPROTO;
+ dev_err(&hdev->device,
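
wait_for_response() above replaces unbounded wait_for_completion() calls with a poll that re-checks the channel's rescind flag every HZ/10. A user-space analogue of that pattern using a timed condition-variable wait (sketch only; all names are hypothetical):

/* Sketch only: wait for a completion in ~100 ms slices, re-checking a
 * cancellation flag between slices, like wait_for_response() above. */
#include <pthread.h>
#include <stdbool.h>
#include <errno.h>
#include <time.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static int wait_for_response(struct completion *comp, volatile bool *rescinded)
{
	struct timespec ts;

	pthread_mutex_lock(&comp->lock);
	while (!comp->done) {
		if (*rescinded) {		/* device is gone: stop waiting */
			pthread_mutex_unlock(&comp->lock);
			return -ENODEV;
		}
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_nsec += 100 * 1000 * 1000;	/* one ~HZ/10 slice */
		if (ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}
		pthread_cond_timedwait(&comp->cond, &comp->lock, &ts);
	}
	pthread_mutex_unlock(&comp->lock);
	return 0;
}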
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 12bcfbac2cc9..d3c90ce5d4c4 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -101,7 +101,9 @@ struct vhost_net_virtqueue {
+ /* vhost zerocopy support fields below: */
+ /* last used idx for outstanding DMA zerocopy buffers */
+ int upend_idx;
+- /* first used idx for DMA done zerocopy buffers */
++ /* For TX, first used idx for DMA done zerocopy buffers
++ * For RX, number of batched heads
++ */
+ int done_idx;
+ /* an array of userspace buffers info */
+ struct ubuf_info *ubuf_info;
+@@ -620,6 +622,18 @@ static int sk_has_rx_data(struct sock *sk)
+ return skb_queue_empty(&sk->sk_receive_queue);
+ }
+
++static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
++{
++ struct vhost_virtqueue *vq = &nvq->vq;
++ struct vhost_dev *dev = vq->dev;
++
++ if (!nvq->done_idx)
++ return;
++
++ vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
++ nvq->done_idx = 0;
++}
++
+ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
+ {
+ struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
+@@ -629,6 +643,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
+ int len = peek_head_len(rvq, sk);
+
+ if (!len && vq->busyloop_timeout) {
++ /* Flush batched heads first */
++ vhost_rx_signal_used(rvq);
+ /* Both tx vq and rx socket were polled here */
+ mutex_lock_nested(&vq->mutex, 1);
+ vhost_disable_notify(&net->dev, vq);
+@@ -756,7 +772,7 @@ static void handle_rx(struct vhost_net *net)
+ };
+ size_t total_len = 0;
+ int err, mergeable;
+- s16 headcount, nheads = 0;
++ s16 headcount;
+ size_t vhost_hlen, sock_hlen;
+ size_t vhost_len, sock_len;
+ struct socket *sock;
+@@ -784,8 +800,8 @@ static void handle_rx(struct vhost_net *net)
+ while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
+ sock_len += sock_hlen;
+ vhost_len = sock_len + vhost_hlen;
+- headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
+- &in, vq_log, &log,
++ headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
++ vhost_len, &in, vq_log, &log,
+ likely(mergeable) ? UIO_MAXIOV : 1);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(headcount < 0))
+@@ -856,12 +872,9 @@ static void handle_rx(struct vhost_net *net)
+ vhost_discard_vq_desc(vq, headcount);
+ goto out;
+ }
+- nheads += headcount;
+- if (nheads > VHOST_RX_BATCH) {
+- vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+- nheads);
+- nheads = 0;
+- }
++ nvq->done_idx += headcount;
++ if (nvq->done_idx > VHOST_RX_BATCH)
++ vhost_rx_signal_used(nvq);
+ if (unlikely(vq_log))
+ vhost_log_write(vq, vq_log, log, vhost_len);
+ total_len += vhost_len;
+@@ -872,9 +885,7 @@ static void handle_rx(struct vhost_net *net)
+ }
+ vhost_net_enable_vq(net, vq);
+ out:
+- if (nheads)
+- vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+- nheads);
++ vhost_rx_signal_used(nvq);
+ mutex_unlock(&vq->mutex);
+ }
+
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index be6a4b6a76c6..68242f50c303 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -981,6 +981,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+ {
+ int ret = 0;
+
++ mutex_lock(&dev->mutex);
+ vhost_dev_lock_vqs(dev);
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+@@ -1016,6 +1017,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+ }
+
+ vhost_dev_unlock_vqs(dev);
++ mutex_unlock(&dev->mutex);
++
+ return ret;
+ }
+ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 8606c9113d3f..a3339ff732a0 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -918,6 +918,11 @@ static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
+ return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
+ }
+
++static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
++{
++ return fl6->flowlabel & IPV6_FLOWLABEL_MASK;
++}
++
+ /*
+ * Prototypes exported by ipv6
+ */
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 03ca089cce0f..799217d6eea2 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1315,6 +1315,35 @@ static inline int mlock_future_check(struct mm_struct *mm,
+ return 0;
+ }
+
++static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
++{
++ if (S_ISREG(inode->i_mode))
++ return MAX_LFS_FILESIZE;
++
++ if (S_ISBLK(inode->i_mode))
++ return MAX_LFS_FILESIZE;
++
++ /* Special "we do even unsigned file positions" case */
++ if (file->f_mode & FMODE_UNSIGNED_OFFSET)
++ return 0;
++
++ /* Yes, random drivers might want more. But I'm tired of buggy drivers */
++ return ULONG_MAX;
++}
++
++static inline bool file_mmap_ok(struct file *file, struct inode *inode,
++ unsigned long pgoff, unsigned long len)
++{
++ u64 maxsize = file_mmap_size_max(file, inode);
++
++ if (maxsize && len > maxsize)
++ return false;
++ maxsize -= len;
++ if (pgoff > maxsize >> PAGE_SHIFT)
++ return false;
++ return true;
++}
++
+ /*
+ * The caller must hold down_write(&current->mm->mmap_sem).
+ */
+@@ -1389,6 +1418,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+ struct inode *inode = file_inode(file);
+ unsigned long flags_mask;
+
++ if (!file_mmap_ok(file, inode, pgoff, len))
++ return -EOVERFLOW;
++
+ flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
+
+ switch (flags & MAP_TYPE) {
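
file_mmap_ok() above rejects mappings whose page offset plus length would wrap past the maximum file size, instead of letting pgoff << PAGE_SHIFT plus len overflow unchecked. A simplified stand-alone version of the check with hypothetical values (illustration only; the real helper also special-cases FMODE_UNSIGNED_OFFSET by returning a 0 limit):

/* Illustration only: the overflow check behind file_mmap_ok(). */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define MAX_SIZE ((uint64_t)1 << 52)	/* stand-in for MAX_LFS_FILESIZE */

static int mmap_range_ok(uint64_t pgoff, uint64_t len)
{
	uint64_t maxsize = MAX_SIZE;

	if (len > maxsize)
		return 0;
	maxsize -= len;			/* room left for the offset */
	if (pgoff > maxsize >> PAGE_SHIFT)
		return 0;
	return 1;
}

int main(void)
{
	/* A sane mapping passes; an offset near the top of the range is
	 * rejected, where the unchecked sum would silently wrap. */
	printf("ok:  %d\n", mmap_range_ok(16, 1 << 20));
	printf("bad: %d\n", mmap_range_ok(UINT64_MAX >> PAGE_SHIFT, 1 << 20));
	return 0;
}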
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 559db9ea8d86..ec3d47ebd919 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1334,7 +1334,7 @@ __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
+ keys->ports.src = fl6->fl6_sport;
+ keys->ports.dst = fl6->fl6_dport;
+ keys->keyid.keyid = fl6->fl6_gre_key;
+- keys->tags.flow_label = (__force u32)fl6->flowlabel;
++ keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
+ keys->basic.ip_proto = fl6->flowi6_proto;
+
+ return flow_hash_from_keys(keys);
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 60a5ad2c33ee..82690745f94a 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1214,9 +1214,6 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
+ cpumask_var_t mask;
+ unsigned long index;
+
+- if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+- return -ENOMEM;
+-
+ index = get_netdev_queue_index(queue);
+
+ if (dev->num_tc) {
+@@ -1226,6 +1223,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
+ return -EINVAL;
+ }
+
++ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
++ return -ENOMEM;
++
+ rcu_read_lock();
+ dev_maps = rcu_dereference(dev->xps_maps);
+ if (dev_maps) {
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index bc290413a49d..824b32936e75 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2245,6 +2245,10 @@ static int do_setlink(const struct sk_buff *skb,
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int err;
+
++ err = validate_linkmsg(dev, tb);
++ if (err < 0)
++ return err;
++
+ if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_IF_NETNSID]) {
+ struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
+ tb, CAP_NET_ADMIN);
+@@ -2608,10 +2612,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ goto errout;
+ }
+
+- err = validate_linkmsg(dev, tb);
+- if (err < 0)
+- goto errout;
+-
+ err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
+ errout:
+ return err;
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 84cd4e3fd01b..0d56e36a6db7 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -283,9 +283,7 @@ int dccp_disconnect(struct sock *sk, int flags)
+
+ dccp_clear_xmit_timers(sk);
+ ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
+- ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+ dp->dccps_hc_rx_ccid = NULL;
+- dp->dccps_hc_tx_ccid = NULL;
+
+ __skb_queue_purge(&sk->sk_receive_queue);
+ __skb_queue_purge(&sk->sk_write_queue);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index f05afaf3235c..aa597b2c1429 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -643,6 +643,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
+ [RTA_ENCAP] = { .type = NLA_NESTED },
+ [RTA_UID] = { .type = NLA_U32 },
+ [RTA_MARK] = { .type = NLA_U32 },
++ [RTA_TABLE] = { .type = NLA_U32 },
+ };
+
+ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 7d36a950d961..19f7d8cd4875 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -717,6 +717,8 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
+ nla_strlcpy(tmp, nla, sizeof(tmp));
+ val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
+ } else {
++ if (nla_len(nla) != sizeof(u32))
++ return false;
+ val = nla_get_u32(nla);
+ }
+
+@@ -1043,6 +1045,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
+ if (val == TCP_CA_UNSPEC)
+ return -EINVAL;
+ } else {
++ if (nla_len(nla) != sizeof(u32))
++ return -EINVAL;
+ val = nla_get_u32(nla);
+ }
+ if (type == RTAX_ADVMSS && val > 65535 - 40)
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 74c962b9b09c..d89d8c59b5ed 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -511,8 +511,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ int err;
+ int copied;
+
+- WARN_ON_ONCE(sk->sk_family == AF_INET6);
+-
+ err = -EAGAIN;
+ skb = sock_dequeue_err_skb(sk);
+ if (!skb)
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 57478d68248d..c4e33f4141d8 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -344,7 +344,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
+
+ if (tdev) {
+ hlen = tdev->hard_header_len + tdev->needed_headroom;
+- mtu = tdev->mtu;
++ mtu = min(tdev->mtu, IP_MAX_MTU);
+ }
+
+ dev->needed_headroom = t_hlen + hlen;
+@@ -379,7 +379,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
+ nt = netdev_priv(dev);
+ t_hlen = nt->hlen + sizeof(struct iphdr);
+ dev->min_mtu = ETH_MIN_MTU;
+- dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
++ dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+ ip_tunnel_add(itn, nt);
+ return nt;
+
+@@ -948,7 +948,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
+ {
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+- int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
++ int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+
+ if (new_mtu < ETH_MIN_MTU)
+ return -EINVAL;
+@@ -1119,7 +1119,7 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
+
+ mtu = ip_tunnel_bind_dev(dev);
+ if (tb[IFLA_MTU]) {
+- unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen;
++ unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
+
+ mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
+ (unsigned int)(max - sizeof(struct iphdr)));
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index b05689bbba31..9669722f6f57 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -356,6 +356,7 @@ static const struct rhashtable_params ipmr_rht_params = {
+ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ {
+ struct mr_table *mrt;
++ int err;
+
+ /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
+ if (id != RT_TABLE_DEFAULT && id >= 1000000000)
+@@ -371,7 +372,11 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ write_pnet(&mrt->net, net);
+ mrt->id = id;
+
+- rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
++ err = rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
++ if (err) {
++ kfree(mrt);
++ return ERR_PTR(err);
++ }
+ INIT_LIST_HEAD(&mrt->mfc_cache_list);
+ INIT_LIST_HEAD(&mrt->mfc_unres_queue);
+
+diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
+index 0cd46bffa469..fc3923932eda 100644
+--- a/net/ipv4/netfilter/nf_flow_table_ipv4.c
++++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
+@@ -213,7 +213,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ enum flow_offload_tuple_dir dir;
+ struct flow_offload *flow;
+ struct net_device *outdev;
+- const struct rtable *rt;
++ struct rtable *rt;
+ struct iphdr *iph;
+ __be32 nexthop;
+
+@@ -234,7 +234,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ dir = tuplehash->tuple.dir;
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+
+- rt = (const struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
++ rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+ if (unlikely(nf_flow_exceeds_mtu(skb, rt)))
+ return NF_ACCEPT;
+
+@@ -251,6 +251,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+
+ skb->dev = outdev;
+ nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
++ skb_dst_set_noref(skb, &rt->dst);
+ neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
+
+ return NF_STOLEN;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 072333760a52..f39ea066977d 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -507,7 +507,8 @@ int ip6_forward(struct sk_buff *skb)
+ send redirects to source routed frames.
+ We don't send redirects to frames decapsulated from IPsec.
+ */
+- if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
++ if (IP6CB(skb)->iif == dst->dev->ifindex &&
++ opt->srcrt == 0 && !skb_sec_path(skb)) {
+ struct in6_addr *target = NULL;
+ struct inet_peer *peer;
+ struct rt6_info *rt;
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 179313b0926c..58b4ffd7168e 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1688,8 +1688,13 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
+ if (new_mtu < ETH_MIN_MTU)
+ return -EINVAL;
+ }
+- if (new_mtu > 0xFFF8 - dev->hard_header_len)
+- return -EINVAL;
++ if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
++ if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
++ return -EINVAL;
++ } else {
++ if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
++ return -EINVAL;
++ }
+ dev->mtu = new_mtu;
+ return 0;
+ }
+@@ -1837,7 +1842,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ dev->mtu -= 8;
+ dev->min_mtu = ETH_MIN_MTU;
+- dev->max_mtu = 0xFFF8 - dev->hard_header_len;
++ dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
+
+ return 0;
+
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 9f6cace9c817..bab166a6fbb3 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -1800,7 +1800,8 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
+ ret = 0;
+ if (!ip6mr_new_table(net, v))
+ ret = -ENOMEM;
+- raw6_sk(sk)->ip6mr_table = v;
++ else
++ raw6_sk(sk)->ip6mr_table = v;
+ rtnl_unlock();
+ return ret;
+ }
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index ba5e04c6ae17..65956d0f8a1f 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1576,6 +1576,12 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
+ ops_data_buf[NDISC_OPS_REDIRECT_DATA_SPACE], *ops_data = NULL;
+ bool ret;
+
++ if (netif_is_l3_master(skb->dev)) {
++ dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
++ if (!dev)
++ return;
++ }
++
+ if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
+ ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
+ dev->name);
+diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
+index 207cb35569b1..2d6652146bba 100644
+--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
++++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
+@@ -243,6 +243,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+
+ skb->dev = outdev;
+ nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
++ skb_dst_set_noref(skb, &rt->dst);
+ neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
+
+ return NF_STOLEN;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 1aee1a537cb1..8f749742f11f 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1850,7 +1850,7 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
+ keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ keys->addrs.v6addrs.src = key_iph->saddr;
+ keys->addrs.v6addrs.dst = key_iph->daddr;
+- keys->tags.flow_label = ip6_flowinfo(key_iph);
++ keys->tags.flow_label = ip6_flowlabel(key_iph);
+ keys->basic.ip_proto = key_iph->nexthdr;
+ }
+
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 5fe139484919..bf4763fd68c2 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -103,7 +103,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+ hdrlen = (osrh->hdrlen + 1) << 3;
+ tot_len = hdrlen + sizeof(*hdr);
+
+- err = skb_cow_head(skb, tot_len);
++ err = skb_cow_head(skb, tot_len + skb->mac_len);
+ if (unlikely(err))
+ return err;
+
+@@ -161,7 +161,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
+
+ hdrlen = (osrh->hdrlen + 1) << 3;
+
+- err = skb_cow_head(skb, hdrlen);
++ err = skb_cow_head(skb, hdrlen + skb->mac_len);
+ if (unlikely(err))
+ return err;
+
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index e85791854c87..5d176c532f0c 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1371,7 +1371,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
+ dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+ dev->mtu = ETH_DATA_LEN - t_hlen;
+ dev->min_mtu = IPV6_MIN_MTU;
+- dev->max_mtu = 0xFFF8 - t_hlen;
++ dev->max_mtu = IP6_MAX_MTU - t_hlen;
+ dev->flags = IFF_NOARP;
+ netif_keep_dst(dev);
+ dev->addr_len = 4;
+@@ -1583,7 +1583,8 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
+ if (tb[IFLA_MTU]) {
+ u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+
+- if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
++ if (mtu >= IPV6_MIN_MTU &&
++ mtu <= IP6_MAX_MTU - dev->hard_header_len)
+ dev->mtu = mtu;
+ }
+
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index dc76bc346829..d3601d421571 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1671,7 +1671,7 @@ static struct file *kcm_clone(struct socket *osock)
+ __module_get(newsock->ops->owner);
+
+ newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
+- &kcm_proto, true);
++ &kcm_proto, false);
+ if (!newsk) {
+ sock_release(newsock);
+ return ERR_PTR(-ENOMEM);
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 0c4530ad74be..b7185d600844 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -428,16 +428,6 @@ static void pppol2tp_put_sk(struct rcu_head *head)
+ */
+ static void pppol2tp_session_close(struct l2tp_session *session)
+ {
+- struct pppol2tp_session *ps;
+-
+- ps = l2tp_session_priv(session);
+- mutex_lock(&ps->sk_lock);
+- ps->__sk = rcu_dereference_protected(ps->sk,
+- lockdep_is_held(&ps->sk_lock));
+- RCU_INIT_POINTER(ps->sk, NULL);
+- if (ps->__sk)
+- call_rcu(&ps->rcu, pppol2tp_put_sk);
+- mutex_unlock(&ps->sk_lock);
+ }
+
+ /* Really kill the session socket. (Called from sock_put() if
+@@ -480,15 +470,24 @@ static int pppol2tp_release(struct socket *sock)
+ sock_orphan(sk);
+ sock->sk = NULL;
+
+- /* If the socket is associated with a session,
+- * l2tp_session_delete will call pppol2tp_session_close which
+- * will drop the session's ref on the socket.
+- */
+ session = pppol2tp_sock_to_session(sk);
+ if (session) {
++ struct pppol2tp_session *ps;
++
+ l2tp_session_delete(session);
+- /* drop the ref obtained by pppol2tp_sock_to_session */
+- sock_put(sk);
++
++ ps = l2tp_session_priv(session);
++ mutex_lock(&ps->sk_lock);
++ ps->__sk = rcu_dereference_protected(ps->sk,
++ lockdep_is_held(&ps->sk_lock));
++ RCU_INIT_POINTER(ps->sk, NULL);
++ mutex_unlock(&ps->sk_lock);
++ call_rcu(&ps->rcu, pppol2tp_put_sk);
++
++ /* Rely on the sock_put() call at the end of the function for
++ * dropping the reference held by pppol2tp_sock_to_session().
++ * The last reference will be dropped by pppol2tp_put_sk().
++ */
+ }
+
+ release_sock(sk);
+@@ -742,7 +741,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ */
+ mutex_lock(&ps->sk_lock);
+ if (rcu_dereference_protected(ps->sk,
+- lockdep_is_held(&ps->sk_lock))) {
++ lockdep_is_held(&ps->sk_lock)) ||
++ ps->__sk) {
+ mutex_unlock(&ps->sk_lock);
+ error = -EEXIST;
+ goto end;
+@@ -803,7 +803,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+
+ out_no_ppp:
+ /* This is how we get the session context from the socket. */
+- sock_hold(sk);
+ sk->sk_user_data = session;
+ rcu_assign_pointer(ps->sk, sk);
+ mutex_unlock(&ps->sk_lock);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index c6a2dd890de3..c9432a0ccd56 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2911,7 +2911,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ if (unlikely(offset < 0))
+ goto out_free;
+ } else if (reserve) {
+- skb_push(skb, reserve);
++ skb_reserve(skb, -reserve);
+ }
+
+ /* Returns -EFAULT on error */
+@@ -4284,7 +4284,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ goto out;
+ if (po->tp_version >= TPACKET_V3 &&
+ req->tp_block_size <=
+- BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
++ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
+ goto out;
+ if (unlikely(req->tp_frame_size < po->tp_hdrlen +
+ po->tp_reserve))
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index c2c732aad87c..86d2d5977f56 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1587,7 +1587,7 @@ int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
+ return ret;
+ ok_count = ret;
+
+- if (!exts)
++ if (!exts || ok_count)
+ return ok_count;
+ ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
+ if (ret < 0)
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 7d0ce2c40f93..2c0c557c0007 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -974,7 +974,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
+ return 0;
+
+ errout_idr:
+- if (fnew->handle)
++ if (!fold)
+ idr_remove(&head->handle_idr, fnew->handle);
+ errout:
+ tcf_exts_destroy(&fnew->exts);
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index 47f82bd794d9..03fc2c427aca 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -634,7 +634,7 @@ unsigned long sctp_transport_timeout(struct sctp_transport *trans)
+ trans->state != SCTP_PF)
+ timeout += trans->hbinterval;
+
+- return timeout;
++ return max_t(unsigned long, timeout, HZ / 5);
+ }
+
+ /* Reset transport variables to their initial values */
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index df26c7b0fe13..1a24660bd2ec 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -745,7 +745,7 @@ int conf_write(const char *name)
+ struct menu *menu;
+ const char *basename;
+ const char *str;
+- char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1];
++ char dirname[PATH_MAX+1], tmpname[PATH_MAX+22], newname[PATH_MAX+8];
+ char *env;
+
+ dirname[0] = 0;