From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Mon, 11 Jun 2018 21:46:49
Message-Id: 1528753595.3035cefd0c0580095edc4a0b27514ec83d648a2e.mpagano@gentoo
commit: 3035cefd0c0580095edc4a0b27514ec83d648a2e
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Mon Jun 11 21:46:35 2018 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Mon Jun 11 21:46:35 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3035cefd

Linux patch 4.14.49

0000_README | 4 +
1048_linux-4.14.49.patch | 1356 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1360 insertions(+)

diff --git a/0000_README b/0000_README
index 023e213..4c28456 100644
--- a/0000_README
+++ b/0000_README
@@ -235,6 +235,10 @@ Patch: 1047_linux-4.14.48.patch
From: http://www.kernel.org
Desc: Linux 4.14.48

+Patch: 1048_linux-4.14.49.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.49
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1048_linux-4.14.49.patch b/1048_linux-4.14.49.patch
new file mode 100644
index 0000000..4f8e7ec
--- /dev/null
+++ b/1048_linux-4.14.49.patch
@@ -0,0 +1,1356 @@
+diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
+index cfc66ea72329..a365656e4873 100644
+--- a/Documentation/networking/netdev-FAQ.txt
++++ b/Documentation/networking/netdev-FAQ.txt
+@@ -176,6 +176,15 @@ A: No. See above answer. In short, if you think it really belongs in
+ dash marker line as described in Documentation/process/submitting-patches.rst to
+ temporarily embed that information into the patch that you send.
+
++Q: Are all networking bug fixes backported to all stable releases?
++
++A: Due to capacity, Dave could only take care of the backports for the last
++ 2 stable releases. For earlier stable releases, each stable branch maintainer
++ is supposed to take care of them. If you find any patch is missing from an
++ earlier stable branch, please notify stable@vger.kernel.org with either a
++ commit ID or a formal patch backported, and CC Dave and other relevant
++ networking developers.
++
+ Q: Someone said that the comment style and coding convention is different
+ for the networking content. Is this true?
+
+diff --git a/Makefile b/Makefile
+index 7a246f1ce44e..480ae7ef755c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 48
++SUBLEVEL = 49
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+index b3c6e997ccdb..03244b3c985d 100644
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
+ return -ENOMEM;
+
+ filp->private_data = priv;
++ filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+ priv->filp = filp;
+ priv->pid = get_pid(task_pid(current));
+ priv->minor = minor;
+diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
+index 944a7f338099..1b25d8bc153a 100644
+--- a/drivers/isdn/hardware/eicon/diva.c
++++ b/drivers/isdn/hardware/eicon/diva.c
+@@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void)
+ ** Receive and process command from user mode utility
+ */
+ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
+- int length,
++ int length, void *mptr,
+ divas_xdi_copy_from_user_fn_t cp_fn)
+ {
+- diva_xdi_um_cfg_cmd_t msg;
++ diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
+ diva_os_xdi_adapter_t *a = NULL;
+ diva_os_spin_lock_magic_t old_irql;
+ struct list_head *tmp;
+@@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
+ length, sizeof(diva_xdi_um_cfg_cmd_t)))
+ return NULL;
+ }
+- if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) {
++ if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
+ DBG_ERR(("A: A(?) open, write error"))
+ return NULL;
+ }
+ diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
+ list_for_each(tmp, &adapter_queue) {
+ a = list_entry(tmp, diva_os_xdi_adapter_t, link);
+- if (a->controller == (int)msg.adapter)
++ if (a->controller == (int)msg->adapter)
+ break;
+ a = NULL;
+ }
+ diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
+
+ if (!a) {
+- DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter))
++ DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
+ }
+
+ return (a);
+@@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)
+
+ int
+ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
+- int length, divas_xdi_copy_from_user_fn_t cp_fn)
++ int length, void *mptr,
++ divas_xdi_copy_from_user_fn_t cp_fn)
+ {
++ diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
+ diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
+ void *data;
+
+@@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
+ return (-2);
+ }
+
+- length = (*cp_fn) (os_handle, data, src, length);
++ if (msg) {
++ *(diva_xdi_um_cfg_cmd_t *)data = *msg;
++ length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
++ src + sizeof(*msg), length - sizeof(*msg));
++ } else {
++ length = (*cp_fn) (os_handle, data, src, length);
++ }
+ if (length > 0) {
+ if ((*(a->interface.cmd_proc))
+ (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
+diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
+index b067032093a8..1ad76650fbf9 100644
+--- a/drivers/isdn/hardware/eicon/diva.h
++++ b/drivers/isdn/hardware/eicon/diva.h
+@@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
+ int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
+
+ int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
+- int length, divas_xdi_copy_from_user_fn_t cp_fn);
++ int length, void *msg,
++ divas_xdi_copy_from_user_fn_t cp_fn);
+
+ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
+- int length,
++ int length, void *msg,
+ divas_xdi_copy_from_user_fn_t cp_fn);
+
+ void diva_xdi_close_adapter(void *adapter, void *os_handle);
+diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
+index b2023e08dcd2..932e98d0d901 100644
+--- a/drivers/isdn/hardware/eicon/divasmain.c
++++ b/drivers/isdn/hardware/eicon/divasmain.c
+@@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
+ static ssize_t divas_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+ {
++ diva_xdi_um_cfg_cmd_t msg;
+ int ret = -EINVAL;
+
+ if (!file->private_data) {
+ file->private_data = diva_xdi_open_adapter(file, buf,
+- count,
++ count, &msg,
+ xdi_copy_from_user);
+- }
+- if (!file->private_data) {
+- return (-ENODEV);
++ if (!file->private_data)
++ return (-ENODEV);
++ ret = diva_xdi_write(file->private_data, file,
++ buf, count, &msg, xdi_copy_from_user);
++ } else {
++ ret = diva_xdi_write(file->private_data, file,
++ buf, count, NULL, xdi_copy_from_user);
+ }
+
+- ret = diva_xdi_write(file->private_data, file,
+- buf, count, xdi_copy_from_user);
+ switch (ret) {
+ case -1: /* Message should be removed from rx mailbox first */
+ ret = -EBUSY;
+@@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
+ static ssize_t divas_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+ {
++ diva_xdi_um_cfg_cmd_t msg;
+ int ret = -EINVAL;
+
+ if (!file->private_data) {
+ file->private_data = diva_xdi_open_adapter(file, buf,
+- count,
++ count, &msg,
+ xdi_copy_from_user);
+ }
+ if (!file->private_data) {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+index 7dd83d0ef0a0..22243c480a05 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+@@ -588,7 +588,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
+ * slots for the highest priority.
+ */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
+- NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
++ NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /* Mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index aef40f02c77f..a03a32a4ffca 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -2703,11 +2703,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ pci_set_master(pdev);
+
+ /* Query PCI controller on system for DMA addressing
+- * limitation for the device. Try 64-bit first, and
++ * limitation for the device. Try 47-bit first, and
+ * fail to 32-bit.
+ */
+
+- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
++ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+@@ -2721,10 +2721,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ goto err_out_release_regions;
+ }
+ } else {
+- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
++ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
+ if (err) {
+ dev_err(dev, "Unable to obtain %u-bit DMA "
+- "for consistent allocations, aborting\n", 64);
++ "for consistent allocations, aborting\n", 47);
+ goto err_out_release_regions;
+ }
+ using_dac = 1;
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 1b03c32afc1f..7e2b70c2bba3 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -3294,7 +3294,9 @@ void be_detect_error(struct be_adapter *adapter)
+ if ((val & POST_STAGE_FAT_LOG_START)
+ != POST_STAGE_FAT_LOG_START &&
+ (val & POST_STAGE_ARMFW_UE)
+- != POST_STAGE_ARMFW_UE)
++ != POST_STAGE_ARMFW_UE &&
++ (val & POST_STAGE_RECOVERABLE_ERR)
++ != POST_STAGE_RECOVERABLE_ERR)
+ return;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
+index 22a3bfe1ed8f..73419224367a 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
++++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
+@@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ struct mlx4_qp *qp;
+
+- spin_lock(&qp_table->lock);
++ spin_lock_irq(&qp_table->lock);
+
+ qp = __mlx4_qp_lookup(dev, qpn);
+
+- spin_unlock(&qp_table->lock);
++ spin_unlock_irq(&qp_table->lock);
+ return qp;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 3476f594c195..8285e6d24f30 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -635,6 +635,45 @@ static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+ return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+ }
+
++static __be32 mlx5e_get_fcs(struct sk_buff *skb)
++{
++ int last_frag_sz, bytes_in_prev, nr_frags;
++ u8 *fcs_p1, *fcs_p2;
++ skb_frag_t *last_frag;
++ __be32 fcs_bytes;
++
++ if (!skb_is_nonlinear(skb))
++ return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
++
++ nr_frags = skb_shinfo(skb)->nr_frags;
++ last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
++ last_frag_sz = skb_frag_size(last_frag);
++
++ /* If all FCS data is in last frag */
++ if (last_frag_sz >= ETH_FCS_LEN)
++ return *(__be32 *)(skb_frag_address(last_frag) +
++ last_frag_sz - ETH_FCS_LEN);
++
++ fcs_p2 = (u8 *)skb_frag_address(last_frag);
++ bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
++
++ /* Find where the other part of the FCS is - Linear or another frag */
++ if (nr_frags == 1) {
++ fcs_p1 = skb_tail_pointer(skb);
++ } else {
++ skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
++
++ fcs_p1 = skb_frag_address(prev_frag) +
++ skb_frag_size(prev_frag);
++ }
++ fcs_p1 -= bytes_in_prev;
++
++ memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
++ memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
++
++ return fcs_bytes;
++}
++
+ static inline void mlx5e_handle_csum(struct net_device *netdev,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq,
+@@ -653,6 +692,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
+ if (is_first_ethertype_ip(skb)) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
++ if (unlikely(netdev->features & NETIF_F_RXFCS))
++ skb->csum = csum_add(skb->csum,
++ (__force __wsum)mlx5e_get_fcs(skb));
+ rq->stats.csum_complete++;
+ return;
+ }
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+index 629bfa0cd3f0..27ba476f761d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+@@ -77,7 +77,7 @@
+ #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
+
+ /* ILT entry structure */
+-#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
++#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
+ #define ILT_ENTRY_PHY_ADDR_SHIFT 0
+ #define ILT_ENTRY_VALID_MASK 0x1ULL
+ #define ILT_ENTRY_VALID_SHIFT 52
+diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
+index 3fe8cc5c177e..9b27ca264c66 100644
+--- a/drivers/net/phy/bcm-cygnus.c
++++ b/drivers/net/phy/bcm-cygnus.c
+@@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
+ return rc;
+
+ /* make rcal=100, since rdb default is 000 */
+- rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10);
++ rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
+ if (rc < 0)
+ return rc;
+
+ /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
+- rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10);
++ rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
+ if (rc < 0)
+ return rc;
+
+ /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
+- rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00);
++ rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);
+
+ return 0;
+ }
+diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
+index 171010eb4d9c..8d96c6f048d0 100644
+--- a/drivers/net/phy/bcm-phy-lib.c
++++ b/drivers/net/phy/bcm-phy-lib.c
+@@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
+ /* The register must be written to both the Shadow Register Select and
+ * the Shadow Read Register Selector
+ */
+- phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum |
++ phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
+ regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
+ return phy_read(phydev, MII_BCM54XX_AUX_CTL);
+ }
+diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
+index 7c73808cbbde..81cceaa412fe 100644
+--- a/drivers/net/phy/bcm-phy-lib.h
++++ b/drivers/net/phy/bcm-phy-lib.h
+@@ -14,11 +14,18 @@
+ #ifndef _LINUX_BCM_PHY_LIB_H
+ #define _LINUX_BCM_PHY_LIB_H
+
++#include <linux/brcmphy.h>
+ #include <linux/phy.h>
+
+ int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
+ int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
+
++static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
++ u16 reg, u16 val)
++{
++ return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
++}
++
+ int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
+ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
+
+diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
+index 8b33f688ac8a..3c5b2a2e2fcc 100644
+--- a/drivers/net/phy/bcm7xxx.c
++++ b/drivers/net/phy/bcm7xxx.c
+@@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv {
+ static void r_rc_cal_reset(struct phy_device *phydev)
+ {
+ /* Reset R_CAL/RC_CAL Engine */
+- bcm_phy_write_exp(phydev, 0x00b0, 0x0010);
++ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
+
+ /* Disable Reset R_AL/RC_CAL Engine */
+- bcm_phy_write_exp(phydev, 0x00b0, 0x0000);
++ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
+ }
+
+ static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 8a222ae5950e..83c591713837 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1004,7 +1004,8 @@ static void team_port_disable(struct team *team,
+ static void __team_compute_features(struct team *team)
+ {
+ struct team_port *port;
+- u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
++ netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
++ NETIF_F_ALL_FOR_ALL;
+ netdev_features_t enc_features = TEAM_ENC_FEATURES;
+ unsigned short max_hard_header_len = ETH_HLEN;
+ unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index bc38d54e37b9..3d9ad11e4f28 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1315,7 +1315,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ else
+ *skb_xdp = 0;
+
+- preempt_disable();
++ local_bh_disable();
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(tun->xdp_prog);
+ if (xdp_prog && !*skb_xdp) {
+@@ -1338,7 +1338,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ if (err)
+ goto err_redirect;
+ rcu_read_unlock();
+- preempt_enable();
++ local_bh_enable();
+ return NULL;
+ case XDP_TX:
+ xdp_xmit = true;
+@@ -1360,7 +1360,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ skb = build_skb(buf, buflen);
+ if (!skb) {
+ rcu_read_unlock();
+- preempt_enable();
++ local_bh_enable();
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -1373,12 +1373,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ skb->dev = tun->dev;
+ generic_xdp_tx(skb, xdp_prog);
+ rcu_read_unlock();
+- preempt_enable();
++ local_bh_enable();
+ return NULL;
+ }
+
+ rcu_read_unlock();
+- preempt_enable();
++ local_bh_enable();
+
+ return skb;
+
+@@ -1386,7 +1386,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ put_page(alloc_frag->page);
+ err_xdp:
+ rcu_read_unlock();
+- preempt_enable();
++ local_bh_enable();
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ return NULL;
+ }
+@@ -1556,16 +1556,19 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ struct bpf_prog *xdp_prog;
+ int ret;
+
++ local_bh_disable();
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(tun->xdp_prog);
+ if (xdp_prog) {
+ ret = do_xdp_generic(xdp_prog, skb);
+ if (ret != XDP_PASS) {
+ rcu_read_unlock();
++ local_bh_enable();
+ return total_len;
+ }
+ }
+ rcu_read_unlock();
++ local_bh_enable();
+ }
+
+ rxhash = __skb_get_hash_symmetric(skb);
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index 7220cd620717..0362acd5cdca 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -609,7 +609,7 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = {
+ */
+ static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = {
+ .description = "CDC MBIM",
+- .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
++ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
+ .bind = cdc_mbim_bind,
+ .unbind = cdc_mbim_unbind,
+ .manage_power = cdc_mbim_manage_power,
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 948611317c97..9e93e7a5df7e 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -632,6 +632,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ void *data;
+ u32 act;
+
++ /* Transient failure which in theory could occur if
++ * in-flight packets from before XDP was enabled reach
++ * the receive path after XDP is loaded.
++ */
++ if (unlikely(hdr->hdr.gso_type))
++ goto err_xdp;
++
+ /* This happens when rx buffer size is underestimated */
+ if (unlikely(num_buf > 1 ||
+ headroom < virtnet_get_headroom(vi))) {
+@@ -647,14 +654,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ xdp_page = page;
+ }
+
+- /* Transient failure which in theory could occur if
+- * in-flight packets from before XDP was enabled reach
+- * the receive path after XDP is loaded. In practice I
+- * was not able to create this condition.
+- */
+- if (unlikely(hdr->hdr.gso_type))
+- goto err_xdp;
+-
+ /* Allow consuming headroom but reserve enough space to push
+ * the descriptor on if we get an XDP_TX return code.
+ */
+@@ -688,7 +687,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ trace_xdp_exception(vi->dev, xdp_prog, act);
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+ if (unlikely(xdp_page != page))
+- goto err_xdp;
++ put_page(page);
+ rcu_read_unlock();
+ goto xdp_xmit;
+ default:
+@@ -777,7 +776,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ rcu_read_unlock();
+ err_skb:
+ put_page(page);
+- while (--num_buf) {
++ while (num_buf-- > 1) {
+ buf = virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!buf)) {
+ pr_debug("%s: rx error: %d buffers missing\n",
+diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
+index c91662927de0..0b750228ad70 100644
+--- a/drivers/pci/host/pci-hyperv.c
++++ b/drivers/pci/host/pci-hyperv.c
+@@ -566,6 +566,26 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev,
+ static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+ static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+
++/*
++ * There is no good way to get notified from vmbus_onoffer_rescind(),
++ * so let's use polling here, since this is not a hot path.
++ */
++static int wait_for_response(struct hv_device *hdev,
++ struct completion *comp)
++{
++ while (true) {
++ if (hdev->channel->rescind) {
++ dev_warn_once(&hdev->device, "The device is gone.\n");
++ return -ENODEV;
++ }
++
++ if (wait_for_completion_timeout(comp, HZ / 10))
++ break;
++ }
++
++ return 0;
++}
++
+ /**
+ * devfn_to_wslot() - Convert from Linux PCI slot to Windows
+ * @devfn: The Linux representation of PCI slot
+@@ -1582,7 +1602,8 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
+ if (ret)
+ goto error;
+
+- wait_for_completion(&comp_pkt.host_event);
++ if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
++ goto error;
+
+ hpdev->desc = *desc;
+ refcount_set(&hpdev->refs, 1);
+@@ -2075,15 +2096,16 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
+ sizeof(struct pci_version_request),
+ (unsigned long)pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
++ if (!ret)
++ ret = wait_for_response(hdev, &comp_pkt.host_event);
++
+ if (ret) {
+ dev_err(&hdev->device,
+- "PCI Pass-through VSP failed sending version reqquest: %#x",
++ "PCI Pass-through VSP failed to request version: %d",
+ ret);
+ goto exit;
+ }
+
+- wait_for_completion(&comp_pkt.host_event);
+-
+ if (comp_pkt.completion_status >= 0) {
+ pci_protocol_version = pci_protocol_versions[i];
+ dev_info(&hdev->device,
+@@ -2292,11 +2314,12 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
+ ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
+ (unsigned long)pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
++ if (!ret)
++ ret = wait_for_response(hdev, &comp_pkt.host_event);
++
+ if (ret)
+ goto exit;
+
+- wait_for_completion(&comp_pkt.host_event);
+-
+ if (comp_pkt.completion_status < 0) {
+ dev_err(&hdev->device,
+ "PCI Pass-through VSP failed D0 Entry with status %x\n",
+@@ -2336,11 +2359,10 @@ static int hv_pci_query_relations(struct hv_device *hdev)
+
+ ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
+ 0, VM_PKT_DATA_INBAND, 0);
+- if (ret)
+- return ret;
++ if (!ret)
++ ret = wait_for_response(hdev, &comp);
+
+- wait_for_completion(&comp);
+- return 0;
++ return ret;
+ }
+
+ /**
+@@ -2410,11 +2432,11 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
+ size_res, (unsigned long)pkt,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
++ if (!ret)
++ ret = wait_for_response(hdev, &comp_pkt.host_event);
+ if (ret)
+ break;
+
+- wait_for_completion(&comp_pkt.host_event);
+-
+ if (comp_pkt.completion_status < 0) {
+ ret = -EPROTO;
+ dev_err(&hdev->device,
+diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
+index 2eb61d54bbb4..ea9e1e0ed5b8 100644
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -423,9 +423,18 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp,
+
+ #define SD_ZBC_BUF_SIZE 131072
+
+-static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
++/**
++ * sd_zbc_check_zone_size - Check the device zone sizes
++ * @sdkp: Target disk
++ *
++ * Check that all zones of the device are equal. The last zone can however
++ * be smaller. The zone size must also be a power of two number of LBAs.
++ *
++ * Returns the zone size in bytes upon success or an error code upon failure.
++ */
++static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+ {
+- u64 zone_blocks;
++ u64 zone_blocks = 0;
+ sector_t block = 0;
+ unsigned char *buf;
+ unsigned char *rec;
+@@ -434,8 +443,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+ int ret;
+ u8 same;
+
+- sdkp->zone_blocks = 0;
+-
+ /* Get a buffer */
+ buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+
+ /* Do a report zone to get the same field */
+ ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
+- if (ret) {
+- zone_blocks = 0;
+- goto out;
+- }
++ if (ret)
++ goto out_free;
+
+ same = buf[4] & 0x0f;
+ if (same > 0) {
+@@ -472,16 +477,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+
+ /* Parse zone descriptors */
+ while (rec < buf + buf_len) {
+- zone_blocks = get_unaligned_be64(&rec[8]);
+- if (sdkp->zone_blocks == 0) {
+- sdkp->zone_blocks = zone_blocks;
+- } else if (zone_blocks != sdkp->zone_blocks &&
+- (block + zone_blocks < sdkp->capacity
+- || zone_blocks > sdkp->zone_blocks)) {
++ u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
++
++ if (zone_blocks == 0) {
++ zone_blocks = this_zone_blocks;
++ } else if (this_zone_blocks != zone_blocks &&
++ (block + this_zone_blocks < sdkp->capacity
++ || this_zone_blocks > zone_blocks)) {
+ zone_blocks = 0;
+ goto out;
+ }
+- block += zone_blocks;
++ block += this_zone_blocks;
+ rec += 64;
+ }
+
+@@ -489,61 +495,77 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+ ret = sd_zbc_report_zones(sdkp, buf,
+ SD_ZBC_BUF_SIZE, block);
+ if (ret)
+- return ret;
++ goto out_free;
+ }
+
+ } while (block < sdkp->capacity);
+
+- zone_blocks = sdkp->zone_blocks;
+-
+ out:
+- kfree(buf);
+-
+ if (!zone_blocks) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Devices with non constant zone "
+ "size are not supported\n");
+- return -ENODEV;
+- }
+-
+- if (!is_power_of_2(zone_blocks)) {
++ ret = -ENODEV;
++ } else if (!is_power_of_2(zone_blocks)) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Devices with non power of 2 zone "
+ "size are not supported\n");
+- return -ENODEV;
+- }
+-
+- if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
++ ret = -ENODEV;
++ } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Zone size too large\n");
+- return -ENODEV;
++ ret = -ENODEV;
++ } else {
++ ret = zone_blocks;
+ }
+
+- sdkp->zone_blocks = zone_blocks;
++out_free:
++ kfree(buf);
+
+- return 0;
++ return ret;
+ }
+
+-static int sd_zbc_setup(struct scsi_disk *sdkp)
++static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
+ {
++ struct request_queue *q = sdkp->disk->queue;
++ u32 zone_shift = ilog2(zone_blocks);
++ u32 nr_zones;
+
+ /* chunk_sectors indicates the zone size */
+- blk_queue_chunk_sectors(sdkp->disk->queue,
+- logical_to_sectors(sdkp->device, sdkp->zone_blocks));
+- sdkp->zone_shift = ilog2(sdkp->zone_blocks);
+- sdkp->nr_zones = sdkp->capacity >> sdkp->zone_shift;
+- if (sdkp->capacity & (sdkp->zone_blocks - 1))
+- sdkp->nr_zones++;
+-
+- if (!sdkp->zones_wlock) {
+- sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones),
+- sizeof(unsigned long),
+- GFP_KERNEL);
+- if (!sdkp->zones_wlock)
+- return -ENOMEM;
++ blk_queue_chunk_sectors(q,
++ logical_to_sectors(sdkp->device, zone_blocks));
++ nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
++
++ /*
++ * Initialize the disk zone write lock bitmap if the number
++ * of zones changed.
++ */
++ if (nr_zones != sdkp->nr_zones) {
++ unsigned long *zones_wlock = NULL;
++
++ if (nr_zones) {
++ zones_wlock = kcalloc(BITS_TO_LONGS(nr_zones),
++ sizeof(unsigned long),
++ GFP_KERNEL);
++ if (!zones_wlock)
++ return -ENOMEM;
++ }
++
++ blk_mq_freeze_queue(q);
++ sdkp->zone_blocks = zone_blocks;
++ sdkp->zone_shift = zone_shift;
++ sdkp->nr_zones = nr_zones;
++ swap(sdkp->zones_wlock, zones_wlock);
++ blk_mq_unfreeze_queue(q);
++
++ kfree(zones_wlock);
++
++ /* READ16/WRITE16 is mandatory for ZBC disks */
++ sdkp->device->use_16_for_rw = 1;
++ sdkp->device->use_10_for_rw = 0;
+ }
+
+ return 0;
+@@ -552,6 +574,7 @@ static int sd_zbc_setup(struct scsi_disk *sdkp)
+ int sd_zbc_read_zones(struct scsi_disk *sdkp,
+ unsigned char *buf)
+ {
++ int64_t zone_blocks;
+ int ret;
+
+ if (!sd_is_zoned(sdkp))
+@@ -589,19 +612,19 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp,
+ * Check zone size: only devices with a constant zone size (except
+ * an eventual last runt zone) that is a power of 2 are supported.
+ */
+- ret = sd_zbc_check_zone_size(sdkp);
+- if (ret)
++ zone_blocks = sd_zbc_check_zone_size(sdkp);
++ ret = -EFBIG;
++ if (zone_blocks != (u32)zone_blocks)
++ goto err;
++ ret = zone_blocks;
++ if (ret < 0)
+ goto err;
+
+ /* The drive satisfies the kernel restrictions: set it up */
+- ret = sd_zbc_setup(sdkp);
++ ret = sd_zbc_setup(sdkp, zone_blocks);
+ if (ret)
+ goto err;
+
+- /* READ16/WRITE16 is mandatory for ZBC disks */
+- sdkp->device->use_16_for_rw = 1;
+- sdkp->device->use_10_for_rw = 0;
+-
+ return 0;
+
+ err:
+@@ -614,6 +637,7 @@ void sd_zbc_remove(struct scsi_disk *sdkp)
+ {
+ kfree(sdkp->zones_wlock);
+ sdkp->zones_wlock = NULL;
++ sdkp->nr_zones = 0;
+ }
+
+ void sd_zbc_print_zones(struct scsi_disk *sdkp)
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 8e3ca4400766..50e48afd88ff 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -993,6 +993,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+ {
+ int ret = 0;
+
++ mutex_lock(&dev->mutex);
+ vhost_dev_lock_vqs(dev);
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+@@ -1024,6 +1025,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+ }
+
+ vhost_dev_unlock_vqs(dev);
++ mutex_unlock(&dev->mutex);
++
+ return ret;
+ }
+ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 27d59cf36341..b475d1ebbbbf 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -59,7 +59,8 @@
+ BTRFS_HEADER_FLAG_RELOC |\
+ BTRFS_SUPER_FLAG_ERROR |\
+ BTRFS_SUPER_FLAG_SEEDING |\
+- BTRFS_SUPER_FLAG_METADUMP)
++ BTRFS_SUPER_FLAG_METADUMP |\
++ BTRFS_SUPER_FLAG_METADUMP_V2)
+
+ static const struct extent_io_ops btree_extent_io_ops;
+ static void end_workqueue_fn(struct btrfs_work *work);
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 9596aa93d6ef..a54b8c58ccb7 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -861,6 +861,11 @@ static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
+ return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
+ }
+
++static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
++{
++ return fl6->flowlabel & IPV6_FLOWLABEL_MASK;
++}
++
+ /*
+ * Prototypes exported by ipv6
+ */
+diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
+index 8f659bb7badc..7115838fbf2a 100644
+--- a/include/uapi/linux/btrfs_tree.h
++++ b/include/uapi/linux/btrfs_tree.h
+@@ -456,6 +456,7 @@ struct btrfs_free_space_header {
+
+ #define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32)
+ #define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33)
++#define BTRFS_SUPER_FLAG_METADUMP_V2 (1ULL << 34)
+
+
+ /*
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 11f96fad5271..f858b1f336af 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1315,6 +1315,35 @@ static inline int mlock_future_check(struct mm_struct *mm,
+ return 0;
+ }
+
++static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
++{
++ if (S_ISREG(inode->i_mode))
++ return MAX_LFS_FILESIZE;
++
++ if (S_ISBLK(inode->i_mode))
++ return MAX_LFS_FILESIZE;
++
++ /* Special "we do even unsigned file positions" case */
++ if (file->f_mode & FMODE_UNSIGNED_OFFSET)
++ return 0;
++
++ /* Yes, random drivers might want more. But I'm tired of buggy drivers */
++ return ULONG_MAX;
++}
++
++static inline bool file_mmap_ok(struct file *file, struct inode *inode,
++ unsigned long pgoff, unsigned long len)
++{
++ u64 maxsize = file_mmap_size_max(file, inode);
++
++ if (maxsize && len > maxsize)
++ return false;
++ maxsize -= len;
++ if (pgoff > maxsize >> PAGE_SHIFT)
++ return false;
++ return true;
++}
++
+ /*
+ * The caller must hold down_write(&current->mm->mmap_sem).
+ */
+@@ -1388,6 +1417,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+ if (file) {
+ struct inode *inode = file_inode(file);
+
++ if (!file_mmap_ok(file, inode, pgoff, len))
++ return -EOVERFLOW;
++
+ switch (flags & MAP_TYPE) {
+ case MAP_SHARED:
+ if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index f950b80c0dd1..d8796a7874b6 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1179,7 +1179,7 @@ __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
+ keys->ports.src = fl6->fl6_sport;
+ keys->ports.dst = fl6->fl6_dport;
+ keys->keyid.keyid = fl6->fl6_gre_key;
+- keys->tags.flow_label = (__force u32)fl6->flowlabel;
++ keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
+ keys->basic.ip_proto = fl6->flowi6_proto;
+
+ return flow_hash_from_keys(keys);
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 927a6dcbad96..8f17724a173c 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1207,9 +1207,6 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
+ cpumask_var_t mask;
+ unsigned long index;
+
+- if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+- return -ENOMEM;
+-
+ index = get_netdev_queue_index(queue);
+
+ if (dev->num_tc) {
+@@ -1219,6 +1216,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
+ return -EINVAL;
+ }
+
++ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
++ return -ENOMEM;
++
+ rcu_read_lock();
+ dev_maps = rcu_dereference(dev->xps_maps);
+ if (dev_maps) {
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 5ace48926b19..4cfdad08aca0 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1958,6 +1958,10 @@ static int do_setlink(const struct sk_buff *skb,
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int err;
+
++ err = validate_linkmsg(dev, tb);
++ if (err < 0)
++ return err;
++
+ if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
+ struct net *net = rtnl_link_get_net(dev_net(dev), tb);
+ if (IS_ERR(net)) {
+@@ -2296,10 +2300,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ goto errout;
+ }
+
+- err = validate_linkmsg(dev, tb);
+- if (err < 0)
+- goto errout;
+-
+ err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
+ errout:
+ return err;
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index ff3b058cf58c..936dab12f99f 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -280,9 +280,7 @@ int dccp_disconnect(struct sock *sk, int flags)
+
+ dccp_clear_xmit_timers(sk);
+ ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
+- ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+ dp->dccps_hc_rx_ccid = NULL;
+- dp->dccps_hc_tx_ccid = NULL;
+
+ __skb_queue_purge(&sk->sk_receive_queue);
+ __skb_queue_purge(&sk->sk_write_queue);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index d72874150905..df8fd3ce713d 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -625,6 +625,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
+ [RTA_ENCAP] = { .type = NLA_NESTED },
+ [RTA_UID] = { .type = NLA_U32 },
+ [RTA_MARK] = { .type = NLA_U32 },
++ [RTA_TABLE] = { .type = NLA_U32 },
+ };
+
+ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index f39955913d3f..b557af72cde9 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -725,6 +725,8 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
+ nla_strlcpy(tmp, nla, sizeof(tmp));
+ val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+ } else {
++ if (nla_len(nla) != sizeof(u32))
++ return false;
+ val = nla_get_u32(nla);
+ }
+
+@@ -1051,6 +1053,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
+ if (val == TCP_CA_UNSPEC)
+ return -EINVAL;
+ } else {
++ if (nla_len(nla) != sizeof(u32))
++ return -EINVAL;
+ val = nla_get_u32(nla);
+ }
+ if (type == RTAX_ADVMSS && val > 65535 - 40)
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 1e70ed5244ea..d07ba4d5917b 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -511,8 +511,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ int err;
+ int copied;
+
+- WARN_ON_ONCE(sk->sk_family == AF_INET6);
+-
+ err = -EAGAIN;
+ skb = sock_dequeue_err_skb(sk);
+ if (!skb)
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index c9b3e6e069ae..cbd9c0d8a788 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -323,6 +323,7 @@ static const struct rhashtable_params ipmr_rht_params = {
+ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ {
+ struct mr_table *mrt;
++ int err;
+
+ /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
+ if (id != RT_TABLE_DEFAULT && id >= 1000000000)
+@@ -338,7 +339,11 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ write_pnet(&mrt->net, net);
+ mrt->id = id;
+
+- rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
++ err = rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
++ if (err) {
++ kfree(mrt);
++ return ERR_PTR(err);
++ }
+ INIT_LIST_HEAD(&mrt->mfc_cache_list);
+ INIT_LIST_HEAD(&mrt->mfc_unres_queue);
+
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 0f2d74885bcb..32fcce711855 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -506,7 +506,8 @@ int ip6_forward(struct sk_buff *skb)
+ send redirects to source routed frames.
+ We don't send redirects to frames decapsulated from IPsec.
+ */
+- if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
++ if (IP6CB(skb)->iif == dst->dev->ifindex &&
++ opt->srcrt == 0 && !skb_sec_path(skb)) {
+ struct in6_addr *target = NULL;
+ struct inet_peer *peer;
+ struct rt6_info *rt;
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 565a0388587a..84ee2eb88121 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1693,8 +1693,13 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
+ if (new_mtu < ETH_MIN_MTU)
+ return -EINVAL;
+ }
+- if (new_mtu > 0xFFF8 - dev->hard_header_len)
+- return -EINVAL;
++ if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
++ if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
++ return -EINVAL;
++ } else {
++ if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
++ return -EINVAL;
++ }
+ dev->mtu = new_mtu;
+ return 0;
+ }
+@@ -1842,7 +1847,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ dev->mtu -= 8;
+ dev->min_mtu = ETH_MIN_MTU;
+- dev->max_mtu = 0xFFF8 - dev->hard_header_len;
++ dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
+
+ return 0;
+
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index e1060f28410d..8015e74fd7d9 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -1795,7 +1795,8 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
+ ret = 0;
+ if (!ip6mr_new_table(net, v))
+ ret = -ENOMEM;
+- raw6_sk(sk)->ip6mr_table = v;
++ else
++ raw6_sk(sk)->ip6mr_table = v;
+ rtnl_unlock();
+ return ret;
+ }
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index dd28005efb97..d081db125905 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1568,6 +1568,12 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
+ ops_data_buf[NDISC_OPS_REDIRECT_DATA_SPACE], *ops_data = NULL;
+ bool ret;
+
++ if (netif_is_l3_master(skb->dev)) {
++ dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
++ if (!dev)
++ return;
++ }
++
+ if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
+ ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
+ dev->name);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 7d50d889ab6e..375b20d5bbd7 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1250,7 +1250,7 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
+ keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ keys->addrs.v6addrs.src = key_iph->saddr;
+ keys->addrs.v6addrs.dst = key_iph->daddr;
+- keys->tags.flow_label = ip6_flowinfo(key_iph);
++ keys->tags.flow_label = ip6_flowlabel(key_iph);
+ keys->basic.ip_proto = key_iph->nexthdr;
+ }
+
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 5fe139484919..bf4763fd68c2 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -103,7 +103,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+ hdrlen = (osrh->hdrlen + 1) << 3;
+ tot_len = hdrlen + sizeof(*hdr);
+
+- err = skb_cow_head(skb, tot_len);
++ err = skb_cow_head(skb, tot_len + skb->mac_len);
+ if (unlikely(err))
+ return err;
+
+@@ -161,7 +161,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
+
+ hdrlen = (osrh->hdrlen + 1) << 3;
+
+- err = skb_cow_head(skb, hdrlen);
++ err = skb_cow_head(skb, hdrlen + skb->mac_len);
+ if (unlikely(err))
+ return err;
+
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index ad1e7e6ce009..5d00a38cd1cb 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1360,7 +1360,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
+ dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+ dev->mtu = ETH_DATA_LEN - t_hlen;
+ dev->min_mtu = IPV6_MIN_MTU;
+- dev->max_mtu = 0xFFF8 - t_hlen;
++ dev->max_mtu = IP6_MAX_MTU - t_hlen;
+ dev->flags = IFF_NOARP;
+ netif_keep_dst(dev);
+ dev->addr_len = 4;
+@@ -1572,7 +1572,8 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
+ if (tb[IFLA_MTU]) {
+ u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+
+- if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
++ if (mtu >= IPV6_MIN_MTU &&
++ mtu <= IP6_MAX_MTU - dev->hard_header_len)
+ dev->mtu = mtu;
+ }
+
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 01a4ff3df60b..9bf997404918 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1672,7 +1672,7 @@ static struct file *kcm_clone(struct socket *osock)
+ __module_get(newsock->ops->owner);
+
+ newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
+- &kcm_proto, true);
++ &kcm_proto, false);
+ if (!newsk) {
+ sock_release(newsock);
+ return ERR_PTR(-ENOMEM);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 8351faabba62..7806e166669a 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2920,7 +2920,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ if (unlikely(offset < 0))
+ goto out_free;
+ } else if (reserve) {
+- skb_push(skb, reserve);
++ skb_reserve(skb, -reserve);
+ }
+
+ /* Returns -EFAULT on error */
+@@ -4293,7 +4293,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ goto out;
+ if (po->tp_version >= TPACKET_V3 &&
+ req->tp_block_size <=
+- BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
++ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
+ goto out;
+ if (unlikely(req->tp_frame_size < po->tp_hdrlen +
+ po->tp_reserve))
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 7a838d1c1c00..1879665e5a2b 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -1007,7 +1007,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
+ return 0;
+
+ errout_idr:
+- if (fnew->handle)
++ if (!fold)
+ idr_remove_ext(&head->handle_idr, fnew->handle);
+ errout:
+ tcf_exts_destroy(&fnew->exts);
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index 7ef77fd7b52a..e0c2a4e23039 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -637,7 +637,7 @@ unsigned long sctp_transport_timeout(struct sctp_transport *trans)
+ trans->state != SCTP_PF)
+ timeout += trans->hbinterval;
+
+- return timeout;
++ return max_t(unsigned long, timeout, HZ / 5);
+ }
+
+ /* Reset transport variables to their initial values */
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index 297b079ae4d9..27aac273205b 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -745,7 +745,7 @@ int conf_write(const char *name)
+ struct menu *menu;
+ const char *basename;
+ const char *str;
+- char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1];
++ char dirname[PATH_MAX+1], tmpname[PATH_MAX+22], newname[PATH_MAX+8];
+ char *env;
+
+ dirname[0] = 0;