Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.10 commit in: /
Date: Wed, 03 May 2017 17:46:31
Message-Id: 1493833576.2924718e5b11fe3a7209b755097cba3a3f955839.mpagano@gentoo
commit: 2924718e5b11fe3a7209b755097cba3a3f955839
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 3 17:46:16 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 3 17:46:16 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2924718e

Linux patch 4.10.14

0000_README | 4 +
1013_linux-4.10.14.patch | 2251 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2255 insertions(+)

diff --git a/0000_README b/0000_README
index 0aa6665..5295a7d 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch: 1012_linux-4.10.13.patch
From: http://www.kernel.org
Desc: Linux 4.10.13

+Patch: 1013_linux-4.10.14.patch
+From: http://www.kernel.org
+Desc: Linux 4.10.14
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-4.10.14.patch b/1013_linux-4.10.14.patch
new file mode 100644
index 0000000..ae4d094
--- /dev/null
+++ b/1013_linux-4.10.14.patch
@@ -0,0 +1,2251 @@
+diff --git a/Makefile b/Makefile
+index 8285f4de02d1..48756653c42c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 10
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
+index b65930a49589..54b54da6384c 100644
+--- a/arch/arc/include/asm/atomic.h
++++ b/arch/arc/include/asm/atomic.h
+@@ -17,10 +17,11 @@
+ #include <asm/barrier.h>
+ #include <asm/smp.h>
+
++#define ATOMIC_INIT(i) { (i) }
++
+ #ifndef CONFIG_ARC_PLAT_EZNPS
+
+ #define atomic_read(v) READ_ONCE((v)->counter)
+-#define ATOMIC_INIT(i) { (i) }
+
+ #ifdef CONFIG_ARC_HAS_LLSC
+
+diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
+index 804d2a2a19fe..dd6a18bc10ab 100644
+--- a/arch/mips/kernel/cevt-r4k.c
++++ b/arch/mips/kernel/cevt-r4k.c
+@@ -80,7 +80,7 @@ static unsigned int calculate_min_delta(void)
+ }
+
+ /* Sorted insert of 75th percentile into buf2 */
+- for (k = 0; k < i; ++k) {
++ for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
+ if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
+ l = min_t(unsigned int,
+ i, ARRAY_SIZE(buf2) - 1);
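
The cevt-r4k hunk above bounds the sorted-insert loop so the index can never run past the destination buffer. A standalone sketch of the same guarded pattern (function and variable names are illustrative, not the kernel's):

  /* Insert val into the sorted prefix dst[0..used), never writing past dst_len. */
  static void sorted_insert_bounded(unsigned int *dst, size_t dst_len,
                                    size_t used, unsigned int val)
  {
          size_t k, l;

          for (k = 0; k < used && k < dst_len; ++k) {
                  if (val < dst[k]) {
                          /* shift the tail right, clamped to the last valid slot */
                          l = used < dst_len - 1 ? used : dst_len - 1;
                          for (; l > k; --l)
                                  dst[l] = dst[l - 1];
                          break;
                  }
          }
          if (k < dst_len)        /* without the loop bound, k could equal dst_len */
                  dst[k] = val;
  }
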
+diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
+index 6430bff21fff..5c429d70e17f 100644
+--- a/arch/mips/kernel/elf.c
++++ b/arch/mips/kernel/elf.c
+@@ -257,7 +257,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
+ else if ((prog_req.fr1 && prog_req.frdefault) ||
+ (prog_req.single && !prog_req.frdefault))
+ /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
+- state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
++ state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+ cpu_has_mips_r2_r6) ?
+ FP_FR1 : FP_FR0;
+ else if (prog_req.fr1)
+diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
+index 1f4bd222ba76..eb6c0d582626 100644
+--- a/arch/mips/kernel/kgdb.c
++++ b/arch/mips/kernel/kgdb.c
+@@ -244,9 +244,6 @@ static int compute_signal(int tt)
+ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+ {
+ int reg;
+- struct thread_info *ti = task_thread_info(p);
+- unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
+- struct pt_regs *regs = (struct pt_regs *)ksp - 1;
+ #if (KGDB_GDB_REG_SIZE == 32)
+ u32 *ptr = (u32 *)gdb_regs;
+ #else
+@@ -254,25 +251,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+ #endif
+
+ for (reg = 0; reg < 16; reg++)
+- *(ptr++) = regs->regs[reg];
++ *(ptr++) = 0;
+
+ /* S0 - S7 */
+- for (reg = 16; reg < 24; reg++)
+- *(ptr++) = regs->regs[reg];
++ *(ptr++) = p->thread.reg16;
++ *(ptr++) = p->thread.reg17;
++ *(ptr++) = p->thread.reg18;
++ *(ptr++) = p->thread.reg19;
++ *(ptr++) = p->thread.reg20;
++ *(ptr++) = p->thread.reg21;
++ *(ptr++) = p->thread.reg22;
++ *(ptr++) = p->thread.reg23;
+
+ for (reg = 24; reg < 28; reg++)
+ *(ptr++) = 0;
+
+ /* GP, SP, FP, RA */
+- for (reg = 28; reg < 32; reg++)
+- *(ptr++) = regs->regs[reg];
+-
+- *(ptr++) = regs->cp0_status;
+- *(ptr++) = regs->lo;
+- *(ptr++) = regs->hi;
+- *(ptr++) = regs->cp0_badvaddr;
+- *(ptr++) = regs->cp0_cause;
+- *(ptr++) = regs->cp0_epc;
++ *(ptr++) = (long)p;
++ *(ptr++) = p->thread.reg29;
++ *(ptr++) = p->thread.reg30;
++ *(ptr++) = p->thread.reg31;
++
++ *(ptr++) = p->thread.cp0_status;
++
++ /* lo, hi */
++ *(ptr++) = 0;
++ *(ptr++) = 0;
++
++ /*
++ * BadVAddr, Cause
++ * Ideally these would come from the last exception frame up the stack
++ * but that requires unwinding, otherwise we can't know much for sure.
++ */
++ *(ptr++) = 0;
++ *(ptr++) = 0;
++
++ /*
++ * PC
++ * use return address (RA), i.e. the moment after return from resume()
++ */
++ *(ptr++) = p->thread.reg31;
+ }
+
+ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 314b66851348..f0266cef56e4 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -673,26 +673,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
+ return pte_pfn(pte);
+ }
+
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-static inline unsigned long pmd_dirty(pmd_t pmd)
++#define __HAVE_ARCH_PMD_WRITE
++static inline unsigned long pmd_write(pmd_t pmd)
+ {
+ pte_t pte = __pte(pmd_val(pmd));
+
+- return pte_dirty(pte);
++ return pte_write(pte);
+ }
+
+-static inline unsigned long pmd_young(pmd_t pmd)
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static inline unsigned long pmd_dirty(pmd_t pmd)
+ {
+ pte_t pte = __pte(pmd_val(pmd));
+
+- return pte_young(pte);
++ return pte_dirty(pte);
+ }
+
+-static inline unsigned long pmd_write(pmd_t pmd)
++static inline unsigned long pmd_young(pmd_t pmd)
+ {
+ pte_t pte = __pte(pmd_val(pmd));
+
+- return pte_write(pte);
++ return pte_young(pte);
+ }
+
+ static inline unsigned long pmd_trans_huge(pmd_t pmd)
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 5d2f91511c60..47ecac5106d3 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -1495,7 +1495,7 @@ bool kern_addr_valid(unsigned long addr)
+ if ((long)addr < 0L) {
+ unsigned long pa = __pa(addr);
+
+- if ((addr >> max_phys_bits) != 0UL)
++ if ((pa >> max_phys_bits) != 0UL)
+ return false;
+
+ return pfn_valid(pa >> PAGE_SHIFT);
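
The init_64.c change is a one-character thinko fix: kern_addr_valid() computed the physical address but then range-checked the virtual one, whose high bits are always set on this code path, so the test misfired. The corrected shape, in isolation:

  unsigned long pa = __pa(addr);        /* translate virtual -> physical first */

  if ((pa >> max_phys_bits) != 0UL)     /* check the value pfn_valid() will consume */
          return false;
  return pfn_valid(pa >> PAGE_SHIFT);
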
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 8639bb2ae058..6bf09f5594b2 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -983,6 +983,18 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+ unsigned long return_hooker = (unsigned long)
+ &return_to_handler;
+
++ /*
++ * When resuming from suspend-to-ram, this function can be indirectly
++ * called from early CPU startup code while the CPU is in real mode,
++ * which would fail miserably. Make sure the stack pointer is a
++ * virtual address.
++ *
++ * This check isn't as accurate as virt_addr_valid(), but it should be
++ * good enough for this purpose, and it's fast.
++ */
++ if (unlikely((long)__builtin_frame_address(0) >= 0))
++ return;
++
+ if (unlikely(ftrace_graph_is_dead()))
+ return;
+
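The ftrace guard relies on the x86-64 address-space split: kernel virtual addresses occupy the upper half, so bit 63 is set and the value is negative when viewed as a signed long. A sketch of the same test (helper name hypothetical):

  /* True if the current frame lives at a kernel virtual address; a
   * non-negative value means some early identity-mapped or real-mode
   * stack, where tracing must not proceed. */
  static inline bool frame_on_kernel_va(void)
  {
          return (long)__builtin_frame_address(0) < 0;
  }
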
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 27ae2a0ef1b9..ecd075fd5754 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -613,6 +613,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+ },
+ },
++ {
++ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
++ },
++ },
+ { }
+ };
+
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index a0dabd4038ba..7ab24c5262f3 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -740,13 +740,18 @@ static const struct net_device_ops gs_usb_netdev_ops = {
+ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+ {
+ struct gs_can *dev = netdev_priv(netdev);
+- struct gs_identify_mode imode;
++ struct gs_identify_mode *imode;
+ int rc;
+
++ imode = kmalloc(sizeof(*imode), GFP_KERNEL);
++
++ if (!imode)
++ return -ENOMEM;
++
+ if (do_identify)
+- imode.mode = GS_CAN_IDENTIFY_ON;
++ imode->mode = GS_CAN_IDENTIFY_ON;
+ else
+- imode.mode = GS_CAN_IDENTIFY_OFF;
++ imode->mode = GS_CAN_IDENTIFY_OFF;
+
+ rc = usb_control_msg(interface_to_usbdev(dev->iface),
+ usb_sndctrlpipe(interface_to_usbdev(dev->iface),
+@@ -756,10 +761,12 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+ USB_RECIP_INTERFACE,
+ dev->channel,
+ 0,
+- &imode,
+- sizeof(imode),
++ imode,
++ sizeof(*imode),
+ 100);
+
++ kfree(imode);
++
+ return (rc > 0) ? 0 : rc;
+ }
+
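The gs_usb change exists because usb_control_msg() buffers may be handed to DMA, and with a vmapped stack (CONFIG_VMAP_STACK) an on-stack struct is not guaranteed to be DMA-able. The general pattern, sketched with illustrative names:

  u8 *buf = kmalloc(len, GFP_KERNEL);   /* heap memory is DMA-safe; the stack may not be */
  if (!buf)
          return -ENOMEM;

  rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
                       USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
                       value, index, buf, len, 100 /* ms timeout */);
  kfree(buf);
  return rc < 0 ? rc : 0;
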
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 41db47050991..0145765002b3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -82,7 +82,7 @@
+ #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
+
+ #define MLX5_UMR_ALIGN (2048)
+-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
++#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256)
+
+ #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
+ #define MLX5E_DEFAULT_LRO_TIMEOUT 32
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+index f33f72d0237c..32d56cd1b638 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+@@ -564,6 +564,7 @@ int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *i
+ int idx = 0;
+ int err = 0;
+
++ info->data = MAX_NUM_OF_ETHTOOL_RULES;
+ while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
+ err = mlx5e_ethtool_get_flow(priv, info, location);
+ if (!err)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index cc718814c378..dc5c594f7c5e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -611,7 +611,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
+
+ if (!err && esw->mode == SRIOV_OFFLOADS &&
+ rep->vport != FDB_UPLINK_VPORT) {
+- if (min_inline > esw->offloads.inline_mode) {
++ if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
++ esw->offloads.inline_mode < min_inline) {
+ netdev_warn(priv->netdev,
+ "Flow is not offloaded due to min inline setting, required %d actual %d\n",
+ min_inline, esw->offloads.inline_mode);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 7bce2bdbb79b..4d111c129144 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -908,8 +908,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ int num_vports = esw->enabled_vports;
+- int err;
+- int vport;
++ int err, vport;
+ u8 mlx5_mode;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+@@ -918,9 +917,17 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+- if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+- MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
++ switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
++ case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
++ if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
++ return 0;
++ /* fall through */
++ case MLX5_CAP_INLINE_MODE_L2:
++ esw_warn(dev, "Inline mode can't be set\n");
+ return -EOPNOTSUPP;
++ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
++ break;
++ }
+
+ if (esw->offloads.num_flows > 0) {
+ esw_warn(dev, "Can't set inline mode when flows are configured\n");
+@@ -963,18 +970,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+- if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+- MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+- return -EOPNOTSUPP;
+-
+ return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+ }
+
+ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+ {
++ u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+ struct mlx5_core_dev *dev = esw->dev;
+ int vport;
+- u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+@@ -982,10 +985,18 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+- if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+- MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+- return -EOPNOTSUPP;
++ switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
++ case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
++ mlx5_mode = MLX5_INLINE_MODE_NONE;
++ goto out;
++ case MLX5_CAP_INLINE_MODE_L2:
++ mlx5_mode = MLX5_INLINE_MODE_L2;
++ goto out;
++ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
++ goto query_vports;
++ }
+
++query_vports:
+ for (vport = 1; vport <= nvfs; vport++) {
+ mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+ if (vport > 1 && prev_mlx5_mode != mlx5_mode)
+@@ -993,6 +1004,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+ prev_mlx5_mode = mlx5_mode;
+ }
+
++out:
+ *mode = mlx5_mode;
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+index 55957246c0e8..b5d5519542e8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+@@ -294,7 +294,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
+ struct netdev_notifier_changeupper_info *info)
+ {
+ struct net_device *upper = info->upper_dev, *ndev_tmp;
+- struct netdev_lag_upper_info *lag_upper_info;
++ struct netdev_lag_upper_info *lag_upper_info = NULL;
+ bool is_bonded;
+ int bond_status = 0;
+ int num_slaves = 0;
+@@ -303,7 +303,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
+ if (!netif_is_lag_master(upper))
+ return 0;
+
+- lag_upper_info = info->upper_info;
++ if (info->linking)
++ lag_upper_info = info->upper_info;
+
+ /* The event may still be of interest if the slave does not belong to
+ * us, but is enslaved to a master which has one or more of our netdevs
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 4aca265d9c14..4ee7ea775a02 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1001,7 +1001,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
+ if (err) {
+ dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
+ FW_INIT_TIMEOUT_MILI);
+- goto out_err;
++ goto err_cmd_cleanup;
+ }
+
+ err = mlx5_core_enable_hca(dev, 0);
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index f729a6b43958..1a012b3e0ded 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1061,12 +1061,70 @@ static struct mdiobb_ops bb_ops = {
+ .get_mdio_data = sh_get_mdio,
+ };
+
++/* free Tx skb function */
++static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
++{
++ struct sh_eth_private *mdp = netdev_priv(ndev);
++ struct sh_eth_txdesc *txdesc;
++ int free_num = 0;
++ int entry;
++ bool sent;
++
++ for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
++ entry = mdp->dirty_tx % mdp->num_tx_ring;
++ txdesc = &mdp->tx_ring[entry];
++ sent = !(txdesc->status & cpu_to_le32(TD_TACT));
++ if (sent_only && !sent)
++ break;
++ /* TACT bit must be checked before all the following reads */
++ dma_rmb();
++ netif_info(mdp, tx_done, ndev,
++ "tx entry %d status 0x%08x\n",
++ entry, le32_to_cpu(txdesc->status));
++ /* Free the original skb. */
++ if (mdp->tx_skbuff[entry]) {
++ dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
++ le32_to_cpu(txdesc->len) >> 16,
++ DMA_TO_DEVICE);
++ dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
++ mdp->tx_skbuff[entry] = NULL;
++ free_num++;
++ }
++ txdesc->status = cpu_to_le32(TD_TFP);
++ if (entry >= mdp->num_tx_ring - 1)
++ txdesc->status |= cpu_to_le32(TD_TDLE);
++
++ if (sent) {
++ ndev->stats.tx_packets++;
++ ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
++ }
++ }
++ return free_num;
++}
++
+ /* free skb and descriptor buffer */
+ static void sh_eth_ring_free(struct net_device *ndev)
+ {
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ringsize, i;
+
++ if (mdp->rx_ring) {
++ for (i = 0; i < mdp->num_rx_ring; i++) {
++ if (mdp->rx_skbuff[i]) {
++ struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
++
++ dma_unmap_single(&ndev->dev,
++ le32_to_cpu(rxdesc->addr),
++ ALIGN(mdp->rx_buf_sz, 32),
++ DMA_FROM_DEVICE);
++ }
++ }
++ ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
++ dma_free_coherent(NULL, ringsize, mdp->rx_ring,
++ mdp->rx_desc_dma);
++ mdp->rx_ring = NULL;
++ }
++
+ /* Free Rx skb ringbuffer */
+ if (mdp->rx_skbuff) {
+ for (i = 0; i < mdp->num_rx_ring; i++)
+@@ -1075,27 +1133,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
+ kfree(mdp->rx_skbuff);
+ mdp->rx_skbuff = NULL;
+
+- /* Free Tx skb ringbuffer */
+- if (mdp->tx_skbuff) {
+- for (i = 0; i < mdp->num_tx_ring; i++)
+- dev_kfree_skb(mdp->tx_skbuff[i]);
+- }
+- kfree(mdp->tx_skbuff);
+- mdp->tx_skbuff = NULL;
+-
+- if (mdp->rx_ring) {
+- ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+- dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+- mdp->rx_desc_dma);
+- mdp->rx_ring = NULL;
+- }
+-
+ if (mdp->tx_ring) {
++ sh_eth_tx_free(ndev, false);
++
+ ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+ mdp->tx_desc_dma);
+ mdp->tx_ring = NULL;
+ }
++
++ /* Free Tx skb ringbuffer */
++ kfree(mdp->tx_skbuff);
++ mdp->tx_skbuff = NULL;
+ }
+
+ /* format skb and descriptor buffer */
+@@ -1343,43 +1392,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
+ update_mac_address(ndev);
+ }
+
+-/* free Tx skb function */
+-static int sh_eth_txfree(struct net_device *ndev)
+-{
+- struct sh_eth_private *mdp = netdev_priv(ndev);
+- struct sh_eth_txdesc *txdesc;
+- int free_num = 0;
+- int entry;
+-
+- for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+- entry = mdp->dirty_tx % mdp->num_tx_ring;
+- txdesc = &mdp->tx_ring[entry];
+- if (txdesc->status & cpu_to_le32(TD_TACT))
+- break;
+- /* TACT bit must be checked before all the following reads */
+- dma_rmb();
+- netif_info(mdp, tx_done, ndev,
+- "tx entry %d status 0x%08x\n",
+- entry, le32_to_cpu(txdesc->status));
+- /* Free the original skb. */
+- if (mdp->tx_skbuff[entry]) {
+- dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+- le32_to_cpu(txdesc->len) >> 16,
+- DMA_TO_DEVICE);
+- dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+- mdp->tx_skbuff[entry] = NULL;
+- free_num++;
+- }
+- txdesc->status = cpu_to_le32(TD_TFP);
+- if (entry >= mdp->num_tx_ring - 1)
+- txdesc->status |= cpu_to_le32(TD_TDLE);
+-
+- ndev->stats.tx_packets++;
+- ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+- }
+- return free_num;
+-}
+-
+ /* Packet receive function */
+ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
+ {
+@@ -1622,7 +1634,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
+ intr_status, mdp->cur_tx, mdp->dirty_tx,
+ (u32)ndev->state, edtrr);
+ /* dirty buffer free */
+- sh_eth_txfree(ndev);
++ sh_eth_tx_free(ndev, true);
+
+ /* SH7712 BUG */
+ if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
+@@ -1681,7 +1693,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
+ /* Clear Tx interrupts */
+ sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
+
+- sh_eth_txfree(ndev);
++ sh_eth_tx_free(ndev, true);
+ netif_wake_queue(ndev);
+ }
+
+@@ -2309,7 +2321,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+
+ spin_lock_irqsave(&mdp->lock, flags);
+ if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
+- if (!sh_eth_txfree(ndev)) {
++ if (!sh_eth_tx_free(ndev, true)) {
+ netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
+ netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&mdp->lock, flags);
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index f83cf6696820..8420069594b3 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2713,7 +2713,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
+ }
+
+ #define MACSEC_FEATURES \
+- (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
++ (NETIF_F_SG | NETIF_F_HIGHDMA)
+ static struct lock_class_key macsec_netdev_addr_lock_key;
+
+ static int macsec_dev_init(struct net_device *dev)
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 20b3fdf282c5..7d49a36d6020 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -1140,6 +1140,7 @@ static int macvlan_port_create(struct net_device *dev)
+ static void macvlan_port_destroy(struct net_device *dev)
+ {
+ struct macvlan_port *port = macvlan_port_get_rtnl(dev);
++ struct sk_buff *skb;
+
+ dev->priv_flags &= ~IFF_MACVLAN_PORT;
+ netdev_rx_handler_unregister(dev);
+@@ -1148,7 +1149,15 @@ static void macvlan_port_destroy(struct net_device *dev)
+ * but we need to cancel it and purge left skbs if any.
+ */
+ cancel_work_sync(&port->bc_work);
+- __skb_queue_purge(&port->bc_queue);
++
++ while ((skb = __skb_dequeue(&port->bc_queue))) {
++ const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
++
++ if (src)
++ dev_put(src->dev);
++
++ kfree_skb(skb);
++ }
+
+ kfree(port);
+ }
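
The macvlan teardown above cannot use __skb_queue_purge() because each skb on bc_queue pins its source device with a reference taken at enqueue time; a blind purge would leak that reference. Draining with per-entry cleanup, in general form (the cleanup helper is hypothetical):

  struct sk_buff *skb;

  while ((skb = __skb_dequeue(q))) {
          release_enqueue_ref(skb);     /* hypothetical: undo whatever enqueue pinned */
          kfree_skb(skb);
  }
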
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index e2460a57e4b1..ed0d10f54f26 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -1438,8 +1438,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
+ skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
+ skb_queue_tail(&dp83640->rx_queue, skb);
+ schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
+- } else {
+- netif_rx_ni(skb);
+ }
+
+ return true;
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 7cc1b7dcfe05..b41a32b26be7 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -591,16 +591,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
+ EXPORT_SYMBOL(phy_mii_ioctl);
+
+ /**
+- * phy_start_aneg - start auto-negotiation for this PHY device
++ * phy_start_aneg_priv - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
++ * @sync: indicate whether we should wait for the workqueue cancelation
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+ * them), and then calls the driver's config_aneg function.
+ * If the PHYCONTROL Layer is operating, we change the state to
+ * reflect the beginning of Auto-negotiation or forcing.
+ */
+-int phy_start_aneg(struct phy_device *phydev)
++static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
+ {
++ bool trigger = 0;
+ int err;
+
+ mutex_lock(&phydev->lock);
+@@ -625,10 +627,40 @@ int phy_start_aneg(struct phy_device *phydev)
+ }
+ }
+
++ /* Re-schedule a PHY state machine to check PHY status because
++ * negotiation may already be done and aneg interrupt may not be
++ * generated.
++ */
++ if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
++ err = phy_aneg_done(phydev);
++ if (err > 0) {
++ trigger = true;
++ err = 0;
++ }
++ }
++
+ out_unlock:
+ mutex_unlock(&phydev->lock);
++
++ if (trigger)
++ phy_trigger_machine(phydev, sync);
++
+ return err;
+ }
++
++/**
++ * phy_start_aneg - start auto-negotiation for this PHY device
++ * @phydev: the phy_device struct
++ *
++ * Description: Sanitizes the settings (if we're not autonegotiating
++ * them), and then calls the driver's config_aneg function.
++ * If the PHYCONTROL Layer is operating, we change the state to
++ * reflect the beginning of Auto-negotiation or forcing.
++ */
++int phy_start_aneg(struct phy_device *phydev)
++{
++ return phy_start_aneg_priv(phydev, true);
++}
+ EXPORT_SYMBOL(phy_start_aneg);
+
+ /**
+@@ -656,7 +688,7 @@ void phy_start_machine(struct phy_device *phydev)
+ * state machine runs.
+ */
+
+-static void phy_trigger_machine(struct phy_device *phydev, bool sync)
++void phy_trigger_machine(struct phy_device *phydev, bool sync)
+ {
+ if (sync)
+ cancel_delayed_work_sync(&phydev->state_queue);
+@@ -678,7 +710,7 @@ void phy_stop_machine(struct phy_device *phydev)
+ cancel_delayed_work_sync(&phydev->state_queue);
+
+ mutex_lock(&phydev->lock);
+- if (phydev->state > PHY_UP)
++ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
+ phydev->state = PHY_UP;
+ mutex_unlock(&phydev->lock);
+ }
+@@ -1151,7 +1183,7 @@ void phy_state_machine(struct work_struct *work)
+ mutex_unlock(&phydev->lock);
+
+ if (needs_aneg)
+- err = phy_start_aneg(phydev);
++ err = phy_start_aneg_priv(phydev, false);
+ else if (do_suspend)
+ phy_suspend(phydev);
+
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 921fef275ea4..f2fd52e71a5e 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1126,7 +1126,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
+ goto nla_put_failure;
+
+ /* rule only needs to appear once */
+- nlh->nlmsg_flags &= NLM_F_EXCL;
++ nlh->nlmsg_flags |= NLM_F_EXCL;
+
+ frh = nlmsg_data(nlh);
+ memset(frh, 0, sizeof(*frh));
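
The one-line vrf fix is the classic flag-setting bug: '&=' with a single flag clears every other bit and keeps NLM_F_EXCL only if it happened to be set already, while '|=' adds the flag and preserves the rest. Side by side:

  unsigned int flags = NLM_F_REQUEST | NLM_F_CREATE;

  flags &= NLM_F_EXCL;    /* bug: result is 0, REQUEST/CREATE wiped and EXCL not set */
  flags |= NLM_F_EXCL;    /* fix: EXCL added, the other bits survive */
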
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index d438430c49a2..dba671d88377 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1038,10 +1038,10 @@ int scsi_init_io(struct scsi_cmnd *cmd)
+ struct scsi_device *sdev = cmd->device;
+ struct request *rq = cmd->request;
+ bool is_mq = (rq->mq_ctx != NULL);
+- int error;
++ int error = BLKPREP_KILL;
+
+ if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
+- return -EINVAL;
++ goto err_exit;
+
+ error = scsi_init_sgtable(rq, &cmd->sdb);
+ if (error)
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 5e659d054b40..4299348c880a 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -2069,11 +2069,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
+ if (inode_dirty_flags)
+ __mark_inode_dirty(inode, inode_dirty_flags);
+
+- if (ia_valid & ATTR_MODE) {
+- err = posix_acl_chmod(inode, attr->ia_mode);
+- if (err)
+- goto out_put;
+- }
+
+ if (mask) {
+ req->r_inode = inode;
+@@ -2087,13 +2082,11 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
+ ceph_cap_string(dirtied), mask);
+
+ ceph_mdsc_put_request(req);
+- if (mask & CEPH_SETATTR_SIZE)
+- __ceph_do_pending_vmtruncate(inode);
+- ceph_free_cap_flush(prealloc_cf);
+- return err;
+-out_put:
+- ceph_mdsc_put_request(req);
+ ceph_free_cap_flush(prealloc_cf);
++
++ if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
++ __ceph_do_pending_vmtruncate(inode);
++
+ return err;
+ }
+
+@@ -2112,7 +2105,12 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
+ if (err != 0)
+ return err;
+
+- return __ceph_setattr(inode, attr);
++ err = __ceph_setattr(inode, attr);
++
++ if (err >= 0 && (attr->ia_valid & ATTR_MODE))
++ err = posix_acl_chmod(inode, attr->ia_mode);
++
++ return err;
+ }
+
+ /*
+diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
+index dba2ff8eaa68..452334694a5d 100644
+--- a/fs/nfsd/nfs3xdr.c
++++ b/fs/nfsd/nfs3xdr.c
+@@ -358,6 +358,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ {
+ unsigned int len, v, hdr, dlen;
+ u32 max_blocksize = svc_max_payload(rqstp);
++ struct kvec *head = rqstp->rq_arg.head;
++ struct kvec *tail = rqstp->rq_arg.tail;
+
+ p = decode_fh(p, &args->fh);
+ if (!p)
+@@ -367,6 +369,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ args->count = ntohl(*p++);
+ args->stable = ntohl(*p++);
+ len = args->len = ntohl(*p++);
++ if ((void *)p > head->iov_base + head->iov_len)
++ return 0;
+ /*
+ * The count must equal the amount of data passed.
+ */
+@@ -377,9 +381,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ * Check to make sure that we got the right number of
+ * bytes.
+ */
+- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
+- dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
+- + rqstp->rq_arg.tail[0].iov_len - hdr;
++ hdr = (void*)p - head->iov_base;
++ dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr;
+ /*
+ * Round the length of the data which was specified up to
+ * the next multiple of XDR units and then compare that
+@@ -396,7 +399,7 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ len = args->len = max_blocksize;
+ }
+ rqstp->rq_vec[0].iov_base = (void*)p;
+- rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
++ rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
+ v = 0;
+ while (len > rqstp->rq_vec[v].iov_len) {
+ len -= rqstp->rq_vec[v].iov_len;
+@@ -471,6 +474,8 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
+ /* first copy and check from the first page */
+ old = (char*)p;
+ vec = &rqstp->rq_arg.head[0];
++ if ((void *)old > vec->iov_base + vec->iov_len)
++ return 0;
+ avail = vec->iov_len - (old - (char*)vec->iov_base);
+ while (len && avail && *old) {
+ *new++ = *old++;
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index e6bfd96734c0..15497cbbc563 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -733,6 +733,37 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr)
+ return nfserr;
+ }
+
++/*
++ * A write procedure can have a large argument, and a read procedure can
++ * have a large reply, but no NFSv2 or NFSv3 procedure has argument and
++ * reply that can both be larger than a page. The xdr code has taken
++ * advantage of this assumption to be a sloppy about bounds checking in
++ * some cases. Pending a rewrite of the NFSv2/v3 xdr code to fix that
++ * problem, we enforce these assumptions here:
++ */
++static bool nfs_request_too_big(struct svc_rqst *rqstp,
++ struct svc_procedure *proc)
++{
++ /*
++ * The ACL code has more careful bounds-checking and is not
++ * susceptible to this problem:
++ */
++ if (rqstp->rq_prog != NFS_PROGRAM)
++ return false;
++ /*
++ * Ditto NFSv4 (which can in theory have argument and reply both
++ * more than a page):
++ */
++ if (rqstp->rq_vers >= 4)
++ return false;
++ /* The reply will be small, we're OK: */
++ if (proc->pc_xdrressize > 0 &&
++ proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
++ return false;
++
++ return rqstp->rq_arg.len > PAGE_SIZE;
++}
++
+ int
+ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ {
+@@ -745,6 +776,11 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ rqstp->rq_vers, rqstp->rq_proc);
+ proc = rqstp->rq_procinfo;
+
++ if (nfs_request_too_big(rqstp, proc)) {
++ dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
++ *statp = rpc_garbage_args;
++ return 1;
++ }
+ /*
+ * Give the xdr decoder a chance to change this if it wants
+ * (necessary in the NFSv4.0 compound case)
+diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
+index 41b468a6a90f..de07ff625777 100644
+--- a/fs/nfsd/nfsxdr.c
++++ b/fs/nfsd/nfsxdr.c
+@@ -280,6 +280,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ struct nfsd_writeargs *args)
+ {
+ unsigned int len, hdr, dlen;
++ struct kvec *head = rqstp->rq_arg.head;
+ int v;
+
+ p = decode_fh(p, &args->fh);
+@@ -300,9 +301,10 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ * Check to make sure that we got the right number of
+ * bytes.
+ */
+- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
+- dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
+- - hdr;
++ hdr = (void*)p - head->iov_base;
++ if (hdr > head->iov_len)
++ return 0;
++ dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
+
+ /*
+ * Round the length of the data which was specified up to
+@@ -316,7 +318,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ return 0;
+
+ rqstp->rq_vec[0].iov_base = (void*)p;
+- rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
++ rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
+ v = 0;
+ while (len > rqstp->rq_vec[v].iov_len) {
+ len -= rqstp->rq_vec[v].iov_len;
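
Both nfsd write-args decoders gain the same sanity check: after the decode cursor p has been advanced across client-controlled lengths, confirm it still points inside the received head kvec before deriving any buffer sizes from it. The guard in isolation:

  hdr = (void *)p - head->iov_base;
  if (hdr > head->iov_len)      /* cursor ran past the received data: garbage args */
          return 0;
  dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
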
+diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
+index 9ca23fcfb5d7..6fdfc884fdeb 100644
+--- a/include/linux/errqueue.h
++++ b/include/linux/errqueue.h
+@@ -20,6 +20,8 @@ struct sock_exterr_skb {
+ struct sock_extended_err ee;
+ u16 addr_offset;
+ __be16 port;
++ u8 opt_stats:1,
++ unused:7;
+ };
+
+ #endif
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 7fc1105605bf..b19ae667c9c4 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -840,6 +840,7 @@ void phy_change_work(struct work_struct *work);
+ void phy_mac_interrupt(struct phy_device *phydev, int new_link);
+ void phy_start_machine(struct phy_device *phydev);
+ void phy_stop_machine(struct phy_device *phydev);
++void phy_trigger_machine(struct phy_device *phydev, bool sync);
+ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+ int phy_ethtool_ksettings_get(struct phy_device *phydev,
+diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
+index f6598d1c886e..316e838b7470 100644
+--- a/include/uapi/linux/ipv6_route.h
++++ b/include/uapi/linux/ipv6_route.h
+@@ -34,7 +34,7 @@
+ #define RTF_PREF(pref) ((pref) << 27)
+ #define RTF_PREF_MASK 0x18000000
+
+-#define RTF_PCPU 0x40000000
++#define RTF_PCPU 0x40000000 /* read-only: can not be set by user */
+ #define RTF_LOCAL 0x80000000
+
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index cdc43b899f28..f3c938ba87a2 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1859,14 +1859,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
+
+ for (i = 0; i < MAX_BPF_REG; i++)
+ if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
+- regs[i].range = dst_reg->off;
++ /* keep the maximum range already checked */
++ regs[i].range = max(regs[i].range, dst_reg->off);
+
+ for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
+ if (state->stack_slot_type[i] != STACK_SPILL)
+ continue;
+ reg = &state->spilled_regs[i / BPF_REG_SIZE];
+ if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
+- reg->range = dst_reg->off;
++ reg->range = max(reg->range, dst_reg->off);
+ }
+ }
+
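The verifier fix makes packet-range tracking monotonic: an access range already proven by an earlier bounds check must never be shrunk by a later, smaller bound, or that earlier proof is forgotten. Schematically:

  /* after 'if (pkt + 8 > end) goto drop;'  the proven range is 8;
   * a second 'if (pkt + 4 > end) goto drop;' must not overwrite it: */
  reg->range = max(reg->range, dst_reg->off);   /* keep the widest proven bound */
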
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 0a5f630f5c54..f90ef82076a9 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1333,26 +1333,21 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
+ struct cpuhp_step *sp;
+ int ret = 0;
+
+- mutex_lock(&cpuhp_state_mutex);
+-
+ if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
+ ret = cpuhp_reserve_state(state);
+ if (ret < 0)
+- goto out;
++ return ret;
+ state = ret;
+ }
+ sp = cpuhp_get_step(state);
+- if (name && sp->name) {
+- ret = -EBUSY;
+- goto out;
+- }
++ if (name && sp->name)
++ return -EBUSY;
++
+ sp->startup.single = startup;
+ sp->teardown.single = teardown;
+ sp->name = name;
+ sp->multi_instance = multi_instance;
+ INIT_HLIST_HEAD(&sp->list);
+-out:
+- mutex_unlock(&cpuhp_state_mutex);
+ return ret;
+ }
+
+@@ -1426,6 +1421,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ return -EINVAL;
+
+ get_online_cpus();
++ mutex_lock(&cpuhp_state_mutex);
+
+ if (!invoke || !sp->startup.multi)
+ goto add_node;
+@@ -1445,16 +1441,14 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ if (ret) {
+ if (sp->teardown.multi)
+ cpuhp_rollback_install(cpu, state, node);
+- goto err;
++ goto unlock;
+ }
+ }
+ add_node:
+ ret = 0;
+- mutex_lock(&cpuhp_state_mutex);
+ hlist_add_head(node, &sp->list);
++unlock:
+ mutex_unlock(&cpuhp_state_mutex);
+-
+-err:
+ put_online_cpus();
+ return ret;
+ }
+@@ -1489,6 +1483,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+ return -EINVAL;
+
+ get_online_cpus();
++ mutex_lock(&cpuhp_state_mutex);
+
+ ret = cpuhp_store_callbacks(state, name, startup, teardown,
+ multi_instance);
+@@ -1522,6 +1517,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+ }
+ }
+ out:
++ mutex_unlock(&cpuhp_state_mutex);
+ put_online_cpus();
+ /*
+ * If the requested state is CPUHP_AP_ONLINE_DYN, return the
+@@ -1545,6 +1541,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+ return -EINVAL;
+
+ get_online_cpus();
++ mutex_lock(&cpuhp_state_mutex);
++
+ if (!invoke || !cpuhp_get_teardown_cb(state))
+ goto remove;
+ /*
+@@ -1561,7 +1559,6 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+ }
+
+ remove:
+- mutex_lock(&cpuhp_state_mutex);
+ hlist_del(node);
+ mutex_unlock(&cpuhp_state_mutex);
+ put_online_cpus();
+@@ -1569,6 +1566,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
++
+ /**
+ * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state
+ * @state: The state to remove
+@@ -1587,6 +1585,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+
+ get_online_cpus();
+
++ mutex_lock(&cpuhp_state_mutex);
+ if (sp->multi_instance) {
+ WARN(!hlist_empty(&sp->list),
+ "Error: Removing state %d which has instances left.\n",
+@@ -1611,6 +1610,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+ }
+ remove:
+ cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
++ mutex_unlock(&cpuhp_state_mutex);
+ put_online_cpus();
+ }
+ EXPORT_SYMBOL(__cpuhp_remove_state);
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 3fc94a49ccd5..cf129fec7329 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -2101,6 +2101,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
+ trace_9p_protocol_dump(clnt, req->rc);
+ goto free_and_error;
+ }
++ if (rsize < count) {
++ pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
++ count = rsize;
++ }
+
+ p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
+
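The 9p hunk is a trust-boundary clamp: count arrives in the server's RREADDIR reply, so it must be capped at the rsize the client actually requested before anything is copied. Generic shape (names illustrative):

  if (count > rsize) {          /* never trust a peer-supplied length */
          pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
          count = rsize;        /* clamp to what the buffer can hold */
  }
  memcpy(data, payload, count);
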
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index e7c12caa20c8..4526cbd7e28a 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -860,7 +860,8 @@ static void neigh_probe(struct neighbour *neigh)
+ if (skb)
+ skb = skb_clone(skb, GFP_ATOMIC);
+ write_unlock(&neigh->lock);
+- neigh->ops->solicit(neigh, skb);
++ if (neigh->ops->solicit)
++ neigh->ops->solicit(neigh, skb);
+ atomic_inc(&neigh->probes);
+ kfree_skb(skb);
+ }
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 9424673009c1..29be2466970c 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
+ while ((skb = skb_dequeue(&npinfo->txq))) {
+ struct net_device *dev = skb->dev;
+ struct netdev_queue *txq;
++ unsigned int q_index;
+
+ if (!netif_device_present(dev) || !netif_running(dev)) {
+ kfree_skb(skb);
+ continue;
+ }
+
+- txq = skb_get_tx_queue(dev, skb);
+-
+ local_irq_save(flags);
++ /* check if skb->queue_mapping is still valid */
++ q_index = skb_get_queue_mapping(skb);
++ if (unlikely(q_index >= dev->real_num_tx_queues)) {
++ q_index = q_index % dev->real_num_tx_queues;
++ skb_set_queue_mapping(skb, q_index);
++ }
++ txq = netdev_get_tx_queue(dev, q_index);
+ HARD_TX_LOCK(dev, txq, smp_processor_id());
+ if (netif_xmit_frozen_or_stopped(txq) ||
+ netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
+diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
+index 88a8e429fc3e..0fd421713775 100644
+--- a/net/core/secure_seq.c
++++ b/net/core/secure_seq.c
+@@ -16,9 +16,11 @@
+ #define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
+
+ static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
++static u32 ts_secret[2];
+
+ static __always_inline void net_secret_init(void)
+ {
++ net_get_random_once(ts_secret, sizeof(ts_secret));
+ net_get_random_once(net_secret, sizeof(net_secret));
+ }
+ #endif
+@@ -41,6 +43,21 @@ static u32 seq_scale(u32 seq)
+ #endif
+
+ #if IS_ENABLED(CONFIG_IPV6)
++static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
++{
++ u32 hash[4 + 4 + 1];
++
++ if (sysctl_tcp_timestamps != 1)
++ return 0;
++
++ memcpy(hash, saddr, 16);
++ memcpy(hash + 4, daddr, 16);
++
++ hash[8] = ts_secret[0];
++
++ return jhash2(hash, ARRAY_SIZE(hash), ts_secret[1]);
++}
++
+ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
+ __be16 sport, __be16 dport, u32 *tsoff)
+ {
+@@ -59,7 +76,7 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
+
+ md5_transform(hash, secret);
+
+- *tsoff = sysctl_tcp_timestamps == 1 ? hash[1] : 0;
++ *tsoff = secure_tcpv6_ts_off(saddr, daddr);
+ return seq_scale(hash[0]);
+ }
+ EXPORT_SYMBOL(secure_tcpv6_sequence_number);
+@@ -87,6 +104,14 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
+ #endif
+
+ #ifdef CONFIG_INET
++static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
++{
++ if (sysctl_tcp_timestamps != 1)
++ return 0;
++
++ return jhash_3words((__force u32)saddr, (__force u32)daddr,
++ ts_secret[0], ts_secret[1]);
++}
+
+ u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport, u32 *tsoff)
+@@ -101,7 +126,7 @@ u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+
+ md5_transform(hash, net_secret);
+
+- *tsoff = sysctl_tcp_timestamps == 1 ? hash[1] : 0;
++ *tsoff = secure_tcp_ts_off(saddr, daddr);
+ return seq_scale(hash[0]);
+ }
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index aa3a13378c90..887995e6df9a 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3078,22 +3078,32 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ if (sg && csum && (mss != GSO_BY_FRAGS)) {
+ if (!(features & NETIF_F_GSO_PARTIAL)) {
+ struct sk_buff *iter;
++ unsigned int frag_len;
+
+ if (!list_skb ||
+ !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
+ goto normal;
+
+- /* Split the buffer at the frag_list pointer.
+- * This is based on the assumption that all
+- * buffers in the chain excluding the last
+- * containing the same amount of data.
++ /* If we get here then all the required
++ * GSO features except frag_list are supported.
++ * Try to split the SKB to multiple GSO SKBs
++ * with no frag_list.
++ * Currently we can do that only when the buffers don't
++ * have a linear part and all the buffers except
++ * the last are of the same length.
+ */
++ frag_len = list_skb->len;
+ skb_walk_frags(head_skb, iter) {
++ if (frag_len != iter->len && iter->next)
++ goto normal;
+ if (skb_headlen(iter))
+ goto normal;
+
+ len -= iter->len;
+ }
++
++ if (len != frag_len)
++ goto normal;
+ }
+
+ /* GSO partial only requires that we trim off any excess that
+@@ -3690,6 +3700,15 @@ static void sock_rmem_free(struct sk_buff *skb)
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+ }
+
++static void skb_set_err_queue(struct sk_buff *skb)
++{
++ /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
++ * So, it is safe to (mis)use it to mark skbs on the error queue.
++ */
++ skb->pkt_type = PACKET_OUTGOING;
++ BUILD_BUG_ON(PACKET_OUTGOING == 0);
++}
++
+ /*
+ * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+ */
+@@ -3703,6 +3722,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+ skb->sk = sk;
+ skb->destructor = sock_rmem_free;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
++ skb_set_err_queue(skb);
+
+ /* before exiting rcu section, make sure dst is refcounted */
+ skb_dst_force(skb);
+@@ -3779,16 +3799,21 @@ EXPORT_SYMBOL(skb_clone_sk);
+
+ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
+ struct sock *sk,
+- int tstype)
++ int tstype,
++ bool opt_stats)
+ {
+ struct sock_exterr_skb *serr;
+ int err;
+
++ BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
++
+ serr = SKB_EXT_ERR(skb);
+ memset(serr, 0, sizeof(*serr));
+ serr->ee.ee_errno = ENOMSG;
+ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+ serr->ee.ee_info = tstype;
++ serr->opt_stats = opt_stats;
++ serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
+ if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
+ serr->ee.ee_data = skb_shinfo(skb)->tskey;
+ if (sk->sk_protocol == IPPROTO_TCP &&
+@@ -3829,7 +3854,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
+ */
+ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+ *skb_hwtstamps(skb) = *hwtstamps;
+- __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
++ __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
+ sock_put(sk);
+ }
+ }
+@@ -3840,7 +3865,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ struct sock *sk, int tstype)
+ {
+ struct sk_buff *skb;
+- bool tsonly;
++ bool tsonly, opt_stats = false;
+
+ if (!sk)
+ return;
+@@ -3853,9 +3878,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ #ifdef CONFIG_INET
+ if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
+ sk->sk_protocol == IPPROTO_TCP &&
+- sk->sk_type == SOCK_STREAM)
++ sk->sk_type == SOCK_STREAM) {
+ skb = tcp_get_timestamping_opt_stats(sk);
+- else
++ opt_stats = true;
++ } else
+ #endif
+ skb = alloc_skb(0, GFP_ATOMIC);
+ } else {
+@@ -3874,7 +3900,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ else
+ skb->tstamp = ktime_get_real();
+
+- __skb_complete_tx_timestamp(skb, sk, tstype);
++ __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
+ }
+ EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
+
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index fc4bf4d54158..fcf53a399560 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -488,16 +488,15 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk,
+ return false;
+
+ /* Support IP_PKTINFO on tstamp packets if requested, to correlate
+- * timestamp with egress dev. Not possible for packets without dev
++ * timestamp with egress dev. Not possible for packets without iif
+ * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
+ */
+- if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
+- (!skb->dev))
++ info = PKTINFO_SKB_CB(skb);
++ if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
++ !info->ipi_ifindex)
+ return false;
+
+- info = PKTINFO_SKB_CB(skb);
+ info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
+- info->ipi_ifindex = skb->dev->ifindex;
+ return true;
+ }
+
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 68d77b1f1495..51e2f3c5e954 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -156,17 +156,18 @@ int ping_hash(struct sock *sk)
+ void ping_unhash(struct sock *sk)
+ {
+ struct inet_sock *isk = inet_sk(sk);
++
+ pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
++ write_lock_bh(&ping_table.lock);
+ if (sk_hashed(sk)) {
+- write_lock_bh(&ping_table.lock);
+ hlist_nulls_del(&sk->sk_nulls_node);
+ sk_nulls_node_init(&sk->sk_nulls_node);
+ sock_put(sk);
+ isk->inet_num = 0;
+ isk->inet_sport = 0;
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+- write_unlock_bh(&ping_table.lock);
+ }
++ write_unlock_bh(&ping_table.lock);
+ }
+ EXPORT_SYMBOL_GPL(ping_unhash);
+
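The ping_unhash change widens the lock so the sk_hashed() test and the teardown happen atomically; previously two threads could both pass the check and unhash the same socket twice. The shape of the fix (teardown helper hypothetical, standing in for the body shown above):

  write_lock_bh(&ping_table.lock);
  if (sk_hashed(sk))            /* test and teardown under one lock */
          unhash_and_release(sk);
  write_unlock_bh(&ping_table.lock);
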
1489 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1490 +index 8976887dc83e..6263af2f6ce8 100644
1491 +--- a/net/ipv4/route.c
1492 ++++ b/net/ipv4/route.c
1493 +@@ -2608,7 +2608,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
1494 + skb_reset_network_header(skb);
1495 +
1496 + /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
1497 +- ip_hdr(skb)->protocol = IPPROTO_ICMP;
1498 ++ ip_hdr(skb)->protocol = IPPROTO_UDP;
1499 + skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
1500 +
1501 + src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
1502 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1503 +index 0efb4c7f6704..53fa3a4275de 100644
1504 +--- a/net/ipv4/tcp.c
1505 ++++ b/net/ipv4/tcp.c
1506 +@@ -2301,6 +2301,7 @@ int tcp_disconnect(struct sock *sk, int flags)
1507 + tcp_init_send_head(sk);
1508 + memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
1509 + __sk_dst_reset(sk);
1510 ++ tcp_saved_syn_free(tp);
1511 +
1512 + WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
1513 +
1514 +diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
1515 +index 79c4817abc94..6e3c512054a6 100644
1516 +--- a/net/ipv4/tcp_cong.c
1517 ++++ b/net/ipv4/tcp_cong.c
1518 +@@ -168,12 +168,8 @@ void tcp_assign_congestion_control(struct sock *sk)
1519 + }
1520 + out:
1521 + rcu_read_unlock();
1522 ++ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
1523 +
1524 +- /* Clear out private data before diag gets it and
1525 +- * the ca has not been initialized.
1526 +- */
1527 +- if (ca->get_info)
1528 +- memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
1529 + if (ca->flags & TCP_CONG_NEEDS_ECN)
1530 + INET_ECN_xmit(sk);
1531 + else
1532 +@@ -200,11 +196,10 @@ static void tcp_reinit_congestion_control(struct sock *sk,
1533 + tcp_cleanup_congestion_control(sk);
1534 + icsk->icsk_ca_ops = ca;
1535 + icsk->icsk_ca_setsockopt = 1;
1536 ++ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
1537 +
1538 +- if (sk->sk_state != TCP_CLOSE) {
1539 +- memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
1540 ++ if (sk->sk_state != TCP_CLOSE)
1541 + tcp_init_congestion_control(sk);
1542 +- }
1543 + }
1544 +
1545 + /* Manage refcounts on socket close. */
1546 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1547 +index a7bcc0ab5e99..ec76bbee2c35 100644
1548 +--- a/net/ipv6/addrconf.c
1549 ++++ b/net/ipv6/addrconf.c
1550 +@@ -3263,14 +3263,24 @@ static void addrconf_gre_config(struct net_device *dev)
1551 + static int fixup_permanent_addr(struct inet6_dev *idev,
1552 + struct inet6_ifaddr *ifp)
1553 + {
1554 +- if (!ifp->rt) {
1555 +- struct rt6_info *rt;
1556 ++ /* rt6i_ref == 0 means the host route was removed from the
1557 ++ * FIB, for example, if 'lo' device is taken down. In that
1558 ++ * case regenerate the host route.
1559 ++ */
1560 ++ if (!ifp->rt || !atomic_read(&ifp->rt->rt6i_ref)) {
1561 ++ struct rt6_info *rt, *prev;
1562 +
1563 + rt = addrconf_dst_alloc(idev, &ifp->addr, false);
1564 + if (unlikely(IS_ERR(rt)))
1565 + return PTR_ERR(rt);
1566 +
1567 ++ /* ifp->rt can be accessed outside of rtnl */
1568 ++ spin_lock(&ifp->lock);
1569 ++ prev = ifp->rt;
1570 + ifp->rt = rt;
1571 ++ spin_unlock(&ifp->lock);
1572 ++
1573 ++ ip6_rt_put(prev);
1574 + }
1575 +
1576 + if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
1577 +@@ -3618,14 +3628,19 @@ static int addrconf_ifdown(struct net_device *dev, int how)
1578 + INIT_LIST_HEAD(&del_list);
1579 + list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
1580 + struct rt6_info *rt = NULL;
1581 ++ bool keep;
1582 +
1583 + addrconf_del_dad_work(ifa);
1584 +
1585 ++ keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
1586 ++ !addr_is_local(&ifa->addr);
1587 ++ if (!keep)
1588 ++ list_move(&ifa->if_list, &del_list);
1589 ++
1590 + write_unlock_bh(&idev->lock);
1591 + spin_lock_bh(&ifa->lock);
1592 +
1593 +- if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
1594 +- !addr_is_local(&ifa->addr)) {
1595 ++ if (keep) {
1596 + /* set state to skip the notifier below */
1597 + state = INET6_IFADDR_STATE_DEAD;
1598 + ifa->state = 0;
1599 +@@ -3637,8 +3652,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
1600 + } else {
1601 + state = ifa->state;
1602 + ifa->state = INET6_IFADDR_STATE_DEAD;
1603 +-
1604 +- list_move(&ifa->if_list, &del_list);
1605 + }
1606 +
1607 + spin_unlock_bh(&ifa->lock);
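In the addrconf.c hunk above, fixup_permanent_addr() now publishes the regenerated host route under ifp->lock and releases the previous route only after the swap, since ifp->rt can be read outside the rtnl lock. The replace-then-put pattern, sketched in userspace (the locking and refcounting here are illustrative stand-ins):

#include <pthread.h>
#include <stdlib.h>

struct route {
        int refs;
};

static pthread_mutex_t ifp_lock = PTHREAD_MUTEX_INITIALIZER;
static struct route *current_rt;

static void route_put(struct route *rt)
{
        if (rt && --rt->refs == 0)
                free(rt);
}

/* Publish the replacement under the lock, then drop the old reference
 * outside it: readers always see either the old or the new route,
 * never a freed one. */
static void replace_route(struct route *newrt)
{
        struct route *prev;

        pthread_mutex_lock(&ifp_lock);
        prev = current_rt;
        current_rt = newrt;
        pthread_mutex_unlock(&ifp_lock);

        route_put(prev);
}

int main(void)
{
        struct route *a = calloc(1, sizeof(*a));
        struct route *b = calloc(1, sizeof(*b));

        if (!a || !b)
                return 1;
        a->refs = b->refs = 1;
        replace_route(a);
        replace_route(b);       /* a is released here */
        replace_route(NULL);    /* b is released here */
        return 0;
}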
1608 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
1609 +index eec27f87efac..e011122ebd43 100644
1610 +--- a/net/ipv6/datagram.c
1611 ++++ b/net/ipv6/datagram.c
1612 +@@ -405,9 +405,6 @@ static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
1613 + * At one point, excluding local errors was a quick test to identify icmp/icmp6
1614 + * errors. This is no longer true, but the test remained, so the v6 stack,
1615 + * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
1616 +- *
1617 +- * Timestamp code paths do not initialize the fields expected by cmsg:
1618 +- * the PKTINFO fields in skb->cb[]. Fill those in here.
1619 + */
1620 + static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
1621 + struct sock_exterr_skb *serr)
1622 +@@ -419,14 +416,9 @@ static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
1623 + if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
1624 + return false;
1625 +
1626 +- if (!skb->dev)
1627 ++ if (!IP6CB(skb)->iif)
1628 + return false;
1629 +
1630 +- if (skb->protocol == htons(ETH_P_IPV6))
1631 +- IP6CB(skb)->iif = skb->dev->ifindex;
1632 +- else
1633 +- PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
1634 +-
1635 + return true;
1636 + }
1637 +
1638 +diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
1639 +index 275cac628a95..d32e2110aff2 100644
1640 +--- a/net/ipv6/exthdrs.c
1641 ++++ b/net/ipv6/exthdrs.c
1642 +@@ -388,7 +388,6 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
1643 + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
1644 + ((&hdr->segments_left) -
1645 + skb_network_header(skb)));
1646 +- kfree_skb(skb);
1647 + return -1;
1648 + }
1649 +
1650 +@@ -910,6 +909,8 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
1651 + {
1652 + switch (opt->type) {
1653 + case IPV6_SRCRT_TYPE_0:
1654 ++ case IPV6_SRCRT_STRICT:
1655 ++ case IPV6_SRCRT_TYPE_2:
1656 + ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
1657 + break;
1658 + case IPV6_SRCRT_TYPE_4:
1659 +@@ -1164,6 +1165,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
1660 +
1661 + switch (opt->srcrt->type) {
1662 + case IPV6_SRCRT_TYPE_0:
1663 ++ case IPV6_SRCRT_STRICT:
1664 ++ case IPV6_SRCRT_TYPE_2:
1665 + fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
1666 + break;
1667 + case IPV6_SRCRT_TYPE_4:
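The first exthdrs.c hunk above removes a double free: icmpv6_param_prob() already consumes the skb, so the extra kfree_skb() in the segment routing receive path freed it a second time. The ownership rule in miniature, with illustrative names:

#include <stdlib.h>
#include <string.h>

/* Reports the error and takes ownership of the buffer: the caller must
 * not free it afterwards, just as icmpv6_param_prob() owns the skb. */
static void report_and_consume(char *pkt)
{
        /* ... build and emit the error message from pkt ... */
        free(pkt);
}

int main(void)
{
        char *pkt = malloc(64);

        if (!pkt)
                return 1;
        memset(pkt, 0, 64);
        report_and_consume(pkt);
        /* no free(pkt) here: that would be the double free the hunk removes */
        return 0;
}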
1668 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
1669 +index 75fac933c209..a9692ec0cd6d 100644
1670 +--- a/net/ipv6/ip6_tunnel.c
1671 ++++ b/net/ipv6/ip6_tunnel.c
1672 +@@ -1037,7 +1037,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1673 + struct ip6_tnl *t = netdev_priv(dev);
1674 + struct net *net = t->net;
1675 + struct net_device_stats *stats = &t->dev->stats;
1676 +- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1677 ++ struct ipv6hdr *ipv6h;
1678 + struct ipv6_tel_txoption opt;
1679 + struct dst_entry *dst = NULL, *ndst = NULL;
1680 + struct net_device *tdev;
1681 +@@ -1057,26 +1057,28 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1682 +
1683 + /* NBMA tunnel */
1684 + if (ipv6_addr_any(&t->parms.raddr)) {
1685 +- struct in6_addr *addr6;
1686 +- struct neighbour *neigh;
1687 +- int addr_type;
1688 ++ if (skb->protocol == htons(ETH_P_IPV6)) {
1689 ++ struct in6_addr *addr6;
1690 ++ struct neighbour *neigh;
1691 ++ int addr_type;
1692 +
1693 +- if (!skb_dst(skb))
1694 +- goto tx_err_link_failure;
1695 ++ if (!skb_dst(skb))
1696 ++ goto tx_err_link_failure;
1697 +
1698 +- neigh = dst_neigh_lookup(skb_dst(skb),
1699 +- &ipv6_hdr(skb)->daddr);
1700 +- if (!neigh)
1701 +- goto tx_err_link_failure;
1702 ++ neigh = dst_neigh_lookup(skb_dst(skb),
1703 ++ &ipv6_hdr(skb)->daddr);
1704 ++ if (!neigh)
1705 ++ goto tx_err_link_failure;
1706 +
1707 +- addr6 = (struct in6_addr *)&neigh->primary_key;
1708 +- addr_type = ipv6_addr_type(addr6);
1709 ++ addr6 = (struct in6_addr *)&neigh->primary_key;
1710 ++ addr_type = ipv6_addr_type(addr6);
1711 +
1712 +- if (addr_type == IPV6_ADDR_ANY)
1713 +- addr6 = &ipv6_hdr(skb)->daddr;
1714 ++ if (addr_type == IPV6_ADDR_ANY)
1715 ++ addr6 = &ipv6_hdr(skb)->daddr;
1716 +
1717 +- memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1718 +- neigh_release(neigh);
1719 ++ memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1720 ++ neigh_release(neigh);
1721 ++ }
1722 + } else if (!(t->parms.flags &
1723 + (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
1724 + /* enable the cache only if the routing decision does
1725 +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
1726 +index 604d8953c775..72a00e4961ba 100644
1727 +--- a/net/ipv6/ip6mr.c
1728 ++++ b/net/ipv6/ip6mr.c
1729 +@@ -774,7 +774,8 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
1730 + * Delete a VIF entry
1731 + */
1732 +
1733 +-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
1734 ++static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
1735 ++ struct list_head *head)
1736 + {
1737 + struct mif_device *v;
1738 + struct net_device *dev;
1739 +@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
1740 + dev->ifindex, &in6_dev->cnf);
1741 + }
1742 +
1743 +- if (v->flags & MIFF_REGISTER)
1744 ++ if ((v->flags & MIFF_REGISTER) && !notify)
1745 + unregister_netdevice_queue(dev, head);
1746 +
1747 + dev_put(dev);
1748 +@@ -1331,7 +1332,6 @@ static int ip6mr_device_event(struct notifier_block *this,
1749 + struct mr6_table *mrt;
1750 + struct mif_device *v;
1751 + int ct;
1752 +- LIST_HEAD(list);
1753 +
1754 + if (event != NETDEV_UNREGISTER)
1755 + return NOTIFY_DONE;
1756 +@@ -1340,10 +1340,9 @@ static int ip6mr_device_event(struct notifier_block *this,
1757 + v = &mrt->vif6_table[0];
1758 + for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1759 + if (v->dev == dev)
1760 +- mif6_delete(mrt, ct, &list);
1761 ++ mif6_delete(mrt, ct, 1, NULL);
1762 + }
1763 + }
1764 +- unregister_netdevice_many(&list);
1765 +
1766 + return NOTIFY_DONE;
1767 + }
1768 +@@ -1552,7 +1551,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
1769 + for (i = 0; i < mrt->maxvif; i++) {
1770 + if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
1771 + continue;
1772 +- mif6_delete(mrt, i, &list);
1773 ++ mif6_delete(mrt, i, 0, &list);
1774 + }
1775 + unregister_netdevice_many(&list);
1776 +
1777 +@@ -1706,7 +1705,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1778 + if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1779 + return -EFAULT;
1780 + rtnl_lock();
1781 +- ret = mif6_delete(mrt, mifi, NULL);
1782 ++ ret = mif6_delete(mrt, mifi, 0, NULL);
1783 + rtnl_unlock();
1784 + return ret;
1785 +
1786 +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
1787 +index ea89073c8247..294fb6f743cb 100644
1788 +--- a/net/ipv6/raw.c
1789 ++++ b/net/ipv6/raw.c
1790 +@@ -1174,8 +1174,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
1791 + spin_lock_bh(&sk->sk_receive_queue.lock);
1792 + skb = skb_peek(&sk->sk_receive_queue);
1793 + if (skb)
1794 +- amount = skb_tail_pointer(skb) -
1795 +- skb_transport_header(skb);
1796 ++ amount = skb->len;
1797 + spin_unlock_bh(&sk->sk_receive_queue.lock);
1798 + return put_user(amount, (int __user *)arg);
1799 + }
1800 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1801 +index 7ea85370c11c..523681a5c898 100644
1802 +--- a/net/ipv6/route.c
1803 ++++ b/net/ipv6/route.c
1804 +@@ -1831,6 +1831,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
1805 + int addr_type;
1806 + int err = -EINVAL;
1807 +
1808 ++ /* RTF_PCPU is an internal flag; can not be set by userspace */
1809 ++ if (cfg->fc_flags & RTF_PCPU)
1810 ++ goto out;
1811 ++
1812 + if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1813 + goto out;
1814 + #ifndef CONFIG_IPV6_SUBTREES
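The route.c hunk above rejects RTF_PCPU in routes configured from userspace; as the added comment says, it is an internal flag (it marks kernel-managed per-CPU route clones), and letting callers set it confuses route handling. Filtering internal flags at the configuration boundary looks roughly like this (the flag values are illustrative, not the kernel's):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define RTF_UP          0x0001u
#define RTF_GATEWAY     0x0002u
#define RTF_INTERNAL    0x40000000u     /* kernel-internal, illustrative */

static int create_route(uint32_t flags)
{
        if (flags & RTF_INTERNAL)
                return -EINVAL;         /* not settable by userspace */
        /* ... build the route ... */
        return 0;
}

int main(void)
{
        printf("user flags:    %d\n", create_route(RTF_UP | RTF_GATEWAY));
        printf("internal flag: %d\n", create_route(RTF_INTERNAL));
        return 0;
}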
1815 +diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
1816 +index a855eb325b03..5f44ffed2576 100644
1817 +--- a/net/ipv6/seg6.c
1818 ++++ b/net/ipv6/seg6.c
1819 +@@ -53,6 +53,9 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
1820 + struct sr6_tlv *tlv;
1821 + unsigned int tlv_len;
1822 +
1823 ++ if (trailing < sizeof(*tlv))
1824 ++ return false;
1825 ++
1826 + tlv = (struct sr6_tlv *)((unsigned char *)srh + tlv_offset);
1827 + tlv_len = sizeof(*tlv) + tlv->len;
1828 +
1829 +diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
1830 +index a646f3481240..fecad1098cf8 100644
1831 +--- a/net/kcm/kcmsock.c
1832 ++++ b/net/kcm/kcmsock.c
1833 +@@ -1685,7 +1685,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1834 + struct kcm_attach info;
1835 +
1836 + if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1837 +- err = -EFAULT;
1838 ++ return -EFAULT;
1839 +
1840 + err = kcm_attach_ioctl(sock, &info);
1841 +
1842 +@@ -1695,7 +1695,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1843 + struct kcm_unattach info;
1844 +
1845 + if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1846 +- err = -EFAULT;
1847 ++ return -EFAULT;
1848 +
1849 + err = kcm_unattach_ioctl(sock, &info);
1850 +
1851 +@@ -1706,7 +1706,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1852 + struct socket *newsock = NULL;
1853 +
1854 + if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1855 +- err = -EFAULT;
1856 ++ return -EFAULT;
1857 +
1858 + err = kcm_clone(sock, &info, &newsock);
1859 +
1860 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
1861 +index 85948c69b236..56036ab5dcb7 100644
1862 +--- a/net/l2tp/l2tp_core.c
1863 ++++ b/net/l2tp/l2tp_core.c
1864 +@@ -278,7 +278,8 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
1865 + }
1866 + EXPORT_SYMBOL_GPL(l2tp_session_find);
1867 +
1868 +-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
1869 ++struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
1870 ++ bool do_ref)
1871 + {
1872 + int hash;
1873 + struct l2tp_session *session;
1874 +@@ -288,6 +289,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
1875 + for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1876 + hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
1877 + if (++count > nth) {
1878 ++ l2tp_session_inc_refcount(session);
1879 ++ if (do_ref && session->ref)
1880 ++ session->ref(session);
1881 + read_unlock_bh(&tunnel->hlist_lock);
1882 + return session;
1883 + }
1884 +@@ -298,7 +302,7 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
1885 +
1886 + return NULL;
1887 + }
1888 +-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
1889 ++EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
1890 +
1891 + /* Lookup a session by interface name.
1892 + * This is very inefficient but is only used by management interfaces.
1893 +diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
1894 +index aebf281d09ee..221648b07b3c 100644
1895 +--- a/net/l2tp/l2tp_core.h
1896 ++++ b/net/l2tp/l2tp_core.h
1897 +@@ -233,7 +233,8 @@ static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
1898 + struct l2tp_session *l2tp_session_find(struct net *net,
1899 + struct l2tp_tunnel *tunnel,
1900 + u32 session_id);
1901 +-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
1902 ++struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
1903 ++ bool do_ref);
1904 + struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
1905 + struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
1906 + struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
1907 +diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
1908 +index 2d6760a2ae34..d100aed3d06f 100644
1909 +--- a/net/l2tp/l2tp_debugfs.c
1910 ++++ b/net/l2tp/l2tp_debugfs.c
1911 +@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
1912 +
1913 + static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
1914 + {
1915 +- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
1916 ++ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
1917 + pd->session_idx++;
1918 +
1919 + if (pd->session == NULL) {
1920 +@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
1921 + }
1922 +
1923 + /* Show the tunnel or session context */
1924 +- if (pd->session == NULL)
1925 ++ if (!pd->session) {
1926 + l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
1927 +- else
1928 ++ } else {
1929 + l2tp_dfs_seq_session_show(m, pd->session);
1930 ++ if (pd->session->deref)
1931 ++ pd->session->deref(pd->session);
1932 ++ l2tp_session_dec_refcount(pd->session);
1933 ++ }
1934 +
1935 + out:
1936 + return 0;
1937 +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
1938 +index 3ed30153a6f5..fa2bcfce53df 100644
1939 +--- a/net/l2tp/l2tp_ip.c
1940 ++++ b/net/l2tp/l2tp_ip.c
1941 +@@ -171,9 +171,10 @@ static int l2tp_ip_recv(struct sk_buff *skb)
1942 +
1943 + tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
1944 + tunnel = l2tp_tunnel_find(net, tunnel_id);
1945 +- if (tunnel != NULL)
1946 ++ if (tunnel) {
1947 + sk = tunnel->sock;
1948 +- else {
1949 ++ sock_hold(sk);
1950 ++ } else {
1951 + struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
1952 +
1953 + read_lock_bh(&l2tp_ip_lock);
1954 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
1955 +index f47c45250f86..4e4fa1538cbb 100644
1956 +--- a/net/l2tp/l2tp_ip6.c
1957 ++++ b/net/l2tp/l2tp_ip6.c
1958 +@@ -183,9 +183,10 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
1959 +
1960 + tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
1961 + tunnel = l2tp_tunnel_find(net, tunnel_id);
1962 +- if (tunnel != NULL)
1963 ++ if (tunnel) {
1964 + sk = tunnel->sock;
1965 +- else {
1966 ++ sock_hold(sk);
1967 ++ } else {
1968 + struct ipv6hdr *iph = ipv6_hdr(skb);
1969 +
1970 + read_lock_bh(&l2tp_ip6_lock);
1971 +diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
1972 +index 3620fba31786..ad191a786806 100644
1973 +--- a/net/l2tp/l2tp_netlink.c
1974 ++++ b/net/l2tp/l2tp_netlink.c
1975 +@@ -852,7 +852,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
1976 + goto out;
1977 + }
1978 +
1979 +- session = l2tp_session_find_nth(tunnel, si);
1980 ++ session = l2tp_session_get_nth(tunnel, si, false);
1981 + if (session == NULL) {
1982 + ti++;
1983 + tunnel = NULL;
1984 +@@ -862,8 +862,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
1985 +
1986 + if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
1987 + cb->nlh->nlmsg_seq, NLM_F_MULTI,
1988 +- session, L2TP_CMD_SESSION_GET) < 0)
1989 ++ session, L2TP_CMD_SESSION_GET) < 0) {
1990 ++ l2tp_session_dec_refcount(session);
1991 + break;
1992 ++ }
1993 ++ l2tp_session_dec_refcount(session);
1994 +
1995 + si++;
1996 + }
1997 +diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
1998 +index 36cc56fd0418..781d22272f4a 100644
1999 +--- a/net/l2tp/l2tp_ppp.c
2000 ++++ b/net/l2tp/l2tp_ppp.c
2001 +@@ -450,6 +450,10 @@ static void pppol2tp_session_close(struct l2tp_session *session)
2002 + static void pppol2tp_session_destruct(struct sock *sk)
2003 + {
2004 + struct l2tp_session *session = sk->sk_user_data;
2005 ++
2006 ++ skb_queue_purge(&sk->sk_receive_queue);
2007 ++ skb_queue_purge(&sk->sk_write_queue);
2008 ++
2009 + if (session) {
2010 + sk->sk_user_data = NULL;
2011 + BUG_ON(session->magic != L2TP_SESSION_MAGIC);
2012 +@@ -488,9 +492,6 @@ static int pppol2tp_release(struct socket *sock)
2013 + l2tp_session_queue_purge(session);
2014 + sock_put(sk);
2015 + }
2016 +- skb_queue_purge(&sk->sk_receive_queue);
2017 +- skb_queue_purge(&sk->sk_write_queue);
2018 +-
2019 + release_sock(sk);
2020 +
2021 + /* This will delete the session context via
2022 +@@ -1554,7 +1555,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
2023 +
2024 + static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
2025 + {
2026 +- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
2027 ++ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
2028 + pd->session_idx++;
2029 +
2030 + if (pd->session == NULL) {
2031 +@@ -1681,10 +1682,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
2032 +
2033 + /* Show the tunnel or session context.
2034 + */
2035 +- if (pd->session == NULL)
2036 ++ if (!pd->session) {
2037 + pppol2tp_seq_tunnel_show(m, pd->tunnel);
2038 +- else
2039 ++ } else {
2040 + pppol2tp_seq_session_show(m, pd->session);
2041 ++ if (pd->session->deref)
2042 ++ pd->session->deref(pd->session);
2043 ++ l2tp_session_dec_refcount(pd->session);
2044 ++ }
2045 +
2046 + out:
2047 + return 0;
2048 +@@ -1843,4 +1848,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP");
2049 + MODULE_LICENSE("GPL");
2050 + MODULE_VERSION(PPPOL2TP_DRV_VERSION);
2051 + MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
2052 +-MODULE_ALIAS_L2TP_PWTYPE(11);
2053 ++MODULE_ALIAS_L2TP_PWTYPE(7);
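The l2tp hunks above are one coordinated change: l2tp_session_find_nth() becomes l2tp_session_get_nth(), which takes a reference (and, when asked, calls the session's own ref hook) before the hash lock is dropped, and every caller (debugfs, the pppol2tp proc file, the netlink dump) now drops that reference when done. Without this the session could be freed between lookup and use. The get/put discipline in userspace miniature (the refcounting here is illustrative, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct session {
        int refs;
        int id;
};

static struct session *session_get(struct session *s)
{
        if (s)
                s->refs++;      /* taken while the lookup lock is held */
        return s;
}

static void session_put(struct session *s)
{
        if (s && --s->refs == 0) {
                printf("session %d freed\n", s->id);
                free(s);
        }
}

int main(void)
{
        struct session *s = calloc(1, sizeof(*s));

        if (!s)
                return 1;
        s->refs = 1;
        s->id = 7;

        /* lookup returns a referenced object... */
        struct session *found = session_get(s);
        printf("using session %d\n", found->id);
        /* ...and every caller must balance it, as the hunks now do */
        session_put(found);

        session_put(s);         /* drop the creator's reference */
        return 0;
}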
2054 +diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
2055 +index 2c0a00f7f1b7..bb789359a29b 100644
2056 +--- a/net/openvswitch/flow.c
2057 ++++ b/net/openvswitch/flow.c
2058 +@@ -527,7 +527,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
2059 +
2060 + /* Link layer. */
2061 + clear_vlan(key);
2062 +- if (key->mac_proto == MAC_PROTO_NONE) {
2063 ++ if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
2064 + if (unlikely(eth_type_vlan(skb->protocol)))
2065 + return -EINVAL;
2066 +
2067 +@@ -745,7 +745,13 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
2068 +
2069 + int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
2070 + {
2071 +- return key_extract(skb, key);
2072 ++ int res;
2073 ++
2074 ++ res = key_extract(skb, key);
2075 ++ if (!res)
2076 ++ key->mac_proto &= ~SW_FLOW_KEY_INVALID;
2077 ++
2078 ++ return res;
2079 + }
2080 +
2081 + static int key_extract_mac_proto(struct sk_buff *skb)
2082 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2083 +index 5c919933a39b..0f074c96f43f 100644
2084 +--- a/net/packet/af_packet.c
2085 ++++ b/net/packet/af_packet.c
2086 +@@ -3644,6 +3644,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2087 + return -EBUSY;
2088 + if (copy_from_user(&val, optval, sizeof(val)))
2089 + return -EFAULT;
2090 ++ if (val > INT_MAX)
2091 ++ return -EINVAL;
2092 + po->tp_reserve = val;
2093 + return 0;
2094 + }
2095 +@@ -4189,6 +4191,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2096 + rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
2097 + if (unlikely(rb->frames_per_block == 0))
2098 + goto out;
2099 ++ if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
2100 ++ goto out;
2101 + if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
2102 + req->tp_frame_nr))
2103 + goto out;
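Both af_packet.c hunks above are arithmetic-overflow guards: tp_reserve feeds later size calculations that must not wrap, so values above INT_MAX are refused, and tp_block_size * tp_block_nr is validated before the product is ever formed. Dividing UINT_MAX by one factor first is the portable pre-check for an unsigned multiply, e.g.:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* True iff a * b would exceed UINT_MAX. Dividing first can never
 * overflow, which is why the hunk tests tp_block_size against
 * UINT_MAX / tp_block_nr instead of multiplying and comparing. */
static bool umul_overflows(unsigned int a, unsigned int b)
{
        return b != 0 && a > UINT_MAX / b;
}

int main(void)
{
        printf("4096 * 1024:       %s\n",
               umul_overflows(4096u, 1024u) ? "overflows" : "ok");
        printf("(1<<20) * (1<<13): %s\n",
               umul_overflows(1u << 20, 1u << 13) ? "overflows" : "ok");
        return 0;
}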
2104 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2105 +index d04a8b66098c..6932cf34fea8 100644
2106 +--- a/net/sctp/socket.c
2107 ++++ b/net/sctp/socket.c
2108 +@@ -6860,6 +6860,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
2109 + if (sock->state != SS_UNCONNECTED)
2110 + goto out;
2111 +
2112 ++ if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
2113 ++ goto out;
2114 ++
2115 + /* If backlog is zero, disable listening. */
2116 + if (!backlog) {
2117 + if (sctp_sstate(sk, CLOSED))
2118 +diff --git a/net/socket.c b/net/socket.c
2119 +index 02bd9249e295..6361d3161120 100644
2120 +--- a/net/socket.c
2121 ++++ b/net/socket.c
2122 +@@ -654,6 +654,16 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
2123 + }
2124 + EXPORT_SYMBOL(kernel_sendmsg);
2125 +
2126 ++static bool skb_is_err_queue(const struct sk_buff *skb)
2127 ++{
2128 ++ /* pkt_type of skbs enqueued on the error queue are set to
2129 ++ * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do
2130 ++ * in recvmsg, since skbs received on a local socket will never
2131 ++ * have a pkt_type of PACKET_OUTGOING.
2132 ++ */
2133 ++ return skb->pkt_type == PACKET_OUTGOING;
2134 ++}
2135 ++
2136 + /*
2137 + * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
2138 + */
2139 +@@ -697,7 +707,8 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2140 + put_cmsg(msg, SOL_SOCKET,
2141 + SCM_TIMESTAMPING, sizeof(tss), &tss);
2142 +
2143 +- if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS))
2144 ++ if (skb_is_err_queue(skb) && skb->len &&
2145 ++ SKB_EXT_ERR(skb)->opt_stats)
2146 + put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS,
2147 + skb->len, skb->data);
2148 + }
2149 +diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
2150 +index 3b693e924db7..12ba83367b1b 100644
2151 +--- a/sound/core/seq/seq_lock.c
2152 ++++ b/sound/core/seq/seq_lock.c
2153 +@@ -28,19 +28,16 @@
2154 + /* wait until all locks are released */
2155 + void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
2156 + {
2157 +- int max_count = 5 * HZ;
2158 ++ int warn_count = 5 * HZ;
2159 +
2160 + if (atomic_read(lockp) < 0) {
2161 + pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
2162 + return;
2163 + }
2164 + while (atomic_read(lockp) > 0) {
2165 +- if (max_count == 0) {
2166 +- pr_warn("ALSA: seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
2167 +- break;
2168 +- }
2169 ++ if (warn_count-- == 0)
2170 ++ pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
2171 + schedule_timeout_uninterruptible(1);
2172 +- max_count--;
2173 + }
2174 + }
2175 +
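The seq_lock.c hunk above changes the semantics of the sync wait: the old code gave up after roughly five seconds and proceeded, which could free data still in use, while the new code waits indefinitely and instead logs a warning once the five-second budget runs out. A sketch of that warn-but-keep-waiting loop (the polling interval and names are illustrative):

#include <stdio.h>
#include <unistd.h>

/* Wait until *users drops to zero. Never give up and proceed, that is
 * how the old code could free data still in use; instead warn once the
 * expected wait budget is exhausted, so a stuck counter shows up in
 * the logs. */
static void wait_for_users(const volatile int *users)
{
        int warn_count = 5;     /* ~5 s at one poll per second */

        while (*users > 0) {
                if (warn_count-- == 0)
                        fprintf(stderr, "still waiting [%d left]\n", *users);
                sleep(1);
        }
}

int main(void)
{
        volatile int users = 0;

        wait_for_users(&users); /* returns immediately: no users left */
        return 0;
}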
2176 +diff --git a/sound/firewire/lib.h b/sound/firewire/lib.h
2177 +index f6769312ebfc..c3768cd494a5 100644
2178 +--- a/sound/firewire/lib.h
2179 ++++ b/sound/firewire/lib.h
2180 +@@ -45,7 +45,7 @@ struct snd_fw_async_midi_port {
2181 +
2182 + struct snd_rawmidi_substream *substream;
2183 + snd_fw_async_midi_port_fill fill;
2184 +- unsigned int consume_bytes;
2185 ++ int consume_bytes;
2186 + };
2187 +
2188 + int snd_fw_async_midi_port_init(struct snd_fw_async_midi_port *port,
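The lib.h hunk above flips consume_bytes from unsigned int to int because the fill callback can return a negative errno; stored into an unsigned field, the error would silently become a huge byte count. The classic sign-truncation trap, in a few lines:

#include <stdio.h>

/* Stand-in for a fill callback that can fail with a negative errno. */
static int fill(void)
{
        return -5;      /* e.g. -EIO */
}

int main(void)
{
        unsigned int as_unsigned = fill();      /* wraps to a huge value */
        int as_signed = fill();                 /* error stays -5 */

        printf("unsigned: %u\n", as_unsigned);
        printf("signed:   %d\n", as_signed);

        if (as_signed < 0)
                printf("error detected only via the signed field\n");
        return 0;
}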
2189 +diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
2190 +index e629b88f7d93..474b06d8acd1 100644
2191 +--- a/sound/firewire/oxfw/oxfw.c
2192 ++++ b/sound/firewire/oxfw/oxfw.c
2193 +@@ -226,11 +226,11 @@ static void do_registration(struct work_struct *work)
2194 + if (err < 0)
2195 + goto error;
2196 +
2197 +- err = detect_quirks(oxfw);
2198 ++ err = snd_oxfw_stream_discover(oxfw);
2199 + if (err < 0)
2200 + goto error;
2201 +
2202 +- err = snd_oxfw_stream_discover(oxfw);
2203 ++ err = detect_quirks(oxfw);
2204 + if (err < 0)
2205 + goto error;
2206 +
2207 +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
2208 +index 1bd985f01c73..342d8425bc1f 100644
2209 +--- a/sound/soc/intel/boards/bytcr_rt5640.c
2210 ++++ b/sound/soc/intel/boards/bytcr_rt5640.c
2211 +@@ -621,7 +621,7 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
2212 + .codec_dai_name = "snd-soc-dummy-dai",
2213 + .codec_name = "snd-soc-dummy",
2214 + .platform_name = "sst-mfld-platform",
2215 +- .ignore_suspend = 1,
2216 ++ .nonatomic = true,
2217 + .dynamic = 1,
2218 + .dpcm_playback = 1,
2219 + .dpcm_capture = 1,
2220 +@@ -634,7 +634,6 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
2221 + .codec_dai_name = "snd-soc-dummy-dai",
2222 + .codec_name = "snd-soc-dummy",
2223 + .platform_name = "sst-mfld-platform",
2224 +- .ignore_suspend = 1,
2225 + .nonatomic = true,
2226 + .dynamic = 1,
2227 + .dpcm_playback = 1,
2228 +@@ -661,6 +660,7 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
2229 + | SND_SOC_DAIFMT_CBS_CFS,
2230 + .be_hw_params_fixup = byt_rt5640_codec_fixup,
2231 + .ignore_suspend = 1,
2232 ++ .nonatomic = true,
2233 + .dpcm_playback = 1,
2234 + .dpcm_capture = 1,
2235 + .init = byt_rt5640_init,
2236 +diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
2237 +index 2d24dc04b597..d938328dc64f 100644
2238 +--- a/sound/soc/intel/boards/bytcr_rt5651.c
2239 ++++ b/sound/soc/intel/boards/bytcr_rt5651.c
2240 +@@ -235,7 +235,6 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
2241 + .codec_dai_name = "snd-soc-dummy-dai",
2242 + .codec_name = "snd-soc-dummy",
2243 + .platform_name = "sst-mfld-platform",
2244 +- .ignore_suspend = 1,
2245 + .nonatomic = true,
2246 + .dynamic = 1,
2247 + .dpcm_playback = 1,
2248 +@@ -249,7 +248,6 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
2249 + .codec_dai_name = "snd-soc-dummy-dai",
2250 + .codec_name = "snd-soc-dummy",
2251 + .platform_name = "sst-mfld-platform",
2252 +- .ignore_suspend = 1,
2253 + .nonatomic = true,
2254 + .dynamic = 1,
2255 + .dpcm_playback = 1,
2256 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
2257 +index 853d7e43434a..e1aea9e60f33 100644
2258 +--- a/tools/testing/selftests/bpf/test_verifier.c
2259 ++++ b/tools/testing/selftests/bpf/test_verifier.c
2260 +@@ -2876,6 +2876,26 @@ static struct bpf_test tests[] = {
2261 + .prog_type = BPF_PROG_TYPE_LWT_XMIT,
2262 + },
2263 + {
2264 ++ "overlapping checks for direct packet access",
2265 ++ .insns = {
2266 ++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2267 ++ offsetof(struct __sk_buff, data)),
2268 ++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2269 ++ offsetof(struct __sk_buff, data_end)),
2270 ++ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2271 ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2272 ++ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
2273 ++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2274 ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
2275 ++ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
2276 ++ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
2277 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
2278 ++ BPF_EXIT_INSN(),
2279 ++ },
2280 ++ .result = ACCEPT,
2281 ++ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
2282 ++ },
2283 ++ {
2284 + "invalid access of tc_classid for LWT_IN",
2285 + .insns = {
2286 + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,