From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.16 commit in: /
Date: Mon, 30 Apr 2018 10:30:49
Message-Id: 1525084235.54039fa3feac7ed37530054550520d924419b496.mpagano@gentoo
1 commit: 54039fa3feac7ed37530054550520d924419b496
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Mon Apr 30 10:30:35 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Mon Apr 30 10:30:35 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=54039fa3
7
8 Linux patch 4.16.6
9
10 0000_README | 4 +
11 1005_linux-4.16.6.patch | 3759 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 3763 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 344c387..d4182dc 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -63,6 +63,10 @@ Patch: 1004_linux-4.16.5.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.16.5
21
22 +Patch: 1005_linux-4.16.6.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.16.6
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1005_linux-4.16.6.patch b/1005_linux-4.16.6.patch
31 new file mode 100644
32 index 0000000..e5b7089
33 --- /dev/null
34 +++ b/1005_linux-4.16.6.patch
35 @@ -0,0 +1,3759 @@
36 +diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
37 +index a553d4e4a0fb..f778901c4297 100644
38 +--- a/Documentation/networking/ip-sysctl.txt
39 ++++ b/Documentation/networking/ip-sysctl.txt
40 +@@ -1386,26 +1386,26 @@ mld_qrv - INTEGER
41 + Default: 2 (as specified by RFC3810 9.1)
42 + Minimum: 1 (as specified by RFC6636 4.5)
43 +
44 +-max_dst_opts_cnt - INTEGER
45 ++max_dst_opts_number - INTEGER
46 + Maximum number of non-padding TLVs allowed in a Destination
47 + options extension header. If this value is less than zero
48 + then unknown options are disallowed and the number of known
49 + TLVs allowed is the absolute value of this number.
50 + Default: 8
51 +
52 +-max_hbh_opts_cnt - INTEGER
53 ++max_hbh_opts_number - INTEGER
54 + Maximum number of non-padding TLVs allowed in a Hop-by-Hop
55 + options extension header. If this value is less than zero
56 + then unknown options are disallowed and the number of known
57 + TLVs allowed is the absolute value of this number.
58 + Default: 8
59 +
60 +-max dst_opts_len - INTEGER
61 ++max_dst_opts_length - INTEGER
62 + Maximum length allowed for a Destination options extension
63 + header.
64 + Default: INT_MAX (unlimited)
65 +
66 +-max hbh_opts_len - INTEGER
67 ++max_hbh_length - INTEGER
68 + Maximum length allowed for a Hop-by-Hop options extension
69 + header.
70 + Default: INT_MAX (unlimited)
71 +diff --git a/Makefile b/Makefile
72 +index 6678a90f355b..41f07b2b7905 100644
73 +--- a/Makefile
74 ++++ b/Makefile
75 +@@ -1,7 +1,7 @@
76 + # SPDX-License-Identifier: GPL-2.0
77 + VERSION = 4
78 + PATCHLEVEL = 16
79 +-SUBLEVEL = 5
80 ++SUBLEVEL = 6
81 + EXTRAVERSION =
82 + NAME = Fearless Coyote
83 +
84 +diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
85 +index 5ee27dc9a10c..feebb2944882 100644
86 +--- a/arch/s390/kernel/perf_cpum_cf_events.c
87 ++++ b/arch/s390/kernel/perf_cpum_cf_events.c
88 +@@ -123,7 +123,7 @@ CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1);
89 + CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1);
90 + CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2);
91 + CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3);
92 +-CPUMF_EVENT_ATTR(cf_z13, L1D_WRITES_RO_EXCL, 0x0080);
93 ++CPUMF_EVENT_ATTR(cf_z13, L1D_RO_EXCL_WRITES, 0x0080);
94 + CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081);
95 + CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082);
96 + CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083);
97 +@@ -179,7 +179,7 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db);
98 + CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc);
99 + CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
100 + CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
101 +-CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080);
102 ++CPUMF_EVENT_ATTR(cf_z14, L1D_RO_EXCL_WRITES, 0x0080);
103 + CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081);
104 + CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082);
105 + CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083);
106 +@@ -371,7 +371,7 @@ static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = {
107 + };
108 +
109 + static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
110 +- CPUMF_EVENT_PTR(cf_z13, L1D_WRITES_RO_EXCL),
111 ++ CPUMF_EVENT_PTR(cf_z13, L1D_RO_EXCL_WRITES),
112 + CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES),
113 + CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES),
114 + CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES),
115 +@@ -431,7 +431,7 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
116 + };
117 +
118 + static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = {
119 +- CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL),
120 ++ CPUMF_EVENT_PTR(cf_z14, L1D_RO_EXCL_WRITES),
121 + CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES),
122 + CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES),
123 + CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES),
124 +diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
125 +index d9d1f512f019..5007fac01bb5 100644
126 +--- a/arch/s390/kernel/uprobes.c
127 ++++ b/arch/s390/kernel/uprobes.c
128 +@@ -150,6 +150,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
129 + return orig;
130 + }
131 +
132 ++bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
133 ++ struct pt_regs *regs)
134 ++{
135 ++ if (ctx == RP_CHECK_CHAIN_CALL)
136 ++ return user_stack_pointer(regs) <= ret->stack;
137 ++ else
138 ++ return user_stack_pointer(regs) < ret->stack;
139 ++}
140 ++
141 + /* Instruction Emulation */
142 +
143 + static void adjust_psw_addr(psw_t *psw, unsigned long len)
144 +diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
145 +index 76fb96966f7b..2f2e737be0f8 100644
146 +--- a/drivers/acpi/acpi_video.c
147 ++++ b/drivers/acpi/acpi_video.c
148 +@@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void)
149 + return opregion;
150 + }
151 +
152 ++static bool dmi_is_desktop(void)
153 ++{
154 ++ const char *chassis_type;
155 ++
156 ++ chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
157 ++ if (!chassis_type)
158 ++ return false;
159 ++
160 ++ if (!strcmp(chassis_type, "3") || /* 3: Desktop */
161 ++ !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
162 ++ !strcmp(chassis_type, "5") || /* 5: Pizza Box */
163 ++ !strcmp(chassis_type, "6") || /* 6: Mini Tower */
164 ++ !strcmp(chassis_type, "7") || /* 7: Tower */
165 ++ !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
166 ++ return true;
167 ++
168 ++ return false;
169 ++}
170 ++
171 + int acpi_video_register(void)
172 + {
173 + int ret = 0;
174 +@@ -2143,8 +2162,12 @@ int acpi_video_register(void)
175 + * win8 ready (where we also prefer the native backlight driver, so
176 + * normally the acpi_video code should not register there anyways).
177 + */
178 +- if (only_lcd == -1)
179 +- only_lcd = acpi_osi_is_win8();
180 ++ if (only_lcd == -1) {
181 ++ if (dmi_is_desktop() && acpi_osi_is_win8())
182 ++ only_lcd = true;
183 ++ else
184 ++ only_lcd = false;
185 ++ }
186 +
187 + dmi_check_system(video_dmi_table);
188 +
189 +diff --git a/drivers/block/swim.c b/drivers/block/swim.c
190 +index 64e066eba72e..0e31884a9519 100644
191 +--- a/drivers/block/swim.c
192 ++++ b/drivers/block/swim.c
193 +@@ -110,7 +110,7 @@ struct iwm {
194 + /* Select values for swim_select and swim_readbit */
195 +
196 + #define READ_DATA_0 0x074
197 +-#define TWOMEG_DRIVE 0x075
198 ++#define ONEMEG_DRIVE 0x075
199 + #define SINGLE_SIDED 0x076
200 + #define DRIVE_PRESENT 0x077
201 + #define DISK_IN 0x170
202 +@@ -118,9 +118,9 @@ struct iwm {
203 + #define TRACK_ZERO 0x172
204 + #define TACHO 0x173
205 + #define READ_DATA_1 0x174
206 +-#define MFM_MODE 0x175
207 ++#define GCR_MODE 0x175
208 + #define SEEK_COMPLETE 0x176
209 +-#define ONEMEG_MEDIA 0x177
210 ++#define TWOMEG_MEDIA 0x177
211 +
212 + /* Bits in handshake register */
213 +
214 +@@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs)
215 + struct floppy_struct *g;
216 + fs->disk_in = 1;
217 + fs->write_protected = swim_readbit(base, WRITE_PROT);
218 +- fs->type = swim_readbit(base, ONEMEG_MEDIA);
219 +
220 + if (swim_track00(base))
221 + printk(KERN_ERR
222 +@@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs)
223 +
224 + swim_track00(base);
225 +
226 ++ fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
227 ++ HD_MEDIA : DD_MEDIA;
228 ++ fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
229 + get_floppy_geometry(fs, 0, &g);
230 + fs->total_secs = g->size;
231 + fs->secpercyl = g->head * g->sect;
232 +@@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
233 +
234 + swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2);
235 + udelay(10);
236 +- swim_drive(base, INTERNAL_DRIVE);
237 ++ swim_drive(base, fs->location);
238 + swim_motor(base, ON);
239 + swim_action(base, SETMFM);
240 + if (fs->ejected)
241 +@@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
242 + goto out;
243 + }
244 +
245 ++ set_capacity(fs->disk, fs->total_secs);
246 ++
247 + if (mode & FMODE_NDELAY)
248 + return 0;
249 +
250 +@@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
251 + if (copy_to_user((void __user *) param, (void *) &floppy_type,
252 + sizeof(struct floppy_struct)))
253 + return -EFAULT;
254 +- break;
255 +-
256 +- default:
257 +- printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n",
258 +- cmd);
259 +- return -ENOSYS;
260 ++ return 0;
261 + }
262 +- return 0;
263 ++ return -ENOTTY;
264 + }
265 +
266 + static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
267 +@@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
268 + struct swim_priv *swd = data;
269 + int drive = (*part & 3);
270 +
271 +- if (drive > swd->floppy_count)
272 ++ if (drive >= swd->floppy_count)
273 + return NULL;
274 +
275 + *part = 0;
276 +@@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
277 +
278 + swim_motor(base, OFF);
279 +
280 +- if (swim_readbit(base, SINGLE_SIDED))
281 +- fs->head_number = 1;
282 +- else
283 +- fs->head_number = 2;
284 ++ fs->type = HD_MEDIA;
285 ++ fs->head_number = 2;
286 ++
287 + fs->ref_count = 0;
288 + fs->ejected = 1;
289 +
290 +@@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd)
291 + /* scan floppy drives */
292 +
293 + swim_drive(base, INTERNAL_DRIVE);
294 +- if (swim_readbit(base, DRIVE_PRESENT))
295 ++ if (swim_readbit(base, DRIVE_PRESENT) &&
296 ++ !swim_readbit(base, ONEMEG_DRIVE))
297 + swim_add_floppy(swd, INTERNAL_DRIVE);
298 + swim_drive(base, EXTERNAL_DRIVE);
299 +- if (swim_readbit(base, DRIVE_PRESENT))
300 ++ if (swim_readbit(base, DRIVE_PRESENT) &&
301 ++ !swim_readbit(base, ONEMEG_DRIVE))
302 + swim_add_floppy(swd, EXTERNAL_DRIVE);
303 +
304 + /* register floppy drives */
305 +@@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd)
306 + &swd->lock);
307 + if (!swd->unit[drive].disk->queue) {
308 + err = -ENOMEM;
309 +- put_disk(swd->unit[drive].disk);
310 + goto exit_put_disks;
311 + }
312 + blk_queue_bounce_limit(swd->unit[drive].disk->queue,
313 +@@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev)
314 + goto out;
315 + }
316 +
317 +- swim_base = ioremap(res->start, resource_size(res));
318 ++ swim_base = (struct swim __iomem *)res->start;
319 + if (!swim_base) {
320 + ret = -ENOMEM;
321 + goto out_release_io;
322 +@@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev)
323 + if (!get_swim_mode(swim_base)) {
324 + printk(KERN_INFO "SWIM device not found !\n");
325 + ret = -ENODEV;
326 +- goto out_iounmap;
327 ++ goto out_release_io;
328 + }
329 +
330 + /* set platform driver data */
331 +@@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev)
332 + swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
333 + if (!swd) {
334 + ret = -ENOMEM;
335 +- goto out_iounmap;
336 ++ goto out_release_io;
337 + }
338 + platform_set_drvdata(dev, swd);
339 +
340 +@@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev)
341 +
342 + out_kfree:
343 + kfree(swd);
344 +-out_iounmap:
345 +- iounmap(swim_base);
346 + out_release_io:
347 + release_mem_region(res->start, resource_size(res));
348 + out:
349 +@@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev)
350 + for (drive = 0; drive < swd->floppy_count; drive++)
351 + floppy_eject(&swd->unit[drive]);
352 +
353 +- iounmap(swd->base);
354 +-
355 + res = platform_get_resource(dev, IORESOURCE_MEM, 0);
356 + if (res)
357 + release_mem_region(res->start, resource_size(res));
358 +diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
359 +index af51015d056e..469541c1e51e 100644
360 +--- a/drivers/block/swim3.c
361 ++++ b/drivers/block/swim3.c
362 +@@ -148,7 +148,7 @@ struct swim3 {
363 + #define MOTOR_ON 2
364 + #define RELAX 3 /* also eject in progress */
365 + #define READ_DATA_0 4
366 +-#define TWOMEG_DRIVE 5
367 ++#define ONEMEG_DRIVE 5
368 + #define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */
369 + #define DRIVE_PRESENT 7
370 + #define DISK_IN 8
371 +@@ -156,9 +156,9 @@ struct swim3 {
372 + #define TRACK_ZERO 10
373 + #define TACHO 11
374 + #define READ_DATA_1 12
375 +-#define MFM_MODE 13
376 ++#define GCR_MODE 13
377 + #define SEEK_COMPLETE 14
378 +-#define ONEMEG_MEDIA 15
379 ++#define TWOMEG_MEDIA 15
380 +
381 + /* Definitions of values used in writing and formatting */
382 + #define DATA_ESCAPE 0x99
383 +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
384 +index e36d160c458f..5f7d86509f2f 100644
385 +--- a/drivers/cdrom/cdrom.c
386 ++++ b/drivers/cdrom/cdrom.c
387 +@@ -2374,7 +2374,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
388 + if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
389 + return media_changed(cdi, 1);
390 +
391 +- if ((unsigned int)arg >= cdi->capacity)
392 ++ if (arg >= cdi->capacity)
393 + return -EINVAL;
394 +
395 + info = kmalloc(sizeof(*info), GFP_KERNEL);
396 +diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
397 +index 248c04090dea..255db6fe15c8 100644
398 +--- a/drivers/char/tpm/tpm-interface.c
399 ++++ b/drivers/char/tpm/tpm-interface.c
400 +@@ -369,20 +369,40 @@ static int tpm_validate_command(struct tpm_chip *chip,
401 + return -EINVAL;
402 + }
403 +
404 +-/**
405 +- * tmp_transmit - Internal kernel interface to transmit TPM commands.
406 +- *
407 +- * @chip: TPM chip to use
408 +- * @buf: TPM command buffer
409 +- * @bufsiz: length of the TPM command buffer
410 +- * @flags: tpm transmit flags - bitmap
411 +- *
412 +- * Return:
413 +- * 0 when the operation is successful.
414 +- * A negative number for system errors (errno).
415 +- */
416 +-ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
417 +- u8 *buf, size_t bufsiz, unsigned int flags)
418 ++static int tpm_request_locality(struct tpm_chip *chip)
419 ++{
420 ++ int rc;
421 ++
422 ++ if (!chip->ops->request_locality)
423 ++ return 0;
424 ++
425 ++ rc = chip->ops->request_locality(chip, 0);
426 ++ if (rc < 0)
427 ++ return rc;
428 ++
429 ++ chip->locality = rc;
430 ++
431 ++ return 0;
432 ++}
433 ++
434 ++static void tpm_relinquish_locality(struct tpm_chip *chip)
435 ++{
436 ++ int rc;
437 ++
438 ++ if (!chip->ops->relinquish_locality)
439 ++ return;
440 ++
441 ++ rc = chip->ops->relinquish_locality(chip, chip->locality);
442 ++ if (rc)
443 ++ dev_err(&chip->dev, "%s: : error %d\n", __func__, rc);
444 ++
445 ++ chip->locality = -1;
446 ++}
447 ++
448 ++static ssize_t tpm_try_transmit(struct tpm_chip *chip,
449 ++ struct tpm_space *space,
450 ++ u8 *buf, size_t bufsiz,
451 ++ unsigned int flags)
452 + {
453 + struct tpm_output_header *header = (void *)buf;
454 + int rc;
455 +@@ -422,8 +442,6 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
456 + if (!(flags & TPM_TRANSMIT_UNLOCKED))
457 + mutex_lock(&chip->tpm_mutex);
458 +
459 +- if (chip->dev.parent)
460 +- pm_runtime_get_sync(chip->dev.parent);
461 +
462 + if (chip->ops->clk_enable != NULL)
463 + chip->ops->clk_enable(chip, true);
464 +@@ -431,14 +449,15 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
465 + /* Store the decision as chip->locality will be changed. */
466 + need_locality = chip->locality == -1;
467 +
468 +- if (!(flags & TPM_TRANSMIT_RAW) &&
469 +- need_locality && chip->ops->request_locality) {
470 +- rc = chip->ops->request_locality(chip, 0);
471 ++ if (!(flags & TPM_TRANSMIT_RAW) && need_locality) {
472 ++ rc = tpm_request_locality(chip);
473 + if (rc < 0)
474 + goto out_no_locality;
475 +- chip->locality = rc;
476 + }
477 +
478 ++ if (chip->dev.parent)
479 ++ pm_runtime_get_sync(chip->dev.parent);
480 ++
481 + rc = tpm2_prepare_space(chip, space, ordinal, buf);
482 + if (rc)
483 + goto out;
484 +@@ -499,27 +518,83 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
485 + rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
486 +
487 + out:
488 +- if (need_locality && chip->ops->relinquish_locality) {
489 +- chip->ops->relinquish_locality(chip, chip->locality);
490 +- chip->locality = -1;
491 +- }
492 ++ if (chip->dev.parent)
493 ++ pm_runtime_put_sync(chip->dev.parent);
494 ++
495 ++ if (need_locality)
496 ++ tpm_relinquish_locality(chip);
497 ++
498 + out_no_locality:
499 + if (chip->ops->clk_enable != NULL)
500 + chip->ops->clk_enable(chip, false);
501 +
502 +- if (chip->dev.parent)
503 +- pm_runtime_put_sync(chip->dev.parent);
504 +-
505 + if (!(flags & TPM_TRANSMIT_UNLOCKED))
506 + mutex_unlock(&chip->tpm_mutex);
507 + return rc ? rc : len;
508 + }
509 +
510 + /**
511 +- * tmp_transmit_cmd - send a tpm command to the device
512 ++ * tpm_transmit - Internal kernel interface to transmit TPM commands.
513 ++ *
514 ++ * @chip: TPM chip to use
515 ++ * @space: tpm space
516 ++ * @buf: TPM command buffer
517 ++ * @bufsiz: length of the TPM command buffer
518 ++ * @flags: tpm transmit flags - bitmap
519 ++ *
520 ++ * A wrapper around tpm_try_transmit that handles TPM2_RC_RETRY
521 ++ * returns from the TPM and retransmits the command after a delay up
522 ++ * to a maximum wait of TPM2_DURATION_LONG.
523 ++ *
524 ++ * Note: TPM1 never returns TPM2_RC_RETRY so the retry logic is TPM2
525 ++ * only
526 ++ *
527 ++ * Return:
528 ++ * the length of the return when the operation is successful.
529 ++ * A negative number for system errors (errno).
530 ++ */
531 ++ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
532 ++ u8 *buf, size_t bufsiz, unsigned int flags)
533 ++{
534 ++ struct tpm_output_header *header = (struct tpm_output_header *)buf;
535 ++ /* space for header and handles */
536 ++ u8 save[TPM_HEADER_SIZE + 3*sizeof(u32)];
537 ++ unsigned int delay_msec = TPM2_DURATION_SHORT;
538 ++ u32 rc = 0;
539 ++ ssize_t ret;
540 ++ const size_t save_size = min(space ? sizeof(save) : TPM_HEADER_SIZE,
541 ++ bufsiz);
542 ++
543 ++ /*
544 ++ * Subtlety here: if we have a space, the handles will be
545 ++ * transformed, so when we restore the header we also have to
546 ++ * restore the handles.
547 ++ */
548 ++ memcpy(save, buf, save_size);
549 ++
550 ++ for (;;) {
551 ++ ret = tpm_try_transmit(chip, space, buf, bufsiz, flags);
552 ++ if (ret < 0)
553 ++ break;
554 ++ rc = be32_to_cpu(header->return_code);
555 ++ if (rc != TPM2_RC_RETRY)
556 ++ break;
557 ++ delay_msec *= 2;
558 ++ if (delay_msec > TPM2_DURATION_LONG) {
559 ++ dev_err(&chip->dev, "TPM is in retry loop\n");
560 ++ break;
561 ++ }
562 ++ tpm_msleep(delay_msec);
563 ++ memcpy(buf, save, save_size);
564 ++ }
565 ++ return ret;
566 ++}
567 ++/**
568 ++ * tpm_transmit_cmd - send a tpm command to the device
569 + * The function extracts tpm out header return code
570 + *
571 + * @chip: TPM chip to use
572 ++ * @space: tpm space
573 + * @buf: TPM command buffer
574 + * @bufsiz: length of the buffer
575 + * @min_rsp_body_length: minimum expected length of response body
576 +diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
577 +index f895fba4e20d..d73f3fb81b42 100644
578 +--- a/drivers/char/tpm/tpm.h
579 ++++ b/drivers/char/tpm/tpm.h
580 +@@ -108,6 +108,7 @@ enum tpm2_return_codes {
581 + TPM2_RC_COMMAND_CODE = 0x0143,
582 + TPM2_RC_TESTING = 0x090A, /* RC_WARN */
583 + TPM2_RC_REFERENCE_H0 = 0x0910,
584 ++ TPM2_RC_RETRY = 0x0922,
585 + };
586 +
587 + enum tpm2_algorithms {
588 +diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
589 +index 7b3c2a8aa9de..497edd9848cd 100644
590 +--- a/drivers/char/tpm/tpm_crb.c
591 ++++ b/drivers/char/tpm/tpm_crb.c
592 +@@ -112,6 +112,25 @@ struct tpm2_crb_smc {
593 + u32 smc_func_id;
594 + };
595 +
596 ++static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
597 ++ unsigned long timeout)
598 ++{
599 ++ ktime_t start;
600 ++ ktime_t stop;
601 ++
602 ++ start = ktime_get();
603 ++ stop = ktime_add(start, ms_to_ktime(timeout));
604 ++
605 ++ do {
606 ++ if ((ioread32(reg) & mask) == value)
607 ++ return true;
608 ++
609 ++ usleep_range(50, 100);
610 ++ } while (ktime_before(ktime_get(), stop));
611 ++
612 ++ return ((ioread32(reg) & mask) == value);
613 ++}
614 ++
615 + /**
616 + * crb_go_idle - request tpm crb device to go the idle state
617 + *
618 +@@ -128,7 +147,7 @@ struct tpm2_crb_smc {
619 + *
620 + * Return: 0 always
621 + */
622 +-static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
623 ++static int crb_go_idle(struct device *dev, struct crb_priv *priv)
624 + {
625 + if ((priv->sm == ACPI_TPM2_START_METHOD) ||
626 + (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
627 +@@ -136,30 +155,17 @@ static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
628 + return 0;
629 +
630 + iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
631 +- /* we don't really care when this settles */
632 +
633 ++ if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
634 ++ CRB_CTRL_REQ_GO_IDLE/* mask */,
635 ++ 0, /* value */
636 ++ TPM2_TIMEOUT_C)) {
637 ++ dev_warn(dev, "goIdle timed out\n");
638 ++ return -ETIME;
639 ++ }
640 + return 0;
641 + }
642 +
643 +-static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
644 +- unsigned long timeout)
645 +-{
646 +- ktime_t start;
647 +- ktime_t stop;
648 +-
649 +- start = ktime_get();
650 +- stop = ktime_add(start, ms_to_ktime(timeout));
651 +-
652 +- do {
653 +- if ((ioread32(reg) & mask) == value)
654 +- return true;
655 +-
656 +- usleep_range(50, 100);
657 +- } while (ktime_before(ktime_get(), stop));
658 +-
659 +- return false;
660 +-}
661 +-
662 + /**
663 + * crb_cmd_ready - request tpm crb device to enter ready state
664 + *
665 +@@ -175,8 +181,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
666 + *
667 + * Return: 0 on success -ETIME on timeout;
668 + */
669 +-static int __maybe_unused crb_cmd_ready(struct device *dev,
670 +- struct crb_priv *priv)
671 ++static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
672 + {
673 + if ((priv->sm == ACPI_TPM2_START_METHOD) ||
674 + (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
675 +@@ -195,11 +200,11 @@ static int __maybe_unused crb_cmd_ready(struct device *dev,
676 + return 0;
677 + }
678 +
679 +-static int crb_request_locality(struct tpm_chip *chip, int loc)
680 ++static int __crb_request_locality(struct device *dev,
681 ++ struct crb_priv *priv, int loc)
682 + {
683 +- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
684 + u32 value = CRB_LOC_STATE_LOC_ASSIGNED |
685 +- CRB_LOC_STATE_TPM_REG_VALID_STS;
686 ++ CRB_LOC_STATE_TPM_REG_VALID_STS;
687 +
688 + if (!priv->regs_h)
689 + return 0;
690 +@@ -207,21 +212,45 @@ static int crb_request_locality(struct tpm_chip *chip, int loc)
691 + iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl);
692 + if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value,
693 + TPM2_TIMEOUT_C)) {
694 +- dev_warn(&chip->dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
695 ++ dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
696 + return -ETIME;
697 + }
698 +
699 + return 0;
700 + }
701 +
702 +-static void crb_relinquish_locality(struct tpm_chip *chip, int loc)
703 ++static int crb_request_locality(struct tpm_chip *chip, int loc)
704 + {
705 + struct crb_priv *priv = dev_get_drvdata(&chip->dev);
706 +
707 ++ return __crb_request_locality(&chip->dev, priv, loc);
708 ++}
709 ++
710 ++static int __crb_relinquish_locality(struct device *dev,
711 ++ struct crb_priv *priv, int loc)
712 ++{
713 ++ u32 mask = CRB_LOC_STATE_LOC_ASSIGNED |
714 ++ CRB_LOC_STATE_TPM_REG_VALID_STS;
715 ++ u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS;
716 ++
717 + if (!priv->regs_h)
718 +- return;
719 ++ return 0;
720 +
721 + iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
722 ++ if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
723 ++ TPM2_TIMEOUT_C)) {
724 ++ dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
725 ++ return -ETIME;
726 ++ }
727 ++
728 ++ return 0;
729 ++}
730 ++
731 ++static int crb_relinquish_locality(struct tpm_chip *chip, int loc)
732 ++{
733 ++ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
734 ++
735 ++ return __crb_relinquish_locality(&chip->dev, priv, loc);
736 + }
737 +
738 + static u8 crb_status(struct tpm_chip *chip)
739 +@@ -475,6 +504,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
740 + dev_warn(dev, FW_BUG "Bad ACPI memory layout");
741 + }
742 +
743 ++ ret = __crb_request_locality(dev, priv, 0);
744 ++ if (ret)
745 ++ return ret;
746 ++
747 + priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
748 + sizeof(struct crb_regs_tail));
749 + if (IS_ERR(priv->regs_t))
750 +@@ -531,6 +564,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
751 +
752 + crb_go_idle(dev, priv);
753 +
754 ++ __crb_relinquish_locality(dev, priv, 0);
755 ++
756 + return ret;
757 + }
758 +
759 +@@ -588,10 +623,14 @@ static int crb_acpi_add(struct acpi_device *device)
760 + chip->acpi_dev_handle = device->handle;
761 + chip->flags = TPM_CHIP_FLAG_TPM2;
762 +
763 +- rc = crb_cmd_ready(dev, priv);
764 ++ rc = __crb_request_locality(dev, priv, 0);
765 + if (rc)
766 + return rc;
767 +
768 ++ rc = crb_cmd_ready(dev, priv);
769 ++ if (rc)
770 ++ goto out;
771 ++
772 + pm_runtime_get_noresume(dev);
773 + pm_runtime_set_active(dev);
774 + pm_runtime_enable(dev);
775 +@@ -601,12 +640,15 @@ static int crb_acpi_add(struct acpi_device *device)
776 + crb_go_idle(dev, priv);
777 + pm_runtime_put_noidle(dev);
778 + pm_runtime_disable(dev);
779 +- return rc;
780 ++ goto out;
781 + }
782 +
783 +- pm_runtime_put(dev);
784 ++ pm_runtime_put_sync(dev);
785 +
786 +- return 0;
787 ++out:
788 ++ __crb_relinquish_locality(dev, priv, 0);
789 ++
790 ++ return rc;
791 + }
792 +
793 + static int crb_acpi_remove(struct acpi_device *device)
794 +diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
795 +index da074e3db19b..5a1f47b43947 100644
796 +--- a/drivers/char/tpm/tpm_tis_core.c
797 ++++ b/drivers/char/tpm/tpm_tis_core.c
798 +@@ -143,11 +143,13 @@ static bool check_locality(struct tpm_chip *chip, int l)
799 + return false;
800 + }
801 +
802 +-static void release_locality(struct tpm_chip *chip, int l)
803 ++static int release_locality(struct tpm_chip *chip, int l)
804 + {
805 + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
806 +
807 + tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
808 ++
809 ++ return 0;
810 + }
811 +
812 + static int request_locality(struct tpm_chip *chip, int l)
813 +diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
814 +index a38db40ce990..b2447ee3b245 100644
815 +--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
816 ++++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
817 +@@ -1637,6 +1637,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
818 + * (and possibly on the platform). So far only i.MX6Q (v1.30a) and
819 + * i.MX6DL (v1.31a) have been identified as needing the workaround, with
820 + * 4 and 1 iterations respectively.
821 ++ * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
822 ++ * the workaround with a single iteration.
823 + */
824 +
825 + switch (hdmi->version) {
826 +@@ -1644,6 +1646,7 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
827 + count = 4;
828 + break;
829 + case 0x131a:
830 ++ case 0x201a:
831 + count = 1;
832 + break;
833 + default:
834 +diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
835 +index 051a72eecb24..d2cc55e21374 100644
836 +--- a/drivers/hwmon/k10temp.c
837 ++++ b/drivers/hwmon/k10temp.c
838 +@@ -40,6 +40,10 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
839 + #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
840 + #endif
841 +
842 ++#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB
843 ++#define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0
844 ++#endif
845 ++
846 + /* CPUID function 0x80000001, ebx */
847 + #define CPUID_PKGTYPE_MASK 0xf0000000
848 + #define CPUID_PKGTYPE_F 0x00000000
849 +@@ -72,6 +76,7 @@ struct k10temp_data {
850 + struct pci_dev *pdev;
851 + void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
852 + int temp_offset;
853 ++ u32 temp_adjust_mask;
854 + };
855 +
856 + struct tctl_offset {
857 +@@ -84,6 +89,7 @@ static const struct tctl_offset tctl_offset_table[] = {
858 + { 0x17, "AMD Ryzen 5 1600X", 20000 },
859 + { 0x17, "AMD Ryzen 7 1700X", 20000 },
860 + { 0x17, "AMD Ryzen 7 1800X", 20000 },
861 ++ { 0x17, "AMD Ryzen 7 2700X", 10000 },
862 + { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
863 + { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
864 + { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
865 +@@ -129,6 +135,8 @@ static ssize_t temp1_input_show(struct device *dev,
866 +
867 + data->read_tempreg(data->pdev, &regval);
868 + temp = (regval >> 21) * 125;
869 ++ if (regval & data->temp_adjust_mask)
870 ++ temp -= 49000;
871 + if (temp > data->temp_offset)
872 + temp -= data->temp_offset;
873 + else
874 +@@ -259,12 +267,14 @@ static int k10temp_probe(struct pci_dev *pdev,
875 + data->pdev = pdev;
876 +
877 + if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
878 +- boot_cpu_data.x86_model == 0x70))
879 ++ boot_cpu_data.x86_model == 0x70)) {
880 + data->read_tempreg = read_tempreg_nb_f15;
881 +- else if (boot_cpu_data.x86 == 0x17)
882 ++ } else if (boot_cpu_data.x86 == 0x17) {
883 ++ data->temp_adjust_mask = 0x80000;
884 + data->read_tempreg = read_tempreg_nb_f17;
885 +- else
886 ++ } else {
887 + data->read_tempreg = read_tempreg_pci;
888 ++ }
889 +
890 + for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
891 + const struct tctl_offset *entry = &tctl_offset_table[i];
892 +@@ -292,6 +302,7 @@ static const struct pci_device_id k10temp_id_table[] = {
893 + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
894 + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
895 + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
896 ++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) },
897 + {}
898 + };
899 + MODULE_DEVICE_TABLE(pci, k10temp_id_table);
900 +diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
901 +index 439ee9c5f535..c59b5da85321 100644
902 +--- a/drivers/message/fusion/mptsas.c
903 ++++ b/drivers/message/fusion/mptsas.c
904 +@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
905 + .cmd_per_lun = 7,
906 + .use_clustering = ENABLE_CLUSTERING,
907 + .shost_attrs = mptscsih_host_attrs,
908 ++ .no_write_same = 1,
909 + };
910 +
911 + static int mptsas_get_linkerrors(struct sas_phy *phy)
912 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
913 +index b7b113018853..718e4914e3a0 100644
914 +--- a/drivers/net/bonding/bond_main.c
915 ++++ b/drivers/net/bonding/bond_main.c
916 +@@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
917 + } /* switch(bond_mode) */
918 +
919 + #ifdef CONFIG_NET_POLL_CONTROLLER
920 +- slave_dev->npinfo = bond->dev->npinfo;
921 +- if (slave_dev->npinfo) {
922 ++ if (bond->dev->npinfo) {
923 + if (slave_enable_netpoll(new_slave)) {
924 + netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
925 + res = -EBUSY;
926 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
927 +index 7ea72ef11a55..d272dc6984ac 100644
928 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
929 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
930 +@@ -1321,6 +1321,10 @@
931 + #define MDIO_VEND2_AN_STAT 0x8002
932 + #endif
933 +
934 ++#ifndef MDIO_VEND2_PMA_CDR_CONTROL
935 ++#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
936 ++#endif
937 ++
938 + #ifndef MDIO_CTRL1_SPEED1G
939 + #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
940 + #endif
941 +@@ -1369,6 +1373,10 @@
942 + #define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
943 + #define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100
944 +
945 ++#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01
946 ++#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
947 ++#define XGBE_PMA_CDR_TRACK_EN_ON 0x01
948 ++
949 + /* Bit setting and getting macros
950 + * The get macro will extract the current bit field value from within
951 + * the variable
952 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
953 +index 7d128be61310..b91143947ed2 100644
954 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
955 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
956 +@@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
957 + "debugfs_create_file failed\n");
958 + }
959 +
960 ++ if (pdata->vdata->an_cdr_workaround) {
961 ++ pfile = debugfs_create_bool("an_cdr_workaround", 0600,
962 ++ pdata->xgbe_debugfs,
963 ++ &pdata->debugfs_an_cdr_workaround);
964 ++ if (!pfile)
965 ++ netdev_err(pdata->netdev,
966 ++ "debugfs_create_bool failed\n");
967 ++
968 ++ pfile = debugfs_create_bool("an_cdr_track_early", 0600,
969 ++ pdata->xgbe_debugfs,
970 ++ &pdata->debugfs_an_cdr_track_early);
971 ++ if (!pfile)
972 ++ netdev_err(pdata->netdev,
973 ++ "debugfs_create_bool failed\n");
974 ++ }
975 ++
976 + kfree(buf);
977 + }
978 +
979 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
980 +index d91fa595be98..e31d9d1fb6a6 100644
981 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
982 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
983 +@@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
984 + XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
985 +
986 + /* Call MDIO/PHY initialization routine */
987 ++ pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
988 + ret = pdata->phy_if.phy_init(pdata);
989 + if (ret)
990 + return ret;
991 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
992 +index 072b9f664597..1b45cd73a258 100644
993 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
994 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
995 +@@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
996 + xgbe_an73_set(pdata, false, false);
997 + xgbe_an73_disable_interrupts(pdata);
998 +
999 ++ pdata->an_start = 0;
1000 ++
1001 + netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
1002 + }
1003 +
1004 + static void xgbe_an_restart(struct xgbe_prv_data *pdata)
1005 + {
1006 ++ if (pdata->phy_if.phy_impl.an_pre)
1007 ++ pdata->phy_if.phy_impl.an_pre(pdata);
1008 ++
1009 + switch (pdata->an_mode) {
1010 + case XGBE_AN_MODE_CL73:
1011 + case XGBE_AN_MODE_CL73_REDRV:
1012 +@@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata)
1013 +
1014 + static void xgbe_an_disable(struct xgbe_prv_data *pdata)
1015 + {
1016 ++ if (pdata->phy_if.phy_impl.an_post)
1017 ++ pdata->phy_if.phy_impl.an_post(pdata);
1018 ++
1019 + switch (pdata->an_mode) {
1020 + case XGBE_AN_MODE_CL73:
1021 + case XGBE_AN_MODE_CL73_REDRV:
1022 +@@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
1023 + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
1024 + reg);
1025 +
1026 +- if (pdata->phy_if.phy_impl.kr_training_post)
1027 +- pdata->phy_if.phy_impl.kr_training_post(pdata);
1028 +-
1029 + netif_dbg(pdata, link, pdata->netdev,
1030 + "KR training initiated\n");
1031 ++
1032 ++ if (pdata->phy_if.phy_impl.kr_training_post)
1033 ++ pdata->phy_if.phy_impl.kr_training_post(pdata);
1034 + }
1035 +
1036 + return XGBE_AN_PAGE_RECEIVED;
1037 +@@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
1038 + return XGBE_AN_NO_LINK;
1039 + }
1040 +
1041 +- xgbe_an73_disable(pdata);
1042 ++ xgbe_an_disable(pdata);
1043 +
1044 + xgbe_switch_mode(pdata);
1045 +
1046 +- xgbe_an73_restart(pdata);
1047 ++ xgbe_an_restart(pdata);
1048 +
1049 + return XGBE_AN_INCOMPAT_LINK;
1050 + }
1051 +@@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
1052 + pdata->an_result = pdata->an_state;
1053 + pdata->an_state = XGBE_AN_READY;
1054 +
1055 ++ if (pdata->phy_if.phy_impl.an_post)
1056 ++ pdata->phy_if.phy_impl.an_post(pdata);
1057 ++
1058 + netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
1059 + xgbe_state_as_string(pdata->an_result));
1060 + }
1061 +@@ -903,6 +914,9 @@ static void xgbe_an73_state_machine(struct xgbe_prv_data *pdata)
1062 + pdata->kx_state = XGBE_RX_BPA;
1063 + pdata->an_start = 0;
1064 +
1065 ++ if (pdata->phy_if.phy_impl.an_post)
1066 ++ pdata->phy_if.phy_impl.an_post(pdata);
1067 ++
1068 + netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
1069 + xgbe_state_as_string(pdata->an_result));
1070 + }
1071 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
1072 +index eb23f9ba1a9a..82d1f416ee2a 100644
1073 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
1074 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
1075 +@@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = {
1076 + .irq_reissue_support = 1,
1077 + .tx_desc_prefetch = 5,
1078 + .rx_desc_prefetch = 5,
1079 ++ .an_cdr_workaround = 1,
1080 + };
1081 +
1082 + static const struct xgbe_version_data xgbe_v2b = {
1083 +@@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = {
1084 + .irq_reissue_support = 1,
1085 + .tx_desc_prefetch = 5,
1086 + .rx_desc_prefetch = 5,
1087 ++ .an_cdr_workaround = 1,
1088 + };
1089 +
1090 + static const struct pci_device_id xgbe_pci_table[] = {
1091 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
1092 +index 3304a291aa96..aac884314000 100644
1093 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
1094 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
1095 +@@ -147,6 +147,14 @@
1096 + /* Rate-change complete wait/retry count */
1097 + #define XGBE_RATECHANGE_COUNT 500
1098 +
1099 ++/* CDR delay values for KR support (in usec) */
1100 ++#define XGBE_CDR_DELAY_INIT 10000
1101 ++#define XGBE_CDR_DELAY_INC 10000
1102 ++#define XGBE_CDR_DELAY_MAX 100000
1103 ++
1104 ++/* RRC frequency during link status check */
1105 ++#define XGBE_RRC_FREQUENCY 10
1106 ++
1107 + enum xgbe_port_mode {
1108 + XGBE_PORT_MODE_RSVD = 0,
1109 + XGBE_PORT_MODE_BACKPLANE,
1110 +@@ -245,6 +253,10 @@ enum xgbe_sfp_speed {
1111 + #define XGBE_SFP_BASE_VENDOR_SN 4
1112 + #define XGBE_SFP_BASE_VENDOR_SN_LEN 16
1113 +
1114 ++#define XGBE_SFP_EXTD_OPT1 1
1115 ++#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1)
1116 ++#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3)
1117 ++
1118 + #define XGBE_SFP_EXTD_DIAG 28
1119 + #define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
1120 +
1121 +@@ -324,6 +336,7 @@ struct xgbe_phy_data {
1122 +
1123 + unsigned int sfp_gpio_address;
1124 + unsigned int sfp_gpio_mask;
1125 ++ unsigned int sfp_gpio_inputs;
1126 + unsigned int sfp_gpio_rx_los;
1127 + unsigned int sfp_gpio_tx_fault;
1128 + unsigned int sfp_gpio_mod_absent;
1129 +@@ -355,6 +368,10 @@ struct xgbe_phy_data {
1130 + unsigned int redrv_addr;
1131 + unsigned int redrv_lane;
1132 + unsigned int redrv_model;
1133 ++
1134 ++ /* KR AN support */
1135 ++ unsigned int phy_cdr_notrack;
1136 ++ unsigned int phy_cdr_delay;
1137 + };
1138 +
1139 + /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
1140 +@@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
1141 + phy_data->sfp_phy_avail = 1;
1142 + }
1143 +
1144 ++static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
1145 ++{
1146 ++ u8 *sfp_extd = phy_data->sfp_eeprom.extd;
1147 ++
1148 ++ if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
1149 ++ return false;
1150 ++
1151 ++ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
1152 ++ return false;
1153 ++
1154 ++ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
1155 ++ return true;
1156 ++
1157 ++ return false;
1158 ++}
1159 ++
1160 ++static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
1161 ++{
1162 ++ u8 *sfp_extd = phy_data->sfp_eeprom.extd;
1163 ++
1164 ++ if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
1165 ++ return false;
1166 ++
1167 ++ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
1168 ++ return false;
1169 ++
1170 ++ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
1171 ++ return true;
1172 ++
1173 ++ return false;
1174 ++}
1175 ++
1176 ++static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
1177 ++{
1178 ++ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
1179 ++ return false;
1180 ++
1181 ++ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
1182 ++ return true;
1183 ++
1184 ++ return false;
1185 ++}
1186 ++
1187 + static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
1188 + {
1189 + struct xgbe_phy_data *phy_data = pdata->phy_data;
1190 +@@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
1191 + if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
1192 + return;
1193 +
1194 ++ /* Update transceiver signals (eeprom extd/options) */
1195 ++ phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
1196 ++ phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
1197 ++
1198 + if (xgbe_phy_sfp_parse_quirks(pdata))
1199 + return;
1200 +
1201 +@@ -1184,7 +1248,6 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
1202 + static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
1203 + {
1204 + struct xgbe_phy_data *phy_data = pdata->phy_data;
1205 +- unsigned int gpio_input;
1206 + u8 gpio_reg, gpio_ports[2];
1207 + int ret;
1208 +
1209 +@@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
1210 + return;
1211 + }
1212 +
1213 +- gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
1214 +-
1215 +- if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) {
1216 +- /* No GPIO, just assume the module is present for now */
1217 +- phy_data->sfp_mod_absent = 0;
1218 +- } else {
1219 +- if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
1220 +- phy_data->sfp_mod_absent = 0;
1221 +- }
1222 +-
1223 +- if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) &&
1224 +- (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
1225 +- phy_data->sfp_rx_los = 1;
1226 ++ phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
1227 +
1228 +- if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) &&
1229 +- (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
1230 +- phy_data->sfp_tx_fault = 1;
1231 ++ phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);
1232 + }
1233 +
1234 + static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
1235 +@@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
1236 + return 1;
1237 +
1238 + /* No link, attempt a receiver reset cycle */
1239 +- if (phy_data->rrc_count++) {
1240 ++ if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
1241 + phy_data->rrc_count = 0;
1242 + xgbe_phy_rrc(pdata);
1243 + }
1244 +@@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
1245 + return true;
1246 + }
1247 +
1248 ++static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata)
1249 ++{
1250 ++ struct xgbe_phy_data *phy_data = pdata->phy_data;
1251 ++
1252 ++ if (!pdata->debugfs_an_cdr_workaround)
1253 ++ return;
1254 ++
1255 ++ if (!phy_data->phy_cdr_notrack)
1256 ++ return;
1257 ++
1258 ++ usleep_range(phy_data->phy_cdr_delay,
1259 ++ phy_data->phy_cdr_delay + 500);
1260 ++
1261 ++ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
1262 ++ XGBE_PMA_CDR_TRACK_EN_MASK,
1263 ++ XGBE_PMA_CDR_TRACK_EN_ON);
1264 ++
1265 ++ phy_data->phy_cdr_notrack = 0;
1266 ++}
1267 ++
1268 ++static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata)
1269 ++{
1270 ++ struct xgbe_phy_data *phy_data = pdata->phy_data;
1271 ++
1272 ++ if (!pdata->debugfs_an_cdr_workaround)
1273 ++ return;
1274 ++
1275 ++ if (phy_data->phy_cdr_notrack)
1276 ++ return;
1277 ++
1278 ++ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
1279 ++ XGBE_PMA_CDR_TRACK_EN_MASK,
1280 ++ XGBE_PMA_CDR_TRACK_EN_OFF);
1281 ++
1282 ++ xgbe_phy_rrc(pdata);
1283 ++
1284 ++ phy_data->phy_cdr_notrack = 1;
1285 ++}
1286 ++
1287 ++static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
1288 ++{
1289 ++ if (!pdata->debugfs_an_cdr_track_early)
1290 ++ xgbe_phy_cdr_track(pdata);
1291 ++}
1292 ++
1293 ++static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
1294 ++{
1295 ++ if (pdata->debugfs_an_cdr_track_early)
1296 ++ xgbe_phy_cdr_track(pdata);
1297 ++}
1298 ++
1299 ++static void xgbe_phy_an_post(struct xgbe_prv_data *pdata)
1300 ++{
1301 ++ struct xgbe_phy_data *phy_data = pdata->phy_data;
1302 ++
1303 ++ switch (pdata->an_mode) {
1304 ++ case XGBE_AN_MODE_CL73:
1305 ++ case XGBE_AN_MODE_CL73_REDRV:
1306 ++ if (phy_data->cur_mode != XGBE_MODE_KR)
1307 ++ break;
1308 ++
1309 ++ xgbe_phy_cdr_track(pdata);
1310 ++
1311 ++ switch (pdata->an_result) {
1312 ++ case XGBE_AN_READY:
1313 ++ case XGBE_AN_COMPLETE:
1314 ++ break;
1315 ++ default:
1316 ++ if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
1317 ++ phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
1318 ++ else
1319 ++ phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
1320 ++ break;
1321 ++ }
1322 ++ break;
1323 ++ default:
1324 ++ break;
1325 ++ }
1326 ++}
1327 ++
1328 ++static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata)
1329 ++{
1330 ++ struct xgbe_phy_data *phy_data = pdata->phy_data;
1331 ++
1332 ++ switch (pdata->an_mode) {
1333 ++ case XGBE_AN_MODE_CL73:
1334 ++ case XGBE_AN_MODE_CL73_REDRV:
1335 ++ if (phy_data->cur_mode != XGBE_MODE_KR)
1336 ++ break;
1337 ++
1338 ++ xgbe_phy_cdr_notrack(pdata);
1339 ++ break;
1340 ++ default:
1341 ++ break;
1342 ++ }
1343 ++}
1344 ++
1345 + static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
1346 + {
1347 + struct xgbe_phy_data *phy_data = pdata->phy_data;
1348 +@@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
1349 + xgbe_phy_sfp_reset(phy_data);
1350 + xgbe_phy_sfp_mod_absent(pdata);
1351 +
1352 ++ /* Reset CDR support */
1353 ++ xgbe_phy_cdr_track(pdata);
1354 ++
1355 + /* Power off the PHY */
1356 + xgbe_phy_power_off(pdata);
1357 +
1358 +@@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
1359 + /* Start in highest supported mode */
1360 + xgbe_phy_set_mode(pdata, phy_data->start_mode);
1361 +
1362 ++ /* Reset CDR support */
1363 ++ xgbe_phy_cdr_track(pdata);
1364 ++
1365 + /* After starting the I2C controller, we can check for an SFP */
1366 + switch (phy_data->port_mode) {
1367 + case XGBE_PORT_MODE_SFP:
1368 +@@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
1369 + }
1370 + }
1371 +
1372 ++ phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
1373 ++
1374 + /* Register for driving external PHYs */
1375 + mii = devm_mdiobus_alloc(pdata->dev);
1376 + if (!mii) {
1377 +@@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
1378 + phy_impl->an_advertising = xgbe_phy_an_advertising;
1379 +
1380 + phy_impl->an_outcome = xgbe_phy_an_outcome;
1381 ++
1382 ++ phy_impl->an_pre = xgbe_phy_an_pre;
1383 ++ phy_impl->an_post = xgbe_phy_an_post;
1384 ++
1385 ++ phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
1386 ++ phy_impl->kr_training_post = xgbe_phy_kr_training_post;
1387 + }
1388 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
1389 +index ad102c8bac7b..95d4b56448c6 100644
1390 +--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
1391 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
1392 +@@ -833,6 +833,7 @@ struct xgbe_hw_if {
1393 + /* This structure represents implementation specific routines for an
1394 + * implementation of a PHY. All routines are required unless noted below.
1395 + * Optional routines:
1396 ++ * an_pre, an_post
1397 + * kr_training_pre, kr_training_post
1398 + */
1399 + struct xgbe_phy_impl_if {
1400 +@@ -875,6 +876,10 @@ struct xgbe_phy_impl_if {
1401 + /* Process results of auto-negotiation */
1402 + enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
1403 +
1404 ++ /* Pre/Post auto-negotiation support */
1405 ++ void (*an_pre)(struct xgbe_prv_data *);
1406 ++ void (*an_post)(struct xgbe_prv_data *);
1407 ++
1408 + /* Pre/Post KR training enablement support */
1409 + void (*kr_training_pre)(struct xgbe_prv_data *);
1410 + void (*kr_training_post)(struct xgbe_prv_data *);
1411 +@@ -989,6 +994,7 @@ struct xgbe_version_data {
1412 + unsigned int irq_reissue_support;
1413 + unsigned int tx_desc_prefetch;
1414 + unsigned int rx_desc_prefetch;
1415 ++ unsigned int an_cdr_workaround;
1416 + };
1417 +
1418 + struct xgbe_vxlan_data {
1419 +@@ -1257,6 +1263,9 @@ struct xgbe_prv_data {
1420 + unsigned int debugfs_xprop_reg;
1421 +
1422 + unsigned int debugfs_xi2c_reg;
1423 ++
1424 ++ bool debugfs_an_cdr_workaround;
1425 ++ bool debugfs_an_cdr_track_early;
1426 + };
1427 +
1428 + /* Function prototypes*/
1429 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1430 +index c96a92118b8b..32f6d2e24d66 100644
1431 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1432 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1433 +@@ -951,9 +951,11 @@ void aq_nic_shutdown(struct aq_nic_s *self)
1434 +
1435 + netif_device_detach(self->ndev);
1436 +
1437 +- err = aq_nic_stop(self);
1438 +- if (err < 0)
1439 +- goto err_exit;
1440 ++ if (netif_running(self->ndev)) {
1441 ++ err = aq_nic_stop(self);
1442 ++ if (err < 0)
1443 ++ goto err_exit;
1444 ++ }
1445 + aq_nic_deinit(self);
1446 +
1447 + err_exit:
1448 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
1449 +index d3b847ec7465..c58b2c227260 100644
1450 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
1451 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
1452 +@@ -48,6 +48,8 @@
1453 + #define FORCE_FLASHLESS 0
1454 +
1455 + static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
1456 ++static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
1457 ++ enum hal_atl_utils_fw_state_e state);
1458 +
1459 + int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
1460 + {
1461 +@@ -247,6 +249,20 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
1462 +
1463 + self->rbl_enabled = (boot_exit_code != 0);
1464 +
1465 ++ /* FW 1.x may bootup in an invalid POWER state (WOL feature).
1466 ++ * We should work around this by forcing its state back to DEINIT
1467 ++ */
1468 ++ if (!hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
1469 ++ aq_hw_read_reg(self,
1470 ++ HW_ATL_MPI_FW_VERSION))) {
1471 ++ int err = 0;
1472 ++
1473 ++ hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
1474 ++ AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
1475 ++ HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
1476 ++ 10, 1000U);
1477 ++ }
1478 ++
1479 + if (self->rbl_enabled)
1480 + return hw_atl_utils_soft_reset_rbl(self);
1481 + else
1482 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1483 +index 1801582076be..9442605f4fd4 100644
1484 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1485 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1486 +@@ -1874,22 +1874,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
1487 + return retval;
1488 + }
1489 +
1490 +-static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
1491 ++static void bnxt_get_pkgver(struct net_device *dev)
1492 + {
1493 ++ struct bnxt *bp = netdev_priv(dev);
1494 + u16 index = 0;
1495 +- u32 datalen;
1496 ++ char *pkgver;
1497 ++ u32 pkglen;
1498 ++ u8 *pkgbuf;
1499 ++ int len;
1500 +
1501 + if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
1502 + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
1503 +- &index, NULL, &datalen) != 0)
1504 +- return NULL;
1505 ++ &index, NULL, &pkglen) != 0)
1506 ++ return;
1507 +
1508 +- memset(buf, 0, buflen);
1509 +- if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0)
1510 +- return NULL;
1511 ++ pkgbuf = kzalloc(pkglen, GFP_KERNEL);
1512 ++ if (!pkgbuf) {
1513 ++ dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
1514 ++ pkglen);
1515 ++ return;
1516 ++ }
1517 ++
1518 ++ if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
1519 ++ goto err;
1520 +
1521 +- return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
1522 +- datalen);
1523 ++ pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
1524 ++ pkglen);
1525 ++ if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
1526 ++ len = strlen(bp->fw_ver_str);
1527 ++ snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
1528 ++ "/pkg %s", pkgver);
1529 ++ }
1530 ++err:
1531 ++ kfree(pkgbuf);
1532 + }
1533 +
1534 + static int bnxt_get_eeprom(struct net_device *dev,
1535 +@@ -2558,22 +2575,10 @@ void bnxt_ethtool_init(struct bnxt *bp)
1536 + struct hwrm_selftest_qlist_input req = {0};
1537 + struct bnxt_test_info *test_info;
1538 + struct net_device *dev = bp->dev;
1539 +- char *pkglog;
1540 + int i, rc;
1541 +
1542 +- pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
1543 +- if (pkglog) {
1544 +- char *pkgver;
1545 +- int len;
1546 ++ bnxt_get_pkgver(dev);
1547 +
1548 +- pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
1549 +- if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
1550 +- len = strlen(bp->fw_ver_str);
1551 +- snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
1552 +- "/pkg %s", pkgver);
1553 +- }
1554 +- kfree(pkglog);
1555 +- }
1556 + if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
1557 + return;
1558 +
1559 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
1560 +index 73f2249555b5..83444811d3c6 100644
1561 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
1562 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
1563 +@@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type {
1564 + #define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
1565 + #define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
1566 +
1567 +-#define BNX_PKG_LOG_MAX_LENGTH 4096
1568 +-
1569 + enum bnxnvm_pkglog_field_index {
1570 + BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
1571 + BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
1572 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1573 +index e9309fb9084b..21a21934e5bf 100644
1574 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1575 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1576 +@@ -2889,6 +2889,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
1577 + int ret = 0;
1578 + struct hlist_node *h;
1579 + int bkt;
1580 ++ u8 i;
1581 +
1582 + /* validate the request */
1583 + if (vf_id >= pf->num_alloc_vfs) {
1584 +@@ -2900,6 +2901,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
1585 +
1586 + vf = &(pf->vf[vf_id]);
1587 + vsi = pf->vsi[vf->lan_vsi_idx];
1588 ++
1589 ++ /* When the VF is resetting wait until it is done.
1590 ++ * It can take up to 200 milliseconds,
1591 ++ * but wait for up to 300 milliseconds to be safe.
1592 ++ */
1593 ++ for (i = 0; i < 15; i++) {
1594 ++ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
1595 ++ break;
1596 ++ msleep(20);
1597 ++ }
1598 + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
1599 + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
1600 + vf_id);
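The i40e hunk bounds the wait for a resetting VF at roughly 300 ms by polling a state bit fifteen times, 20 ms apart, before the pre-existing failure path runs. A minimal sketch of that bounded-poll pattern, with resource_ready() as a hypothetical predicate that becomes true on the fourth poll:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int polls;

/* Hypothetical condition: pretend the resource becomes ready
 * on the fourth poll. */
static bool resource_ready(void)
{
	return ++polls >= 4;
}

static int wait_ready(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 20 * 1000 * 1000 };
	int i;

	/* Poll up to 15 times, 20 ms apart: ~300 ms worst case. */
	for (i = 0; i < 15; i++) {
		if (resource_ready())
			return 0;
		nanosleep(&ts, NULL);
	}
	return resource_ready() ? 0 : -1;  /* one final check, then give up */
}

int main(void)
{
	printf("wait_ready: %d after %d polls\n", wait_ready(), polls);
	return 0;
}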
1601 +diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
1602 +index 5a1668cdb461..7f1083ce23da 100644
1603 +--- a/drivers/net/ethernet/marvell/mvpp2.c
1604 ++++ b/drivers/net/ethernet/marvell/mvpp2.c
1605 +@@ -838,6 +838,8 @@ enum mvpp2_bm_type {
1606 +
1607 + #define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
1608 +
1609 ++#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
1610 ++
1611 + /* Definitions */
1612 +
1613 + /* Shared Packet Processor resources */
1614 +@@ -1336,7 +1338,7 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1615 + if (port->priv->hw_version == MVPP21)
1616 + return tx_desc->pp21.buf_dma_addr;
1617 + else
1618 +- return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
1619 ++ return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
1620 + }
1621 +
1622 + static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1623 +@@ -1354,7 +1356,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1624 + } else {
1625 + u64 val = (u64)addr;
1626 +
1627 +- tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
1628 ++ tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
1629 + tx_desc->pp22.buf_dma_addr_ptp |= val;
1630 + tx_desc->pp22.packet_offset = offset;
1631 + }
1632 +@@ -1414,7 +1416,7 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1633 + if (port->priv->hw_version == MVPP21)
1634 + return rx_desc->pp21.buf_dma_addr;
1635 + else
1636 +- return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
1637 ++ return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
1638 + }
1639 +
1640 + static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1641 +@@ -1423,7 +1425,7 @@ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1642 + if (port->priv->hw_version == MVPP21)
1643 + return rx_desc->pp21.buf_cookie;
1644 + else
1645 +- return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
1646 ++ return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
1647 + }
1648 +
1649 + static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1650 +@@ -8347,7 +8349,7 @@ static int mvpp2_probe(struct platform_device *pdev)
1651 + }
1652 +
1653 + if (priv->hw_version == MVPP22) {
1654 +- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
1655 ++ err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
1656 + if (err)
1657 + goto err_mg_clk;
1658 + /* Sadly, the BM pools all share the same register to
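The likely motivation for MVPP2_DESC_DMA_MASK above: GENMASK_ULL(40, 0) covers bits 0 through 40 inclusive, i.e. a 41-bit mask, while dma_set_mask() was asked for 40 bits; defining a single constant keeps the descriptor-field masking and the DMA mask request consistent. A small sketch with local copies of the two macros (assumed to match the kernel's semantics) makes the one-bit difference visible:

#include <inttypes.h>
#include <stdio.h>

/* Local copies of the kernel macros (assumed semantics):
 * GENMASK_ULL(h, l) sets bits l..h inclusive;
 * DMA_BIT_MASK(n) sets the n lowest bits. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define DMA_BIT_MASK(n) ((n) == 64 ? ~0ULL : (1ULL << (n)) - 1)

int main(void)
{
	printf("GENMASK_ULL(40, 0) = %#" PRIx64 "  (41 bits)\n",
	       (uint64_t)GENMASK_ULL(40, 0));
	printf("DMA_BIT_MASK(40)   = %#" PRIx64 "  (40 bits)\n",
	       (uint64_t)DMA_BIT_MASK(40));
	return 0;
}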
1659 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
1660 +index 7761a26ec9c5..e7565416639b 100644
1661 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
1662 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
1663 +@@ -343,7 +343,7 @@ enum power_event {
1664 + #define MTL_RX_OVERFLOW_INT BIT(16)
1665 +
1666 + /* Default operating mode of the MAC */
1667 +-#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
1668 ++#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \
1669 + GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
1670 +
1671 + /* To dump the core regs excluding the Address Registers */
1672 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
1673 +index 63795ecafc8d..26dfb75e927a 100644
1674 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
1675 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
1676 +@@ -30,13 +30,6 @@ static void dwmac4_core_init(struct mac_device_info *hw,
1677 +
1678 + value |= GMAC_CORE_INIT;
1679 +
1680 +- /* Clear ACS bit because Ethernet switch tagging formats such as
1681 +- * Broadcom tags can look like invalid LLC/SNAP packets and cause the
1682 +- * hardware to truncate packets on reception.
1683 +- */
1684 +- if (netdev_uses_dsa(dev))
1685 +- value &= ~GMAC_CONFIG_ACS;
1686 +-
1687 + if (mtu > 1500)
1688 + value |= GMAC_CONFIG_2K;
1689 + if (mtu > 2000)
1690 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1691 +index 7ad841434ec8..3ea343b45d93 100644
1692 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1693 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1694 +@@ -3435,8 +3435,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
1695 +
1696 + /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
1697 + * Type frames (LLC/LLC-SNAP)
1698 ++ *
1699 ++ * llc_snap is never checked in GMAC >= 4, so this ACS
1700 ++ * feature is always disabled and packets need to be
1701 ++ * stripped manually.
1702 + */
1703 +- if (unlikely(status != llc_snap))
1704 ++ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
1705 ++ unlikely(status != llc_snap))
1706 + frame_len -= ETH_FCS_LEN;
1707 +
1708 + if (netif_msg_rx_status(priv)) {
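Taken together, the three stmmac hunks stop requesting hardware ACS padding/FCS stripping (GMAC_CONFIG_ACS dropped from GMAC_CORE_INIT, along with the DSA special case) and instead always subtract the FCS in software on GMAC >= 4 cores, where the added comment says the llc_snap status is never reported. A sketch of the receive-length decision, with the core id and status flag as plain parameters and 0x40 assumed as the 4.00 core id:

#include <stdbool.h>
#include <stdio.h>

#define ETH_FCS_LEN 4

/* Sketch of the software FCS-stripping decision; the parameters
 * stand in for priv->synopsys_id and the descriptor status. */
static unsigned int rx_frame_len(unsigned int frame_len,
				 unsigned int synopsys_id,
				 bool status_is_llc_snap)
{
	/* Cores >= 4.00 never report llc_snap, so hardware ACS
	 * stripping cannot be relied on: always strip in software. */
	if (synopsys_id >= 0x40 || !status_is_llc_snap)
		frame_len -= ETH_FCS_LEN;
	return frame_len;
}

int main(void)
{
	printf("%u\n", rx_frame_len(64, 0x40, false)); /* 60 */
	printf("%u\n", rx_frame_len(64, 0x35, true));  /* 64 */
	return 0;
}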
1709 +diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
1710 +index b2b30c9df037..33c35b2df7d5 100644
1711 +--- a/drivers/net/ethernet/ti/cpsw.c
1712 ++++ b/drivers/net/ethernet/ti/cpsw.c
1713 +@@ -125,7 +125,7 @@ do { \
1714 +
1715 + #define RX_PRIORITY_MAPPING 0x76543210
1716 + #define TX_PRIORITY_MAPPING 0x33221100
1717 +-#define CPDMA_TX_PRIORITY_MAP 0x01234567
1718 ++#define CPDMA_TX_PRIORITY_MAP 0x76543210
1719 +
1720 + #define CPSW_VLAN_AWARE BIT(1)
1721 + #define CPSW_ALE_VLAN_AWARE 1
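The cpsw change flips CPDMA_TX_PRIORITY_MAP from 0x01234567 to 0x76543210, matching RX_PRIORITY_MAPPING. Assuming the register packs one channel nibble per priority with priority 0 in the lowest nibble (which the neighbouring RX map suggests), the old constant mapped the priorities in reverse:

#include <stdint.h>
#include <stdio.h>

/* One nibble per priority, priority 0 in the lowest nibble. */
static unsigned int prio_to_chan(uint32_t map, unsigned int prio)
{
	return (map >> (4 * prio)) & 0xf;
}

int main(void)
{
	uint32_t fixed = 0x76543210, old = 0x01234567;
	unsigned int p;

	for (p = 0; p < 8; p++)
		printf("prio %u -> chan %u (was %u)\n",
		       p, prio_to_chan(fixed, p), prio_to_chan(old, p));
	return 0;
}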
1722 +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
1723 +index 9cbb0c8a896a..7de88b33d5b9 100644
1724 +--- a/drivers/net/macsec.c
1725 ++++ b/drivers/net/macsec.c
1726 +@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
1727 +
1728 + err = netdev_upper_dev_link(real_dev, dev, extack);
1729 + if (err < 0)
1730 +- goto put_dev;
1731 ++ goto unregister;
1732 +
1733 + /* need to be already registered so that ->init has run and
1734 + * the MAC addr is set
1735 +@@ -3316,8 +3316,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
1736 + macsec_del_dev(macsec);
1737 + unlink:
1738 + netdev_upper_dev_unlink(real_dev, dev);
1739 +-put_dev:
1740 +- dev_put(real_dev);
1741 ++unregister:
1742 + unregister_netdevice(dev);
1743 + return err;
1744 + }
1745 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
1746 +index 5aa59f41bf8c..71e2aef6b7a1 100644
1747 +--- a/drivers/net/ppp/pppoe.c
1748 ++++ b/drivers/net/ppp/pppoe.c
1749 +@@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
1750 + lock_sock(sk);
1751 +
1752 + error = -EINVAL;
1753 ++
1754 ++ if (sockaddr_len != sizeof(struct sockaddr_pppox))
1755 ++ goto end;
1756 ++
1757 + if (sp->sa_protocol != PX_PROTO_OE)
1758 + goto end;
1759 +
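The pppoe hunk rejects connect() calls whose address length does not match sizeof(struct sockaddr_pppox) before any field past the supplied bytes is read. A minimal sketch of that validate-length-first pattern, with my_sockaddr as an illustrative stand-in for the real address structure:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Illustrative address structure; stands in for sockaddr_pppox. */
struct my_sockaddr {
	unsigned short family;
	unsigned short protocol;
	char           dev[16];
};

static int my_connect(const void *uaddr, size_t addrlen)
{
	struct my_sockaddr sa;

	/* Reject short (or oversized) addresses before touching
	 * any field beyond what the caller actually supplied. */
	if (addrlen != sizeof(sa))
		return -EINVAL;
	memcpy(&sa, uaddr, sizeof(sa));
	return 0;
}

int main(void)
{
	struct my_sockaddr sa = { .family = 1 };

	printf("%d\n", my_connect(&sa, sizeof(sa)));     /* 0 */
	printf("%d\n", my_connect(&sa, sizeof(sa) - 4)); /* -22 */
	return 0;
}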
1760 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
1761 +index befed2d22bf4..3175f7410baf 100644
1762 +--- a/drivers/net/team/team.c
1763 ++++ b/drivers/net/team/team.c
1764 +@@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
1765 + }
1766 + }
1767 +
1768 ++static bool __team_option_inst_tmp_find(const struct list_head *opts,
1769 ++ const struct team_option_inst *needle)
1770 ++{
1771 ++ struct team_option_inst *opt_inst;
1772 ++
1773 ++ list_for_each_entry(opt_inst, opts, tmp_list)
1774 ++ if (opt_inst == needle)
1775 ++ return true;
1776 ++ return false;
1777 ++}
1778 ++
1779 + static int __team_options_register(struct team *team,
1780 + const struct team_option *option,
1781 + size_t option_count)
1782 +@@ -1061,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
1783 + }
1784 +
1785 + #ifdef CONFIG_NET_POLL_CONTROLLER
1786 +-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1787 ++static int __team_port_enable_netpoll(struct team_port *port)
1788 + {
1789 + struct netpoll *np;
1790 + int err;
1791 +
1792 +- if (!team->dev->npinfo)
1793 +- return 0;
1794 +-
1795 + np = kzalloc(sizeof(*np), GFP_KERNEL);
1796 + if (!np)
1797 + return -ENOMEM;
1798 +@@ -1082,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1799 + return err;
1800 + }
1801 +
1802 ++static int team_port_enable_netpoll(struct team_port *port)
1803 ++{
1804 ++ if (!port->team->dev->npinfo)
1805 ++ return 0;
1806 ++
1807 ++ return __team_port_enable_netpoll(port);
1808 ++}
1809 ++
1810 + static void team_port_disable_netpoll(struct team_port *port)
1811 + {
1812 + struct netpoll *np = port->np;
1813 +@@ -1096,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port)
1814 + kfree(np);
1815 + }
1816 + #else
1817 +-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1818 ++static int team_port_enable_netpoll(struct team_port *port)
1819 + {
1820 + return 0;
1821 + }
1822 +@@ -1204,7 +1220,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1823 + goto err_vids_add;
1824 + }
1825 +
1826 +- err = team_port_enable_netpoll(team, port);
1827 ++ err = team_port_enable_netpoll(port);
1828 + if (err) {
1829 + netdev_err(dev, "Failed to enable netpoll on device %s\n",
1830 + portname);
1831 +@@ -1901,7 +1917,7 @@ static int team_netpoll_setup(struct net_device *dev,
1832 +
1833 + mutex_lock(&team->lock);
1834 + list_for_each_entry(port, &team->port_list, list) {
1835 +- err = team_port_enable_netpoll(team, port);
1836 ++ err = __team_port_enable_netpoll(port);
1837 + if (err) {
1838 + __team_netpoll_cleanup(team);
1839 + break;
1840 +@@ -2562,6 +2578,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1841 + if (err)
1842 + goto team_put;
1843 + opt_inst->changed = true;
1844 ++
1845 ++ /* dumb/evil user-space can send us duplicate opt,
1846 ++ * keep only the last one
1847 ++ */
1848 ++ if (__team_option_inst_tmp_find(&opt_inst_list,
1849 ++ opt_inst))
1850 ++ continue;
1851 ++
1852 + list_add(&opt_inst->tmp_list, &opt_inst_list);
1853 + }
1854 + if (!opt_found) {
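The team hunks do two things: netpoll enabling is split so the port-add path still honours the team device's npinfo check while team_netpoll_setup() enables unconditionally, and duplicate option instances sent by userspace are filtered with a linear scan of the temporary list before list_add(). A sketch of that duplicate-scan idiom on an intrusive singly linked list:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct opt_inst {
	int              id;
	struct opt_inst *tmp_next;   /* temporary result-list linkage */
};

/* Linear scan: is this instance already on the temporary list? */
static bool tmp_find(const struct opt_inst *head, const struct opt_inst *needle)
{
	for (; head; head = head->tmp_next)
		if (head == needle)
			return true;
	return false;
}

int main(void)
{
	struct opt_inst a = { .id = 1 }, b = { .id = 2 };
	struct opt_inst *head = NULL;
	struct opt_inst *updates[] = { &a, &b, &a };   /* duplicate &a */
	size_t i;

	for (i = 0; i < 3; i++) {
		if (tmp_find(head, updates[i]))
			continue;                      /* keep a single entry */
		updates[i]->tmp_next = head;
		head = updates[i];
	}
	for (; head; head = head->tmp_next)
		printf("opt %d\n", head->id);          /* 2, then 1 */
	return 0;
}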
1855 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1856 +index 28cfa642e39a..6c7bdd0c361a 100644
1857 +--- a/drivers/net/tun.c
1858 ++++ b/drivers/net/tun.c
1859 +@@ -1094,12 +1094,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1860 + goto drop;
1861 +
1862 + len = run_ebpf_filter(tun, skb, len);
1863 +-
1864 +- /* Trim extra bytes since we may insert vlan proto & TCI
1865 +- * in tun_put_user().
1866 +- */
1867 +- len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
1868 +- if (len <= 0 || pskb_trim(skb, len))
1869 ++ if (len == 0 || pskb_trim(skb, len))
1870 + goto drop;
1871 +
1872 + if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1873 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1874 +index ca066b785e9f..c853e7410f5a 100644
1875 +--- a/drivers/net/usb/qmi_wwan.c
1876 ++++ b/drivers/net/usb/qmi_wwan.c
1877 +@@ -1107,6 +1107,7 @@ static const struct usb_device_id products[] = {
1878 + {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
1879 + {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
1880 + {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
1881 ++ {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
1882 + {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
1883 + {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
1884 + {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
1885 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
1886 +index 23374603e4d9..aa21b2225679 100644
1887 +--- a/drivers/net/virtio_net.c
1888 ++++ b/drivers/net/virtio_net.c
1889 +@@ -147,6 +147,17 @@ struct receive_queue {
1890 + struct xdp_rxq_info xdp_rxq;
1891 + };
1892 +
1893 ++/* Control VQ buffers: protected by the rtnl lock */
1894 ++struct control_buf {
1895 ++ struct virtio_net_ctrl_hdr hdr;
1896 ++ virtio_net_ctrl_ack status;
1897 ++ struct virtio_net_ctrl_mq mq;
1898 ++ u8 promisc;
1899 ++ u8 allmulti;
1900 ++ __virtio16 vid;
1901 ++ u64 offloads;
1902 ++};
1903 ++
1904 + struct virtnet_info {
1905 + struct virtio_device *vdev;
1906 + struct virtqueue *cvq;
1907 +@@ -192,14 +203,7 @@ struct virtnet_info {
1908 + struct hlist_node node;
1909 + struct hlist_node node_dead;
1910 +
1911 +- /* Control VQ buffers: protected by the rtnl lock */
1912 +- struct virtio_net_ctrl_hdr ctrl_hdr;
1913 +- virtio_net_ctrl_ack ctrl_status;
1914 +- struct virtio_net_ctrl_mq ctrl_mq;
1915 +- u8 ctrl_promisc;
1916 +- u8 ctrl_allmulti;
1917 +- u16 ctrl_vid;
1918 +- u64 ctrl_offloads;
1919 ++ struct control_buf *ctrl;
1920 +
1921 + /* Ethtool settings */
1922 + u8 duplex;
1923 +@@ -1269,7 +1273,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1924 + {
1925 + struct receive_queue *rq =
1926 + container_of(napi, struct receive_queue, napi);
1927 +- unsigned int received;
1928 ++ struct virtnet_info *vi = rq->vq->vdev->priv;
1929 ++ struct send_queue *sq;
1930 ++ unsigned int received, qp;
1931 + bool xdp_xmit = false;
1932 +
1933 + virtnet_poll_cleantx(rq);
1934 +@@ -1280,8 +1286,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1935 + if (received < budget)
1936 + virtqueue_napi_complete(napi, rq->vq, received);
1937 +
1938 +- if (xdp_xmit)
1939 ++ if (xdp_xmit) {
1940 ++ qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
1941 ++ smp_processor_id();
1942 ++ sq = &vi->sq[qp];
1943 ++ virtqueue_kick(sq->vq);
1944 + xdp_do_flush_map();
1945 ++ }
1946 +
1947 + return received;
1948 + }
1949 +@@ -1454,25 +1465,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1950 + /* Caller should know better */
1951 + BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
1952 +
1953 +- vi->ctrl_status = ~0;
1954 +- vi->ctrl_hdr.class = class;
1955 +- vi->ctrl_hdr.cmd = cmd;
1956 ++ vi->ctrl->status = ~0;
1957 ++ vi->ctrl->hdr.class = class;
1958 ++ vi->ctrl->hdr.cmd = cmd;
1959 + /* Add header */
1960 +- sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
1961 ++ sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
1962 + sgs[out_num++] = &hdr;
1963 +
1964 + if (out)
1965 + sgs[out_num++] = out;
1966 +
1967 + /* Add return status. */
1968 +- sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
1969 ++ sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
1970 + sgs[out_num] = &stat;
1971 +
1972 + BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1973 + virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
1974 +
1975 + if (unlikely(!virtqueue_kick(vi->cvq)))
1976 +- return vi->ctrl_status == VIRTIO_NET_OK;
1977 ++ return vi->ctrl->status == VIRTIO_NET_OK;
1978 +
1979 + /* Spin for a response, the kick causes an ioport write, trapping
1980 + * into the hypervisor, so the request should be handled immediately.
1981 +@@ -1481,7 +1492,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1982 + !virtqueue_is_broken(vi->cvq))
1983 + cpu_relax();
1984 +
1985 +- return vi->ctrl_status == VIRTIO_NET_OK;
1986 ++ return vi->ctrl->status == VIRTIO_NET_OK;
1987 + }
1988 +
1989 + static int virtnet_set_mac_address(struct net_device *dev, void *p)
1990 +@@ -1593,8 +1604,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1991 + if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1992 + return 0;
1993 +
1994 +- vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1995 +- sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
1996 ++ vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1997 ++ sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
1998 +
1999 + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2000 + VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
2001 +@@ -1653,22 +1664,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
2002 + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
2003 + return;
2004 +
2005 +- vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
2006 +- vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2007 ++ vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
2008 ++ vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2009 +
2010 +- sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
2011 ++ sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
2012 +
2013 + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2014 + VIRTIO_NET_CTRL_RX_PROMISC, sg))
2015 + dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
2016 +- vi->ctrl_promisc ? "en" : "dis");
2017 ++ vi->ctrl->promisc ? "en" : "dis");
2018 +
2019 +- sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
2020 ++ sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
2021 +
2022 + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2023 + VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
2024 + dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
2025 +- vi->ctrl_allmulti ? "en" : "dis");
2026 ++ vi->ctrl->allmulti ? "en" : "dis");
2027 +
2028 + uc_count = netdev_uc_count(dev);
2029 + mc_count = netdev_mc_count(dev);
2030 +@@ -1714,8 +1725,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
2031 + struct virtnet_info *vi = netdev_priv(dev);
2032 + struct scatterlist sg;
2033 +
2034 +- vi->ctrl_vid = vid;
2035 +- sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
2036 ++ vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2037 ++ sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2038 +
2039 + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2040 + VIRTIO_NET_CTRL_VLAN_ADD, &sg))
2041 +@@ -1729,8 +1740,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
2042 + struct virtnet_info *vi = netdev_priv(dev);
2043 + struct scatterlist sg;
2044 +
2045 +- vi->ctrl_vid = vid;
2046 +- sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
2047 ++ vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2048 ++ sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2049 +
2050 + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2051 + VIRTIO_NET_CTRL_VLAN_DEL, &sg))
2052 +@@ -2126,9 +2137,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
2053 + static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
2054 + {
2055 + struct scatterlist sg;
2056 +- vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads);
2057 ++ vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
2058 +
2059 +- sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads));
2060 ++ sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
2061 +
2062 + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
2063 + VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
2064 +@@ -2351,6 +2362,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
2065 +
2066 + kfree(vi->rq);
2067 + kfree(vi->sq);
2068 ++ kfree(vi->ctrl);
2069 + }
2070 +
2071 + static void _free_receive_bufs(struct virtnet_info *vi)
2072 +@@ -2543,6 +2555,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
2073 + {
2074 + int i;
2075 +
2076 ++ vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
2077 ++ if (!vi->ctrl)
2078 ++ goto err_ctrl;
2079 + vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
2080 + if (!vi->sq)
2081 + goto err_sq;
2082 +@@ -2571,6 +2586,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
2083 + err_rq:
2084 + kfree(vi->sq);
2085 + err_sq:
2086 ++ kfree(vi->ctrl);
2087 ++err_ctrl:
2088 + return -ENOMEM;
2089 + }
2090 +
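The virtio_net change moves the control-virtqueue buffers out of struct virtnet_info into a separately kzalloc'ed struct control_buf (and turns the VLAN id into a properly converted __virtio16 along the way), with virtnet_alloc_queues()/virtnet_free_queues() gaining the matching allocation and release. A sketch of the allocate-in-order, unwind-in-reverse idiom used there, with illustrative types:

#include <stdlib.h>

struct ctrl { int hdr; };
struct ring { int n; };

struct dev {
	struct ctrl *ctrl;
	struct ring *sq;
	struct ring *rq;
};

/* Allocate in order, unwind in reverse order on failure. */
static int dev_alloc(struct dev *d, size_t nqueues)
{
	d->ctrl = calloc(1, sizeof(*d->ctrl));
	if (!d->ctrl)
		goto err_ctrl;
	d->sq = calloc(nqueues, sizeof(*d->sq));
	if (!d->sq)
		goto err_sq;
	d->rq = calloc(nqueues, sizeof(*d->rq));
	if (!d->rq)
		goto err_rq;
	return 0;

err_rq:
	free(d->sq);
err_sq:
	free(d->ctrl);
err_ctrl:
	return -1;
}

int main(void)
{
	struct dev d;

	if (dev_alloc(&d, 4))
		return 1;
	free(d.rq);
	free(d.sq);
	free(d.ctrl);
	return 0;
}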
2091 +diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
2092 +index e04937f44f33..9ebe2a689966 100644
2093 +--- a/drivers/net/vmxnet3/vmxnet3_drv.c
2094 ++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
2095 +@@ -1218,6 +1218,7 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
2096 + union {
2097 + void *ptr;
2098 + struct ethhdr *eth;
2099 ++ struct vlan_ethhdr *veth;
2100 + struct iphdr *ipv4;
2101 + struct ipv6hdr *ipv6;
2102 + struct tcphdr *tcp;
2103 +@@ -1228,16 +1229,24 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
2104 + if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
2105 + return 0;
2106 +
2107 ++ if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
2108 ++ skb->protocol == cpu_to_be16(ETH_P_8021AD))
2109 ++ hlen = sizeof(struct vlan_ethhdr);
2110 ++ else
2111 ++ hlen = sizeof(struct ethhdr);
2112 ++
2113 + hdr.eth = eth_hdr(skb);
2114 + if (gdesc->rcd.v4) {
2115 +- BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
2116 +- hdr.ptr += sizeof(struct ethhdr);
2117 ++ BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
2118 ++ hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
2119 ++ hdr.ptr += hlen;
2120 + BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
2121 + hlen = hdr.ipv4->ihl << 2;
2122 + hdr.ptr += hdr.ipv4->ihl << 2;
2123 + } else if (gdesc->rcd.v6) {
2124 +- BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
2125 +- hdr.ptr += sizeof(struct ethhdr);
2126 ++ BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
2127 ++ hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
2128 ++ hdr.ptr += hlen;
2129 + /* Use an estimated value, since we also need to handle
2130 + * TSO case.
2131 + */
2132 +diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
2133 +index 59ec34052a65..a3326463b71f 100644
2134 +--- a/drivers/net/vmxnet3/vmxnet3_int.h
2135 ++++ b/drivers/net/vmxnet3/vmxnet3_int.h
2136 +@@ -69,10 +69,10 @@
2137 + /*
2138 + * Version numbers
2139 + */
2140 +-#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k"
2141 ++#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k"
2142 +
2143 + /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
2144 +-#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00
2145 ++#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00
2146 +
2147 + #if defined(CONFIG_PCI_MSI)
2148 + /* RSS only makes sense if MSI-X is supported. */
2149 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
2150 +index ebb3f1b046f3..800a86e2d671 100644
2151 +--- a/drivers/net/wireless/ath/ath10k/mac.c
2152 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
2153 +@@ -6028,9 +6028,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
2154 + sta->addr, smps, err);
2155 + }
2156 +
2157 +- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
2158 +- changed & IEEE80211_RC_NSS_CHANGED) {
2159 +- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
2160 ++ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
2161 ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
2162 + sta->addr);
2163 +
2164 + err = ath10k_station_assoc(ar, arvif->vif, sta, true);
2165 +diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
2166 +index 96e73e30204e..5f111f0ee7ca 100644
2167 +--- a/drivers/pinctrl/intel/pinctrl-intel.c
2168 ++++ b/drivers/pinctrl/intel/pinctrl-intel.c
2169 +@@ -425,18 +425,6 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
2170 + writel(value, padcfg0);
2171 + }
2172 +
2173 +-static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
2174 +-{
2175 +- u32 value;
2176 +-
2177 +- /* Put the pad into GPIO mode */
2178 +- value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
2179 +- /* Disable SCI/SMI/NMI generation */
2180 +- value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
2181 +- value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
2182 +- writel(value, padcfg0);
2183 +-}
2184 +-
2185 + static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
2186 + struct pinctrl_gpio_range *range,
2187 + unsigned pin)
2188 +@@ -444,6 +432,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
2189 + struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
2190 + void __iomem *padcfg0;
2191 + unsigned long flags;
2192 ++ u32 value;
2193 +
2194 + raw_spin_lock_irqsave(&pctrl->lock, flags);
2195 +
2196 +@@ -453,7 +442,13 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
2197 + }
2198 +
2199 + padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
2200 +- intel_gpio_set_gpio_mode(padcfg0);
2201 ++ /* Put the pad into GPIO mode */
2202 ++ value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
2203 ++ /* Disable SCI/SMI/NMI generation */
2204 ++ value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
2205 ++ value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
2206 ++ writel(value, padcfg0);
2207 ++
2208 + /* Disable TX buffer and enable RX (this will be input) */
2209 + __intel_gpio_set_direction(padcfg0, true);
2210 +
2211 +@@ -973,8 +968,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
2212 +
2213 + raw_spin_lock_irqsave(&pctrl->lock, flags);
2214 +
2215 +- intel_gpio_set_gpio_mode(reg);
2216 +-
2217 + value = readl(reg);
2218 +
2219 + value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
2220 +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
2221 +index 62f5f04d8f61..5e963fe0e38d 100644
2222 +--- a/drivers/s390/block/dasd_alias.c
2223 ++++ b/drivers/s390/block/dasd_alias.c
2224 +@@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
2225 + int dasd_alias_add_device(struct dasd_device *device)
2226 + {
2227 + struct dasd_eckd_private *private = device->private;
2228 +- struct alias_lcu *lcu;
2229 ++ __u8 uaddr = private->uid.real_unit_addr;
2230 ++ struct alias_lcu *lcu = private->lcu;
2231 + unsigned long flags;
2232 + int rc;
2233 +
2234 +- lcu = private->lcu;
2235 + rc = 0;
2236 + spin_lock_irqsave(&lcu->lock, flags);
2237 ++ /*
2238 ++ * Check if device and lcu type differ. If so, the uac data may be
2239 ++ * outdated and needs to be updated.
2240 ++ */
2241 ++ if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
2242 ++ lcu->flags |= UPDATE_PENDING;
2243 ++ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2244 ++ "uid type mismatch - trigger rescan");
2245 ++ }
2246 + if (!(lcu->flags & UPDATE_PENDING)) {
2247 + rc = _add_device_to_lcu(lcu, device, device);
2248 + if (rc)
2249 +diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
2250 +index c08fc5a8df0c..aea0b25eff29 100644
2251 +--- a/drivers/s390/cio/chsc.c
2252 ++++ b/drivers/s390/cio/chsc.c
2253 +@@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
2254 +
2255 + static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
2256 + {
2257 ++ struct channel_path *chp;
2258 + struct chp_link link;
2259 + struct chp_id chpid;
2260 + int status;
2261 +@@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
2262 + chpid.id = sei_area->rsid;
2263 + /* allocate a new channel path structure, if needed */
2264 + status = chp_get_status(chpid);
2265 +- if (status < 0)
2266 +- chp_new(chpid);
2267 +- else if (!status)
2268 ++ if (!status)
2269 + return;
2270 ++
2271 ++ if (status < 0) {
2272 ++ chp_new(chpid);
2273 ++ } else {
2274 ++ chp = chpid_to_chp(chpid);
2275 ++ mutex_lock(&chp->lock);
2276 ++ chp_update_desc(chp);
2277 ++ mutex_unlock(&chp->lock);
2278 ++ }
2279 + memset(&link, 0, sizeof(struct chp_link));
2280 + link.chpid = chpid;
2281 + if ((sei_area->vf & 0xc0) != 0) {
2282 +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
2283 +index 959c65cf75d9..e338ce823c44 100644
2284 +--- a/drivers/s390/net/qeth_core.h
2285 ++++ b/drivers/s390/net/qeth_core.h
2286 +@@ -565,7 +565,6 @@ enum qeth_ip_types {
2287 + enum qeth_cmd_buffer_state {
2288 + BUF_STATE_FREE,
2289 + BUF_STATE_LOCKED,
2290 +- BUF_STATE_PROCESSED,
2291 + };
2292 +
2293 + enum qeth_cq {
2294 +@@ -609,7 +608,6 @@ struct qeth_channel {
2295 + struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
2296 + atomic_t irq_pending;
2297 + int io_buf_no;
2298 +- int buf_no;
2299 + };
2300 +
2301 + /**
2302 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
2303 +index 3653bea38470..c11a083cd956 100644
2304 +--- a/drivers/s390/net/qeth_core_main.c
2305 ++++ b/drivers/s390/net/qeth_core_main.c
2306 +@@ -821,7 +821,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
2307 +
2308 + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
2309 + qeth_release_buffer(channel, &channel->iob[cnt]);
2310 +- channel->buf_no = 0;
2311 + channel->io_buf_no = 0;
2312 + }
2313 + EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
2314 +@@ -927,7 +926,6 @@ static int qeth_setup_channel(struct qeth_channel *channel)
2315 + kfree(channel->iob[cnt].data);
2316 + return -ENOMEM;
2317 + }
2318 +- channel->buf_no = 0;
2319 + channel->io_buf_no = 0;
2320 + atomic_set(&channel->irq_pending, 0);
2321 + spin_lock_init(&channel->iob_lock);
2322 +@@ -1103,11 +1101,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
2323 + {
2324 + int rc;
2325 + int cstat, dstat;
2326 +- struct qeth_cmd_buffer *buffer;
2327 + struct qeth_channel *channel;
2328 + struct qeth_card *card;
2329 + struct qeth_cmd_buffer *iob;
2330 +- __u8 index;
2331 +
2332 + if (__qeth_check_irb_error(cdev, intparm, irb))
2333 + return;
2334 +@@ -1185,25 +1181,18 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
2335 + channel->state = CH_STATE_RCD_DONE;
2336 + goto out;
2337 + }
2338 +- if (intparm) {
2339 +- buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
2340 +- buffer->state = BUF_STATE_PROCESSED;
2341 +- }
2342 + if (channel == &card->data)
2343 + return;
2344 + if (channel == &card->read &&
2345 + channel->state == CH_STATE_UP)
2346 + __qeth_issue_next_read(card);
2347 +
2348 +- iob = channel->iob;
2349 +- index = channel->buf_no;
2350 +- while (iob[index].state == BUF_STATE_PROCESSED) {
2351 +- if (iob[index].callback != NULL)
2352 +- iob[index].callback(channel, iob + index);
2353 +-
2354 +- index = (index + 1) % QETH_CMD_BUFFER_NO;
2355 ++ if (intparm) {
2356 ++ iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
2357 ++ if (iob->callback)
2358 ++ iob->callback(iob->channel, iob);
2359 + }
2360 +- channel->buf_no = index;
2361 ++
2362 + out:
2363 + wake_up(&card->wait_q);
2364 + return;
2365 +@@ -2217,7 +2206,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2366 + error:
2367 + atomic_set(&card->write.irq_pending, 0);
2368 + qeth_release_buffer(iob->channel, iob);
2369 +- card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
2370 + rc = reply->rc;
2371 + qeth_put_reply(reply);
2372 + return rc;
2373 +@@ -3037,28 +3025,23 @@ static int qeth_send_startlan(struct qeth_card *card)
2374 + return rc;
2375 + }
2376 +
2377 +-static int qeth_default_setadapterparms_cb(struct qeth_card *card,
2378 +- struct qeth_reply *reply, unsigned long data)
2379 ++static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
2380 + {
2381 +- struct qeth_ipa_cmd *cmd;
2382 +-
2383 +- QETH_CARD_TEXT(card, 4, "defadpcb");
2384 +-
2385 +- cmd = (struct qeth_ipa_cmd *) data;
2386 +- if (cmd->hdr.return_code == 0)
2387 ++ if (!cmd->hdr.return_code)
2388 + cmd->hdr.return_code =
2389 + cmd->data.setadapterparms.hdr.return_code;
2390 +- return 0;
2391 ++ return cmd->hdr.return_code;
2392 + }
2393 +
2394 + static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2395 + struct qeth_reply *reply, unsigned long data)
2396 + {
2397 +- struct qeth_ipa_cmd *cmd;
2398 ++ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2399 +
2400 + QETH_CARD_TEXT(card, 3, "quyadpcb");
2401 ++ if (qeth_setadpparms_inspect_rc(cmd))
2402 ++ return 0;
2403 +
2404 +- cmd = (struct qeth_ipa_cmd *) data;
2405 + if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
2406 + card->info.link_type =
2407 + cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2408 +@@ -3066,7 +3049,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2409 + }
2410 + card->options.adp.supported_funcs =
2411 + cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2412 +- return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
2413 ++ return 0;
2414 + }
2415 +
2416 + static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2417 +@@ -3158,22 +3141,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists);
2418 + static int qeth_query_switch_attributes_cb(struct qeth_card *card,
2419 + struct qeth_reply *reply, unsigned long data)
2420 + {
2421 +- struct qeth_ipa_cmd *cmd;
2422 +- struct qeth_switch_info *sw_info;
2423 ++ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2424 + struct qeth_query_switch_attributes *attrs;
2425 ++ struct qeth_switch_info *sw_info;
2426 +
2427 + QETH_CARD_TEXT(card, 2, "qswiatcb");
2428 +- cmd = (struct qeth_ipa_cmd *) data;
2429 +- sw_info = (struct qeth_switch_info *)reply->param;
2430 +- if (cmd->data.setadapterparms.hdr.return_code == 0) {
2431 +- attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
2432 +- sw_info->capabilities = attrs->capabilities;
2433 +- sw_info->settings = attrs->settings;
2434 +- QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
2435 +- sw_info->settings);
2436 +- }
2437 +- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
2438 ++ if (qeth_setadpparms_inspect_rc(cmd))
2439 ++ return 0;
2440 +
2441 ++ sw_info = (struct qeth_switch_info *)reply->param;
2442 ++ attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
2443 ++ sw_info->capabilities = attrs->capabilities;
2444 ++ sw_info->settings = attrs->settings;
2445 ++ QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
2446 ++ sw_info->settings);
2447 + return 0;
2448 + }
2449 +
2450 +@@ -4211,16 +4192,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet);
2451 + static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
2452 + struct qeth_reply *reply, unsigned long data)
2453 + {
2454 +- struct qeth_ipa_cmd *cmd;
2455 ++ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2456 + struct qeth_ipacmd_setadpparms *setparms;
2457 +
2458 + QETH_CARD_TEXT(card, 4, "prmadpcb");
2459 +
2460 +- cmd = (struct qeth_ipa_cmd *) data;
2461 + setparms = &(cmd->data.setadapterparms);
2462 +-
2463 +- qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
2464 +- if (cmd->hdr.return_code) {
2465 ++ if (qeth_setadpparms_inspect_rc(cmd)) {
2466 + QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
2467 + setparms->data.mode = SET_PROMISC_MODE_OFF;
2468 + }
2469 +@@ -4290,18 +4268,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats);
2470 + static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
2471 + struct qeth_reply *reply, unsigned long data)
2472 + {
2473 +- struct qeth_ipa_cmd *cmd;
2474 ++ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2475 +
2476 + QETH_CARD_TEXT(card, 4, "chgmaccb");
2477 ++ if (qeth_setadpparms_inspect_rc(cmd))
2478 ++ return 0;
2479 +
2480 +- cmd = (struct qeth_ipa_cmd *) data;
2481 + if (!card->options.layer2 ||
2482 + !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
2483 + ether_addr_copy(card->dev->dev_addr,
2484 + cmd->data.setadapterparms.data.change_addr.addr);
2485 + card->info.mac_bits |= QETH_LAYER2_MAC_READ;
2486 + }
2487 +- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
2488 + return 0;
2489 + }
2490 +
2491 +@@ -4332,13 +4310,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
2492 + static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
2493 + struct qeth_reply *reply, unsigned long data)
2494 + {
2495 +- struct qeth_ipa_cmd *cmd;
2496 ++ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2497 + struct qeth_set_access_ctrl *access_ctrl_req;
2498 + int fallback = *(int *)reply->param;
2499 +
2500 + QETH_CARD_TEXT(card, 4, "setaccb");
2501 ++ if (cmd->hdr.return_code)
2502 ++ return 0;
2503 ++ qeth_setadpparms_inspect_rc(cmd);
2504 +
2505 +- cmd = (struct qeth_ipa_cmd *) data;
2506 + access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
2507 + QETH_DBF_TEXT_(SETUP, 2, "setaccb");
2508 + QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
2509 +@@ -4411,7 +4391,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
2510 + card->options.isolation = card->options.prev_isolation;
2511 + break;
2512 + }
2513 +- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
2514 + return 0;
2515 + }
2516 +
2517 +@@ -4699,14 +4678,15 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
2518 + static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
2519 + struct qeth_reply *reply, unsigned long data)
2520 + {
2521 +- struct qeth_ipa_cmd *cmd;
2522 ++ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
2523 + struct qeth_qoat_priv *priv;
2524 + char *resdata;
2525 + int resdatalen;
2526 +
2527 + QETH_CARD_TEXT(card, 3, "qoatcb");
2528 ++ if (qeth_setadpparms_inspect_rc(cmd))
2529 ++ return 0;
2530 +
2531 +- cmd = (struct qeth_ipa_cmd *)data;
2532 + priv = (struct qeth_qoat_priv *)reply->param;
2533 + resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
2534 + resdata = (char *)data + 28;
2535 +@@ -4800,21 +4780,18 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
2536 + static int qeth_query_card_info_cb(struct qeth_card *card,
2537 + struct qeth_reply *reply, unsigned long data)
2538 + {
2539 +- struct qeth_ipa_cmd *cmd;
2540 ++ struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
2541 ++ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
2542 + struct qeth_query_card_info *card_info;
2543 +- struct carrier_info *carrier_info;
2544 +
2545 + QETH_CARD_TEXT(card, 2, "qcrdincb");
2546 +- carrier_info = (struct carrier_info *)reply->param;
2547 +- cmd = (struct qeth_ipa_cmd *)data;
2548 +- card_info = &cmd->data.setadapterparms.data.card_info;
2549 +- if (cmd->data.setadapterparms.hdr.return_code == 0) {
2550 +- carrier_info->card_type = card_info->card_type;
2551 +- carrier_info->port_mode = card_info->port_mode;
2552 +- carrier_info->port_speed = card_info->port_speed;
2553 +- }
2554 ++ if (qeth_setadpparms_inspect_rc(cmd))
2555 ++ return 0;
2556 +
2557 +- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
2558 ++ card_info = &cmd->data.setadapterparms.data.card_info;
2559 ++ carrier_info->card_type = card_info->card_type;
2560 ++ carrier_info->port_mode = card_info->port_mode;
2561 ++ carrier_info->port_speed = card_info->port_speed;
2562 + return 0;
2563 + }
2564 +
2565 +@@ -6567,10 +6544,14 @@ static int __init qeth_core_init(void)
2566 + mutex_init(&qeth_mod_mutex);
2567 +
2568 + qeth_wq = create_singlethread_workqueue("qeth_wq");
2569 ++ if (!qeth_wq) {
2570 ++ rc = -ENOMEM;
2571 ++ goto out_err;
2572 ++ }
2573 +
2574 + rc = qeth_register_dbf_views();
2575 + if (rc)
2576 +- goto out_err;
2577 ++ goto dbf_err;
2578 + qeth_core_root_dev = root_device_register("qeth");
2579 + rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
2580 + if (rc)
2581 +@@ -6607,6 +6588,8 @@ static int __init qeth_core_init(void)
2582 + root_device_unregister(qeth_core_root_dev);
2583 + register_err:
2584 + qeth_unregister_dbf_views();
2585 ++dbf_err:
2586 ++ destroy_workqueue(qeth_wq);
2587 + out_err:
2588 + pr_err("Initializing the qeth device driver failed\n");
2589 + return rc;
2590 +diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
2591 +index 067d52e95f02..d7191943ecb8 100644
2592 +--- a/include/linux/fsnotify_backend.h
2593 ++++ b/include/linux/fsnotify_backend.h
2594 +@@ -217,12 +217,10 @@ struct fsnotify_mark_connector {
2595 + union { /* Object pointer [lock] */
2596 + struct inode *inode;
2597 + struct vfsmount *mnt;
2598 +- };
2599 +- union {
2600 +- struct hlist_head list;
2601 + /* Used listing heads to free after srcu period expires */
2602 + struct fsnotify_mark_connector *destroy_next;
2603 + };
2604 ++ struct hlist_head list;
2605 + };
2606 +
2607 + /*
2608 +diff --git a/include/linux/hmm.h b/include/linux/hmm.h
2609 +index 36dd21fe5caf..325017ad9311 100644
2610 +--- a/include/linux/hmm.h
2611 ++++ b/include/linux/hmm.h
2612 +@@ -498,16 +498,23 @@ struct hmm_device {
2613 + struct hmm_device *hmm_device_new(void *drvdata);
2614 + void hmm_device_put(struct hmm_device *hmm_device);
2615 + #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
2616 ++#endif /* IS_ENABLED(CONFIG_HMM) */
2617 +
2618 + /* Below are for HMM internal use only! Not to be used by device driver! */
2619 ++#if IS_ENABLED(CONFIG_HMM_MIRROR)
2620 + void hmm_mm_destroy(struct mm_struct *mm);
2621 +
2622 + static inline void hmm_mm_init(struct mm_struct *mm)
2623 + {
2624 + mm->hmm = NULL;
2625 + }
2626 ++#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
2627 ++static inline void hmm_mm_destroy(struct mm_struct *mm) {}
2628 ++static inline void hmm_mm_init(struct mm_struct *mm) {}
2629 ++#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
2630 ++
2631 ++
2632 + #else /* IS_ENABLED(CONFIG_HMM) */
2633 + static inline void hmm_mm_destroy(struct mm_struct *mm) {}
2634 + static inline void hmm_mm_init(struct mm_struct *mm) {}
2635 +-#endif /* IS_ENABLED(CONFIG_HMM) */
2636 + #endif /* LINUX_HMM_H */
2637 +diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
2638 +index 7d30892da064..87b8c20d5b27 100644
2639 +--- a/include/linux/if_vlan.h
2640 ++++ b/include/linux/if_vlan.h
2641 +@@ -639,7 +639,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
2642 + * Returns true if the skb is tagged with multiple vlan headers, regardless
2643 + * of whether it is hardware accelerated or not.
2644 + */
2645 +-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
2646 ++static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
2647 + {
2648 + __be16 protocol = skb->protocol;
2649 +
2650 +@@ -649,6 +649,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
2651 + if (likely(!eth_type_vlan(protocol)))
2652 + return false;
2653 +
2654 ++ if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
2655 ++ return false;
2656 ++
2657 + veh = (struct vlan_ethhdr *)skb->data;
2658 + protocol = veh->h_vlan_encapsulated_proto;
2659 + }
2660 +@@ -666,7 +669,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
2661 + *
2662 + * Returns features without unsafe ones if the skb has multiple tags.
2663 + */
2664 +-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
2665 ++static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
2666 + netdev_features_t features)
2667 + {
2668 + if (skb_vlan_tagged_multi(skb)) {
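skb_vlan_tagged_multi() previously cast skb->data to a vlan_ethhdr without ensuring VLAN_ETH_HLEN bytes were present in the linear area; the fix adds pskb_may_pull(), which may modify the skb and is why the const qualifiers are dropped here and in the features_check paths. The general rule, prove the bytes exist before interpreting them as a header, in a small standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vlan_hdr {
	uint16_t tpid;
	uint16_t tci;
};

/* Only interpret the buffer as a header once we know the bytes
 * are actually there. */
static bool read_vlan_tci(const uint8_t *data, size_t len, uint16_t *tci)
{
	struct vlan_hdr vh;

	if (len < sizeof(vh))
		return false;
	memcpy(&vh, data, sizeof(vh));    /* alignment-safe access */
	*tci = vh.tci;
	return true;
}

int main(void)
{
	uint8_t pkt[] = { 0x81, 0x00, 0x00, 0x05 };
	uint16_t tci;

	printf("%d\n", read_vlan_tci(pkt, sizeof(pkt), &tci)); /* 1 */
	printf("%d\n", read_vlan_tci(pkt, 2, &tci));           /* 0 */
	return 0;
}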
2669 +diff --git a/include/linux/tpm.h b/include/linux/tpm.h
2670 +index bcdd3790e94d..06639fb6ab85 100644
2671 +--- a/include/linux/tpm.h
2672 ++++ b/include/linux/tpm.h
2673 +@@ -44,7 +44,7 @@ struct tpm_class_ops {
2674 + bool (*update_timeouts)(struct tpm_chip *chip,
2675 + unsigned long *timeout_cap);
2676 + int (*request_locality)(struct tpm_chip *chip, int loc);
2677 +- void (*relinquish_locality)(struct tpm_chip *chip, int loc);
2678 ++ int (*relinquish_locality)(struct tpm_chip *chip, int loc);
2679 + void (*clk_enable)(struct tpm_chip *chip, bool value);
2680 + };
2681 +
2682 +diff --git a/include/net/ife.h b/include/net/ife.h
2683 +index 44b9c00f7223..e117617e3c34 100644
2684 +--- a/include/net/ife.h
2685 ++++ b/include/net/ife.h
2686 +@@ -12,7 +12,8 @@
2687 + void *ife_encode(struct sk_buff *skb, u16 metalen);
2688 + void *ife_decode(struct sk_buff *skb, u16 *metalen);
2689 +
2690 +-void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen);
2691 ++void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
2692 ++ u16 *dlen, u16 *totlen);
2693 + int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
2694 + const void *dval);
2695 +
2696 +diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
2697 +index 5c40f118c0fa..df528a623548 100644
2698 +--- a/include/net/llc_conn.h
2699 ++++ b/include/net/llc_conn.h
2700 +@@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
2701 +
2702 + struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
2703 + struct proto *prot, int kern);
2704 ++void llc_sk_stop_all_timers(struct sock *sk, bool sync);
2705 + void llc_sk_free(struct sock *sk);
2706 +
2707 + void llc_sk_reset(struct sock *sk);
2708 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
2709 +index 102160ff5c66..ea619021d901 100644
2710 +--- a/kernel/kprobes.c
2711 ++++ b/kernel/kprobes.c
2712 +@@ -2428,7 +2428,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2713 + struct kprobe_blacklist_entry *ent =
2714 + list_entry(v, struct kprobe_blacklist_entry, list);
2715 +
2716 +- seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
2717 ++ seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2718 + (void *)ent->end_addr, (void *)ent->start_addr);
2719 + return 0;
2720 + }
2721 +diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
2722 +index e954ae3d82c0..e3a658bac10f 100644
2723 +--- a/kernel/trace/trace_entries.h
2724 ++++ b/kernel/trace/trace_entries.h
2725 +@@ -356,7 +356,7 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
2726 + __field( unsigned int, seqnum )
2727 + ),
2728 +
2729 +- F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n",
2730 ++ F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llu\tnmi-ts:%llu\tnmi-count:%u\n",
2731 + __entry->seqnum,
2732 + __entry->tv_sec,
2733 + __entry->tv_nsec,
2734 +diff --git a/net/core/dev.c b/net/core/dev.c
2735 +index c4aa2941dbfd..3e550507e9f0 100644
2736 +--- a/net/core/dev.c
2737 ++++ b/net/core/dev.c
2738 +@@ -2942,7 +2942,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
2739 + }
2740 + EXPORT_SYMBOL(passthru_features_check);
2741 +
2742 +-static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2743 ++static netdev_features_t dflt_features_check(struct sk_buff *skb,
2744 + struct net_device *dev,
2745 + netdev_features_t features)
2746 + {
2747 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2748 +index 7b7a14abba28..ce519861be59 100644
2749 +--- a/net/core/neighbour.c
2750 ++++ b/net/core/neighbour.c
2751 +@@ -55,7 +55,8 @@ static void neigh_timer_handler(struct timer_list *t);
2752 + static void __neigh_notify(struct neighbour *n, int type, int flags,
2753 + u32 pid);
2754 + static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
2755 +-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
2756 ++static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
2757 ++ struct net_device *dev);
2758 +
2759 + #ifdef CONFIG_PROC_FS
2760 + static const struct file_operations neigh_stat_seq_fops;
2761 +@@ -291,8 +292,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
2762 + {
2763 + write_lock_bh(&tbl->lock);
2764 + neigh_flush_dev(tbl, dev);
2765 +- pneigh_ifdown(tbl, dev);
2766 +- write_unlock_bh(&tbl->lock);
2767 ++ pneigh_ifdown_and_unlock(tbl, dev);
2768 +
2769 + del_timer_sync(&tbl->proxy_timer);
2770 + pneigh_queue_purge(&tbl->proxy_queue);
2771 +@@ -681,9 +681,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
2772 + return -ENOENT;
2773 + }
2774 +
2775 +-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
2776 ++static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
2777 ++ struct net_device *dev)
2778 + {
2779 +- struct pneigh_entry *n, **np;
2780 ++ struct pneigh_entry *n, **np, *freelist = NULL;
2781 + u32 h;
2782 +
2783 + for (h = 0; h <= PNEIGH_HASHMASK; h++) {
2784 +@@ -691,16 +692,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
2785 + while ((n = *np) != NULL) {
2786 + if (!dev || n->dev == dev) {
2787 + *np = n->next;
2788 +- if (tbl->pdestructor)
2789 +- tbl->pdestructor(n);
2790 +- if (n->dev)
2791 +- dev_put(n->dev);
2792 +- kfree(n);
2793 ++ n->next = freelist;
2794 ++ freelist = n;
2795 + continue;
2796 + }
2797 + np = &n->next;
2798 + }
2799 + }
2800 ++ write_unlock_bh(&tbl->lock);
2801 ++ while ((n = freelist)) {
2802 ++ freelist = n->next;
2803 ++ n->next = NULL;
2804 ++ if (tbl->pdestructor)
2805 ++ tbl->pdestructor(n);
2806 ++ if (n->dev)
2807 ++ dev_put(n->dev);
2808 ++ kfree(n);
2809 ++ }
2810 + return -ENOENT;
2811 + }
2812 +
2813 +@@ -2323,12 +2331,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2814 +
2815 + err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
2816 + if (!err) {
2817 +- if (tb[NDA_IFINDEX])
2818 ++ if (tb[NDA_IFINDEX]) {
2819 ++ if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
2820 ++ return -EINVAL;
2821 + filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2822 +-
2823 +- if (tb[NDA_MASTER])
2824 ++ }
2825 ++ if (tb[NDA_MASTER]) {
2826 ++ if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
2827 ++ return -EINVAL;
2828 + filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2829 +-
2830 ++ }
2831 + if (filter_idx || filter_master_idx)
2832 + flags |= NLM_F_DUMP_FILTERED;
2833 + }
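The neighbour.c rework unlinks matching proxy entries onto a private freelist while holding tbl->lock, then drops the lock before calling the destructors and kfree(), so cleanup callbacks no longer run under the table write lock. (The dump-path hunk separately validates netlink attribute sizes before nla_get_u32().) A userspace sketch of the unlink-under-lock, free-after-unlock pattern, with a mutex standing in for the write lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int           key;
	struct entry *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;

/* Unlink matching entries under the lock; defer the (potentially
 * slow or re-entrant) cleanup until after the lock is dropped. */
static void remove_matching(int key)
{
	struct entry **np, *n, *freelist = NULL;

	pthread_mutex_lock(&lock);
	np = &table;
	while ((n = *np) != NULL) {
		if (n->key == key) {
			*np = n->next;
			n->next = freelist;
			freelist = n;
			continue;
		}
		np = &n->next;
	}
	pthread_mutex_unlock(&lock);

	while ((n = freelist) != NULL) {
		freelist = n->next;
		printf("destroying %d outside the lock\n", n->key);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct entry *e = malloc(sizeof(*e));

		if (!e)
			return 1;
		e->key = i % 2;
		e->next = table;
		table = e;
	}
	remove_matching(1);
	remove_matching(0);
	return 0;
}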
2834 +diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
2835 +index e1d4d898a007..ed372d550137 100644
2836 +--- a/net/dns_resolver/dns_key.c
2837 ++++ b/net/dns_resolver/dns_key.c
2838 +@@ -91,9 +91,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
2839 +
2840 + next_opt = memchr(opt, '#', end - opt) ?: end;
2841 + opt_len = next_opt - opt;
2842 +- if (!opt_len) {
2843 +- printk(KERN_WARNING
2844 +- "Empty option to dns_resolver key\n");
2845 ++ if (opt_len <= 0 || opt_len > 128) {
2846 ++ pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
2847 ++ opt_len);
2848 + return -EINVAL;
2849 + }
2850 +
2851 +@@ -127,10 +127,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
2852 + }
2853 +
2854 + bad_option_value:
2855 +- printk(KERN_WARNING
2856 +- "Option '%*.*s' to dns_resolver key:"
2857 +- " bad/missing value\n",
2858 +- opt_nlen, opt_nlen, opt);
2859 ++ pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
2860 ++ opt_nlen, opt_nlen, opt);
2861 + return -EINVAL;
2862 + } while (opt = next_opt + 1, opt < end);
2863 + }
2864 +diff --git a/net/ife/ife.c b/net/ife/ife.c
2865 +index 7d1ec76e7f43..13bbf8cb6a39 100644
2866 +--- a/net/ife/ife.c
2867 ++++ b/net/ife/ife.c
2868 +@@ -69,6 +69,9 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen)
2869 + int total_pull;
2870 + u16 ifehdrln;
2871 +
2872 ++ if (!pskb_may_pull(skb, skb->dev->hard_header_len + IFE_METAHDRLEN))
2873 ++ return NULL;
2874 ++
2875 + ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len);
2876 + ifehdrln = ntohs(ifehdr->metalen);
2877 + total_pull = skb->dev->hard_header_len + ifehdrln;
2878 +@@ -92,12 +95,43 @@ struct meta_tlvhdr {
2879 + __be16 len;
2880 + };
2881 +
2882 ++static bool __ife_tlv_meta_valid(const unsigned char *skbdata,
2883 ++ const unsigned char *ifehdr_end)
2884 ++{
2885 ++ const struct meta_tlvhdr *tlv;
2886 ++ u16 tlvlen;
2887 ++
2888 ++ if (unlikely(skbdata + sizeof(*tlv) > ifehdr_end))
2889 ++ return false;
2890 ++
2891 ++ tlv = (const struct meta_tlvhdr *)skbdata;
2892 ++ tlvlen = ntohs(tlv->len);
2893 ++
2894 ++ /* tlv length field includes the header; check the minimum */
2895 ++ if (tlvlen < NLA_HDRLEN)
2896 ++ return false;
2897 ++
2898 ++ /* overflow by NLA_ALIGN check */
2899 ++ if (NLA_ALIGN(tlvlen) < tlvlen)
2900 ++ return false;
2901 ++
2902 ++ if (unlikely(skbdata + NLA_ALIGN(tlvlen) > ifehdr_end))
2903 ++ return false;
2904 ++
2905 ++ return true;
2906 ++}
2907 ++
2908 + /* Caller takes care of presenting data in network order
2909 + */
2910 +-void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen)
2911 ++void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
2912 ++ u16 *dlen, u16 *totlen)
2913 + {
2914 +- struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata;
2915 ++ struct meta_tlvhdr *tlv;
2916 ++
2917 ++ if (!__ife_tlv_meta_valid(skbdata, ifehdr_end))
2918 ++ return NULL;
2919 +
2920 ++ tlv = (struct meta_tlvhdr *)skbdata;
2921 + *dlen = ntohs(tlv->len) - NLA_HDRLEN;
2922 + *attrtype = ntohs(tlv->type);
2923 +
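__ife_tlv_meta_valid() is a textbook TLV bounds check: the header must fit before it is read, the declared length must at least cover the header, and the aligned record must not run past the enclosing buffer. A standalone sketch of the same checks (host byte order and 4-byte alignment assumed for brevity; the kernel code uses ntohs() and NLA_ALIGN):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TLV_HDRLEN 4          /* type + length, both 16-bit */
#define ALIGN4(x)  (((x) + 3u) & ~3u)

/* Validate one TLV against the end of its enclosing buffer:
 * header must fit, the length must cover at least the header,
 * and the padded record must not run past the buffer. */
static bool tlv_valid(const uint8_t *p, const uint8_t *end)
{
	uint16_t type, len;

	if (end - p < TLV_HDRLEN)
		return false;
	memcpy(&type, p, 2);
	memcpy(&len, p + 2, 2);
	if (len < TLV_HDRLEN)
		return false;
	if (ALIGN4(len) > (size_t)(end - p))
		return false;
	return true;
}

int main(void)
{
	uint8_t buf[8] = { 0 };
	uint16_t len = 8;

	memcpy(buf + 2, &len, 2);
	printf("%d\n", tlv_valid(buf, buf + 8));    /* 1 */
	len = 12;                                   /* claims more than buffer */
	memcpy(buf + 2, &len, 2);
	printf("%d\n", tlv_valid(buf, buf + 8));    /* 0 */
	return 0;
}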
2924 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2925 +index 8b8059b7af4d..1ab8733dac5f 100644
2926 +--- a/net/ipv4/tcp.c
2927 ++++ b/net/ipv4/tcp.c
2928 +@@ -2385,6 +2385,7 @@ void tcp_write_queue_purge(struct sock *sk)
2929 + INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
2930 + sk_mem_reclaim(sk);
2931 + tcp_clear_all_retrans_hints(tcp_sk(sk));
2932 ++ tcp_sk(sk)->packets_out = 0;
2933 + }
2934 +
2935 + int tcp_disconnect(struct sock *sk, int flags)
2936 +@@ -2434,7 +2435,6 @@ int tcp_disconnect(struct sock *sk, int flags)
2937 + icsk->icsk_backoff = 0;
2938 + tp->snd_cwnd = 2;
2939 + icsk->icsk_probes_out = 0;
2940 +- tp->packets_out = 0;
2941 + tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2942 + tp->snd_cwnd_cnt = 0;
2943 + tp->window_clamp = 0;
2944 +@@ -2830,8 +2830,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2945 + #ifdef CONFIG_TCP_MD5SIG
2946 + case TCP_MD5SIG:
2947 + case TCP_MD5SIG_EXT:
2948 +- /* Read the IP->Key mappings from userspace */
2949 +- err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
2950 ++ if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
2951 ++ err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
2952 ++ else
2953 ++ err = -EINVAL;
2954 + break;
2955 + #endif
2956 + case TCP_USER_TIMEOUT:
2957 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2958 +index ff6cd98ce8d5..31ca27fdde66 100644
2959 +--- a/net/ipv4/tcp_input.c
2960 ++++ b/net/ipv4/tcp_input.c
2961 +@@ -3871,11 +3871,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
2962 + int length = (th->doff << 2) - sizeof(*th);
2963 + const u8 *ptr = (const u8 *)(th + 1);
2964 +
2965 +- /* If the TCP option is too short, we can short cut */
2966 +- if (length < TCPOLEN_MD5SIG)
2967 +- return NULL;
2968 +-
2969 +- while (length > 0) {
2970 ++ /* If not enough data remaining, we can short cut */
2971 ++ while (length >= TCPOLEN_MD5SIG) {
2972 + int opcode = *ptr++;
2973 + int opsize;
2974 +
2975 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2976 +index fc74352fac12..74a2e37412b2 100644
2977 +--- a/net/ipv6/route.c
2978 ++++ b/net/ipv6/route.c
2979 +@@ -3862,6 +3862,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2980 +
2981 + static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2982 + [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2983 ++ [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
2984 + [RTA_OIF] = { .type = NLA_U32 },
2985 + [RTA_IIF] = { .type = NLA_U32 },
2986 + [RTA_PRIORITY] = { .type = NLA_U32 },
2987 +@@ -3873,6 +3874,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2988 + [RTA_EXPIRES] = { .type = NLA_U32 },
2989 + [RTA_UID] = { .type = NLA_U32 },
2990 + [RTA_MARK] = { .type = NLA_U32 },
2991 ++ [RTA_TABLE] = { .type = NLA_U32 },
2992 + };
2993 +
2994 + static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2995 +diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
2996 +index f343e6f0fc95..5fe139484919 100644
2997 +--- a/net/ipv6/seg6_iptunnel.c
2998 ++++ b/net/ipv6/seg6_iptunnel.c
2999 +@@ -136,7 +136,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
3000 + isrh->nexthdr = proto;
3001 +
3002 + hdr->daddr = isrh->segments[isrh->first_segment];
3003 +- set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr);
3004 ++ set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
3005 +
3006 + #ifdef CONFIG_IPV6_SEG6_HMAC
3007 + if (sr_has_hmac(isrh)) {
3008 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
3009 +index 0fbd3ee26165..40261cb68e83 100644
3010 +--- a/net/l2tp/l2tp_core.c
3011 ++++ b/net/l2tp/l2tp_core.c
3012 +@@ -183,6 +183,26 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
3013 + }
3014 + EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
3015 +
3016 ++struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
3017 ++{
3018 ++ const struct l2tp_net *pn = l2tp_pernet(net);
3019 ++ struct l2tp_tunnel *tunnel;
3020 ++ int count = 0;
3021 ++
3022 ++ rcu_read_lock_bh();
3023 ++ list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
3024 ++ if (++count > nth) {
3025 ++ l2tp_tunnel_inc_refcount(tunnel);
3026 ++ rcu_read_unlock_bh();
3027 ++ return tunnel;
3028 ++ }
3029 ++ }
3030 ++ rcu_read_unlock_bh();
3031 ++
3032 ++ return NULL;
3033 ++}
3034 ++EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
3035 ++
3036 + /* Lookup a session. A new reference is held on the returned session. */
3037 + struct l2tp_session *l2tp_session_get(const struct net *net,
3038 + struct l2tp_tunnel *tunnel,
3039 +@@ -335,26 +355,6 @@ int l2tp_session_register(struct l2tp_session *session,
3040 + }
3041 + EXPORT_SYMBOL_GPL(l2tp_session_register);
3042 +
3043 +-struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
3044 +-{
3045 +- struct l2tp_net *pn = l2tp_pernet(net);
3046 +- struct l2tp_tunnel *tunnel;
3047 +- int count = 0;
3048 +-
3049 +- rcu_read_lock_bh();
3050 +- list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
3051 +- if (++count > nth) {
3052 +- rcu_read_unlock_bh();
3053 +- return tunnel;
3054 +- }
3055 +- }
3056 +-
3057 +- rcu_read_unlock_bh();
3058 +-
3059 +- return NULL;
3060 +-}
3061 +-EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
3062 +-
3063 + /*****************************************************************************
3064 + * Receive data handling
3065 + *****************************************************************************/
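l2tp_tunnel_get_nth() above is the pattern this whole l2tp series is built on: an object found under rcu_read_lock() may be freed the instant the read-side section ends, so a reference must be taken before unlocking and handed to the caller. A generic sketch of the shape, with a hypothetical obj type (the hunk uses the _bh lock variants, elided here):

    #include <linux/rculist.h>
    #include <linux/refcount.h>

    struct obj {
            struct list_head list;
            refcount_t ref;
    };

    static struct obj *obj_get_nth(struct list_head *head, int nth)
    {
            struct obj *o;
            int count = 0;

            rcu_read_lock();
            list_for_each_entry_rcu(o, head, list) {
                    if (++count > nth) {
                            refcount_inc(&o->ref);  /* pin before unlock */
                            rcu_read_unlock();
                            return o;
                    }
            }
            rcu_read_unlock();
            return NULL;    /* fewer than nth + 1 entries */
    }

The removed l2tp_tunnel_find_nth() differed only in skipping that refcount_inc(), which is exactly the use-after-free window being closed.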
3066 +diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
3067 +index ba33cbec71eb..c199020f8a8a 100644
3068 +--- a/net/l2tp/l2tp_core.h
3069 ++++ b/net/l2tp/l2tp_core.h
3070 +@@ -212,6 +212,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
3071 + }
3072 +
3073 + struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
3074 ++struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth);
3075 ++
3076 + void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
3077 +
3078 + struct l2tp_session *l2tp_session_get(const struct net *net,
3079 +@@ -220,7 +222,6 @@ struct l2tp_session *l2tp_session_get(const struct net *net,
3080 + struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
3081 + struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
3082 + const char *ifname);
3083 +-struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
3084 +
3085 + int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
3086 + u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
3087 +diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
3088 +index 72e713da4733..7f1e842ef05a 100644
3089 +--- a/net/l2tp/l2tp_debugfs.c
3090 ++++ b/net/l2tp/l2tp_debugfs.c
3091 +@@ -47,7 +47,11 @@ struct l2tp_dfs_seq_data {
3092 +
3093 + static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
3094 + {
3095 +- pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx);
3096 ++ /* Drop reference taken during previous invocation */
3097 ++ if (pd->tunnel)
3098 ++ l2tp_tunnel_dec_refcount(pd->tunnel);
3099 ++
3100 ++ pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx);
3101 + pd->tunnel_idx++;
3102 + }
3103 +
3104 +@@ -96,7 +100,17 @@ static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos)
3105 +
3106 + static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
3107 + {
3108 +- /* nothing to do */
3109 ++ struct l2tp_dfs_seq_data *pd = v;
3110 ++
3111 ++ if (!pd || pd == SEQ_START_TOKEN)
3112 ++ return;
3113 ++
3114 ++ /* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */
3115 ++ if (pd->tunnel) {
3116 ++ l2tp_tunnel_dec_refcount(pd->tunnel);
3117 ++ pd->tunnel = NULL;
3118 ++ pd->session = NULL;
3119 ++ }
3120 + }
3121 +
3122 + static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
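The debugfs change leans on a seq_file guarantee worth stating: a dump can stop early (short read, seek, error), and ->stop() is the one callback that always runs at the end of a pass, so a reference carried between ->next() calls must be dropped there as well. A minimal stop handler in that style; demo_iter and obj_put() are hypothetical stand-ins:

    static void demo_seq_stop(struct seq_file *m, void *v)
    {
            struct demo_iter *it = v;

            if (!it || it == SEQ_START_TOKEN)
                    return;
            if (it->obj) {          /* ref held since the last ->next() */
                    obj_put(it->obj);
                    it->obj = NULL;
            }
    }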
3123 +diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
3124 +index b05dbd9ffcb2..6616c9fd292f 100644
3125 +--- a/net/l2tp/l2tp_netlink.c
3126 ++++ b/net/l2tp/l2tp_netlink.c
3127 +@@ -487,14 +487,17 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback
3128 + struct net *net = sock_net(skb->sk);
3129 +
3130 + for (;;) {
3131 +- tunnel = l2tp_tunnel_find_nth(net, ti);
3132 ++ tunnel = l2tp_tunnel_get_nth(net, ti);
3133 + if (tunnel == NULL)
3134 + goto out;
3135 +
3136 + if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
3137 + cb->nlh->nlmsg_seq, NLM_F_MULTI,
3138 +- tunnel, L2TP_CMD_TUNNEL_GET) < 0)
3139 ++ tunnel, L2TP_CMD_TUNNEL_GET) < 0) {
3140 ++ l2tp_tunnel_dec_refcount(tunnel);
3141 + goto out;
3142 ++ }
3143 ++ l2tp_tunnel_dec_refcount(tunnel);
3144 +
3145 + ti++;
3146 + }
3147 +@@ -848,7 +851,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
3148 +
3149 + for (;;) {
3150 + if (tunnel == NULL) {
3151 +- tunnel = l2tp_tunnel_find_nth(net, ti);
3152 ++ tunnel = l2tp_tunnel_get_nth(net, ti);
3153 + if (tunnel == NULL)
3154 + goto out;
3155 + }
3156 +@@ -856,6 +859,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
3157 + session = l2tp_session_get_nth(tunnel, si);
3158 + if (session == NULL) {
3159 + ti++;
3160 ++ l2tp_tunnel_dec_refcount(tunnel);
3161 + tunnel = NULL;
3162 + si = 0;
3163 + continue;
3164 +@@ -865,6 +869,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
3165 + cb->nlh->nlmsg_seq, NLM_F_MULTI,
3166 + session, L2TP_CMD_SESSION_GET) < 0) {
3167 + l2tp_session_dec_refcount(session);
3168 ++ l2tp_tunnel_dec_refcount(tunnel);
3169 + break;
3170 + }
3171 + l2tp_session_dec_refcount(session);
3172 +diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
3173 +index 3d7887cc599b..0c4530ad74be 100644
3174 +--- a/net/l2tp/l2tp_ppp.c
3175 ++++ b/net/l2tp/l2tp_ppp.c
3176 +@@ -619,6 +619,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
3177 + lock_sock(sk);
3178 +
3179 + error = -EINVAL;
3180 ++
3181 ++ if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
3182 ++ sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
3183 ++ sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
3184 ++ sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
3185 ++ goto end;
3186 ++
3187 + if (sp->sa_protocol != PX_PROTO_OL2TP)
3188 + goto end;
3189 +
3190 +@@ -1552,16 +1559,19 @@ struct pppol2tp_seq_data {
3191 +
3192 + static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
3193 + {
3194 ++ /* Drop reference taken during previous invocation */
3195 ++ if (pd->tunnel)
3196 ++ l2tp_tunnel_dec_refcount(pd->tunnel);
3197 ++
3198 + for (;;) {
3199 +- pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx);
3200 ++ pd->tunnel = l2tp_tunnel_get_nth(net, pd->tunnel_idx);
3201 + pd->tunnel_idx++;
3202 +
3203 +- if (pd->tunnel == NULL)
3204 +- break;
3205 ++ /* Only accept L2TPv2 tunnels */
3206 ++ if (!pd->tunnel || pd->tunnel->version == 2)
3207 ++ return;
3208 +
3209 +- /* Ignore L2TPv3 tunnels */
3210 +- if (pd->tunnel->version < 3)
3211 +- break;
3212 ++ l2tp_tunnel_dec_refcount(pd->tunnel);
3213 + }
3214 + }
3215 +
3216 +@@ -1610,7 +1620,17 @@ static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
3217 +
3218 + static void pppol2tp_seq_stop(struct seq_file *p, void *v)
3219 + {
3220 +- /* nothing to do */
3221 ++ struct pppol2tp_seq_data *pd = v;
3222 ++
3223 ++ if (!pd || pd == SEQ_START_TOKEN)
3224 ++ return;
3225 ++
3226 ++ /* Drop reference taken by last invocation of pppol2tp_next_tunnel() */
3227 ++ if (pd->tunnel) {
3228 ++ l2tp_tunnel_dec_refcount(pd->tunnel);
3229 ++ pd->tunnel = NULL;
3230 ++ pd->session = NULL;
3231 ++ }
3232 + }
3233 +
3234 + static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
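With every lookup now returning a referenced tunnel, pppol2tp_next_tunnel() also has to release the tunnels it filters out, otherwise each skipped L2TPv3 tunnel would leak a reference and never be freed. The loop shape, reusing the hypothetical obj helpers from the earlier sketch plus an assumed version field and obj_put():

    static struct obj *obj_get_nth_v2(struct list_head *head, int *idx)
    {
            struct obj *o;

            for (;;) {
                    o = obj_get_nth(head, (*idx)++);
                    if (!o || o->version == 2)
                            return o;       /* caller owns the reference */
                    obj_put(o);             /* rejected entry: drop its ref */
            }
    }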
3235 +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
3236 +index c38d16f22d2a..cf41d9b4a0b8 100644
3237 +--- a/net/llc/af_llc.c
3238 ++++ b/net/llc/af_llc.c
3239 +@@ -199,9 +199,19 @@ static int llc_ui_release(struct socket *sock)
3240 + llc->laddr.lsap, llc->daddr.lsap);
3241 + if (!llc_send_disc(sk))
3242 + llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
3243 +- if (!sock_flag(sk, SOCK_ZAPPED))
3244 ++ if (!sock_flag(sk, SOCK_ZAPPED)) {
3245 ++ struct llc_sap *sap = llc->sap;
3246 ++
3247 ++ /* Hold this for release_sock(), so that llc_backlog_rcv()
3248 ++ * could still use it.
3249 ++ */
3250 ++ llc_sap_hold(sap);
3251 + llc_sap_remove_socket(llc->sap, sk);
3252 +- release_sock(sk);
3253 ++ release_sock(sk);
3254 ++ llc_sap_put(sap);
3255 ++ } else {
3256 ++ release_sock(sk);
3257 ++ }
3258 + if (llc->dev)
3259 + dev_put(llc->dev);
3260 + sock_put(sk);
3261 +diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
3262 +index 163121192aca..4d78375f9872 100644
3263 +--- a/net/llc/llc_c_ac.c
3264 ++++ b/net/llc/llc_c_ac.c
3265 +@@ -1099,14 +1099,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
3266 +
3267 + int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
3268 + {
3269 +- struct llc_sock *llc = llc_sk(sk);
3270 +-
3271 +- del_timer(&llc->pf_cycle_timer.timer);
3272 +- del_timer(&llc->ack_timer.timer);
3273 +- del_timer(&llc->rej_sent_timer.timer);
3274 +- del_timer(&llc->busy_state_timer.timer);
3275 +- llc->ack_must_be_send = 0;
3276 +- llc->ack_pf = 0;
3277 ++ llc_sk_stop_all_timers(sk, false);
3278 + return 0;
3279 + }
3280 +
3281 +diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
3282 +index 110e32bcb399..c0ac522b48a1 100644
3283 +--- a/net/llc/llc_conn.c
3284 ++++ b/net/llc/llc_conn.c
3285 +@@ -961,6 +961,26 @@ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct pr
3286 + return sk;
3287 + }
3288 +
3289 ++void llc_sk_stop_all_timers(struct sock *sk, bool sync)
3290 ++{
3291 ++ struct llc_sock *llc = llc_sk(sk);
3292 ++
3293 ++ if (sync) {
3294 ++ del_timer_sync(&llc->pf_cycle_timer.timer);
3295 ++ del_timer_sync(&llc->ack_timer.timer);
3296 ++ del_timer_sync(&llc->rej_sent_timer.timer);
3297 ++ del_timer_sync(&llc->busy_state_timer.timer);
3298 ++ } else {
3299 ++ del_timer(&llc->pf_cycle_timer.timer);
3300 ++ del_timer(&llc->ack_timer.timer);
3301 ++ del_timer(&llc->rej_sent_timer.timer);
3302 ++ del_timer(&llc->busy_state_timer.timer);
3303 ++ }
3304 ++
3305 ++ llc->ack_must_be_send = 0;
3306 ++ llc->ack_pf = 0;
3307 ++}
3308 ++
3309 + /**
3310 + * llc_sk_free - Frees a LLC socket
3311 + * @sk - socket to free
3312 +@@ -973,7 +993,7 @@ void llc_sk_free(struct sock *sk)
3313 +
3314 + llc->state = LLC_CONN_OUT_OF_SVC;
3315 + /* Stop all (possibly) running timers */
3316 +- llc_conn_ac_stop_all_timers(sk, NULL);
3317 ++ llc_sk_stop_all_timers(sk, true);
3318 + #ifdef DEBUG_LLC_CONN_ALLOC
3319 + printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
3320 + skb_queue_len(&llc->pdu_unack_q),
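The sync flag added here encodes the standard timer-teardown rule: del_timer() only deactivates a pending timer and can return while its handler still runs on another CPU, whereas del_timer_sync() also waits for a running handler to finish. The free path must use the sync variant so the handler cannot touch freed memory, while llc_conn_ac_stop_all_timers() runs in contexts where waiting is not safe, hence one helper with two modes. Condensed:

    static void demo_stop_timer(struct timer_list *t, bool sync)
    {
            if (sync)
                    del_timer_sync(t);      /* safe to free the object afterwards */
            else
                    del_timer(t);           /* handler may still be running */
    }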
3321 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3322 +index e0f3f4aeeb4f..3b43b1fcd618 100644
3323 +--- a/net/packet/af_packet.c
3324 ++++ b/net/packet/af_packet.c
3325 +@@ -329,11 +329,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
3326 + skb_set_queue_mapping(skb, queue_index);
3327 + }
3328 +
3329 +-/* register_prot_hook must be invoked with the po->bind_lock held,
3330 ++/* __register_prot_hook must be invoked through register_prot_hook
3331 + * or from a context in which asynchronous accesses to the packet
3332 + * socket is not possible (packet_create()).
3333 + */
3334 +-static void register_prot_hook(struct sock *sk)
3335 ++static void __register_prot_hook(struct sock *sk)
3336 + {
3337 + struct packet_sock *po = pkt_sk(sk);
3338 +
3339 +@@ -348,8 +348,13 @@ static void register_prot_hook(struct sock *sk)
3340 + }
3341 + }
3342 +
3343 +-/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
3344 +- * held. If the sync parameter is true, we will temporarily drop
3345 ++static void register_prot_hook(struct sock *sk)
3346 ++{
3347 ++ lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
3348 ++ __register_prot_hook(sk);
3349 ++}
3350 ++
3351 ++/* If the sync parameter is true, we will temporarily drop
3352 + * the po->bind_lock and do a synchronize_net to make sure no
3353 + * asynchronous packet processing paths still refer to the elements
3354 + * of po->prot_hook. If the sync parameter is false, it is the
3355 +@@ -359,6 +364,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
3356 + {
3357 + struct packet_sock *po = pkt_sk(sk);
3358 +
3359 ++ lockdep_assert_held_once(&po->bind_lock);
3360 ++
3361 + po->running = 0;
3362 +
3363 + if (po->fanout)
3364 +@@ -3008,6 +3015,7 @@ static int packet_release(struct socket *sock)
3365 +
3366 + packet_flush_mclist(sk);
3367 +
3368 ++ lock_sock(sk);
3369 + if (po->rx_ring.pg_vec) {
3370 + memset(&req_u, 0, sizeof(req_u));
3371 + packet_set_ring(sk, &req_u, 1, 0);
3372 +@@ -3017,6 +3025,7 @@ static int packet_release(struct socket *sock)
3373 + memset(&req_u, 0, sizeof(req_u));
3374 + packet_set_ring(sk, &req_u, 1, 1);
3375 + }
3376 ++ release_sock(sk);
3377 +
3378 + f = fanout_release(sk);
3379 +
3380 +@@ -3250,7 +3259,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
3381 +
3382 + if (proto) {
3383 + po->prot_hook.type = proto;
3384 +- register_prot_hook(sk);
3385 ++ __register_prot_hook(sk);
3386 + }
3387 +
3388 + mutex_lock(&net->packet.sklist_lock);
3389 +@@ -3645,6 +3654,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3390 + union tpacket_req_u req_u;
3391 + int len;
3392 +
3393 ++ lock_sock(sk);
3394 + switch (po->tp_version) {
3395 + case TPACKET_V1:
3396 + case TPACKET_V2:
3397 +@@ -3655,12 +3665,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3398 + len = sizeof(req_u.req3);
3399 + break;
3400 + }
3401 +- if (optlen < len)
3402 +- return -EINVAL;
3403 +- if (copy_from_user(&req_u.req, optval, len))
3404 +- return -EFAULT;
3405 +- return packet_set_ring(sk, &req_u, 0,
3406 +- optname == PACKET_TX_RING);
3407 ++ if (optlen < len) {
3408 ++ ret = -EINVAL;
3409 ++ } else {
3410 ++ if (copy_from_user(&req_u.req, optval, len))
3411 ++ ret = -EFAULT;
3412 ++ else
3413 ++ ret = packet_set_ring(sk, &req_u, 0,
3414 ++ optname == PACKET_TX_RING);
3415 ++ }
3416 ++ release_sock(sk);
3417 ++ return ret;
3418 + }
3419 + case PACKET_COPY_THRESH:
3420 + {
3421 +@@ -3726,12 +3741,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3422 +
3423 + if (optlen != sizeof(val))
3424 + return -EINVAL;
3425 +- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3426 +- return -EBUSY;
3427 + if (copy_from_user(&val, optval, sizeof(val)))
3428 + return -EFAULT;
3429 +- po->tp_loss = !!val;
3430 +- return 0;
3431 ++
3432 ++ lock_sock(sk);
3433 ++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3434 ++ ret = -EBUSY;
3435 ++ } else {
3436 ++ po->tp_loss = !!val;
3437 ++ ret = 0;
3438 ++ }
3439 ++ release_sock(sk);
3440 ++ return ret;
3441 + }
3442 + case PACKET_AUXDATA:
3443 + {
3444 +@@ -3742,7 +3763,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3445 + if (copy_from_user(&val, optval, sizeof(val)))
3446 + return -EFAULT;
3447 +
3448 ++ lock_sock(sk);
3449 + po->auxdata = !!val;
3450 ++ release_sock(sk);
3451 + return 0;
3452 + }
3453 + case PACKET_ORIGDEV:
3454 +@@ -3754,7 +3777,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3455 + if (copy_from_user(&val, optval, sizeof(val)))
3456 + return -EFAULT;
3457 +
3458 ++ lock_sock(sk);
3459 + po->origdev = !!val;
3460 ++ release_sock(sk);
3461 + return 0;
3462 + }
3463 + case PACKET_VNET_HDR:
3464 +@@ -3763,15 +3788,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3465 +
3466 + if (sock->type != SOCK_RAW)
3467 + return -EINVAL;
3468 +- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3469 +- return -EBUSY;
3470 + if (optlen < sizeof(val))
3471 + return -EINVAL;
3472 + if (copy_from_user(&val, optval, sizeof(val)))
3473 + return -EFAULT;
3474 +
3475 +- po->has_vnet_hdr = !!val;
3476 +- return 0;
3477 ++ lock_sock(sk);
3478 ++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3479 ++ ret = -EBUSY;
3480 ++ } else {
3481 ++ po->has_vnet_hdr = !!val;
3482 ++ ret = 0;
3483 ++ }
3484 ++ release_sock(sk);
3485 ++ return ret;
3486 + }
3487 + case PACKET_TIMESTAMP:
3488 + {
3489 +@@ -3809,11 +3839,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3490 +
3491 + if (optlen != sizeof(val))
3492 + return -EINVAL;
3493 +- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3494 +- return -EBUSY;
3495 + if (copy_from_user(&val, optval, sizeof(val)))
3496 + return -EFAULT;
3497 +- po->tp_tx_has_off = !!val;
3498 ++
3499 ++ lock_sock(sk);
3500 ++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3501 ++ ret = -EBUSY;
3502 ++ } else {
3503 ++ po->tp_tx_has_off = !!val;
3504 ++ ret = 0;
3505 ++ }
3506 ++ release_sock(sk);
3507 + return 0;
3508 + }
3509 + case PACKET_QDISC_BYPASS:
3510 +@@ -4210,8 +4246,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3511 + /* Added to avoid minimal code churn */
3512 + struct tpacket_req *req = &req_u->req;
3513 +
3514 +- lock_sock(sk);
3515 +-
3516 + rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3517 + rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3518 +
3519 +@@ -4349,7 +4383,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3520 + if (pg_vec)
3521 + free_pg_vec(pg_vec, order, req->tp_block_nr);
3522 + out:
3523 +- release_sock(sk);
3524 + return err;
3525 + }
3526 +
3527 +diff --git a/net/packet/internal.h b/net/packet/internal.h
3528 +index a1d2b2319ae9..3bb7c5fb3bff 100644
3529 +--- a/net/packet/internal.h
3530 ++++ b/net/packet/internal.h
3531 +@@ -112,10 +112,12 @@ struct packet_sock {
3532 + int copy_thresh;
3533 + spinlock_t bind_lock;
3534 + struct mutex pg_vec_lock;
3535 +- unsigned int running:1, /* prot_hook is attached*/
3536 +- auxdata:1,
3537 ++ unsigned int running; /* bind_lock must be held */
3538 ++ unsigned int auxdata:1, /* writer must hold sock lock */
3539 + origdev:1,
3540 +- has_vnet_hdr:1;
3541 ++ has_vnet_hdr:1,
3542 ++ tp_loss:1,
3543 ++ tp_tx_has_off:1;
3544 + int pressure;
3545 + int ifindex; /* bound device */
3546 + __be16 num;
3547 +@@ -125,8 +127,6 @@ struct packet_sock {
3548 + enum tpacket_versions tp_version;
3549 + unsigned int tp_hdrlen;
3550 + unsigned int tp_reserve;
3551 +- unsigned int tp_loss:1;
3552 +- unsigned int tp_tx_has_off:1;
3553 + unsigned int tp_tstamp;
3554 + struct net_device __rcu *cached_dev;
3555 + int (*xmit)(struct sk_buff *skb);
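The internal.h rework is the heart of the af_packet fix: bitfields sharing a word are written with read-modify-write cycles on the whole word, so two writers flipping different bits under different locks can silently undo each other. running therefore gets its own word (serialized by bind_lock), and the remaining bits all move under the socket lock taken in the setsockopt paths above. Illustration with a hypothetical struct:

    struct demo_flags {
            unsigned int a:1, b:1;  /* packed into one machine word */
    };

    /* CPU0: d.a = 1;  compiles to: load word, set bit 0, store word
     * CPU1: d.b = 1;  compiles to: load word, set bit 1, store word
     * If the loads interleave, the second store overwrites the first
     * and one flag update is lost, despite a and b looking independent. */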
3556 +diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
3557 +index 5954e992685a..1d477b054f2e 100644
3558 +--- a/net/sched/act_ife.c
3559 ++++ b/net/sched/act_ife.c
3560 +@@ -652,7 +652,7 @@ static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
3561 + }
3562 + }
3563 +
3564 +- return 0;
3565 ++ return -ENOENT;
3566 + }
3567 +
3568 + static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
3569 +@@ -682,7 +682,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
3570 + u16 mtype;
3571 + u16 dlen;
3572 +
3573 +- curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL);
3574 ++ curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
3575 ++ &dlen, NULL);
3576 ++ if (!curr_data) {
3577 ++ qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
3578 ++ return TC_ACT_SHOT;
3579 ++ }
3580 +
3581 + if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
3582 + /* abuse overlimits to count when we receive metadata
3583 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
3584 +index f6d3d0c1e133..07b64719d1bc 100644
3585 +--- a/net/sctp/ipv6.c
3586 ++++ b/net/sctp/ipv6.c
3587 +@@ -521,46 +521,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
3588 + addr->v6.sin6_scope_id = 0;
3589 + }
3590 +
3591 +-/* Compare addresses exactly.
3592 +- * v4-mapped-v6 is also in consideration.
3593 +- */
3594 +-static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
3595 +- const union sctp_addr *addr2)
3596 ++static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
3597 ++ const union sctp_addr *addr2)
3598 + {
3599 + if (addr1->sa.sa_family != addr2->sa.sa_family) {
3600 + if (addr1->sa.sa_family == AF_INET &&
3601 + addr2->sa.sa_family == AF_INET6 &&
3602 +- ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
3603 +- if (addr2->v6.sin6_port == addr1->v4.sin_port &&
3604 +- addr2->v6.sin6_addr.s6_addr32[3] ==
3605 +- addr1->v4.sin_addr.s_addr)
3606 +- return 1;
3607 +- }
3608 ++ ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
3609 ++ addr2->v6.sin6_addr.s6_addr32[3] ==
3610 ++ addr1->v4.sin_addr.s_addr)
3611 ++ return 1;
3612 ++
3613 + if (addr2->sa.sa_family == AF_INET &&
3614 + addr1->sa.sa_family == AF_INET6 &&
3615 +- ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
3616 +- if (addr1->v6.sin6_port == addr2->v4.sin_port &&
3617 +- addr1->v6.sin6_addr.s6_addr32[3] ==
3618 +- addr2->v4.sin_addr.s_addr)
3619 +- return 1;
3620 +- }
3621 ++ ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
3622 ++ addr1->v6.sin6_addr.s6_addr32[3] ==
3623 ++ addr2->v4.sin_addr.s_addr)
3624 ++ return 1;
3625 ++
3626 + return 0;
3627 + }
3628 +- if (addr1->v6.sin6_port != addr2->v6.sin6_port)
3629 +- return 0;
3630 ++
3631 + if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
3632 + return 0;
3633 ++
3634 + /* If this is a linklocal address, compare the scope_id. */
3635 +- if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
3636 +- if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
3637 +- (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
3638 +- return 0;
3639 +- }
3640 +- }
3641 ++ if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
3642 ++ addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
3643 ++ addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
3644 ++ return 0;
3645 +
3646 + return 1;
3647 + }
3648 +
3649 ++/* Compare addresses exactly.
3650 ++ * v4-mapped-v6 is also in consideration.
3651 ++ */
3652 ++static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
3653 ++ const union sctp_addr *addr2)
3654 ++{
3655 ++ return __sctp_v6_cmp_addr(addr1, addr2) &&
3656 ++ addr1->v6.sin6_port == addr2->v6.sin6_port;
3657 ++}
3658 ++
3659 + /* Initialize addr struct to INADDR_ANY. */
3660 + static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
3661 + {
3662 +@@ -846,8 +849,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
3663 + const union sctp_addr *addr2,
3664 + struct sctp_sock *opt)
3665 + {
3666 +- struct sctp_af *af1, *af2;
3667 + struct sock *sk = sctp_opt2sk(opt);
3668 ++ struct sctp_af *af1, *af2;
3669 +
3670 + af1 = sctp_get_af_specific(addr1->sa.sa_family);
3671 + af2 = sctp_get_af_specific(addr2->sa.sa_family);
3672 +@@ -863,10 +866,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
3673 + if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
3674 + return 1;
3675 +
3676 +- if (addr1->sa.sa_family != addr2->sa.sa_family)
3677 +- return 0;
3678 +-
3679 +- return af1->cmp_addr(addr1, addr2);
3680 ++ return __sctp_v6_cmp_addr(addr1, addr2);
3681 + }
3682 +
3683 + /* Verify that the provided sockaddr looks bindable. Common verification,
3684 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
3685 +index 1e0d780855c3..afd5a935bbcb 100644
3686 +--- a/net/smc/af_smc.c
3687 ++++ b/net/smc/af_smc.c
3688 +@@ -1254,14 +1254,12 @@ static int smc_shutdown(struct socket *sock, int how)
3689 + rc = smc_close_shutdown_write(smc);
3690 + break;
3691 + case SHUT_RD:
3692 +- if (sk->sk_state == SMC_LISTEN)
3693 +- rc = smc_close_active(smc);
3694 +- else
3695 +- rc = 0;
3696 +- /* nothing more to do because peer is not involved */
3697 ++ rc = 0;
3698 ++ /* nothing more to do because peer is not involved */
3699 + break;
3700 + }
3701 +- rc1 = kernel_sock_shutdown(smc->clcsock, how);
3702 ++ if (smc->clcsock)
3703 ++ rc1 = kernel_sock_shutdown(smc->clcsock, how);
3704 + /* map sock_shutdown_cmd constants to sk_shutdown value range */
3705 + sk->sk_shutdown |= how + 1;
3706 +
3707 +diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
3708 +index b9283ce5cd85..092bebc70048 100644
3709 +--- a/net/strparser/strparser.c
3710 ++++ b/net/strparser/strparser.c
3711 +@@ -67,7 +67,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
3712 +
3713 + static void strp_start_timer(struct strparser *strp, long timeo)
3714 + {
3715 +- if (timeo)
3716 ++ if (timeo && timeo != LONG_MAX)
3717 + mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
3718 + }
3719 +
3720 +@@ -296,9 +296,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
3721 + strp_start_timer(strp, timeo);
3722 + }
3723 +
3724 ++ stm->accum_len += cand_len;
3725 + strp->need_bytes = stm->strp.full_len -
3726 + stm->accum_len;
3727 +- stm->accum_len += cand_len;
3728 + stm->early_eaten = cand_len;
3729 + STRP_STATS_ADD(strp->stats.bytes, cand_len);
3730 + desc->count = 0; /* Stop reading socket */
3731 +@@ -321,6 +321,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
3732 + /* Hurray, we have a new message! */
3733 + cancel_delayed_work(&strp->msg_timer_work);
3734 + strp->skb_head = NULL;
3735 ++ strp->need_bytes = 0;
3736 + STRP_STATS_INCR(strp->stats.msgs);
3737 +
3738 + /* Give skb to upper layer */
3739 +@@ -410,9 +411,7 @@ void strp_data_ready(struct strparser *strp)
3740 + return;
3741 +
3742 + if (strp->need_bytes) {
3743 +- if (strp_peek_len(strp) >= strp->need_bytes)
3744 +- strp->need_bytes = 0;
3745 +- else
3746 ++ if (strp_peek_len(strp) < strp->need_bytes)
3747 + return;
3748 + }
3749 +
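The reordering in __strp_recv() matters because need_bytes is derived from accum_len. Worked through with full_len = 100, accum_len = 40 and a fresh 20-byte chunk: the old order computed need_bytes = 100 - 40 = 60 and only then raised accum_len to 60, overstating the shortfall by the 20 bytes just consumed; the new order raises accum_len to 60 first and gets need_bytes = 100 - 60 = 40, which is what is genuinely still missing. Since strp_data_ready() now returns early whenever fewer than need_bytes are queued, a stale, too-large value could stall the stream; clearing need_bytes once a full message completes closes the same hole from the other side.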
3750 +diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
3751 +index b76f13f6fea1..d4e0bbeee727 100644
3752 +--- a/net/tipc/netlink.c
3753 ++++ b/net/tipc/netlink.c
3754 +@@ -79,7 +79,8 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
3755 +
3756 + const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
3757 + [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
3758 +- [TIPC_NLA_NET_ID] = { .type = NLA_U32 }
3759 ++ [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
3760 ++ [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
3761 + };
3762 +
3763 + const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
3764 +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
3765 +index e0fc84daed94..ad17a985f74e 100644
3766 +--- a/net/vmw_vsock/af_vsock.c
3767 ++++ b/net/vmw_vsock/af_vsock.c
3768 +@@ -2018,7 +2018,13 @@ const struct vsock_transport *vsock_core_get_transport(void)
3769 + }
3770 + EXPORT_SYMBOL_GPL(vsock_core_get_transport);
3771 +
3772 ++static void __exit vsock_exit(void)
3773 ++{
3774 ++ /* Do nothing. This function makes this module removable. */
3775 ++}
3776 ++
3777 + module_init(vsock_init_tables);
3778 ++module_exit(vsock_exit);
3779 +
3780 + MODULE_AUTHOR("VMware, Inc.");
3781 + MODULE_DESCRIPTION("VMware Virtual Socket Family");
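The empty vsock_exit() works because the module loader treats a module that declares module_init() but no module_exit() as permanent: it can never be unloaded. With nothing to tear down, an empty exit handler is all it takes to make rmmod work. The minimal pattern, as a sketch:

    #include <linux/module.h>

    static int __init demo_init(void)
    {
            return 0;
    }

    static void __exit demo_exit(void)
    {
            /* Nothing to undo; existing at all is what matters. */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");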
3782 +diff --git a/security/commoncap.c b/security/commoncap.c
3783 +index 48620c93d697..1ce701fcb3f3 100644
3784 +--- a/security/commoncap.c
3785 ++++ b/security/commoncap.c
3786 +@@ -449,6 +449,8 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
3787 + magic |= VFS_CAP_FLAGS_EFFECTIVE;
3788 + memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
3789 + cap->magic_etc = cpu_to_le32(magic);
3790 ++ } else {
3791 ++ size = -ENOMEM;
3792 + }
3793 + }
3794 + kfree(tmpbuf);