
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 06 Feb 2019 17:08:38 +0000
Message-Id: 1549472874.5594d5e8065325acf45fff423f009f6537d812bb.mpagano@gentoo
commit: 5594d5e8065325acf45fff423f009f6537d812bb
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 6 17:07:54 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Feb 6 17:07:54 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5594d5e8

proj/linux-patches: Linux patch 4.19.20

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1019_linux-4.19.20.patch | 3120 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3124 insertions(+)

diff --git a/0000_README b/0000_README
index b459632..b213e93 100644
--- a/0000_README
+++ b/0000_README
@@ -119,6 +119,10 @@ Patch: 1018_linux-4.19.19.patch
From: http://www.kernel.org
Desc: Linux 4.19.19

+Patch: 1019_linux-4.19.20.patch
+From: http://www.kernel.org
+Desc: Linux 4.19.20
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1019_linux-4.19.20.patch b/1019_linux-4.19.20.patch
new file mode 100644
index 0000000..522d50f
--- /dev/null
+++ b/1019_linux-4.19.20.patch
@@ -0,0 +1,3120 @@
+diff --git a/Makefile b/Makefile
+index 39c4e7c3c13c..f1859811dca1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 19
++SUBLEVEL = 20
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
+index 318394ed5c7a..5e11ad3164e0 100644
+--- a/arch/arm/mach-cns3xxx/pcie.c
++++ b/arch/arm/mach-cns3xxx/pcie.c
+@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
+ } else /* remote PCI bus */
+ base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
+
+- return base + (where & 0xffc) + (devfn << 12);
++ return base + where + (devfn << 12);
+ }
+
+ static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
+index 29cdc99688f3..9859e1178e6b 100644
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
+ dcache_clean_range(__idmap_text_start, __idmap_text_end);
+
+ /* Clean kvm setup code to PoC? */
+- if (el2_reset_needed())
++ if (el2_reset_needed()) {
+ dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
++ dcache_clean_range(__hyp_text_start, __hyp_text_end);
++ }
+
+ /* make the crash dump kernel image protected again */
+ crash_post_resume();
+diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
+index e1261fbaa374..17f325ba831e 100644
+--- a/arch/arm64/kernel/hyp-stub.S
++++ b/arch/arm64/kernel/hyp-stub.S
+@@ -28,6 +28,8 @@
+ #include <asm/virt.h>
+
+ .text
++ .pushsection .hyp.text, "ax"
++
+ .align 11
+
+ ENTRY(__hyp_stub_vectors)
+diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
+index ba6b41790fcd..b09b6f75f759 100644
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
+ * we end up running with module randomization disabled.
+ */
+ module_alloc_base = (u64)_etext - MODULES_VSIZE;
++ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+
+ /*
+ * Try to map the FDT early. If this fails, we simply bail,
+diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
+index 30695a868107..5c9073bace83 100644
+--- a/arch/arm64/mm/flush.c
++++ b/arch/arm64/mm/flush.c
+@@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
+ __clean_dcache_area_pou(kaddr, len);
+ __flush_icache_all();
+ } else {
+- flush_icache_range(addr, addr + len);
++ /*
++ * Don't issue kick_all_cpus_sync() after I-cache invalidation
++ * for user mappings.
++ */
++ __flush_icache_range(addr, addr + len);
+ }
+ }
+
+diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
+index 6b11f1314248..7f9e0304b510 100644
+--- a/drivers/gpio/gpio-altera-a10sr.c
++++ b/drivers/gpio/gpio-altera-a10sr.c
+@@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
+ static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int nr, int value)
+ {
+- if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT))
++ if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
++ altr_a10sr_gpio_set(gc, nr, value);
+ return 0;
++ }
+ return -EINVAL;
+ }
+
+diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
+index e0d6a0a7bc69..e41223c05f6e 100644
+--- a/drivers/gpio/gpio-eic-sprd.c
++++ b/drivers/gpio/gpio-eic-sprd.c
+@@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset)
+
+ static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
+ {
+- return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
++ struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
++
++ switch (sprd_eic->type) {
++ case SPRD_EIC_DEBOUNCE:
++ return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
++ case SPRD_EIC_ASYNC:
++ return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
++ case SPRD_EIC_SYNC:
++ return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
++ default:
++ return -ENOTSUPP;
++ }
+ }
+
+ static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
+@@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
++ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
+index adf72dda25a2..68a35b65925a 100644
+--- a/drivers/gpio/gpio-pcf857x.c
++++ b/drivers/gpio/gpio-pcf857x.c
+@@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table);
+ */
+ struct pcf857x {
+ struct gpio_chip chip;
++ struct irq_chip irqchip;
+ struct i2c_client *client;
+ struct mutex lock; /* protect 'out' */
+ unsigned out; /* software latch */
+@@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data)
+ mutex_unlock(&gpio->lock);
+ }
+
+-static struct irq_chip pcf857x_irq_chip = {
+- .name = "pcf857x",
+- .irq_enable = pcf857x_irq_enable,
+- .irq_disable = pcf857x_irq_disable,
+- .irq_ack = noop,
+- .irq_mask = noop,
+- .irq_unmask = noop,
+- .irq_set_wake = pcf857x_irq_set_wake,
+- .irq_bus_lock = pcf857x_irq_bus_lock,
+- .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
+-};
+-
+ /*-------------------------------------------------------------------------*/
+
+ static int pcf857x_probe(struct i2c_client *client,
+@@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client,
+
+ /* Enable irqchip if we have an interrupt */
+ if (client->irq) {
++ gpio->irqchip.name = "pcf857x",
++ gpio->irqchip.irq_enable = pcf857x_irq_enable,
++ gpio->irqchip.irq_disable = pcf857x_irq_disable,
++ gpio->irqchip.irq_ack = noop,
++ gpio->irqchip.irq_mask = noop,
++ gpio->irqchip.irq_unmask = noop,
++ gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
++ gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
++ gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
+ status = gpiochip_irqchip_add_nested(&gpio->chip,
+- &pcf857x_irq_chip,
++ &gpio->irqchip,
+ 0, handle_level_irq,
+ IRQ_TYPE_NONE);
+ if (status) {
+@@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client,
+ if (status)
+ goto fail;
+
+- gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip,
++ gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
+ client->irq);
+ gpio->irq_parent = client->irq;
+ }
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index a8e01d99919c..b3ab6c428423 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -817,7 +817,15 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
+ /* Do not leak kernel stack to userspace */
+ memset(&ge, 0, sizeof(ge));
+
+- ge.timestamp = le->timestamp;
++ /*
++ * We may be running from a nested threaded interrupt in which case
++ * we didn't get the timestamp from lineevent_irq_handler().
++ */
++ if (!le->timestamp)
++ ge.timestamp = ktime_get_real_ns();
++ else
++ ge.timestamp = le->timestamp;
++
+ level = gpiod_get_value_cansleep(le->desc);
+
+ if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
+diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
+index 9122ee6e55e4..1fe93920fb25 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.h
++++ b/drivers/gpu/drm/msm/msm_gpu.h
+@@ -63,7 +63,7 @@ struct msm_gpu_funcs {
+ struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
+ void (*recover)(struct msm_gpu *gpu);
+ void (*destroy)(struct msm_gpu *gpu);
+-#ifdef CONFIG_DEBUG_FS
++#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ /* show GPU status in debugfs: */
+ void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
+diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
+index 1fc75647e47b..34ffca618427 100644
+--- a/drivers/infiniband/hw/hfi1/file_ops.c
++++ b/drivers/infiniband/hw/hfi1/file_ops.c
+@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
+ vmf = 1;
+ break;
+ case STATUS:
+- if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
++ if (flags & VM_WRITE) {
+ ret = -EPERM;
+ goto done;
+ }
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 4339177629e3..2b8f5ebae821 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -5230,7 +5230,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
+ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list) {
+- if (entry->type == IOMMU_RESV_RESERVED)
++ if (entry->type == IOMMU_RESV_MSI)
+ kfree(entry);
+ }
+ }
+diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
+index e6e925add700..6518b012756f 100644
+--- a/drivers/md/raid5-cache.c
++++ b/drivers/md/raid5-cache.c
+@@ -1935,12 +1935,14 @@ out:
+ }
+
+ static struct stripe_head *
+-r5c_recovery_alloc_stripe(struct r5conf *conf,
+- sector_t stripe_sect)
++r5c_recovery_alloc_stripe(
++ struct r5conf *conf,
++ sector_t stripe_sect,
++ int noblock)
+ {
+ struct stripe_head *sh;
+
+- sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
++ sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
+ if (!sh)
+ return NULL; /* no more stripe available */
+
+@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
+ stripe_sect);
+
+ if (!sh) {
+- sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
++ sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
+ /*
+ * cannot get stripe from raid5_get_active_stripe
+ * try replay some stripes
+@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
+ r5c_recovery_replay_stripes(
+ cached_stripe_list, ctx);
+ sh = r5c_recovery_alloc_stripe(
+- conf, stripe_sect);
++ conf, stripe_sect, 1);
+ }
+ if (!sh) {
++ int new_size = conf->min_nr_stripes * 2;
+ pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
+ mdname(mddev),
+- conf->min_nr_stripes * 2);
+- raid5_set_cache_size(mddev,
+- conf->min_nr_stripes * 2);
+- sh = r5c_recovery_alloc_stripe(conf,
+- stripe_sect);
++ new_size);
++ ret = raid5_set_cache_size(mddev, new_size);
++ if (conf->min_nr_stripes <= new_size / 2) {
++ pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
++ mdname(mddev),
++ ret,
++ new_size,
++ conf->min_nr_stripes,
++ conf->max_nr_stripes);
++ return -ENOMEM;
++ }
++ sh = r5c_recovery_alloc_stripe(
++ conf, stripe_sect, 0);
+ }
+ if (!sh) {
+ pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
+- mdname(mddev));
++ mdname(mddev));
+ return -ENOMEM;
+ }
+ list_add_tail(&sh->lru, cached_stripe_list);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index e4e98f47865d..45a3551d3afd 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -6357,6 +6357,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
+ int
+ raid5_set_cache_size(struct mddev *mddev, int size)
+ {
++ int result = 0;
+ struct r5conf *conf = mddev->private;
+
+ if (size <= 16 || size > 32768)
+@@ -6373,11 +6374,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
+
+ mutex_lock(&conf->cache_size_mutex);
+ while (size > conf->max_nr_stripes)
+- if (!grow_one_stripe(conf, GFP_KERNEL))
++ if (!grow_one_stripe(conf, GFP_KERNEL)) {
++ conf->min_nr_stripes = conf->max_nr_stripes;
++ result = -ENOMEM;
+ break;
++ }
+ mutex_unlock(&conf->cache_size_mutex);
+
+- return 0;
++ return result;
+ }
+ EXPORT_SYMBOL(raid5_set_cache_size);
+
+diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
+index 768972af8b85..0d3b7473bc21 100644
+--- a/drivers/mmc/host/bcm2835.c
++++ b/drivers/mmc/host/bcm2835.c
+@@ -1427,6 +1427,8 @@ static int bcm2835_probe(struct platform_device *pdev)
+
+ err:
+ dev_dbg(dev, "%s -> err %d\n", __func__, ret);
++ if (host->dma_chan_rxtx)
++ dma_release_channel(host->dma_chan_rxtx);
+ mmc_free_host(mmc);
+
+ return ret;
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 04841386b65d..f171cce5197d 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -784,7 +784,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
+
+ if (timing == MMC_TIMING_MMC_HS400 &&
+ host->dev_comp->hs400_tune)
+- sdr_set_field(host->base + PAD_CMD_TUNE,
++ sdr_set_field(host->base + tune_reg,
+ MSDC_PAD_TUNE_CMDRRDLY,
+ host->hs400_cmd_int_delay);
+ dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
+diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
+index d0e83db42ae5..94eeed2a1b53 100644
+--- a/drivers/mmc/host/sdhci-iproc.c
++++ b/drivers/mmc/host/sdhci-iproc.c
+@@ -279,7 +279,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
+
+ iproc_host->data = iproc_data;
+
+- mmc_of_parse(host->mmc);
++ ret = mmc_of_parse(host->mmc);
++ if (ret)
++ goto err;
++
+ sdhci_get_of_property(pdev);
+
+ host->mmc->caps |= iproc_host->data->mmc_caps;
+diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
+index 22a817da861e..1e2b53a934fb 100644
+--- a/drivers/net/ethernet/freescale/ucc_geth.c
++++ b/drivers/net/ethernet/freescale/ucc_geth.c
+@@ -1888,6 +1888,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
+ u16 i, j;
+ u8 __iomem *bd;
+
++ netdev_reset_queue(ugeth->ndev);
++
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
+index babcfd9c0571..75213046563c 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
+@@ -2064,9 +2064,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ {
+ struct mlx4_cmd_mailbox *mailbox;
+ __be32 *outbox;
++ u64 qword_field;
+ u32 dword_field;
+- int err;
++ u16 word_field;
+ u8 byte_field;
++ int err;
+ static const u8 a0_dmfs_query_hw_steering[] = {
+ [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
+ [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
+@@ -2094,19 +2096,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+
+ /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+- MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
+- MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
+- MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+- MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+- MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
+- MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
+- MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+- MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+- MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
+- MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
+- MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+- MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+- MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
++ MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
++ param->qpc_base = qword_field & ~((u64)0x1f);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
++ param->log_num_qps = byte_field & 0x1f;
++ MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
++ param->srqc_base = qword_field & ~((u64)0x1f);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
++ param->log_num_srqs = byte_field & 0x1f;
++ MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
++ param->cqc_base = qword_field & ~((u64)0x1f);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
++ param->log_num_cqs = byte_field & 0x1f;
++ MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
++ param->altc_base = qword_field;
++ MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
++ param->auxc_base = qword_field;
++ MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
++ param->eqc_base = qword_field & ~((u64)0x1f);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
++ param->log_num_eqs = byte_field & 0x1f;
++ MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
++ param->num_sys_eqs = word_field & 0xfff;
++ MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
++ param->rdmarc_base = qword_field & ~((u64)0x1f);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
++ param->log_rd_per_qp = byte_field & 0x7;
+
+ MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
+ if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
+@@ -2125,22 +2140,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ /* steering attributes */
+ if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
+- MLX4_GET(param->log_mc_entry_sz, outbox,
+- INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+- MLX4_GET(param->log_mc_table_sz, outbox,
+- INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+- MLX4_GET(byte_field, outbox,
+- INIT_HCA_FS_A0_OFFSET);
++ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
++ param->log_mc_entry_sz = byte_field & 0x1f;
++ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
++ param->log_mc_table_sz = byte_field & 0x1f;
++ MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
+ param->dmfs_high_steer_mode =
+ a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
+ } else {
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+- MLX4_GET(param->log_mc_entry_sz, outbox,
+- INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+- MLX4_GET(param->log_mc_hash_sz, outbox,
+- INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+- MLX4_GET(param->log_mc_table_sz, outbox,
+- INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
++ param->log_mc_entry_sz = byte_field & 0x1f;
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
++ param->log_mc_hash_sz = byte_field & 0x1f;
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
++ param->log_mc_table_sz = byte_field & 0x1f;
+ }
+
+ /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
+@@ -2164,15 +2178,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ /* TPT attributes */
+
+ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
+- MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
+- MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
++ MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
++ param->mw_enabled = byte_field >> 7;
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
++ param->log_mpt_sz = byte_field & 0x3f;
+ MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
+ MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
+
+ /* UAR attributes */
+
+ MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+- MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
++ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
++ param->log_uar_sz = byte_field & 0xf;
+
+ /* phv_check enable */
+ MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index ea7dedc2d5ad..d6706475a3ba 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1133,13 +1133,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+ int err = 0;
+ u8 *smac_v;
+
+- if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
+- mlx5_core_warn(esw->dev,
+- "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+- vport->vport);
+- return -EPERM;
+- }
+-
+ esw_vport_cleanup_ingress_rules(esw, vport);
+
+ if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
+@@ -1696,7 +1689,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
+ int vport_num;
+ int err;
+
+- if (!MLX5_ESWITCH_MANAGER(dev))
++ if (!MLX5_VPORT_MANAGER(dev))
+ return 0;
+
+ esw_info(dev,
+@@ -1765,7 +1758,7 @@ abort:
+
+ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
+ {
+- if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
++ if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
+ return;
+
+ esw_info(esw->dev, "cleanup\n");
+@@ -1812,13 +1805,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
+ mutex_lock(&esw->state_lock);
+ evport = &esw->vports[vport];
+
+- if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
++ if (evport->info.spoofchk && !is_valid_ether_addr(mac))
+ mlx5_core_warn(esw->dev,
+- "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
++ "Set invalid MAC while spoofchk is on, vport(%d)\n",
+ vport);
+- err = -EPERM;
+- goto unlock;
+- }
+
+ err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
+ if (err) {
+@@ -1964,6 +1954,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+ evport = &esw->vports[vport];
+ pschk = evport->info.spoofchk;
+ evport->info.spoofchk = spoofchk;
++ if (pschk && !is_valid_ether_addr(evport->info.mac))
++ mlx5_core_warn(esw->dev,
++ "Spoofchk in set while MAC is invalid, vport(%d)\n",
++ evport->vport);
+ if (evport->enabled && esw->mode == SRIOV_LEGACY)
+ err = esw_vport_ingress_config(esw, evport);
+ if (err)
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index d6f753925352..8441c86d9f3b 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -344,7 +344,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
+ int i;
+
+ priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
+- ETH_HLEN + VLAN_HLEN;
++ ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
+
+ /* Allocate RX and TX skb rings */
+ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
+@@ -525,13 +525,15 @@ static void ravb_rx_csum(struct sk_buff *skb)
+ {
+ u8 *hw_csum;
+
+- /* The hardware checksum is 2 bytes appended to packet data */
+- if (unlikely(skb->len < 2))
++ /* The hardware checksum is contained in sizeof(__sum16) (2) bytes
++ * appended to packet data
++ */
++ if (unlikely(skb->len < sizeof(__sum16)))
+ return;
+- hw_csum = skb_tail_pointer(skb) - 2;
++ hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
+ skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+- skb_trim(skb, skb->len - 2);
++ skb_trim(skb, skb->len - sizeof(__sum16));
+ }
+
+ /* Packet receive function for Ethernet AVB */
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index 4a949569ec4c..5fb541897863 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -97,12 +97,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
+ err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
+ if (!err) {
+ mdev->l3mdev_ops = &ipvl_l3mdev_ops;
+- mdev->priv_flags |= IFF_L3MDEV_MASTER;
++ mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
+ } else
+ goto fail;
+ } else if (port->mode == IPVLAN_MODE_L3S) {
+ /* Old mode was L3S */
+- mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
++ mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
+ ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
+ mdev->l3mdev_ops = NULL;
+ }
+@@ -162,7 +162,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
+ struct sk_buff *skb;
+
+ if (port->mode == IPVLAN_MODE_L3S) {
+- dev->priv_flags &= ~IFF_L3MDEV_MASTER;
++ dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
+ ipvlan_unregister_nf_hook(dev_net(dev));
+ dev->l3mdev_ops = NULL;
+ }
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 33978b0cdac8..65844f28db30 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -866,8 +866,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
+ tun_napi_init(tun, tfile, napi, napi_frags);
+ }
+
+- tun_set_real_num_queues(tun);
+-
+ /* device is allowed to go away first, so no need to hold extra
+ * refcnt.
+ */
+@@ -879,6 +877,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
+ rcu_assign_pointer(tfile->tun, tun);
+ rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+ tun->numqueues++;
++ tun_set_real_num_queues(tun);
+ out:
+ return err;
+ }
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index ad14fbfa1864..42feaa4d2916 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644);
+ #define VIRTIO_XDP_TX BIT(0)
+ #define VIRTIO_XDP_REDIR BIT(1)
+
++#define VIRTIO_XDP_FLAG BIT(0)
++
+ /* RX packet size EWMA. The average packet size is used to determine the packet
+ * buffer size when refilling RX rings. As the entire RX ring may be refilled
+ * at once, the weight is chosen so that the EWMA will be insensitive to short-
+@@ -251,6 +253,21 @@ struct padded_vnet_hdr {
+ char padding[4];
+ };
+
++static bool is_xdp_frame(void *ptr)
++{
++ return (unsigned long)ptr & VIRTIO_XDP_FLAG;
++}
++
++static void *xdp_to_ptr(struct xdp_frame *ptr)
++{
++ return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
++}
++
++static struct xdp_frame *ptr_to_xdp(void *ptr)
++{
++ return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
++}
++
+ /* Converting between virtqueue no. and kernel tx/rx queue no.
+ * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
+ */
+@@ -461,7 +478,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
+
+ sg_init_one(sq->sg, xdpf->data, xdpf->len);
+
+- err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
++ err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
++ GFP_ATOMIC);
+ if (unlikely(err))
+ return -ENOSPC; /* Caller handle free/refcnt */
+
+@@ -481,36 +499,37 @@ static int virtnet_xdp_xmit(struct net_device *dev,
+ {
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct receive_queue *rq = vi->rq;
+- struct xdp_frame *xdpf_sent;
+ struct bpf_prog *xdp_prog;
+ struct send_queue *sq;
+ unsigned int len;
+ int drops = 0;
+ int kicks = 0;
+ int ret, err;
++ void *ptr;
+ int i;
+
+- sq = virtnet_xdp_sq(vi);
+-
+- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+- ret = -EINVAL;
+- drops = n;
+- goto out;
+- }
+-
+ /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
+ * indicate XDP resources have been successfully allocated.
+ */
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+- if (!xdp_prog) {
+- ret = -ENXIO;
++ if (!xdp_prog)
++ return -ENXIO;
++
++ sq = virtnet_xdp_sq(vi);
++
++ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
++ ret = -EINVAL;
+ drops = n;
+ goto out;
+ }
+
+ /* Free up any pending old buffers before queueing new ones. */
+- while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
+- xdp_return_frame(xdpf_sent);
++ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
++ if (likely(is_xdp_frame(ptr)))
++ xdp_return_frame(ptr_to_xdp(ptr));
++ else
++ napi_consume_skb(ptr, false);
++ }
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+@@ -1329,20 +1348,28 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
+ return stats.packets;
+ }
+
+-static void free_old_xmit_skbs(struct send_queue *sq)
++static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+ {
+- struct sk_buff *skb;
+ unsigned int len;
+ unsigned int packets = 0;
+ unsigned int bytes = 0;
++ void *ptr;
+
+- while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+- pr_debug("Sent skb %p\n", skb);
++ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
++ if (likely(!is_xdp_frame(ptr))) {
++ struct sk_buff *skb = ptr;
+
+- bytes += skb->len;
+- packets++;
++ pr_debug("Sent skb %p\n", skb);
++
++ bytes += skb->len;
++ napi_consume_skb(skb, in_napi);
++ } else {
++ struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+- dev_consume_skb_any(skb);
++ bytes += frame->len;
++ xdp_return_frame(frame);
++ }
++ packets++;
+ }
+
+ /* Avoid overhead when no packets have been processed
+@@ -1357,6 +1384,16 @@ static void free_old_xmit_skbs(struct send_queue *sq)
+ u64_stats_update_end(&sq->stats.syncp);
+ }
+
++static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
++{
++ if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
++ return false;
++ else if (q < vi->curr_queue_pairs)
++ return true;
++ else
++ return false;
++}
++
+ static void virtnet_poll_cleantx(struct receive_queue *rq)
+ {
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+@@ -1364,11 +1401,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
+ struct send_queue *sq = &vi->sq[index];
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
+
+- if (!sq->napi.weight)
++ if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
+ return;
+
+ if (__netif_tx_trylock(txq)) {
+- free_old_xmit_skbs(sq);
++ free_old_xmit_skbs(sq, true);
+ __netif_tx_unlock(txq);
+ }
+
+@@ -1441,10 +1478,18 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
+ {
+ struct send_queue *sq = container_of(napi, struct send_queue, napi);
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+- struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
++ unsigned int index = vq2txq(sq->vq);
++ struct netdev_queue *txq;
+
++ if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
++ /* We don't need to enable cb for XDP */
++ napi_complete_done(napi, 0);
++ return 0;
++ }
++
++ txq = netdev_get_tx_queue(vi->dev, index);
+ __netif_tx_lock(txq, raw_smp_processor_id());
+- free_old_xmit_skbs(sq);
++ free_old_xmit_skbs(sq, true);
+ __netif_tx_unlock(txq);
+
+ virtqueue_napi_complete(napi, sq->vq, 0);
+@@ -1513,7 +1558,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
+ bool use_napi = sq->napi.weight;
+
+ /* Free up any pending old buffers before queueing new ones. */
+- free_old_xmit_skbs(sq);
++ free_old_xmit_skbs(sq, false);
+
+ if (use_napi && kick)
+ virtqueue_enable_cb_delayed(sq->vq);
+@@ -1556,7 +1601,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (!use_napi &&
+ unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+ /* More just got used, free them then recheck. */
+- free_old_xmit_skbs(sq);
++ free_old_xmit_skbs(sq, false);
+ if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+ netif_start_subqueue(dev, qnum);
+ virtqueue_disable_cb(sq->vq);
+@@ -2345,6 +2390,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ return -ENOMEM;
+ }
+
++ old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
++ if (!prog && !old_prog)
++ return 0;
++
+ if (prog) {
+ prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
+ if (IS_ERR(prog))
+@@ -2352,36 +2401,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ }
+
+ /* Make sure NAPI is not using any XDP TX queues for RX. */
+- if (netif_running(dev))
+- for (i = 0; i < vi->max_queue_pairs; i++)
++ if (netif_running(dev)) {
++ for (i = 0; i < vi->max_queue_pairs; i++) {
+ napi_disable(&vi->rq[i].napi);
++ virtnet_napi_tx_disable(&vi->sq[i].napi);
++ }
++ }
++
++ if (!prog) {
++ for (i = 0; i < vi->max_queue_pairs; i++) {
++ rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
++ if (i == 0)
++ virtnet_restore_guest_offloads(vi);
++ }
++ synchronize_net();
++ }
+
+- netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
+ err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
+ if (err)
+ goto err;
++ netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
+ vi->xdp_queue_pairs = xdp_qp;
+
+- for (i = 0; i < vi->max_queue_pairs; i++) {
+- old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
+- rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+- if (i == 0) {
+- if (!old_prog)
++ if (prog) {
++ for (i = 0; i < vi->max_queue_pairs; i++) {
++ rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
++ if (i == 0 && !old_prog)
+ virtnet_clear_guest_offloads(vi);
+- if (!prog)
+- virtnet_restore_guest_offloads(vi);
+ }
++ }
++
++ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (old_prog)
+ bpf_prog_put(old_prog);
+- if (netif_running(dev))
++ if (netif_running(dev)) {
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
++ virtnet_napi_tx_enable(vi, vi->sq[i].vq,
++ &vi->sq[i].napi);
++ }
+ }
+
+ return 0;
+
+ err:
+- for (i = 0; i < vi->max_queue_pairs; i++)
+- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
++ if (!prog) {
++ virtnet_clear_guest_offloads(vi);
++ for (i = 0; i < vi->max_queue_pairs; i++)
++ rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
++ }
++
++ if (netif_running(dev)) {
++ for (i = 0; i < vi->max_queue_pairs; i++) {
++ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
++ virtnet_napi_tx_enable(vi, vi->sq[i].vq,
++ &vi->sq[i].napi);
++ }
++ }
+ if (prog)
+ bpf_prog_sub(prog, vi->max_queue_pairs - 1);
+ return err;
+@@ -2537,16 +2612,6 @@ static void free_receive_page_frags(struct virtnet_info *vi)
+ put_page(vi->rq[i].alloc_frag.page);
+ }
+
+-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+-{
+- if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+- return false;
+- else if (q < vi->curr_queue_pairs)
+- return true;
+- else
+- return false;
+-}
+-
+ static void free_unused_bufs(struct virtnet_info *vi)
+ {
+ void *buf;
+@@ -2555,10 +2620,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ struct virtqueue *vq = vi->sq[i].vq;
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
+- if (!is_xdp_raw_buffer_queue(vi, i))
++ if (!is_xdp_frame(buf))
+ dev_kfree_skb(buf);
+ else
+- put_page(virt_to_head_page(buf));
++ xdp_return_frame(ptr_to_xdp(buf));
+ }
+ }
+
+diff --git a/drivers/of/device.c b/drivers/of/device.c
+index 40b9051a7fce..258742830e36 100644
+--- a/drivers/of/device.c
++++ b/drivers/of/device.c
+@@ -221,7 +221,8 @@ static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len
+ return -ENODEV;
+
+ /* Name & Type */
+- csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name,
++ /* %p eats all alphanum characters, so %c must be used here */
++ csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T',
+ dev->of_node->type);
+ tsize = csize;
+ len -= csize;
+@@ -300,7 +301,7 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+ if ((!dev) || (!dev->of_node))
+ return;
+
+- add_uevent_var(env, "OF_NAME=%s", dev->of_node->name);
++ add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
+ add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
+ if (dev->of_node->type && strcmp("<NULL>", dev->of_node->type) != 0)
+ add_uevent_var(env, "OF_TYPE=%s", dev->of_node->type);
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index ecea92f68c87..45c0b1f4cb69 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -275,9 +275,6 @@ void __of_detach_node(struct device_node *np)
+
+ /**
+ * of_detach_node() - "Unplug" a node from the device tree.
+- *
+- * The caller must hold a reference to the node. The memory associated with
+- * the node is not freed until its refcount goes to zero.
+ */
+ int of_detach_node(struct device_node *np)
+ {
+@@ -333,6 +330,25 @@ void of_node_release(struct kobject *kobj)
+ if (!of_node_check_flag(node, OF_DYNAMIC))
+ return;
+
++ if (of_node_check_flag(node, OF_OVERLAY)) {
++
++ if (!of_node_check_flag(node, OF_OVERLAY_FREE_CSET)) {
++ /* premature refcount of zero, do not free memory */
++ pr_err("ERROR: memory leak before free overlay changeset, %pOF\n",
++ node);
++ return;
++ }
++
++ /*
++ * If node->properties non-empty then properties were added
++ * to this node either by different overlay that has not
++ * yet been removed, or by a non-overlay mechanism.
++ */
++ if (node->properties)
++ pr_err("ERROR: %s(), unexpected properties in %pOF\n",
++ __func__, node);
++ }
++
+ property_list_free(node->properties);
+ property_list_free(node->deadprops);
+
+@@ -437,6 +453,16 @@ struct device_node *__of_node_dup(const struct device_node *np,
+
+ static void __of_changeset_entry_destroy(struct of_changeset_entry *ce)
+ {
++ if (ce->action == OF_RECONFIG_ATTACH_NODE &&
++ of_node_check_flag(ce->np, OF_OVERLAY)) {
++ if (kref_read(&ce->np->kobj.kref) > 1) {
++ pr_err("ERROR: memory leak, expected refcount 1 instead of %d, of_node_get()/of_node_put() unbalanced - destroy cset entry: attach overlay node %pOF\n",
++ kref_read(&ce->np->kobj.kref), ce->np);
++ } else {
++ of_node_set_flag(ce->np, OF_OVERLAY_FREE_CSET);
++ }
++ }
++
+ of_node_put(ce->np);
+ list_del(&ce->node);
+ kfree(ce);
+diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
+index 7a0a18980b98..c72eef988041 100644
+--- a/drivers/of/kobj.c
++++ b/drivers/of/kobj.c
+@@ -133,6 +133,9 @@ int __of_attach_node_sysfs(struct device_node *np)
+ }
+ if (!name)
+ return -ENOMEM;
++
++ of_node_get(np);
++
+ rc = kobject_add(&np->kobj, parent, "%s", name);
+ kfree(name);
+ if (rc)
+@@ -159,6 +162,5 @@ void __of_detach_node_sysfs(struct device_node *np)
+ kobject_del(&np->kobj);
+ }
+
+- /* finally remove the kobj_init ref */
+ of_node_put(np);
+ }
+diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
+index e92391d6d1bd..5ad1342f5682 100644
+--- a/drivers/of/of_mdio.c
++++ b/drivers/of/of_mdio.c
+@@ -97,8 +97,8 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
+ return rc;
+ }
+
+- dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
+- child->name, addr);
++ dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
++ child, addr);
+ return 0;
+ }
+
+@@ -127,8 +127,8 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
+ return rc;
+ }
+
+- dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
+- child->name, addr);
++ dev_dbg(&mdio->dev, "registered mdio device %pOFn at address %i\n",
++ child, addr);
+ return 0;
+ }
+
+@@ -263,8 +263,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+ continue;
+
+ /* be noisy to encourage people to set reg property */
+- dev_info(&mdio->dev, "scan phy %s at address %i\n",
+- child->name, addr);
++ dev_info(&mdio->dev, "scan phy %pOFn at address %i\n",
++ child, addr);
+
+ if (of_mdiobus_child_is_phy(child)) {
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
+index 2411ed3c7303..f5b452218092 100644
+--- a/drivers/of/of_numa.c
++++ b/drivers/of/of_numa.c
+@@ -168,8 +168,8 @@ int of_node_to_nid(struct device_node *device)
+ np = of_get_next_parent(np);
+ }
+ if (np && r)
+- pr_warn("Invalid \"numa-node-id\" property in node %s\n",
+- np->name);
++ pr_warn("Invalid \"numa-node-id\" property in node %pOFn\n",
++ np);
+ of_node_put(np);
+
+ /*
+diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
+index baa9cee6fa2c..9808aae4621a 100644
+--- a/drivers/of/overlay.c
++++ b/drivers/of/overlay.c
+@@ -23,6 +23,26 @@
+
+ #include "of_private.h"
+
++/**
++ * struct target - info about current target node as recursing through overlay
++ * @np: node where current level of overlay will be applied
++ * @in_livetree: @np is a node in the live devicetree
++ *
++ * Used in the algorithm to create the portion of a changeset that describes
++ * an overlay fragment, which is a devicetree subtree. Initially @np is a node
++ * in the live devicetree where the overlay subtree is targeted to be grafted
++ * into. When recursing to the next level of the overlay subtree, the target
++ * also recurses to the next level of the live devicetree, as long as overlay
++ * subtree node also exists in the live devicetree. When a node in the overlay
++ * subtree does not exist at the same level in the live devicetree, target->np
++ * points to a newly allocated node, and all subsequent targets in the subtree
++ * will be newly allocated nodes.
++ */
++struct target {
++ struct device_node *np;
++ bool in_livetree;
++};
++
+ /**
+ * struct fragment - info about fragment nodes in overlay expanded device tree
+ * @target: target of the overlay operation
+@@ -72,8 +92,7 @@ static int devicetree_corrupt(void)
+ }
+
+ static int build_changeset_next_level(struct overlay_changeset *ovcs,
+- struct device_node *target_node,
+- const struct device_node *overlay_node);
++ struct target *target, const struct device_node *overlay_node);
+
+ /*
+ * of_resolve_phandles() finds the largest phandle in the live tree.
+@@ -257,14 +276,17 @@ err_free_target_path:
+ /**
+ * add_changeset_property() - add @overlay_prop to overlay changeset
+ * @ovcs: overlay changeset
+- * @target_node: where to place @overlay_prop in live tree
++ * @target: where @overlay_prop will be placed
+ * @overlay_prop: property to add or update, from overlay tree
+ * @is_symbols_prop: 1 if @overlay_prop is from node "/__symbols__"
+ *
+- * If @overlay_prop does not already exist in @target_node, add changeset entry
+- * to add @overlay_prop in @target_node, else add changeset entry to update
++ * If @overlay_prop does not already exist in live devicetree, add changeset
++ * entry to add @overlay_prop in @target, else add changeset entry to update
+ * value of @overlay_prop.
+ *
++ * @target may be either in the live devicetree or in a new subtree that
++ * is contained in the changeset.
++ *
+ * Some special properties are not updated (no error returned).
+ *
+ * Update of property in symbols node is not allowed.
+@@ -273,20 +295,22 @@ err_free_target_path:
+ * invalid @overlay.
+ */
+ static int add_changeset_property(struct overlay_changeset *ovcs,
+- struct device_node *target_node,
+- struct property *overlay_prop,
++ struct target *target, struct property *overlay_prop,
+ bool is_symbols_prop)
+ {
+ struct property *new_prop = NULL, *prop;
+ int ret = 0;
+
+- prop = of_find_property(target_node, overlay_prop->name, NULL);
+-
+ if (!of_prop_cmp(overlay_prop->name, "name") ||
+ !of_prop_cmp(overlay_prop->name, "phandle") ||
+ !of_prop_cmp(overlay_prop->name, "linux,phandle"))
+ return 0;
+
++ if (target->in_livetree)
++ prop = of_find_property(target->np, overlay_prop->name, NULL);
++ else
++ prop = NULL;
++
+ if (is_symbols_prop) {
+ if (prop)
+ return -EINVAL;
+@@ -299,10 +323,10 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
+ return -ENOMEM;
+
+ if (!prop)
+- ret = of_changeset_add_property(&ovcs->cset, target_node,
++ ret = of_changeset_add_property(&ovcs->cset, target->np,
+ new_prop);
+ else
+- ret = of_changeset_update_property(&ovcs->cset, target_node,
++ ret = of_changeset_update_property(&ovcs->cset, target->np,
+ new_prop);
+
+ if (ret) {
+@@ -315,14 +339,14 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
+
+ /**
+ * add_changeset_node() - add @node (and children) to overlay changeset
+- * @ovcs: overlay changeset
+- * @target_node: where to place @node in live tree
+- * @node: node from within overlay device tree fragment
++ * @ovcs: overlay changeset
++ * @target: where @node will be placed in live tree or changeset
++ * @node: node from within overlay device tree fragment
+ *
+- * If @node does not already exist in @target_node, add changeset entry
+- * to add @node in @target_node.
++ * If @node does not already exist in @target, add changeset entry
++ * to add @node in @target.
+ *
+- * If @node already exists in @target_node, and the existing node has
++ * If @node already exists in @target, and the existing node has
+ * a phandle, the overlay node is not allowed to have a phandle.
+ *
+ * If @node has child nodes, add the children recursively via
+@@ -355,38 +379,46 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
+ * invalid @overlay.
+ */
+ static int add_changeset_node(struct overlay_changeset *ovcs,
+- struct device_node *target_node, struct device_node *node)
++ struct target *target, struct device_node *node)
+ {
+ const char *node_kbasename;
+ struct device_node *tchild;
++ struct target target_child;
+ int ret = 0;
+
+ node_kbasename = kbasename(node->full_name);
+
+- for_each_child_of_node(target_node, tchild)
++ for_each_child_of_node(target->np, tchild)
+ if (!of_node_cmp(node_kbasename, kbasename(tchild->full_name)))
+ break;
+
+ if (!tchild) {
+- tchild = __of_node_dup(node, node_kbasename);
++ tchild = __of_node_dup(NULL, node_kbasename);
+ if (!tchild)
+ return -ENOMEM;
+
+- tchild->parent = target_node;
++ tchild->parent = target->np;
++ of_node_set_flag(tchild, OF_OVERLAY);
+
+ ret = of_changeset_attach_node(&ovcs->cset, tchild);
+ if (ret)
+ return ret;
+
+- ret = build_changeset_next_level(ovcs, tchild, node);
++ target_child.np = tchild;
++ target_child.in_livetree = false;
++
++ ret = build_changeset_next_level(ovcs, &target_child, node);
+ of_node_put(tchild);
+ return ret;
+ }
+
+- if (node->phandle && tchild->phandle)
++ if (node->phandle && tchild->phandle) {
+ ret = -EINVAL;
+- else
+- ret = build_changeset_next_level(ovcs, tchild, node);
++ } else {
++ target_child.np = tchild;
++ target_child.in_livetree = target->in_livetree;
++ ret = build_changeset_next_level(ovcs, &target_child, node);
++ }
+ of_node_put(tchild);
+
+ return ret;
+@@ -395,7 +427,7 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
+ /**
+ * build_changeset_next_level() - add level of overlay changeset
+ * @ovcs: overlay changeset
+- * @target_node: where to place @overlay_node in live tree
++ * @target: where to place @overlay_node in live tree
+ * @overlay_node: node from within an overlay device tree fragment
+ *
+ * Add the properties (if any) and nodes (if any) from @overlay_node to the
+@@ -408,27 +440,26 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
+ * invalid @overlay_node.
+ */
+ static int build_changeset_next_level(struct overlay_changeset *ovcs,
+- struct device_node *target_node,
+- const struct device_node *overlay_node)
++ struct target *target, const struct device_node *overlay_node)
+ {
+ struct device_node *child;
+ struct property *prop;
+ int ret;
+
+ for_each_property_of_node(overlay_node, prop) {
+- ret = add_changeset_property(ovcs, target_node, prop, 0);
++ ret = add_changeset_property(ovcs, target, prop, 0);
+ if (ret) {
+ pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
+- target_node, prop->name, ret);
++ target->np, prop->name, ret);
+ return ret;
+ }
+ }
+
+ for_each_child_of_node(overlay_node, child) {
+- ret = add_changeset_node(ovcs, target_node, child);
++ ret = add_changeset_node(ovcs, target, child);
+ if (ret) {
+- pr_debug("Failed to apply node @%pOF/%s, err=%d\n",
+- target_node, child->name, ret);
++ pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n",
++ target->np, child, ret);
+ of_node_put(child);
+ return ret;
+ }
+@@ -441,17 +472,17 @@ static int build_changeset_next_level(struct overlay_changeset *ovcs,
+ * Add the properties from __overlay__ node to the @ovcs->cset changeset.
+ */
+ static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
+- struct device_node *target_node,
++ struct target *target,
+ const struct device_node *overlay_symbols_node)
+ {
+ struct property *prop;
+ int ret;
+
+ for_each_property_of_node(overlay_symbols_node, prop) {
+- ret = add_changeset_property(ovcs, target_node, prop, 1);
++ ret = add_changeset_property(ovcs, target, prop, 1);
+ if (ret) {
+ pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
+- target_node, prop->name, ret);
++ target->np, prop->name, ret);
+ return ret;
+ }
+ }
+@@ -474,6 +505,7 @@ static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
+ static int build_changeset(struct overlay_changeset *ovcs)
+ {
+ struct fragment *fragment;
++ struct target target;
+ int fragments_count, i, ret;
+
+ /*
+@@ -488,7 +520,9 @@ static int build_changeset(struct overlay_changeset *ovcs)
+ for (i = 0; i < fragments_count; i++) {
+ fragment = &ovcs->fragments[i];
+
+- ret = build_changeset_next_level(ovcs, fragment->target,
++ target.np = fragment->target;
++ target.in_livetree = true;
++ ret = build_changeset_next_level(ovcs, &target,
+ fragment->overlay);
+ if (ret) {
+ pr_debug("apply failed '%pOF'\n", fragment->target);
+@@ -498,7 +532,10 @@ static int build_changeset(struct overlay_changeset *ovcs)
+
+ if (ovcs->symbols_fragment) {
+ fragment = &ovcs->fragments[ovcs->count - 1];
+- ret = build_changeset_symbols_node(ovcs, fragment->target,
++
++ target.np = fragment->target;
++ target.in_livetree = true;
++ ret = build_changeset_symbols_node(ovcs, &target,
+ fragment->overlay);
+ if (ret) {
+ pr_debug("apply failed '%pOF'\n", fragment->target);
+@@ -516,7 +553,7 @@ static int build_changeset(struct overlay_changeset *ovcs)
+ * 1) "target" property containing the phandle of the target
+ * 2) "target-path" property containing the path of the target
+ */
+-static struct device_node *find_target_node(struct device_node *info_node)
++static struct device_node *find_target(struct device_node *info_node)
+ {
+ struct device_node *node;
+ const char *path;
+@@ -622,7 +659,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
+
+ fragment = &fragments[cnt];
+ fragment->overlay = overlay_node;
+- fragment->target = find_target_node(node);
++ fragment->target = find_target(node);
+ if (!fragment->target) {
+ of_node_put(fragment->overlay);
+ ret = -EINVAL;
1468 +diff --git a/drivers/of/platform.c b/drivers/of/platform.c
1469 +index 6c59673933e9..04ad312fd85b 100644
1470 +--- a/drivers/of/platform.c
1471 ++++ b/drivers/of/platform.c
1472 +@@ -91,8 +91,8 @@ static void of_device_make_bus_id(struct device *dev)
1473 + */
1474 + reg = of_get_property(node, "reg", NULL);
1475 + if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
1476 +- dev_set_name(dev, dev_name(dev) ? "%llx.%s:%s" : "%llx.%s",
1477 +- (unsigned long long)addr, node->name,
1478 ++ dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
1479 ++ (unsigned long long)addr, node,
1480 + dev_name(dev));
1481 + return;
1482 + }
1483 +@@ -142,8 +142,8 @@ struct platform_device *of_device_alloc(struct device_node *np,
1484 + WARN_ON(rc);
1485 + }
1486 + if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
1487 +- pr_debug("not all legacy IRQ resources mapped for %s\n",
1488 +- np->name);
1489 ++ pr_debug("not all legacy IRQ resources mapped for %pOFn\n",
1490 ++ np);
1491 + }
1492 +
1493 + dev->dev.of_node = of_node_get(np);
1494 +diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
1495 +index 41b49716ac75..7f42314da6ae 100644
1496 +--- a/drivers/of/unittest.c
1497 ++++ b/drivers/of/unittest.c
1498 +@@ -212,8 +212,8 @@ static int __init of_unittest_check_node_linkage(struct device_node *np)
1499 +
1500 + for_each_child_of_node(np, child) {
1501 + if (child->parent != np) {
1502 +- pr_err("Child node %s links to wrong parent %s\n",
1503 +- child->name, np->name);
1504 ++ pr_err("Child node %pOFn links to wrong parent %pOFn\n",
1505 ++ child, np);
1506 + rc = -EINVAL;
1507 + goto put_child;
1508 + }
1509 +@@ -1046,16 +1046,16 @@ static void __init of_unittest_platform_populate(void)
1510 + for_each_child_of_node(np, child) {
1511 + for_each_child_of_node(child, grandchild)
1512 + unittest(of_find_device_by_node(grandchild),
1513 +- "Could not create device for node '%s'\n",
1514 +- grandchild->name);
1515 ++ "Could not create device for node '%pOFn'\n",
1516 ++ grandchild);
1517 + }
1518 +
1519 + of_platform_depopulate(&test_bus->dev);
1520 + for_each_child_of_node(np, child) {
1521 + for_each_child_of_node(child, grandchild)
1522 + unittest(!of_find_device_by_node(grandchild),
1523 +- "device didn't get destroyed '%s'\n",
1524 +- grandchild->name);
1525 ++ "device didn't get destroyed '%pOFn'\n",
1526 ++ grandchild);
1527 + }
1528 +
1529 + platform_device_unregister(test_bus);
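
The three drivers/of hunks above are one mechanical conversion: pr_debug/pr_err/unittest messages stop reading node->name directly and instead hand the device_node to the %pOFn printk extension, which derives the printable name in one central place. A rough userspace analogy of that design choice (illustrative types, not the kernel's implementation):

#include <stdio.h>
#include <string.h>

struct device_node {
        const char *full_path;               /* e.g. "/soc/i2c@3000" */
};

/* one place that knows how to derive the printable name */
static const char *node_name(const struct device_node *np)
{
        const char *base = strrchr(np->full_path, '/');

        return base ? base + 1 : np->full_path;
}

int main(void)
{
        struct device_node np = { "/soc/i2c@3000" };

        /* callers hand over the node, not a cached name field */
        printf("node '%s'\n", node_name(&np));
        return 0;
}
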
1530 +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
1531 +index db2af09067db..b6f2ff95c3ed 100644
1532 +--- a/drivers/platform/x86/asus-nb-wmi.c
1533 ++++ b/drivers/platform/x86/asus-nb-wmi.c
1534 +@@ -442,8 +442,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
1535 + { KE_KEY, 0x30, { KEY_VOLUMEUP } },
1536 + { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
1537 + { KE_KEY, 0x32, { KEY_MUTE } },
1538 +- { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
1539 +- { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
1540 ++ { KE_KEY, 0x35, { KEY_SCREENLOCK } },
1541 + { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
1542 + { KE_KEY, 0x41, { KEY_NEXTSONG } },
1543 + { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
1544 +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
1545 +index a86aa65ad66d..39155d7cc894 100644
1546 +--- a/drivers/vhost/net.c
1547 ++++ b/drivers/vhost/net.c
1548 +@@ -1114,7 +1114,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
1549 + n->vqs[i].rx_ring = NULL;
1550 + vhost_net_buf_init(&n->vqs[i].rxq);
1551 + }
1552 +- vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
1553 ++ vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
1554 ++ UIO_MAXIOV + VHOST_NET_BATCH);
1555 +
1556 + vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
1557 + vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
1558 +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
1559 +index e7e3ae13516d..0cfa925be4ec 100644
1560 +--- a/drivers/vhost/scsi.c
1561 ++++ b/drivers/vhost/scsi.c
1562 +@@ -1398,7 +1398,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1563 + vqs[i] = &vs->vqs[i].vq;
1564 + vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1565 + }
1566 +- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1567 ++ vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
1568 +
1569 + vhost_scsi_init_inflight(vs, NULL);
1570 +
1571 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
1572 +index c66fc8308b5e..cf82e7266397 100644
1573 +--- a/drivers/vhost/vhost.c
1574 ++++ b/drivers/vhost/vhost.c
1575 +@@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
1576 + vq->indirect = kmalloc_array(UIO_MAXIOV,
1577 + sizeof(*vq->indirect),
1578 + GFP_KERNEL);
1579 +- vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
1580 ++ vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
1581 + GFP_KERNEL);
1582 +- vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
1583 ++ vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
1584 + GFP_KERNEL);
1585 + if (!vq->indirect || !vq->log || !vq->heads)
1586 + goto err_nomem;
1587 +@@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
1588 + }
1589 +
1590 + void vhost_dev_init(struct vhost_dev *dev,
1591 +- struct vhost_virtqueue **vqs, int nvqs)
1592 ++ struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
1593 + {
1594 + struct vhost_virtqueue *vq;
1595 + int i;
1596 +@@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev,
1597 + dev->iotlb = NULL;
1598 + dev->mm = NULL;
1599 + dev->worker = NULL;
1600 ++ dev->iov_limit = iov_limit;
1601 + init_llist_head(&dev->work_list);
1602 + init_waitqueue_head(&dev->wait);
1603 + INIT_LIST_HEAD(&dev->read_list);
1604 +diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
1605 +index 1b675dad5e05..9490e7ddb340 100644
1606 +--- a/drivers/vhost/vhost.h
1607 ++++ b/drivers/vhost/vhost.h
1608 +@@ -170,9 +170,11 @@ struct vhost_dev {
1609 + struct list_head read_list;
1610 + struct list_head pending_list;
1611 + wait_queue_head_t wait;
1612 ++ int iov_limit;
1613 + };
1614 +
1615 +-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
1616 ++void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
1617 ++ int nvqs, int iov_limit);
1618 + long vhost_dev_set_owner(struct vhost_dev *dev);
1619 + bool vhost_dev_has_owner(struct vhost_dev *dev);
1620 + long vhost_dev_check_owner(struct vhost_dev *);
1621 +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
1622 +index 98ed5be132c6..fa93f6711d8d 100644
1623 +--- a/drivers/vhost/vsock.c
1624 ++++ b/drivers/vhost/vsock.c
1625 +@@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
1626 + vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
1627 + vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
1628 +
1629 +- vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
1630 ++ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
1631 +
1632 + file->private_data = vsock;
1633 + spin_lock_init(&vsock->send_pkt_list_lock);
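
The vhost hunks replace the global UIO_MAXIOV sizing of vq->log and vq->heads with a per-device iov_limit passed into vhost_dev_init(): net asks for UIO_MAXIOV + VHOST_NET_BATCH because it batches, while scsi and vsock keep plain UIO_MAXIOV. A minimal sketch of that sizing pattern in plain C (hypothetical names, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct vq { int *heads; size_t cap; };
struct dev { struct vq vq; size_t iov_limit; };

/* allocate per-queue arrays from the device's declared limit */
static int dev_init(struct dev *d, size_t iov_limit)
{
        d->iov_limit = iov_limit;
        d->vq.cap = iov_limit;
        d->vq.heads = calloc(iov_limit, sizeof(*d->vq.heads));
        return d->vq.heads ? 0 : -1;
}

int main(void)
{
        struct dev net, scsi;

        if (dev_init(&net, 1024 + 64))       /* batching device: base + batch */
                return 1;
        if (dev_init(&scsi, 1024))           /* plain device: base only */
                return 1;
        printf("%zu %zu\n", net.vq.cap, scsi.vq.cap);
        free(net.vq.heads);
        free(scsi.vq.heads);
        return 0;
}
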
1634 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
1635 +index 7ad6f2eec711..48ac8b7c43a5 100644
1636 +--- a/fs/btrfs/ctree.c
1637 ++++ b/fs/btrfs/ctree.c
1638 +@@ -1003,6 +1003,48 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
1639 + return 0;
1640 + }
1641 +
1642 ++static struct extent_buffer *alloc_tree_block_no_bg_flush(
1643 ++ struct btrfs_trans_handle *trans,
1644 ++ struct btrfs_root *root,
1645 ++ u64 parent_start,
1646 ++ const struct btrfs_disk_key *disk_key,
1647 ++ int level,
1648 ++ u64 hint,
1649 ++ u64 empty_size)
1650 ++{
1651 ++ struct btrfs_fs_info *fs_info = root->fs_info;
1652 ++ struct extent_buffer *ret;
1653 ++
1654 ++ /*
1655 ++ * If we are COWing a node/leaf from the extent, chunk, device or free
1656 ++ * space trees, make sure that we do not finish block group creation of
1657 ++ * pending block groups. We do this to avoid a deadlock.
1658 ++ * COWing can result in allocation of a new chunk, and flushing pending
1659 ++ * block groups (btrfs_create_pending_block_groups()) can be triggered
1660 ++ * when finishing allocation of a new chunk. Creation of a pending block
1661 ++ * group modifies the extent, chunk, device and free space trees,
1662 ++ * therefore we could deadlock with ourselves since we are holding a
1663 ++ * lock on an extent buffer that btrfs_create_pending_block_groups() may
1664 ++ * try to COW later.
1665 ++ * For similar reasons, we also need to delay flushing pending block
1666 ++ * groups when splitting a leaf or node, from one of those trees, since
1667 ++ * we are holding a write lock on it and its parent or when inserting a
1668 ++ * new root node for one of those trees.
1669 ++ */
1670 ++ if (root == fs_info->extent_root ||
1671 ++ root == fs_info->chunk_root ||
1672 ++ root == fs_info->dev_root ||
1673 ++ root == fs_info->free_space_root)
1674 ++ trans->can_flush_pending_bgs = false;
1675 ++
1676 ++ ret = btrfs_alloc_tree_block(trans, root, parent_start,
1677 ++ root->root_key.objectid, disk_key, level,
1678 ++ hint, empty_size);
1679 ++ trans->can_flush_pending_bgs = true;
1680 ++
1681 ++ return ret;
1682 ++}
1683 ++
1684 + /*
1685 + * does the dirty work in cow of a single block. The parent block (if
1686 + * supplied) is updated to point to the new cow copy. The new buffer is marked
1687 +@@ -1050,28 +1092,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1688 + if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1689 + parent_start = parent->start;
1690 +
1691 +- /*
1692 +- * If we are COWing a node/leaf from the extent, chunk, device or free
1693 +- * space trees, make sure that we do not finish block group creation of
1694 +- * pending block groups. We do this to avoid a deadlock.
1695 +- * COWing can result in allocation of a new chunk, and flushing pending
1696 +- * block groups (btrfs_create_pending_block_groups()) can be triggered
1697 +- * when finishing allocation of a new chunk. Creation of a pending block
1698 +- * group modifies the extent, chunk, device and free space trees,
1699 +- * therefore we could deadlock with ourselves since we are holding a
1700 +- * lock on an extent buffer that btrfs_create_pending_block_groups() may
1701 +- * try to COW later.
1702 +- */
1703 +- if (root == fs_info->extent_root ||
1704 +- root == fs_info->chunk_root ||
1705 +- root == fs_info->dev_root ||
1706 +- root == fs_info->free_space_root)
1707 +- trans->can_flush_pending_bgs = false;
1708 +-
1709 +- cow = btrfs_alloc_tree_block(trans, root, parent_start,
1710 +- root->root_key.objectid, &disk_key, level,
1711 +- search_start, empty_size);
1712 +- trans->can_flush_pending_bgs = true;
1713 ++ cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
1714 ++ level, search_start, empty_size);
1715 + if (IS_ERR(cow))
1716 + return PTR_ERR(cow);
1717 +
1718 +@@ -3383,8 +3405,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
1719 + else
1720 + btrfs_node_key(lower, &lower_key, 0);
1721 +
1722 +- c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
1723 +- &lower_key, level, root->node->start, 0);
1724 ++ c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
1725 ++ root->node->start, 0);
1726 + if (IS_ERR(c))
1727 + return PTR_ERR(c);
1728 +
1729 +@@ -3513,8 +3535,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
1730 + mid = (c_nritems + 1) / 2;
1731 + btrfs_node_key(c, &disk_key, mid);
1732 +
1733 +- split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
1734 +- &disk_key, level, c->start, 0);
1735 ++ split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
1736 ++ c->start, 0);
1737 + if (IS_ERR(split))
1738 + return PTR_ERR(split);
1739 +
1740 +@@ -4298,8 +4320,8 @@ again:
1741 + else
1742 + btrfs_item_key(l, &disk_key, mid);
1743 +
1744 +- right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
1745 +- &disk_key, 0, l->start, 0);
1746 ++ right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
1747 ++ l->start, 0);
1748 + if (IS_ERR(right))
1749 + return PTR_ERR(right);
1750 +
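
alloc_tree_block_no_bg_flush() hoists the open-coded "suppress pending block-group flushing around this allocation" sequence into one wrapper, so __btrfs_cow_block(), insert_new_root(), split_node() and leaf splits all get the same deadlock avoidance. Its shape, reduced to a compilable sketch (illustrative names):

#include <stdbool.h>
#include <stdio.h>

struct trans { bool can_flush; };

static int do_alloc(struct trans *t)
{
        /* would deadlock if it flushed while our tree locks were held */
        printf("alloc (flush %s)\n", t->can_flush ? "allowed" : "suppressed");
        return 0;
}

/* guard wrapper: every caller gets the suppress/restore pairing */
static int alloc_no_flush(struct trans *t)
{
        int ret;

        t->can_flush = false;
        ret = do_alloc(t);
        t->can_flush = true;
        return ret;
}

int main(void)
{
        struct trans t = { .can_flush = true };

        return alloc_no_flush(&t);
}
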
1751 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
1752 +index 8ad145820ea8..8888337a95b6 100644
1753 +--- a/fs/btrfs/super.c
1754 ++++ b/fs/btrfs/super.c
1755 +@@ -1677,6 +1677,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1756 + flags | SB_RDONLY, device_name, data);
1757 + if (IS_ERR(mnt_root)) {
1758 + root = ERR_CAST(mnt_root);
1759 ++ kfree(subvol_name);
1760 + goto out;
1761 + }
1762 +
1763 +@@ -1686,12 +1687,14 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1764 + if (error < 0) {
1765 + root = ERR_PTR(error);
1766 + mntput(mnt_root);
1767 ++ kfree(subvol_name);
1768 + goto out;
1769 + }
1770 + }
1771 + }
1772 + if (IS_ERR(mnt_root)) {
1773 + root = ERR_CAST(mnt_root);
1774 ++ kfree(subvol_name);
1775 + goto out;
1776 + }
1777 +
1778 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1779 +index d0bba175117c..a5ea742654aa 100644
1780 +--- a/fs/cifs/connect.c
1781 ++++ b/fs/cifs/connect.c
1782 +@@ -50,6 +50,7 @@
1783 + #include "cifs_unicode.h"
1784 + #include "cifs_debug.h"
1785 + #include "cifs_fs_sb.h"
1786 ++#include "dns_resolve.h"
1787 + #include "ntlmssp.h"
1788 + #include "nterr.h"
1789 + #include "rfc1002pdu.h"
1790 +@@ -317,6 +318,53 @@ static void cifs_prune_tlinks(struct work_struct *work);
1791 + static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
1792 + const char *devname, bool is_smb3);
1793 +
1794 ++/*
1795 ++ * Resolve the hostname and set the IP address in the TCP session. Useful
1796 ++ * for hostnames whose IP addresses may change over time.
1797 ++ *
1798 ++ * This should be called with server->srv_mutex held.
1799 ++ */
1800 ++#ifdef CONFIG_CIFS_DFS_UPCALL
1801 ++static int reconn_set_ipaddr(struct TCP_Server_Info *server)
1802 ++{
1803 ++ int rc;
1804 ++ int len;
1805 ++ char *unc, *ipaddr = NULL;
1806 ++
1807 ++ if (!server->hostname)
1808 ++ return -EINVAL;
1809 ++
1810 ++ len = strlen(server->hostname) + 3;
1811 ++
1812 ++ unc = kmalloc(len, GFP_KERNEL);
1813 ++ if (!unc) {
1814 ++ cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
1815 ++ return -ENOMEM;
1816 ++ }
1817 ++ snprintf(unc, len, "\\\\%s", server->hostname);
1818 ++
1819 ++ rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
1820 ++ kfree(unc);
1821 ++
1822 ++ if (rc < 0) {
1823 ++ cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
1824 ++ __func__, server->hostname, rc);
1825 ++ return rc;
1826 ++ }
1827 ++
1828 ++ rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
1829 ++ strlen(ipaddr));
1830 ++ kfree(ipaddr);
1831 ++
1832 ++ return !rc ? -1 : 0;
1833 ++}
1834 ++#else
1835 ++static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
1836 ++{
1837 ++ return 0;
1838 ++}
1839 ++#endif
1840 ++
1841 + /*
1842 + * cifs tcp session reconnection
1843 + *
1844 +@@ -417,6 +465,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
1845 + rc = generic_ip_connect(server);
1846 + if (rc) {
1847 + cifs_dbg(FYI, "reconnect error %d\n", rc);
1848 ++ rc = reconn_set_ipaddr(server);
1849 ++ if (rc) {
1850 ++ cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
1851 ++ __func__, rc);
1852 ++ }
1853 + mutex_unlock(&server->srv_mutex);
1854 + msleep(3000);
1855 + } else {
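
reconn_set_ipaddr() re-resolves the server hostname when a reconnect attempt fails, on the assumption that the server's address may have changed; note the strlen(hostname) + 3 budget, which covers the two leading backslashes plus the terminating NUL. A userspace sketch of just the UNC construction (the DNS call is out of scope here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_unc(const char *hostname)
{
        size_t len = strlen(hostname) + 3;   /* "\\" + name + NUL */
        char *unc = malloc(len);

        if (!unc)
                return NULL;
        snprintf(unc, len, "\\\\%s", hostname);
        return unc;
}

int main(void)
{
        char *unc = build_unc("fileserver.example");

        if (!unc)
                return 1;
        puts(unc);                           /* \\fileserver.example */
        free(unc);
        return 0;
}
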
1856 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1857 +index dba986524917..8a01e89ff827 100644
1858 +--- a/fs/cifs/smb2pdu.c
1859 ++++ b/fs/cifs/smb2pdu.c
1860 +@@ -3127,8 +3127,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
1861 + rdata->mr = NULL;
1862 + }
1863 + #endif
1864 +- if (rdata->result)
1865 ++ if (rdata->result && rdata->result != -ENODATA) {
1866 + cifs_stats_fail_inc(tcon, SMB2_READ_HE);
1867 ++ trace_smb3_read_err(0 /* xid */,
1868 ++ rdata->cfile->fid.persistent_fid,
1869 ++ tcon->tid, tcon->ses->Suid, rdata->offset,
1870 ++ rdata->bytes, rdata->result);
1871 ++ } else
1872 ++ trace_smb3_read_done(0 /* xid */,
1873 ++ rdata->cfile->fid.persistent_fid,
1874 ++ tcon->tid, tcon->ses->Suid,
1875 ++ rdata->offset, rdata->got_bytes);
1876 +
1877 + queue_work(cifsiod_wq, &rdata->work);
1878 + DeleteMidQEntry(mid);
1879 +@@ -3203,13 +3212,11 @@ smb2_async_readv(struct cifs_readdata *rdata)
1880 + if (rc) {
1881 + kref_put(&rdata->refcount, cifs_readdata_release);
1882 + cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
1883 +- trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid,
1884 +- io_parms.tcon->tid, io_parms.tcon->ses->Suid,
1885 +- io_parms.offset, io_parms.length);
1886 +- } else
1887 +- trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid,
1888 +- io_parms.tcon->tid, io_parms.tcon->ses->Suid,
1889 +- io_parms.offset, io_parms.length);
1890 ++ trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
1891 ++ io_parms.tcon->tid,
1892 ++ io_parms.tcon->ses->Suid,
1893 ++ io_parms.offset, io_parms.length, rc);
1894 ++ }
1895 +
1896 + cifs_small_buf_release(buf);
1897 + return rc;
1898 +@@ -3253,10 +3260,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
1899 + if (rc != -ENODATA) {
1900 + cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
1901 + cifs_dbg(VFS, "Send error in read = %d\n", rc);
1902 ++ trace_smb3_read_err(xid, req->PersistentFileId,
1903 ++ io_parms->tcon->tid, ses->Suid,
1904 ++ io_parms->offset, io_parms->length,
1905 ++ rc);
1906 + }
1907 +- trace_smb3_read_err(rc, xid, req->PersistentFileId,
1908 +- io_parms->tcon->tid, ses->Suid,
1909 +- io_parms->offset, io_parms->length);
1910 + free_rsp_buf(resp_buftype, rsp_iov.iov_base);
1911 + return rc == -ENODATA ? 0 : rc;
1912 + } else
1913 +@@ -3342,8 +3350,17 @@ smb2_writev_callback(struct mid_q_entry *mid)
1914 + wdata->mr = NULL;
1915 + }
1916 + #endif
1917 +- if (wdata->result)
1918 ++ if (wdata->result) {
1919 + cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1920 ++ trace_smb3_write_err(0 /* no xid */,
1921 ++ wdata->cfile->fid.persistent_fid,
1922 ++ tcon->tid, tcon->ses->Suid, wdata->offset,
1923 ++ wdata->bytes, wdata->result);
1924 ++ } else
1925 ++ trace_smb3_write_done(0 /* no xid */,
1926 ++ wdata->cfile->fid.persistent_fid,
1927 ++ tcon->tid, tcon->ses->Suid,
1928 ++ wdata->offset, wdata->bytes);
1929 +
1930 + queue_work(cifsiod_wq, &wdata->work);
1931 + DeleteMidQEntry(mid);
1932 +@@ -3485,10 +3502,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
1933 + wdata->bytes, rc);
1934 + kref_put(&wdata->refcount, release);
1935 + cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1936 +- } else
1937 +- trace_smb3_write_done(0 /* no xid */, req->PersistentFileId,
1938 +- tcon->tid, tcon->ses->Suid, wdata->offset,
1939 +- wdata->bytes);
1940 ++ }
1941 +
1942 + async_writev_out:
1943 + cifs_small_buf_release(req);
1944 +@@ -3714,8 +3728,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
1945 + rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
1946 + srch_inf->endOfSearch = true;
1947 + rc = 0;
1948 +- }
1949 +- cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
1950 ++ } else
1951 ++ cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
1952 + goto qdir_exit;
1953 + }
1954 +
1955 +diff --git a/fs/dcache.c b/fs/dcache.c
1956 +index 2e7e8d85e9b4..cb515f183482 100644
1957 +--- a/fs/dcache.c
1958 ++++ b/fs/dcache.c
1959 +@@ -1202,15 +1202,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1960 + */
1961 + void shrink_dcache_sb(struct super_block *sb)
1962 + {
1963 +- long freed;
1964 +-
1965 + do {
1966 + LIST_HEAD(dispose);
1967 +
1968 +- freed = list_lru_walk(&sb->s_dentry_lru,
1969 ++ list_lru_walk(&sb->s_dentry_lru,
1970 + dentry_lru_isolate_shrink, &dispose, 1024);
1971 +-
1972 +- this_cpu_sub(nr_dentry_unused, freed);
1973 + shrink_dentry_list(&dispose);
1974 + } while (list_lru_count(&sb->s_dentry_lru) > 0);
1975 + }
1976 +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
1977 +index e978f6930575..449d0cb45a84 100644
1978 +--- a/fs/gfs2/rgrp.c
1979 ++++ b/fs/gfs2/rgrp.c
1980 +@@ -1747,9 +1747,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1981 + goto next_iter;
1982 + }
1983 + if (ret == -E2BIG) {
1984 +- n += rbm->bii - initial_bii;
1985 + rbm->bii = 0;
1986 + rbm->offset = 0;
1987 ++ n += (rbm->bii - initial_bii);
1988 + goto res_covered_end_of_rgrp;
1989 + }
1990 + return ret;
1991 +diff --git a/fs/nfs/write.c b/fs/nfs/write.c
1992 +index 586726a590d8..d790faff8e47 100644
1993 +--- a/fs/nfs/write.c
1994 ++++ b/fs/nfs/write.c
1995 +@@ -621,11 +621,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
1996 + nfs_set_page_writeback(page);
1997 + WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
1998 +
1999 +- ret = 0;
2000 ++ ret = req->wb_context->error;
2001 + /* If there is a fatal error that covers this write, just exit */
2002 +- if (nfs_error_is_fatal_on_server(req->wb_context->error))
2003 ++ if (nfs_error_is_fatal_on_server(ret))
2004 + goto out_launder;
2005 +
2006 ++ ret = 0;
2007 + if (!nfs_pageio_add_request(pgio, req)) {
2008 + ret = pgio->pg_error;
2009 + /*
2010 +@@ -635,9 +636,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
2011 + nfs_context_set_write_error(req->wb_context, ret);
2012 + if (nfs_error_is_fatal_on_server(ret))
2013 + goto out_launder;
2014 +- }
2015 ++ } else
2016 ++ ret = -EAGAIN;
2017 + nfs_redirty_request(req);
2018 +- ret = -EAGAIN;
2019 + } else
2020 + nfs_add_stats(page_file_mapping(page)->host,
2021 + NFSIOS_WRITEPAGES, 1);
2022 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
2023 +index d837dad24b4c..21fef8c5eca7 100644
2024 +--- a/include/linux/netdevice.h
2025 ++++ b/include/linux/netdevice.h
2026 +@@ -1455,6 +1455,7 @@ struct net_device_ops {
2027 + * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
2028 + * @IFF_FAILOVER: device is a failover master device
2029 + * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
2030 ++ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
2031 + */
2032 + enum netdev_priv_flags {
2033 + IFF_802_1Q_VLAN = 1<<0,
2034 +@@ -1486,6 +1487,7 @@ enum netdev_priv_flags {
2035 + IFF_NO_RX_HANDLER = 1<<26,
2036 + IFF_FAILOVER = 1<<27,
2037 + IFF_FAILOVER_SLAVE = 1<<28,
2038 ++ IFF_L3MDEV_RX_HANDLER = 1<<29,
2039 + };
2040 +
2041 + #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
2042 +@@ -1516,6 +1518,7 @@ enum netdev_priv_flags {
2043 + #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
2044 + #define IFF_FAILOVER IFF_FAILOVER
2045 + #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
2046 ++#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
2047 +
2048 + /**
2049 + * struct net_device - The DEVICE structure.
2050 +@@ -4464,6 +4467,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
2051 + return dev->priv_flags & IFF_SUPP_NOFCS;
2052 + }
2053 +
2054 ++static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
2055 ++{
2056 ++ return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
2057 ++}
2058 ++
2059 + static inline bool netif_is_l3_master(const struct net_device *dev)
2060 + {
2061 + return dev->priv_flags & IFF_L3MDEV_MASTER;
2062 +diff --git a/include/linux/of.h b/include/linux/of.h
2063 +index 99b0ebf49632..40e58b0e9cf4 100644
2064 +--- a/include/linux/of.h
2065 ++++ b/include/linux/of.h
2066 +@@ -138,11 +138,16 @@ extern struct device_node *of_aliases;
2067 + extern struct device_node *of_stdout;
2068 + extern raw_spinlock_t devtree_lock;
2069 +
2070 +-/* flag descriptions (need to be visible even when !CONFIG_OF) */
2071 +-#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
2072 +-#define OF_DETACHED 2 /* node has been detached from the device tree */
2073 +-#define OF_POPULATED 3 /* device already created for the node */
2074 +-#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
2075 ++/*
2076 ++ * struct device_node flag descriptions
2077 ++ * (need to be visible even when !CONFIG_OF)
2078 ++ */
2079 ++#define OF_DYNAMIC 1 /* (and properties) allocated via kmalloc */
2080 ++#define OF_DETACHED 2 /* detached from the device tree */
2081 ++#define OF_POPULATED 3 /* device already created */
2082 ++#define OF_POPULATED_BUS 4 /* platform bus created for children */
2083 ++#define OF_OVERLAY 5 /* allocated for an overlay */
2084 ++#define OF_OVERLAY_FREE_CSET 6 /* in overlay cset being freed */
2085 +
2086 + #define OF_BAD_ADDR ((u64)-1)
2087 +
2088 +diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
2089 +index ec912d01126f..ecdc6542070f 100644
2090 +--- a/include/linux/sched/coredump.h
2091 ++++ b/include/linux/sched/coredump.h
2092 +@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
2093 + #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
2094 + #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
2095 + #define MMF_OOM_VICTIM 25 /* mm is the oom victim */
2096 ++#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
2097 + #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
2098 +
2099 + #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
2100 +diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
2101 +index 3832099289c5..128487658ff7 100644
2102 +--- a/include/net/l3mdev.h
2103 ++++ b/include/net/l3mdev.h
2104 +@@ -142,7 +142,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
2105 +
2106 + if (netif_is_l3_slave(skb->dev))
2107 + master = netdev_master_upper_dev_get_rcu(skb->dev);
2108 +- else if (netif_is_l3_master(skb->dev))
2109 ++ else if (netif_is_l3_master(skb->dev) ||
2110 ++ netif_has_l3_rx_handler(skb->dev))
2111 + master = skb->dev;
2112 +
2113 + if (master && master->l3mdev_ops->l3mdev_l3_rcv)
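
IFF_L3MDEV_RX_HANDLER is a new priv_flags bit, and netif_has_l3_rx_handler() follows the file's one-helper-per-flag convention so l3mdev_l3_rcv() can treat such devices like an L3 master. The flag/helper idiom in miniature (standalone sketch, not the kernel structures):

#include <stdbool.h>
#include <stdio.h>

enum priv_flags {
        IFF_L3MDEV_MASTER     = 1u << 0,
        IFF_L3MDEV_RX_HANDLER = 1u << 1,     /* the new bit */
};

struct net_device { unsigned int priv_flags; };

static bool netif_has_l3_rx_handler(const struct net_device *dev)
{
        return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
}

int main(void)
{
        struct net_device dev = { .priv_flags = IFF_L3MDEV_RX_HANDLER };

        printf("%d\n", netif_has_l3_rx_handler(&dev));
        return 0;
}
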
2114 +diff --git a/kernel/exit.c b/kernel/exit.c
2115 +index 0e21e6d21f35..55b4fa6d01eb 100644
2116 +--- a/kernel/exit.c
2117 ++++ b/kernel/exit.c
2118 +@@ -558,12 +558,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
2119 + return NULL;
2120 + }
2121 +
2122 +-static struct task_struct *find_child_reaper(struct task_struct *father)
2123 ++static struct task_struct *find_child_reaper(struct task_struct *father,
2124 ++ struct list_head *dead)
2125 + __releases(&tasklist_lock)
2126 + __acquires(&tasklist_lock)
2127 + {
2128 + struct pid_namespace *pid_ns = task_active_pid_ns(father);
2129 + struct task_struct *reaper = pid_ns->child_reaper;
2130 ++ struct task_struct *p, *n;
2131 +
2132 + if (likely(reaper != father))
2133 + return reaper;
2134 +@@ -579,6 +581,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
2135 + panic("Attempted to kill init! exitcode=0x%08x\n",
2136 + father->signal->group_exit_code ?: father->exit_code);
2137 + }
2138 ++
2139 ++ list_for_each_entry_safe(p, n, dead, ptrace_entry) {
2140 ++ list_del_init(&p->ptrace_entry);
2141 ++ release_task(p);
2142 ++ }
2143 ++
2144 + zap_pid_ns_processes(pid_ns);
2145 + write_lock_irq(&tasklist_lock);
2146 +
2147 +@@ -668,7 +676,7 @@ static void forget_original_parent(struct task_struct *father,
2148 + exit_ptrace(father, dead);
2149 +
2150 + /* Can drop and reacquire tasklist_lock */
2151 +- reaper = find_child_reaper(father);
2152 ++ reaper = find_child_reaper(father, dead);
2153 + if (list_empty(&father->children))
2154 + return;
2155 +
2156 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2157 +index 309fb8c969af..10e83672bfbe 100644
2158 +--- a/mm/hugetlb.c
2159 ++++ b/mm/hugetlb.c
2160 +@@ -4269,7 +4269,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2161 + break;
2162 + }
2163 + if (ret & VM_FAULT_RETRY) {
2164 +- if (nonblocking)
2165 ++ if (nonblocking &&
2166 ++ !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
2167 + *nonblocking = 0;
2168 + *nr_pages = 0;
2169 + /*
2170 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
2171 +index 0cd3de3550f0..d9b8a2490633 100644
2172 +--- a/mm/memory-failure.c
2173 ++++ b/mm/memory-failure.c
2174 +@@ -372,7 +372,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
2175 + if (fail || tk->addr_valid == 0) {
2176 + pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
2177 + pfn, tk->tsk->comm, tk->tsk->pid);
2178 +- force_sig(SIGKILL, tk->tsk);
2179 ++ do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
2180 ++ tk->tsk, PIDTYPE_PID);
2181 + }
2182 +
2183 + /*
2184 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
2185 +index 8a136ffda370..c6119ad3561e 100644
2186 +--- a/mm/memory_hotplug.c
2187 ++++ b/mm/memory_hotplug.c
2188 +@@ -1326,23 +1326,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
2189 + static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
2190 + {
2191 + unsigned long pfn;
2192 +- struct page *page;
2193 ++
2194 + for (pfn = start; pfn < end; pfn++) {
2195 +- if (pfn_valid(pfn)) {
2196 +- page = pfn_to_page(pfn);
2197 +- if (PageLRU(page))
2198 +- return pfn;
2199 +- if (__PageMovable(page))
2200 +- return pfn;
2201 +- if (PageHuge(page)) {
2202 +- if (hugepage_migration_supported(page_hstate(page)) &&
2203 +- page_huge_active(page))
2204 +- return pfn;
2205 +- else
2206 +- pfn = round_up(pfn + 1,
2207 +- 1 << compound_order(page)) - 1;
2208 +- }
2209 +- }
2210 ++ struct page *page, *head;
2211 ++ unsigned long skip;
2212 ++
2213 ++ if (!pfn_valid(pfn))
2214 ++ continue;
2215 ++ page = pfn_to_page(pfn);
2216 ++ if (PageLRU(page))
2217 ++ return pfn;
2218 ++ if (__PageMovable(page))
2219 ++ return pfn;
2220 ++
2221 ++ if (!PageHuge(page))
2222 ++ continue;
2223 ++ head = compound_head(page);
2224 ++ if (hugepage_migration_supported(page_hstate(head)) &&
2225 ++ page_huge_active(head))
2226 ++ return pfn;
2227 ++ skip = (1 << compound_order(head)) - (page - head);
2228 ++ pfn += skip - 1;
2229 + }
2230 + return 0;
2231 + }
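
The rewritten scan_movable_pages() also fixes the skip arithmetic for compound pages: if the scan lands partway into a huge page, it should skip only the remaining tail pages, i.e. the compound size minus the offset already covered, with the loop's own pfn++ supplying the final step. The arithmetic in isolation:

#include <stdio.h>

int main(void)
{
        unsigned long head_pfn = 512;        /* first pfn of a 2MB page */
        unsigned long pfn      = 700;        /* scan landed mid-page */
        unsigned int order     = 9;          /* 512 base pages */

        unsigned long skip = (1UL << order) - (pfn - head_pfn);
        pfn += skip - 1;                     /* the loop's pfn++ adds the 1 */

        printf("next pfn scanned: %lu\n", pfn + 1);   /* 1024 */
        return 0;
}
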
2232 +diff --git a/mm/migrate.c b/mm/migrate.c
2233 +index 84381b55b2bd..ab260260a626 100644
2234 +--- a/mm/migrate.c
2235 ++++ b/mm/migrate.c
2236 +@@ -1118,10 +1118,13 @@ out:
2237 + * If migration is successful, decrease refcount of the newpage
2238 + * which will not free the page because new page owner increased
2239 + * refcounter. As well, if it is LRU page, add the page to LRU
2240 +- * list in here.
2241 ++ * list in here. Use the old state of the isolated source page to
2242 ++ * determine if we migrated an LRU page. newpage was already unlocked
2243 ++ * and possibly modified by its owner - don't rely on the page
2244 ++ * state.
2245 + */
2246 + if (rc == MIGRATEPAGE_SUCCESS) {
2247 +- if (unlikely(__PageMovable(newpage)))
2248 ++ if (unlikely(!is_lru))
2249 + put_page(newpage);
2250 + else
2251 + putback_lru_page(newpage);
2252 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
2253 +index f10aa5360616..e66ac8a47dd6 100644
2254 +--- a/mm/oom_kill.c
2255 ++++ b/mm/oom_kill.c
2256 +@@ -634,8 +634,8 @@ static int oom_reaper(void *unused)
2257 +
2258 + static void wake_oom_reaper(struct task_struct *tsk)
2259 + {
2260 +- /* tsk is already queued? */
2261 +- if (tsk == oom_reaper_list || tsk->oom_reaper_list)
2262 ++ /* mm is already queued? */
2263 ++ if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
2264 + return;
2265 +
2266 + get_task_struct(tsk);
2267 +@@ -962,6 +962,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
2268 + * still freeing memory.
2269 + */
2270 + read_lock(&tasklist_lock);
2271 ++
2272 ++ /*
2273 ++ * The task 'p' might have already exited before reaching here. The
2274 ++ * put_task_struct() will free task_struct 'p' while the loop still tries
2275 ++ * to access fields of 'p', so take an extra reference.
2276 ++ */
2277 ++ get_task_struct(p);
2278 + for_each_thread(p, t) {
2279 + list_for_each_entry(child, &t->children, sibling) {
2280 + unsigned int child_points;
2281 +@@ -981,6 +988,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
2282 + }
2283 + }
2284 + }
2285 ++ put_task_struct(p);
2286 + read_unlock(&tasklist_lock);
2287 +
2288 + /*
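
wake_oom_reaper() now keys its "already queued" test on the victim's mm via an atomic test_and_set_bit of MMF_OOM_REAP_QUEUED, so two tasks sharing one mm cannot both enqueue it, and the check-and-mark is a single atomic step. The same gate with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag queued = ATOMIC_FLAG_INIT;

static void wake_reaper(const char *who)
{
        /* returns the previous state: only the first caller enqueues */
        if (atomic_flag_test_and_set(&queued)) {
                printf("%s: already queued, skip\n", who);
                return;
        }
        printf("%s: enqueued for reaping\n", who);
}

int main(void)
{
        wake_reaper("thread A");
        wake_reaper("thread B");
        return 0;
}
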
2289 +diff --git a/net/core/dev.c b/net/core/dev.c
2290 +index 1f1aae27d41f..af097ca9cb4f 100644
2291 +--- a/net/core/dev.c
2292 ++++ b/net/core/dev.c
2293 +@@ -8599,6 +8599,9 @@ int init_dummy_netdev(struct net_device *dev)
2294 + set_bit(__LINK_STATE_PRESENT, &dev->state);
2295 + set_bit(__LINK_STATE_START, &dev->state);
2296 +
2297 ++ /* napi_busy_loop stats accounting wants this */
2298 ++ dev_net_set(dev, &init_net);
2299 ++
2300 + /* Note : We dont allocate pcpu_refcnt for dummy devices,
2301 + * because users of this 'device' dont need to change
2302 + * its refcount.
2303 +diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
2304 +index b798862b6be5..f21ea6125fc2 100644
2305 +--- a/net/ipv4/gre_demux.c
2306 ++++ b/net/ipv4/gre_demux.c
2307 +@@ -25,6 +25,7 @@
2308 + #include <linux/spinlock.h>
2309 + #include <net/protocol.h>
2310 + #include <net/gre.h>
2311 ++#include <net/erspan.h>
2312 +
2313 + #include <net/icmp.h>
2314 + #include <net/route.h>
2315 +@@ -118,6 +119,22 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2316 + hdr_len += 4;
2317 + }
2318 + tpi->hdr_len = hdr_len;
2319 ++
2320 ++ /* ERSPAN versions 1 and 2 set the GRE key field
2321 ++ * to 0 and carry the configured key in the
2322 ++ * inner erspan header field
2323 ++ */
2324 ++ if (greh->protocol == htons(ETH_P_ERSPAN) ||
2325 ++ greh->protocol == htons(ETH_P_ERSPAN2)) {
2326 ++ struct erspan_base_hdr *ershdr;
2327 ++
2328 ++ if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
2329 ++ return -EINVAL;
2330 ++
2331 ++ ershdr = (struct erspan_base_hdr *)options;
2332 ++ tpi->key = cpu_to_be32(get_session_id(ershdr));
2333 ++ }
2334 ++
2335 + return hdr_len;
2336 + }
2337 + EXPORT_SYMBOL(gre_parse_header);
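
Moving the ERSPAN session-id extraction into gre_parse_header() puts it behind one pskb_may_pull() length check before the inner header is read (the per-protocol receive paths drop their own copies below). The check-then-read shape in portable C (simplified header layout, byte order ignored):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ershdr { uint16_t ver_vlan; uint16_t session_cos; };

static int parse_key(const uint8_t *pkt, size_t len, size_t off, uint32_t *key)
{
        struct ershdr h;

        if (len < off + sizeof(h))           /* the pskb_may_pull analogue */
                return -1;
        memcpy(&h, pkt + off, sizeof(h));
        *key = h.session_cos & 0x03ff;       /* 10-bit session id as key */
        return 0;
}

int main(void)
{
        uint8_t pkt[8] = { 0, 0, 0, 0, 0x01, 0x23, 0, 0 };
        uint32_t key = 0;
        int rc = parse_key(pkt, sizeof(pkt), 4, &key);

        printf("rc=%d key=%u\n", rc, key);
        return 0;
}
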
2338 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
2339 +index f8bbd693c19c..d95b32af4a0e 100644
2340 +--- a/net/ipv4/ip_fragment.c
2341 ++++ b/net/ipv4/ip_fragment.c
2342 +@@ -425,6 +425,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
2343 + * fragment.
2344 + */
2345 +
2346 ++ err = -EINVAL;
2347 + /* Find out where to put this fragment. */
2348 + prev_tail = qp->q.fragments_tail;
2349 + if (!prev_tail)
2350 +@@ -501,7 +502,6 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
2351 +
2352 + discard_qp:
2353 + inet_frag_kill(&qp->q);
2354 +- err = -EINVAL;
2355 + __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
2356 + err:
2357 + kfree_skb(skb);
2358 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
2359 +index 0bfad3e72509..f199945f6e4a 100644
2360 +--- a/net/ipv4/ip_gre.c
2361 ++++ b/net/ipv4/ip_gre.c
2362 +@@ -269,20 +269,11 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2363 + int len;
2364 +
2365 + itn = net_generic(net, erspan_net_id);
2366 +- len = gre_hdr_len + sizeof(*ershdr);
2367 +-
2368 +- /* Check based hdr len */
2369 +- if (unlikely(!pskb_may_pull(skb, len)))
2370 +- return PACKET_REJECT;
2371 +
2372 + iph = ip_hdr(skb);
2373 + ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
2374 + ver = ershdr->ver;
2375 +
2376 +- /* The original GRE header does not have key field,
2377 +- * Use ERSPAN 10-bit session ID as key.
2378 +- */
2379 +- tpi->key = cpu_to_be32(get_session_id(ershdr));
2380 + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
2381 + tpi->flags | TUNNEL_KEY,
2382 + iph->saddr, iph->daddr, tpi->key);
2383 +@@ -1471,12 +1462,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2384 + {
2385 + struct ip_tunnel *t = netdev_priv(dev);
2386 + struct ip_tunnel_parm *p = &t->parms;
2387 ++ __be16 o_flags = p->o_flags;
2388 ++
2389 ++ if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
2390 ++ !t->collect_md)
2391 ++ o_flags |= TUNNEL_KEY;
2392 +
2393 + if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
2394 + nla_put_be16(skb, IFLA_GRE_IFLAGS,
2395 + gre_tnl_flags_to_gre_flags(p->i_flags)) ||
2396 + nla_put_be16(skb, IFLA_GRE_OFLAGS,
2397 +- gre_tnl_flags_to_gre_flags(p->o_flags)) ||
2398 ++ gre_tnl_flags_to_gre_flags(o_flags)) ||
2399 + nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
2400 + nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
2401 + nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
2402 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
2403 +index 506b2ae07bb3..79fcd9550fd2 100644
2404 +--- a/net/ipv6/af_inet6.c
2405 ++++ b/net/ipv6/af_inet6.c
2406 +@@ -361,6 +361,9 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
2407 + err = -EINVAL;
2408 + goto out_unlock;
2409 + }
2410 ++ }
2411 ++
2412 ++ if (sk->sk_bound_dev_if) {
2413 + dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
2414 + if (!dev) {
2415 + err = -ENODEV;
2416 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
2417 +index 345e6839f031..be04877b3827 100644
2418 +--- a/net/ipv6/ip6_gre.c
2419 ++++ b/net/ipv6/ip6_gre.c
2420 +@@ -550,13 +550,9 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
2421 + struct ip6_tnl *tunnel;
2422 + u8 ver;
2423 +
2424 +- if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
2425 +- return PACKET_REJECT;
2426 +-
2427 + ipv6h = ipv6_hdr(skb);
2428 + ershdr = (struct erspan_base_hdr *)skb->data;
2429 + ver = ershdr->ver;
2430 +- tpi->key = cpu_to_be32(get_session_id(ershdr));
2431 +
2432 + tunnel = ip6gre_tunnel_lookup(skb->dev,
2433 + &ipv6h->saddr, &ipv6h->daddr, tpi->key,
2434 +@@ -2124,12 +2120,17 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2435 + {
2436 + struct ip6_tnl *t = netdev_priv(dev);
2437 + struct __ip6_tnl_parm *p = &t->parms;
2438 ++ __be16 o_flags = p->o_flags;
2439 ++
2440 ++ if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
2441 ++ !p->collect_md)
2442 ++ o_flags |= TUNNEL_KEY;
2443 +
2444 + if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
2445 + nla_put_be16(skb, IFLA_GRE_IFLAGS,
2446 + gre_tnl_flags_to_gre_flags(p->i_flags)) ||
2447 + nla_put_be16(skb, IFLA_GRE_OFLAGS,
2448 +- gre_tnl_flags_to_gre_flags(p->o_flags)) ||
2449 ++ gre_tnl_flags_to_gre_flags(o_flags)) ||
2450 + nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
2451 + nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
2452 + nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
2453 +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
2454 +index 331e6b6dd252..10aafea3af0f 100644
2455 +--- a/net/ipv6/ip6mr.c
2456 ++++ b/net/ipv6/ip6mr.c
2457 +@@ -1506,6 +1506,9 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
2458 + continue;
2459 + rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
2460 + list_del_rcu(&c->list);
2461 ++ call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
2462 ++ FIB_EVENT_ENTRY_DEL,
2463 ++ (struct mfc6_cache *)c, mrt->id);
2464 + mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
2465 + mr_cache_put(c);
2466 + }
2467 +@@ -1514,10 +1517,6 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
2468 + spin_lock_bh(&mfc_unres_lock);
2469 + list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
2470 + list_del(&c->list);
2471 +- call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
2472 +- FIB_EVENT_ENTRY_DEL,
2473 +- (struct mfc6_cache *)c,
2474 +- mrt->id);
2475 + mr6_netlink_event(mrt, (struct mfc6_cache *)c,
2476 + RTM_DELROUTE);
2477 + ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
2478 +diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
2479 +index 8181ee7e1e27..ee5403cbe655 100644
2480 +--- a/net/ipv6/seg6_iptunnel.c
2481 ++++ b/net/ipv6/seg6_iptunnel.c
2482 +@@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
2483 + } else {
2484 + ip6_flow_hdr(hdr, 0, flowlabel);
2485 + hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
2486 ++
2487 ++ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
2488 + }
2489 +
2490 + hdr->nexthdr = NEXTHDR_ROUTING;
2491 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
2492 +index 26f1d435696a..fed6becc5daf 100644
2493 +--- a/net/l2tp/l2tp_core.c
2494 ++++ b/net/l2tp/l2tp_core.c
2495 +@@ -83,8 +83,7 @@
2496 + #define L2TP_SLFLAG_S 0x40000000
2497 + #define L2TP_SL_SEQ_MASK 0x00ffffff
2498 +
2499 +-#define L2TP_HDR_SIZE_SEQ 10
2500 +-#define L2TP_HDR_SIZE_NOSEQ 6
2501 ++#define L2TP_HDR_SIZE_MAX 14
2502 +
2503 + /* Default trace flags */
2504 + #define L2TP_DEFAULT_DEBUG_FLAGS 0
2505 +@@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
2506 + __skb_pull(skb, sizeof(struct udphdr));
2507 +
2508 + /* Short packet? */
2509 +- if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
2510 ++ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
2511 + l2tp_info(tunnel, L2TP_MSG_DATA,
2512 + "%s: recv short packet (len=%d)\n",
2513 + tunnel->name, skb->len);
2514 +@@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
2515 + goto error;
2516 + }
2517 +
2518 ++ if (tunnel->version == L2TP_HDR_VER_3 &&
2519 ++ l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
2520 ++ goto error;
2521 ++
2522 + l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
2523 + l2tp_session_dec_refcount(session);
2524 +
2525 +diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
2526 +index 9c9afe94d389..b2ce90260c35 100644
2527 +--- a/net/l2tp/l2tp_core.h
2528 ++++ b/net/l2tp/l2tp_core.h
2529 +@@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
2530 + }
2531 + #endif
2532 +
2533 ++static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
2534 ++ unsigned char **ptr, unsigned char **optr)
2535 ++{
2536 ++ int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
2537 ++
2538 ++ if (opt_len > 0) {
2539 ++ int off = *ptr - *optr;
2540 ++
2541 ++ if (!pskb_may_pull(skb, off + opt_len))
2542 ++ return -1;
2543 ++
2544 ++ if (skb->data != *optr) {
2545 ++ *optr = skb->data;
2546 ++ *ptr = skb->data + off;
2547 ++ }
2548 ++ }
2549 ++
2550 ++ return 0;
2551 ++}
2552 ++
2553 + #define l2tp_printk(ptr, type, func, fmt, ...) \
2554 + do { \
2555 + if (((ptr)->debug) & (type)) \
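
l2tp_v3_ensure_opt_in_linear() encodes a pskb_may_pull() subtlety: pulling may relocate skb->data, so the cached ptr/optr cursors must be re-derived from a saved offset rather than reused. A userspace analogue with realloc(), which can likewise move the buffer:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *buf = malloc(8);
        char *ptr, *grown;
        size_t off;

        if (!buf)
                return 1;
        strcpy(buf, "l2tpv3");
        ptr = buf + 4;                       /* cursor into the buffer */

        off = ptr - buf;                     /* save the offset... */
        grown = realloc(buf, 64);            /* ...buffer may move here */
        if (!grown) {
                free(buf);
                return 1;
        }
        buf = grown;
        ptr = buf + off;                     /* ...then re-derive the cursor */

        printf("%s\n", ptr);                 /* "v3" */
        free(buf);
        return 0;
}
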
2556 +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
2557 +index 35f6f86d4dcc..d4c60523c549 100644
2558 +--- a/net/l2tp/l2tp_ip.c
2559 ++++ b/net/l2tp/l2tp_ip.c
2560 +@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
2561 + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
2562 + }
2563 +
2564 ++ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
2565 ++ goto discard_sess;
2566 ++
2567 + l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
2568 + l2tp_session_dec_refcount(session);
2569 +
2570 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
2571 +index 237f1a4a0b0c..0ae6899edac0 100644
2572 +--- a/net/l2tp/l2tp_ip6.c
2573 ++++ b/net/l2tp/l2tp_ip6.c
2574 +@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
2575 + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
2576 + }
2577 +
2578 ++ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
2579 ++ goto discard_sess;
2580 ++
2581 + l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
2582 + l2tp_session_dec_refcount(session);
2583 +
2584 +diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
2585 +index cbd51ed5a2d7..908e53ab47a4 100644
2586 +--- a/net/netrom/nr_timer.c
2587 ++++ b/net/netrom/nr_timer.c
2588 +@@ -52,21 +52,21 @@ void nr_start_t1timer(struct sock *sk)
2589 + {
2590 + struct nr_sock *nr = nr_sk(sk);
2591 +
2592 +- mod_timer(&nr->t1timer, jiffies + nr->t1);
2593 ++ sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
2594 + }
2595 +
2596 + void nr_start_t2timer(struct sock *sk)
2597 + {
2598 + struct nr_sock *nr = nr_sk(sk);
2599 +
2600 +- mod_timer(&nr->t2timer, jiffies + nr->t2);
2601 ++ sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
2602 + }
2603 +
2604 + void nr_start_t4timer(struct sock *sk)
2605 + {
2606 + struct nr_sock *nr = nr_sk(sk);
2607 +
2608 +- mod_timer(&nr->t4timer, jiffies + nr->t4);
2609 ++ sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
2610 + }
2611 +
2612 + void nr_start_idletimer(struct sock *sk)
2613 +@@ -74,37 +74,37 @@ void nr_start_idletimer(struct sock *sk)
2614 + struct nr_sock *nr = nr_sk(sk);
2615 +
2616 + if (nr->idle > 0)
2617 +- mod_timer(&nr->idletimer, jiffies + nr->idle);
2618 ++ sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
2619 + }
2620 +
2621 + void nr_start_heartbeat(struct sock *sk)
2622 + {
2623 +- mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
2624 ++ sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
2625 + }
2626 +
2627 + void nr_stop_t1timer(struct sock *sk)
2628 + {
2629 +- del_timer(&nr_sk(sk)->t1timer);
2630 ++ sk_stop_timer(sk, &nr_sk(sk)->t1timer);
2631 + }
2632 +
2633 + void nr_stop_t2timer(struct sock *sk)
2634 + {
2635 +- del_timer(&nr_sk(sk)->t2timer);
2636 ++ sk_stop_timer(sk, &nr_sk(sk)->t2timer);
2637 + }
2638 +
2639 + void nr_stop_t4timer(struct sock *sk)
2640 + {
2641 +- del_timer(&nr_sk(sk)->t4timer);
2642 ++ sk_stop_timer(sk, &nr_sk(sk)->t4timer);
2643 + }
2644 +
2645 + void nr_stop_idletimer(struct sock *sk)
2646 + {
2647 +- del_timer(&nr_sk(sk)->idletimer);
2648 ++ sk_stop_timer(sk, &nr_sk(sk)->idletimer);
2649 + }
2650 +
2651 + void nr_stop_heartbeat(struct sock *sk)
2652 + {
2653 +- del_timer(&sk->sk_timer);
2654 ++ sk_stop_timer(sk, &sk->sk_timer);
2655 + }
2656 +
2657 + int nr_t1timer_running(struct sock *sk)
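
The netrom switch from mod_timer()/del_timer() to sk_reset_timer()/sk_stop_timer() matters for lifetime, not timing: the sk_ helpers take a socket reference when arming and drop it when cancelling, so a pending timer can never fire against a freed sock. The pairing, sketched with a bare refcount (simplified; the real helpers only take the reference when the timer was not already pending):

#include <stdio.h>

struct sock { int refcnt; };

static void sk_reset_timer(struct sock *sk) { sk->refcnt++; /* + arm timer */ }
static void sk_stop_timer(struct sock *sk)  { sk->refcnt--; /* + del timer */ }

int main(void)
{
        struct sock sk = { .refcnt = 1 };

        sk_reset_timer(&sk);                 /* timer now pins the socket */
        sk_stop_timer(&sk);                  /* cancelling releases the pin */
        printf("refcnt=%d\n", sk.refcnt);
        return 0;
}
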
2658 +diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
2659 +index 77e9f85a2c92..f2ff21d7df08 100644
2660 +--- a/net/rose/rose_route.c
2661 ++++ b/net/rose/rose_route.c
2662 +@@ -850,6 +850,7 @@ void rose_link_device_down(struct net_device *dev)
2663 +
2664 + /*
2665 + * Route a frame to an appropriate AX.25 connection.
2666 ++ * A NULL ax25_cb indicates an internally generated frame.
2667 + */
2668 + int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
2669 + {
2670 +@@ -867,6 +868,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
2671 +
2672 + if (skb->len < ROSE_MIN_LEN)
2673 + return res;
2674 ++
2675 ++ if (!ax25)
2676 ++ return rose_loopback_queue(skb, NULL);
2677 ++
2678 + frametype = skb->data[2];
2679 + lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
2680 + if (frametype == ROSE_CALL_REQUEST &&
2681 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
2682 +index 0bae07e9c9e7..4fede55b9010 100644
2683 +--- a/net/sctp/ipv6.c
2684 ++++ b/net/sctp/ipv6.c
2685 +@@ -277,7 +277,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
2686 +
2687 + if (saddr) {
2688 + fl6->saddr = saddr->v6.sin6_addr;
2689 +- fl6->fl6_sport = saddr->v6.sin6_port;
2690 ++ if (!fl6->fl6_sport)
2691 ++ fl6->fl6_sport = saddr->v6.sin6_port;
2692 +
2693 + pr_debug("src=%pI6 - ", &fl6->saddr);
2694 + }
2695 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
2696 +index d4352111e69d..1c9f079e8a50 100644
2697 +--- a/net/sctp/protocol.c
2698 ++++ b/net/sctp/protocol.c
2699 +@@ -440,7 +440,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
2700 + }
2701 + if (saddr) {
2702 + fl4->saddr = saddr->v4.sin_addr.s_addr;
2703 +- fl4->fl4_sport = saddr->v4.sin_port;
2704 ++ if (!fl4->fl4_sport)
2705 ++ fl4->fl4_sport = saddr->v4.sin_port;
2706 + }
2707 +
2708 + pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
2709 +diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
2710 +index f4ac6c592e13..d05c57664e36 100644
2711 +--- a/net/sctp/sm_make_chunk.c
2712 ++++ b/net/sctp/sm_make_chunk.c
2713 +@@ -495,7 +495,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
2714 + *
2715 + * [INIT ACK back to where the INIT came from.]
2716 + */
2717 +- retval->transport = chunk->transport;
2718 ++ if (chunk->transport)
2719 ++ retval->transport =
2720 ++ sctp_assoc_lookup_paddr(asoc,
2721 ++ &chunk->transport->ipaddr);
2722 +
2723 + retval->subh.init_hdr =
2724 + sctp_addto_chunk(retval, sizeof(initack), &initack);
2725 +@@ -642,8 +645,10 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
2726 + *
2727 + * [COOKIE ACK back to where the COOKIE ECHO came from.]
2728 + */
2729 +- if (retval && chunk)
2730 +- retval->transport = chunk->transport;
2731 ++ if (retval && chunk && chunk->transport)
2732 ++ retval->transport =
2733 ++ sctp_assoc_lookup_paddr(asoc,
2734 ++ &chunk->transport->ipaddr);
2735 +
2736 + return retval;
2737 + }
2738 +diff --git a/net/sctp/stream.c b/net/sctp/stream.c
2739 +index 3892e7630f3a..80e0ae5534ec 100644
2740 +--- a/net/sctp/stream.c
2741 ++++ b/net/sctp/stream.c
2742 +@@ -585,9 +585,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
2743 + struct sctp_strreset_outreq *outreq = param.v;
2744 + struct sctp_stream *stream = &asoc->stream;
2745 + __u32 result = SCTP_STRRESET_DENIED;
2746 +- __u16 i, nums, flags = 0;
2747 + __be16 *str_p = NULL;
2748 + __u32 request_seq;
2749 ++ __u16 i, nums;
2750 +
2751 + request_seq = ntohl(outreq->request_seq);
2752 +
2753 +@@ -615,6 +615,15 @@ struct sctp_chunk *sctp_process_strreset_outreq(
2754 + if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
2755 + goto out;
2756 +
2757 ++ nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
2758 ++ str_p = outreq->list_of_streams;
2759 ++ for (i = 0; i < nums; i++) {
2760 ++ if (ntohs(str_p[i]) >= stream->incnt) {
2761 ++ result = SCTP_STRRESET_ERR_WRONG_SSN;
2762 ++ goto out;
2763 ++ }
2764 ++ }
2765 ++
2766 + if (asoc->strreset_chunk) {
2767 + if (!sctp_chunk_lookup_strreset_param(
2768 + asoc, outreq->response_seq,
2769 +@@ -637,32 +646,19 @@ struct sctp_chunk *sctp_process_strreset_outreq(
2770 + sctp_chunk_put(asoc->strreset_chunk);
2771 + asoc->strreset_chunk = NULL;
2772 + }
2773 +-
2774 +- flags = SCTP_STREAM_RESET_INCOMING_SSN;
2775 + }
2776 +
2777 +- nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
2778 +- if (nums) {
2779 +- str_p = outreq->list_of_streams;
2780 +- for (i = 0; i < nums; i++) {
2781 +- if (ntohs(str_p[i]) >= stream->incnt) {
2782 +- result = SCTP_STRRESET_ERR_WRONG_SSN;
2783 +- goto out;
2784 +- }
2785 +- }
2786 +-
2787 ++ if (nums)
2788 + for (i = 0; i < nums; i++)
2789 + SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
2790 +- } else {
2791 ++ else
2792 + for (i = 0; i < stream->incnt; i++)
2793 + SCTP_SI(stream, i)->mid = 0;
2794 +- }
2795 +
2796 + result = SCTP_STRRESET_PERFORMED;
2797 +
2798 + *evp = sctp_ulpevent_make_stream_reset_event(asoc,
2799 +- flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p,
2800 +- GFP_ATOMIC);
2801 ++ SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
2802 +
2803 + out:
2804 + sctp_update_strreset_result(asoc, result);
2805 +@@ -738,9 +734,6 @@ struct sctp_chunk *sctp_process_strreset_inreq(
2806 +
2807 + result = SCTP_STRRESET_PERFORMED;
2808 +
2809 +- *evp = sctp_ulpevent_make_stream_reset_event(asoc,
2810 +- SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
2811 +-
2812 + out:
2813 + sctp_update_strreset_result(asoc, result);
2814 + err:
2815 +@@ -873,6 +866,14 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
2816 + if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
2817 + goto out;
2818 +
2819 ++ in = ntohs(addstrm->number_of_streams);
2820 ++ incnt = stream->incnt + in;
2821 ++ if (!in || incnt > SCTP_MAX_STREAM)
2822 ++ goto out;
2823 ++
2824 ++ if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
2825 ++ goto out;
2826 ++
2827 + if (asoc->strreset_chunk) {
2828 + if (!sctp_chunk_lookup_strreset_param(
2829 + asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
2830 +@@ -896,14 +897,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
2831 + }
2832 + }
2833 +
2834 +- in = ntohs(addstrm->number_of_streams);
2835 +- incnt = stream->incnt + in;
2836 +- if (!in || incnt > SCTP_MAX_STREAM)
2837 +- goto out;
2838 +-
2839 +- if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
2840 +- goto out;
2841 +-
2842 + stream->incnt = incnt;
2843 +
2844 + result = SCTP_STRRESET_PERFORMED;
2845 +@@ -973,9 +966,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in(
2846 +
2847 + result = SCTP_STRRESET_PERFORMED;
2848 +
2849 +- *evp = sctp_ulpevent_make_stream_change_event(asoc,
2850 +- 0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
2851 +-
2852 + out:
2853 + sctp_update_strreset_result(asoc, result);
2854 + err:
2855 +@@ -1036,10 +1026,10 @@ struct sctp_chunk *sctp_process_strreset_resp(
2856 + sout->mid_uo = 0;
2857 + }
2858 + }
2859 +-
2860 +- flags = SCTP_STREAM_RESET_OUTGOING_SSN;
2861 + }
2862 +
2863 ++ flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
2864 ++
2865 + for (i = 0; i < stream->outcnt; i++)
2866 + SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
2867 +
2868 +@@ -1058,6 +1048,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
2869 + nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
2870 + sizeof(__u16);
2871 +
2872 ++ flags |= SCTP_STREAM_RESET_INCOMING_SSN;
2873 ++
2874 + *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
2875 + nums, str_p, GFP_ATOMIC);
2876 + } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
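
Several of the sctp stream-reset hunks share one shape: validation (stream ids in range, new stream counts allocatable) is hoisted above the first state change, so a request that ends up denied cannot leave the association half-modified. The validate-then-commit ordering in miniature:

#include <stdio.h>

#define NSTREAMS 16

static int reset_streams(unsigned int *mids, unsigned int count,
                         const unsigned int *ids, unsigned int nums)
{
        unsigned int i;

        for (i = 0; i < nums; i++)           /* validate everything first */
                if (ids[i] >= count)
                        return -1;           /* deny: nothing touched yet */

        for (i = 0; i < nums; i++)           /* only then mutate state */
                mids[ids[i]] = 0;
        return 0;
}

int main(void)
{
        unsigned int mids[NSTREAMS], i;
        const unsigned int bad[] = { 1, 99 };

        for (i = 0; i < NSTREAMS; i++)
                mids[i] = 7;
        printf("rc=%d mids[1]=%u\n",
               reset_streams(mids, NSTREAMS, bad, 2), mids[1]);
        return 0;
}
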
2877 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2878 +index f39f34e12fb6..dbb38fe2da7d 100644
2879 +--- a/sound/pci/hda/patch_realtek.c
2880 ++++ b/sound/pci/hda/patch_realtek.c
2881 +@@ -117,6 +117,7 @@ struct alc_spec {
2882 + int codec_variant; /* flag for other variants */
2883 + unsigned int has_alc5505_dsp:1;
2884 + unsigned int no_depop_delay:1;
2885 ++ unsigned int done_hp_init:1;
2886 +
2887 + /* for PLL fix */
2888 + hda_nid_t pll_nid;
2889 +@@ -3372,6 +3373,48 @@ static void alc_default_shutup(struct hda_codec *codec)
2890 + snd_hda_shutup_pins(codec);
2891 + }
2892 +
2893 ++static void alc294_hp_init(struct hda_codec *codec)
2894 ++{
2895 ++ struct alc_spec *spec = codec->spec;
2896 ++ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
2897 ++ int i, val;
2898 ++
2899 ++ if (!hp_pin)
2900 ++ return;
2901 ++
2902 ++ snd_hda_codec_write(codec, hp_pin, 0,
2903 ++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
2904 ++
2905 ++ msleep(100);
2906 ++
2907 ++ snd_hda_codec_write(codec, hp_pin, 0,
2908 ++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
2909 ++
2910 ++ alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
2911 ++ alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
2912 ++
2913 ++ /* Wait for depop procedure finish */
2914 ++ val = alc_read_coefex_idx(codec, 0x58, 0x01);
2915 ++ for (i = 0; i < 20 && val & 0x0080; i++) {
2916 ++ msleep(50);
2917 ++ val = alc_read_coefex_idx(codec, 0x58, 0x01);
2918 ++ }
2919 ++ /* Set HP depop to auto mode */
2920 ++ alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
2921 ++ msleep(50);
2922 ++}
2923 ++
2924 ++static void alc294_init(struct hda_codec *codec)
2925 ++{
2926 ++ struct alc_spec *spec = codec->spec;
2927 ++
2928 ++ if (!spec->done_hp_init) {
2929 ++ alc294_hp_init(codec);
2930 ++ spec->done_hp_init = true;
2931 ++ }
2932 ++ alc_default_init(codec);
2933 ++}
2934 ++
2935 + static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
2936 + unsigned int val)
2937 + {
2938 +@@ -7288,37 +7331,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
2939 + alc_update_coef_idx(codec, 0x4, 0, 1<<11);
2940 + }
2941 +
2942 +-static void alc294_hp_init(struct hda_codec *codec)
2943 +-{
2944 +- struct alc_spec *spec = codec->spec;
2945 +- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
2946 +- int i, val;
2947 +-
2948 +- if (!hp_pin)
2949 +- return;
2950 +-
2951 +- snd_hda_codec_write(codec, hp_pin, 0,
2952 +- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
2953 +-
2954 +- msleep(100);
2955 +-
2956 +- snd_hda_codec_write(codec, hp_pin, 0,
2957 +- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
2958 +-
2959 +- alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
2960 +- alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
2961 +-
2962 +- /* Wait for depop procedure finish */
2963 +- val = alc_read_coefex_idx(codec, 0x58, 0x01);
2964 +- for (i = 0; i < 20 && val & 0x0080; i++) {
2965 +- msleep(50);
2966 +- val = alc_read_coefex_idx(codec, 0x58, 0x01);
2967 +- }
2968 +- /* Set HP depop to auto mode */
2969 +- alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
2970 +- msleep(50);
2971 +-}
2972 +-
2973 + /*
2974 + */
2975 + static int patch_alc269(struct hda_codec *codec)
2976 +@@ -7444,7 +7456,7 @@ static int patch_alc269(struct hda_codec *codec)
2977 + spec->codec_variant = ALC269_TYPE_ALC294;
2978 + spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
2979 + alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
2980 +- alc294_hp_init(codec);
2981 ++ spec->init_hook = alc294_init;
2982 + break;
2983 + case 0x10ec0300:
2984 + spec->codec_variant = ALC269_TYPE_ALC300;
2985 +@@ -7456,7 +7468,7 @@ static int patch_alc269(struct hda_codec *codec)
2986 + spec->codec_variant = ALC269_TYPE_ALC700;
2987 + spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
2988 + alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
2989 +- alc294_hp_init(codec);
2990 ++ spec->init_hook = alc294_init;
2991 + break;
2992 +
2993 + }
2994 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
2995 +index 6623cafc94f2..7e93686a430a 100644
2996 +--- a/sound/usb/quirks.c
2997 ++++ b/sound/usb/quirks.c
2998 +@@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
2999 + return SNDRV_PCM_FMTBIT_DSD_U32_BE;
3000 + break;
3001 +
3002 ++ case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
3003 + case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
3004 + case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
3005 + case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
3006 +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
3007 +index e1473234968d..83057fa9d391 100644
3008 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
3009 ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
3010 +@@ -1563,7 +1563,16 @@ TEST_F(TRACE_poke, getpid_runs_normally)
3011 + #ifdef SYSCALL_NUM_RET_SHARE_REG
3012 + # define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action)
3013 + #else
3014 +-# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(val, action)
3015 ++# define EXPECT_SYSCALL_RETURN(val, action) \
3016 ++ do { \
3017 ++ errno = 0; \
3018 ++ if (val < 0) { \
3019 ++ EXPECT_EQ(-1, action); \
3020 ++ EXPECT_EQ(-(val), errno); \
3021 ++ } else { \
3022 ++ EXPECT_EQ(val, action); \
3023 ++ } \
3024 ++ } while (0)
3025 + #endif
3026 +
3027 + /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
3028 +@@ -1602,7 +1611,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
3029 +
3030 + /* Architecture-specific syscall changing routine. */
3031 + void change_syscall(struct __test_metadata *_metadata,
3032 +- pid_t tracee, int syscall)
3033 ++ pid_t tracee, int syscall, int result)
3034 + {
3035 + int ret;
3036 + ARCH_REGS regs;
3037 +@@ -1661,7 +1670,7 @@ void change_syscall(struct __test_metadata *_metadata,
3038 + #ifdef SYSCALL_NUM_RET_SHARE_REG
3039 + TH_LOG("Can't modify syscall return on this architecture");
3040 + #else
3041 +- regs.SYSCALL_RET = EPERM;
3042 ++ regs.SYSCALL_RET = result;
3043 + #endif
3044 +
3045 + #ifdef HAVE_GETREGS
3046 +@@ -1689,14 +1698,19 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
3047 + case 0x1002:
3048 + /* change getpid to getppid. */
3049 + EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
3050 +- change_syscall(_metadata, tracee, __NR_getppid);
3051 ++ change_syscall(_metadata, tracee, __NR_getppid, 0);
3052 + break;
3053 + case 0x1003:
3054 +- /* skip gettid. */
3055 ++ /* skip gettid with valid return code. */
3056 + EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
3057 +- change_syscall(_metadata, tracee, -1);
3058 ++ change_syscall(_metadata, tracee, -1, 45000);
3059 + break;
3060 + case 0x1004:
3061 ++ /* skip openat with error. */
3062 ++ EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
3063 ++ change_syscall(_metadata, tracee, -1, -ESRCH);
3064 ++ break;
3065 ++ case 0x1005:
3066 + /* do nothing (allow getppid) */
3067 + EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
3068 + break;
3069 +@@ -1729,9 +1743,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
3070 + nr = get_syscall(_metadata, tracee);
3071 +
3072 + if (nr == __NR_getpid)
3073 +- change_syscall(_metadata, tracee, __NR_getppid);
3074 ++ change_syscall(_metadata, tracee, __NR_getppid, 0);
3075 ++ if (nr == __NR_gettid)
3076 ++ change_syscall(_metadata, tracee, -1, 45000);
3077 + if (nr == __NR_openat)
3078 +- change_syscall(_metadata, tracee, -1);
3079 ++ change_syscall(_metadata, tracee, -1, -ESRCH);
3080 + }
3081 +
3082 + FIXTURE_DATA(TRACE_syscall) {
3083 +@@ -1748,8 +1764,10 @@ FIXTURE_SETUP(TRACE_syscall)
3084 + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
3085 + BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
3086 + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
3087 +- BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
3088 ++ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
3089 + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
3090 ++ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
3091 ++ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
3092 + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
3093 + };
3094 +
3095 +@@ -1797,15 +1815,26 @@ TEST_F(TRACE_syscall, ptrace_syscall_redirected)
3096 + EXPECT_NE(self->mypid, syscall(__NR_getpid));
3097 + }
3098 +
3099 +-TEST_F(TRACE_syscall, ptrace_syscall_dropped)
3100 ++TEST_F(TRACE_syscall, ptrace_syscall_errno)
3101 ++{
3102 ++ /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
3103 ++ teardown_trace_fixture(_metadata, self->tracer);
3104 ++ self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
3105 ++ true);
3106 ++
3107 ++ /* Tracer should skip the openat syscall, resulting in ESRCH. */
3108 ++ EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
3109 ++}
3110 ++
3111 ++TEST_F(TRACE_syscall, ptrace_syscall_faked)
3112 + {
3113 + /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
3114 + teardown_trace_fixture(_metadata, self->tracer);
3115 + self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
3116 + true);
3117 +
3118 +- /* Tracer should skip the open syscall, resulting in EPERM. */
3119 +- EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
3120 ++ /* Tracer should skip the gettid syscall, resulting in a fake pid. */
3121 ++ EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
3122 + }
3123 +
3124 + TEST_F(TRACE_syscall, syscall_allowed)
3125 +@@ -1838,7 +1867,21 @@ TEST_F(TRACE_syscall, syscall_redirected)
3126 + EXPECT_NE(self->mypid, syscall(__NR_getpid));
3127 + }
3128 +
3129 +-TEST_F(TRACE_syscall, syscall_dropped)
3130 ++TEST_F(TRACE_syscall, syscall_errno)
3131 ++{
3132 ++ long ret;
3133 ++
3134 ++ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3135 ++ ASSERT_EQ(0, ret);
3136 ++
3137 ++ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
3138 ++ ASSERT_EQ(0, ret);
3139 ++
3140 ++ /* openat has been skipped and an errno returned. */
3141 ++ EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
3142 ++}
3143 ++
3144 ++TEST_F(TRACE_syscall, syscall_faked)
3145 + {
3146 + long ret;
3147 +
3148 +@@ -1849,8 +1892,7 @@ TEST_F(TRACE_syscall, syscall_dropped)
3149 + ASSERT_EQ(0, ret);
3150 +
3151 + /* gettid has been skipped and an altered return value stored. */
3152 +- EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid));
3153 +- EXPECT_NE(self->mytid, syscall(__NR_gettid));
3154 ++ EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
3155 + }
3156 +
3157 + TEST_F(TRACE_syscall, skip_after_RET_TRACE)