From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 07 Oct 2020 12:50:26
Message-Id: 1602075011.acb7e192c86b3b4470f10c8245b744d7947a1982.mpagano@gentoo
commit: acb7e192c86b3b4470f10c8245b744d7947a1982
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 7 12:50:11 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Oct 7 12:50:11 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=acb7e192c86b3b4470f10c8245b744d7947a1982

Linux patch 4.19.150

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1149_linux-4.19.150.patch | 1698 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1702 insertions(+)

diff --git a/0000_README b/0000_README
index e7a8587..47aa030 100644
--- a/0000_README
+++ b/0000_README
@@ -635,6 +635,10 @@ Patch: 1148_linux-4.19.149.patch
From: https://www.kernel.org
Desc: Linux 4.19.149

+Patch: 1149_linux-4.19.150.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.150
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1149_linux-4.19.150.patch b/1149_linux-4.19.150.patch
new file mode 100644
index 0000000..adccde0
--- /dev/null
+++ b/1149_linux-4.19.150.patch
@@ -0,0 +1,1698 @@
+diff --git a/Makefile b/Makefile
+index 3ff5cf33ef55c..65485185bec29 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 149
++SUBLEVEL = 150
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
+index 79e5cc70f1fdd..561e2573bd34c 100644
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -499,7 +499,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
+ if (map_start < map_end)
+ memmap_init_zone((unsigned long)(map_end - map_start),
+ args->nid, args->zone, page_to_pfn(map_start),
+- MEMMAP_EARLY, NULL);
++ MEMINIT_EARLY, NULL);
+ return 0;
+ }
+
+@@ -508,8 +508,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
+ unsigned long start_pfn)
+ {
+ if (!vmem_map) {
+- memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
+- NULL);
++ memmap_init_zone(size, nid, zone, start_pfn,
++ MEMINIT_EARLY, NULL);
+ } else {
+ struct page *start;
+ struct memmap_init_callback_data args;
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index f3565c2dbc527..503e2f90e58ef 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -403,10 +403,32 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
+ return pfn_to_nid(pfn);
+ }
+
++static int do_register_memory_block_under_node(int nid,
++ struct memory_block *mem_blk)
++{
++ int ret;
++
++ /*
++ * If this memory block spans multiple nodes, we only indicate
++ * the last processed node.
++ */
++ mem_blk->nid = nid;
++
++ ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
++ &mem_blk->dev.kobj,
++ kobject_name(&mem_blk->dev.kobj));
++ if (ret)
++ return ret;
++
++ return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
++ &node_devices[nid]->dev.kobj,
++ kobject_name(&node_devices[nid]->dev.kobj));
++}
++
+ /* register memory section under specified node if it spans that node */
+-int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
++int register_mem_block_under_node_early(struct memory_block *mem_blk, void *arg)
+ {
+- int ret, nid = *(int *)arg;
++ int nid = *(int *)arg;
+ unsigned long pfn, sect_start_pfn, sect_end_pfn;
+
+ sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+@@ -426,38 +448,33 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+ }
+
+ /*
+- * We need to check if page belongs to nid only for the boot
+- * case, during hotplug we know that all pages in the memory
+- * block belong to the same node.
+- */
+- if (system_state == SYSTEM_BOOTING) {
+- page_nid = get_nid_for_pfn(pfn);
+- if (page_nid < 0)
+- continue;
+- if (page_nid != nid)
+- continue;
+- }
+-
+- /*
+- * If this memory block spans multiple nodes, we only indicate
+- * the last processed node.
++ * We need to check if page belongs to nid only at the boot
++ * case because node's ranges can be interleaved.
+ */
+- mem_blk->nid = nid;
+-
+- ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
+- &mem_blk->dev.kobj,
+- kobject_name(&mem_blk->dev.kobj));
+- if (ret)
+- return ret;
++ page_nid = get_nid_for_pfn(pfn);
++ if (page_nid < 0)
++ continue;
++ if (page_nid != nid)
++ continue;
+
+- return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+- &node_devices[nid]->dev.kobj,
+- kobject_name(&node_devices[nid]->dev.kobj));
++ return do_register_memory_block_under_node(nid, mem_blk);
+ }
+ /* mem section does not span the specified node */
+ return 0;
+ }
+
++/*
++ * During hotplug we know that all pages in the memory block belong to the same
++ * node.
++ */
++static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
++ void *arg)
++{
++ int nid = *(int *)arg;
++
++ return do_register_memory_block_under_node(nid, mem_blk);
++}
++
+ /*
+ * Unregister a memory block device under the node it spans. Memory blocks
+ * with multiple nodes cannot be offlined and therefore also never be removed.
+@@ -473,10 +490,17 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
+ kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
+ }
+
+-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
++int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
++ enum meminit_context context)
+ {
+- return walk_memory_range(start_pfn, end_pfn, (void *)&nid,
+- register_mem_sect_under_node);
++ walk_memory_blocks_func_t func;
++
++ if (context == MEMINIT_HOTPLUG)
++ func = register_mem_block_under_node_hotplug;
++ else
++ func = register_mem_block_under_node_early;
++
++ return walk_memory_range(start_pfn, end_pfn, (void *)&nid, func);
+ }
+
+ #ifdef CONFIG_HUGETLBFS
+diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
+index 442309b569203..8086756e7f076 100644
+--- a/drivers/clk/samsung/clk-exynos4.c
++++ b/drivers/clk/samsung/clk-exynos4.c
+@@ -1072,7 +1072,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = {
+ GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0),
+ GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
+ GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
+- GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
++ GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0,
+@@ -1113,7 +1113,7 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
+ 0),
+ GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0),
+ GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
+- GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
++ GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0,
+diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
+index 5bed36e129516..7327e90735c89 100644
+--- a/drivers/clk/socfpga/clk-s10.c
++++ b/drivers/clk/socfpga/clk-s10.c
+@@ -107,7 +107,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
+ { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
+ 0, 0, 2, 0xB0, 1},
+ { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
+- ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
++ ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 2, 0xB0, 2},
+ { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
+ ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3},
+ { STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
+diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
+index 945bd13e5e791..cab324eb7df24 100644
+--- a/drivers/gpio/gpio-mockup.c
++++ b/drivers/gpio/gpio-mockup.c
+@@ -367,6 +367,7 @@ static int __init gpio_mockup_init(void)
+ err = platform_driver_register(&gpio_mockup_driver);
+ if (err) {
+ gpio_mockup_err("error registering platform driver\n");
++ debugfs_remove_recursive(gpio_mockup_dbg_dir);
+ return err;
+ }
+
+@@ -386,6 +387,7 @@ static int __init gpio_mockup_init(void)
+ gpio_mockup_err("error registering device");
+ platform_driver_unregister(&gpio_mockup_driver);
+ gpio_mockup_unregister_pdevs();
++ debugfs_remove_recursive(gpio_mockup_dbg_dir);
+ return PTR_ERR(pdev);
+ }
+
+diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
+index 55072d2b367fa..4d53347adcafa 100644
+--- a/drivers/gpio/gpio-sprd.c
++++ b/drivers/gpio/gpio-sprd.c
+@@ -149,17 +149,20 @@ static int sprd_gpio_irq_set_type(struct irq_data *data,
+ sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
+ sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0);
+ sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 1);
++ sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
+ sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0);
+ sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 0);
++ sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
+ sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 1);
++ sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
+index 91a8ef8e7f3fd..1436098b16149 100644
+--- a/drivers/gpio/gpio-tc3589x.c
++++ b/drivers/gpio/gpio-tc3589x.c
+@@ -209,7 +209,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
+ continue;
+
+ tc3589x_gpio->oldregs[i][j] = new;
+- tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new);
++ tc3589x_reg_write(tc3589x, regmap[i] + j, new);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 049a1961c3fa5..5f85c9586cba1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -290,7 +290,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+ take the current one */
+ if (active && !adev->have_disp_power_ref) {
+ adev->have_disp_power_ref = true;
+- goto out;
++ return ret;
+ }
+ /* if we have no active crtcs, then drop the power ref
+ we got before */
+diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
+index 71a798e5d5591..649b57e5e4b78 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
++++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
+@@ -364,7 +364,7 @@ static struct regmap_config sun8i_mixer_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+- .max_register = 0xbfffc, /* guessed */
++ .max_register = 0xffffc, /* guessed */
+ };
+
+ static int sun8i_mixer_of_get_id(struct device_node *node)
+diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
+index 8a8ca945561b0..7eba874a981d3 100644
+--- a/drivers/i2c/busses/i2c-cpm.c
++++ b/drivers/i2c/busses/i2c-cpm.c
+@@ -74,6 +74,9 @@ struct i2c_ram {
+ char res1[4]; /* Reserved */
+ ushort rpbase; /* Relocation pointer */
+ char res2[2]; /* Reserved */
++ /* The following elements are only for CPM2 */
++ char res3[4]; /* Reserved */
++ uint sdmatmp; /* Internal */
+ };
+
+ #define I2COM_START 0x80
+diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
+index 31c16b68aa311..e468657854094 100644
+--- a/drivers/input/mouse/trackpoint.c
++++ b/drivers/input/mouse/trackpoint.c
+@@ -285,6 +285,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse,
+ case TP_VARIANT_ALPS:
+ case TP_VARIANT_ELAN:
+ case TP_VARIANT_NXP:
++ case TP_VARIANT_JYT_SYNAPTICS:
++ case TP_VARIANT_SYNAPTICS:
+ if (variant_id)
+ *variant_id = param[0];
+ if (firmware_id)
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 7c05e09abacf9..51bd2ebaa342c 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -725,6 +725,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ },
+ },
++ {
++ /* Acer Aspire 5 A515 */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
++ DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
++ },
++ },
+ { }
+ };
+
+diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
+index 1bd0cd7168dfc..4bf6049dd2c79 100644
+--- a/drivers/iommu/exynos-iommu.c
++++ b/drivers/iommu/exynos-iommu.c
+@@ -1302,13 +1302,17 @@ static int exynos_iommu_of_xlate(struct device *dev,
+ return -ENODEV;
+
+ data = platform_get_drvdata(sysmmu);
+- if (!data)
++ if (!data) {
++ put_device(&sysmmu->dev);
+ return -ENODEV;
++ }
+
+ if (!owner) {
+ owner = kzalloc(sizeof(*owner), GFP_KERNEL);
+- if (!owner)
++ if (!owner) {
++ put_device(&sysmmu->dev);
+ return -ENOMEM;
++ }
+
+ INIT_LIST_HEAD(&owner->controllers);
+ mutex_init(&owner->rpm_lock);
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 35168b47afe6c..a411300f9d6dc 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -739,7 +739,8 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
+ {
+ return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
+- dmi_match(DMI_BIOS_VENDOR, "LENOVO");
++ (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
++ dmi_match(DMI_SYS_VENDOR, "IRBIS"));
+ }
+
+ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
+diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
+index 13430f75496cc..b312cd9bce169 100644
+--- a/drivers/net/ethernet/dec/tulip/de2104x.c
++++ b/drivers/net/ethernet/dec/tulip/de2104x.c
+@@ -91,7 +91,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi
+ #define DSL CONFIG_DE2104X_DSL
+ #endif
+
+-#define DE_RX_RING_SIZE 64
++#define DE_RX_RING_SIZE 128
+ #define DE_TX_RING_SIZE 64
+ #define DE_RING_BYTES \
+ ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
+diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
+index b807c91abe1da..a22ae3137a3f8 100644
+--- a/drivers/net/usb/rndis_host.c
++++ b/drivers/net/usb/rndis_host.c
+@@ -213,7 +213,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
+ dev_dbg(&info->control->dev,
+ "rndis response error, code %d\n", retval);
+ }
+- msleep(20);
++ msleep(40);
+ }
+ dev_dbg(&info->control->dev, "rndis response timeout\n");
+ return -ETIMEDOUT;
+diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
+index c169a26e5359a..2c6e3fa6947a0 100644
+--- a/drivers/net/wan/hdlc_cisco.c
++++ b/drivers/net/wan/hdlc_cisco.c
+@@ -121,6 +121,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
+ skb_put(skb, sizeof(struct cisco_packet));
+ skb->priority = TC_PRIO_CONTROL;
+ skb->dev = dev;
++ skb->protocol = htons(ETH_P_HDLC);
+ skb_reset_network_header(skb);
+
+ dev_queue_xmit(skb);
+diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
+index 038236a9c60ee..03b5f5cce6f47 100644
+--- a/drivers/net/wan/hdlc_fr.c
++++ b/drivers/net/wan/hdlc_fr.c
+@@ -436,6 +436,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (pvc->state.fecn) /* TX Congestion counter */
+ dev->stats.tx_compressed++;
+ skb->dev = pvc->frad;
++ skb->protocol = htons(ETH_P_HDLC);
++ skb_reset_network_header(skb);
+ dev_queue_xmit(skb);
+ return NETDEV_TX_OK;
+ }
+@@ -558,6 +560,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
+ skb_put(skb, i);
+ skb->priority = TC_PRIO_CONTROL;
+ skb->dev = dev;
++ skb->protocol = htons(ETH_P_HDLC);
+ skb_reset_network_header(skb);
+
+ dev_queue_xmit(skb);
+@@ -1044,7 +1047,7 @@ static void pvc_setup(struct net_device *dev)
+ {
+ dev->type = ARPHRD_DLCI;
+ dev->flags = IFF_POINTOPOINT;
+- dev->hard_header_len = 10;
++ dev->hard_header_len = 0;
+ dev->addr_len = 2;
+ netif_keep_dst(dev);
+ }
+@@ -1096,6 +1099,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
+ dev->mtu = HDLC_MAX_MTU;
+ dev->min_mtu = 68;
+ dev->max_mtu = HDLC_MAX_MTU;
++ dev->needed_headroom = 10;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->ml_priv = pvc;
+
+diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
+index 85844f26547dd..20d9b6585fba3 100644
+--- a/drivers/net/wan/hdlc_ppp.c
++++ b/drivers/net/wan/hdlc_ppp.c
+@@ -254,6 +254,7 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
+
+ skb->priority = TC_PRIO_CONTROL;
+ skb->dev = dev;
++ skb->protocol = htons(ETH_P_HDLC);
+ skb_reset_network_header(skb);
+ skb_queue_tail(&tx_queue, skb);
+ }
+diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
+index 15177a54b17d7..e5fc1b95cea6a 100644
+--- a/drivers/net/wan/lapbether.c
++++ b/drivers/net/wan/lapbether.c
+@@ -201,8 +201,6 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
+ struct net_device *dev;
+ int size = skb->len;
+
+- skb->protocol = htons(ETH_P_X25);
+-
+ ptr = skb_push(skb, 2);
+
+ *ptr++ = size % 256;
+@@ -213,6 +211,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
+
+ skb->dev = dev = lapbeth->ethdev;
+
++ skb->protocol = htons(ETH_P_DEC);
++
+ skb_reset_network_header(skb);
+
+ dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 33dad9774da01..9ea3d8e611005 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2605,10 +2605,24 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
+ return -EWOULDBLOCK;
+ }
+
++ nvme_get_ctrl(ctrl);
++ if (!try_module_get(ctrl->ops->module))
++ return -EINVAL;
++
+ file->private_data = ctrl;
+ return 0;
+ }
+
++static int nvme_dev_release(struct inode *inode, struct file *file)
++{
++ struct nvme_ctrl *ctrl =
++ container_of(inode->i_cdev, struct nvme_ctrl, cdev);
++
++ module_put(ctrl->ops->module);
++ nvme_put_ctrl(ctrl);
++ return 0;
++}
++
+ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
+ {
+ struct nvme_ns *ns;
+@@ -2669,6 +2683,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
+ static const struct file_operations nvme_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = nvme_dev_open,
++ .release = nvme_dev_release,
+ .unlocked_ioctl = nvme_dev_ioctl,
+ .compat_ioctl = nvme_dev_ioctl,
+ };
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 73db32f97abf3..ed88d50217724 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -3294,12 +3294,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+ if (lport->localport.node_name != laddr.nn ||
+- lport->localport.port_name != laddr.pn)
++ lport->localport.port_name != laddr.pn ||
++ lport->localport.port_state != FC_OBJSTATE_ONLINE)
+ continue;
+
+ list_for_each_entry(rport, &lport->endp_list, endp_list) {
+ if (rport->remoteport.node_name != raddr.nn ||
+- rport->remoteport.port_name != raddr.pn)
++ rport->remoteport.port_name != raddr.pn ||
++ rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+ continue;
+
+ /* if fail to get reference fall through. Will error */
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+index 43231fd065a18..1a9450ef932b5 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+@@ -418,7 +418,7 @@ static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = {
+ MPP_VAR_FUNCTION(0x1, "i2c0", "sck", V_98DX3236_PLUS)),
+ MPP_MODE(15,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+- MPP_VAR_FUNCTION(0x4, "i2c0", "sda", V_98DX3236_PLUS)),
++ MPP_VAR_FUNCTION(0x1, "i2c0", "sda", V_98DX3236_PLUS)),
+ MPP_MODE(16,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "oe", V_98DX3236_PLUS)),
+diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
+index 1e8ff6256079f..b8dd75b8518b5 100644
+--- a/drivers/spi/spi-fsl-espi.c
++++ b/drivers/spi/spi-fsl-espi.c
+@@ -559,13 +559,14 @@ static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
+ static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
+ {
+ struct fsl_espi *espi = context_data;
+- u32 events;
++ u32 events, mask;
+
+ spin_lock(&espi->lock);
+
+ /* Get interrupt events(tx/rx) */
+ events = fsl_espi_read_reg(espi, ESPI_SPIE);
+- if (!events) {
++ mask = fsl_espi_read_reg(espi, ESPI_SPIM);
++ if (!(events & mask)) {
+ spin_unlock(&espi->lock);
+ return IRQ_NONE;
+ }
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 8d8c81d430694..e2eefdd8bf786 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -1192,7 +1192,6 @@ static int ncm_unwrap_ntb(struct gether *port,
+ const struct ndp_parser_opts *opts = ncm->parser_opts;
+ unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
+ int dgram_counter;
+- bool ndp_after_header;
+
+ /* dwSignature */
+ if (get_unaligned_le32(tmp) != opts->nth_sign) {
+@@ -1219,7 +1218,6 @@ static int ncm_unwrap_ntb(struct gether *port,
+ }
+
+ ndp_index = get_ncm(&tmp, opts->ndp_index);
+- ndp_after_header = false;
+
+ /* Run through all the NDP's in the NTB */
+ do {
+@@ -1235,8 +1233,6 @@ static int ncm_unwrap_ntb(struct gether *port,
+ ndp_index);
+ goto err;
+ }
+- if (ndp_index == opts->nth_size)
+- ndp_after_header = true;
+
+ /*
+ * walk through NDP
+@@ -1315,37 +1311,13 @@ static int ncm_unwrap_ntb(struct gether *port,
+ index2 = get_ncm(&tmp, opts->dgram_item_len);
+ dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
+
+- if (index2 == 0 || dg_len2 == 0)
+- break;
+-
+ /* wDatagramIndex[1] */
+- if (ndp_after_header) {
+- if (index2 < opts->nth_size + opts->ndp_size) {
+- INFO(port->func.config->cdev,
+- "Bad index: %#X\n", index2);
+- goto err;
+- }
+- } else {
+- if (index2 < opts->nth_size + opts->dpe_size) {
+- INFO(port->func.config->cdev,
+- "Bad index: %#X\n", index2);
+- goto err;
+- }
+- }
+ if (index2 > block_len - opts->dpe_size) {
+ INFO(port->func.config->cdev,
+ "Bad index: %#X\n", index2);
+ goto err;
+ }
+
+- /* wDatagramLength[1] */
+- if ((dg_len2 < 14 + crc_len) ||
+- (dg_len2 > frame_max)) {
+- INFO(port->func.config->cdev,
+- "Bad dgram length: %#X\n", dg_len);
+- goto err;
+- }
+-
+ /*
+ * Copy the data into a new skb.
+ * This ensures the truesize is correct
+@@ -1362,6 +1334,8 @@ static int ncm_unwrap_ntb(struct gether *port,
+ ndp_len -= 2 * (opts->dgram_item_len * 2);
+
+ dgram_counter++;
++ if (index2 == 0 || dg_len2 == 0)
++ break;
+ } while (ndp_len > 2 * (opts->dgram_item_len * 2));
+ } while (ndp_index);
+
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index 7891bd40ebd82..6ee320259e4f7 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -383,6 +383,52 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
+ return val < vq->num;
+ }
+
++static struct virtio_transport vhost_transport = {
++ .transport = {
++ .get_local_cid = vhost_transport_get_local_cid,
++
++ .init = virtio_transport_do_socket_init,
++ .destruct = virtio_transport_destruct,
++ .release = virtio_transport_release,
++ .connect = virtio_transport_connect,
++ .shutdown = virtio_transport_shutdown,
++ .cancel_pkt = vhost_transport_cancel_pkt,
++
++ .dgram_enqueue = virtio_transport_dgram_enqueue,
++ .dgram_dequeue = virtio_transport_dgram_dequeue,
++ .dgram_bind = virtio_transport_dgram_bind,
++ .dgram_allow = virtio_transport_dgram_allow,
++
++ .stream_enqueue = virtio_transport_stream_enqueue,
++ .stream_dequeue = virtio_transport_stream_dequeue,
++ .stream_has_data = virtio_transport_stream_has_data,
++ .stream_has_space = virtio_transport_stream_has_space,
++ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
++ .stream_is_active = virtio_transport_stream_is_active,
++ .stream_allow = virtio_transport_stream_allow,
++
++ .notify_poll_in = virtio_transport_notify_poll_in,
++ .notify_poll_out = virtio_transport_notify_poll_out,
++ .notify_recv_init = virtio_transport_notify_recv_init,
++ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
++ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
++ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
++ .notify_send_init = virtio_transport_notify_send_init,
++ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
++ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
++ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
++
++ .set_buffer_size = virtio_transport_set_buffer_size,
++ .set_min_buffer_size = virtio_transport_set_min_buffer_size,
++ .set_max_buffer_size = virtio_transport_set_max_buffer_size,
++ .get_buffer_size = virtio_transport_get_buffer_size,
++ .get_min_buffer_size = virtio_transport_get_min_buffer_size,
++ .get_max_buffer_size = virtio_transport_get_max_buffer_size,
++ },
++
++ .send_pkt = vhost_transport_send_pkt,
++};
++
+ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+ {
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+@@ -439,7 +485,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+ if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
+ le64_to_cpu(pkt->hdr.dst_cid) ==
+ vhost_transport_get_local_cid())
+- virtio_transport_recv_pkt(pkt);
++ virtio_transport_recv_pkt(&vhost_transport, pkt);
+ else
+ virtio_transport_free_pkt(pkt);
+
+@@ -792,52 +838,6 @@ static struct miscdevice vhost_vsock_misc = {
+ .fops = &vhost_vsock_fops,
+ };
+
+-static struct virtio_transport vhost_transport = {
+- .transport = {
+- .get_local_cid = vhost_transport_get_local_cid,
+-
+- .init = virtio_transport_do_socket_init,
+- .destruct = virtio_transport_destruct,
+- .release = virtio_transport_release,
+- .connect = virtio_transport_connect,
+- .shutdown = virtio_transport_shutdown,
+- .cancel_pkt = vhost_transport_cancel_pkt,
+-
+- .dgram_enqueue = virtio_transport_dgram_enqueue,
+- .dgram_dequeue = virtio_transport_dgram_dequeue,
+- .dgram_bind = virtio_transport_dgram_bind,
+- .dgram_allow = virtio_transport_dgram_allow,
+-
+- .stream_enqueue = virtio_transport_stream_enqueue,
+- .stream_dequeue = virtio_transport_stream_dequeue,
+- .stream_has_data = virtio_transport_stream_has_data,
+- .stream_has_space = virtio_transport_stream_has_space,
+- .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+- .stream_is_active = virtio_transport_stream_is_active,
+- .stream_allow = virtio_transport_stream_allow,
+-
+- .notify_poll_in = virtio_transport_notify_poll_in,
+- .notify_poll_out = virtio_transport_notify_poll_out,
+- .notify_recv_init = virtio_transport_notify_recv_init,
+- .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+- .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+- .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+- .notify_send_init = virtio_transport_notify_send_init,
+- .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+- .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+- .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+-
+- .set_buffer_size = virtio_transport_set_buffer_size,
+- .set_min_buffer_size = virtio_transport_set_min_buffer_size,
+- .set_max_buffer_size = virtio_transport_set_max_buffer_size,
+- .get_buffer_size = virtio_transport_get_buffer_size,
+- .get_min_buffer_size = virtio_transport_get_min_buffer_size,
+- .get_max_buffer_size = virtio_transport_get_max_buffer_size,
+- },
+-
+- .send_pkt = vhost_transport_send_pkt,
+-};
+-
+ static int __init vhost_vsock_init(void)
+ {
+ int ret;
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 61a52bb26d127..a4a32b79e8321 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -222,8 +222,7 @@ struct eventpoll {
+ struct file *file;
+
+ /* used to optimize loop detection check */
+- int visited;
+- struct list_head visited_list_link;
++ u64 gen;
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+ /* used to track busy poll napi_id */
+@@ -273,6 +272,8 @@ static long max_user_watches __read_mostly;
+ */
+ static DEFINE_MUTEX(epmutex);
+
++static u64 loop_check_gen = 0;
++
+ /* Used to check for epoll file descriptor inclusion loops */
+ static struct nested_calls poll_loop_ncalls;
+
+@@ -282,9 +283,6 @@ static struct kmem_cache *epi_cache __read_mostly;
+ /* Slab cache used to allocate "struct eppoll_entry" */
+ static struct kmem_cache *pwq_cache __read_mostly;
+
+-/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
+-static LIST_HEAD(visited_list);
+-
+ /*
+ * List of files with newly added links, where we may need to limit the number
+ * of emanating paths. Protected by the epmutex.
+@@ -1378,7 +1376,7 @@ static int reverse_path_check(void)
+
+ static int ep_create_wakeup_source(struct epitem *epi)
+ {
+- const char *name;
++ struct name_snapshot n;
+ struct wakeup_source *ws;
+
+ if (!epi->ep->ws) {
+@@ -1387,8 +1385,9 @@ static int ep_create_wakeup_source(struct epitem *epi)
+ return -ENOMEM;
+ }
+
+- name = epi->ffd.file->f_path.dentry->d_name.name;
+- ws = wakeup_source_register(name);
++ take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
++ ws = wakeup_source_register(n.name);
++ release_dentry_name_snapshot(&n);
+
+ if (!ws)
+ return -ENOMEM;
+@@ -1450,6 +1449,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
+ RCU_INIT_POINTER(epi->ws, NULL);
+ }
+
++ /* Add the current item to the list of active epoll hook for this file */
++ spin_lock(&tfile->f_lock);
++ list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
++ spin_unlock(&tfile->f_lock);
++
++ /*
++ * Add the current item to the RB tree. All RB tree operations are
++ * protected by "mtx", and ep_insert() is called with "mtx" held.
++ */
++ ep_rbtree_insert(ep, epi);
++
++ /* now check if we've created too many backpaths */
++ error = -EINVAL;
++ if (full_check && reverse_path_check())
++ goto error_remove_epi;
++
+ /* Initialize the poll table using the queue callback */
+ epq.epi = epi;
+ init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
+@@ -1472,22 +1487,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
+ if (epi->nwait < 0)
+ goto error_unregister;
+
+- /* Add the current item to the list of active epoll hook for this file */
+- spin_lock(&tfile->f_lock);
+- list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+- spin_unlock(&tfile->f_lock);
+-
+- /*
+- * Add the current item to the RB tree. All RB tree operations are
+- * protected by "mtx", and ep_insert() is called with "mtx" held.
+- */
+- ep_rbtree_insert(ep, epi);
+-
+- /* now check if we've created too many backpaths */
+- error = -EINVAL;
+- if (full_check && reverse_path_check())
+- goto error_remove_epi;
+-
+ /* We have to drop the new item inside our item list to keep track of it */
+ spin_lock_irq(&ep->wq.lock);
+
+@@ -1516,6 +1515,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
+
+ return 0;
+
++error_unregister:
++ ep_unregister_pollwait(ep, epi);
+ error_remove_epi:
+ spin_lock(&tfile->f_lock);
+ list_del_rcu(&epi->fllink);
+@@ -1523,9 +1524,6 @@ error_remove_epi:
+
+ rb_erase_cached(&epi->rbn, &ep->rbr);
+
+-error_unregister:
+- ep_unregister_pollwait(ep, epi);
+-
+ /*
+ * We need to do this because an event could have been arrived on some
+ * allocated wait queue. Note that we don't care about the ep->ovflist
+@@ -1868,13 +1866,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+ struct epitem *epi;
+
+ mutex_lock_nested(&ep->mtx, call_nests + 1);
+- ep->visited = 1;
+- list_add(&ep->visited_list_link, &visited_list);
++ ep->gen = loop_check_gen;
+ for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+ epi = rb_entry(rbp, struct epitem, rbn);
+ if (unlikely(is_file_epoll(epi->ffd.file))) {
+ ep_tovisit = epi->ffd.file->private_data;
+- if (ep_tovisit->visited)
++ if (ep_tovisit->gen == loop_check_gen)
+ continue;
+ error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+ ep_loop_check_proc, epi->ffd.file,
+@@ -1915,18 +1912,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+ */
+ static int ep_loop_check(struct eventpoll *ep, struct file *file)
+ {
+- int ret;
+- struct eventpoll *ep_cur, *ep_next;
+-
+- ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
++ return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+ ep_loop_check_proc, file, ep, current);
+- /* clear visited list */
+- list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
+- visited_list_link) {
+- ep_cur->visited = 0;
+- list_del(&ep_cur->visited_list_link);
+- }
+- return ret;
+ }
+
+ static void clear_tfile_check_list(void)
+@@ -2088,6 +2075,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ mutex_lock_nested(&ep->mtx, 0);
+ if (op == EPOLL_CTL_ADD) {
+ if (!list_empty(&f.file->f_ep_links) ||
++ ep->gen == loop_check_gen ||
+ is_file_epoll(tf.file)) {
+ full_check = 1;
+ mutex_unlock(&ep->mtx);
+@@ -2148,6 +2136,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ error_tgt_fput:
+ if (full_check) {
+ clear_tfile_check_list();
++ loop_check_gen++;
+ mutex_unlock(&epmutex);
+ }
+
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 4ae726e70d873..733fd9e4f0a15 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -553,6 +553,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ do {
++ if (entry->label)
++ entry->label->len = NFS4_MAXLABELLEN;
++
+ status = xdr_decode(desc, entry, &stream);
+ if (status != 0) {
+ if (status == -EAGAIN)
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 05bc5f25ab85c..83828c118b6b7 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2179,7 +2179,7 @@ static inline void zero_resv_unavail(void) {}
+
+ extern void set_dma_reserve(unsigned long new_dma_reserve);
+ extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
+- enum memmap_context, struct vmem_altmap *);
++ enum meminit_context, struct vmem_altmap *);
+ extern void setup_per_zone_wmarks(void);
+ extern int __meminit init_per_zone_wmark_min(void);
+ extern void mem_init(void);
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index fdd93a39f1fa1..fa02014eba8ea 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -759,10 +759,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order,
+ unsigned int alloc_flags);
+ bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
+ unsigned long mark, int classzone_idx);
+-enum memmap_context {
+- MEMMAP_EARLY,
+- MEMMAP_HOTPLUG,
++/*
++ * Memory initialization context, use to differentiate memory added by
++ * the platform statically or via memory hotplug interface.
++ */
++enum meminit_context {
++ MEMINIT_EARLY,
++ MEMINIT_HOTPLUG,
+ };
++
+ extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
+ unsigned long size);
+
+diff --git a/include/linux/node.h b/include/linux/node.h
+index 708939bae9aa8..a79ec4492650c 100644
+--- a/include/linux/node.h
++++ b/include/linux/node.h
+@@ -32,11 +32,13 @@ extern struct node *node_devices[];
+ typedef void (*node_registration_func_t)(struct node *);
+
+ #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
+-extern int link_mem_sections(int nid, unsigned long start_pfn,
+- unsigned long end_pfn);
++int link_mem_sections(int nid, unsigned long start_pfn,
++ unsigned long end_pfn,
++ enum meminit_context context);
+ #else
+ static inline int link_mem_sections(int nid, unsigned long start_pfn,
+- unsigned long end_pfn)
++ unsigned long end_pfn,
++ enum meminit_context context)
+ {
+ return 0;
+ }
+@@ -61,7 +63,8 @@ static inline int register_one_node(int nid)
+ if (error)
+ return error;
+ /* link memory sections under this node */
+- error = link_mem_sections(nid, start_pfn, end_pfn);
++ error = link_mem_sections(nid, start_pfn, end_pfn,
++ MEMINIT_EARLY);
+ }
+
+ return error;
+diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
+index e223e2632eddd..8b8d13f01caee 100644
+--- a/include/linux/virtio_vsock.h
++++ b/include/linux/virtio_vsock.h
+@@ -149,7 +149,8 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
+
+ void virtio_transport_destruct(struct vsock_sock *vsk);
+
+-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt);
++void virtio_transport_recv_pkt(struct virtio_transport *t,
++ struct virtio_vsock_pkt *pkt);
+ void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
+ void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
+ u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 70f7743c16729..992d48774c9e9 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -6370,16 +6370,14 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
+ {
+ int bit;
+
+- if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
+- return;
+-
+ bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+ if (bit < 0)
+ return;
+
+ preempt_disable_notrace();
+
+- op->func(ip, parent_ip, op, regs);
++ if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
++ op->func(ip, parent_ip, op, regs);
+
+ preempt_enable_notrace();
+ trace_clear_recursion(bit);
+diff --git a/lib/random32.c b/lib/random32.c
+index 036de0c93e224..b6f3325e38e43 100644
+--- a/lib/random32.c
++++ b/lib/random32.c
+@@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void)
+ }
+ #endif
+
+-DEFINE_PER_CPU(struct rnd_state, net_rand_state);
++DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
+
+ /**
+ * prandom_u32_state - seeded pseudo-random number generator.
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index aae7ff4856711..e60e28131f679 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -733,7 +733,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
+ * are reserved so nobody should be touching them so we should be safe
+ */
+ memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
+- MEMMAP_HOTPLUG, altmap);
++ MEMINIT_HOTPLUG, altmap);
+
+ set_zone_contiguous(zone);
+ }
+@@ -1102,7 +1102,8 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
+ }
+
+ /* link memory sections under this node.*/
+- ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1));
++ ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
++ MEMINIT_HOTPLUG);
+ BUG_ON(ret);
+
+ /* create new memmap entry */
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 5717ee66c8b38..545800433dfba 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5480,7 +5480,7 @@ void __ref build_all_zonelists(pg_data_t *pgdat)
+ * done. Non-atomic initialization, single-pass.
+ */
+ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+- unsigned long start_pfn, enum memmap_context context,
++ unsigned long start_pfn, enum meminit_context context,
+ struct vmem_altmap *altmap)
+ {
+ unsigned long end_pfn = start_pfn + size;
+@@ -5507,7 +5507,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+ * There can be holes in boot-time mem_map[]s handed to this
+ * function. They do not exist on hotplugged memory.
+ */
+- if (context != MEMMAP_EARLY)
++ if (context != MEMINIT_EARLY)
+ goto not_early;
+
+ if (!early_pfn_valid(pfn))
+@@ -5542,7 +5542,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+ not_early:
+ page = pfn_to_page(pfn);
+ __init_single_page(page, pfn, zone, nid);
+- if (context == MEMMAP_HOTPLUG)
++ if (context == MEMINIT_HOTPLUG)
+ SetPageReserved(page);
+
+ /*
+@@ -5557,7 +5557,7 @@ not_early:
+ * check here not to call set_pageblock_migratetype() against
+ * pfn out of zone.
+ *
+- * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
++ * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
+ * because this is done early in sparse_add_one_section
+ */
+ if (!(pfn & (pageblock_nr_pages - 1))) {
+@@ -5578,7 +5578,8 @@ static void __meminit zone_init_free_lists(struct zone *zone)
+
+ #ifndef __HAVE_ARCH_MEMMAP_INIT
+ #define memmap_init(size, nid, zone, start_pfn) \
+- memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL)
++ memmap_init_zone((size), (nid), (zone), (start_pfn), \
++ MEMINIT_EARLY, NULL)
+ #endif
+
+ static int zone_batchsize(struct zone *zone)
+diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
+index 259325cbcc314..4d154efb80c88 100644
+--- a/net/mac80211/vht.c
++++ b/net/mac80211/vht.c
+@@ -170,10 +170,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
+ /* take some capabilities as-is */
+ cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info);
+ vht_cap->cap = cap_info;
+- vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
+- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+- IEEE80211_VHT_CAP_RXLDPC |
++ vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_VHT_TXOP_PS |
+ IEEE80211_VHT_CAP_HTC_VHT |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+@@ -182,6 +179,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
+ IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
+ IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
+
++ vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK,
++ own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK);
++
+ /* and some based on our own capabilities */
+ switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 31fa94064a620..0b89609a6e9d6 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -1129,6 +1129,8 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
+ if (!tb[CTA_TUPLE_IP])
+ return -EINVAL;
+
++ if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
++ return -EOPNOTSUPP;
+ tuple->src.l3num = l3num;
+
+ err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index b3caf1eac6aff..16b745d254fea 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2162,7 +2162,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ int skb_len = skb->len;
+ unsigned int snaplen, res;
+ unsigned long status = TP_STATUS_USER;
+- unsigned short macoff, netoff, hdrlen;
++ unsigned short macoff, hdrlen;
++ unsigned int netoff;
+ struct sk_buff *copy_skb = NULL;
+ struct timespec ts;
+ __u32 ts_status;
+@@ -2225,6 +2226,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ }
+ macoff = netoff - maclen;
+ }
++ if (netoff > USHRT_MAX) {
++ spin_lock(&sk->sk_receive_queue.lock);
++ po->stats.stats1.tp_drops++;
++ spin_unlock(&sk->sk_receive_queue.lock);
++ goto drop_n_restore;
++ }
+ if (po->tp_version <= TPACKET_V2) {
+ if (macoff + snaplen > po->rx_ring.frame_size) {
+ if (po->copy_thresh &&
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index 96ab344f17bbc..cc70d651d13e0 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -39,6 +39,7 @@ struct virtio_vsock {
+ * must be accessed with tx_lock held.
+ */
+ struct mutex tx_lock;
++ bool tx_run;
+
+ struct work_struct send_pkt_work;
+ spinlock_t send_pkt_list_lock;
+@@ -54,6 +55,7 @@ struct virtio_vsock {
+ * must be accessed with rx_lock held.
+ */
+ struct mutex rx_lock;
++ bool rx_run;
+ int rx_buf_nr;
+ int rx_buf_max_nr;
+
+@@ -61,46 +63,28 @@ struct virtio_vsock {
+ * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
+ */
+ struct mutex event_lock;
++ bool event_run;
+ struct virtio_vsock_event event_list[8];
+
+ u32 guest_cid;
+ };
+
+-static struct virtio_vsock *virtio_vsock_get(void)
+-{
+- return the_virtio_vsock;
+-}
+-
+ static u32 virtio_transport_get_local_cid(void)
+ {
+- struct virtio_vsock *vsock = virtio_vsock_get();
+-
+- if (!vsock)
+- return VMADDR_CID_ANY;
+-
+- return vsock->guest_cid;
+-}
+-
+-static void virtio_transport_loopback_work(struct work_struct *work)
+-{
+- struct virtio_vsock *vsock =
+- container_of(work, struct virtio_vsock, loopback_work);
+- LIST_HEAD(pkts);
+-
+- spin_lock_bh(&vsock->loopback_list_lock);
+- list_splice_init(&vsock->loopback_list, &pkts);
+- spin_unlock_bh(&vsock->loopback_list_lock);
+-
+- mutex_lock(&vsock->rx_lock);
+- while (!list_empty(&pkts)) {
+- struct virtio_vsock_pkt *pkt;
+-
+- pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
+- list_del_init(&pkt->list);
++ struct virtio_vsock *vsock;
++ u32 ret;
+
+- virtio_transport_recv_pkt(pkt);
++ rcu_read_lock();
++ vsock = rcu_dereference(the_virtio_vsock);
++ if (!vsock) {
++ ret = VMADDR_CID_ANY;
++ goto out_rcu;
+ }
+- mutex_unlock(&vsock->rx_lock);
++
++ ret = vsock->guest_cid;
++out_rcu:
++ rcu_read_unlock();
++ return ret;
+ }
+
+ static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
+@@ -128,6 +112,9 @@ virtio_transport_send_pkt_work(struct work_struct *work)
+
+ mutex_lock(&vsock->tx_lock);
+
++ if (!vsock->tx_run)
++ goto out;
++
+ vq = vsock->vqs[VSOCK_VQ_TX];
+
+ for (;;) {
+@@ -186,6 +173,7 @@ virtio_transport_send_pkt_work(struct work_struct *work)
+ if (added)
+ virtqueue_kick(vq);
+
++out:
+ mutex_unlock(&vsock->tx_lock);
+
+ if (restart_rx)
+@@ -198,14 +186,18 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+ struct virtio_vsock *vsock;
+ int len = pkt->len;
+
+- vsock = virtio_vsock_get();
++ rcu_read_lock();
++ vsock = rcu_dereference(the_virtio_vsock);
+ if (!vsock) {
+ virtio_transport_free_pkt(pkt);
+- return -ENODEV;
++ len = -ENODEV;
++ goto out_rcu;
+ }
+
+- if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
+- return virtio_transport_send_pkt_loopback(vsock, pkt);
++ if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
++ len = virtio_transport_send_pkt_loopback(vsock, pkt);
++ goto out_rcu;
++ }
+
+ if (pkt->reply)
+ atomic_inc(&vsock->queued_replies);
+@@ -215,6 +207,9 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
++
++out_rcu:
++ rcu_read_unlock();
+ return len;
+ }
+
+@@ -223,12 +218,14 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+ {
+ struct virtio_vsock *vsock;
+ struct virtio_vsock_pkt *pkt, *n;
+- int cnt = 0;
++ int cnt = 0, ret;
+ LIST_HEAD(freeme);
+
+- vsock = virtio_vsock_get();
++ rcu_read_lock();
++ vsock = rcu_dereference(the_virtio_vsock);
+ if (!vsock) {
+- return -ENODEV;
++ ret = -ENODEV;
++ goto out_rcu;
+ }
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+@@ -256,7 +253,11 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+ queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+ }
+
+- return 0;
++ ret = 0;
++
++out_rcu:
++ rcu_read_unlock();
++ return ret;
+ }
+
+ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
+@@ -308,6 +309,10 @@ static void virtio_transport_tx_work(struct work_struct *work)
+
+ vq = vsock->vqs[VSOCK_VQ_TX];
+ mutex_lock(&vsock->tx_lock);
++
++ if (!vsock->tx_run)
++ goto out;
++
+ do {
+ struct virtio_vsock_pkt *pkt;
+ unsigned int len;
+@@ -318,6 +323,8 @@ static void virtio_transport_tx_work(struct work_struct *work)
+ added = true;
+ }
+ } while (!virtqueue_enable_cb(vq));
++
++out:
+ mutex_unlock(&vsock->tx_lock);
+
+ if (added)
+@@ -336,56 +343,6 @@ static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
+ return val < virtqueue_get_vring_size(vq);
+ }
+
+-static void virtio_transport_rx_work(struct work_struct *work)
+-{
+- struct virtio_vsock *vsock =
+- container_of(work, struct virtio_vsock, rx_work);
+- struct virtqueue *vq;
+-
+- vq = vsock->vqs[VSOCK_VQ_RX];
+-
+- mutex_lock(&vsock->rx_lock);
+-
+- do {
+- virtqueue_disable_cb(vq);
+- for (;;) {
+- struct virtio_vsock_pkt *pkt;
+- unsigned int len;
+-
+- if (!virtio_transport_more_replies(vsock)) {
+- /* Stop rx until the device processes already
+- * pending replies. Leave rx virtqueue
+- * callbacks disabled.
+- */
+- goto out;
+- }
+-
+- pkt = virtqueue_get_buf(vq, &len);
+- if (!pkt) {
+- break;
+- }
+-
+- vsock->rx_buf_nr--;
+-
+- /* Drop short/long packets */
+- if (unlikely(len < sizeof(pkt->hdr) ||
+- len > sizeof(pkt->hdr) + pkt->len)) {
+- virtio_transport_free_pkt(pkt);
+- continue;
+- }
+-
+- pkt->len = len - sizeof(pkt->hdr);
+- virtio_transport_deliver_tap_pkt(pkt);
+- virtio_transport_recv_pkt(pkt);
+- }
+- } while (!virtqueue_enable_cb(vq));
+-
+-out:
+- if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
+- virtio_vsock_rx_fill(vsock);
+- mutex_unlock(&vsock->rx_lock);
+-}
+-
+ /* event_lock must be held */
+ static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
+ struct virtio_vsock_event *event)
+@@ -455,6 +412,9 @@ static void virtio_transport_event_work(struct work_struct *work)
+
+ mutex_lock(&vsock->event_lock);
+
++ if (!vsock->event_run)
++ goto out;
++
+ do {
+ struct virtio_vsock_event *event;
+ unsigned int len;
+@@ -469,7 +429,7 @@ static void virtio_transport_event_work(struct work_struct *work)
+ } while (!virtqueue_enable_cb(vq));
+
+ virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
+-
++out:
+ mutex_unlock(&vsock->event_lock);
+ }
+
+@@ -546,6 +506,86 @@ static struct virtio_transport virtio_transport = {
+ .send_pkt = virtio_transport_send_pkt,
+ };
+
++static void virtio_transport_loopback_work(struct work_struct *work)
++{
++ struct virtio_vsock *vsock =
++ container_of(work, struct virtio_vsock, loopback_work);
++ LIST_HEAD(pkts);
++
++ spin_lock_bh(&vsock->loopback_list_lock);
++ list_splice_init(&vsock->loopback_list, &pkts);
++ spin_unlock_bh(&vsock->loopback_list_lock);
++
++ mutex_lock(&vsock->rx_lock);
++
++ if (!vsock->rx_run)
++ goto out;
++
++ while (!list_empty(&pkts)) {
++ struct virtio_vsock_pkt *pkt;
++
++ pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
++ list_del_init(&pkt->list);
++
++ virtio_transport_recv_pkt(&virtio_transport, pkt);
++ }
++out:
++ mutex_unlock(&vsock->rx_lock);
++}
++
++static void virtio_transport_rx_work(struct work_struct *work)
++{
++ struct virtio_vsock *vsock =
++ container_of(work, struct virtio_vsock, rx_work);
++ struct virtqueue *vq;
++
++ vq = vsock->vqs[VSOCK_VQ_RX];
++
++ mutex_lock(&vsock->rx_lock);
++
++ if (!vsock->rx_run)
++ goto out;
++
++ do {
++ virtqueue_disable_cb(vq);
++ for (;;) {
++ struct virtio_vsock_pkt *pkt;
++ unsigned int len;
++
++ if (!virtio_transport_more_replies(vsock)) {
++ /* Stop rx until the device processes already
++ * pending replies. Leave rx virtqueue
++ * callbacks disabled.
++ */
++ goto out;
++ }
++
++ pkt = virtqueue_get_buf(vq, &len);
++ if (!pkt) {
++ break;
++ }
++
++ vsock->rx_buf_nr--;
++
++ /* Drop short/long packets */
++ if (unlikely(len < sizeof(pkt->hdr) ||
++ len > sizeof(pkt->hdr) + pkt->len)) {
++ virtio_transport_free_pkt(pkt);
++ continue;
++ }
++
++ pkt->len = len - sizeof(pkt->hdr);
++ virtio_transport_deliver_tap_pkt(pkt);
++ virtio_transport_recv_pkt(&virtio_transport, pkt);
++ }
++ } while (!virtqueue_enable_cb(vq));
++
++out:
++ if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
++ virtio_vsock_rx_fill(vsock);
++ mutex_unlock(&vsock->rx_lock);
++}
++
+ static int virtio_vsock_probe(struct virtio_device *vdev)
+ {
+ vq_callback_t *callbacks[] = {
+@@ -566,7 +606,8 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+ return ret;
+
+ /* Only one virtio-vsock device per guest is supported */
+- if (the_virtio_vsock) {
++ if (rcu_dereference_protected(the_virtio_vsock,
++ lockdep_is_held(&the_virtio_vsock_mutex))) {
+ ret = -EBUSY;
+ goto out;
+ }
+@@ -591,8 +632,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+ vsock->rx_buf_max_nr = 0;
+ atomic_set(&vsock->queued_replies, 0);
+
+- vdev->priv = vsock;
+- the_virtio_vsock = vsock;
+ mutex_init(&vsock->tx_lock);
+ mutex_init(&vsock->rx_lock);
+ mutex_init(&vsock->event_lock);
+@@ -606,14 +645,23 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+ INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
+ INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);
+
++ mutex_lock(&vsock->tx_lock);
++ vsock->tx_run = true;
++ mutex_unlock(&vsock->tx_lock);
1605 ++
1606 + mutex_lock(&vsock->rx_lock);
1607 + virtio_vsock_rx_fill(vsock);
1608 ++ vsock->rx_run = true;
1609 + mutex_unlock(&vsock->rx_lock);
1610 +
1611 + mutex_lock(&vsock->event_lock);
1612 + virtio_vsock_event_fill(vsock);
1613 ++ vsock->event_run = true;
1614 + mutex_unlock(&vsock->event_lock);
1615 +
1616 ++ vdev->priv = vsock;
1617 ++ rcu_assign_pointer(the_virtio_vsock, vsock);
1618 ++
1619 + mutex_unlock(&the_virtio_vsock_mutex);
1620 + return 0;
1621 +
1622 +@@ -628,6 +676,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
1623 + struct virtio_vsock *vsock = vdev->priv;
1624 + struct virtio_vsock_pkt *pkt;
1625 +
1626 ++ mutex_lock(&the_virtio_vsock_mutex);
1627 ++
1628 ++ vdev->priv = NULL;
1629 ++ rcu_assign_pointer(the_virtio_vsock, NULL);
1630 ++ synchronize_rcu();
1631 ++
1632 + flush_work(&vsock->loopback_work);
1633 + flush_work(&vsock->rx_work);
1634 + flush_work(&vsock->tx_work);
1635 +@@ -637,6 +691,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
1636 + /* Reset all connected sockets when the device disappear */
1637 + vsock_for_each_connected_socket(virtio_vsock_reset_sock);
1638 +
1639 ++ /* Stop all work handlers to make sure no one is accessing the device,
1640 ++ * so we can safely call vdev->config->reset().
1641 ++ */
1642 ++ mutex_lock(&vsock->rx_lock);
1643 ++ vsock->rx_run = false;
1644 ++ mutex_unlock(&vsock->rx_lock);
1645 ++
1646 ++ mutex_lock(&vsock->tx_lock);
1647 ++ vsock->tx_run = false;
1648 ++ mutex_unlock(&vsock->tx_lock);
1649 ++
1650 ++ mutex_lock(&vsock->event_lock);
1651 ++ vsock->event_run = false;
1652 ++ mutex_unlock(&vsock->event_lock);
1653 ++
1654 ++ /* Flush all device writes and interrupts, device will not use any
1655 ++ * more buffers.
1656 ++ */
1657 + vdev->config->reset(vdev);
1658 +
1659 + mutex_lock(&vsock->rx_lock);
1660 +@@ -667,12 +739,11 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
1661 + }
1662 + spin_unlock_bh(&vsock->loopback_list_lock);
1663 +
1664 +- mutex_lock(&the_virtio_vsock_mutex);
1665 +- the_virtio_vsock = NULL;
1666 +- mutex_unlock(&the_virtio_vsock_mutex);
1667 +-
1668 ++ /* Delete virtqueues and flush outstanding callbacks if any */
1669 + vdev->config->del_vqs(vdev);
1670 +
1671 ++ mutex_unlock(&the_virtio_vsock_mutex);
1672 ++
1673 + kfree(vsock);
1674 + }
1675 +
1676 +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
1677 +index 52242a148c705..5f8a72d34d313 100644
1678 +--- a/net/vmw_vsock/virtio_transport_common.c
1679 ++++ b/net/vmw_vsock/virtio_transport_common.c
1680 +@@ -669,9 +669,9 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
1681 + /* Normally packets are associated with a socket. There may be no socket if an
1682 + * attempt was made to connect to a socket that does not exist.
1683 + */
1684 +-static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
1685 ++static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
1686 ++ struct virtio_vsock_pkt *pkt)
1687 + {
1688 +- const struct virtio_transport *t;
1689 + struct virtio_vsock_pkt *reply;
1690 + struct virtio_vsock_pkt_info info = {
1691 + .op = VIRTIO_VSOCK_OP_RST,
1692 +@@ -691,7 +691,6 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
1693 + if (!reply)
1694 + return -ENOMEM;
1695 +
1696 +- t = virtio_transport_get_ops();
1697 + if (!t) {
1698 + virtio_transport_free_pkt(reply);
1699 + return -ENOTCONN;
1700 +@@ -993,7 +992,8 @@ static bool virtio_transport_space_update(struct sock *sk,
1701 + /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
1702 + * lock.
1703 + */
1704 +-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
1705 ++void virtio_transport_recv_pkt(struct virtio_transport *t,
1706 ++ struct virtio_vsock_pkt *pkt)
1707 + {
1708 + struct sockaddr_vm src, dst;
1709 + struct vsock_sock *vsk;
1710 +@@ -1015,7 +1015,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
1711 + le32_to_cpu(pkt->hdr.fwd_cnt));
1712 +
1713 + if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
1714 +- (void)virtio_transport_reset_no_sock(pkt);
1715 ++ (void)virtio_transport_reset_no_sock(t, pkt);
1716 + goto free_pkt;
1717 + }
1718 +
1719 +@@ -1026,7 +1026,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
1720 + if (!sk) {
1721 + sk = vsock_find_bound_socket(&dst);
1722 + if (!sk) {
1723 +- (void)virtio_transport_reset_no_sock(pkt);
1724 ++ (void)virtio_transport_reset_no_sock(t, pkt);
1725 + goto free_pkt;
1726 + }
1727 + }
1728 +@@ -1060,6 +1060,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
1729 + virtio_transport_free_pkt(pkt);
1730 + break;
1731 + default:
1732 ++ (void)virtio_transport_reset_no_sock(t, pkt);
1733 + virtio_transport_free_pkt(pkt);
1734 + break;
1735 + }