
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 07 Oct 2020 12:48:54 +0000
Message-Id: 1602074917.299592e55430c8340e849ba1eeda4c3c9446b9a4.mpagano@gentoo
1 commit: 299592e55430c8340e849ba1eeda4c3c9446b9a4
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Oct 7 12:48:37 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Oct 7 12:48:37 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=299592e5
7
8 Linux patch 5.4.70
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1069_linux-5.4.70.patch | 2429 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2433 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index dd45626..f195c0d 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -319,6 +319,10 @@ Patch: 1068_linux-5.4.69.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.69
23
24 +Patch: 1069_linux-5.4.70.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.70
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1069_linux-5.4.70.patch b/1069_linux-5.4.70.patch
33 new file mode 100644
34 index 0000000..eeb57d3
35 --- /dev/null
36 +++ b/1069_linux-5.4.70.patch
37 @@ -0,0 +1,2429 @@
38 +diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst
39 +index 5d63b18bd6d1f..60c45c916f7d0 100644
40 +--- a/Documentation/admin-guide/iostats.rst
41 ++++ b/Documentation/admin-guide/iostats.rst
42 +@@ -99,7 +99,7 @@ Field 10 -- # of milliseconds spent doing I/Os
43 +
44 + Since 5.0 this field counts jiffies when at least one request was
45 + started or completed. If request runs more than 2 jiffies then some
46 +- I/O time will not be accounted unless there are other requests.
47 ++ I/O time might be not accounted in case of concurrent requests.
48 +
49 + Field 11 -- weighted # of milliseconds spent doing I/Os
50 + This field is incremented at each I/O start, I/O completion, I/O
51 +@@ -133,6 +133,9 @@ are summed (possibly overflowing the unsigned long variable they are
52 + summed to) and the result given to the user. There is no convenient
53 + user interface for accessing the per-CPU counters themselves.
54 +
55 ++Since 4.19 request times are measured with nanoseconds precision and
56 ++truncated to milliseconds before showing in this interface.
57 ++
58 + Disks vs Partitions
59 + -------------------
60 +
61 +diff --git a/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt b/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt
62 +index d4d83916c09dd..be329ea4794f8 100644
63 +--- a/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt
64 ++++ b/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt
65 +@@ -20,8 +20,9 @@ Required properties:
66 + - gpio-controller : Marks the device node as a GPIO controller
67 + - interrupts : Interrupt specifier, see interrupt-controller/interrupts.txt
68 + - interrupt-controller : Mark the GPIO controller as an interrupt-controller
69 +-- ngpios : number of GPIO lines, see gpio.txt
70 +- (should be multiple of 8, up to 80 pins)
71 ++- ngpios : number of *hardware* GPIO lines, see gpio.txt. This will expose
72 ++ 2 software GPIOs per hardware GPIO: one for hardware input, one for hardware
73 ++ output. Up to 80 pins, must be a multiple of 8.
74 + - clocks : A phandle to the APB clock for SGPM clock division
75 + - bus-frequency : SGPM CLK frequency
76 +
77 +diff --git a/Makefile b/Makefile
78 +index adf3847106775..e409fd909560f 100644
79 +--- a/Makefile
80 ++++ b/Makefile
81 +@@ -1,7 +1,7 @@
82 + # SPDX-License-Identifier: GPL-2.0
83 + VERSION = 5
84 + PATCHLEVEL = 4
85 +-SUBLEVEL = 69
86 ++SUBLEVEL = 70
87 + EXTRAVERSION =
88 + NAME = Kleptomaniac Octopus
89 +
90 +diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
91 +index a6dd80a2c9392..ee50506d86f42 100644
92 +--- a/arch/ia64/mm/init.c
93 ++++ b/arch/ia64/mm/init.c
94 +@@ -518,7 +518,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
95 + if (map_start < map_end)
96 + memmap_init_zone((unsigned long)(map_end - map_start),
97 + args->nid, args->zone, page_to_pfn(map_start),
98 +- MEMMAP_EARLY, NULL);
99 ++ MEMINIT_EARLY, NULL);
100 + return 0;
101 + }
102 +
103 +@@ -527,8 +527,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
104 + unsigned long start_pfn)
105 + {
106 + if (!vmem_map) {
107 +- memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
108 +- NULL);
109 ++ memmap_init_zone(size, nid, zone, start_pfn,
110 ++ MEMINIT_EARLY, NULL);
111 + } else {
112 + struct page *start;
113 + struct memmap_init_callback_data args;
114 +diff --git a/block/bio.c b/block/bio.c
115 +index f07739300dfe3..24704bc2ad6f1 100644
116 +--- a/block/bio.c
117 ++++ b/block/bio.c
118 +@@ -1754,14 +1754,14 @@ defer:
119 + schedule_work(&bio_dirty_work);
120 + }
121 +
122 +-void update_io_ticks(struct hd_struct *part, unsigned long now)
123 ++void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
124 + {
125 + unsigned long stamp;
126 + again:
127 + stamp = READ_ONCE(part->stamp);
128 + if (unlikely(stamp != now)) {
129 + if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
130 +- __part_stat_add(part, io_ticks, 1);
131 ++ __part_stat_add(part, io_ticks, end ? now - stamp : 1);
132 + }
133 + }
134 + if (part->partno) {
135 +@@ -1777,7 +1777,7 @@ void generic_start_io_acct(struct request_queue *q, int op,
136 +
137 + part_stat_lock();
138 +
139 +- update_io_ticks(part, jiffies);
140 ++ update_io_ticks(part, jiffies, false);
141 + part_stat_inc(part, ios[sgrp]);
142 + part_stat_add(part, sectors[sgrp], sectors);
143 + part_inc_in_flight(q, part, op_is_write(op));
144 +@@ -1795,7 +1795,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op,
145 +
146 + part_stat_lock();
147 +
148 +- update_io_ticks(part, now);
149 ++ update_io_ticks(part, now, true);
150 + part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
151 + part_stat_add(part, time_in_queue, duration);
152 + part_dec_in_flight(q, part, op_is_write(req_op));
153 +diff --git a/block/blk-core.c b/block/blk-core.c
154 +index ca6b677356864..81aafb601df06 100644
155 +--- a/block/blk-core.c
156 ++++ b/block/blk-core.c
157 +@@ -1334,7 +1334,7 @@ void blk_account_io_done(struct request *req, u64 now)
158 + part_stat_lock();
159 + part = req->part;
160 +
161 +- update_io_ticks(part, jiffies);
162 ++ update_io_ticks(part, jiffies, true);
163 + part_stat_inc(part, ios[sgrp]);
164 + part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
165 + part_stat_add(part, time_in_queue, nsecs_to_jiffies64(now - req->start_time_ns));
166 +@@ -1376,7 +1376,7 @@ void blk_account_io_start(struct request *rq, bool new_io)
167 + rq->part = part;
168 + }
169 +
170 +- update_io_ticks(part, jiffies);
171 ++ update_io_ticks(part, jiffies, false);
172 +
173 + part_stat_unlock();
174 + }
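
For context on the io_ticks hunks above (block/bio.c and block/blk-core.c): previously each stamp change credited exactly one jiffy, so a single request that ran for many jiffies was under-counted unless other I/O kept bumping the stamp, which is also what the iostats.rst wording change describes. With the new bool end argument, completion credits the whole stamp-to-now gap. Below is a minimal userspace sketch of that rule, simplified to one counter, using C11 atomics in place of the kernel's cmpxchg(); all names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long stamp;   /* stand-in for part->stamp */
static unsigned long io_ticks;        /* stand-in for the io_ticks stat */

static void update_io_ticks(unsigned long now, bool end)
{
	unsigned long old = atomic_load(&stamp);

	while (old != now) {
		/* on failure the CAS reloads 'old', so the loop re-tests it */
		if (atomic_compare_exchange_strong(&stamp, &old, now)) {
			/* start: count one jiffy; end: count the whole gap */
			io_ticks += end ? now - old : 1;
			break;
		}
	}
}

int main(void)
{
	update_io_ticks(100, false);  /* request starts: +1 tick */
	update_io_ticks(150, true);   /* completes 50 jiffies later: +50 ticks */
	printf("io_ticks = %lu\n", io_ticks);  /* 51 */
	return 0;
}

With the pre-patch "+1 per stamp change" rule, the same lone 50-jiffy request would have been credited only 2 ticks.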
175 +diff --git a/drivers/base/node.c b/drivers/base/node.c
176 +index 296546ffed6c1..9c6e6a7b93545 100644
177 +--- a/drivers/base/node.c
178 ++++ b/drivers/base/node.c
179 +@@ -758,14 +758,36 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
180 + return pfn_to_nid(pfn);
181 + }
182 +
183 ++static int do_register_memory_block_under_node(int nid,
184 ++ struct memory_block *mem_blk)
185 ++{
186 ++ int ret;
187 ++
188 ++ /*
189 ++ * If this memory block spans multiple nodes, we only indicate
190 ++ * the last processed node.
191 ++ */
192 ++ mem_blk->nid = nid;
193 ++
194 ++ ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
195 ++ &mem_blk->dev.kobj,
196 ++ kobject_name(&mem_blk->dev.kobj));
197 ++ if (ret)
198 ++ return ret;
199 ++
200 ++ return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
201 ++ &node_devices[nid]->dev.kobj,
202 ++ kobject_name(&node_devices[nid]->dev.kobj));
203 ++}
204 ++
205 + /* register memory section under specified node if it spans that node */
206 +-static int register_mem_sect_under_node(struct memory_block *mem_blk,
207 +- void *arg)
208 ++static int register_mem_block_under_node_early(struct memory_block *mem_blk,
209 ++ void *arg)
210 + {
211 + unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
212 + unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
213 + unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
214 +- int ret, nid = *(int *)arg;
215 ++ int nid = *(int *)arg;
216 + unsigned long pfn;
217 +
218 + for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
219 +@@ -782,38 +804,33 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
220 + }
221 +
222 + /*
223 +- * We need to check if page belongs to nid only for the boot
224 +- * case, during hotplug we know that all pages in the memory
225 +- * block belong to the same node.
226 +- */
227 +- if (system_state == SYSTEM_BOOTING) {
228 +- page_nid = get_nid_for_pfn(pfn);
229 +- if (page_nid < 0)
230 +- continue;
231 +- if (page_nid != nid)
232 +- continue;
233 +- }
234 +-
235 +- /*
236 +- * If this memory block spans multiple nodes, we only indicate
237 +- * the last processed node.
238 ++ * We need to check if page belongs to nid only at the boot
239 ++ * case because node's ranges can be interleaved.
240 + */
241 +- mem_blk->nid = nid;
242 +-
243 +- ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
244 +- &mem_blk->dev.kobj,
245 +- kobject_name(&mem_blk->dev.kobj));
246 +- if (ret)
247 +- return ret;
248 ++ page_nid = get_nid_for_pfn(pfn);
249 ++ if (page_nid < 0)
250 ++ continue;
251 ++ if (page_nid != nid)
252 ++ continue;
253 +
254 +- return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
255 +- &node_devices[nid]->dev.kobj,
256 +- kobject_name(&node_devices[nid]->dev.kobj));
257 ++ return do_register_memory_block_under_node(nid, mem_blk);
258 + }
259 + /* mem section does not span the specified node */
260 + return 0;
261 + }
262 +
263 ++/*
264 ++ * During hotplug we know that all pages in the memory block belong to the same
265 ++ * node.
266 ++ */
267 ++static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
268 ++ void *arg)
269 ++{
270 ++ int nid = *(int *)arg;
271 ++
272 ++ return do_register_memory_block_under_node(nid, mem_blk);
273 ++}
274 ++
275 + /*
276 + * Unregister a memory block device under the node it spans. Memory blocks
277 + * with multiple nodes cannot be offlined and therefore also never be removed.
278 +@@ -829,11 +846,19 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
279 + kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
280 + }
281 +
282 +-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
283 ++int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
284 ++ enum meminit_context context)
285 + {
286 ++ walk_memory_blocks_func_t func;
287 ++
288 ++ if (context == MEMINIT_HOTPLUG)
289 ++ func = register_mem_block_under_node_hotplug;
290 ++ else
291 ++ func = register_mem_block_under_node_early;
292 ++
293 + return walk_memory_blocks(PFN_PHYS(start_pfn),
294 + PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
295 +- register_mem_sect_under_node);
296 ++ func);
297 + }
298 +
299 + #ifdef CONFIG_HUGETLBFS
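
The drivers/base/node.c hunks split the old register_mem_sect_under_node() into an early (boot) callback and a hotplug callback, selected by the new meminit_context argument, because only boot-time memory can have interleaved node ranges that force a per-pfn node check. A toy model of that dispatch pattern, with a simplified walker and made-up block numbers (nothing here is the kernel API):

#include <stdio.h>

enum meminit_context { MEMINIT_EARLY, MEMINIT_HOTPLUG };

typedef int (*walk_func_t)(int block, void *arg);

static int register_early(int block, void *arg)
{
	int nid = *(int *)arg;
	/* boot case: node ranges may interleave, so pages must be checked */
	printf("early: verify pages of block %d against node %d\n", block, nid);
	return 0;
}

static int register_hotplug(int block, void *arg)
{
	int nid = *(int *)arg;
	/* hotplug case: the whole block is known to belong to nid */
	printf("hotplug: link block %d under node %d\n", block, nid);
	return 0;
}

static int walk_blocks(int first, int n, void *arg, walk_func_t func)
{
	for (int b = first; b < first + n; b++) {
		int ret = func(b, arg);
		if (ret)
			return ret;
	}
	return 0;
}

static int link_mem_sections(int nid, enum meminit_context context)
{
	walk_func_t func = (context == MEMINIT_HOTPLUG)
				? register_hotplug : register_early;
	return walk_blocks(0, 2, &nid, func);
}

int main(void)
{
	return link_mem_sections(1, MEMINIT_HOTPLUG);
}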
300 +diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
301 +index 51564fc23c639..f4086287bb71b 100644
302 +--- a/drivers/clk/samsung/clk-exynos4.c
303 ++++ b/drivers/clk/samsung/clk-exynos4.c
304 +@@ -927,7 +927,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = {
305 + GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0),
306 + GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
307 + GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
308 +- GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
309 ++ GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0),
310 + GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
311 + CLK_IGNORE_UNUSED, 0),
312 + GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0,
313 +@@ -969,7 +969,7 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
314 + 0),
315 + GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0),
316 + GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
317 +- GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
318 ++ GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0),
319 + GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
320 + CLK_IGNORE_UNUSED, 0),
321 + GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0,
322 +diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
323 +index 993f3a73c71e7..55d3b505b08c9 100644
324 +--- a/drivers/clk/socfpga/clk-s10.c
325 ++++ b/drivers/clk/socfpga/clk-s10.c
326 +@@ -107,7 +107,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
327 + { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
328 + 0, 0, 2, 0xB0, 1},
329 + { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
330 +- ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
331 ++ ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 2, 0xB0, 2},
332 + { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
333 + ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3},
334 + { STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
335 +diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
336 +index 1583f5fc992f3..80f640d9ea71c 100644
337 +--- a/drivers/clk/tegra/clk-pll.c
338 ++++ b/drivers/clk/tegra/clk-pll.c
339 +@@ -1569,9 +1569,6 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
340 + unsigned long flags = 0;
341 + unsigned long input_rate;
342 +
343 +- if (clk_pll_is_enabled(hw))
344 +- return 0;
345 +-
346 + input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
347 +
348 + if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
349 +diff --git a/drivers/clocksource/timer-gx6605s.c b/drivers/clocksource/timer-gx6605s.c
350 +index 80d0939d040b5..8d386adbe8009 100644
351 +--- a/drivers/clocksource/timer-gx6605s.c
352 ++++ b/drivers/clocksource/timer-gx6605s.c
353 +@@ -28,6 +28,7 @@ static irqreturn_t gx6605s_timer_interrupt(int irq, void *dev)
354 + void __iomem *base = timer_of_base(to_timer_of(ce));
355 +
356 + writel_relaxed(GX6605S_STATUS_CLR, base + TIMER_STATUS);
357 ++ writel_relaxed(0, base + TIMER_INI);
358 +
359 + ce->event_handler(ce);
360 +
361 +diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
362 +index 09e53c5f3b0a4..2820c59b5f071 100644
363 +--- a/drivers/gpio/gpio-aspeed.c
364 ++++ b/drivers/gpio/gpio-aspeed.c
365 +@@ -1115,8 +1115,8 @@ static const struct aspeed_gpio_config ast2500_config =
366 +
367 + static const struct aspeed_bank_props ast2600_bank_props[] = {
368 + /* input output */
369 +- {5, 0xffffffff, 0x0000ffff}, /* U/V/W/X */
370 +- {6, 0xffff0000, 0x0fff0000}, /* Y/Z */
371 ++ {5, 0xffffffff, 0xffffff00}, /* U/V/W/X */
372 ++ {6, 0x0000ffff, 0x0000ffff}, /* Y/Z */
373 + { },
374 + };
375 +
376 +diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
377 +index 213aedc97dc2e..9c1c4d81aa7b6 100644
378 +--- a/drivers/gpio/gpio-mockup.c
379 ++++ b/drivers/gpio/gpio-mockup.c
380 +@@ -497,6 +497,7 @@ static int __init gpio_mockup_init(void)
381 + err = platform_driver_register(&gpio_mockup_driver);
382 + if (err) {
383 + gpio_mockup_err("error registering platform driver\n");
384 ++ debugfs_remove_recursive(gpio_mockup_dbg_dir);
385 + return err;
386 + }
387 +
388 +@@ -527,6 +528,7 @@ static int __init gpio_mockup_init(void)
389 + gpio_mockup_err("error registering device");
390 + platform_driver_unregister(&gpio_mockup_driver);
391 + gpio_mockup_unregister_pdevs();
392 ++ debugfs_remove_recursive(gpio_mockup_dbg_dir);
393 + return PTR_ERR(pdev);
394 + }
395 +
396 +diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
397 +index 006a7e6a75f21..7e70d2d06c3fe 100644
398 +--- a/drivers/gpio/gpio-siox.c
399 ++++ b/drivers/gpio/gpio-siox.c
400 +@@ -245,6 +245,7 @@ static int gpio_siox_probe(struct siox_device *sdevice)
401 + girq->chip = &ddata->ichip;
402 + girq->default_type = IRQ_TYPE_NONE;
403 + girq->handler = handle_level_irq;
404 ++ girq->threaded = true;
405 +
406 + ret = devm_gpiochip_add_data(dev, &ddata->gchip, NULL);
407 + if (ret)
408 +diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
409 +index d7314d39ab65b..36ea8a3bd4510 100644
410 +--- a/drivers/gpio/gpio-sprd.c
411 ++++ b/drivers/gpio/gpio-sprd.c
412 +@@ -149,17 +149,20 @@ static int sprd_gpio_irq_set_type(struct irq_data *data,
413 + sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
414 + sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0);
415 + sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 1);
416 ++ sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
417 + irq_set_handler_locked(data, handle_edge_irq);
418 + break;
419 + case IRQ_TYPE_EDGE_FALLING:
420 + sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
421 + sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0);
422 + sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 0);
423 ++ sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
424 + irq_set_handler_locked(data, handle_edge_irq);
425 + break;
426 + case IRQ_TYPE_EDGE_BOTH:
427 + sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
428 + sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 1);
429 ++ sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
430 + irq_set_handler_locked(data, handle_edge_irq);
431 + break;
432 + case IRQ_TYPE_LEVEL_HIGH:
433 +diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
434 +index 75b1135b383a7..daf29044d0f19 100644
435 +--- a/drivers/gpio/gpio-tc3589x.c
436 ++++ b/drivers/gpio/gpio-tc3589x.c
437 +@@ -209,7 +209,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
438 + continue;
439 +
440 + tc3589x_gpio->oldregs[i][j] = new;
441 +- tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new);
442 ++ tc3589x_reg_write(tc3589x, regmap[i] + j, new);
443 + }
444 + }
445 +
446 +diff --git a/drivers/gpio/sgpio-aspeed.c b/drivers/gpio/sgpio-aspeed.c
447 +index 8319812593e31..3a5dfb8ded1fb 100644
448 +--- a/drivers/gpio/sgpio-aspeed.c
449 ++++ b/drivers/gpio/sgpio-aspeed.c
450 +@@ -17,7 +17,17 @@
451 + #include <linux/spinlock.h>
452 + #include <linux/string.h>
453 +
454 +-#define MAX_NR_SGPIO 80
455 ++/*
456 ++ * MAX_NR_HW_GPIO represents the number of actual hardware-supported GPIOs (ie,
457 ++ * slots within the clocked serial GPIO data). Since each HW GPIO is both an
458 ++ * input and an output, we provide MAX_NR_HW_GPIO * 2 lines on our gpiochip
459 ++ * device.
460 ++ *
461 ++ * We use SGPIO_OUTPUT_OFFSET to define the split between the inputs and
462 ++ * outputs; the inputs start at line 0, the outputs start at OUTPUT_OFFSET.
463 ++ */
464 ++#define MAX_NR_HW_SGPIO 80
465 ++#define SGPIO_OUTPUT_OFFSET MAX_NR_HW_SGPIO
466 +
467 + #define ASPEED_SGPIO_CTRL 0x54
468 +
469 +@@ -30,8 +40,8 @@ struct aspeed_sgpio {
470 + struct clk *pclk;
471 + spinlock_t lock;
472 + void __iomem *base;
473 +- uint32_t dir_in[3];
474 + int irq;
475 ++ int n_sgpio;
476 + };
477 +
478 + struct aspeed_sgpio_bank {
479 +@@ -111,31 +121,69 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
480 + }
481 + }
482 +
483 +-#define GPIO_BANK(x) ((x) >> 5)
484 +-#define GPIO_OFFSET(x) ((x) & 0x1f)
485 ++#define GPIO_BANK(x) ((x % SGPIO_OUTPUT_OFFSET) >> 5)
486 ++#define GPIO_OFFSET(x) ((x % SGPIO_OUTPUT_OFFSET) & 0x1f)
487 + #define GPIO_BIT(x) BIT(GPIO_OFFSET(x))
488 +
489 + static const struct aspeed_sgpio_bank *to_bank(unsigned int offset)
490 + {
491 +- unsigned int bank = GPIO_BANK(offset);
492 ++ unsigned int bank;
493 ++
494 ++ bank = GPIO_BANK(offset);
495 +
496 + WARN_ON(bank >= ARRAY_SIZE(aspeed_sgpio_banks));
497 + return &aspeed_sgpio_banks[bank];
498 + }
499 +
500 ++static int aspeed_sgpio_init_valid_mask(struct gpio_chip *gc,
501 ++ unsigned long *valid_mask, unsigned int ngpios)
502 ++{
503 ++ struct aspeed_sgpio *sgpio = gpiochip_get_data(gc);
504 ++ int n = sgpio->n_sgpio;
505 ++ int c = SGPIO_OUTPUT_OFFSET - n;
506 ++
507 ++ WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2);
508 ++
509 ++ /* input GPIOs in the lower range */
510 ++ bitmap_set(valid_mask, 0, n);
511 ++ bitmap_clear(valid_mask, n, c);
512 ++
513 ++ /* output GPIOS above SGPIO_OUTPUT_OFFSET */
514 ++ bitmap_set(valid_mask, SGPIO_OUTPUT_OFFSET, n);
515 ++ bitmap_clear(valid_mask, SGPIO_OUTPUT_OFFSET + n, c);
516 ++
517 ++ return 0;
518 ++}
519 ++
520 ++static void aspeed_sgpio_irq_init_valid_mask(struct gpio_chip *gc,
521 ++ unsigned long *valid_mask, unsigned int ngpios)
522 ++{
523 ++ struct aspeed_sgpio *sgpio = gpiochip_get_data(gc);
524 ++ int n = sgpio->n_sgpio;
525 ++
526 ++ WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2);
527 ++
528 ++ /* input GPIOs in the lower range */
529 ++ bitmap_set(valid_mask, 0, n);
530 ++ bitmap_clear(valid_mask, n, ngpios - n);
531 ++}
532 ++
533 ++static bool aspeed_sgpio_is_input(unsigned int offset)
534 ++{
535 ++ return offset < SGPIO_OUTPUT_OFFSET;
536 ++}
537 ++
538 + static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
539 + {
540 + struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
541 + const struct aspeed_sgpio_bank *bank = to_bank(offset);
542 + unsigned long flags;
543 + enum aspeed_sgpio_reg reg;
544 +- bool is_input;
545 + int rc = 0;
546 +
547 + spin_lock_irqsave(&gpio->lock, flags);
548 +
549 +- is_input = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset);
550 +- reg = is_input ? reg_val : reg_rdata;
551 ++ reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata;
552 + rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));
553 +
554 + spin_unlock_irqrestore(&gpio->lock, flags);
555 +@@ -143,22 +191,31 @@ static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
556 + return rc;
557 + }
558 +
559 +-static void sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
560 ++static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
561 + {
562 + struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
563 + const struct aspeed_sgpio_bank *bank = to_bank(offset);
564 +- void __iomem *addr;
565 ++ void __iomem *addr_r, *addr_w;
566 + u32 reg = 0;
567 +
568 +- addr = bank_reg(gpio, bank, reg_val);
569 +- reg = ioread32(addr);
570 ++ if (aspeed_sgpio_is_input(offset))
571 ++ return -EINVAL;
572 ++
573 ++ /* Since this is an output, read the cached value from rdata, then
574 ++ * update val. */
575 ++ addr_r = bank_reg(gpio, bank, reg_rdata);
576 ++ addr_w = bank_reg(gpio, bank, reg_val);
577 ++
578 ++ reg = ioread32(addr_r);
579 +
580 + if (val)
581 + reg |= GPIO_BIT(offset);
582 + else
583 + reg &= ~GPIO_BIT(offset);
584 +
585 +- iowrite32(reg, addr);
586 ++ iowrite32(reg, addr_w);
587 ++
588 ++ return 0;
589 + }
590 +
591 + static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
592 +@@ -175,43 +232,28 @@ static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
593 +
594 + static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
595 + {
596 +- struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
597 +- unsigned long flags;
598 +-
599 +- spin_lock_irqsave(&gpio->lock, flags);
600 +- gpio->dir_in[GPIO_BANK(offset)] |= GPIO_BIT(offset);
601 +- spin_unlock_irqrestore(&gpio->lock, flags);
602 +-
603 +- return 0;
604 ++ return aspeed_sgpio_is_input(offset) ? 0 : -EINVAL;
605 + }
606 +
607 + static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
608 + {
609 + struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
610 + unsigned long flags;
611 ++ int rc;
612 +
613 +- spin_lock_irqsave(&gpio->lock, flags);
614 +-
615 +- gpio->dir_in[GPIO_BANK(offset)] &= ~GPIO_BIT(offset);
616 +- sgpio_set_value(gc, offset, val);
617 ++ /* No special action is required for setting the direction; we'll
618 ++ * error-out in sgpio_set_value if this isn't an output GPIO */
619 +
620 ++ spin_lock_irqsave(&gpio->lock, flags);
621 ++ rc = sgpio_set_value(gc, offset, val);
622 + spin_unlock_irqrestore(&gpio->lock, flags);
623 +
624 +- return 0;
625 ++ return rc;
626 + }
627 +
628 + static int aspeed_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
629 + {
630 +- int dir_status;
631 +- struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
632 +- unsigned long flags;
633 +-
634 +- spin_lock_irqsave(&gpio->lock, flags);
635 +- dir_status = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset);
636 +- spin_unlock_irqrestore(&gpio->lock, flags);
637 +-
638 +- return dir_status;
639 +-
640 ++ return !!aspeed_sgpio_is_input(offset);
641 + }
642 +
643 + static void irqd_to_aspeed_sgpio_data(struct irq_data *d,
644 +@@ -402,6 +444,7 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
645 +
646 + irq = &gpio->chip.irq;
647 + irq->chip = &aspeed_sgpio_irqchip;
648 ++ irq->init_valid_mask = aspeed_sgpio_irq_init_valid_mask;
649 + irq->handler = handle_bad_irq;
650 + irq->default_type = IRQ_TYPE_NONE;
651 + irq->parent_handler = aspeed_sgpio_irq_handler;
652 +@@ -409,17 +452,15 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
653 + irq->parents = &gpio->irq;
654 + irq->num_parents = 1;
655 +
656 +- /* set IRQ settings and Enable Interrupt */
657 ++ /* Apply default IRQ settings */
658 + for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
659 + bank = &aspeed_sgpio_banks[i];
660 + /* set falling or level-low irq */
661 + iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0));
662 + /* trigger type is edge */
663 + iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1));
664 +- /* dual edge trigger mode. */
665 +- iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_type2));
666 +- /* enable irq */
667 +- iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_enable));
668 ++ /* single edge trigger */
669 ++ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2));
670 + }
671 +
672 + return 0;
673 +@@ -452,11 +493,12 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
674 + if (rc < 0) {
675 + dev_err(&pdev->dev, "Could not read ngpios property\n");
676 + return -EINVAL;
677 +- } else if (nr_gpios > MAX_NR_SGPIO) {
678 ++ } else if (nr_gpios > MAX_NR_HW_SGPIO) {
679 + dev_err(&pdev->dev, "Number of GPIOs exceeds the maximum of %d: %d\n",
680 +- MAX_NR_SGPIO, nr_gpios);
681 ++ MAX_NR_HW_SGPIO, nr_gpios);
682 + return -EINVAL;
683 + }
684 ++ gpio->n_sgpio = nr_gpios;
685 +
686 + rc = of_property_read_u32(pdev->dev.of_node, "bus-frequency", &sgpio_freq);
687 + if (rc < 0) {
688 +@@ -497,7 +539,8 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
689 + spin_lock_init(&gpio->lock);
690 +
691 + gpio->chip.parent = &pdev->dev;
692 +- gpio->chip.ngpio = nr_gpios;
693 ++ gpio->chip.ngpio = MAX_NR_HW_SGPIO * 2;
694 ++ gpio->chip.init_valid_mask = aspeed_sgpio_init_valid_mask;
695 + gpio->chip.direction_input = aspeed_sgpio_dir_in;
696 + gpio->chip.direction_output = aspeed_sgpio_dir_out;
697 + gpio->chip.get_direction = aspeed_sgpio_get_direction;
698 +@@ -509,9 +552,6 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
699 + gpio->chip.label = dev_name(&pdev->dev);
700 + gpio->chip.base = -1;
701 +
702 +- /* set all SGPIO pins as input (1). */
703 +- memset(gpio->dir_in, 0xff, sizeof(gpio->dir_in));
704 +-
705 + aspeed_sgpio_setup_irqs(gpio, pdev);
706 +
707 + rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
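
To make the sgpio-aspeed changes above concrete: the chip now exposes 160 lines for 80 hardware slots, with lines 0-79 as the inputs and lines 80-159 as the matching outputs, and the bank/bit math folds both halves onto the same hardware slot via the modulo in GPIO_BANK()/GPIO_OFFSET(). A small standalone sketch of that mapping; the macros mirror the patch, while main() is purely illustrative:

#include <stdbool.h>
#include <stdio.h>

#define MAX_NR_HW_SGPIO     80
#define SGPIO_OUTPUT_OFFSET MAX_NR_HW_SGPIO

#define GPIO_BANK(x)   (((x) % SGPIO_OUTPUT_OFFSET) >> 5)
#define GPIO_OFFSET(x) (((x) % SGPIO_OUTPUT_OFFSET) & 0x1f)

static bool is_input(unsigned int offset)
{
	return offset < SGPIO_OUTPUT_OFFSET;
}

int main(void)
{
	/* line 10 is the input side of HW slot 10; line 90 is its output side */
	unsigned int lines[] = { 10, 90 };

	for (int i = 0; i < 2; i++) {
		unsigned int off = lines[i];

		printf("line %u: %s, bank %u, bit %u\n", off,
		       is_input(off) ? "input" : "output",
		       GPIO_BANK(off), GPIO_OFFSET(off));
	}
	return 0;
}

Both lines land on bank 0, bit 10, which is exactly why a single dir_in[] bitmap is no longer needed: direction is encoded in the line number itself.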
708 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
709 +index e0aed42d9cbda..b588e0e409e72 100644
710 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
711 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
712 +@@ -297,7 +297,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
713 + take the current one */
714 + if (active && !adev->have_disp_power_ref) {
715 + adev->have_disp_power_ref = true;
716 +- goto out;
717 ++ return ret;
718 + }
719 + /* if we have no active crtcs, then drop the power ref
720 + we got before */
721 +diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
722 +index 18b4881f44814..12b99ba575017 100644
723 +--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
724 ++++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
725 +@@ -396,7 +396,7 @@ static struct regmap_config sun8i_mixer_regmap_config = {
726 + .reg_bits = 32,
727 + .val_bits = 32,
728 + .reg_stride = 4,
729 +- .max_register = 0xbfffc, /* guessed */
730 ++ .max_register = 0xffffc, /* guessed */
731 + };
732 +
733 + static int sun8i_mixer_of_get_id(struct device_node *node)
734 +diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
735 +index 1213e1932ccb5..24d584a1c9a78 100644
736 +--- a/drivers/i2c/busses/i2c-cpm.c
737 ++++ b/drivers/i2c/busses/i2c-cpm.c
738 +@@ -65,6 +65,9 @@ struct i2c_ram {
739 + char res1[4]; /* Reserved */
740 + ushort rpbase; /* Relocation pointer */
741 + char res2[2]; /* Reserved */
742 ++ /* The following elements are only for CPM2 */
743 ++ char res3[4]; /* Reserved */
744 ++ uint sdmatmp; /* Internal */
745 + };
746 +
747 + #define I2COM_START 0x80
748 +diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
749 +index 21fdcde77883f..56e7696aa3c0f 100644
750 +--- a/drivers/iio/adc/qcom-spmi-adc5.c
751 ++++ b/drivers/iio/adc/qcom-spmi-adc5.c
752 +@@ -786,7 +786,7 @@ static int adc5_probe(struct platform_device *pdev)
753 +
754 + static struct platform_driver adc5_driver = {
755 + .driver = {
756 +- .name = "qcom-spmi-adc5.c",
757 ++ .name = "qcom-spmi-adc5",
758 + .of_match_table = adc5_match_table,
759 + },
760 + .probe = adc5_probe,
761 +diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
762 +index 854d5e7587241..ef2fa0905208d 100644
763 +--- a/drivers/input/mouse/trackpoint.c
764 ++++ b/drivers/input/mouse/trackpoint.c
765 +@@ -282,6 +282,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse,
766 + case TP_VARIANT_ALPS:
767 + case TP_VARIANT_ELAN:
768 + case TP_VARIANT_NXP:
769 ++ case TP_VARIANT_JYT_SYNAPTICS:
770 ++ case TP_VARIANT_SYNAPTICS:
771 + if (variant_id)
772 + *variant_id = param[0];
773 + if (firmware_id)
774 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
775 +index 42771b9b10a00..98f0c7729b754 100644
776 +--- a/drivers/input/serio/i8042-x86ia64io.h
777 ++++ b/drivers/input/serio/i8042-x86ia64io.h
778 +@@ -721,6 +721,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
779 + DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
780 + },
781 + },
782 ++ {
783 ++ /* Acer Aspire 5 A515 */
784 ++ .matches = {
785 ++ DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
786 ++ DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
787 ++ },
788 ++ },
789 + { }
790 + };
791 +
792 +diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
793 +index 9c94e16fb1277..55ed857f804f7 100644
794 +--- a/drivers/iommu/exynos-iommu.c
795 ++++ b/drivers/iommu/exynos-iommu.c
796 +@@ -1299,13 +1299,17 @@ static int exynos_iommu_of_xlate(struct device *dev,
797 + return -ENODEV;
798 +
799 + data = platform_get_drvdata(sysmmu);
800 +- if (!data)
801 ++ if (!data) {
802 ++ put_device(&sysmmu->dev);
803 + return -ENODEV;
804 ++ }
805 +
806 + if (!owner) {
807 + owner = kzalloc(sizeof(*owner), GFP_KERNEL);
808 +- if (!owner)
809 ++ if (!owner) {
810 ++ put_device(&sysmmu->dev);
811 + return -ENOMEM;
812 ++ }
813 +
814 + INIT_LIST_HEAD(&owner->controllers);
815 + mutex_init(&owner->rpm_lock);
816 +diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
817 +index 693ee73eb2912..ef03d6fafc5ce 100644
818 +--- a/drivers/memstick/core/memstick.c
819 ++++ b/drivers/memstick/core/memstick.c
820 +@@ -441,6 +441,9 @@ static void memstick_check(struct work_struct *work)
821 + } else if (host->card->stop)
822 + host->card->stop(host->card);
823 +
824 ++ if (host->removing)
825 ++ goto out_power_off;
826 ++
827 + card = memstick_alloc_card(host);
828 +
829 + if (!card) {
830 +@@ -545,6 +548,7 @@ EXPORT_SYMBOL(memstick_add_host);
831 + */
832 + void memstick_remove_host(struct memstick_host *host)
833 + {
834 ++ host->removing = 1;
835 + flush_workqueue(workqueue);
836 + mutex_lock(&host->lock);
837 + if (host->card)
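
The memstick hunks close a teardown race: memstick_remove_host() flushes the detection workqueue, but without a flag a queued memstick_check() could still allocate a fresh card mid-removal. Setting host->removing before the flush makes the flushed work bail out. A single-threaded toy model of that ordering (the struct fields and helpers here are stand-ins, not the real API):

#include <stdbool.h>
#include <stdio.h>

struct host {
	bool removing;
	bool has_card;
};

/* Stand-in for the queued memstick_check() work item. */
static void check_work(struct host *host)
{
	/* ... stop and drop the current card ... */
	host->has_card = false;
	if (host->removing)
		return;              /* the new bail-out: no re-detect in teardown */
	host->has_card = true;       /* memstick_alloc_card() stand-in */
}

/* Stand-in for memstick_remove_host(). */
static void remove_host(struct host *host)
{
	host->removing = true;       /* 1) mark the host first */
	check_work(host);            /* 2) flush_workqueue(): pending work runs here */
	/* 3) teardown proceeds knowing no new card appeared */
}

int main(void)
{
	struct host h = { .has_card = true };

	remove_host(&h);
	printf("has_card after removal: %d\n", h.has_card);  /* 0 */
	return 0;
}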
838 +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
839 +index 425aa898e797a..91d0cb08238cf 100644
840 +--- a/drivers/mmc/host/sdhci-pci-core.c
841 ++++ b/drivers/mmc/host/sdhci-pci-core.c
842 +@@ -798,7 +798,8 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
843 + static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
844 + {
845 + return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
846 +- dmi_match(DMI_BIOS_VENDOR, "LENOVO");
847 ++ (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
848 ++ dmi_match(DMI_SYS_VENDOR, "IRBIS"));
849 + }
850 +
851 + static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
852 +diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
853 +index f1a2da15dd0a6..b14d93da242f1 100644
854 +--- a/drivers/net/ethernet/dec/tulip/de2104x.c
855 ++++ b/drivers/net/ethernet/dec/tulip/de2104x.c
856 +@@ -91,7 +91,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi
857 + #define DSL CONFIG_DE2104X_DSL
858 + #endif
859 +
860 +-#define DE_RX_RING_SIZE 64
861 ++#define DE_RX_RING_SIZE 128
862 + #define DE_TX_RING_SIZE 64
863 + #define DE_RING_BYTES \
864 + ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
865 +diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
866 +index bd9c07888ebb4..6fa7a009a24a4 100644
867 +--- a/drivers/net/usb/rndis_host.c
868 ++++ b/drivers/net/usb/rndis_host.c
869 +@@ -201,7 +201,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
870 + dev_dbg(&info->control->dev,
871 + "rndis response error, code %d\n", retval);
872 + }
873 +- msleep(20);
874 ++ msleep(40);
875 + }
876 + dev_dbg(&info->control->dev, "rndis response timeout\n");
877 + return -ETIMEDOUT;
878 +diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
879 +index cc33441af4691..50804d0473083 100644
880 +--- a/drivers/net/wan/hdlc_cisco.c
881 ++++ b/drivers/net/wan/hdlc_cisco.c
882 +@@ -118,6 +118,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
883 + skb_put(skb, sizeof(struct cisco_packet));
884 + skb->priority = TC_PRIO_CONTROL;
885 + skb->dev = dev;
886 ++ skb->protocol = htons(ETH_P_HDLC);
887 + skb_reset_network_header(skb);
888 +
889 + dev_queue_xmit(skb);
890 +diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
891 +index 9acad651ea1f6..d6cfd51613ed8 100644
892 +--- a/drivers/net/wan/hdlc_fr.c
893 ++++ b/drivers/net/wan/hdlc_fr.c
894 +@@ -433,6 +433,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
895 + if (pvc->state.fecn) /* TX Congestion counter */
896 + dev->stats.tx_compressed++;
897 + skb->dev = pvc->frad;
898 ++ skb->protocol = htons(ETH_P_HDLC);
899 ++ skb_reset_network_header(skb);
900 + dev_queue_xmit(skb);
901 + return NETDEV_TX_OK;
902 + }
903 +@@ -555,6 +557,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
904 + skb_put(skb, i);
905 + skb->priority = TC_PRIO_CONTROL;
906 + skb->dev = dev;
907 ++ skb->protocol = htons(ETH_P_HDLC);
908 + skb_reset_network_header(skb);
909 +
910 + dev_queue_xmit(skb);
911 +@@ -1041,7 +1044,7 @@ static void pvc_setup(struct net_device *dev)
912 + {
913 + dev->type = ARPHRD_DLCI;
914 + dev->flags = IFF_POINTOPOINT;
915 +- dev->hard_header_len = 10;
916 ++ dev->hard_header_len = 0;
917 + dev->addr_len = 2;
918 + netif_keep_dst(dev);
919 + }
920 +@@ -1093,6 +1096,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
921 + dev->mtu = HDLC_MAX_MTU;
922 + dev->min_mtu = 68;
923 + dev->max_mtu = HDLC_MAX_MTU;
924 ++ dev->needed_headroom = 10;
925 + dev->priv_flags |= IFF_NO_QUEUE;
926 + dev->ml_priv = pvc;
927 +
928 +diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
929 +index 16f33d1ffbfb9..64f8556513369 100644
930 +--- a/drivers/net/wan/hdlc_ppp.c
931 ++++ b/drivers/net/wan/hdlc_ppp.c
932 +@@ -251,6 +251,7 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
933 +
934 + skb->priority = TC_PRIO_CONTROL;
935 + skb->dev = dev;
936 ++ skb->protocol = htons(ETH_P_HDLC);
937 + skb_reset_network_header(skb);
938 + skb_queue_tail(&tx_queue, skb);
939 + }
940 +diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
941 +index 2cff914aada55..709e3de0f6af1 100644
942 +--- a/drivers/net/wan/lapbether.c
943 ++++ b/drivers/net/wan/lapbether.c
944 +@@ -198,8 +198,6 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
945 + struct net_device *dev;
946 + int size = skb->len;
947 +
948 +- skb->protocol = htons(ETH_P_X25);
949 +-
950 + ptr = skb_push(skb, 2);
951 +
952 + *ptr++ = size % 256;
953 +@@ -210,6 +208,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
954 +
955 + skb->dev = dev = lapbeth->ethdev;
956 +
957 ++ skb->protocol = htons(ETH_P_DEC);
958 ++
959 + skb_reset_network_header(skb);
960 +
961 + dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
962 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
963 +index 2cd32901d95c7..207ed6d49ad7c 100644
964 +--- a/drivers/nvme/host/core.c
965 ++++ b/drivers/nvme/host/core.c
966 +@@ -630,7 +630,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
967 + }
968 +
969 + __rq_for_each_bio(bio, req) {
970 +- u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
971 ++ u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
972 + u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
973 +
974 + if (n < segments) {
975 +@@ -671,7 +671,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
976 + cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
977 + cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
978 + cmnd->write_zeroes.slba =
979 +- cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
980 ++ cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
981 + cmnd->write_zeroes.length =
982 + cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
983 + cmnd->write_zeroes.control = 0;
984 +@@ -695,7 +695,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
985 +
986 + cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
987 + cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
988 +- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
989 ++ cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
990 + cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
991 +
992 + if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
993 +@@ -1680,12 +1680,6 @@ static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
994 + }
995 + #endif /* CONFIG_BLK_DEV_INTEGRITY */
996 +
997 +-static void nvme_set_chunk_size(struct nvme_ns *ns)
998 +-{
999 +- u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
1000 +- blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
1001 +-}
1002 +-
1003 + static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1004 + {
1005 + struct nvme_ctrl *ctrl = ns->ctrl;
1006 +@@ -1719,8 +1713,7 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1007 +
1008 + static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1009 + {
1010 +- u32 max_sectors;
1011 +- unsigned short bs = 1 << ns->lba_shift;
1012 ++ u64 max_blocks;
1013 +
1014 + if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
1015 + (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
1016 +@@ -1736,11 +1729,12 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1017 + * nvme_init_identify() if available.
1018 + */
1019 + if (ns->ctrl->max_hw_sectors == UINT_MAX)
1020 +- max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9;
1021 ++ max_blocks = (u64)USHRT_MAX + 1;
1022 + else
1023 +- max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9;
1024 ++ max_blocks = ns->ctrl->max_hw_sectors + 1;
1025 +
1026 +- blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
1027 ++ blk_queue_max_write_zeroes_sectors(disk->queue,
1028 ++ nvme_lba_to_sect(ns, max_blocks));
1029 + }
1030 +
1031 + static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
1032 +@@ -1774,7 +1768,7 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1033 + static void nvme_update_disk_info(struct gendisk *disk,
1034 + struct nvme_ns *ns, struct nvme_id_ns *id)
1035 + {
1036 +- sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
1037 ++ sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
1038 + unsigned short bs = 1 << ns->lba_shift;
1039 + u32 atomic_bs, phys_bs, io_opt;
1040 +
1041 +@@ -1840,6 +1834,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
1042 + static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1043 + {
1044 + struct nvme_ns *ns = disk->private_data;
1045 ++ u32 iob;
1046 +
1047 + /*
1048 + * If identify namespace failed, use default 512 byte block size so
1049 +@@ -1848,7 +1843,13 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1050 + ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
1051 + if (ns->lba_shift == 0)
1052 + ns->lba_shift = 9;
1053 +- ns->noiob = le16_to_cpu(id->noiob);
1054 ++
1055 ++ if ((ns->ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1056 ++ is_power_of_2(ns->ctrl->max_hw_sectors))
1057 ++ iob = ns->ctrl->max_hw_sectors;
1058 ++ else
1059 ++ iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
1060 ++
1061 + ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
1062 + ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
1063 + /* the PI implementation requires metadata equal t10 pi tuple size */
1064 +@@ -1857,8 +1858,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1065 + else
1066 + ns->pi_type = 0;
1067 +
1068 +- if (ns->noiob)
1069 +- nvme_set_chunk_size(ns);
1070 ++ if (iob)
1071 ++ blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob));
1072 + nvme_update_disk_info(disk, ns, id);
1073 + #ifdef CONFIG_NVME_MULTIPATH
1074 + if (ns->head->disk) {
1075 +@@ -2209,9 +2210,6 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1076 + blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1077 + blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
1078 + }
1079 +- if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1080 +- is_power_of_2(ctrl->max_hw_sectors))
1081 +- blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
1082 + blk_queue_virt_boundary(q, ctrl->page_size - 1);
1083 + if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
1084 + vwc = true;
1085 +@@ -2933,10 +2931,24 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
1086 + return -EWOULDBLOCK;
1087 + }
1088 +
1089 ++ nvme_get_ctrl(ctrl);
1090 ++ if (!try_module_get(ctrl->ops->module))
1091 ++ return -EINVAL;
1092 ++
1093 + file->private_data = ctrl;
1094 + return 0;
1095 + }
1096 +
1097 ++static int nvme_dev_release(struct inode *inode, struct file *file)
1098 ++{
1099 ++ struct nvme_ctrl *ctrl =
1100 ++ container_of(inode->i_cdev, struct nvme_ctrl, cdev);
1101 ++
1102 ++ module_put(ctrl->ops->module);
1103 ++ nvme_put_ctrl(ctrl);
1104 ++ return 0;
1105 ++}
1106 ++
1107 + static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
1108 + {
1109 + struct nvme_ns *ns;
1110 +@@ -2999,6 +3011,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
1111 + static const struct file_operations nvme_dev_fops = {
1112 + .owner = THIS_MODULE,
1113 + .open = nvme_dev_open,
1114 ++ .release = nvme_dev_release,
1115 + .unlocked_ioctl = nvme_dev_ioctl,
1116 + .compat_ioctl = nvme_dev_ioctl,
1117 + };
1118 +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
1119 +index da801a14cd13d..65b3dc9cd693b 100644
1120 +--- a/drivers/nvme/host/fc.c
1121 ++++ b/drivers/nvme/host/fc.c
1122 +@@ -3319,12 +3319,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
1123 + spin_lock_irqsave(&nvme_fc_lock, flags);
1124 + list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
1125 + if (lport->localport.node_name != laddr.nn ||
1126 +- lport->localport.port_name != laddr.pn)
1127 ++ lport->localport.port_name != laddr.pn ||
1128 ++ lport->localport.port_state != FC_OBJSTATE_ONLINE)
1129 + continue;
1130 +
1131 + list_for_each_entry(rport, &lport->endp_list, endp_list) {
1132 + if (rport->remoteport.node_name != raddr.nn ||
1133 +- rport->remoteport.port_name != raddr.pn)
1134 ++ rport->remoteport.port_name != raddr.pn ||
1135 ++ rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1136 + continue;
1137 +
1138 + /* if fail to get reference fall through. Will error */
1139 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
1140 +index b7117fb09dd0f..d7132d8cb7c5d 100644
1141 +--- a/drivers/nvme/host/nvme.h
1142 ++++ b/drivers/nvme/host/nvme.h
1143 +@@ -384,7 +384,6 @@ struct nvme_ns {
1144 + #define NVME_NS_REMOVING 0
1145 + #define NVME_NS_DEAD 1
1146 + #define NVME_NS_ANA_PENDING 2
1147 +- u16 noiob;
1148 +
1149 + struct nvme_fault_inject fault_inject;
1150 +
1151 +@@ -429,9 +428,20 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
1152 + return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
1153 + }
1154 +
1155 +-static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
1156 ++/*
1157 ++ * Convert a 512B sector number to a device logical block number.
1158 ++ */
1159 ++static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
1160 ++{
1161 ++ return sector >> (ns->lba_shift - SECTOR_SHIFT);
1162 ++}
1163 ++
1164 ++/*
1165 ++ * Convert a device logical block number to a 512B sector number.
1166 ++ */
1167 ++static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
1168 + {
1169 +- return (sector >> (ns->lba_shift - 9));
1170 ++ return lba << (ns->lba_shift - SECTOR_SHIFT);
1171 + }
1172 +
1173 + static inline void nvme_end_request(struct request *req, __le16 status,
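
The nvme.h hunk replaces nvme_block_nr() with a named pair of converters between 512-byte block-layer sectors and device logical blocks, which the core.c hunks then use consistently. A standalone sketch of the same arithmetic for a 4K-formatted namespace (lba_shift = 12); the helpers mirror the patch but take lba_shift directly instead of a struct nvme_ns:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9  /* block-layer sectors are always 512 bytes */

/* 512B sector number -> device logical block number */
static uint64_t nvme_sect_to_lba(unsigned int lba_shift, uint64_t sector)
{
	return sector >> (lba_shift - SECTOR_SHIFT);
}

/* device logical block number -> 512B sector number */
static uint64_t nvme_lba_to_sect(unsigned int lba_shift, uint64_t lba)
{
	return lba << (lba_shift - SECTOR_SHIFT);
}

int main(void)
{
	unsigned int lba_shift = 12;   /* 4096-byte LBAs */
	uint64_t sector = 4096;        /* 2 MiB into the device */
	uint64_t lba = nvme_sect_to_lba(lba_shift, sector);

	printf("sector %llu -> LBA %llu -> sector %llu\n",
	       (unsigned long long)sector,
	       (unsigned long long)lba,
	       (unsigned long long)nvme_lba_to_sect(lba_shift, lba));
	return 0;
}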
1174 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
1175 +index 75f26d2ec6429..af0b51d1d43e8 100644
1176 +--- a/drivers/nvme/host/pci.c
1177 ++++ b/drivers/nvme/host/pci.c
1178 +@@ -941,13 +941,6 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
1179 + volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
1180 + struct request *req;
1181 +
1182 +- if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
1183 +- dev_warn(nvmeq->dev->ctrl.device,
1184 +- "invalid id %d completed on queue %d\n",
1185 +- cqe->command_id, le16_to_cpu(cqe->sq_id));
1186 +- return;
1187 +- }
1188 +-
1189 + /*
1190 + * AEN requests are special as they don't time out and can
1191 + * survive any kind of queue freeze and often don't respond to
1192 +@@ -962,6 +955,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
1193 + }
1194 +
1195 + req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
1196 ++ if (unlikely(!req)) {
1197 ++ dev_warn(nvmeq->dev->ctrl.device,
1198 ++ "invalid id %d completed on queue %d\n",
1199 ++ cqe->command_id, le16_to_cpu(cqe->sq_id));
1200 ++ return;
1201 ++ }
1202 ++
1203 + trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
1204 + nvme_end_request(req, cqe->status, cqe->result);
1205 + }
1206 +diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
1207 +index 88a047b9fa6fa..6ef12017ff4e8 100644
1208 +--- a/drivers/phy/ti/phy-am654-serdes.c
1209 ++++ b/drivers/phy/ti/phy-am654-serdes.c
1210 +@@ -625,8 +625,10 @@ static int serdes_am654_probe(struct platform_device *pdev)
1211 + pm_runtime_enable(dev);
1212 +
1213 + phy = devm_phy_create(dev, NULL, &ops);
1214 +- if (IS_ERR(phy))
1215 +- return PTR_ERR(phy);
1216 ++ if (IS_ERR(phy)) {
1217 ++ ret = PTR_ERR(phy);
1218 ++ goto clk_err;
1219 ++ }
1220 +
1221 + phy_set_drvdata(phy, am654_phy);
1222 + phy_provider = devm_of_phy_provider_register(dev, serdes_am654_xlate);
1223 +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
1224 +index a767a05fa3a0d..48e2a6c56a83b 100644
1225 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
1226 ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
1227 +@@ -414,7 +414,7 @@ static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = {
1228 + MPP_VAR_FUNCTION(0x1, "i2c0", "sck", V_98DX3236_PLUS)),
1229 + MPP_MODE(15,
1230 + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
1231 +- MPP_VAR_FUNCTION(0x4, "i2c0", "sda", V_98DX3236_PLUS)),
1232 ++ MPP_VAR_FUNCTION(0x1, "i2c0", "sda", V_98DX3236_PLUS)),
1233 + MPP_MODE(16,
1234 + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
1235 + MPP_VAR_FUNCTION(0x4, "dev", "oe", V_98DX3236_PLUS)),
1236 +diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
1237 +index f20326714b9d5..215bf6624e7c3 100644
1238 +--- a/drivers/spi/spi-fsl-espi.c
1239 ++++ b/drivers/spi/spi-fsl-espi.c
1240 +@@ -555,13 +555,14 @@ static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
1241 + static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
1242 + {
1243 + struct fsl_espi *espi = context_data;
1244 +- u32 events;
1245 ++ u32 events, mask;
1246 +
1247 + spin_lock(&espi->lock);
1248 +
1249 + /* Get interrupt events(tx/rx) */
1250 + events = fsl_espi_read_reg(espi, ESPI_SPIE);
1251 +- if (!events) {
1252 ++ mask = fsl_espi_read_reg(espi, ESPI_SPIM);
1253 ++ if (!(events & mask)) {
1254 + spin_unlock(&espi->lock);
1255 + return IRQ_NONE;
1256 + }
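
The spi-fsl-espi hunk makes the handler mask the raw status register with the interrupt-enable mask before claiming the interrupt, which matters on shared IRQ lines: an event that fired but is masked off must yield IRQ_NONE so the other handlers sharing the line get to run. A toy model of that check (the values and names are illustrative):

#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static enum irqreturn espi_irq(unsigned int events, unsigned int mask)
{
	/* only claim the IRQ if an event we actually unmasked fired */
	if (!(events & mask))
		return IRQ_NONE;   /* not ours: let the next handler run */
	/* ... handle tx/rx events ... */
	return IRQ_HANDLED;
}

int main(void)
{
	printf("%d\n", espi_irq(0x4, 0x3));  /* masked event -> 0 (IRQ_NONE) */
	printf("%d\n", espi_irq(0x1, 0x3));  /* unmasked event -> 1 (IRQ_HANDLED) */
	return 0;
}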
1257 +diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
1258 +index b4206b0dede54..1f638759a9533 100644
1259 +--- a/drivers/usb/gadget/function/f_ncm.c
1260 ++++ b/drivers/usb/gadget/function/f_ncm.c
1261 +@@ -1189,7 +1189,6 @@ static int ncm_unwrap_ntb(struct gether *port,
1262 + const struct ndp_parser_opts *opts = ncm->parser_opts;
1263 + unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
1264 + int dgram_counter;
1265 +- bool ndp_after_header;
1266 +
1267 + /* dwSignature */
1268 + if (get_unaligned_le32(tmp) != opts->nth_sign) {
1269 +@@ -1216,7 +1215,6 @@ static int ncm_unwrap_ntb(struct gether *port,
1270 + }
1271 +
1272 + ndp_index = get_ncm(&tmp, opts->ndp_index);
1273 +- ndp_after_header = false;
1274 +
1275 + /* Run through all the NDP's in the NTB */
1276 + do {
1277 +@@ -1232,8 +1230,6 @@ static int ncm_unwrap_ntb(struct gether *port,
1278 + ndp_index);
1279 + goto err;
1280 + }
1281 +- if (ndp_index == opts->nth_size)
1282 +- ndp_after_header = true;
1283 +
1284 + /*
1285 + * walk through NDP
1286 +@@ -1312,37 +1308,13 @@ static int ncm_unwrap_ntb(struct gether *port,
1287 + index2 = get_ncm(&tmp, opts->dgram_item_len);
1288 + dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
1289 +
1290 +- if (index2 == 0 || dg_len2 == 0)
1291 +- break;
1292 +-
1293 + /* wDatagramIndex[1] */
1294 +- if (ndp_after_header) {
1295 +- if (index2 < opts->nth_size + opts->ndp_size) {
1296 +- INFO(port->func.config->cdev,
1297 +- "Bad index: %#X\n", index2);
1298 +- goto err;
1299 +- }
1300 +- } else {
1301 +- if (index2 < opts->nth_size + opts->dpe_size) {
1302 +- INFO(port->func.config->cdev,
1303 +- "Bad index: %#X\n", index2);
1304 +- goto err;
1305 +- }
1306 +- }
1307 + if (index2 > block_len - opts->dpe_size) {
1308 + INFO(port->func.config->cdev,
1309 + "Bad index: %#X\n", index2);
1310 + goto err;
1311 + }
1312 +
1313 +- /* wDatagramLength[1] */
1314 +- if ((dg_len2 < 14 + crc_len) ||
1315 +- (dg_len2 > frame_max)) {
1316 +- INFO(port->func.config->cdev,
1317 +- "Bad dgram length: %#X\n", dg_len);
1318 +- goto err;
1319 +- }
1320 +-
1321 + /*
1322 + * Copy the data into a new skb.
1323 + * This ensures the truesize is correct
1324 +@@ -1359,6 +1331,8 @@ static int ncm_unwrap_ntb(struct gether *port,
1325 + ndp_len -= 2 * (opts->dgram_item_len * 2);
1326 +
1327 + dgram_counter++;
1328 ++ if (index2 == 0 || dg_len2 == 0)
1329 ++ break;
1330 + } while (ndp_len > 2 * (opts->dgram_item_len * 2));
1331 + } while (ndp_index);
1332 +
1333 +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
1334 +index ca68a27b98edd..f21f5bfbb78dc 100644
1335 +--- a/drivers/vhost/vsock.c
1336 ++++ b/drivers/vhost/vsock.c
1337 +@@ -384,6 +384,52 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
1338 + return val < vq->num;
1339 + }
1340 +
1341 ++static struct virtio_transport vhost_transport = {
1342 ++ .transport = {
1343 ++ .get_local_cid = vhost_transport_get_local_cid,
1344 ++
1345 ++ .init = virtio_transport_do_socket_init,
1346 ++ .destruct = virtio_transport_destruct,
1347 ++ .release = virtio_transport_release,
1348 ++ .connect = virtio_transport_connect,
1349 ++ .shutdown = virtio_transport_shutdown,
1350 ++ .cancel_pkt = vhost_transport_cancel_pkt,
1351 ++
1352 ++ .dgram_enqueue = virtio_transport_dgram_enqueue,
1353 ++ .dgram_dequeue = virtio_transport_dgram_dequeue,
1354 ++ .dgram_bind = virtio_transport_dgram_bind,
1355 ++ .dgram_allow = virtio_transport_dgram_allow,
1356 ++
1357 ++ .stream_enqueue = virtio_transport_stream_enqueue,
1358 ++ .stream_dequeue = virtio_transport_stream_dequeue,
1359 ++ .stream_has_data = virtio_transport_stream_has_data,
1360 ++ .stream_has_space = virtio_transport_stream_has_space,
1361 ++ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
1362 ++ .stream_is_active = virtio_transport_stream_is_active,
1363 ++ .stream_allow = virtio_transport_stream_allow,
1364 ++
1365 ++ .notify_poll_in = virtio_transport_notify_poll_in,
1366 ++ .notify_poll_out = virtio_transport_notify_poll_out,
1367 ++ .notify_recv_init = virtio_transport_notify_recv_init,
1368 ++ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
1369 ++ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
1370 ++ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
1371 ++ .notify_send_init = virtio_transport_notify_send_init,
1372 ++ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
1373 ++ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
1374 ++ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
1375 ++
1376 ++ .set_buffer_size = virtio_transport_set_buffer_size,
1377 ++ .set_min_buffer_size = virtio_transport_set_min_buffer_size,
1378 ++ .set_max_buffer_size = virtio_transport_set_max_buffer_size,
1379 ++ .get_buffer_size = virtio_transport_get_buffer_size,
1380 ++ .get_min_buffer_size = virtio_transport_get_min_buffer_size,
1381 ++ .get_max_buffer_size = virtio_transport_get_max_buffer_size,
1382 ++ },
1383 ++
1384 ++ .send_pkt = vhost_transport_send_pkt,
1385 ++};
1386 ++
1387 + static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
1388 + {
1389 + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1390 +@@ -440,7 +486,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
1391 + if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
1392 + le64_to_cpu(pkt->hdr.dst_cid) ==
1393 + vhost_transport_get_local_cid())
1394 +- virtio_transport_recv_pkt(pkt);
1395 ++ virtio_transport_recv_pkt(&vhost_transport, pkt);
1396 + else
1397 + virtio_transport_free_pkt(pkt);
1398 +
1399 +@@ -793,52 +839,6 @@ static struct miscdevice vhost_vsock_misc = {
1400 + .fops = &vhost_vsock_fops,
1401 + };
1402 +
1403 +-static struct virtio_transport vhost_transport = {
1404 +- .transport = {
1405 +- .get_local_cid = vhost_transport_get_local_cid,
1406 +-
1407 +- .init = virtio_transport_do_socket_init,
1408 +- .destruct = virtio_transport_destruct,
1409 +- .release = virtio_transport_release,
1410 +- .connect = virtio_transport_connect,
1411 +- .shutdown = virtio_transport_shutdown,
1412 +- .cancel_pkt = vhost_transport_cancel_pkt,
1413 +-
1414 +- .dgram_enqueue = virtio_transport_dgram_enqueue,
1415 +- .dgram_dequeue = virtio_transport_dgram_dequeue,
1416 +- .dgram_bind = virtio_transport_dgram_bind,
1417 +- .dgram_allow = virtio_transport_dgram_allow,
1418 +-
1419 +- .stream_enqueue = virtio_transport_stream_enqueue,
1420 +- .stream_dequeue = virtio_transport_stream_dequeue,
1421 +- .stream_has_data = virtio_transport_stream_has_data,
1422 +- .stream_has_space = virtio_transport_stream_has_space,
1423 +- .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
1424 +- .stream_is_active = virtio_transport_stream_is_active,
1425 +- .stream_allow = virtio_transport_stream_allow,
1426 +-
1427 +- .notify_poll_in = virtio_transport_notify_poll_in,
1428 +- .notify_poll_out = virtio_transport_notify_poll_out,
1429 +- .notify_recv_init = virtio_transport_notify_recv_init,
1430 +- .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
1431 +- .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
1432 +- .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
1433 +- .notify_send_init = virtio_transport_notify_send_init,
1434 +- .notify_send_pre_block = virtio_transport_notify_send_pre_block,
1435 +- .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
1436 +- .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
1437 +-
1438 +- .set_buffer_size = virtio_transport_set_buffer_size,
1439 +- .set_min_buffer_size = virtio_transport_set_min_buffer_size,
1440 +- .set_max_buffer_size = virtio_transport_set_max_buffer_size,
1441 +- .get_buffer_size = virtio_transport_get_buffer_size,
1442 +- .get_min_buffer_size = virtio_transport_get_min_buffer_size,
1443 +- .get_max_buffer_size = virtio_transport_get_max_buffer_size,
1444 +- },
1445 +-
1446 +- .send_pkt = vhost_transport_send_pkt,
1447 +-};
1448 +-
1449 + static int __init vhost_vsock_init(void)
1450 + {
1451 + int ret;
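
The vhost_transport definition is hoisted above vhost_vsock_handle_tx_kick() because the handler now passes &vhost_transport into virtio_transport_recv_pkt(), and at file scope C requires an identifier to be declared before its first use. A minimal standalone sketch of the two options (moving the definition, as the patch does, or leaving a tentative declaration in place); all names here are hypothetical, not kernel code:

#include <stdio.h>

struct ops { void (*fn)(void); };

/* Tentative declaration: makes my_ops visible before its initializer.
 * The patch instead moves the whole initialized definition up, which
 * amounts to the same thing. */
static struct ops my_ops;

static void handler(void)
{
    my_ops.fn();            /* references my_ops before the full definition */
}

static void hello(void) { puts("hello"); }

static struct ops my_ops = { .fn = hello };

int main(void)
{
    handler();
    return 0;
}
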
1452 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
1453 +index 48890826b5e66..196bd241e701a 100644
1454 +--- a/fs/btrfs/dev-replace.c
1455 ++++ b/fs/btrfs/dev-replace.c
1456 +@@ -562,6 +562,37 @@ static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info)
1457 + wake_up(&fs_info->dev_replace.replace_wait);
1458 + }
1459 +
1460 ++/*
1461 ++ * When finishing the device replace, before swapping the source device with the
1462 ++ * target device we must update the chunk allocation state in the target device,
1463 ++ * as it is empty because replace works by directly copying the chunks and not
1464 ++ * through the normal chunk allocation path.
1465 ++ */
1466 ++static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev,
1467 ++ struct btrfs_device *tgtdev)
1468 ++{
1469 ++ struct extent_state *cached_state = NULL;
1470 ++ u64 start = 0;
1471 ++ u64 found_start;
1472 ++ u64 found_end;
1473 ++ int ret = 0;
1474 ++
1475 ++ lockdep_assert_held(&srcdev->fs_info->chunk_mutex);
1476 ++
1477 ++ while (!find_first_extent_bit(&srcdev->alloc_state, start,
1478 ++ &found_start, &found_end,
1479 ++ CHUNK_ALLOCATED, &cached_state)) {
1480 ++ ret = set_extent_bits(&tgtdev->alloc_state, found_start,
1481 ++ found_end, CHUNK_ALLOCATED);
1482 ++ if (ret)
1483 ++ break;
1484 ++ start = found_end + 1;
1485 ++ }
1486 ++
1487 ++ free_extent_state(cached_state);
1488 ++ return ret;
1489 ++}
1490 ++
1491 + static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
1492 + int scrub_ret)
1493 + {
1494 +@@ -636,8 +667,14 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
1495 + dev_replace->time_stopped = ktime_get_real_seconds();
1496 + dev_replace->item_needs_writeback = 1;
1497 +
1498 +- /* replace old device with new one in mapping tree */
1499 ++ /*
1500 ++ * Update allocation state in the new device and replace the old device
1501 ++ * with the new one in the mapping tree.
1502 ++ */
1503 + if (!scrub_ret) {
1504 ++ scrub_ret = btrfs_set_target_alloc_state(src_device, tgt_device);
1505 ++ if (scrub_ret)
1506 ++ goto error;
1507 + btrfs_dev_replace_update_device_in_mapping_tree(fs_info,
1508 + src_device,
1509 + tgt_device);
1510 +@@ -648,6 +685,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
1511 + btrfs_dev_name(src_device),
1512 + src_device->devid,
1513 + rcu_str_deref(tgt_device->name), scrub_ret);
1514 ++error:
1515 + up_write(&dev_replace->rwsem);
1516 + mutex_unlock(&fs_info->chunk_mutex);
1517 + mutex_unlock(&fs_info->fs_devices->device_list_mutex);
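
Per the comment added above, the replace target starts with an empty alloc_state tree because device replace copies chunks directly rather than going through the allocator, so btrfs_set_target_alloc_state() replays every CHUNK_ALLOCATED range from source to target before the devices are swapped. A minimal userspace sketch of that "replay marked ranges" loop, with a plain array standing in for the kernel's extent-state tree (all names hypothetical):

#include <stdio.h>

/* Stand-in for the per-device alloc_state tree: a list of allocated
 * [start, end] ranges. */
struct range { unsigned long long start, end; };

/* Mirror the loop in btrfs_set_target_alloc_state(): copy every
 * allocated range from src into dst; returns 0 on success. */
static int replay_alloc_state(const struct range *src, int nsrc,
                              struct range *dst, int *ndst, int cap)
{
    for (int i = 0; i < nsrc; i++) {
        if (*ndst >= cap)
            return -1;           /* analogous to set_extent_bits() failing */
        dst[(*ndst)++] = src[i]; /* mark the range allocated on the target */
    }
    return 0;
}

int main(void)
{
    struct range src[] = { {0, 1023}, {4096, 8191} };
    struct range dst[8];
    int ndst = 0;

    if (replay_alloc_state(src, 2, dst, &ndst, 8) == 0)
        for (int i = 0; i < ndst; i++)
            printf("allocated: %llu-%llu\n", dst[i].start, dst[i].end);
    return 0;
}
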
1518 +diff --git a/fs/eventpoll.c b/fs/eventpoll.c
1519 +index ae1d32344f7ac..339453ac834cc 100644
1520 +--- a/fs/eventpoll.c
1521 ++++ b/fs/eventpoll.c
1522 +@@ -218,8 +218,7 @@ struct eventpoll {
1523 + struct file *file;
1524 +
1525 + /* used to optimize loop detection check */
1526 +- int visited;
1527 +- struct list_head visited_list_link;
1528 ++ u64 gen;
1529 +
1530 + #ifdef CONFIG_NET_RX_BUSY_POLL
1531 + /* used to track busy poll napi_id */
1532 +@@ -269,6 +268,8 @@ static long max_user_watches __read_mostly;
1533 + */
1534 + static DEFINE_MUTEX(epmutex);
1535 +
1536 ++static u64 loop_check_gen = 0;
1537 ++
1538 + /* Used to check for epoll file descriptor inclusion loops */
1539 + static struct nested_calls poll_loop_ncalls;
1540 +
1541 +@@ -278,9 +279,6 @@ static struct kmem_cache *epi_cache __read_mostly;
1542 + /* Slab cache used to allocate "struct eppoll_entry" */
1543 + static struct kmem_cache *pwq_cache __read_mostly;
1544 +
1545 +-/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
1546 +-static LIST_HEAD(visited_list);
1547 +-
1548 + /*
1549 + * List of files with newly added links, where we may need to limit the number
1550 + * of emanating paths. Protected by the epmutex.
1551 +@@ -1455,7 +1453,7 @@ static int reverse_path_check(void)
1552 +
1553 + static int ep_create_wakeup_source(struct epitem *epi)
1554 + {
1555 +- const char *name;
1556 ++ struct name_snapshot n;
1557 + struct wakeup_source *ws;
1558 +
1559 + if (!epi->ep->ws) {
1560 +@@ -1464,8 +1462,9 @@ static int ep_create_wakeup_source(struct epitem *epi)
1561 + return -ENOMEM;
1562 + }
1563 +
1564 +- name = epi->ffd.file->f_path.dentry->d_name.name;
1565 +- ws = wakeup_source_register(NULL, name);
1566 ++ take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
1567 ++ ws = wakeup_source_register(NULL, n.name.name);
1568 ++ release_dentry_name_snapshot(&n);
1569 +
1570 + if (!ws)
1571 + return -ENOMEM;
1572 +@@ -1527,6 +1526,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
1573 + RCU_INIT_POINTER(epi->ws, NULL);
1574 + }
1575 +
1576 ++ /* Add the current item to the list of active epoll hook for this file */
1577 ++ spin_lock(&tfile->f_lock);
1578 ++ list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
1579 ++ spin_unlock(&tfile->f_lock);
1580 ++
1581 ++ /*
1582 ++ * Add the current item to the RB tree. All RB tree operations are
1583 ++ * protected by "mtx", and ep_insert() is called with "mtx" held.
1584 ++ */
1585 ++ ep_rbtree_insert(ep, epi);
1586 ++
1587 ++ /* now check if we've created too many backpaths */
1588 ++ error = -EINVAL;
1589 ++ if (full_check && reverse_path_check())
1590 ++ goto error_remove_epi;
1591 ++
1592 + /* Initialize the poll table using the queue callback */
1593 + epq.epi = epi;
1594 + init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
1595 +@@ -1549,22 +1564,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
1596 + if (epi->nwait < 0)
1597 + goto error_unregister;
1598 +
1599 +- /* Add the current item to the list of active epoll hook for this file */
1600 +- spin_lock(&tfile->f_lock);
1601 +- list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
1602 +- spin_unlock(&tfile->f_lock);
1603 +-
1604 +- /*
1605 +- * Add the current item to the RB tree. All RB tree operations are
1606 +- * protected by "mtx", and ep_insert() is called with "mtx" held.
1607 +- */
1608 +- ep_rbtree_insert(ep, epi);
1609 +-
1610 +- /* now check if we've created too many backpaths */
1611 +- error = -EINVAL;
1612 +- if (full_check && reverse_path_check())
1613 +- goto error_remove_epi;
1614 +-
1615 + /* We have to drop the new item inside our item list to keep track of it */
1616 + write_lock_irq(&ep->lock);
1617 +
1618 +@@ -1593,6 +1592,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
1619 +
1620 + return 0;
1621 +
1622 ++error_unregister:
1623 ++ ep_unregister_pollwait(ep, epi);
1624 + error_remove_epi:
1625 + spin_lock(&tfile->f_lock);
1626 + list_del_rcu(&epi->fllink);
1627 +@@ -1600,9 +1601,6 @@ error_remove_epi:
1628 +
1629 + rb_erase_cached(&epi->rbn, &ep->rbr);
1630 +
1631 +-error_unregister:
1632 +- ep_unregister_pollwait(ep, epi);
1633 +-
1634 + /*
1635 + * We need to do this because an event could have been arrived on some
1636 + * allocated wait queue. Note that we don't care about the ep->ovflist
1637 +@@ -1969,13 +1967,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
1638 + struct epitem *epi;
1639 +
1640 + mutex_lock_nested(&ep->mtx, call_nests + 1);
1641 +- ep->visited = 1;
1642 +- list_add(&ep->visited_list_link, &visited_list);
1643 ++ ep->gen = loop_check_gen;
1644 + for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1645 + epi = rb_entry(rbp, struct epitem, rbn);
1646 + if (unlikely(is_file_epoll(epi->ffd.file))) {
1647 + ep_tovisit = epi->ffd.file->private_data;
1648 +- if (ep_tovisit->visited)
1649 ++ if (ep_tovisit->gen == loop_check_gen)
1650 + continue;
1651 + error = ep_call_nested(&poll_loop_ncalls,
1652 + ep_loop_check_proc, epi->ffd.file,
1653 +@@ -2016,18 +2013,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
1654 + */
1655 + static int ep_loop_check(struct eventpoll *ep, struct file *file)
1656 + {
1657 +- int ret;
1658 +- struct eventpoll *ep_cur, *ep_next;
1659 +-
1660 +- ret = ep_call_nested(&poll_loop_ncalls,
1661 ++ return ep_call_nested(&poll_loop_ncalls,
1662 + ep_loop_check_proc, file, ep, current);
1663 +- /* clear visited list */
1664 +- list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
1665 +- visited_list_link) {
1666 +- ep_cur->visited = 0;
1667 +- list_del(&ep_cur->visited_list_link);
1668 +- }
1669 +- return ret;
1670 + }
1671 +
1672 + static void clear_tfile_check_list(void)
1673 +@@ -2189,6 +2176,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1674 + mutex_lock_nested(&ep->mtx, 0);
1675 + if (op == EPOLL_CTL_ADD) {
1676 + if (!list_empty(&f.file->f_ep_links) ||
1677 ++ ep->gen == loop_check_gen ||
1678 + is_file_epoll(tf.file)) {
1679 + full_check = 1;
1680 + mutex_unlock(&ep->mtx);
1681 +@@ -2249,6 +2237,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1682 + error_tgt_fput:
1683 + if (full_check) {
1684 + clear_tfile_check_list();
1685 ++ loop_check_gen++;
1686 + mutex_unlock(&epmutex);
1687 + }
1688 +
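
The eventpoll hunks above replace the visited flag plus visited_list pair with a single u64 generation counter: an eventpoll counts as visited iff ep->gen == loop_check_gen, and finishing a loop check just increments loop_check_gen, which invalidates every mark in O(1) instead of walking a list to clear flags. A standalone sketch of the generation-counter marking on a plain graph (names hypothetical, not kernel code):

#include <stdio.h>

/* Generation-counter "visited" marking, as in the epoll loop-check fix:
 * starting a new traversal is just loop_check_gen++, no cleanup walk. */
static unsigned long long loop_check_gen;

struct node {
    unsigned long long gen;      /* visited iff gen == loop_check_gen */
    int nchild;
    struct node *child[4];
};

static int count_reachable(struct node *n)
{
    if (n->gen == loop_check_gen)
        return 0;                /* seen in this generation: skip */
    n->gen = loop_check_gen;
    int total = 1;
    for (int i = 0; i < n->nchild; i++)
        total += count_reachable(n->child[i]);
    return total;
}

int main(void)
{
    struct node a = {0}, b = {0}, c = {0};
    a.child[a.nchild++] = &b;
    a.child[a.nchild++] = &c;
    b.child[b.nchild++] = &c;    /* diamond: c reachable twice, counted once */

    loop_check_gen++;
    printf("first pass:  %d\n", count_reachable(&a));
    loop_check_gen++;            /* "clears" all marks without touching nodes */
    printf("second pass: %d\n", count_reachable(&a));
    return 0;
}

Fresh nodes start with gen == 0, which is why the counter is bumped before (or after) every check rather than compared against zero.
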
1689 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
1690 +index f8d8a8e34b808..ab4fc1255aca8 100644
1691 +--- a/fs/fuse/file.c
1692 ++++ b/fs/fuse/file.c
1693 +@@ -3074,11 +3074,10 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
1694 + ssize_t ret = 0;
1695 + struct file *file = iocb->ki_filp;
1696 + struct fuse_file *ff = file->private_data;
1697 +- bool async_dio = ff->fc->async_dio;
1698 + loff_t pos = 0;
1699 + struct inode *inode;
1700 + loff_t i_size;
1701 +- size_t count = iov_iter_count(iter);
1702 ++ size_t count = iov_iter_count(iter), shortened = 0;
1703 + loff_t offset = iocb->ki_pos;
1704 + struct fuse_io_priv *io;
1705 +
1706 +@@ -3086,17 +3085,9 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
1707 + inode = file->f_mapping->host;
1708 + i_size = i_size_read(inode);
1709 +
1710 +- if ((iov_iter_rw(iter) == READ) && (offset > i_size))
1711 ++ if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
1712 + return 0;
1713 +
1714 +- /* optimization for short read */
1715 +- if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
1716 +- if (offset >= i_size)
1717 +- return 0;
1718 +- iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
1719 +- count = iov_iter_count(iter);
1720 +- }
1721 +-
1722 + io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
1723 + if (!io)
1724 + return -ENOMEM;
1725 +@@ -3112,15 +3103,22 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
1726 + * By default, we want to optimize all I/Os with async request
1727 + * submission to the client filesystem if supported.
1728 + */
1729 +- io->async = async_dio;
1730 ++ io->async = ff->fc->async_dio;
1731 + io->iocb = iocb;
1732 + io->blocking = is_sync_kiocb(iocb);
1733 +
1734 ++ /* optimization for short read */
1735 ++ if (io->async && !io->write && offset + count > i_size) {
1736 ++ iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
1737 ++ shortened = count - iov_iter_count(iter);
1738 ++ count -= shortened;
1739 ++ }
1740 ++
1741 + /*
1742 + * We cannot asynchronously extend the size of a file.
1743 + * In such case the aio will behave exactly like sync io.
1744 + */
1745 +- if ((offset + count > i_size) && iov_iter_rw(iter) == WRITE)
1746 ++ if ((offset + count > i_size) && io->write)
1747 + io->blocking = true;
1748 +
1749 + if (io->async && io->blocking) {
1750 +@@ -3138,6 +3136,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
1751 + } else {
1752 + ret = __fuse_direct_read(io, iter, &pos);
1753 + }
1754 ++ iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
1755 +
1756 + if (io->async) {
1757 + bool blocking = io->blocking;
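
The fuse hunk above defers the short-read truncation until io->async and io->write are known, records how many bytes were clipped in `shortened`, and calls iov_iter_reexpand() afterwards so the caller's view of the iterator is restored. A sketch of just that clamp-and-restore arithmetic, ignoring the fuse_round_up() rounding the real code applies (values hypothetical):

#include <stdio.h>

/* Clamp an async read at EOF, remember the clipped amount, and restore
 * the full count once the transfer is done, as fuse_direct_IO() does
 * with iov_iter_truncate()/iov_iter_reexpand(). */
int main(void)
{
    unsigned long long i_size = 1000;   /* file size */
    unsigned long long offset = 900;    /* read position */
    unsigned long long count = 300;     /* bytes requested */
    unsigned long long shortened = 0;

    if (offset >= i_size) {             /* the ">" -> ">=" fix above */
        printf("read at/past EOF: 0 bytes\n");
        return 0;
    }
    if (offset + count > i_size) {      /* truncate at EOF */
        shortened = count - (i_size - offset);
        count -= shortened;
    }

    printf("issue read of %llu bytes (clipped %llu)\n", count, shortened);

    count += shortened;                 /* re-expand for the caller */
    printf("iterator restored to %llu bytes\n", count);
    return 0;
}
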
1758 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
1759 +index 05ed7be8a6345..188b17a3b19eb 100644
1760 +--- a/fs/nfs/dir.c
1761 ++++ b/fs/nfs/dir.c
1762 +@@ -553,6 +553,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
1763 + xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
1764 +
1765 + do {
1766 ++ if (entry->label)
1767 ++ entry->label->len = NFS4_MAXLABELLEN;
1768 ++
1769 + status = xdr_decode(desc, entry, &stream);
1770 + if (status != 0) {
1771 + if (status == -EAGAIN)
1772 +diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
1773 +index f780e223b1185..239c9548b1568 100644
1774 +--- a/fs/xfs/xfs_iomap.c
1775 ++++ b/fs/xfs/xfs_iomap.c
1776 +@@ -1002,9 +1002,15 @@ xfs_file_iomap_begin(
1777 + * I/O, which must be block aligned, we need to report the
1778 + * newly allocated address. If the data fork has a hole, copy
1779 + * the COW fork mapping to avoid allocating to the data fork.
1780 ++ *
1781 ++ * Otherwise, ensure that the imap range does not extend past
1782 ++ * the range allocated/found in cmap.
1783 + */
1784 + if (directio || imap.br_startblock == HOLESTARTBLOCK)
1785 + imap = cmap;
1786 ++ else
1787 ++ xfs_trim_extent(&imap, cmap.br_startoff,
1788 ++ cmap.br_blockcount);
1789 +
1790 + end_fsb = imap.br_startoff + imap.br_blockcount;
1791 + length = XFS_FSB_TO_B(mp, end_fsb) - offset;
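
Per the comment added above, when the data-fork mapping is kept it must not extend past the range found in the COW fork, so xfs_trim_extent() clamps imap to cmap's window. A sketch of that clamp on a plain [start, count] pair (helper name and types are illustrative, and overlapping ranges are assumed):

#include <stdio.h>

/* Clamp mapping [*m_start, *m_start + *m_count) to the window
 * [w_start, w_start + w_count), in the spirit of
 * xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount). */
static void trim_extent(unsigned long long *m_start, unsigned long long *m_count,
                        unsigned long long w_start, unsigned long long w_count)
{
    unsigned long long end = *m_start + *m_count;
    unsigned long long w_end = w_start + w_count;

    if (*m_start < w_start) {            /* assumes the ranges overlap */
        *m_count -= w_start - *m_start;
        *m_start = w_start;
    }
    if (end > w_end)
        *m_count -= end - w_end;
}

int main(void)
{
    unsigned long long start = 0, count = 100;
    trim_extent(&start, &count, 10, 50); /* window: blocks 10..59 */
    printf("trimmed to start=%llu count=%llu\n", start, count);
    return 0;
}
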
1792 +diff --git a/include/linux/genhd.h b/include/linux/genhd.h
1793 +index 62a2ec9f17df8..c1bf9956256f6 100644
1794 +--- a/include/linux/genhd.h
1795 ++++ b/include/linux/genhd.h
1796 +@@ -419,7 +419,7 @@ static inline void free_part_info(struct hd_struct *part)
1797 + kfree(part->info);
1798 + }
1799 +
1800 +-void update_io_ticks(struct hd_struct *part, unsigned long now);
1801 ++void update_io_ticks(struct hd_struct *part, unsigned long now, bool end);
1802 +
1803 + /* block/genhd.c */
1804 + extern void device_add_disk(struct device *parent, struct gendisk *disk,
1805 +diff --git a/include/linux/memstick.h b/include/linux/memstick.h
1806 +index 216a713bef7f0..1198ea3d40126 100644
1807 +--- a/include/linux/memstick.h
1808 ++++ b/include/linux/memstick.h
1809 +@@ -281,6 +281,7 @@ struct memstick_host {
1810 +
1811 + struct memstick_dev *card;
1812 + unsigned int retries;
1813 ++ bool removing;
1814 +
1815 + /* Notify the host that some requests are pending. */
1816 + void (*request)(struct memstick_host *host);
1817 +diff --git a/include/linux/mm.h b/include/linux/mm.h
1818 +index 3285dae06c030..34119f393a802 100644
1819 +--- a/include/linux/mm.h
1820 ++++ b/include/linux/mm.h
1821 +@@ -2208,7 +2208,7 @@ static inline void zero_resv_unavail(void) {}
1822 +
1823 + extern void set_dma_reserve(unsigned long new_dma_reserve);
1824 + extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
1825 +- enum memmap_context, struct vmem_altmap *);
1826 ++ enum meminit_context, struct vmem_altmap *);
1827 + extern void setup_per_zone_wmarks(void);
1828 + extern int __meminit init_per_zone_wmark_min(void);
1829 + extern void mem_init(void);
1830 +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
1831 +index 85804ba622152..a90aba3d6afb4 100644
1832 +--- a/include/linux/mmzone.h
1833 ++++ b/include/linux/mmzone.h
1834 +@@ -822,10 +822,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order,
1835 + unsigned int alloc_flags);
1836 + bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
1837 + unsigned long mark, int classzone_idx);
1838 +-enum memmap_context {
1839 +- MEMMAP_EARLY,
1840 +- MEMMAP_HOTPLUG,
1841 ++/*
1842 ++ * Memory initialization context, use to differentiate memory added by
1843 ++ * the platform statically or via memory hotplug interface.
1844 ++ */
1845 ++enum meminit_context {
1846 ++ MEMINIT_EARLY,
1847 ++ MEMINIT_HOTPLUG,
1848 + };
1849 ++
1850 + extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
1851 + unsigned long size);
1852 +
1853 +diff --git a/include/linux/node.h b/include/linux/node.h
1854 +index 4866f32a02d8d..014ba3ab2efd8 100644
1855 +--- a/include/linux/node.h
1856 ++++ b/include/linux/node.h
1857 +@@ -99,11 +99,13 @@ extern struct node *node_devices[];
1858 + typedef void (*node_registration_func_t)(struct node *);
1859 +
1860 + #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
1861 +-extern int link_mem_sections(int nid, unsigned long start_pfn,
1862 +- unsigned long end_pfn);
1863 ++int link_mem_sections(int nid, unsigned long start_pfn,
1864 ++ unsigned long end_pfn,
1865 ++ enum meminit_context context);
1866 + #else
1867 + static inline int link_mem_sections(int nid, unsigned long start_pfn,
1868 +- unsigned long end_pfn)
1869 ++ unsigned long end_pfn,
1870 ++ enum meminit_context context)
1871 + {
1872 + return 0;
1873 + }
1874 +@@ -128,7 +130,8 @@ static inline int register_one_node(int nid)
1875 + if (error)
1876 + return error;
1877 + /* link memory sections under this node */
1878 +- error = link_mem_sections(nid, start_pfn, end_pfn);
1879 ++ error = link_mem_sections(nid, start_pfn, end_pfn,
1880 ++ MEMINIT_EARLY);
1881 + }
1882 +
1883 + return error;
1884 +diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
1885 +index 07875ccc7bb50..b139f76060a65 100644
1886 +--- a/include/linux/virtio_vsock.h
1887 ++++ b/include/linux/virtio_vsock.h
1888 +@@ -150,7 +150,8 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
1889 +
1890 + void virtio_transport_destruct(struct vsock_sock *vsk);
1891 +
1892 +-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt);
1893 ++void virtio_transport_recv_pkt(struct virtio_transport *t,
1894 ++ struct virtio_vsock_pkt *pkt);
1895 + void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
1896 + void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
1897 + u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
1898 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1899 +index 705852c1724aa..fbba31baef53c 100644
1900 +--- a/kernel/trace/ftrace.c
1901 ++++ b/kernel/trace/ftrace.c
1902 +@@ -6382,16 +6382,14 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
1903 + {
1904 + int bit;
1905 +
1906 +- if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
1907 +- return;
1908 +-
1909 + bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
1910 + if (bit < 0)
1911 + return;
1912 +
1913 + preempt_disable_notrace();
1914 +
1915 +- op->func(ip, parent_ip, op, regs);
1916 ++ if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
1917 ++ op->func(ip, parent_ip, op, regs);
1918 +
1919 + preempt_enable_notrace();
1920 + trace_clear_recursion(bit);
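
The ftrace hunk above moves the rcu_is_watching() test inside the recursion-protected, preemption-disabled region: the recursion bit is taken first, unconditionally, and only the call to op->func() is skipped when the op needs RCU and RCU is not watching. A reduced userspace sketch of the guard-first ordering with a thread-local flag standing in for the recursion bit (the preemption handling has no userspace analogue here):

#include <stdio.h>

/* Guard-first ordering, as in ftrace_ops_assist_func(): establish the
 * per-thread recursion protection before doing anything that might
 * re-enter this function, then apply the cheaper condition inside. */
static _Thread_local int in_handler;

static int condition_ok(void) { return 1; }  /* stands in for rcu_is_watching() */

static void handler(const char *event)
{
    if (in_handler)
        return;          /* recursion check comes first, unconditionally */
    in_handler = 1;

    if (condition_ok())  /* checked only once we are protected */
        printf("handling %s\n", event);

    in_handler = 0;
}

int main(void)
{
    handler("tick");
    return 0;
}
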
1921 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1922 +index db8162b34ef64..5b2a664812b10 100644
1923 +--- a/kernel/trace/trace.c
1924 ++++ b/kernel/trace/trace.c
1925 +@@ -3584,14 +3584,14 @@ unsigned long trace_total_entries(struct trace_array *tr)
1926 +
1927 + static void print_lat_help_header(struct seq_file *m)
1928 + {
1929 +- seq_puts(m, "# _------=> CPU# \n"
1930 +- "# / _-----=> irqs-off \n"
1931 +- "# | / _----=> need-resched \n"
1932 +- "# || / _---=> hardirq/softirq \n"
1933 +- "# ||| / _--=> preempt-depth \n"
1934 +- "# |||| / delay \n"
1935 +- "# cmd pid ||||| time | caller \n"
1936 +- "# \\ / ||||| \\ | / \n");
1937 ++ seq_puts(m, "# _------=> CPU# \n"
1938 ++ "# / _-----=> irqs-off \n"
1939 ++ "# | / _----=> need-resched \n"
1940 ++ "# || / _---=> hardirq/softirq \n"
1941 ++ "# ||| / _--=> preempt-depth \n"
1942 ++ "# |||| / delay \n"
1943 ++ "# cmd pid ||||| time | caller \n"
1944 ++ "# \\ / ||||| \\ | / \n");
1945 + }
1946 +
1947 + static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
1948 +@@ -3612,26 +3612,26 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
1949 +
1950 + print_event_info(buf, m);
1951 +
1952 +- seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
1953 +- seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
1954 ++ seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
1955 ++ seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
1956 + }
1957 +
1958 + static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
1959 + unsigned int flags)
1960 + {
1961 + bool tgid = flags & TRACE_ITER_RECORD_TGID;
1962 +- const char *space = " ";
1963 +- int prec = tgid ? 10 : 2;
1964 ++ const char *space = " ";
1965 ++ int prec = tgid ? 12 : 2;
1966 +
1967 + print_event_info(buf, m);
1968 +
1969 +- seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
1970 +- seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
1971 +- seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
1972 +- seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
1973 +- seq_printf(m, "# %.*s||| / delay\n", prec, space);
1974 +- seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
1975 +- seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
1976 ++ seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
1977 ++ seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
1978 ++ seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
1979 ++ seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
1980 ++ seq_printf(m, "# %.*s||| / delay\n", prec, space);
1981 ++ seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
1982 ++ seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
1983 + }
1984 +
1985 + void
1986 +diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
1987 +index d54ce252b05a8..a0a45901dc027 100644
1988 +--- a/kernel/trace/trace_output.c
1989 ++++ b/kernel/trace/trace_output.c
1990 +@@ -482,7 +482,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
1991 +
1992 + trace_find_cmdline(entry->pid, comm);
1993 +
1994 +- trace_seq_printf(s, "%8.8s-%-5d %3d",
1995 ++ trace_seq_printf(s, "%8.8s-%-7d %3d",
1996 + comm, entry->pid, cpu);
1997 +
1998 + return trace_print_lat_fmt(s, entry);
1999 +@@ -573,15 +573,15 @@ int trace_print_context(struct trace_iterator *iter)
2000 +
2001 + trace_find_cmdline(entry->pid, comm);
2002 +
2003 +- trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
2004 ++ trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid);
2005 +
2006 + if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
2007 + unsigned int tgid = trace_find_tgid(entry->pid);
2008 +
2009 + if (!tgid)
2010 +- trace_seq_printf(s, "(-----) ");
2011 ++ trace_seq_printf(s, "(-------) ");
2012 + else
2013 +- trace_seq_printf(s, "(%5d) ", tgid);
2014 ++ trace_seq_printf(s, "(%7d) ", tgid);
2015 + }
2016 +
2017 + trace_seq_printf(s, "[%03d] ", iter->cpu);
2018 +@@ -624,7 +624,7 @@ int trace_print_lat_context(struct trace_iterator *iter)
2019 + trace_find_cmdline(entry->pid, comm);
2020 +
2021 + trace_seq_printf(
2022 +- s, "%16s %5d %3d %d %08x %08lx ",
2023 ++ s, "%16s %7d %3d %d %08x %08lx ",
2024 + comm, entry->pid, iter->cpu, entry->flags,
2025 + entry->preempt_count, iter->idx);
2026 + } else {
2027 +@@ -905,7 +905,7 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
2028 + S = task_index_to_char(field->prev_state);
2029 + trace_find_cmdline(field->next_pid, comm);
2030 + trace_seq_printf(&iter->seq,
2031 +- " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
2032 ++ " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n",
2033 + field->prev_pid,
2034 + field->prev_prio,
2035 + S, delim,
2036 +diff --git a/lib/random32.c b/lib/random32.c
2037 +index 3d749abb9e80d..1786f78bf4c53 100644
2038 +--- a/lib/random32.c
2039 ++++ b/lib/random32.c
2040 +@@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void)
2041 + }
2042 + #endif
2043 +
2044 +-DEFINE_PER_CPU(struct rnd_state, net_rand_state);
2045 ++DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
2046 +
2047 + /**
2048 + * prandom_u32_state - seeded pseudo-random number generator.
2049 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
2050 +index 3eb0b311b4a12..308beca3ffebc 100644
2051 +--- a/mm/memory_hotplug.c
2052 ++++ b/mm/memory_hotplug.c
2053 +@@ -725,7 +725,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
2054 + * are reserved so nobody should be touching them so we should be safe
2055 + */
2056 + memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
2057 +- MEMMAP_HOTPLUG, altmap);
2058 ++ MEMINIT_HOTPLUG, altmap);
2059 +
2060 + set_zone_contiguous(zone);
2061 + }
2062 +@@ -1082,7 +1082,8 @@ int __ref add_memory_resource(int nid, struct resource *res)
2063 + }
2064 +
2065 + /* link memory sections under this node.*/
2066 +- ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1));
2067 ++ ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
2068 ++ MEMINIT_HOTPLUG);
2069 + BUG_ON(ret);
2070 +
2071 + /* create new memmap entry */
2072 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2073 +index 67a9943aa595f..373ca57807589 100644
2074 +--- a/mm/page_alloc.c
2075 ++++ b/mm/page_alloc.c
2076 +@@ -5875,7 +5875,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
2077 + * done. Non-atomic initialization, single-pass.
2078 + */
2079 + void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2080 +- unsigned long start_pfn, enum memmap_context context,
2081 ++ unsigned long start_pfn, enum meminit_context context,
2082 + struct vmem_altmap *altmap)
2083 + {
2084 + unsigned long pfn, end_pfn = start_pfn + size;
2085 +@@ -5907,7 +5907,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2086 + * There can be holes in boot-time mem_map[]s handed to this
2087 + * function. They do not exist on hotplugged memory.
2088 + */
2089 +- if (context == MEMMAP_EARLY) {
2090 ++ if (context == MEMINIT_EARLY) {
2091 + if (!early_pfn_valid(pfn))
2092 + continue;
2093 + if (!early_pfn_in_nid(pfn, nid))
2094 +@@ -5920,7 +5920,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2095 +
2096 + page = pfn_to_page(pfn);
2097 + __init_single_page(page, pfn, zone, nid);
2098 +- if (context == MEMMAP_HOTPLUG)
2099 ++ if (context == MEMINIT_HOTPLUG)
2100 + __SetPageReserved(page);
2101 +
2102 + /*
2103 +@@ -6002,7 +6002,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
2104 + * check here not to call set_pageblock_migratetype() against
2105 + * pfn out of zone.
2106 + *
2107 +- * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
2108 ++ * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
2109 + * because this is done early in section_activate()
2110 + */
2111 + if (!(pfn & (pageblock_nr_pages - 1))) {
2112 +@@ -6028,7 +6028,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
2113 + void __meminit __weak memmap_init(unsigned long size, int nid,
2114 + unsigned long zone, unsigned long start_pfn)
2115 + {
2116 +- memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL);
2117 ++ memmap_init_zone(size, nid, zone, start_pfn, MEMINIT_EARLY, NULL);
2118 + }
2119 +
2120 + static int zone_batchsize(struct zone *zone)
2121 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
2122 +index e5fb9002d3147..3ab85e1e38d82 100644
2123 +--- a/net/mac80211/rx.c
2124 ++++ b/net/mac80211/rx.c
2125 +@@ -419,7 +419,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
2126 + else if (status->bw == RATE_INFO_BW_5)
2127 + channel_flags |= IEEE80211_CHAN_QUARTER;
2128 +
2129 +- if (status->band == NL80211_BAND_5GHZ)
2130 ++ if (status->band == NL80211_BAND_5GHZ ||
2131 ++ status->band == NL80211_BAND_6GHZ)
2132 + channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
2133 + else if (status->encoding != RX_ENC_LEGACY)
2134 + channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
2135 +diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
2136 +index ccdcb9ad9ac72..aabc63dadf176 100644
2137 +--- a/net/mac80211/vht.c
2138 ++++ b/net/mac80211/vht.c
2139 +@@ -168,10 +168,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
2140 + /* take some capabilities as-is */
2141 + cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info);
2142 + vht_cap->cap = cap_info;
2143 +- vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
2144 +- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
2145 +- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
2146 +- IEEE80211_VHT_CAP_RXLDPC |
2147 ++ vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC |
2148 + IEEE80211_VHT_CAP_VHT_TXOP_PS |
2149 + IEEE80211_VHT_CAP_HTC_VHT |
2150 + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
2151 +@@ -180,6 +177,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
2152 + IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
2153 + IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
2154 +
2155 ++ vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK,
2156 ++ own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK);
2157 ++
2158 + /* and some based on our own capabilities */
2159 + switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
2160 + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
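
Instead of copying whichever of the three max-MPDU-length bits the peer advertises, the hunk above takes min_t() of the peer's masked field and our own: the field is an ordered enumeration (0 = 3895, 1 = 7991, 2 = 11454 bytes), so the smaller raw value is the larger length both sides support. A sketch of that min-of-capabilities negotiation, assuming the two-bit encoding just described:

#include <stdio.h>

/* Negotiate an enumerated capability by taking the smaller of the two
 * advertised values, as the IEEE80211_VHT_CAP_MAX_MPDU_MASK fix does.
 * Codes here follow the VHT encoding: 0=3895, 1=7991, 2=11454 bytes. */
#define MAX_MPDU_MASK 0x3

static unsigned int negotiate_mpdu(unsigned int peer_cap, unsigned int own_cap)
{
    unsigned int peer = peer_cap & MAX_MPDU_MASK;
    unsigned int own  = own_cap  & MAX_MPDU_MASK;
    return peer < own ? peer : own;
}

int main(void)
{
    /* peer advertises 11454-byte MPDUs, we only support 7991 */
    printf("negotiated code: %u\n", negotiate_mpdu(2, 1));  /* -> 1 */
    return 0;
}
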
2161 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
2162 +index aa8adf930b3ce..b7f0d52e5f1b6 100644
2163 +--- a/net/netfilter/nf_conntrack_netlink.c
2164 ++++ b/net/netfilter/nf_conntrack_netlink.c
2165 +@@ -1141,6 +1141,8 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
2166 + if (!tb[CTA_TUPLE_IP])
2167 + return -EINVAL;
2168 +
2169 ++ if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
2170 ++ return -EOPNOTSUPP;
2171 + tuple->src.l3num = l3num;
2172 +
2173 + err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
2174 +diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
2175 +index 861ec9a671f9d..5905f0cddc895 100644
2176 +--- a/net/vmw_vsock/virtio_transport.c
2177 ++++ b/net/vmw_vsock/virtio_transport.c
2178 +@@ -86,33 +86,6 @@ out_rcu:
2179 + return ret;
2180 + }
2181 +
2182 +-static void virtio_transport_loopback_work(struct work_struct *work)
2183 +-{
2184 +- struct virtio_vsock *vsock =
2185 +- container_of(work, struct virtio_vsock, loopback_work);
2186 +- LIST_HEAD(pkts);
2187 +-
2188 +- spin_lock_bh(&vsock->loopback_list_lock);
2189 +- list_splice_init(&vsock->loopback_list, &pkts);
2190 +- spin_unlock_bh(&vsock->loopback_list_lock);
2191 +-
2192 +- mutex_lock(&vsock->rx_lock);
2193 +-
2194 +- if (!vsock->rx_run)
2195 +- goto out;
2196 +-
2197 +- while (!list_empty(&pkts)) {
2198 +- struct virtio_vsock_pkt *pkt;
2199 +-
2200 +- pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
2201 +- list_del_init(&pkt->list);
2202 +-
2203 +- virtio_transport_recv_pkt(pkt);
2204 +- }
2205 +-out:
2206 +- mutex_unlock(&vsock->rx_lock);
2207 +-}
2208 +-
2209 + static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
2210 + struct virtio_vsock_pkt *pkt)
2211 + {
2212 +@@ -370,59 +343,6 @@ static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
2213 + return val < virtqueue_get_vring_size(vq);
2214 + }
2215 +
2216 +-static void virtio_transport_rx_work(struct work_struct *work)
2217 +-{
2218 +- struct virtio_vsock *vsock =
2219 +- container_of(work, struct virtio_vsock, rx_work);
2220 +- struct virtqueue *vq;
2221 +-
2222 +- vq = vsock->vqs[VSOCK_VQ_RX];
2223 +-
2224 +- mutex_lock(&vsock->rx_lock);
2225 +-
2226 +- if (!vsock->rx_run)
2227 +- goto out;
2228 +-
2229 +- do {
2230 +- virtqueue_disable_cb(vq);
2231 +- for (;;) {
2232 +- struct virtio_vsock_pkt *pkt;
2233 +- unsigned int len;
2234 +-
2235 +- if (!virtio_transport_more_replies(vsock)) {
2236 +- /* Stop rx until the device processes already
2237 +- * pending replies. Leave rx virtqueue
2238 +- * callbacks disabled.
2239 +- */
2240 +- goto out;
2241 +- }
2242 +-
2243 +- pkt = virtqueue_get_buf(vq, &len);
2244 +- if (!pkt) {
2245 +- break;
2246 +- }
2247 +-
2248 +- vsock->rx_buf_nr--;
2249 +-
2250 +- /* Drop short/long packets */
2251 +- if (unlikely(len < sizeof(pkt->hdr) ||
2252 +- len > sizeof(pkt->hdr) + pkt->len)) {
2253 +- virtio_transport_free_pkt(pkt);
2254 +- continue;
2255 +- }
2256 +-
2257 +- pkt->len = len - sizeof(pkt->hdr);
2258 +- virtio_transport_deliver_tap_pkt(pkt);
2259 +- virtio_transport_recv_pkt(pkt);
2260 +- }
2261 +- } while (!virtqueue_enable_cb(vq));
2262 +-
2263 +-out:
2264 +- if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
2265 +- virtio_vsock_rx_fill(vsock);
2266 +- mutex_unlock(&vsock->rx_lock);
2267 +-}
2268 +-
2269 + /* event_lock must be held */
2270 + static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
2271 + struct virtio_vsock_event *event)
2272 +@@ -586,6 +506,86 @@ static struct virtio_transport virtio_transport = {
2273 + .send_pkt = virtio_transport_send_pkt,
2274 + };
2275 +
2276 ++static void virtio_transport_loopback_work(struct work_struct *work)
2277 ++{
2278 ++ struct virtio_vsock *vsock =
2279 ++ container_of(work, struct virtio_vsock, loopback_work);
2280 ++ LIST_HEAD(pkts);
2281 ++
2282 ++ spin_lock_bh(&vsock->loopback_list_lock);
2283 ++ list_splice_init(&vsock->loopback_list, &pkts);
2284 ++ spin_unlock_bh(&vsock->loopback_list_lock);
2285 ++
2286 ++ mutex_lock(&vsock->rx_lock);
2287 ++
2288 ++ if (!vsock->rx_run)
2289 ++ goto out;
2290 ++
2291 ++ while (!list_empty(&pkts)) {
2292 ++ struct virtio_vsock_pkt *pkt;
2293 ++
2294 ++ pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
2295 ++ list_del_init(&pkt->list);
2296 ++
2297 ++ virtio_transport_recv_pkt(&virtio_transport, pkt);
2298 ++ }
2299 ++out:
2300 ++ mutex_unlock(&vsock->rx_lock);
2301 ++}
2302 ++
2303 ++static void virtio_transport_rx_work(struct work_struct *work)
2304 ++{
2305 ++ struct virtio_vsock *vsock =
2306 ++ container_of(work, struct virtio_vsock, rx_work);
2307 ++ struct virtqueue *vq;
2308 ++
2309 ++ vq = vsock->vqs[VSOCK_VQ_RX];
2310 ++
2311 ++ mutex_lock(&vsock->rx_lock);
2312 ++
2313 ++ if (!vsock->rx_run)
2314 ++ goto out;
2315 ++
2316 ++ do {
2317 ++ virtqueue_disable_cb(vq);
2318 ++ for (;;) {
2319 ++ struct virtio_vsock_pkt *pkt;
2320 ++ unsigned int len;
2321 ++
2322 ++ if (!virtio_transport_more_replies(vsock)) {
2323 ++ /* Stop rx until the device processes already
2324 ++ * pending replies. Leave rx virtqueue
2325 ++ * callbacks disabled.
2326 ++ */
2327 ++ goto out;
2328 ++ }
2329 ++
2330 ++ pkt = virtqueue_get_buf(vq, &len);
2331 ++ if (!pkt) {
2332 ++ break;
2333 ++ }
2334 ++
2335 ++ vsock->rx_buf_nr--;
2336 ++
2337 ++ /* Drop short/long packets */
2338 ++ if (unlikely(len < sizeof(pkt->hdr) ||
2339 ++ len > sizeof(pkt->hdr) + pkt->len)) {
2340 ++ virtio_transport_free_pkt(pkt);
2341 ++ continue;
2342 ++ }
2343 ++
2344 ++ pkt->len = len - sizeof(pkt->hdr);
2345 ++ virtio_transport_deliver_tap_pkt(pkt);
2346 ++ virtio_transport_recv_pkt(&virtio_transport, pkt);
2347 ++ }
2348 ++ } while (!virtqueue_enable_cb(vq));
2349 ++
2350 ++out:
2351 ++ if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
2352 ++ virtio_vsock_rx_fill(vsock);
2353 ++ mutex_unlock(&vsock->rx_lock);
2354 ++}
2355 ++
2356 + static int virtio_vsock_probe(struct virtio_device *vdev)
2357 + {
2358 + vq_callback_t *callbacks[] = {
2359 +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
2360 +index fb2060dffb0af..efbb521bff135 100644
2361 +--- a/net/vmw_vsock/virtio_transport_common.c
2362 ++++ b/net/vmw_vsock/virtio_transport_common.c
2363 +@@ -696,9 +696,9 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
2364 + /* Normally packets are associated with a socket. There may be no socket if an
2365 + * attempt was made to connect to a socket that does not exist.
2366 + */
2367 +-static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
2368 ++static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
2369 ++ struct virtio_vsock_pkt *pkt)
2370 + {
2371 +- const struct virtio_transport *t;
2372 + struct virtio_vsock_pkt *reply;
2373 + struct virtio_vsock_pkt_info info = {
2374 + .op = VIRTIO_VSOCK_OP_RST,
2375 +@@ -718,7 +718,6 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
2376 + if (!reply)
2377 + return -ENOMEM;
2378 +
2379 +- t = virtio_transport_get_ops();
2380 + if (!t) {
2381 + virtio_transport_free_pkt(reply);
2382 + return -ENOTCONN;
2383 +@@ -1060,7 +1059,8 @@ static bool virtio_transport_space_update(struct sock *sk,
2384 + /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
2385 + * lock.
2386 + */
2387 +-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
2388 ++void virtio_transport_recv_pkt(struct virtio_transport *t,
2389 ++ struct virtio_vsock_pkt *pkt)
2390 + {
2391 + struct sockaddr_vm src, dst;
2392 + struct vsock_sock *vsk;
2393 +@@ -1082,7 +1082,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
2394 + le32_to_cpu(pkt->hdr.fwd_cnt));
2395 +
2396 + if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
2397 +- (void)virtio_transport_reset_no_sock(pkt);
2398 ++ (void)virtio_transport_reset_no_sock(t, pkt);
2399 + goto free_pkt;
2400 + }
2401 +
2402 +@@ -1093,7 +1093,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
2403 + if (!sk) {
2404 + sk = vsock_find_bound_socket(&dst);
2405 + if (!sk) {
2406 +- (void)virtio_transport_reset_no_sock(pkt);
2407 ++ (void)virtio_transport_reset_no_sock(t, pkt);
2408 + goto free_pkt;
2409 + }
2410 + }
2411 +@@ -1127,6 +1127,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
2412 + virtio_transport_free_pkt(pkt);
2413 + break;
2414 + default:
2415 ++ (void)virtio_transport_reset_no_sock(t, pkt);
2416 + virtio_transport_free_pkt(pkt);
2417 + break;
2418 + }
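
The common-code hunks above thread the transport through as a parameter: virtio_transport_reset_no_sock() previously resolved the ops via virtio_transport_get_ops(), which goes through the socket, yet this path runs precisely when no socket exists for the packet. The caller (virtio-vsock, vhost-vsock, or the loopback path) now supplies its own transport, and the default case in recv_pkt sends a reset instead of silently dropping. A sketch of the pattern, passing an ops structure explicitly rather than recovering it from a possibly-absent object (all names hypothetical):

#include <stdio.h>
#include <stddef.h>

/* Ops table passed explicitly, as virtio_transport_recv_pkt() now does,
 * so a packet with no associated socket can still be answered. */
struct transport {
    int (*send_reset)(const char *why);
};

static int send_reset(const char *why)
{
    printf("RST: %s\n", why);
    return 0;
}

static void recv_pkt(struct transport *t, void *sock, int pkt_type)
{
    if (pkt_type != 0 /* stream */ || sock == NULL) {
        /* Before the fix this path looked the ops up through the socket,
         * which is exactly what may not exist here. */
        t->send_reset(sock ? "bad type" : "no socket");
        return;
    }
    printf("delivered\n");
}

int main(void)
{
    struct transport t = { .send_reset = send_reset };
    recv_pkt(&t, NULL, 0);   /* no bound socket: reset sent, no NULL deref */
    return 0;
}
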
2419 +diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile
2420 +index b5a5b1c548c9b..c2dac994896b4 100644
2421 +--- a/scripts/dtc/Makefile
2422 ++++ b/scripts/dtc/Makefile
2423 +@@ -9,7 +9,7 @@ dtc-objs := dtc.o flattree.o fstree.o data.o livetree.o treesource.o \
2424 + dtc-objs += dtc-lexer.lex.o dtc-parser.tab.o
2425 +
2426 + # Source files need to get at the userspace version of libfdt_env.h to compile
2427 +-HOST_EXTRACFLAGS := -I $(srctree)/$(src)/libfdt
2428 ++HOST_EXTRACFLAGS += -I $(srctree)/$(src)/libfdt
2429 +
2430 + ifeq ($(shell pkg-config --exists yaml-0.1 2>/dev/null && echo yes),)
2431 + ifneq ($(CHECK_DTBS),)
2432 +diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c
2433 +index 0f257139b003e..7703f01183854 100644
2434 +--- a/tools/io_uring/io_uring-bench.c
2435 ++++ b/tools/io_uring/io_uring-bench.c
2436 +@@ -130,7 +130,7 @@ static int io_uring_register_files(struct submitter *s)
2437 + s->nr_files);
2438 + }
2439 +
2440 +-static int gettid(void)
2441 ++static int lk_gettid(void)
2442 + {
2443 + return syscall(__NR_gettid);
2444 + }
2445 +@@ -281,7 +281,7 @@ static void *submitter_fn(void *data)
2446 + struct io_sq_ring *ring = &s->sq_ring;
2447 + int ret, prepped;
2448 +
2449 +- printf("submitter=%d\n", gettid());
2450 ++ printf("submitter=%d\n", lk_gettid());
2451 +
2452 + srand48_r(pthread_self(), &s->rand);
2453 +
2454 +diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
2455 +index d045707e7c9a4..283caeaaffc30 100644
2456 +--- a/tools/lib/bpf/Makefile
2457 ++++ b/tools/lib/bpf/Makefile
2458 +@@ -59,7 +59,7 @@ FEATURE_USER = .libbpf
2459 + FEATURE_TESTS = libelf libelf-mmap bpf reallocarray cxx
2460 + FEATURE_DISPLAY = libelf bpf
2461 +
2462 +-INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
2463 ++INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
2464 + FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)
2465 +
2466 + check_feat := 1