Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 01 Dec 2021 12:51:10
Message-Id: 1638363044.4e25534a85d84d184b0dc1e699be423fd9d9b85b.mpagano@gentoo
commit:     4e25534a85d84d184b0dc1e699be423fd9d9b85b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 1 12:50:44 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec 1 12:50:44 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4e25534a

Linux patch 4.19.219

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1218_linux-4.19.219.patch | 3456 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3460 insertions(+)

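As the diffstat above shows, the commit adds 1218_linux-4.19.219.patch and lists it in 0000_README, which documents the patch series that gentoo-sources applies in numeric order on top of a vanilla 4.19 tree. For anyone reproducing that step by hand, a rough sketch follows; the directory layout and the plain patch(1) loop are illustrative assumptions of the example, not part of this commit:

  # Assumed layout: a clean linux-4.19 tree with this repository checked
  # out next to it as ../linux-patches. This naive loop applies every
  # numbered patch, including any experimental 5xxx ones that the ebuild
  # would only apply behind a USE flag.
  cd linux-4.19
  for p in ../linux-patches/[0-9]*.patch; do
          patch -p1 -s < "$p" || break   # genpatches are -p1 applicable, numeric order
  done
  make kernelversion                     # should now report 4.19.219
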
16 diff --git a/0000_README b/0000_README
17 index 2c528d2c..c2e992ef 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -911,6 +911,10 @@ Patch: 1217_linux-4.19.218.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.218
23
24 +Patch: 1218_linux-4.19.219.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.219
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1218_linux-4.19.219.patch b/1218_linux-4.19.219.patch
33 new file mode 100644
34 index 00000000..a394c820
35 --- /dev/null
36 +++ b/1218_linux-4.19.219.patch
37 @@ -0,0 +1,3456 @@
38 +diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
39 +index c7c088d2dd503..fb8ec9b0f8c70 100644
40 +--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
41 ++++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
42 +@@ -43,26 +43,26 @@ group emmc_nb
43 +
44 + group pwm0
45 + - pin 11 (GPIO1-11)
46 +- - functions pwm, gpio
47 ++ - functions pwm, led, gpio
48 +
49 + group pwm1
50 + - pin 12
51 +- - functions pwm, gpio
52 ++ - functions pwm, led, gpio
53 +
54 + group pwm2
55 + - pin 13
56 +- - functions pwm, gpio
57 ++ - functions pwm, led, gpio
58 +
59 + group pwm3
60 + - pin 14
61 +- - functions pwm, gpio
62 ++ - functions pwm, led, gpio
63 +
64 + group pmic1
65 +- - pin 17
66 ++ - pin 7
67 + - functions pmic, gpio
68 +
69 + group pmic0
70 +- - pin 16
71 ++ - pin 6
72 + - functions pmic, gpio
73 +
74 + group i2c2
75 +@@ -112,17 +112,25 @@ group usb2_drvvbus1
76 + - functions drvbus, gpio
77 +
78 + group sdio_sb
79 +- - pins 60-64
80 ++ - pins 60-65
81 + - functions sdio, gpio
82 +
83 + group rgmii
84 +- - pins 42-55
85 ++ - pins 42-53
86 + - functions mii, gpio
87 +
88 + group pcie1
89 +- - pins 39-40
90 ++ - pins 39
91 ++ - functions pcie, gpio
92 ++
93 ++group pcie1_clkreq
94 ++ - pins 40
95 + - functions pcie, gpio
96 +
97 ++group smi
98 ++ - pins 54-55
99 ++ - functions smi, gpio
100 ++
101 + group ptp
102 + - pins 56-58
103 + - functions ptp, gpio
104 +diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
105 +index 056898685d408..fc531c29a2e83 100644
106 +--- a/Documentation/networking/ipvs-sysctl.txt
107 ++++ b/Documentation/networking/ipvs-sysctl.txt
108 +@@ -30,8 +30,7 @@ conn_reuse_mode - INTEGER
109 +
110 + 0: disable any special handling on port reuse. The new
111 + connection will be delivered to the same real server that was
112 +- servicing the previous connection. This will effectively
113 +- disable expire_nodest_conn.
114 ++ servicing the previous connection.
115 +
116 + bit 1: enable rescheduling of new connections when it is safe.
117 + That is, whenever expire_nodest_conn and for TCP sockets, when
118 +diff --git a/Makefile b/Makefile
119 +index 455ba411998f9..310cc8508b9e8 100644
120 +--- a/Makefile
121 ++++ b/Makefile
122 +@@ -1,7 +1,7 @@
123 + # SPDX-License-Identifier: GPL-2.0
124 + VERSION = 4
125 + PATCHLEVEL = 19
126 +-SUBLEVEL = 218
127 ++SUBLEVEL = 219
128 + EXTRAVERSION =
129 + NAME = "People's Front"
130 +
131 +diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
132 +index fa3422c4caec1..6edc4bd1e7eaf 100644
133 +--- a/arch/arm/boot/dts/bcm5301x.dtsi
134 ++++ b/arch/arm/boot/dts/bcm5301x.dtsi
135 +@@ -239,6 +239,8 @@
136 +
137 + gpio-controller;
138 + #gpio-cells = <2>;
139 ++ interrupt-controller;
140 ++ #interrupt-cells = <2>;
141 + };
142 +
143 + pcie0: pcie@12000 {
144 +@@ -384,7 +386,7 @@
145 + i2c0: i2c@18009000 {
146 + compatible = "brcm,iproc-i2c";
147 + reg = <0x18009000 0x50>;
148 +- interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
149 ++ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
150 + #address-cells = <1>;
151 + #size-cells = <0>;
152 + clock-frequency = <100000>;
153 +diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
154 +index f854148c8d7c2..00baa13c158d7 100644
155 +--- a/arch/arm/include/asm/tlb.h
156 ++++ b/arch/arm/include/asm/tlb.h
157 +@@ -280,6 +280,14 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr
158 + tlb_add_flush(tlb, addr);
159 + }
160 +
161 ++static inline void
162 ++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
163 ++ unsigned long size)
164 ++{
165 ++ tlb_add_flush(tlb, address);
166 ++ tlb_add_flush(tlb, address + size - PMD_SIZE);
167 ++}
168 ++
169 + #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
170 + #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
171 + #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
172 +diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
173 +index 65e1817d8afe6..692a287a8712d 100644
174 +--- a/arch/arm/mach-socfpga/core.h
175 ++++ b/arch/arm/mach-socfpga/core.h
176 +@@ -48,7 +48,7 @@ extern void __iomem *sdr_ctl_base_addr;
177 + u32 socfpga_sdram_self_refresh(u32 sdr_base);
178 + extern unsigned int socfpga_sdram_self_refresh_sz;
179 +
180 +-extern char secondary_trampoline, secondary_trampoline_end;
181 ++extern char secondary_trampoline[], secondary_trampoline_end[];
182 +
183 + extern unsigned long socfpga_cpu1start_addr;
184 +
185 +diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
186 +index 0ee76772b5074..a272999ce04b9 100644
187 +--- a/arch/arm/mach-socfpga/platsmp.c
188 ++++ b/arch/arm/mach-socfpga/platsmp.c
189 +@@ -31,14 +31,14 @@
190 +
191 + static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
192 + {
193 +- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
194 ++ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
195 +
196 + if (socfpga_cpu1start_addr) {
197 + /* This will put CPU #1 into reset. */
198 + writel(RSTMGR_MPUMODRST_CPU1,
199 + rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
200 +
201 +- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
202 ++ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
203 +
204 + writel(__pa_symbol(secondary_startup),
205 + sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
206 +@@ -56,12 +56,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
207 +
208 + static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
209 + {
210 +- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
211 ++ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
212 +
213 + if (socfpga_cpu1start_addr) {
214 + writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
215 + SOCFPGA_A10_RSTMGR_MODMPURST);
216 +- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
217 ++ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
218 +
219 + writel(__pa_symbol(secondary_startup),
220 + sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
221 +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
222 +index f2cc00594d64a..3e5789f372069 100644
223 +--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
224 ++++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
225 +@@ -128,6 +128,9 @@
226 +
227 + /* CON15(V2.0)/CON17(V1.4) : PCIe / CON15(V2.0)/CON12(V1.4) :mini-PCIe */
228 + &pcie0 {
229 ++ pinctrl-names = "default";
230 ++ pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
231 ++ reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
232 + status = "okay";
233 + };
234 +
235 +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
236 +index 1a3e6e3b04eba..f360891982434 100644
237 +--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
238 ++++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
239 +@@ -55,6 +55,9 @@
240 +
241 + /* J9 */
242 + &pcie0 {
243 ++ pinctrl-names = "default";
244 ++ pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
245 ++ reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
246 + status = "okay";
247 + };
248 +
249 +diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
250 +index fca78eb334b19..7500be1a11a3c 100644
251 +--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
252 ++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
253 +@@ -254,6 +254,15 @@
254 + function = "mii";
255 + };
256 +
257 ++ pcie_reset_pins: pcie-reset-pins {
258 ++ groups = "pcie1";
259 ++ function = "gpio";
260 ++ };
261 ++
262 ++ pcie_clkreq_pins: pcie-clkreq-pins {
263 ++ groups = "pcie1_clkreq";
264 ++ function = "pcie";
265 ++ };
266 + };
267 +
268 + eth0: ethernet@30000 {
269 +diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
270 +index 516355a774bfe..5d032d97c254e 100644
271 +--- a/arch/ia64/include/asm/tlb.h
272 ++++ b/arch/ia64/include/asm/tlb.h
273 +@@ -268,6 +268,16 @@ __tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long addre
274 + tlb->end_addr = address + PAGE_SIZE;
275 + }
276 +
277 ++static inline void
278 ++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
279 ++ unsigned long size)
280 ++{
281 ++ if (tlb->start_addr > address)
282 ++ tlb->start_addr = address;
283 ++ if (tlb->end_addr < address + size)
284 ++ tlb->end_addr = address + size;
285 ++}
286 ++
287 + #define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
288 +
289 + #define tlb_start_vma(tlb, vma) do { } while (0)
290 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
291 +index e513528be3ad7..8a227a80f6bd5 100644
292 +--- a/arch/mips/Kconfig
293 ++++ b/arch/mips/Kconfig
294 +@@ -2991,7 +2991,7 @@ config HAVE_LATENCYTOP_SUPPORT
295 + config PGTABLE_LEVELS
296 + int
297 + default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
298 +- default 3 if 64BIT && !PAGE_SIZE_64KB
299 ++ default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
300 + default 2
301 +
302 + config MIPS_AUTO_PFN_OFFSET
303 +diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
304 +index b31c779cf5817..1df28a8e2f19e 100644
305 +--- a/arch/s390/include/asm/tlb.h
306 ++++ b/arch/s390/include/asm/tlb.h
307 +@@ -116,6 +116,20 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
308 + return tlb_remove_page(tlb, page);
309 + }
310 +
311 ++static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
312 ++ unsigned long address, unsigned long size)
313 ++{
314 ++ /*
315 ++ * the range might exceed the original range that was provided to
316 ++ * tlb_gather_mmu(), so we need to update it despite the fact it is
317 ++ * usually not updated.
318 ++ */
319 ++ if (tlb->start > address)
320 ++ tlb->start = address;
321 ++ if (tlb->end < address + size)
322 ++ tlb->end = address + size;
323 ++}
324 ++
325 + /*
326 + * pte_free_tlb frees a pte table and clears the CRSTE for the
327 + * page table from the tlb.
328 +@@ -177,6 +191,8 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
329 + #define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
330 + #define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0)
331 + #define tlb_migrate_finish(mm) do { } while (0)
332 ++#define tlb_flush_pmd_range(tlb, addr, sz) do { } while (0)
333 ++
334 + #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
335 + tlb_remove_tlb_entry(tlb, ptep, address)
336 +
337 +diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
338 +index ae894ac83fd61..4354ac6077503 100644
339 +--- a/arch/s390/mm/pgtable.c
340 ++++ b/arch/s390/mm/pgtable.c
341 +@@ -970,6 +970,7 @@ EXPORT_SYMBOL(get_guest_storage_key);
342 + int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
343 + unsigned long *oldpte, unsigned long *oldpgste)
344 + {
345 ++ struct vm_area_struct *vma;
346 + unsigned long pgstev;
347 + spinlock_t *ptl;
348 + pgste_t pgste;
349 +@@ -979,6 +980,10 @@ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
350 + WARN_ON_ONCE(orc > ESSA_MAX);
351 + if (unlikely(orc > ESSA_MAX))
352 + return -EINVAL;
353 ++
354 ++ vma = find_vma(mm, hva);
355 ++ if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
356 ++ return -EFAULT;
357 + ptep = get_locked_pte(mm, hva, &ptl);
358 + if (unlikely(!ptep))
359 + return -EFAULT;
360 +@@ -1071,10 +1076,14 @@ EXPORT_SYMBOL(pgste_perform_essa);
361 + int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
362 + unsigned long bits, unsigned long value)
363 + {
364 ++ struct vm_area_struct *vma;
365 + spinlock_t *ptl;
366 + pgste_t new;
367 + pte_t *ptep;
368 +
369 ++ vma = find_vma(mm, hva);
370 ++ if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
371 ++ return -EFAULT;
372 + ptep = get_locked_pte(mm, hva, &ptl);
373 + if (unlikely(!ptep))
374 + return -EFAULT;
375 +@@ -1099,9 +1108,13 @@ EXPORT_SYMBOL(set_pgste_bits);
376 + */
377 + int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
378 + {
379 ++ struct vm_area_struct *vma;
380 + spinlock_t *ptl;
381 + pte_t *ptep;
382 +
383 ++ vma = find_vma(mm, hva);
384 ++ if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
385 ++ return -EFAULT;
386 + ptep = get_locked_pte(mm, hva, &ptl);
387 + if (unlikely(!ptep))
388 + return -EFAULT;
389 +diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
390 +index 77abe192fb43d..adcb0bfe238e3 100644
391 +--- a/arch/sh/include/asm/tlb.h
392 ++++ b/arch/sh/include/asm/tlb.h
393 +@@ -127,6 +127,16 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
394 + return tlb_remove_page(tlb, page);
395 + }
396 +
397 ++static inline void
398 ++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
399 ++ unsigned long size)
400 ++{
401 ++ if (tlb->start > address)
402 ++ tlb->start = address;
403 ++ if (tlb->end < address + size)
404 ++ tlb->end = address + size;
405 ++}
406 ++
407 + #define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
408 + static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
409 + unsigned int page_size)
410 +diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
411 +index dce6db147f245..02e61f6abfcab 100644
412 +--- a/arch/um/include/asm/tlb.h
413 ++++ b/arch/um/include/asm/tlb.h
414 +@@ -130,6 +130,18 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
415 + return tlb_remove_page(tlb, page);
416 + }
417 +
418 ++static inline void
419 ++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
420 ++ unsigned long size)
421 ++{
422 ++ tlb->need_flush = 1;
423 ++
424 ++ if (tlb->start > address)
425 ++ tlb->start = address;
426 ++ if (tlb->end < address + size)
427 ++ tlb->end = address + size;
428 ++}
429 ++
430 + /**
431 + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
432 + *
433 +diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
434 +index 7111280c88422..2d3e0cca9ba0f 100644
435 +--- a/arch/xtensa/include/asm/vectors.h
436 ++++ b/arch/xtensa/include/asm/vectors.h
437 +@@ -31,7 +31,7 @@
438 + #endif
439 + #define XCHAL_KIO_SIZE 0x10000000
440 +
441 +-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
442 ++#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
443 + #define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
444 + #ifndef __ASSEMBLY__
445 + extern unsigned long xtensa_kio_paddr;
446 +diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
447 +index 6a0167ac803c6..901990b8296c1 100644
448 +--- a/arch/xtensa/kernel/setup.c
449 ++++ b/arch/xtensa/kernel/setup.c
450 +@@ -65,7 +65,7 @@ int initrd_is_mapped = 0;
451 + extern int initrd_below_start_ok;
452 + #endif
453 +
454 +-#ifdef CONFIG_OF
455 ++#ifdef CONFIG_USE_OF
456 + void *dtb_start = __dtb_start;
457 + #endif
458 +
459 +@@ -127,7 +127,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
460 +
461 + #endif /* CONFIG_BLK_DEV_INITRD */
462 +
463 +-#ifdef CONFIG_OF
464 ++#ifdef CONFIG_USE_OF
465 +
466 + static int __init parse_tag_fdt(const bp_tag_t *tag)
467 + {
468 +@@ -137,7 +137,7 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
469 +
470 + __tagtable(BP_TAG_FDT, parse_tag_fdt);
471 +
472 +-#endif /* CONFIG_OF */
473 ++#endif /* CONFIG_USE_OF */
474 +
475 + static int __init parse_tag_cmdline(const bp_tag_t* tag)
476 + {
477 +@@ -185,7 +185,7 @@ static int __init parse_bootparam(const bp_tag_t *tag)
478 + }
479 + #endif
480 +
481 +-#ifdef CONFIG_OF
482 ++#ifdef CONFIG_USE_OF
483 +
484 + #if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
485 + unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
486 +@@ -234,7 +234,7 @@ void __init early_init_devtree(void *params)
487 + strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
488 + }
489 +
490 +-#endif /* CONFIG_OF */
491 ++#endif /* CONFIG_USE_OF */
492 +
493 + /*
494 + * Initialize architecture. (Early stage)
495 +@@ -255,7 +255,7 @@ void __init init_arch(bp_tag_t *bp_start)
496 + if (bp_start)
497 + parse_bootparam(bp_start);
498 +
499 +-#ifdef CONFIG_OF
500 ++#ifdef CONFIG_USE_OF
501 + early_init_devtree(dtb_start);
502 + #endif
503 +
504 +diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
505 +index 9d1ecfc536708..470843188f2fc 100644
506 +--- a/arch/xtensa/mm/mmu.c
507 ++++ b/arch/xtensa/mm/mmu.c
508 +@@ -98,7 +98,7 @@ void init_mmu(void)
509 +
510 + void init_kio(void)
511 + {
512 +-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
513 ++#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
514 + /*
515 + * Update the IO area mapping in case xtensa_kio_paddr has changed
516 + */
517 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
518 +index 83be89c8627b9..9229c5c9ad473 100644
519 +--- a/drivers/android/binder.c
520 ++++ b/drivers/android/binder.c
521 +@@ -2966,7 +2966,7 @@ static void binder_transaction(struct binder_proc *proc,
522 + t->from = thread;
523 + else
524 + t->from = NULL;
525 +- t->sender_euid = proc->cred->euid;
526 ++ t->sender_euid = task_euid(proc->tsk);
527 + t->to_proc = target_proc;
528 + t->to_thread = target_thread;
529 + t->code = tr->code;
530 +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
531 +index 1b06c8e46ffa4..bd756b294d307 100644
532 +--- a/drivers/block/xen-blkfront.c
533 ++++ b/drivers/block/xen-blkfront.c
534 +@@ -80,6 +80,7 @@ enum blkif_state {
535 + BLKIF_STATE_DISCONNECTED,
536 + BLKIF_STATE_CONNECTED,
537 + BLKIF_STATE_SUSPENDED,
538 ++ BLKIF_STATE_ERROR,
539 + };
540 +
541 + struct grant {
542 +@@ -89,6 +90,7 @@ struct grant {
543 + };
544 +
545 + enum blk_req_status {
546 ++ REQ_PROCESSING,
547 + REQ_WAITING,
548 + REQ_DONE,
549 + REQ_ERROR,
550 +@@ -533,10 +535,10 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
551 +
552 + id = get_id_from_freelist(rinfo);
553 + rinfo->shadow[id].request = req;
554 +- rinfo->shadow[id].status = REQ_WAITING;
555 ++ rinfo->shadow[id].status = REQ_PROCESSING;
556 + rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
557 +
558 +- (*ring_req)->u.rw.id = id;
559 ++ rinfo->shadow[id].req.u.rw.id = id;
560 +
561 + return id;
562 + }
563 +@@ -544,11 +546,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
564 + static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
565 + {
566 + struct blkfront_info *info = rinfo->dev_info;
567 +- struct blkif_request *ring_req;
568 ++ struct blkif_request *ring_req, *final_ring_req;
569 + unsigned long id;
570 +
571 + /* Fill out a communications ring structure. */
572 +- id = blkif_ring_get_request(rinfo, req, &ring_req);
573 ++ id = blkif_ring_get_request(rinfo, req, &final_ring_req);
574 ++ ring_req = &rinfo->shadow[id].req;
575 +
576 + ring_req->operation = BLKIF_OP_DISCARD;
577 + ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
578 +@@ -559,8 +562,9 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
579 + else
580 + ring_req->u.discard.flag = 0;
581 +
582 +- /* Keep a private copy so we can reissue requests when recovering. */
583 +- rinfo->shadow[id].req = *ring_req;
584 ++ /* Copy the request to the ring page. */
585 ++ *final_ring_req = *ring_req;
586 ++ rinfo->shadow[id].status = REQ_WAITING;
587 +
588 + return 0;
589 + }
590 +@@ -693,6 +697,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
591 + {
592 + struct blkfront_info *info = rinfo->dev_info;
593 + struct blkif_request *ring_req, *extra_ring_req = NULL;
594 ++ struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
595 + unsigned long id, extra_id = NO_ASSOCIATED_ID;
596 + bool require_extra_req = false;
597 + int i;
598 +@@ -737,7 +742,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
599 + }
600 +
601 + /* Fill out a communications ring structure. */
602 +- id = blkif_ring_get_request(rinfo, req, &ring_req);
603 ++ id = blkif_ring_get_request(rinfo, req, &final_ring_req);
604 ++ ring_req = &rinfo->shadow[id].req;
605 +
606 + num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
607 + num_grant = 0;
608 +@@ -788,7 +794,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
609 + ring_req->u.rw.nr_segments = num_grant;
610 + if (unlikely(require_extra_req)) {
611 + extra_id = blkif_ring_get_request(rinfo, req,
612 +- &extra_ring_req);
613 ++ &final_extra_ring_req);
614 ++ extra_ring_req = &rinfo->shadow[extra_id].req;
615 ++
616 + /*
617 + * Only the first request contains the scatter-gather
618 + * list.
619 +@@ -830,10 +838,13 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
620 + if (setup.segments)
621 + kunmap_atomic(setup.segments);
622 +
623 +- /* Keep a private copy so we can reissue requests when recovering. */
624 +- rinfo->shadow[id].req = *ring_req;
625 +- if (unlikely(require_extra_req))
626 +- rinfo->shadow[extra_id].req = *extra_ring_req;
627 ++ /* Copy request(s) to the ring page. */
628 ++ *final_ring_req = *ring_req;
629 ++ rinfo->shadow[id].status = REQ_WAITING;
630 ++ if (unlikely(require_extra_req)) {
631 ++ *final_extra_ring_req = *extra_ring_req;
632 ++ rinfo->shadow[extra_id].status = REQ_WAITING;
633 ++ }
634 +
635 + if (new_persistent_gnts)
636 + gnttab_free_grant_references(setup.gref_head);
637 +@@ -1407,8 +1418,8 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp)
638 + static int blkif_get_final_status(enum blk_req_status s1,
639 + enum blk_req_status s2)
640 + {
641 +- BUG_ON(s1 == REQ_WAITING);
642 +- BUG_ON(s2 == REQ_WAITING);
643 ++ BUG_ON(s1 < REQ_DONE);
644 ++ BUG_ON(s2 < REQ_DONE);
645 +
646 + if (s1 == REQ_ERROR || s2 == REQ_ERROR)
647 + return BLKIF_RSP_ERROR;
648 +@@ -1441,7 +1452,7 @@ static bool blkif_completion(unsigned long *id,
649 + s->status = blkif_rsp_to_req_status(bret->status);
650 +
651 + /* Wait the second response if not yet here. */
652 +- if (s2->status == REQ_WAITING)
653 ++ if (s2->status < REQ_DONE)
654 + return false;
655 +
656 + bret->status = blkif_get_final_status(s->status,
657 +@@ -1549,7 +1560,7 @@ static bool blkif_completion(unsigned long *id,
658 + static irqreturn_t blkif_interrupt(int irq, void *dev_id)
659 + {
660 + struct request *req;
661 +- struct blkif_response *bret;
662 ++ struct blkif_response bret;
663 + RING_IDX i, rp;
664 + unsigned long flags;
665 + struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
666 +@@ -1560,54 +1571,76 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
667 +
668 + spin_lock_irqsave(&rinfo->ring_lock, flags);
669 + again:
670 +- rp = rinfo->ring.sring->rsp_prod;
671 +- rmb(); /* Ensure we see queued responses up to 'rp'. */
672 ++ rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
673 ++ virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
674 ++ if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
675 ++ pr_alert("%s: illegal number of responses %u\n",
676 ++ info->gd->disk_name, rp - rinfo->ring.rsp_cons);
677 ++ goto err;
678 ++ }
679 +
680 + for (i = rinfo->ring.rsp_cons; i != rp; i++) {
681 + unsigned long id;
682 ++ unsigned int op;
683 ++
684 ++ RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
685 ++ id = bret.id;
686 +
687 +- bret = RING_GET_RESPONSE(&rinfo->ring, i);
688 +- id = bret->id;
689 + /*
690 + * The backend has messed up and given us an id that we would
691 + * never have given to it (we stamp it up to BLK_RING_SIZE -
692 + * look in get_id_from_freelist.
693 + */
694 + if (id >= BLK_RING_SIZE(info)) {
695 +- WARN(1, "%s: response to %s has incorrect id (%ld)\n",
696 +- info->gd->disk_name, op_name(bret->operation), id);
697 +- /* We can't safely get the 'struct request' as
698 +- * the id is busted. */
699 +- continue;
700 ++ pr_alert("%s: response has incorrect id (%ld)\n",
701 ++ info->gd->disk_name, id);
702 ++ goto err;
703 + }
704 ++ if (rinfo->shadow[id].status != REQ_WAITING) {
705 ++ pr_alert("%s: response references no pending request\n",
706 ++ info->gd->disk_name);
707 ++ goto err;
708 ++ }
709 ++
710 ++ rinfo->shadow[id].status = REQ_PROCESSING;
711 + req = rinfo->shadow[id].request;
712 +
713 +- if (bret->operation != BLKIF_OP_DISCARD) {
714 ++ op = rinfo->shadow[id].req.operation;
715 ++ if (op == BLKIF_OP_INDIRECT)
716 ++ op = rinfo->shadow[id].req.u.indirect.indirect_op;
717 ++ if (bret.operation != op) {
718 ++ pr_alert("%s: response has wrong operation (%u instead of %u)\n",
719 ++ info->gd->disk_name, bret.operation, op);
720 ++ goto err;
721 ++ }
722 ++
723 ++ if (bret.operation != BLKIF_OP_DISCARD) {
724 + /*
725 + * We may need to wait for an extra response if the
726 + * I/O request is split in 2
727 + */
728 +- if (!blkif_completion(&id, rinfo, bret))
729 ++ if (!blkif_completion(&id, rinfo, &bret))
730 + continue;
731 + }
732 +
733 + if (add_id_to_freelist(rinfo, id)) {
734 + WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
735 +- info->gd->disk_name, op_name(bret->operation), id);
736 ++ info->gd->disk_name, op_name(bret.operation), id);
737 + continue;
738 + }
739 +
740 +- if (bret->status == BLKIF_RSP_OKAY)
741 ++ if (bret.status == BLKIF_RSP_OKAY)
742 + blkif_req(req)->error = BLK_STS_OK;
743 + else
744 + blkif_req(req)->error = BLK_STS_IOERR;
745 +
746 +- switch (bret->operation) {
747 ++ switch (bret.operation) {
748 + case BLKIF_OP_DISCARD:
749 +- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
750 ++ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
751 + struct request_queue *rq = info->rq;
752 +- printk(KERN_WARNING "blkfront: %s: %s op failed\n",
753 +- info->gd->disk_name, op_name(bret->operation));
754 ++
755 ++ pr_warn_ratelimited("blkfront: %s: %s op failed\n",
756 ++ info->gd->disk_name, op_name(bret.operation));
757 + blkif_req(req)->error = BLK_STS_NOTSUPP;
758 + info->feature_discard = 0;
759 + info->feature_secdiscard = 0;
760 +@@ -1617,15 +1650,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
761 + break;
762 + case BLKIF_OP_FLUSH_DISKCACHE:
763 + case BLKIF_OP_WRITE_BARRIER:
764 +- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
765 +- printk(KERN_WARNING "blkfront: %s: %s op failed\n",
766 +- info->gd->disk_name, op_name(bret->operation));
767 ++ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
768 ++ pr_warn_ratelimited("blkfront: %s: %s op failed\n",
769 ++ info->gd->disk_name, op_name(bret.operation));
770 + blkif_req(req)->error = BLK_STS_NOTSUPP;
771 + }
772 +- if (unlikely(bret->status == BLKIF_RSP_ERROR &&
773 ++ if (unlikely(bret.status == BLKIF_RSP_ERROR &&
774 + rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
775 +- printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
776 +- info->gd->disk_name, op_name(bret->operation));
777 ++ pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
778 ++ info->gd->disk_name, op_name(bret.operation));
779 + blkif_req(req)->error = BLK_STS_NOTSUPP;
780 + }
781 + if (unlikely(blkif_req(req)->error)) {
782 +@@ -1638,9 +1671,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
783 + /* fall through */
784 + case BLKIF_OP_READ:
785 + case BLKIF_OP_WRITE:
786 +- if (unlikely(bret->status != BLKIF_RSP_OKAY))
787 +- dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
788 +- "request: %x\n", bret->status);
789 ++ if (unlikely(bret.status != BLKIF_RSP_OKAY))
790 ++ dev_dbg_ratelimited(&info->xbdev->dev,
791 ++ "Bad return from blkdev data request: %#x\n",
792 ++ bret.status);
793 +
794 + break;
795 + default:
796 +@@ -1665,6 +1699,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
797 + spin_unlock_irqrestore(&rinfo->ring_lock, flags);
798 +
799 + return IRQ_HANDLED;
800 ++
801 ++ err:
802 ++ info->connected = BLKIF_STATE_ERROR;
803 ++
804 ++ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
805 ++
806 ++ pr_alert("%s disabled for further use\n", info->gd->disk_name);
807 ++ return IRQ_HANDLED;
808 + }
809 +
810 +
811 +diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
812 +index 041f8152272bf..177874adccf0d 100644
813 +--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
814 ++++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
815 +@@ -106,9 +106,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
816 + scmi_pd_data->domains = domains;
817 + scmi_pd_data->num_domains = num_domains;
818 +
819 +- of_genpd_add_provider_onecell(np, scmi_pd_data);
820 +-
821 +- return 0;
822 ++ return of_genpd_add_provider_onecell(np, scmi_pd_data);
823 + }
824 +
825 + static const struct scmi_device_id scmi_id_table[] = {
826 +diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
827 +index 8dcce7182bb7c..1e28ff9815997 100644
828 +--- a/drivers/gpu/drm/vc4/vc4_bo.c
829 ++++ b/drivers/gpu/drm/vc4/vc4_bo.c
830 +@@ -417,7 +417,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
831 +
832 + bo = kzalloc(sizeof(*bo), GFP_KERNEL);
833 + if (!bo)
834 +- return ERR_PTR(-ENOMEM);
835 ++ return NULL;
836 +
837 + bo->madv = VC4_MADV_WILLNEED;
838 + refcount_set(&bo->usecnt, 0);
839 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
840 +index 10524c93f8b62..f22f59df02601 100644
841 +--- a/drivers/hid/wacom_wac.c
842 ++++ b/drivers/hid/wacom_wac.c
843 +@@ -2538,6 +2538,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
844 + struct wacom_features *features = &wacom->wacom_wac.features;
845 +
846 + switch (equivalent_usage) {
847 ++ case HID_DG_CONFIDENCE:
848 ++ wacom_wac->hid_data.confidence = value;
849 ++ break;
850 + case HID_GD_X:
851 + wacom_wac->hid_data.x = value;
852 + break;
853 +@@ -2568,7 +2571,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
854 +
855 +
856 + if (usage->usage_index + 1 == field->report_count) {
857 +- if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
858 ++ if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
859 ++ wacom_wac->hid_data.confidence)
860 + wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
861 + }
862 + }
863 +@@ -2581,6 +2585,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
864 + struct hid_data* hid_data = &wacom_wac->hid_data;
865 + int i;
866 +
867 ++ hid_data->confidence = true;
868 ++
869 + for (i = 0; i < report->maxfield; i++) {
870 + struct hid_field *field = report->field[i];
871 + int j;
872 +diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
873 +index 0abed1e5b5260..48ce2b0a4549e 100644
874 +--- a/drivers/hid/wacom_wac.h
875 ++++ b/drivers/hid/wacom_wac.h
876 +@@ -302,6 +302,7 @@ struct hid_data {
877 + bool tipswitch;
878 + bool barrelswitch;
879 + bool barrelswitch2;
880 ++ bool confidence;
881 + int x;
882 + int y;
883 + int pressure;
884 +diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
885 +index 60b20ae02b055..5eeadab15a5f5 100644
886 +--- a/drivers/media/cec/cec-adap.c
887 ++++ b/drivers/media/cec/cec-adap.c
888 +@@ -1146,6 +1146,7 @@ void cec_received_msg_ts(struct cec_adapter *adap,
889 + if (abort)
890 + dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
891 + msg->flags = dst->flags;
892 ++ msg->sequence = dst->sequence;
893 + /* Remove it from the wait_queue */
894 + list_del_init(&data->list);
895 +
896 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
897 +index fd5375b5991bb..a257bf635bc24 100644
898 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
899 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
900 +@@ -451,9 +451,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
901 + roundup_size = ilog2(roundup_size);
902 +
903 + for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
904 +- tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
905 ++ tc_valid[i] = 1;
906 + tc_size[i] = roundup_size;
907 +- tc_offset[i] = rss_size * i;
908 ++ tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
909 + }
910 +
911 + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
912 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
913 +index a1c828ffac8b7..434a009c52d90 100644
914 +--- a/drivers/net/xen-netfront.c
915 ++++ b/drivers/net/xen-netfront.c
916 +@@ -121,21 +121,17 @@ struct netfront_queue {
917 +
918 + /*
919 + * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
920 +- * are linked from tx_skb_freelist through skb_entry.link.
921 +- *
922 +- * NB. Freelist index entries are always going to be less than
923 +- * PAGE_OFFSET, whereas pointers to skbs will always be equal or
924 +- * greater than PAGE_OFFSET: we use this property to distinguish
925 +- * them.
926 ++ * are linked from tx_skb_freelist through tx_link.
927 + */
928 +- union skb_entry {
929 +- struct sk_buff *skb;
930 +- unsigned long link;
931 +- } tx_skbs[NET_TX_RING_SIZE];
932 ++ struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
933 ++ unsigned short tx_link[NET_TX_RING_SIZE];
934 ++#define TX_LINK_NONE 0xffff
935 ++#define TX_PENDING 0xfffe
936 + grant_ref_t gref_tx_head;
937 + grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
938 + struct page *grant_tx_page[NET_TX_RING_SIZE];
939 + unsigned tx_skb_freelist;
940 ++ unsigned int tx_pend_queue;
941 +
942 + spinlock_t rx_lock ____cacheline_aligned_in_smp;
943 + struct xen_netif_rx_front_ring rx;
944 +@@ -161,6 +157,9 @@ struct netfront_info {
945 + struct netfront_stats __percpu *rx_stats;
946 + struct netfront_stats __percpu *tx_stats;
947 +
948 ++ /* Is device behaving sane? */
949 ++ bool broken;
950 ++
951 + atomic_t rx_gso_checksum_fixup;
952 + };
953 +
954 +@@ -169,33 +168,25 @@ struct netfront_rx_info {
955 + struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
956 + };
957 +
958 +-static void skb_entry_set_link(union skb_entry *list, unsigned short id)
959 +-{
960 +- list->link = id;
961 +-}
962 +-
963 +-static int skb_entry_is_link(const union skb_entry *list)
964 +-{
965 +- BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
966 +- return (unsigned long)list->skb < PAGE_OFFSET;
967 +-}
968 +-
969 + /*
970 + * Access macros for acquiring freeing slots in tx_skbs[].
971 + */
972 +
973 +-static void add_id_to_freelist(unsigned *head, union skb_entry *list,
974 +- unsigned short id)
975 ++static void add_id_to_list(unsigned *head, unsigned short *list,
976 ++ unsigned short id)
977 + {
978 +- skb_entry_set_link(&list[id], *head);
979 ++ list[id] = *head;
980 + *head = id;
981 + }
982 +
983 +-static unsigned short get_id_from_freelist(unsigned *head,
984 +- union skb_entry *list)
985 ++static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
986 + {
987 + unsigned int id = *head;
988 +- *head = list[id].link;
989 ++
990 ++ if (id != TX_LINK_NONE) {
991 ++ *head = list[id];
992 ++ list[id] = TX_LINK_NONE;
993 ++ }
994 + return id;
995 + }
996 +
997 +@@ -353,7 +344,7 @@ static int xennet_open(struct net_device *dev)
998 + unsigned int i = 0;
999 + struct netfront_queue *queue = NULL;
1000 +
1001 +- if (!np->queues)
1002 ++ if (!np->queues || np->broken)
1003 + return -ENODEV;
1004 +
1005 + for (i = 0; i < num_queues; ++i) {
1006 +@@ -381,27 +372,47 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1007 + unsigned short id;
1008 + struct sk_buff *skb;
1009 + bool more_to_do;
1010 ++ const struct device *dev = &queue->info->netdev->dev;
1011 +
1012 + BUG_ON(!netif_carrier_ok(queue->info->netdev));
1013 +
1014 + do {
1015 + prod = queue->tx.sring->rsp_prod;
1016 ++ if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
1017 ++ dev_alert(dev, "Illegal number of responses %u\n",
1018 ++ prod - queue->tx.rsp_cons);
1019 ++ goto err;
1020 ++ }
1021 + rmb(); /* Ensure we see responses up to 'rp'. */
1022 +
1023 + for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
1024 +- struct xen_netif_tx_response *txrsp;
1025 ++ struct xen_netif_tx_response txrsp;
1026 +
1027 +- txrsp = RING_GET_RESPONSE(&queue->tx, cons);
1028 +- if (txrsp->status == XEN_NETIF_RSP_NULL)
1029 ++ RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
1030 ++ if (txrsp.status == XEN_NETIF_RSP_NULL)
1031 + continue;
1032 +
1033 +- id = txrsp->id;
1034 +- skb = queue->tx_skbs[id].skb;
1035 ++ id = txrsp.id;
1036 ++ if (id >= RING_SIZE(&queue->tx)) {
1037 ++ dev_alert(dev,
1038 ++ "Response has incorrect id (%u)\n",
1039 ++ id);
1040 ++ goto err;
1041 ++ }
1042 ++ if (queue->tx_link[id] != TX_PENDING) {
1043 ++ dev_alert(dev,
1044 ++ "Response for inactive request\n");
1045 ++ goto err;
1046 ++ }
1047 ++
1048 ++ queue->tx_link[id] = TX_LINK_NONE;
1049 ++ skb = queue->tx_skbs[id];
1050 ++ queue->tx_skbs[id] = NULL;
1051 + if (unlikely(gnttab_query_foreign_access(
1052 + queue->grant_tx_ref[id]) != 0)) {
1053 +- pr_alert("%s: warning -- grant still in use by backend domain\n",
1054 +- __func__);
1055 +- BUG();
1056 ++ dev_alert(dev,
1057 ++ "Grant still in use by backend domain\n");
1058 ++ goto err;
1059 + }
1060 + gnttab_end_foreign_access_ref(
1061 + queue->grant_tx_ref[id], GNTMAP_readonly);
1062 +@@ -409,7 +420,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1063 + &queue->gref_tx_head, queue->grant_tx_ref[id]);
1064 + queue->grant_tx_ref[id] = GRANT_INVALID_REF;
1065 + queue->grant_tx_page[id] = NULL;
1066 +- add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
1067 ++ add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
1068 + dev_kfree_skb_irq(skb);
1069 + }
1070 +
1071 +@@ -419,13 +430,20 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1072 + } while (more_to_do);
1073 +
1074 + xennet_maybe_wake_tx(queue);
1075 ++
1076 ++ return;
1077 ++
1078 ++ err:
1079 ++ queue->info->broken = true;
1080 ++ dev_alert(dev, "Disabled for further use\n");
1081 + }
1082 +
1083 + struct xennet_gnttab_make_txreq {
1084 + struct netfront_queue *queue;
1085 + struct sk_buff *skb;
1086 + struct page *page;
1087 +- struct xen_netif_tx_request *tx; /* Last request */
1088 ++ struct xen_netif_tx_request *tx; /* Last request on ring page */
1089 ++ struct xen_netif_tx_request tx_local; /* Last request local copy*/
1090 + unsigned int size;
1091 + };
1092 +
1093 +@@ -441,7 +459,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
1094 + struct netfront_queue *queue = info->queue;
1095 + struct sk_buff *skb = info->skb;
1096 +
1097 +- id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
1098 ++ id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
1099 + tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
1100 + ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
1101 + WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
1102 +@@ -449,34 +467,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
1103 + gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
1104 + gfn, GNTMAP_readonly);
1105 +
1106 +- queue->tx_skbs[id].skb = skb;
1107 ++ queue->tx_skbs[id] = skb;
1108 + queue->grant_tx_page[id] = page;
1109 + queue->grant_tx_ref[id] = ref;
1110 +
1111 +- tx->id = id;
1112 +- tx->gref = ref;
1113 +- tx->offset = offset;
1114 +- tx->size = len;
1115 +- tx->flags = 0;
1116 ++ info->tx_local.id = id;
1117 ++ info->tx_local.gref = ref;
1118 ++ info->tx_local.offset = offset;
1119 ++ info->tx_local.size = len;
1120 ++ info->tx_local.flags = 0;
1121 ++
1122 ++ *tx = info->tx_local;
1123 ++
1124 ++ /*
1125 ++ * Put the request in the pending queue, it will be set to be pending
1126 ++ * when the producer index is about to be raised.
1127 ++ */
1128 ++ add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
1129 +
1130 + info->tx = tx;
1131 +- info->size += tx->size;
1132 ++ info->size += info->tx_local.size;
1133 + }
1134 +
1135 + static struct xen_netif_tx_request *xennet_make_first_txreq(
1136 +- struct netfront_queue *queue, struct sk_buff *skb,
1137 +- struct page *page, unsigned int offset, unsigned int len)
1138 ++ struct xennet_gnttab_make_txreq *info,
1139 ++ unsigned int offset, unsigned int len)
1140 + {
1141 +- struct xennet_gnttab_make_txreq info = {
1142 +- .queue = queue,
1143 +- .skb = skb,
1144 +- .page = page,
1145 +- .size = 0,
1146 +- };
1147 ++ info->size = 0;
1148 +
1149 +- gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
1150 ++ gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
1151 +
1152 +- return info.tx;
1153 ++ return info->tx;
1154 + }
1155 +
1156 + static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
1157 +@@ -489,35 +510,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
1158 + xennet_tx_setup_grant(gfn, offset, len, data);
1159 + }
1160 +
1161 +-static struct xen_netif_tx_request *xennet_make_txreqs(
1162 +- struct netfront_queue *queue, struct xen_netif_tx_request *tx,
1163 +- struct sk_buff *skb, struct page *page,
1164 ++static void xennet_make_txreqs(
1165 ++ struct xennet_gnttab_make_txreq *info,
1166 ++ struct page *page,
1167 + unsigned int offset, unsigned int len)
1168 + {
1169 +- struct xennet_gnttab_make_txreq info = {
1170 +- .queue = queue,
1171 +- .skb = skb,
1172 +- .tx = tx,
1173 +- };
1174 +-
1175 + /* Skip unused frames from start of page */
1176 + page += offset >> PAGE_SHIFT;
1177 + offset &= ~PAGE_MASK;
1178 +
1179 + while (len) {
1180 +- info.page = page;
1181 +- info.size = 0;
1182 ++ info->page = page;
1183 ++ info->size = 0;
1184 +
1185 + gnttab_foreach_grant_in_range(page, offset, len,
1186 + xennet_make_one_txreq,
1187 +- &info);
1188 ++ info);
1189 +
1190 + page++;
1191 + offset = 0;
1192 +- len -= info.size;
1193 ++ len -= info->size;
1194 + }
1195 +-
1196 +- return info.tx;
1197 + }
1198 +
1199 + /*
1200 +@@ -565,13 +578,22 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
1201 + return queue_idx;
1202 + }
1203 +
1204 ++static void xennet_mark_tx_pending(struct netfront_queue *queue)
1205 ++{
1206 ++ unsigned int i;
1207 ++
1208 ++ while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
1209 ++ TX_LINK_NONE)
1210 ++ queue->tx_link[i] = TX_PENDING;
1211 ++}
1212 ++
1213 + #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
1214 +
1215 + static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1216 + {
1217 + struct netfront_info *np = netdev_priv(dev);
1218 + struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
1219 +- struct xen_netif_tx_request *tx, *first_tx;
1220 ++ struct xen_netif_tx_request *first_tx;
1221 + unsigned int i;
1222 + int notify;
1223 + int slots;
1224 +@@ -580,6 +602,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1225 + unsigned int len;
1226 + unsigned long flags;
1227 + struct netfront_queue *queue = NULL;
1228 ++ struct xennet_gnttab_make_txreq info = { };
1229 + unsigned int num_queues = dev->real_num_tx_queues;
1230 + u16 queue_index;
1231 + struct sk_buff *nskb;
1232 +@@ -587,6 +610,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1233 + /* Drop the packet if no queues are set up */
1234 + if (num_queues < 1)
1235 + goto drop;
1236 ++ if (unlikely(np->broken))
1237 ++ goto drop;
1238 + /* Determine which queue to transmit this SKB on */
1239 + queue_index = skb_get_queue_mapping(skb);
1240 + queue = &np->queues[queue_index];
1241 +@@ -637,21 +662,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1242 + }
1243 +
1244 + /* First request for the linear area. */
1245 +- first_tx = tx = xennet_make_first_txreq(queue, skb,
1246 +- page, offset, len);
1247 +- offset += tx->size;
1248 ++ info.queue = queue;
1249 ++ info.skb = skb;
1250 ++ info.page = page;
1251 ++ first_tx = xennet_make_first_txreq(&info, offset, len);
1252 ++ offset += info.tx_local.size;
1253 + if (offset == PAGE_SIZE) {
1254 + page++;
1255 + offset = 0;
1256 + }
1257 +- len -= tx->size;
1258 ++ len -= info.tx_local.size;
1259 +
1260 + if (skb->ip_summed == CHECKSUM_PARTIAL)
1261 + /* local packet? */
1262 +- tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
1263 ++ first_tx->flags |= XEN_NETTXF_csum_blank |
1264 ++ XEN_NETTXF_data_validated;
1265 + else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1266 + /* remote but checksummed. */
1267 +- tx->flags |= XEN_NETTXF_data_validated;
1268 ++ first_tx->flags |= XEN_NETTXF_data_validated;
1269 +
1270 + /* Optional extra info after the first request. */
1271 + if (skb_shinfo(skb)->gso_size) {
1272 +@@ -660,7 +688,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1273 + gso = (struct xen_netif_extra_info *)
1274 + RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
1275 +
1276 +- tx->flags |= XEN_NETTXF_extra_info;
1277 ++ first_tx->flags |= XEN_NETTXF_extra_info;
1278 +
1279 + gso->u.gso.size = skb_shinfo(skb)->gso_size;
1280 + gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
1281 +@@ -674,19 +702,21 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1282 + }
1283 +
1284 + /* Requests for the rest of the linear area. */
1285 +- tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
1286 ++ xennet_make_txreqs(&info, page, offset, len);
1287 +
1288 + /* Requests for all the frags. */
1289 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1290 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1291 +- tx = xennet_make_txreqs(queue, tx, skb,
1292 +- skb_frag_page(frag), frag->page_offset,
1293 ++ xennet_make_txreqs(&info, skb_frag_page(frag),
1294 ++ frag->page_offset,
1295 + skb_frag_size(frag));
1296 + }
1297 +
1298 + /* First request has the packet length. */
1299 + first_tx->size = skb->len;
1300 +
1301 ++ xennet_mark_tx_pending(queue);
1302 ++
1303 + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
1304 + if (notify)
1305 + notify_remote_via_irq(queue->tx_irq);
1306 +@@ -744,7 +774,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
1307 + RING_IDX rp)
1308 +
1309 + {
1310 +- struct xen_netif_extra_info *extra;
1311 ++ struct xen_netif_extra_info extra;
1312 + struct device *dev = &queue->info->netdev->dev;
1313 + RING_IDX cons = queue->rx.rsp_cons;
1314 + int err = 0;
1315 +@@ -760,24 +790,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
1316 + break;
1317 + }
1318 +
1319 +- extra = (struct xen_netif_extra_info *)
1320 +- RING_GET_RESPONSE(&queue->rx, ++cons);
1321 ++ RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
1322 +
1323 +- if (unlikely(!extra->type ||
1324 +- extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1325 ++ if (unlikely(!extra.type ||
1326 ++ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1327 + if (net_ratelimit())
1328 + dev_warn(dev, "Invalid extra type: %d\n",
1329 +- extra->type);
1330 ++ extra.type);
1331 + err = -EINVAL;
1332 + } else {
1333 +- memcpy(&extras[extra->type - 1], extra,
1334 +- sizeof(*extra));
1335 ++ extras[extra.type - 1] = extra;
1336 + }
1337 +
1338 + skb = xennet_get_rx_skb(queue, cons);
1339 + ref = xennet_get_rx_ref(queue, cons);
1340 + xennet_move_rx_slot(queue, skb, ref);
1341 +- } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1342 ++ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1343 +
1344 + queue->rx.rsp_cons = cons;
1345 + return err;
1346 +@@ -787,7 +815,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
1347 + struct netfront_rx_info *rinfo, RING_IDX rp,
1348 + struct sk_buff_head *list)
1349 + {
1350 +- struct xen_netif_rx_response *rx = &rinfo->rx;
1351 ++ struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
1352 + struct xen_netif_extra_info *extras = rinfo->extras;
1353 + struct device *dev = &queue->info->netdev->dev;
1354 + RING_IDX cons = queue->rx.rsp_cons;
1355 +@@ -845,7 +873,8 @@ next:
1356 + break;
1357 + }
1358 +
1359 +- rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
1360 ++ RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1361 ++ rx = &rx_local;
1362 + skb = xennet_get_rx_skb(queue, cons + slots);
1363 + ref = xennet_get_rx_ref(queue, cons + slots);
1364 + slots++;
1365 +@@ -900,10 +929,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
1366 + struct sk_buff *nskb;
1367 +
1368 + while ((nskb = __skb_dequeue(list))) {
1369 +- struct xen_netif_rx_response *rx =
1370 +- RING_GET_RESPONSE(&queue->rx, ++cons);
1371 ++ struct xen_netif_rx_response rx;
1372 + skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1373 +
1374 ++ RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1375 ++
1376 + if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1377 + unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1378 +
1379 +@@ -918,7 +948,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
1380 +
1381 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1382 + skb_frag_page(nfrag),
1383 +- rx->offset, rx->status, PAGE_SIZE);
1384 ++ rx.offset, rx.status, PAGE_SIZE);
1385 +
1386 + skb_shinfo(nskb)->nr_frags = 0;
1387 + kfree_skb(nskb);
1388 +@@ -1011,12 +1041,19 @@ static int xennet_poll(struct napi_struct *napi, int budget)
1389 + skb_queue_head_init(&tmpq);
1390 +
1391 + rp = queue->rx.sring->rsp_prod;
1392 ++ if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1393 ++ dev_alert(&dev->dev, "Illegal number of responses %u\n",
1394 ++ rp - queue->rx.rsp_cons);
1395 ++ queue->info->broken = true;
1396 ++ spin_unlock(&queue->rx_lock);
1397 ++ return 0;
1398 ++ }
1399 + rmb(); /* Ensure we see queued responses up to 'rp'. */
1400 +
1401 + i = queue->rx.rsp_cons;
1402 + work_done = 0;
1403 + while ((i != rp) && (work_done < budget)) {
1404 +- memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
1405 ++ RING_COPY_RESPONSE(&queue->rx, i, rx);
1406 + memset(extras, 0, sizeof(rinfo.extras));
1407 +
1408 + err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
1409 +@@ -1138,17 +1175,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
1410 +
1411 + for (i = 0; i < NET_TX_RING_SIZE; i++) {
1412 + /* Skip over entries which are actually freelist references */
1413 +- if (skb_entry_is_link(&queue->tx_skbs[i]))
1414 ++ if (!queue->tx_skbs[i])
1415 + continue;
1416 +
1417 +- skb = queue->tx_skbs[i].skb;
1418 ++ skb = queue->tx_skbs[i];
1419 ++ queue->tx_skbs[i] = NULL;
1420 + get_page(queue->grant_tx_page[i]);
1421 + gnttab_end_foreign_access(queue->grant_tx_ref[i],
1422 + GNTMAP_readonly,
1423 + (unsigned long)page_address(queue->grant_tx_page[i]));
1424 + queue->grant_tx_page[i] = NULL;
1425 + queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1426 +- add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1427 ++ add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1428 + dev_kfree_skb_irq(skb);
1429 + }
1430 + }
1431 +@@ -1228,6 +1266,9 @@ static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1432 + struct netfront_queue *queue = dev_id;
1433 + unsigned long flags;
1434 +
1435 ++ if (queue->info->broken)
1436 ++ return IRQ_HANDLED;
1437 ++
1438 + spin_lock_irqsave(&queue->tx_lock, flags);
1439 + xennet_tx_buf_gc(queue);
1440 + spin_unlock_irqrestore(&queue->tx_lock, flags);
1441 +@@ -1240,6 +1281,9 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1442 + struct netfront_queue *queue = dev_id;
1443 + struct net_device *dev = queue->info->netdev;
1444 +
1445 ++ if (queue->info->broken)
1446 ++ return IRQ_HANDLED;
1447 ++
1448 + if (likely(netif_carrier_ok(dev) &&
1449 + RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1450 + napi_schedule(&queue->napi);
1451 +@@ -1261,6 +1305,10 @@ static void xennet_poll_controller(struct net_device *dev)
1452 + struct netfront_info *info = netdev_priv(dev);
1453 + unsigned int num_queues = dev->real_num_tx_queues;
1454 + unsigned int i;
1455 ++
1456 ++ if (info->broken)
1457 ++ return;
1458 ++
1459 + for (i = 0; i < num_queues; ++i)
1460 + xennet_interrupt(0, &info->queues[i]);
1461 + }
1462 +@@ -1630,13 +1678,15 @@ static int xennet_init_queue(struct netfront_queue *queue)
1463 + snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
1464 + devid, queue->id);
1465 +
1466 +- /* Initialise tx_skbs as a free chain containing every entry. */
1467 ++ /* Initialise tx_skb_freelist as a free chain containing every entry. */
1468 + queue->tx_skb_freelist = 0;
1469 ++ queue->tx_pend_queue = TX_LINK_NONE;
1470 + for (i = 0; i < NET_TX_RING_SIZE; i++) {
1471 +- skb_entry_set_link(&queue->tx_skbs[i], i+1);
1472 ++ queue->tx_link[i] = i + 1;
1473 + queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1474 + queue->grant_tx_page[i] = NULL;
1475 + }
1476 ++ queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
1477 +
1478 + /* Clear out rx_skbs */
1479 + for (i = 0; i < NET_RX_RING_SIZE; i++) {
1480 +@@ -1841,6 +1891,9 @@ static int talk_to_netback(struct xenbus_device *dev,
1481 + if (info->queues)
1482 + xennet_destroy_queues(info);
1483 +
1484 ++ /* For the case of a reconnect reset the "broken" indicator. */
1485 ++ info->broken = false;
1486 ++
1487 + err = xennet_create_queues(info, &num_queues);
1488 + if (err < 0) {
1489 + xenbus_dev_fatal(dev, err, "creating queues");
1490 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
1491 +index 98fb3c1f45e4d..e6d60fa2217da 100644
1492 +--- a/drivers/pci/controller/pci-aardvark.c
1493 ++++ b/drivers/pci/controller/pci-aardvark.c
1494 +@@ -9,6 +9,7 @@
1495 + */
1496 +
1497 + #include <linux/delay.h>
1498 ++#include <linux/gpio/consumer.h>
1499 + #include <linux/interrupt.h>
1500 + #include <linux/irq.h>
1501 + #include <linux/irqdomain.h>
1502 +@@ -17,6 +18,7 @@
1503 + #include <linux/init.h>
1504 + #include <linux/platform_device.h>
1505 + #include <linux/of_address.h>
1506 ++#include <linux/of_gpio.h>
1507 + #include <linux/of_pci.h>
1508 +
1509 + #include "../pci.h"
1510 +@@ -26,16 +28,7 @@
1511 + #define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
1512 + #define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
1513 + #define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
1514 +-#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
1515 +-#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
1516 +-#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
1517 +-#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
1518 +-#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
1519 +-#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
1520 +-#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
1521 +-#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
1522 +-#define PCIE_CORE_LINK_TRAINING BIT(5)
1523 +-#define PCIE_CORE_LINK_WIDTH_SHIFT 20
1524 ++#define PCIE_CORE_PCIEXP_CAP 0xc0
1525 + #define PCIE_CORE_ERR_CAPCTL_REG 0x118
1526 + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
1527 + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
1528 +@@ -113,14 +106,95 @@
1529 + #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
1530 + #define PCIE_MSI_DATA_MASK GENMASK(15, 0)
1531 +
1532 ++/* PCIe window configuration */
1533 ++#define OB_WIN_BASE_ADDR 0x4c00
1534 ++#define OB_WIN_BLOCK_SIZE 0x20
1535 ++#define OB_WIN_COUNT 8
1536 ++#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
1537 ++ OB_WIN_BLOCK_SIZE * (win) + \
1538 ++ (offset))
1539 ++#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
1540 ++#define OB_WIN_ENABLE BIT(0)
1541 ++#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
1542 ++#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
1543 ++#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
1544 ++#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
1545 ++#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
1546 ++#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
1547 ++#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
1548 ++#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
1549 ++#define OB_WIN_FUNC_NUM_SHIFT 24
1550 ++#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
1551 ++#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
1552 ++#define OB_WIN_BUS_NUM_BITS_SHIFT 20
1553 ++#define OB_WIN_MSG_CODE_ENABLE BIT(22)
1554 ++#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
1555 ++#define OB_WIN_MSG_CODE_SHIFT 14
1556 ++#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
1557 ++#define OB_WIN_ATTR_ENABLE BIT(11)
1558 ++#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
1559 ++#define OB_WIN_ATTR_TC_SHIFT 8
1560 ++#define OB_WIN_ATTR_RELAXED BIT(7)
1561 ++#define OB_WIN_ATTR_NOSNOOP BIT(6)
1562 ++#define OB_WIN_ATTR_POISON BIT(5)
1563 ++#define OB_WIN_ATTR_IDO BIT(4)
1564 ++#define OB_WIN_TYPE_MASK GENMASK(3, 0)
1565 ++#define OB_WIN_TYPE_SHIFT 0
1566 ++#define OB_WIN_TYPE_MEM 0x0
1567 ++#define OB_WIN_TYPE_IO 0x4
1568 ++#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
1569 ++#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
1570 ++#define OB_WIN_TYPE_MSG 0xc
1571 ++
1572 + /* LMI registers base address and register offsets */
1573 + #define LMI_BASE_ADDR 0x6000
1574 + #define CFG_REG (LMI_BASE_ADDR + 0x0)
1575 + #define LTSSM_SHIFT 24
1576 + #define LTSSM_MASK 0x3f
1577 +-#define LTSSM_L0 0x10
1578 + #define RC_BAR_CONFIG 0x300
1579 +
1580 ++/* LTSSM values in CFG_REG */
1581 ++enum {
1582 ++ LTSSM_DETECT_QUIET = 0x0,
1583 ++ LTSSM_DETECT_ACTIVE = 0x1,
1584 ++ LTSSM_POLLING_ACTIVE = 0x2,
1585 ++ LTSSM_POLLING_COMPLIANCE = 0x3,
1586 ++ LTSSM_POLLING_CONFIGURATION = 0x4,
1587 ++ LTSSM_CONFIG_LINKWIDTH_START = 0x5,
1588 ++ LTSSM_CONFIG_LINKWIDTH_ACCEPT = 0x6,
1589 ++ LTSSM_CONFIG_LANENUM_ACCEPT = 0x7,
1590 ++ LTSSM_CONFIG_LANENUM_WAIT = 0x8,
1591 ++ LTSSM_CONFIG_COMPLETE = 0x9,
1592 ++ LTSSM_CONFIG_IDLE = 0xa,
1593 ++ LTSSM_RECOVERY_RCVR_LOCK = 0xb,
1594 ++ LTSSM_RECOVERY_SPEED = 0xc,
1595 ++ LTSSM_RECOVERY_RCVR_CFG = 0xd,
1596 ++ LTSSM_RECOVERY_IDLE = 0xe,
1597 ++ LTSSM_L0 = 0x10,
1598 ++ LTSSM_RX_L0S_ENTRY = 0x11,
1599 ++ LTSSM_RX_L0S_IDLE = 0x12,
1600 ++ LTSSM_RX_L0S_FTS = 0x13,
1601 ++ LTSSM_TX_L0S_ENTRY = 0x14,
1602 ++ LTSSM_TX_L0S_IDLE = 0x15,
1603 ++ LTSSM_TX_L0S_FTS = 0x16,
1604 ++ LTSSM_L1_ENTRY = 0x17,
1605 ++ LTSSM_L1_IDLE = 0x18,
1606 ++ LTSSM_L2_IDLE = 0x19,
1607 ++ LTSSM_L2_TRANSMIT_WAKE = 0x1a,
1608 ++ LTSSM_DISABLED = 0x20,
1609 ++ LTSSM_LOOPBACK_ENTRY_MASTER = 0x21,
1610 ++ LTSSM_LOOPBACK_ACTIVE_MASTER = 0x22,
1611 ++ LTSSM_LOOPBACK_EXIT_MASTER = 0x23,
1612 ++ LTSSM_LOOPBACK_ENTRY_SLAVE = 0x24,
1613 ++ LTSSM_LOOPBACK_ACTIVE_SLAVE = 0x25,
1614 ++ LTSSM_LOOPBACK_EXIT_SLAVE = 0x26,
1615 ++ LTSSM_HOT_RESET = 0x27,
1616 ++ LTSSM_RECOVERY_EQUALIZATION_PHASE0 = 0x28,
1617 ++ LTSSM_RECOVERY_EQUALIZATION_PHASE1 = 0x29,
1618 ++ LTSSM_RECOVERY_EQUALIZATION_PHASE2 = 0x2a,
1619 ++ LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b,
1620 ++};
1621 ++
1622 + /* PCIe core controller registers */
1623 + #define CTRL_CORE_BASE_ADDR 0x18000
1624 + #define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0)
1625 +@@ -181,6 +255,13 @@ struct advk_pcie {
1626 + struct platform_device *pdev;
1627 + void __iomem *base;
1628 + struct list_head resources;
1629 ++ struct {
1630 ++ phys_addr_t match;
1631 ++ phys_addr_t remap;
1632 ++ phys_addr_t mask;
1633 ++ u32 actions;
1634 ++ } wins[OB_WIN_COUNT];
1635 ++ u8 wins_count;
1636 + struct irq_domain *irq_domain;
1637 + struct irq_chip irq_chip;
1638 + raw_spinlock_t irq_lock;
1639 +@@ -193,6 +274,8 @@ struct advk_pcie {
1640 + struct mutex msi_used_lock;
1641 + u16 msi_msg;
1642 + int root_bus_nr;
1643 ++ int link_gen;
1644 ++ struct gpio_desc *reset_gpio;
1645 + };
1646 +
1647 + static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
1648 +@@ -205,37 +288,161 @@ static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
1649 + return readl(pcie->base + reg);
1650 + }
1651 +
1652 +-static int advk_pcie_link_up(struct advk_pcie *pcie)
1653 ++static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
1654 + {
1655 +- u32 val, ltssm_state;
1656 ++ u32 val;
1657 ++ u8 ltssm_state;
1658 +
1659 + val = advk_readl(pcie, CFG_REG);
1660 + ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
1661 +- return ltssm_state >= LTSSM_L0;
1662 ++ return ltssm_state;
1663 ++}
1664 ++
1665 ++static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
1666 ++{
1667 ++ /* check if LTSSM is in normal operation - some L* state */
1668 ++ u8 ltssm_state = advk_pcie_ltssm_state(pcie);
1669 ++ return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
1670 ++}
1671 ++
1672 ++static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
1673 ++{
1674 ++ /*
1675 ++ * According to PCIe Base specification 3.0, Table 4-14: Link
1676 ++ * Status Mapped to the LTSSM is Link Training mapped to LTSSM
1677 ++ * Configuration and Recovery states.
1678 ++ */
1679 ++ u8 ltssm_state = advk_pcie_ltssm_state(pcie);
1680 ++ return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
1681 ++ ltssm_state < LTSSM_L0) ||
1682 ++ (ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
1683 ++ ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
1684 + }
1685 +
1686 + static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
1687 + {
1688 +- struct device *dev = &pcie->pdev->dev;
1689 + int retries;
1690 +
1691 + /* check if the link is up or not */
1692 + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
1693 +- if (advk_pcie_link_up(pcie)) {
1694 +- dev_info(dev, "link up\n");
1695 ++ if (advk_pcie_link_up(pcie))
1696 + return 0;
1697 +- }
1698 +
1699 + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
1700 + }
1701 +
1702 +- dev_err(dev, "link never came up\n");
1703 + return -ETIMEDOUT;
1704 + }
1705 +
1706 ++static void advk_pcie_issue_perst(struct advk_pcie *pcie)
1707 ++{
1708 ++ if (!pcie->reset_gpio)
1709 ++ return;
1710 ++
1711 ++ /* 10ms delay is needed for some cards */
1712 ++ dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
1713 ++ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
1714 ++ usleep_range(10000, 11000);
1715 ++ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
1716 ++}
1717 ++
1718 ++static void advk_pcie_train_link(struct advk_pcie *pcie)
1719 ++{
1720 ++ struct device *dev = &pcie->pdev->dev;
1721 ++ u32 reg;
1722 ++ int ret;
1723 ++
1724 ++ /*
1725 ++ * Setup PCIe rev / gen compliance based on device tree property
1726 ++ * 'max-link-speed' which also forces maximal link speed.
1727 ++ */
1728 ++ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1729 ++ reg &= ~PCIE_GEN_SEL_MSK;
1730 ++ if (pcie->link_gen == 3)
1731 ++ reg |= SPEED_GEN_3;
1732 ++ else if (pcie->link_gen == 2)
1733 ++ reg |= SPEED_GEN_2;
1734 ++ else
1735 ++ reg |= SPEED_GEN_1;
1736 ++ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1737 ++
1738 ++ /*
1739 ++ * Set maximal link speed value also into PCIe Link Control 2 register.
1740 ++ * Armada 3700 Functional Specification says that default value is based
1741 ++ * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
1742 ++ */
1743 ++ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
1744 ++ reg &= ~PCI_EXP_LNKCTL2_TLS;
1745 ++ if (pcie->link_gen == 3)
1746 ++ reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
1747 ++ else if (pcie->link_gen == 2)
1748 ++ reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
1749 ++ else
1750 ++ reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
1751 ++ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
1752 ++
1753 ++ /* Enable link training after selecting PCIe generation */
1754 ++ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1755 ++ reg |= LINK_TRAINING_EN;
1756 ++ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1757 ++
1758 ++ /*
1759 ++ * Reset PCIe card via PERST# signal. Some cards are not detected
1760 ++ * during link training when they are in some non-initial state.
1761 ++ */
1762 ++ advk_pcie_issue_perst(pcie);
1763 ++
1764 ++ /*
1765 ++ * PERST# signal could have been asserted by pinctrl subsystem before
1766 ++ * probe() callback has been called or issued explicitly by reset gpio
1767 ++ * function advk_pcie_issue_perst(), making the endpoint going into
1768 ++ * fundamental reset. As required by PCI Express spec (PCI Express
1769 ++ * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
1770 ++ * Conventional Reset) a delay for at least 100ms after such a reset
1771 ++ * before sending a Configuration Request to the device is needed.
1772 ++ * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
1773 ++ * waits for link at least 900ms.
1774 ++ */
1775 ++ ret = advk_pcie_wait_for_link(pcie);
1776 ++ if (ret < 0)
1777 ++ dev_err(dev, "link never came up\n");
1778 ++ else
1779 ++ dev_info(dev, "link up\n");
1780 ++}
1781 ++
1782 ++/*
1783 ++ * Set PCIe address window register which could be used for memory
1784 ++ * mapping.
1785 ++ */
1786 ++static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
1787 ++ phys_addr_t match, phys_addr_t remap,
1788 ++ phys_addr_t mask, u32 actions)
1789 ++{
1790 ++ advk_writel(pcie, OB_WIN_ENABLE |
1791 ++ lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
1792 ++ advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
1793 ++ advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
1794 ++ advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
1795 ++ advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
1796 ++ advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
1797 ++ advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
1798 ++}
1799 ++
1800 ++static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
1801 ++{
1802 ++ advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
1803 ++ advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
1804 ++ advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
1805 ++ advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
1806 ++ advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
1807 ++ advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
1808 ++ advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
1809 ++}
1810 ++
1811 + static void advk_pcie_setup_hw(struct advk_pcie *pcie)
1812 + {
1813 + u32 reg;
1814 ++ int i;
1815 +
1816 + /* Set to Direct mode */
1817 + reg = advk_readl(pcie, CTRL_CONFIG_REG);
1818 +@@ -255,36 +462,27 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
1819 + PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
1820 + advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
1821 +
1822 +- /* Set PCIe Device Control and Status 1 PF0 register */
1823 +- reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
1824 +- (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
1825 +- PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
1826 +- (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
1827 +- PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
1828 +- advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
1829 ++ /* Set PCIe Device Control register */
1830 ++ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
1831 ++ reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
1832 ++ reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
1833 ++ reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
1834 ++ reg &= ~PCI_EXP_DEVCTL_READRQ;
1835 ++ reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
1836 ++ reg |= PCI_EXP_DEVCTL_READRQ_512B;
1837 ++ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
1838 +
1839 + /* Program PCIe Control 2 to disable strict ordering */
1840 + reg = PCIE_CORE_CTRL2_RESERVED |
1841 + PCIE_CORE_CTRL2_TD_ENABLE;
1842 + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
1843 +
1844 +- /* Set GEN2 */
1845 +- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1846 +- reg &= ~PCIE_GEN_SEL_MSK;
1847 +- reg |= SPEED_GEN_2;
1848 +- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1849 +-
1850 + /* Set lane X1 */
1851 + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1852 + reg &= ~LANE_CNT_MSK;
1853 + reg |= LANE_COUNT_1;
1854 + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1855 +
1856 +- /* Enable link training */
1857 +- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1858 +- reg |= LINK_TRAINING_EN;
1859 +- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1860 +-
1861 + /* Enable MSI */
1862 + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
1863 + reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
1864 +@@ -309,21 +507,52 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
1865 + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
1866 + advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
1867 +
1868 ++ /*
1869 ++ * Enable AXI address window location generation:
1870 ++ * When it is enabled, the default outbound window
1871 ++ * configurations (Default User Field: 0xD0074CFC)
1872 ++ * are used to transparent address translation for
1873 ++ * the outbound transactions. Thus, PCIe address
1874 ++ * windows are not required for transparent memory
1875 ++ * access when default outbound window configuration
1876 ++ * is set for memory access.
1877 ++ */
1878 + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
1879 + reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
1880 + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
1881 +
1882 +- /* Bypass the address window mapping for PIO */
1883 ++ /*
1884 ++ * Set memory access in Default User Field so it
1885 ++ * is not required to configure PCIe address for
1886 ++ * transparent memory access.
1887 ++ */
1888 ++ advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
1889 ++
1890 ++ /*
1891 ++ * Bypass the address window mapping for PIO:
1892 ++ * Since PIO access already contains all required
1893 ++ * info over AXI interface by PIO registers, the
1894 ++ * address window is not required.
1895 ++ */
1896 + reg = advk_readl(pcie, PIO_CTRL);
1897 + reg |= PIO_CTRL_ADDR_WIN_DISABLE;
1898 + advk_writel(pcie, reg, PIO_CTRL);
1899 +
1900 +- /* Start link training */
1901 +- reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
1902 +- reg |= PCIE_CORE_LINK_TRAINING;
1903 +- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
1904 ++ /*
1905 ++ * Configure PCIe address windows for non-memory or
1906 ++ * non-transparent access as by default PCIe uses
1907 ++ * transparent memory access.
1908 ++ */
1909 ++ for (i = 0; i < pcie->wins_count; i++)
1910 ++ advk_pcie_set_ob_win(pcie, i,
1911 ++ pcie->wins[i].match, pcie->wins[i].remap,
1912 ++ pcie->wins[i].mask, pcie->wins[i].actions);
1913 +
1914 +- advk_pcie_wait_for_link(pcie);
1915 ++ /* Disable remaining PCIe outbound windows */
1916 ++ for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
1917 ++ advk_pcie_disable_ob_win(pcie, i);
1918 ++
1919 ++ advk_pcie_train_link(pcie);
1920 +
1921 + reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
1922 + reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
1923 +@@ -435,6 +664,13 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
1924 + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
1925 + return false;
1926 +
1927 ++ /*
1928 ++ * If the link goes down after we check for link-up, nothing bad
1929 ++ * happens but the config access times out.
1930 ++ */
1931 ++ if (bus->number != pcie->root_bus_nr && !advk_pcie_link_up(pcie))
1932 ++ return false;
1933 ++
1934 + return true;
1935 + }
1936 +
1937 +@@ -506,8 +742,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
1938 + advk_writel(pcie, 1, PIO_START);
1939 +
1940 + ret = advk_pcie_wait_pio(pcie);
1941 +- if (ret < 0)
1942 ++ if (ret < 0) {
1943 ++ *val = 0xffffffff;
1944 + return PCIBIOS_SET_FAILED;
1945 ++ }
1946 +
1947 + /* Check PIO status and get the read result */
1948 + ret = advk_pcie_check_pio_status(pcie, val);
1949 +@@ -754,6 +992,7 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
1950 + struct device_node *node = dev->of_node;
1951 + struct device_node *pcie_intc_node;
1952 + struct irq_chip *irq_chip;
1953 ++ int ret = 0;
1954 +
1955 + raw_spin_lock_init(&pcie->irq_lock);
1956 +
1957 +@@ -768,8 +1007,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
1958 + irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
1959 + dev_name(dev));
1960 + if (!irq_chip->name) {
1961 +- of_node_put(pcie_intc_node);
1962 +- return -ENOMEM;
1963 ++ ret = -ENOMEM;
1964 ++ goto out_put_node;
1965 + }
1966 +
1967 + irq_chip->irq_mask = advk_pcie_irq_mask;
1968 +@@ -781,11 +1020,13 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
1969 + &advk_pcie_irq_domain_ops, pcie);
1970 + if (!pcie->irq_domain) {
1971 + dev_err(dev, "Failed to get a INTx IRQ domain\n");
1972 +- of_node_put(pcie_intc_node);
1973 +- return -ENOMEM;
1974 ++ ret = -ENOMEM;
1975 ++ goto out_put_node;
1976 + }
1977 +
1978 +- return 0;
1979 ++out_put_node:
1980 ++ of_node_put(pcie_intc_node);
1981 ++ return ret;
1982 + }
1983 +
1984 + static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
1985 +@@ -925,6 +1166,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
1986 + struct advk_pcie *pcie;
1987 + struct resource *res;
1988 + struct pci_host_bridge *bridge;
1989 ++ struct resource_entry *entry;
1990 + int ret, irq;
1991 +
1992 + bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
1993 +@@ -954,6 +1196,102 @@ static int advk_pcie_probe(struct platform_device *pdev)
1994 + return ret;
1995 + }
1996 +
1997 ++ resource_list_for_each_entry(entry, &pcie->resources) {
1998 ++ resource_size_t start = entry->res->start;
1999 ++ resource_size_t size = resource_size(entry->res);
2000 ++ unsigned long type = resource_type(entry->res);
2001 ++ u64 win_size;
2002 ++
2003 ++ /*
2004 ++ * Aardvark hardware allows to configure also PCIe window
2005 ++ * for config type 0 and type 1 mapping, but driver uses
2006 ++ * only PIO for issuing configuration transfers which does
2007 ++ * not use PCIe window configuration.
2008 ++ */
2009 ++ if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
2010 ++ type != IORESOURCE_IO)
2011 ++ continue;
2012 ++
2013 ++ /*
2014 ++ * Skip transparent memory resources. Default outbound access
2015 ++ * configuration is set to transparent memory access so it
2016 ++ * does not need window configuration.
2017 ++ */
2018 ++ if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
2019 ++ entry->offset == 0)
2020 ++ continue;
2021 ++
2022 ++ /*
2023 ++ * The n-th PCIe window is configured by tuple (match, remap, mask)
2024 ++ * and an access to address A uses this window if A matches the
2025 ++ * match with given mask.
2026 ++ * So every PCIe window size must be a power of two and every start
2027 ++ * address must be aligned to window size. Minimal size is 64 KiB
2028 ++ * because lower 16 bits of mask must be zero. Remapped address
2029 ++ * may have set only bits from the mask.
2030 ++ */
2031 ++ while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
2032 ++ /* Calculate the largest aligned window size */
2033 ++ win_size = (1ULL << (fls64(size)-1)) |
2034 ++ (start ? (1ULL << __ffs64(start)) : 0);
2035 ++ win_size = 1ULL << __ffs64(win_size);
2036 ++ if (win_size < 0x10000)
2037 ++ break;
2038 ++
2039 ++ dev_dbg(dev,
2040 ++ "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
2041 ++ pcie->wins_count, (unsigned long long)start,
2042 ++ (unsigned long long)start + win_size, type);
2043 ++
2044 ++ if (type == IORESOURCE_IO) {
2045 ++ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
2046 ++ pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
2047 ++ } else {
2048 ++ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
2049 ++ pcie->wins[pcie->wins_count].match = start;
2050 ++ }
2051 ++ pcie->wins[pcie->wins_count].remap = start - entry->offset;
2052 ++ pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
2053 ++
2054 ++ if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
2055 ++ break;
2056 ++
2057 ++ start += win_size;
2058 ++ size -= win_size;
2059 ++ pcie->wins_count++;
2060 ++ }
2061 ++
2062 ++ if (size > 0) {
2063 ++ dev_err(&pcie->pdev->dev,
2064 ++ "Invalid PCIe region [0x%llx-0x%llx]\n",
2065 ++ (unsigned long long)entry->res->start,
2066 ++ (unsigned long long)entry->res->end + 1);
2067 ++ return -EINVAL;
2068 ++ }
2069 ++ }
2070 ++
2071 ++ pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
2072 ++ "reset-gpios", 0,
2073 ++ GPIOD_OUT_LOW,
2074 ++ "pcie1-reset");
2075 ++ ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
2076 ++ if (ret) {
2077 ++ if (ret == -ENOENT) {
2078 ++ pcie->reset_gpio = NULL;
2079 ++ } else {
2080 ++ if (ret != -EPROBE_DEFER)
2081 ++ dev_err(dev, "Failed to get reset-gpio: %i\n",
2082 ++ ret);
2083 ++ return ret;
2084 ++ }
2085 ++ }
2086 ++
2087 ++ ret = of_pci_get_max_link_speed(dev->of_node);
2088 ++ if (ret <= 0 || ret > 3)
2089 ++ pcie->link_gen = 3;
2090 ++ else
2091 ++ pcie->link_gen = ret;
2092 ++
2093 + advk_pcie_setup_hw(pcie);
2094 +
2095 + ret = advk_pcie_init_irq_domain(pcie);
2096 +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
2097 +index d76ac6b4b40df..e69b84d9538a0 100644
2098 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
2099 ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
2100 +@@ -166,12 +166,16 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
2101 + PIN_GRP_GPIO("jtag", 20, 5, BIT(0), "jtag"),
2102 + PIN_GRP_GPIO("sdio0", 8, 3, BIT(1), "sdio"),
2103 + PIN_GRP_GPIO("emmc_nb", 27, 9, BIT(2), "emmc"),
2104 +- PIN_GRP_GPIO("pwm0", 11, 1, BIT(3), "pwm"),
2105 +- PIN_GRP_GPIO("pwm1", 12, 1, BIT(4), "pwm"),
2106 +- PIN_GRP_GPIO("pwm2", 13, 1, BIT(5), "pwm"),
2107 +- PIN_GRP_GPIO("pwm3", 14, 1, BIT(6), "pwm"),
2108 +- PIN_GRP_GPIO("pmic1", 17, 1, BIT(7), "pmic"),
2109 +- PIN_GRP_GPIO("pmic0", 16, 1, BIT(8), "pmic"),
2110 ++ PIN_GRP_GPIO_3("pwm0", 11, 1, BIT(3) | BIT(20), 0, BIT(20), BIT(3),
2111 ++ "pwm", "led"),
2112 ++ PIN_GRP_GPIO_3("pwm1", 12, 1, BIT(4) | BIT(21), 0, BIT(21), BIT(4),
2113 ++ "pwm", "led"),
2114 ++ PIN_GRP_GPIO_3("pwm2", 13, 1, BIT(5) | BIT(22), 0, BIT(22), BIT(5),
2115 ++ "pwm", "led"),
2116 ++ PIN_GRP_GPIO_3("pwm3", 14, 1, BIT(6) | BIT(23), 0, BIT(23), BIT(6),
2117 ++ "pwm", "led"),
2118 ++ PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
2119 ++ PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
2120 + PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
2121 + PIN_GRP_GPIO("i2c1", 0, 2, BIT(10), "i2c"),
2122 + PIN_GRP_GPIO("spi_cs1", 17, 1, BIT(12), "spi"),
2123 +@@ -183,11 +187,6 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
2124 + PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
2125 + BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
2126 + 18, 2, "gpio", "uart"),
2127 +- PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
2128 +- PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
2129 +- PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
2130 +- PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
2131 +-
2132 + };
2133 +
2134 + static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
2135 +@@ -195,8 +194,11 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
2136 + PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
2137 + PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
2138 + PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
2139 +- PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"),
2140 +- PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"),
2141 ++ PIN_GRP_GPIO("smi", 18, 2, BIT(4), "smi"),
2142 ++ PIN_GRP_GPIO("pcie1", 3, 1, BIT(5), "pcie"),
2143 ++ PIN_GRP_GPIO("pcie1_clkreq", 4, 1, BIT(9), "pcie"),
2144 ++ PIN_GRP_GPIO("pcie1_wakeup", 5, 1, BIT(10), "pcie"),
2145 ++ PIN_GRP_GPIO("ptp", 20, 3, BIT(11) | BIT(12) | BIT(13), "ptp"),
2146 + PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
2147 + PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
2148 + PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),
2149 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2150 +index add699b01836f..d899f216245e5 100644
2151 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2152 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2153 +@@ -3364,7 +3364,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
2154 +
2155 + shost_for_each_device(sdev, ioc->shost) {
2156 + sas_device_priv_data = sdev->hostdata;
2157 +- if (!sas_device_priv_data)
2158 ++ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
2159 + continue;
2160 + if (sas_device_priv_data->sas_target->sas_address
2161 + != sas_address)
2162 +diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2163 +index 7cdced0b0581e..da73998bc5f70 100644
2164 +--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2165 ++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2166 +@@ -2579,13 +2579,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
2167 + free_irq(dev->irq, dev);
2168 + priv->irq = 0;
2169 + }
2170 +- free_rtllib(dev);
2171 +
2172 + if (dev->mem_start != 0) {
2173 + iounmap((void __iomem *)dev->mem_start);
2174 + release_mem_region(pci_resource_start(pdev, 1),
2175 + pci_resource_len(pdev, 1));
2176 + }
2177 ++
2178 ++ free_rtllib(dev);
2179 + } else {
2180 + priv = rtllib_priv(dev);
2181 + }
2182 +diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
2183 +index dc43fa96c3de7..7874aaf30ef48 100644
2184 +--- a/drivers/tty/hvc/hvc_xen.c
2185 ++++ b/drivers/tty/hvc/hvc_xen.c
2186 +@@ -86,7 +86,11 @@ static int __write_console(struct xencons_info *xencons,
2187 + cons = intf->out_cons;
2188 + prod = intf->out_prod;
2189 + mb(); /* update queue values before going on */
2190 +- BUG_ON((prod - cons) > sizeof(intf->out));
2191 ++
2192 ++ if ((prod - cons) > sizeof(intf->out)) {
2193 ++ pr_err_once("xencons: Illegal ring page indices");
2194 ++ return -EINVAL;
2195 ++ }
2196 +
2197 + while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
2198 + intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
2199 +@@ -114,7 +118,10 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
2200 + */
2201 + while (len) {
2202 + int sent = __write_console(cons, data, len);
2203 +-
2204 ++
2205 ++ if (sent < 0)
2206 ++ return sent;
2207 ++
2208 + data += sent;
2209 + len -= sent;
2210 +
2211 +@@ -138,7 +145,11 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
2212 + cons = intf->in_cons;
2213 + prod = intf->in_prod;
2214 + mb(); /* get pointers before reading ring */
2215 +- BUG_ON((prod - cons) > sizeof(intf->in));
2216 ++
2217 ++ if ((prod - cons) > sizeof(intf->in)) {
2218 ++ pr_err_once("xencons: Illegal ring page indices");
2219 ++ return -EINVAL;
2220 ++ }
2221 +
2222 + while (cons != prod && recv < len)
2223 + buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
2224 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2225 +index f1a11032a0a01..73ad4af487039 100644
2226 +--- a/drivers/usb/core/hub.c
2227 ++++ b/drivers/usb/core/hub.c
2228 +@@ -4575,8 +4575,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
2229 + if (oldspeed == USB_SPEED_LOW)
2230 + delay = HUB_LONG_RESET_TIME;
2231 +
2232 +- mutex_lock(hcd->address0_mutex);
2233 +-
2234 + /* Reset the device; full speed may morph to high speed */
2235 + /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
2236 + retval = hub_port_reset(hub, port1, udev, delay, false);
2237 +@@ -4891,7 +4889,6 @@ fail:
2238 + hub_port_disable(hub, port1, 0);
2239 + update_devnum(udev, devnum); /* for disconnect processing */
2240 + }
2241 +- mutex_unlock(hcd->address0_mutex);
2242 + return retval;
2243 + }
2244 +
2245 +@@ -4981,6 +4978,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2246 + struct usb_port *port_dev = hub->ports[port1 - 1];
2247 + struct usb_device *udev = port_dev->child;
2248 + static int unreliable_port = -1;
2249 ++ bool retry_locked;
2250 +
2251 + /* Disconnect any existing devices under this port */
2252 + if (udev) {
2253 +@@ -5036,7 +5034,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2254 + unit_load = 100;
2255 +
2256 + status = 0;
2257 ++
2258 + for (i = 0; i < SET_CONFIG_TRIES; i++) {
2259 ++ usb_lock_port(port_dev);
2260 ++ mutex_lock(hcd->address0_mutex);
2261 ++ retry_locked = true;
2262 +
2263 + /* reallocate for each attempt, since references
2264 + * to the previous one can escape in various ways
2265 +@@ -5045,6 +5047,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2266 + if (!udev) {
2267 + dev_err(&port_dev->dev,
2268 + "couldn't allocate usb_device\n");
2269 ++ mutex_unlock(hcd->address0_mutex);
2270 ++ usb_unlock_port(port_dev);
2271 + goto done;
2272 + }
2273 +
2274 +@@ -5066,12 +5070,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2275 + }
2276 +
2277 + /* reset (non-USB 3.0 devices) and get descriptor */
2278 +- usb_lock_port(port_dev);
2279 + status = hub_port_init(hub, udev, port1, i);
2280 +- usb_unlock_port(port_dev);
2281 + if (status < 0)
2282 + goto loop;
2283 +
2284 ++ mutex_unlock(hcd->address0_mutex);
2285 ++ usb_unlock_port(port_dev);
2286 ++ retry_locked = false;
2287 ++
2288 + if (udev->quirks & USB_QUIRK_DELAY_INIT)
2289 + msleep(2000);
2290 +
2291 +@@ -5164,6 +5170,10 @@ loop:
2292 + usb_ep0_reinit(udev);
2293 + release_devnum(udev);
2294 + hub_free_dev(udev);
2295 ++ if (retry_locked) {
2296 ++ mutex_unlock(hcd->address0_mutex);
2297 ++ usb_unlock_port(port_dev);
2298 ++ }
2299 + usb_put_dev(udev);
2300 + if ((status == -ENOTCONN) || (status == -ENOTSUPP))
2301 + break;
2302 +@@ -5722,6 +5732,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
2303 + bos = udev->bos;
2304 + udev->bos = NULL;
2305 +
2306 ++ mutex_lock(hcd->address0_mutex);
2307 ++
2308 + for (i = 0; i < SET_CONFIG_TRIES; ++i) {
2309 +
2310 + /* ep0 maxpacket size may change; let the HCD know about it.
2311 +@@ -5731,6 +5743,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
2312 + if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
2313 + break;
2314 + }
2315 ++ mutex_unlock(hcd->address0_mutex);
2316 +
2317 + if (ret < 0)
2318 + goto re_enumerate;
2319 +diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
2320 +index 68bbac64b7536..94af71e9856f2 100644
2321 +--- a/drivers/usb/dwc2/hcd_queue.c
2322 ++++ b/drivers/usb/dwc2/hcd_queue.c
2323 +@@ -59,7 +59,7 @@
2324 + #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
2325 +
2326 + /* If we get a NAK, wait this long before retrying */
2327 +-#define DWC2_RETRY_WAIT_DELAY 1*1E6L
2328 ++#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
2329 +
2330 + /**
2331 + * dwc2_periodic_channel_available() - Checks that a channel is available for a
2332 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2333 +index eaf118ee2a865..818097e86cb58 100644
2334 +--- a/drivers/usb/serial/option.c
2335 ++++ b/drivers/usb/serial/option.c
2336 +@@ -1267,6 +1267,8 @@ static const struct usb_device_id option_ids[] = {
2337 + .driver_info = NCTRL(2) },
2338 + { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
2339 + .driver_info = NCTRL(0) | ZLP },
2340 ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
2341 ++ .driver_info = NCTRL(0) | ZLP },
2342 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
2343 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
2344 + .driver_info = RSVD(1) },
2345 +@@ -2094,6 +2096,9 @@ static const struct usb_device_id option_ids[] = {
2346 + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
2347 + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
2348 + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
2349 ++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
2350 ++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
2351 ++ .driver_info = RSVD(4) },
2352 + { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
2353 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
2354 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
2355 +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
2356 +index 6ee320259e4f7..d61abf569dc1d 100644
2357 +--- a/drivers/vhost/vsock.c
2358 ++++ b/drivers/vhost/vsock.c
2359 +@@ -490,7 +490,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
2360 + virtio_transport_free_pkt(pkt);
2361 +
2362 + len += sizeof(pkt->hdr);
2363 +- vhost_add_used(vq, head, len);
2364 ++ vhost_add_used(vq, head, 0);
2365 + total_len += len;
2366 + added = true;
2367 + } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
2368 +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
2369 +index 652894d619677..b911a91bce6b7 100644
2370 +--- a/drivers/xen/xenbus/xenbus_probe.c
2371 ++++ b/drivers/xen/xenbus/xenbus_probe.c
2372 +@@ -846,7 +846,7 @@ static struct notifier_block xenbus_resume_nb = {
2373 +
2374 + static int __init xenbus_init(void)
2375 + {
2376 +- int err = 0;
2377 ++ int err;
2378 + uint64_t v = 0;
2379 + xen_store_domain_type = XS_UNKNOWN;
2380 +
2381 +@@ -886,6 +886,29 @@ static int __init xenbus_init(void)
2382 + err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
2383 + if (err)
2384 + goto out_error;
2385 ++ /*
2386 ++ * Uninitialized hvm_params are zero and return no error.
2387 ++ * Although it is theoretically possible to have
2388 ++ * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
2389 ++ * not zero when valid. If zero, it means that Xenstore hasn't
2390 ++ * been properly initialized. Instead of attempting to map a
2391 ++ * wrong guest physical address return error.
2392 ++ *
2393 ++ * Also recognize all bits set as an invalid value.
2394 ++ */
2395 ++ if (!v || !~v) {
2396 ++ err = -ENOENT;
2397 ++ goto out_error;
2398 ++ }
2399 ++ /* Avoid truncation on 32-bit. */
2400 ++#if BITS_PER_LONG == 32
2401 ++ if (v > ULONG_MAX) {
2402 ++ pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
2403 ++ __func__, v);
2404 ++ err = -EINVAL;
2405 ++ goto out_error;
2406 ++ }
2407 ++#endif
2408 + xen_store_gfn = (unsigned long)v;
2409 + xen_store_interface =
2410 + xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
2411 +@@ -920,8 +943,10 @@ static int __init xenbus_init(void)
2412 + */
2413 + proc_create_mount_point("xen");
2414 + #endif
2415 ++ return 0;
2416 +
2417 + out_error:
2418 ++ xen_store_domain_type = XS_UNKNOWN;
2419 + return err;
2420 + }
2421 +
2422 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
2423 +index 8b22f8705dd48..d1dc545302528 100644
2424 +--- a/fs/fuse/dev.c
2425 ++++ b/fs/fuse/dev.c
2426 +@@ -910,6 +910,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
2427 + if (!(buf->flags & PIPE_BUF_FLAG_LRU))
2428 + lru_cache_add_file(newpage);
2429 +
2430 ++ /*
2431 ++ * Release while we have extra ref on stolen page. Otherwise
2432 ++ * anon_pipe_buf_release() might think the page can be reused.
2433 ++ */
2434 ++ pipe_buf_release(cs->pipe, buf);
2435 ++
2436 + err = 0;
2437 + spin_lock(&cs->req->waitq.lock);
2438 + if (test_bit(FR_ABORTED, &cs->req->flags))
2439 +@@ -2054,8 +2060,12 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
2440 +
2441 + pipe_lock(pipe);
2442 + out_free:
2443 +- for (idx = 0; idx < nbuf; idx++)
2444 +- pipe_buf_release(pipe, &bufs[idx]);
2445 ++ for (idx = 0; idx < nbuf; idx++) {
2446 ++ struct pipe_buffer *buf = &bufs[idx];
2447 ++
2448 ++ if (buf->ops)
2449 ++ pipe_buf_release(pipe, buf);
2450 ++ }
2451 + pipe_unlock(pipe);
2452 +
2453 + kvfree(bufs);
2454 +diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
2455 +index ec9803088f6b8..eee011de3f58b 100644
2456 +--- a/fs/nfs/nfs42xdr.c
2457 ++++ b/fs/nfs/nfs42xdr.c
2458 +@@ -707,8 +707,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
2459 + status = decode_clone(xdr);
2460 + if (status)
2461 + goto out;
2462 +- status = decode_getfattr(xdr, res->dst_fattr, res->server);
2463 +-
2464 ++ decode_getfattr(xdr, res->dst_fattr, res->server);
2465 + out:
2466 + res->rpc_status = status;
2467 + return status;
2468 +diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
2469 +index c4147e50af98a..f5dfedc015520 100644
2470 +--- a/fs/proc/vmcore.c
2471 ++++ b/fs/proc/vmcore.c
2472 +@@ -117,14 +117,19 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
2473 + nr_bytes = count;
2474 +
2475 + /* If pfn is not ram, return zeros for sparse dump files */
2476 +- if (pfn_is_ram(pfn) == 0)
2477 +- memset(buf, 0, nr_bytes);
2478 +- else {
2479 ++ if (pfn_is_ram(pfn) == 0) {
2480 ++ tmp = 0;
2481 ++ if (!userbuf)
2482 ++ memset(buf, 0, nr_bytes);
2483 ++ else if (clear_user(buf, nr_bytes))
2484 ++ tmp = -EFAULT;
2485 ++ } else {
2486 + tmp = copy_oldmem_page(pfn, buf, nr_bytes,
2487 + offset, userbuf);
2488 +- if (tmp < 0)
2489 +- return tmp;
2490 + }
2491 ++ if (tmp < 0)
2492 ++ return tmp;
2493 ++
2494 + *ppos += nr_bytes;
2495 + count -= nr_bytes;
2496 + buf += nr_bytes;
2497 +diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
2498 +index b3353e21f3b3e..db72ad39853b9 100644
2499 +--- a/include/asm-generic/tlb.h
2500 ++++ b/include/asm-generic/tlb.h
2501 +@@ -118,6 +118,8 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
2502 + void tlb_flush_mmu(struct mmu_gather *tlb);
2503 + void arch_tlb_finish_mmu(struct mmu_gather *tlb,
2504 + unsigned long start, unsigned long end, bool force);
2505 ++void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
2506 ++ unsigned long size);
2507 + extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
2508 + int page_size);
2509 +
2510 +diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
2511 +index e67564af6f934..1560ce548add5 100644
2512 +--- a/include/net/nfc/nci_core.h
2513 ++++ b/include/net/nfc/nci_core.h
2514 +@@ -42,6 +42,7 @@ enum nci_flag {
2515 + NCI_UP,
2516 + NCI_DATA_EXCHANGE,
2517 + NCI_DATA_EXCHANGE_TO,
2518 ++ NCI_UNREG,
2519 + };
2520 +
2521 + /* NCI device states */
2522 +diff --git a/include/net/nl802154.h b/include/net/nl802154.h
2523 +index ddcee128f5d9a..145acb8f25095 100644
2524 +--- a/include/net/nl802154.h
2525 ++++ b/include/net/nl802154.h
2526 +@@ -19,6 +19,8 @@
2527 + *
2528 + */
2529 +
2530 ++#include <linux/types.h>
2531 ++
2532 + #define NL802154_GENL_NAME "nl802154"
2533 +
2534 + enum nl802154_commands {
2535 +@@ -150,10 +152,9 @@ enum nl802154_attrs {
2536 + };
2537 +
2538 + enum nl802154_iftype {
2539 +- /* for backwards compatibility TODO */
2540 +- NL802154_IFTYPE_UNSPEC = -1,
2541 ++ NL802154_IFTYPE_UNSPEC = (~(__u32)0),
2542 +
2543 +- NL802154_IFTYPE_NODE,
2544 ++ NL802154_IFTYPE_NODE = 0,
2545 + NL802154_IFTYPE_MONITOR,
2546 + NL802154_IFTYPE_COORD,
2547 +
2548 +diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
2549 +index 3f40501fc60b1..b39cdbc522ec7 100644
2550 +--- a/include/xen/interface/io/ring.h
2551 ++++ b/include/xen/interface/io/ring.h
2552 +@@ -1,21 +1,53 @@
2553 +-/* SPDX-License-Identifier: GPL-2.0 */
2554 + /******************************************************************************
2555 + * ring.h
2556 + *
2557 + * Shared producer-consumer ring macros.
2558 + *
2559 ++ * Permission is hereby granted, free of charge, to any person obtaining a copy
2560 ++ * of this software and associated documentation files (the "Software"), to
2561 ++ * deal in the Software without restriction, including without limitation the
2562 ++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2563 ++ * sell copies of the Software, and to permit persons to whom the Software is
2564 ++ * furnished to do so, subject to the following conditions:
2565 ++ *
2566 ++ * The above copyright notice and this permission notice shall be included in
2567 ++ * all copies or substantial portions of the Software.
2568 ++ *
2569 ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2570 ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2571 ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2572 ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2573 ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2574 ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2575 ++ * DEALINGS IN THE SOFTWARE.
2576 ++ *
2577 + * Tim Deegan and Andrew Warfield November 2004.
2578 + */
2579 +
2580 + #ifndef __XEN_PUBLIC_IO_RING_H__
2581 + #define __XEN_PUBLIC_IO_RING_H__
2582 +
2583 ++/*
2584 ++ * When #include'ing this header, you need to provide the following
2585 ++ * declaration upfront:
2586 ++ * - standard integers types (uint8_t, uint16_t, etc)
2587 ++ * They are provided by stdint.h of the standard headers.
2588 ++ *
2589 ++ * In addition, if you intend to use the FLEX macros, you also need to
2590 ++ * provide the following, before invoking the FLEX macros:
2591 ++ * - size_t
2592 ++ * - memcpy
2593 ++ * - grant_ref_t
2594 ++ * These declarations are provided by string.h of the standard headers,
2595 ++ * and grant_table.h from the Xen public headers.
2596 ++ */
2597 ++
2598 + #include <xen/interface/grant_table.h>
2599 +
2600 + typedef unsigned int RING_IDX;
2601 +
2602 + /* Round a 32-bit unsigned constant down to the nearest power of two. */
2603 +-#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
2604 ++#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
2605 + #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
2606 + #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
2607 + #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
2608 +@@ -27,82 +59,79 @@ typedef unsigned int RING_IDX;
2609 + * A ring contains as many entries as will fit, rounded down to the nearest
2610 + * power of two (so we can mask with (size-1) to loop around).
2611 + */
2612 +-#define __CONST_RING_SIZE(_s, _sz) \
2613 +- (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
2614 +- sizeof(((struct _s##_sring *)0)->ring[0])))
2615 +-
2616 ++#define __CONST_RING_SIZE(_s, _sz) \
2617 ++ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
2618 ++ sizeof(((struct _s##_sring *)0)->ring[0])))
2619 + /*
2620 + * The same for passing in an actual pointer instead of a name tag.
2621 + */
2622 +-#define __RING_SIZE(_s, _sz) \
2623 +- (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
2624 ++#define __RING_SIZE(_s, _sz) \
2625 ++ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
2626 +
2627 + /*
2628 + * Macros to make the correct C datatypes for a new kind of ring.
2629 + *
2630 + * To make a new ring datatype, you need to have two message structures,
2631 +- * let's say struct request, and struct response already defined.
2632 ++ * let's say request_t, and response_t already defined.
2633 + *
2634 + * In a header where you want the ring datatype declared, you then do:
2635 + *
2636 +- * DEFINE_RING_TYPES(mytag, struct request, struct response);
2637 ++ * DEFINE_RING_TYPES(mytag, request_t, response_t);
2638 + *
2639 + * These expand out to give you a set of types, as you can see below.
2640 + * The most important of these are:
2641 + *
2642 +- * struct mytag_sring - The shared ring.
2643 +- * struct mytag_front_ring - The 'front' half of the ring.
2644 +- * struct mytag_back_ring - The 'back' half of the ring.
2645 ++ * mytag_sring_t - The shared ring.
2646 ++ * mytag_front_ring_t - The 'front' half of the ring.
2647 ++ * mytag_back_ring_t - The 'back' half of the ring.
2648 + *
2649 + * To initialize a ring in your code you need to know the location and size
2650 + * of the shared memory area (PAGE_SIZE, for instance). To initialise
2651 + * the front half:
2652 + *
2653 +- * struct mytag_front_ring front_ring;
2654 +- * SHARED_RING_INIT((struct mytag_sring *)shared_page);
2655 +- * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
2656 +- * PAGE_SIZE);
2657 ++ * mytag_front_ring_t front_ring;
2658 ++ * SHARED_RING_INIT((mytag_sring_t *)shared_page);
2659 ++ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
2660 + *
2661 + * Initializing the back follows similarly (note that only the front
2662 + * initializes the shared ring):
2663 + *
2664 +- * struct mytag_back_ring back_ring;
2665 +- * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
2666 +- * PAGE_SIZE);
2667 ++ * mytag_back_ring_t back_ring;
2668 ++ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
2669 + */
2670 +
2671 +-#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
2672 +- \
2673 +-/* Shared ring entry */ \
2674 +-union __name##_sring_entry { \
2675 +- __req_t req; \
2676 +- __rsp_t rsp; \
2677 +-}; \
2678 +- \
2679 +-/* Shared ring page */ \
2680 +-struct __name##_sring { \
2681 +- RING_IDX req_prod, req_event; \
2682 +- RING_IDX rsp_prod, rsp_event; \
2683 +- uint8_t pad[48]; \
2684 +- union __name##_sring_entry ring[1]; /* variable-length */ \
2685 +-}; \
2686 +- \
2687 +-/* "Front" end's private variables */ \
2688 +-struct __name##_front_ring { \
2689 +- RING_IDX req_prod_pvt; \
2690 +- RING_IDX rsp_cons; \
2691 +- unsigned int nr_ents; \
2692 +- struct __name##_sring *sring; \
2693 +-}; \
2694 +- \
2695 +-/* "Back" end's private variables */ \
2696 +-struct __name##_back_ring { \
2697 +- RING_IDX rsp_prod_pvt; \
2698 +- RING_IDX req_cons; \
2699 +- unsigned int nr_ents; \
2700 +- struct __name##_sring *sring; \
2701 +-};
2702 +-
2703 ++#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
2704 ++ \
2705 ++/* Shared ring entry */ \
2706 ++union __name##_sring_entry { \
2707 ++ __req_t req; \
2708 ++ __rsp_t rsp; \
2709 ++}; \
2710 ++ \
2711 ++/* Shared ring page */ \
2712 ++struct __name##_sring { \
2713 ++ RING_IDX req_prod, req_event; \
2714 ++ RING_IDX rsp_prod, rsp_event; \
2715 ++ uint8_t __pad[48]; \
2716 ++ union __name##_sring_entry ring[1]; /* variable-length */ \
2717 ++}; \
2718 ++ \
2719 ++/* "Front" end's private variables */ \
2720 ++struct __name##_front_ring { \
2721 ++ RING_IDX req_prod_pvt; \
2722 ++ RING_IDX rsp_cons; \
2723 ++ unsigned int nr_ents; \
2724 ++ struct __name##_sring *sring; \
2725 ++}; \
2726 ++ \
2727 ++/* "Back" end's private variables */ \
2728 ++struct __name##_back_ring { \
2729 ++ RING_IDX rsp_prod_pvt; \
2730 ++ RING_IDX req_cons; \
2731 ++ unsigned int nr_ents; \
2732 ++ struct __name##_sring *sring; \
2733 ++}; \
2734 ++ \
2735 + /*
2736 + * Macros for manipulating rings.
2737 + *
2738 +@@ -119,105 +148,99 @@ struct __name##_back_ring { \
2739 + */
2740 +
2741 + /* Initialising empty rings */
2742 +-#define SHARED_RING_INIT(_s) do { \
2743 +- (_s)->req_prod = (_s)->rsp_prod = 0; \
2744 +- (_s)->req_event = (_s)->rsp_event = 1; \
2745 +- memset((_s)->pad, 0, sizeof((_s)->pad)); \
2746 ++#define SHARED_RING_INIT(_s) do { \
2747 ++ (_s)->req_prod = (_s)->rsp_prod = 0; \
2748 ++ (_s)->req_event = (_s)->rsp_event = 1; \
2749 ++ (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
2750 + } while(0)
2751 +
2752 +-#define FRONT_RING_INIT(_r, _s, __size) do { \
2753 +- (_r)->req_prod_pvt = 0; \
2754 +- (_r)->rsp_cons = 0; \
2755 +- (_r)->nr_ents = __RING_SIZE(_s, __size); \
2756 +- (_r)->sring = (_s); \
2757 ++#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
2758 ++ (_r)->req_prod_pvt = (_i); \
2759 ++ (_r)->rsp_cons = (_i); \
2760 ++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
2761 ++ (_r)->sring = (_s); \
2762 + } while (0)
2763 +
2764 +-#define BACK_RING_INIT(_r, _s, __size) do { \
2765 +- (_r)->rsp_prod_pvt = 0; \
2766 +- (_r)->req_cons = 0; \
2767 +- (_r)->nr_ents = __RING_SIZE(_s, __size); \
2768 +- (_r)->sring = (_s); \
2769 +-} while (0)
2770 ++#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
2771 +
2772 +-/* Initialize to existing shared indexes -- for recovery */
2773 +-#define FRONT_RING_ATTACH(_r, _s, __size) do { \
2774 +- (_r)->sring = (_s); \
2775 +- (_r)->req_prod_pvt = (_s)->req_prod; \
2776 +- (_r)->rsp_cons = (_s)->rsp_prod; \
2777 +- (_r)->nr_ents = __RING_SIZE(_s, __size); \
2778 ++#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
2779 ++ (_r)->rsp_prod_pvt = (_i); \
2780 ++ (_r)->req_cons = (_i); \
2781 ++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
2782 ++ (_r)->sring = (_s); \
2783 + } while (0)
2784 +
2785 +-#define BACK_RING_ATTACH(_r, _s, __size) do { \
2786 +- (_r)->sring = (_s); \
2787 +- (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
2788 +- (_r)->req_cons = (_s)->req_prod; \
2789 +- (_r)->nr_ents = __RING_SIZE(_s, __size); \
2790 +-} while (0)
2791 ++#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
2792 +
2793 + /* How big is this ring? */
2794 +-#define RING_SIZE(_r) \
2795 ++#define RING_SIZE(_r) \
2796 + ((_r)->nr_ents)
2797 +
2798 + /* Number of free requests (for use on front side only). */
2799 +-#define RING_FREE_REQUESTS(_r) \
2800 ++#define RING_FREE_REQUESTS(_r) \
2801 + (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
2802 +
2803 + /* Test if there is an empty slot available on the front ring.
2804 + * (This is only meaningful from the front. )
2805 + */
2806 +-#define RING_FULL(_r) \
2807 ++#define RING_FULL(_r) \
2808 + (RING_FREE_REQUESTS(_r) == 0)
2809 +
2810 + /* Test if there are outstanding messages to be processed on a ring. */
2811 +-#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
2812 ++#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
2813 + ((_r)->sring->rsp_prod - (_r)->rsp_cons)
2814 +
2815 +-#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
2816 +- ({ \
2817 +- unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
2818 +- unsigned int rsp = RING_SIZE(_r) - \
2819 +- ((_r)->req_cons - (_r)->rsp_prod_pvt); \
2820 +- req < rsp ? req : rsp; \
2821 +- })
2822 ++#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
2823 ++ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
2824 ++ unsigned int rsp = RING_SIZE(_r) - \
2825 ++ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
2826 ++ req < rsp ? req : rsp; \
2827 ++})
2828 +
2829 + /* Direct access to individual ring elements, by index. */
2830 +-#define RING_GET_REQUEST(_r, _idx) \
2831 ++#define RING_GET_REQUEST(_r, _idx) \
2832 + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
2833 +
2834 ++#define RING_GET_RESPONSE(_r, _idx) \
2835 ++ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
2836 ++
2837 + /*
2838 +- * Get a local copy of a request.
2839 ++ * Get a local copy of a request/response.
2840 + *
2841 +- * Use this in preference to RING_GET_REQUEST() so all processing is
2842 ++ * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
2843 + * done on a local copy that cannot be modified by the other end.
2844 + *
2845 + * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
2846 +- * to be ineffective where _req is a struct which consists of only bitfields.
2847 ++ * to be ineffective where dest is a struct which consists of only bitfields.
2848 + */
2849 +-#define RING_COPY_REQUEST(_r, _idx, _req) do { \
2850 +- /* Use volatile to force the copy into _req. */ \
2851 +- *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
2852 ++#define RING_COPY_(type, r, idx, dest) do { \
2853 ++ /* Use volatile to force the copy into dest. */ \
2854 ++ *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
2855 + } while (0)
2856 +
2857 +-#define RING_GET_RESPONSE(_r, _idx) \
2858 +- (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
2859 ++#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
2860 ++#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
2861 +
2862 + /* Loop termination condition: Would the specified index overflow the ring? */
2863 +-#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
2864 ++#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
2865 + (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
2866 +
2867 + /* Ill-behaved frontend determination: Can there be this many requests? */
2868 +-#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
2869 ++#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
2870 + (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
2871 +
2872 ++/* Ill-behaved backend determination: Can there be this many responses? */
2873 ++#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
2874 ++ (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
2875 +
2876 +-#define RING_PUSH_REQUESTS(_r) do { \
2877 +- virt_wmb(); /* back sees requests /before/ updated producer index */ \
2878 +- (_r)->sring->req_prod = (_r)->req_prod_pvt; \
2879 ++#define RING_PUSH_REQUESTS(_r) do { \
2880 ++ virt_wmb(); /* back sees requests /before/ updated producer index */\
2881 ++ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
2882 + } while (0)
2883 +
2884 +-#define RING_PUSH_RESPONSES(_r) do { \
2885 +- virt_wmb(); /* front sees responses /before/ updated producer index */ \
2886 +- (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
2887 ++#define RING_PUSH_RESPONSES(_r) do { \
2888 ++ virt_wmb(); /* front sees resps /before/ updated producer index */ \
2889 ++ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
2890 + } while (0)
2891 +
2892 + /*
2893 +@@ -250,40 +273,40 @@ struct __name##_back_ring { \
2894 + * field appropriately.
2895 + */
2896 +
2897 +-#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
2898 +- RING_IDX __old = (_r)->sring->req_prod; \
2899 +- RING_IDX __new = (_r)->req_prod_pvt; \
2900 +- virt_wmb(); /* back sees requests /before/ updated producer index */ \
2901 +- (_r)->sring->req_prod = __new; \
2902 +- virt_mb(); /* back sees new requests /before/ we check req_event */ \
2903 +- (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
2904 +- (RING_IDX)(__new - __old)); \
2905 ++#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
2906 ++ RING_IDX __old = (_r)->sring->req_prod; \
2907 ++ RING_IDX __new = (_r)->req_prod_pvt; \
2908 ++ virt_wmb(); /* back sees requests /before/ updated producer index */\
2909 ++ (_r)->sring->req_prod = __new; \
2910 ++ virt_mb(); /* back sees new requests /before/ we check req_event */ \
2911 ++ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
2912 ++ (RING_IDX)(__new - __old)); \
2913 + } while (0)
2914 +
2915 +-#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
2916 +- RING_IDX __old = (_r)->sring->rsp_prod; \
2917 +- RING_IDX __new = (_r)->rsp_prod_pvt; \
2918 +- virt_wmb(); /* front sees responses /before/ updated producer index */ \
2919 +- (_r)->sring->rsp_prod = __new; \
2920 +- virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
2921 +- (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
2922 +- (RING_IDX)(__new - __old)); \
2923 ++#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
2924 ++ RING_IDX __old = (_r)->sring->rsp_prod; \
2925 ++ RING_IDX __new = (_r)->rsp_prod_pvt; \
2926 ++ virt_wmb(); /* front sees resps /before/ updated producer index */ \
2927 ++ (_r)->sring->rsp_prod = __new; \
2928 ++ virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
2929 ++ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
2930 ++ (RING_IDX)(__new - __old)); \
2931 + } while (0)
2932 +
2933 +-#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
2934 +- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
2935 +- if (_work_to_do) break; \
2936 +- (_r)->sring->req_event = (_r)->req_cons + 1; \
2937 +- virt_mb(); \
2938 +- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
2939 ++#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
2940 ++ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
2941 ++ if (_work_to_do) break; \
2942 ++ (_r)->sring->req_event = (_r)->req_cons + 1; \
2943 ++ virt_mb(); \
2944 ++ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
2945 + } while (0)
2946 +
2947 +-#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
2948 +- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
2949 +- if (_work_to_do) break; \
2950 +- (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
2951 +- virt_mb(); \
2952 +- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
2953 ++#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
2954 ++ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
2955 ++ if (_work_to_do) break; \
2956 ++ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
2957 ++ virt_mb(); \
2958 ++ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
2959 + } while (0)
2960 +
2961 +
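The RING_PUSH_*_AND_CHECK_NOTIFY macros above decide whether the other end needs an event by testing whether its event index falls inside the window of entries just published, using unsigned arithmetic so the test stays correct across index wrap-around. The minimal user-space sketch below is illustrative only; ring_idx_t and need_notify() are hypothetical names, not part of the Xen headers.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t ring_idx_t;    /* stand-in for RING_IDX */

/*
 * Notify only if the peer's event index lies in (old, new], i.e. the peer
 * asked to be woken for an index we have just published.  The subtraction
 * is done in unsigned arithmetic so it stays correct when the indices wrap.
 */
static int need_notify(ring_idx_t old, ring_idx_t new, ring_idx_t event)
{
        return (ring_idx_t)(new - event) < (ring_idx_t)(new - old);
}

int main(void)
{
        /* peer waits for index 5; the producer moved from 3 to 7 -> notify */
        printf("%d\n", need_notify(3, 7, 5));              /* 1 */
        /* peer waits for index 9; only index 7 was published -> no notify  */
        printf("%d\n", need_notify(3, 7, 9));              /* 0 */
        /* same test across the 2^32 wrap point                             */
        printf("%d\n", need_notify(0xfffffffeu, 2u, 0u));  /* 1 */
        return 0;
}

Compiled with any C compiler, the three checks print 1, 0 and 1: the peer is notified only when req_event/rsp_event landed in the half-open interval (old, new], including when the 32-bit index has wrapped.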
2962 +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
2963 +index 28db51274ed0e..6670a44ec5d45 100644
2964 +--- a/kernel/power/hibernate.c
2965 ++++ b/kernel/power/hibernate.c
2966 +@@ -677,7 +677,7 @@ static int load_image_and_restore(void)
2967 + goto Unlock;
2968 +
2969 + error = swsusp_read(&flags);
2970 +- swsusp_close(FMODE_READ);
2971 ++ swsusp_close(FMODE_READ | FMODE_EXCL);
2972 + if (!error)
2973 + hibernation_restore(flags & SF_PLATFORM_MODE);
2974 +
2975 +@@ -874,7 +874,7 @@ static int software_resume(void)
2976 + /* The snapshot device should not be opened while we're running */
2977 + if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
2978 + error = -EBUSY;
2979 +- swsusp_close(FMODE_READ);
2980 ++ swsusp_close(FMODE_READ | FMODE_EXCL);
2981 + goto Unlock;
2982 + }
2983 +
2984 +@@ -910,7 +910,7 @@ static int software_resume(void)
2985 + pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
2986 + return error;
2987 + Close_Finish:
2988 +- swsusp_close(FMODE_READ);
2989 ++ swsusp_close(FMODE_READ | FMODE_EXCL);
2990 + goto Finish;
2991 + }
2992 +
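The three hibernate hunks pass FMODE_READ | FMODE_EXCL to swsusp_close() so the close mode matches the mode the resume device was opened with; an exclusive claim on a block device is only released when the same FMODE_EXCL is handed back at close time. The sketch below is a hedged, user-space illustration of that pairing discipline with invented names (dev_open/dev_close, MODE_EXCL); it is not the block-layer API.

#include <stdio.h>

#define MODE_READ  0x1
#define MODE_EXCL  0x2          /* exclusive claim, must be released explicitly */

static int excl_holders;        /* stand-in for the block layer's holder count */

static void dev_open(unsigned mode)
{
        if (mode & MODE_EXCL)
                excl_holders++;
}

static void dev_close(unsigned mode)
{
        /* the claim is only dropped when the close mode matches the open mode */
        if (mode & MODE_EXCL)
                excl_holders--;
}

int main(void)
{
        dev_open(MODE_READ | MODE_EXCL);
        dev_close(MODE_READ);                  /* mismatched: claim is leaked   */
        printf("leaked holders: %d\n", excl_holders);   /* 1 */

        dev_open(MODE_READ | MODE_EXCL);
        dev_close(MODE_READ | MODE_EXCL);      /* matched: claim released       */
        printf("leaked holders: %d\n", excl_holders);   /* still 1, only the first leak */
        return 0;
}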
2993 +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
2994 +index 2c4068d8776ea..74185fb040f33 100644
2995 +--- a/kernel/trace/trace.h
2996 ++++ b/kernel/trace/trace.h
2997 +@@ -1365,14 +1365,26 @@ __event_trigger_test_discard(struct trace_event_file *file,
2998 + if (eflags & EVENT_FILE_FL_TRIGGER_COND)
2999 + *tt = event_triggers_call(file, entry, event);
3000 +
3001 +- if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
3002 +- (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
3003 +- !filter_match_preds(file->filter, entry))) {
3004 +- __trace_event_discard_commit(buffer, event);
3005 +- return true;
3006 +- }
3007 ++ if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
3008 ++ EVENT_FILE_FL_FILTERED |
3009 ++ EVENT_FILE_FL_PID_FILTER))))
3010 ++ return false;
3011 ++
3012 ++ if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
3013 ++ goto discard;
3014 ++
3015 ++ if (file->flags & EVENT_FILE_FL_FILTERED &&
3016 ++ !filter_match_preds(file->filter, entry))
3017 ++ goto discard;
3018 ++
3019 ++ if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
3020 ++ trace_event_ignore_this_pid(file))
3021 ++ goto discard;
3022 +
3023 + return false;
3024 ++ discard:
3025 ++ __trace_event_discard_commit(buffer, event);
3026 ++ return true;
3027 + }
3028 +
3029 + /**
3030 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
3031 +index ea43be6b9cc3c..1ca64a9296d0d 100644
3032 +--- a/kernel/trace/trace_events.c
3033 ++++ b/kernel/trace/trace_events.c
3034 +@@ -2255,12 +2255,19 @@ static struct trace_event_file *
3035 + trace_create_new_event(struct trace_event_call *call,
3036 + struct trace_array *tr)
3037 + {
3038 ++ struct trace_pid_list *pid_list;
3039 + struct trace_event_file *file;
3040 +
3041 + file = kmem_cache_alloc(file_cachep, GFP_TRACE);
3042 + if (!file)
3043 + return NULL;
3044 +
3045 ++ pid_list = rcu_dereference_protected(tr->filtered_pids,
3046 ++ lockdep_is_held(&event_mutex));
3047 ++
3048 ++ if (pid_list)
3049 ++ file->flags |= EVENT_FILE_FL_PID_FILTER;
3050 ++
3051 + file->event_call = call;
3052 + file->tr = tr;
3053 + atomic_set(&file->sm_ref, 0);
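Taken together, the trace.h and trace_events.c hunks make __event_trigger_test_discard() bail out early when no soft-disable, predicate filter or PID filter is active, and mark newly created event files with EVENT_FILE_FL_PID_FILTER when a filtered-PID list already exists. A condensed user-space sketch of the resulting check order is shown below; the FL_* constants and should_discard() are hypothetical stand-ins, not the kernel symbols.

#include <stdio.h>

#define FL_SOFT_DISABLED  (1u << 0)
#define FL_FILTERED       (1u << 1)
#define FL_PID_FILTER     (1u << 2)

/* Return 1 when the event should be discarded, 0 when it should commit. */
static int should_discard(unsigned flags, int filter_match, int pid_ignored)
{
        /* fast path: nothing special is enabled, commit immediately */
        if (!(flags & (FL_SOFT_DISABLED | FL_FILTERED | FL_PID_FILTER)))
                return 0;

        if (flags & FL_SOFT_DISABLED)
                return 1;
        if ((flags & FL_FILTERED) && !filter_match)
                return 1;
        if ((flags & FL_PID_FILTER) && pid_ignored)
                return 1;

        return 0;
}

int main(void)
{
        printf("%d\n", should_discard(0, 0, 0));              /* 0: fast path   */
        printf("%d\n", should_discard(FL_FILTERED, 0, 0));     /* 1: filter miss */
        printf("%d\n", should_discard(FL_PID_FILTER, 0, 1));   /* 1: pid ignored */
        return 0;
}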
3054 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3055 +index ebcf26bc4cd4b..0c5a2b4e003d5 100644
3056 +--- a/mm/hugetlb.c
3057 ++++ b/mm/hugetlb.c
3058 +@@ -3425,6 +3425,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3059 + unsigned long sz = huge_page_size(h);
3060 + unsigned long mmun_start = start; /* For mmu_notifiers */
3061 + unsigned long mmun_end = end; /* For mmu_notifiers */
3062 ++ bool force_flush = false;
3063 +
3064 + WARN_ON(!is_vm_hugetlb_page(vma));
3065 + BUG_ON(start & ~huge_page_mask(h));
3066 +@@ -3451,10 +3452,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3067 + ptl = huge_pte_lock(h, mm, ptep);
3068 + if (huge_pmd_unshare(mm, &address, ptep)) {
3069 + spin_unlock(ptl);
3070 +- /*
3071 +- * We just unmapped a page of PMDs by clearing a PUD.
3072 +- * The caller's TLB flush range should cover this area.
3073 +- */
3074 ++ tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
3075 ++ force_flush = true;
3076 + continue;
3077 + }
3078 +
3079 +@@ -3511,6 +3510,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3080 + }
3081 + mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3082 + tlb_end_vma(tlb, vma);
3083 ++
3084 ++ /*
3085 ++ * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
3086 ++ * could defer the flush until now, since by holding i_mmap_rwsem we
3087 ++ * guaranteed that the last reference would not be dropped. But we must
3088 ++ * do the flushing before we return, as otherwise i_mmap_rwsem will be
3089 ++ * dropped and the last reference to the shared PMDs page might be
3090 ++ * dropped as well.
3091 ++ *
3092 ++ * In theory we could defer the freeing of the PMD pages as well, but
3093 ++ * huge_pmd_unshare() relies on the exact page_count for the PMD page to
3094 ++ * detect sharing, so we cannot defer the release of the page either.
3095 ++ * Instead, do flush now.
3096 ++ */
3097 ++ if (force_flush)
3098 ++ tlb_flush_mmu_tlbonly(tlb);
3099 + }
3100 +
3101 + void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3102 +diff --git a/mm/memory.c b/mm/memory.c
3103 +index 49b546cdce0d2..1d03085fde02b 100644
3104 +--- a/mm/memory.c
3105 ++++ b/mm/memory.c
3106 +@@ -324,6 +324,16 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
3107 + return false;
3108 + }
3109 +
3110 ++void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
3111 ++ unsigned long size)
3112 ++{
3113 ++ if (tlb->page_size != 0 && tlb->page_size != PMD_SIZE)
3114 ++ tlb_flush_mmu(tlb);
3115 ++
3116 ++ tlb->page_size = PMD_SIZE;
3117 ++ tlb->start = min(tlb->start, address);
3118 ++ tlb->end = max(tlb->end, address + size);
3119 ++}
3120 + #endif /* HAVE_GENERIC_MMU_GATHER */
3121 +
3122 + #ifdef CONFIG_HAVE_RCU_TABLE_FREE
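The new tlb_flush_pmd_range() helper records the unshared PMD area in the mmu_gather by widening the pending flush window (min of the starts, max of the ends), after draining the gather first if it was tracking a different page size, and __unmap_hugepage_range() then forces the flush before i_mmap_rwsem can be dropped. The range-accumulation part is sketched below in plain user-space C with hypothetical names (struct gather, note_range); it is an illustration, not the kernel helper.

#include <stdio.h>

struct gather {
        unsigned long start;    /* lowest address that still needs flushing  */
        unsigned long end;      /* one past the highest address to flush     */
};

static void note_range(struct gather *g, unsigned long addr, unsigned long size)
{
        /* grow the pending window so one flush covers everything noted so far */
        if (addr < g->start)
                g->start = addr;
        if (addr + size > g->end)
                g->end = addr + size;
}

int main(void)
{
        struct gather g = { .start = ~0UL, .end = 0 };

        note_range(&g, 0x200000, 0x200000);     /* one 2 MiB PMD       */
        note_range(&g, 0x800000, 0x200000);     /* another, further up */
        printf("flush [%#lx, %#lx)\n", g.start, g.end);  /* 0x200000 .. 0xa00000 */
        return 0;
}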
3123 +diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
3124 +index 8b5ba0a5cd386..93530bd332470 100644
3125 +--- a/net/ipv4/tcp_cubic.c
3126 ++++ b/net/ipv4/tcp_cubic.c
3127 +@@ -340,8 +340,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
3128 + return;
3129 +
3130 + if (tcp_in_slow_start(tp)) {
3131 +- if (hystart && after(ack, ca->end_seq))
3132 +- bictcp_hystart_reset(sk);
3133 + acked = tcp_slow_start(tp, acked);
3134 + if (!acked)
3135 + return;
3136 +@@ -383,6 +381,9 @@ static void hystart_update(struct sock *sk, u32 delay)
3137 + if (ca->found & hystart_detect)
3138 + return;
3139 +
3140 ++ if (after(tp->snd_una, ca->end_seq))
3141 ++ bictcp_hystart_reset(sk);
3142 ++
3143 + if (hystart_detect & HYSTART_ACK_TRAIN) {
3144 + u32 now = bictcp_clock();
3145 +
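The tcp_cubic change moves the HyStart reset out of the slow-start path and into hystart_update(), keyed on after(tp->snd_una, ca->end_seq), i.e. a wrap-safe comparison of 32-bit sequence numbers. A minimal sketch of that comparison follows; seq_after() is an invented name for the same idiom the kernel's after() macro uses.

#include <stdint.h>
#include <stdio.h>

/* wrap-safe "s1 is newer than s2" test for 32-bit sequence numbers */
static int seq_after(uint32_t s1, uint32_t s2)
{
        return (int32_t)(s1 - s2) > 0;
}

int main(void)
{
        printf("%d\n", seq_after(100, 50));            /* 1 */
        printf("%d\n", seq_after(50, 100));            /* 0 */
        /* 10 is "after" 0xfffffff0 once the counter has wrapped */
        printf("%d\n", seq_after(10, 0xfffffff0u));    /* 1 */
        return 0;
}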
3146 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
3147 +index fc36f3b0dceb3..251ec12517e93 100644
3148 +--- a/net/ipv6/ip6_output.c
3149 ++++ b/net/ipv6/ip6_output.c
3150 +@@ -175,7 +175,7 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
3151 + #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
3152 + /* Policy lookup after SNAT yielded a new policy */
3153 + if (skb_dst(skb)->xfrm) {
3154 +- IPCB(skb)->flags |= IPSKB_REROUTED;
3155 ++ IP6CB(skb)->flags |= IP6SKB_REROUTED;
3156 + return dst_output(net, sk, skb);
3157 + }
3158 + #endif
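The ip6_output fix marks the packet through IP6CB()/IP6SKB_REROUTED instead of the IPv4 macros: both families keep their per-packet state in the same skb->cb[] scratch area, and only the family-correct view (and bit layout) is meaningful. The user-space sketch below illustrates that one-buffer, per-family-view pattern with invented struct and macro names; it is not the skb API.

#include <stdio.h>
#include <string.h>

struct pkt {
        union {
                unsigned char cb[48];   /* shared scratch area, like skb->cb */
                unsigned int  align;    /* keeps the buffer suitably aligned */
        };
};

struct v4_cb { unsigned flags; };
struct v6_cb { unsigned flags; };

#define V4CB(p)  ((struct v4_cb *)&(p)->cb[0])
#define V6CB(p)  ((struct v6_cb *)&(p)->cb[0])

#define V4_REROUTED  (1u << 0)
#define V6_REROUTED  (1u << 3)          /* different bit layout per family */

int main(void)
{
        struct pkt p;
        memset(&p, 0, sizeof(p));

        /* an IPv6 packet must be marked through the IPv6 view of the cb */
        V6CB(&p)->flags |= V6_REROUTED;

        printf("v6 rerouted: %d\n", !!(V6CB(&p)->flags & V6_REROUTED));  /* 1 */
        printf("v4 rerouted: %d\n", !!(V4CB(&p)->flags & V4_REROUTED));  /* 0 */
        return 0;
}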
3159 +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
3160 +index acaeeaf814415..f20b08db9fe91 100644
3161 +--- a/net/netfilter/ipvs/ip_vs_core.c
3162 ++++ b/net/netfilter/ipvs/ip_vs_core.c
3163 +@@ -1850,7 +1850,6 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
3164 + struct ip_vs_proto_data *pd;
3165 + struct ip_vs_conn *cp;
3166 + int ret, pkts;
3167 +- int conn_reuse_mode;
3168 + struct sock *sk;
3169 +
3170 + /* Already marked as IPVS request or reply? */
3171 +@@ -1926,15 +1925,16 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
3172 + */
3173 + cp = pp->conn_in_get(ipvs, af, skb, &iph);
3174 +
3175 +- conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
3176 +- if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
3177 ++ if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
3178 ++ int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
3179 + bool old_ct = false, resched = false;
3180 +
3181 + if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
3182 + unlikely(!atomic_read(&cp->dest->weight))) {
3183 + resched = true;
3184 + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
3185 +- } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
3186 ++ } else if (conn_reuse_mode &&
3187 ++ is_new_conn_expected(cp, conn_reuse_mode)) {
3188 + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
3189 + if (!atomic_read(&cp->n_control)) {
3190 + resched = true;
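In ip_vs_in(), the conn_reuse_mode test now guards only the "new connection expected" branch, so a connection whose destination has weight 0 can still be rescheduled via expire_nodest_conn even when conn_reuse_mode is 0. The decision is condensed below into a hypothetical want_resched() helper purely for illustration; it omits the surrounding packet checks.

#include <stdio.h>

/* Decide whether a packet on an existing connection should be rescheduled
 * to a different real server.  Hypothetical, condensed logic.
 */
static int want_resched(int expire_nodest, int dest_weight,
                        int conn_reuse_mode, int new_conn_expected)
{
        if (expire_nodest && dest_weight == 0)
                return 1;                        /* server gone: always reschedule */
        if (conn_reuse_mode && new_conn_expected)
                return 1;                        /* port-reuse handling is opt-in  */
        return 0;
}

int main(void)
{
        /* conn_reuse_mode == 0 no longer disables expire_nodest_conn */
        printf("%d\n", want_resched(1, 0, 0, 0));   /* 1 */
        printf("%d\n", want_resched(0, 1, 0, 1));   /* 0 */
        printf("%d\n", want_resched(0, 1, 1, 1));   /* 1 */
        return 0;
}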
3191 +diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
3192 +index 1008bbbb3af9c..0e0dff72a9e4f 100644
3193 +--- a/net/nfc/nci/core.c
3194 ++++ b/net/nfc/nci/core.c
3195 +@@ -485,6 +485,11 @@ static int nci_open_device(struct nci_dev *ndev)
3196 +
3197 + mutex_lock(&ndev->req_lock);
3198 +
3199 ++ if (test_bit(NCI_UNREG, &ndev->flags)) {
3200 ++ rc = -ENODEV;
3201 ++ goto done;
3202 ++ }
3203 ++
3204 + if (test_bit(NCI_UP, &ndev->flags)) {
3205 + rc = -EALREADY;
3206 + goto done;
3207 +@@ -548,6 +553,10 @@ done:
3208 + static int nci_close_device(struct nci_dev *ndev)
3209 + {
3210 + nci_req_cancel(ndev, ENODEV);
3211 ++
3212 ++ /* This mutex needs to be held as a barrier for
3213 ++ * caller nci_unregister_device
3214 ++ */
3215 + mutex_lock(&ndev->req_lock);
3216 +
3217 + if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
3218 +@@ -585,8 +594,8 @@ static int nci_close_device(struct nci_dev *ndev)
3219 + /* Flush cmd wq */
3220 + flush_workqueue(ndev->cmd_wq);
3221 +
3222 +- /* Clear flags */
3223 +- ndev->flags = 0;
3224 ++ /* Clear flags except NCI_UNREG */
3225 ++ ndev->flags &= BIT(NCI_UNREG);
3226 +
3227 + mutex_unlock(&ndev->req_lock);
3228 +
3229 +@@ -1268,6 +1277,12 @@ void nci_unregister_device(struct nci_dev *ndev)
3230 + {
3231 + struct nci_conn_info *conn_info, *n;
3232 +
3233 ++	/* This set_bit is not protected by a dedicated barrier;
3234 ++	 * however, that is fine because the mutex_lock(&ndev->req_lock)
3235 ++	 * in nci_close_device() provides one.
3236 ++ */
3237 ++ set_bit(NCI_UNREG, &ndev->flags);
3238 ++
3239 + nci_close_device(ndev);
3240 +
3241 + destroy_workqueue(ndev->cmd_wq);
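The NCI hunks introduce an NCI_UNREG flag: nci_unregister_device() sets it before closing the device, nci_open_device() refuses to start while it is set, and nci_close_device() clears every flag except that bit by masking with BIT(NCI_UNREG) rather than zeroing the word. The masking idiom is sketched below with invented flag names.

#include <stdio.h>

#define BIT(n)  (1ul << (n))

enum { DEV_UP, DEV_BUSY, DEV_UNREG };

int main(void)
{
        unsigned long flags = BIT(DEV_UP) | BIT(DEV_BUSY) | BIT(DEV_UNREG);

        /* clear everything except the "being unregistered" marker */
        flags &= BIT(DEV_UNREG);

        printf("up=%d busy=%d unreg=%d\n",
               !!(flags & BIT(DEV_UP)),
               !!(flags & BIT(DEV_BUSY)),
               !!(flags & BIT(DEV_UNREG)));     /* up=0 busy=0 unreg=1 */
        return 0;
}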
3242 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
3243 +index 9aab4ab8161bd..4c904ab29e0e6 100644
3244 +--- a/net/smc/af_smc.c
3245 ++++ b/net/smc/af_smc.c
3246 +@@ -1589,8 +1589,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
3247 + static int smc_shutdown(struct socket *sock, int how)
3248 + {
3249 + struct sock *sk = sock->sk;
3250 ++ bool do_shutdown = true;
3251 + struct smc_sock *smc;
3252 + int rc = -EINVAL;
3253 ++ int old_state;
3254 + int rc1 = 0;
3255 +
3256 + smc = smc_sk(sk);
3257 +@@ -1617,7 +1619,11 @@ static int smc_shutdown(struct socket *sock, int how)
3258 + }
3259 + switch (how) {
3260 + case SHUT_RDWR: /* shutdown in both directions */
3261 ++ old_state = sk->sk_state;
3262 + rc = smc_close_active(smc);
3263 ++ if (old_state == SMC_ACTIVE &&
3264 ++ sk->sk_state == SMC_PEERCLOSEWAIT1)
3265 ++ do_shutdown = false;
3266 + break;
3267 + case SHUT_WR:
3268 + rc = smc_close_shutdown_write(smc);
3269 +@@ -1627,7 +1633,7 @@ static int smc_shutdown(struct socket *sock, int how)
3270 + /* nothing more to do because peer is not involved */
3271 + break;
3272 + }
3273 +- if (smc->clcsock)
3274 ++ if (do_shutdown && smc->clcsock)
3275 + rc1 = kernel_sock_shutdown(smc->clcsock, how);
3276 + /* map sock_shutdown_cmd constants to sk_shutdown value range */
3277 + sk->sk_shutdown |= how + 1;
3278 +diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
3279 +index ea2b87f294696..e25c023582f9e 100644
3280 +--- a/net/smc/smc_close.c
3281 ++++ b/net/smc/smc_close.c
3282 +@@ -202,6 +202,12 @@ again:
3283 + if (rc)
3284 + break;
3285 + sk->sk_state = SMC_PEERCLOSEWAIT1;
3286 ++
3287 ++		/* actively shut down the clcsock before the peer closes it,
3288 ++		 * to prevent the peer from entering the TIME_WAIT state.
3289 ++ */
3290 ++ if (smc->clcsock && smc->clcsock->sk)
3291 ++ rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
3292 + } else {
3293 + /* peer event has changed the state */
3294 + goto again;
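Between the two SMC hunks, smc_close_active() now shuts the clcsock down itself when the socket moves from SMC_ACTIVE to SMC_PEERCLOSEWAIT1, and smc_shutdown() skips its own kernel_sock_shutdown() call in exactly that case, so the underlying TCP socket is shut down once rather than twice. The sketch below compresses that hand-off into a toy state machine with invented names; it only illustrates the single-shutdown guarantee.

#include <stdio.h>

enum state { ACTIVE, PEERCLOSEWAIT1, CLOSED };

static int shutdown_calls;

static void clc_shutdown(void) { shutdown_calls++; }

/* close_active(): may shut the underlying socket down itself */
static enum state close_active(enum state s)
{
        if (s == ACTIVE) {
                clc_shutdown();                  /* done here, early */
                return PEERCLOSEWAIT1;
        }
        return CLOSED;
}

static void sock_shutdown(enum state *s)
{
        enum state old = *s;

        *s = close_active(old);
        /* only shut the clcsock down here if close_active() did not already */
        if (!(old == ACTIVE && *s == PEERCLOSEWAIT1))
                clc_shutdown();
}

int main(void)
{
        enum state s = ACTIVE;

        sock_shutdown(&s);
        printf("clcsock shutdowns: %d\n", shutdown_calls);   /* 1, not 2 */
        return 0;
}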
3295 +diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
3296 +index 5fcbb065d8702..d32685ce6c059 100644
3297 +--- a/sound/pci/ctxfi/ctamixer.c
3298 ++++ b/sound/pci/ctxfi/ctamixer.c
3299 +@@ -27,16 +27,15 @@
3300 +
3301 + #define BLANK_SLOT 4094
3302 +
3303 +-static int amixer_master(struct rsc *rsc)
3304 ++static void amixer_master(struct rsc *rsc)
3305 + {
3306 + rsc->conj = 0;
3307 +- return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
3308 ++ rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
3309 + }
3310 +
3311 +-static int amixer_next_conj(struct rsc *rsc)
3312 ++static void amixer_next_conj(struct rsc *rsc)
3313 + {
3314 + rsc->conj++;
3315 +- return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
3316 + }
3317 +
3318 + static int amixer_index(const struct rsc *rsc)
3319 +@@ -335,16 +334,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
3320 +
3321 + /* SUM resource management */
3322 +
3323 +-static int sum_master(struct rsc *rsc)
3324 ++static void sum_master(struct rsc *rsc)
3325 + {
3326 + rsc->conj = 0;
3327 +- return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
3328 ++ rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
3329 + }
3330 +
3331 +-static int sum_next_conj(struct rsc *rsc)
3332 ++static void sum_next_conj(struct rsc *rsc)
3333 + {
3334 + rsc->conj++;
3335 +- return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
3336 + }
3337 +
3338 + static int sum_index(const struct rsc *rsc)
3339 +diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
3340 +index f35a7341e4463..ed6e15d1f10f4 100644
3341 +--- a/sound/pci/ctxfi/ctdaio.c
3342 ++++ b/sound/pci/ctxfi/ctdaio.c
3343 +@@ -55,12 +55,12 @@ static struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
3344 + [SPDIFIO] = {.left = 0x05, .right = 0x85},
3345 + };
3346 +
3347 +-static int daio_master(struct rsc *rsc)
3348 ++static void daio_master(struct rsc *rsc)
3349 + {
3350 + /* Actually, this is not the resource index of DAIO.
3351 + * For DAO, it is the input mapper index. And, for DAI,
3352 + * it is the output time-slot index. */
3353 +- return rsc->conj = rsc->idx;
3354 ++ rsc->conj = rsc->idx;
3355 + }
3356 +
3357 + static int daio_index(const struct rsc *rsc)
3358 +@@ -68,19 +68,19 @@ static int daio_index(const struct rsc *rsc)
3359 + return rsc->conj;
3360 + }
3361 +
3362 +-static int daio_out_next_conj(struct rsc *rsc)
3363 ++static void daio_out_next_conj(struct rsc *rsc)
3364 + {
3365 +- return rsc->conj += 2;
3366 ++ rsc->conj += 2;
3367 + }
3368 +
3369 +-static int daio_in_next_conj_20k1(struct rsc *rsc)
3370 ++static void daio_in_next_conj_20k1(struct rsc *rsc)
3371 + {
3372 +- return rsc->conj += 0x200;
3373 ++ rsc->conj += 0x200;
3374 + }
3375 +
3376 +-static int daio_in_next_conj_20k2(struct rsc *rsc)
3377 ++static void daio_in_next_conj_20k2(struct rsc *rsc)
3378 + {
3379 +- return rsc->conj += 0x100;
3380 ++ rsc->conj += 0x100;
3381 + }
3382 +
3383 + static const struct rsc_ops daio_out_rsc_ops = {
3384 +diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
3385 +index 80c4d84f9667f..f05a09ed42b8d 100644
3386 +--- a/sound/pci/ctxfi/ctresource.c
3387 ++++ b/sound/pci/ctxfi/ctresource.c
3388 +@@ -113,18 +113,17 @@ static int audio_ring_slot(const struct rsc *rsc)
3389 + return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
3390 + }
3391 +
3392 +-static int rsc_next_conj(struct rsc *rsc)
3393 ++static void rsc_next_conj(struct rsc *rsc)
3394 + {
3395 + unsigned int i;
3396 + for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
3397 + i++;
3398 + rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
3399 +- return rsc->conj;
3400 + }
3401 +
3402 +-static int rsc_master(struct rsc *rsc)
3403 ++static void rsc_master(struct rsc *rsc)
3404 + {
3405 +- return rsc->conj = rsc->idx;
3406 ++ rsc->conj = rsc->idx;
3407 + }
3408 +
3409 + static const struct rsc_ops rsc_generic_ops = {
3410 +diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h
3411 +index 736d9f7e9e165..29b6fe6de659c 100644
3412 +--- a/sound/pci/ctxfi/ctresource.h
3413 ++++ b/sound/pci/ctxfi/ctresource.h
3414 +@@ -43,8 +43,8 @@ struct rsc {
3415 + };
3416 +
3417 + struct rsc_ops {
3418 +- int (*master)(struct rsc *rsc); /* Move to master resource */
3419 +- int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
3420 ++ void (*master)(struct rsc *rsc); /* Move to master resource */
3421 ++ void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
3422 + int (*index)(const struct rsc *rsc); /* Return the index of resource */
3423 + /* Return the output slot number */
3424 + int (*output_slot)(const struct rsc *rsc);
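The ctxfi hunks change the master/next_conj callbacks in struct rsc_ops from int to void: the old return value was produced by reading idx[rsc->conj] even on the final advance, one element past the last conjugate, and the callers did not need it. The sketch below shows the resulting ops-table shape with void cursor movers and a separate index() accessor; the names are simplified stand-ins for the driver's structures.

#include <stdio.h>

struct rsc {
        int conj;                       /* which conjugate resource is selected */
        int idx[4];                     /* hardware indices, one per conjugate  */
};

struct rsc_ops {
        void (*master)(struct rsc *);    /* rewind to the first conjugate         */
        void (*next_conj)(struct rsc *); /* advance; reads nothing, so no overrun */
        int  (*index)(const struct rsc *);
};

static void master(struct rsc *r)           { r->conj = 0; }
static void next_conj(struct rsc *r)        { r->conj++; }
static int  cur_index(const struct rsc *r)  { return r->idx[r->conj]; }

static const struct rsc_ops ops = { master, next_conj, cur_index };

int main(void)
{
        struct rsc r = { 0, { 10, 11, 12, 13 } };
        int i;

        /* the final next_conj() no longer dereferences idx[4] just to return it */
        for (i = 0, ops.master(&r); i < 4; i++, ops.next_conj(&r))
                printf("%d ", ops.index(&r));
        printf("\n");                           /* 10 11 12 13 */
        return 0;
}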
3425 +diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
3426 +index a4fc10723fc6b..660ca0272e395 100644
3427 +--- a/sound/pci/ctxfi/ctsrc.c
3428 ++++ b/sound/pci/ctxfi/ctsrc.c
3429 +@@ -594,16 +594,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
3430 +
3431 + /* SRCIMP resource manager operations */
3432 +
3433 +-static int srcimp_master(struct rsc *rsc)
3434 ++static void srcimp_master(struct rsc *rsc)
3435 + {
3436 + rsc->conj = 0;
3437 +- return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
3438 ++ rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
3439 + }
3440 +
3441 +-static int srcimp_next_conj(struct rsc *rsc)
3442 ++static void srcimp_next_conj(struct rsc *rsc)
3443 + {
3444 + rsc->conj++;
3445 +- return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
3446 + }
3447 +
3448 + static int srcimp_index(const struct rsc *rsc)
3449 +diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
3450 +index 44eee18c658ae..7d2c5de380317 100644
3451 +--- a/sound/soc/qcom/qdsp6/q6routing.c
3452 ++++ b/sound/soc/qcom/qdsp6/q6routing.c
3453 +@@ -443,7 +443,11 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
3454 + session->port_id = be_id;
3455 + snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
3456 + } else {
3457 +- session->port_id = -1;
3458 ++ if (session->port_id == be_id) {
3459 ++ session->port_id = -1;
3460 ++ return 0;
3461 ++ }
3462 ++
3463 + snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
3464 + }
3465 +
3466 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
3467 +index 2c6598e07dde3..ccf6dd9411975 100644
3468 +--- a/sound/soc/soc-topology.c
3469 ++++ b/sound/soc/soc-topology.c
3470 +@@ -2565,6 +2565,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
3471 + /* remove dynamic controls from the component driver */
3472 + int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
3473 + {
3474 ++ struct snd_card *card = comp->card->snd_card;
3475 + struct snd_soc_dobj *dobj, *next_dobj;
3476 + int pass = SOC_TPLG_PASS_END;
3477 +
3478 +@@ -2572,6 +2573,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
3479 + while (pass >= SOC_TPLG_PASS_START) {
3480 +
3481 + /* remove mixer controls */
3482 ++ down_write(&card->controls_rwsem);
3483 + list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
3484 + list) {
3485 +
3486 +@@ -2605,6 +2607,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
3487 + break;
3488 + }
3489 + }
3490 ++ up_write(&card->controls_rwsem);
3491 + pass--;
3492 + }
3493 +