Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.9 commit in: /
Date: Wed, 02 Dec 2020 12:51:13
Message-Id: 1606913456.54f932d5566957b4eb41aea06e6febb1c65245bd.mpagano@gentoo
commit: 54f932d5566957b4eb41aea06e6febb1c65245bd
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 2 12:50:56 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec 2 12:50:56 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=54f932d5

Linux patch 5.9.12

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1011_linux-5.9.12.patch | 6940 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6944 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index 7528f5d..22fb04b 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -87,6 +87,10 @@ Patch: 1010_linux-5.9.11.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.9.11
23
24 +Patch: 1011_linux-5.9.12.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.9.12
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1011_linux-5.9.12.patch b/1011_linux-5.9.12.patch
33 new file mode 100644
34 index 0000000..4267414
35 --- /dev/null
36 +++ b/1011_linux-5.9.12.patch
37 @@ -0,0 +1,6940 @@
38 +diff --git a/Makefile b/Makefile
39 +index bacb52fac2a54..1dd088b0ac993 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 9
46 +-SUBLEVEL = 11
47 ++SUBLEVEL = 12
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
52 +index f1ed17edb085b..163641726a2b9 100644
53 +--- a/arch/arc/include/asm/pgtable.h
54 ++++ b/arch/arc/include/asm/pgtable.h
55 +@@ -134,8 +134,10 @@
56 +
57 + #ifdef CONFIG_ARC_HAS_PAE40
58 + #define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
59 ++#define MAX_POSSIBLE_PHYSMEM_BITS 40
60 + #else
61 + #define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
62 ++#define MAX_POSSIBLE_PHYSMEM_BITS 32
63 + #endif
64 +
65 + /**************************************************************************
66 +diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
67 +index b69c7d40f5d82..2f326151116b7 100644
68 +--- a/arch/arm/boot/dts/dra76x.dtsi
69 ++++ b/arch/arm/boot/dts/dra76x.dtsi
70 +@@ -32,8 +32,8 @@
71 + interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
72 + <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
73 + interrupt-names = "int0", "int1";
74 +- clocks = <&mcan_clk>, <&l3_iclk_div>;
75 +- clock-names = "cclk", "hclk";
76 ++ clocks = <&l3_iclk_div>, <&mcan_clk>;
77 ++ clock-names = "hclk", "cclk";
78 + bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
79 + };
80 + };
81 +diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
82 +index 3502c2f746ca7..baf7d0204eb5a 100644
83 +--- a/arch/arm/include/asm/pgtable-2level.h
84 ++++ b/arch/arm/include/asm/pgtable-2level.h
85 +@@ -75,6 +75,8 @@
86 + #define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
87 + #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
88 +
89 ++#define MAX_POSSIBLE_PHYSMEM_BITS 32
90 ++
91 + /*
92 + * PMD_SHIFT determines the size of the area a second-level page table can map
93 + * PGDIR_SHIFT determines what a third-level page table entry can map
94 +diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
95 +index fbb6693c33528..2b85d175e9996 100644
96 +--- a/arch/arm/include/asm/pgtable-3level.h
97 ++++ b/arch/arm/include/asm/pgtable-3level.h
98 +@@ -25,6 +25,8 @@
99 + #define PTE_HWTABLE_OFF (0)
100 + #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
101 +
102 ++#define MAX_POSSIBLE_PHYSMEM_BITS 40
103 ++
104 + /*
105 + * PGDIR_SHIFT determines the size a top-level page table entry can map.
106 + */
107 +diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
108 +index a92d277f81a08..c8d317fafe2ea 100644
109 +--- a/arch/arm/mach-omap2/cpuidle44xx.c
110 ++++ b/arch/arm/mach-omap2/cpuidle44xx.c
111 +@@ -175,8 +175,11 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
112 + if (mpuss_can_lose_context) {
113 + error = cpu_cluster_pm_enter();
114 + if (error) {
115 +- omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
116 +- goto cpu_cluster_pm_out;
117 ++ index = 0;
118 ++ cx = state_ptr + index;
119 ++ pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
120 ++ omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
121 ++ mpuss_can_lose_context = 0;
122 + }
123 + }
124 + }
125 +@@ -184,7 +187,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
126 + omap4_enter_lowpower(dev->cpu, cx->cpu_state);
127 + cpu_done[dev->cpu] = true;
128 +
129 +-cpu_cluster_pm_out:
130 + /* Wakeup CPU1 only if it is not offlined */
131 + if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
132 +
133 +diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
134 +index 10cb836aea7ea..e970d8860a1fd 100644
135 +--- a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
136 ++++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
137 +@@ -54,7 +54,7 @@
138 + status = "okay";
139 + };
140 +
141 +- serial@c280000 {
142 ++ serial@3100000 {
143 + status = "okay";
144 + };
145 +
146 +diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
147 +index ca5cb6aef5ee4..6f6d460c931aa 100644
148 +--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
149 ++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
150 +@@ -924,7 +924,7 @@
151 +
152 + hsp_aon: hsp@c150000 {
153 + compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp";
154 +- reg = <0x0c150000 0xa0000>;
155 ++ reg = <0x0c150000 0x90000>;
156 + interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
157 + <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
158 + <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
159 +diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
160 +index e18e1a9a30113..a9caaf7c0d67e 100644
161 +--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
162 ++++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
163 +@@ -1663,16 +1663,6 @@
164 + vin-supply = <&vdd_5v0_sys>;
165 + };
166 +
167 +- vdd_usb_vbus_otg: regulator@11 {
168 +- compatible = "regulator-fixed";
169 +- regulator-name = "USB_VBUS_EN0";
170 +- regulator-min-microvolt = <5000000>;
171 +- regulator-max-microvolt = <5000000>;
172 +- gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
173 +- enable-active-high;
174 +- vin-supply = <&vdd_5v0_sys>;
175 +- };
176 +-
177 + vdd_hdmi: regulator@10 {
178 + compatible = "regulator-fixed";
179 + regulator-name = "VDD_HDMI_5V0";
180 +@@ -1712,4 +1702,14 @@
181 + enable-active-high;
182 + vin-supply = <&vdd_3v3_sys>;
183 + };
184 ++
185 ++ vdd_usb_vbus_otg: regulator@14 {
186 ++ compatible = "regulator-fixed";
187 ++ regulator-name = "USB_VBUS_EN0";
188 ++ regulator-min-microvolt = <5000000>;
189 ++ regulator-max-microvolt = <5000000>;
190 ++ gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
191 ++ enable-active-high;
192 ++ vin-supply = <&vdd_5v0_sys>;
193 ++ };
194 + };
195 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
196 +index 88233d42d9c29..db16919a53e4a 100644
197 +--- a/arch/arm64/include/asm/pgtable.h
198 ++++ b/arch/arm64/include/asm/pgtable.h
199 +@@ -108,8 +108,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
200 + #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
201 + #define pte_valid_not_user(pte) \
202 + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
203 +-#define pte_valid_young(pte) \
204 +- ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
205 + #define pte_valid_user(pte) \
206 + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
207 +
208 +@@ -117,9 +115,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
209 + * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
210 + * so that we don't erroneously return false for pages that have been
211 + * remapped as PROT_NONE but are yet to be flushed from the TLB.
212 ++ * Note that we can't make any assumptions based on the state of the access
213 ++ * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
214 ++ * TLB.
215 + */
216 + #define pte_accessible(mm, pte) \
217 +- (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
218 ++ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
219 +
220 + /*
221 + * p??_access_permitted() is true for valid user mappings (subject to the
222 +@@ -145,13 +146,6 @@ static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
223 + return pte;
224 + }
225 +
226 +-static inline pte_t pte_wrprotect(pte_t pte)
227 +-{
228 +- pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
229 +- pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
230 +- return pte;
231 +-}
232 +-
233 + static inline pte_t pte_mkwrite(pte_t pte)
234 + {
235 + pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
236 +@@ -177,6 +171,20 @@ static inline pte_t pte_mkdirty(pte_t pte)
237 + return pte;
238 + }
239 +
240 ++static inline pte_t pte_wrprotect(pte_t pte)
241 ++{
242 ++ /*
243 ++ * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
244 ++ * clear), set the PTE_DIRTY bit.
245 ++ */
246 ++ if (pte_hw_dirty(pte))
247 ++ pte = pte_mkdirty(pte);
248 ++
249 ++ pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
250 ++ pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
251 ++ return pte;
252 ++}
253 ++
254 + static inline pte_t pte_mkold(pte_t pte)
255 + {
256 + return clear_pte_bit(pte, __pgprot(PTE_AF));
257 +@@ -798,12 +806,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
258 + pte = READ_ONCE(*ptep);
259 + do {
260 + old_pte = pte;
261 +- /*
262 +- * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
263 +- * clear), set the PTE_DIRTY bit.
264 +- */
265 +- if (pte_hw_dirty(pte))
266 +- pte = pte_mkdirty(pte);
267 + pte = pte_wrprotect(pte);
268 + pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
269 + pte_val(old_pte), pte_val(pte));
270 +diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
271 +index 5c786b915cd34..39c34d92b6017 100644
272 +--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
273 ++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
274 +@@ -273,6 +273,23 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
275 + return extract_bytes(value, addr & 7, len);
276 + }
277 +
278 ++static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
279 ++ gpa_t addr, unsigned int len)
280 ++{
281 ++ unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
282 ++ int target_vcpu_id = vcpu->vcpu_id;
283 ++ u64 value;
284 ++
285 ++ value = (u64)(mpidr & GENMASK(23, 0)) << 32;
286 ++ value |= ((target_vcpu_id & 0xffff) << 8);
287 ++
288 ++ if (vgic_has_its(vcpu->kvm))
289 ++ value |= GICR_TYPER_PLPIS;
290 ++
291 ++ /* reporting of the Last bit is not supported for userspace */
292 ++ return extract_bytes(value, addr & 7, len);
293 ++}
294 ++
295 + static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
296 + gpa_t addr, unsigned int len)
297 + {
298 +@@ -593,8 +610,9 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
299 + REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
300 + vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
301 + VGIC_ACCESS_32bit),
302 +- REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
303 +- vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
304 ++ REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
305 ++ vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
306 ++ vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
307 + VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
308 + REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
309 + vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
310 +diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
311 +index a950fc1ddb4da..6c0532d7b2119 100644
312 +--- a/arch/mips/include/asm/pgtable-32.h
313 ++++ b/arch/mips/include/asm/pgtable-32.h
314 +@@ -154,6 +154,7 @@ static inline void pmd_clear(pmd_t *pmdp)
315 +
316 + #if defined(CONFIG_XPA)
317 +
318 ++#define MAX_POSSIBLE_PHYSMEM_BITS 40
319 + #define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
320 + static inline pte_t
321 + pfn_pte(unsigned long pfn, pgprot_t prot)
322 +@@ -169,6 +170,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
323 +
324 + #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
325 +
326 ++#define MAX_POSSIBLE_PHYSMEM_BITS 36
327 + #define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
328 +
329 + static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
330 +@@ -183,6 +185,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
331 +
332 + #else
333 +
334 ++#define MAX_POSSIBLE_PHYSMEM_BITS 32
335 + #ifdef CONFIG_CPU_VR41XX
336 + #define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
337 + #define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
338 +diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
339 +index 36443cda8dcf2..1376be95e975f 100644
340 +--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
341 ++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
342 +@@ -36,8 +36,10 @@ static inline bool pte_user(pte_t pte)
343 + */
344 + #ifdef CONFIG_PTE_64BIT
345 + #define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
346 ++#define MAX_POSSIBLE_PHYSMEM_BITS 36
347 + #else
348 + #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
349 ++#define MAX_POSSIBLE_PHYSMEM_BITS 32
350 + #endif
351 +
352 + /*
353 +diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
354 +index 28716e2f13e31..a39e2d193fdc1 100644
355 +--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
356 ++++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
357 +@@ -63,6 +63,8 @@
358 +
359 + #else /* !__ASSEMBLY__ */
360 +
361 ++#include <linux/jump_label.h>
362 ++
363 + DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
364 +
365 + #ifdef CONFIG_PPC_KUAP
366 +diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
367 +index b9e134d0f03ad..5f5049c1ddb7f 100644
368 +--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
369 ++++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
370 +@@ -153,8 +153,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
371 + */
372 + #if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
373 + #define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
374 ++#define MAX_POSSIBLE_PHYSMEM_BITS 36
375 + #else
376 + #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
377 ++#define MAX_POSSIBLE_PHYSMEM_BITS 32
378 + #endif
379 +
380 + /*
381 +diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
382 +index f63a3d3bca3d3..4d01f09ecf808 100644
383 +--- a/arch/powerpc/kernel/exceptions-64s.S
384 ++++ b/arch/powerpc/kernel/exceptions-64s.S
385 +@@ -1000,8 +1000,6 @@ TRAMP_REAL_BEGIN(system_reset_idle_wake)
386 + * Vectors for the FWNMI option. Share common code.
387 + */
388 + TRAMP_REAL_BEGIN(system_reset_fwnmi)
389 +- /* XXX: fwnmi guest could run a nested/PR guest, so why no test? */
390 +- __IKVM_REAL(system_reset)=0
391 + GEN_INT_ENTRY system_reset, virt=0
392 +
393 + #endif /* CONFIG_PPC_PSERIES */
394 +@@ -1412,6 +1410,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
395 + * If none is found, do a Linux page fault. Linux page faults can happen in
396 + * kernel mode due to user copy operations of course.
397 + *
398 ++ * KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
399 ++ * MMU context, which may cause a DSI in the host, which must go to the
400 ++ * KVM handler. MSR[IR] is not enabled, so the real-mode handler will
401 ++ * always be used regardless of AIL setting.
402 ++ *
403 + * - Radix MMU
404 + * The hardware loads from the Linux page table directly, so a fault goes
405 + * immediately to Linux page fault.
406 +@@ -1422,10 +1425,8 @@ INT_DEFINE_BEGIN(data_access)
407 + IVEC=0x300
408 + IDAR=1
409 + IDSISR=1
410 +-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
411 + IKVM_SKIP=1
412 + IKVM_REAL=1
413 +-#endif
414 + INT_DEFINE_END(data_access)
415 +
416 + EXC_REAL_BEGIN(data_access, 0x300, 0x80)
417 +@@ -1464,6 +1465,8 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
418 + * ppc64_bolted_size (first segment). The kernel handler must avoid stomping
419 + * on user-handler data structures.
420 + *
421 ++ * KVM: Same as 0x300, DSLB must test for KVM guest.
422 ++ *
423 + * A dedicated save area EXSLB is used (XXX: but it actually need not be
424 + * these days, we could use EXGEN).
425 + */
426 +@@ -1472,10 +1475,8 @@ INT_DEFINE_BEGIN(data_access_slb)
427 + IAREA=PACA_EXSLB
428 + IRECONCILE=0
429 + IDAR=1
430 +-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
431 + IKVM_SKIP=1
432 + IKVM_REAL=1
433 +-#endif
434 + INT_DEFINE_END(data_access_slb)
435 +
436 + EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
437 +diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
438 +index bdea91df14974..eed17b1ef7728 100644
439 +--- a/arch/powerpc/kvm/book3s_xive_native.c
440 ++++ b/arch/powerpc/kvm/book3s_xive_native.c
441 +@@ -251,6 +251,13 @@ static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
442 + }
443 +
444 + state = &sb->irq_state[src];
445 ++
446 ++ /* Some sanity checking */
447 ++ if (!state->valid) {
448 ++ pr_devel("%s: source %lx invalid !\n", __func__, irq);
449 ++ return VM_FAULT_SIGBUS;
450 ++ }
451 ++
452 + kvmppc_xive_select_irq(state, &hw_num, &xd);
453 +
454 + arch_spin_lock(&sb->lock);
455 +diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
456 +index b0ab66e5fdb1d..5b2e79e5bfa5b 100644
457 +--- a/arch/riscv/include/asm/pgtable-32.h
458 ++++ b/arch/riscv/include/asm/pgtable-32.h
459 +@@ -14,4 +14,6 @@
460 + #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
461 + #define PGDIR_MASK (~(PGDIR_SIZE - 1))
462 +
463 ++#define MAX_POSSIBLE_PHYSMEM_BITS 34
464 ++
465 + #endif /* _ASM_RISCV_PGTABLE_32_H */
466 +diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
467 +index 82a5693b18614..134388cbaaa1d 100644
468 +--- a/arch/riscv/include/asm/vdso/processor.h
469 ++++ b/arch/riscv/include/asm/vdso/processor.h
470 +@@ -4,6 +4,8 @@
471 +
472 + #ifndef __ASSEMBLY__
473 +
474 ++#include <asm/barrier.h>
475 ++
476 + static inline void cpu_relax(void)
477 + {
478 + #ifdef __riscv_muldiv
479 +diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
480 +index 2c6dd329312bd..3de5234b6de5b 100644
481 +--- a/arch/riscv/kernel/setup.c
482 ++++ b/arch/riscv/kernel/setup.c
483 +@@ -69,6 +69,7 @@ void __init setup_arch(char **cmdline_p)
484 +
485 + *cmdline_p = boot_command_line;
486 +
487 ++ jump_label_init();
488 + parse_early_param();
489 +
490 + setup_bootmem();
491 +diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
492 +index cb8f9e4cfcbf8..0cfd6da784f84 100644
493 +--- a/arch/riscv/kernel/vdso/Makefile
494 ++++ b/arch/riscv/kernel/vdso/Makefile
495 +@@ -44,7 +44,7 @@ SYSCFLAGS_vdso.so.dbg = $(c_flags)
496 + $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
497 + $(call if_changed,vdsold)
498 + SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
499 +- -Wl,--build-id -Wl,--hash-style=both
500 ++ -Wl,--build-id=sha1 -Wl,--hash-style=both
501 +
502 + # We also create a special relocatable object that should mirror the symbol
503 + # table and layout of the linked DSO. With ld --just-symbols we can then
504 +diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
505 +index 5d8cc1864566d..62a18dee4c36d 100644
506 +--- a/arch/s390/kernel/asm-offsets.c
507 ++++ b/arch/s390/kernel/asm-offsets.c
508 +@@ -53,11 +53,11 @@ int main(void)
509 + /* stack_frame offsets */
510 + OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
511 + OFFSET(__SF_GPRS, stack_frame, gprs);
512 +- OFFSET(__SF_EMPTY, stack_frame, empty1);
513 +- OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[0]);
514 +- OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[1]);
515 +- OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]);
516 +- OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[3]);
517 ++ OFFSET(__SF_EMPTY, stack_frame, empty1[0]);
518 ++ OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[1]);
519 ++ OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[2]);
520 ++ OFFSET(__SF_SIE_REASON, stack_frame, empty1[3]);
521 ++ OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[4]);
522 + BLANK();
523 + /* timeval/timezone offsets for use by vdso */
524 + OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count);
525 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
526 +index 3349750f930ee..ca55db0823534 100644
527 +--- a/arch/s390/kernel/entry.S
528 ++++ b/arch/s390/kernel/entry.S
529 +@@ -1072,6 +1072,7 @@ EXPORT_SYMBOL(save_fpu_regs)
530 + * %r4
531 + */
532 + load_fpu_regs:
533 ++ stnsm __SF_EMPTY(%r15),0xfc
534 + lg %r4,__LC_CURRENT
535 + aghi %r4,__TASK_thread
536 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU
537 +@@ -1103,6 +1104,7 @@ load_fpu_regs:
538 + .Lload_fpu_regs_done:
539 + ni __LC_CPU_FLAGS+7,255-_CIF_FPU
540 + .Lload_fpu_regs_exit:
541 ++ ssm __SF_EMPTY(%r15)
542 + BR_EX %r14
543 + .Lload_fpu_regs_end:
544 + ENDPROC(load_fpu_regs)
545 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
546 +index 6b74b92c1a586..425d3d75320bf 100644
547 +--- a/arch/s390/kvm/kvm-s390.c
548 ++++ b/arch/s390/kvm/kvm-s390.c
549 +@@ -2312,7 +2312,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
550 + struct kvm_s390_pv_unp unp = {};
551 +
552 + r = -EINVAL;
553 +- if (!kvm_s390_pv_is_protected(kvm))
554 ++ if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
555 + break;
556 +
557 + r = -EFAULT;
558 +@@ -3564,7 +3564,6 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
559 + vcpu->arch.sie_block->pp = 0;
560 + vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
561 + vcpu->arch.sie_block->todpr = 0;
562 +- vcpu->arch.sie_block->cpnc = 0;
563 + }
564 + }
565 +
566 +@@ -3582,7 +3581,6 @@ static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
567 +
568 + regs->etoken = 0;
569 + regs->etoken_extension = 0;
570 +- regs->diag318 = 0;
571 + }
572 +
573 + int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
574 +diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
575 +index eb99e2f95ebed..f5847f9dec7c9 100644
576 +--- a/arch/s390/kvm/pv.c
577 ++++ b/arch/s390/kvm/pv.c
578 +@@ -208,7 +208,6 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
579 + return -EIO;
580 + }
581 + kvm->arch.gmap->guest_handle = uvcb.guest_handle;
582 +- atomic_set(&kvm->mm->context.is_protected, 1);
583 + return 0;
584 + }
585 +
586 +@@ -228,6 +227,8 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
587 + *rrc = uvcb.header.rrc;
588 + KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
589 + *rc, *rrc);
590 ++ if (!cc)
591 ++ atomic_set(&kvm->mm->context.is_protected, 1);
592 + return cc ? -EINVAL : 0;
593 + }
594 +
595 +diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
596 +index 373542ca1113e..78dbba6a4500c 100644
597 +--- a/arch/s390/mm/gmap.c
598 ++++ b/arch/s390/mm/gmap.c
599 +@@ -2690,6 +2690,8 @@ static const struct mm_walk_ops reset_acc_walk_ops = {
600 + #include <linux/sched/mm.h>
601 + void s390_reset_acc(struct mm_struct *mm)
602 + {
603 ++ if (!mm_is_protected(mm))
604 ++ return;
605 + /*
606 + * we might be called during
607 + * reset: we walk the pages and clear
608 +diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
609 +index 442e1ed4acd49..4eb7ee5fed72d 100644
610 +--- a/arch/x86/events/intel/cstate.c
611 ++++ b/arch/x86/events/intel/cstate.c
612 +@@ -107,14 +107,14 @@
613 + MODULE_LICENSE("GPL");
614 +
615 + #define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \
616 +-static ssize_t __cstate_##_var##_show(struct kobject *kobj, \
617 +- struct kobj_attribute *attr, \
618 ++static ssize_t __cstate_##_var##_show(struct device *dev, \
619 ++ struct device_attribute *attr, \
620 + char *page) \
621 + { \
622 + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
623 + return sprintf(page, _format "\n"); \
624 + } \
625 +-static struct kobj_attribute format_attr_##_var = \
626 ++static struct device_attribute format_attr_##_var = \
627 + __ATTR(_name, 0444, __cstate_##_var##_show, NULL)
628 +
629 + static ssize_t cstate_get_attr_cpumask(struct device *dev,
630 +diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
631 +index d5c6d3b340c50..803601baa753d 100644
632 +--- a/arch/x86/events/intel/uncore.c
633 ++++ b/arch/x86/events/intel/uncore.c
634 +@@ -92,8 +92,8 @@ end:
635 + return map;
636 + }
637 +
638 +-ssize_t uncore_event_show(struct kobject *kobj,
639 +- struct kobj_attribute *attr, char *buf)
640 ++ssize_t uncore_event_show(struct device *dev,
641 ++ struct device_attribute *attr, char *buf)
642 + {
643 + struct uncore_event_desc *event =
644 + container_of(attr, struct uncore_event_desc, attr);
645 +diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
646 +index 105fdc69825eb..c5744783e05d0 100644
647 +--- a/arch/x86/events/intel/uncore.h
648 ++++ b/arch/x86/events/intel/uncore.h
649 +@@ -157,7 +157,7 @@ struct intel_uncore_box {
650 + #define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2
651 +
652 + struct uncore_event_desc {
653 +- struct kobj_attribute attr;
654 ++ struct device_attribute attr;
655 + const char *config;
656 + };
657 +
658 +@@ -179,8 +179,8 @@ struct pci2phy_map {
659 + struct pci2phy_map *__find_pci2phy_map(int segment);
660 + int uncore_pcibus_to_physid(struct pci_bus *bus);
661 +
662 +-ssize_t uncore_event_show(struct kobject *kobj,
663 +- struct kobj_attribute *attr, char *buf);
664 ++ssize_t uncore_event_show(struct device *dev,
665 ++ struct device_attribute *attr, char *buf);
666 +
667 + static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
668 + {
669 +@@ -201,14 +201,14 @@ extern int __uncore_max_dies;
670 + }
671 +
672 + #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
673 +-static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
674 +- struct kobj_attribute *attr, \
675 ++static ssize_t __uncore_##_var##_show(struct device *dev, \
676 ++ struct device_attribute *attr, \
677 + char *page) \
678 + { \
679 + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
680 + return sprintf(page, _format "\n"); \
681 + } \
682 +-static struct kobj_attribute format_attr_##_var = \
683 ++static struct device_attribute format_attr_##_var = \
684 + __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
685 +
686 + static inline bool uncore_pmc_fixed(int idx)
687 +diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
688 +index 67b411f7e8c41..abaed36212250 100644
689 +--- a/arch/x86/events/rapl.c
690 ++++ b/arch/x86/events/rapl.c
691 +@@ -93,18 +93,6 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
692 + * any other bit is reserved
693 + */
694 + #define RAPL_EVENT_MASK 0xFFULL
695 +-
696 +-#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \
697 +-static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
698 +- struct kobj_attribute *attr, \
699 +- char *page) \
700 +-{ \
701 +- BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
702 +- return sprintf(page, _format "\n"); \
703 +-} \
704 +-static struct kobj_attribute format_attr_##_var = \
705 +- __ATTR(_name, 0444, __rapl_##_var##_show, NULL)
706 +-
707 + #define RAPL_CNTR_WIDTH 32
708 +
709 + #define RAPL_EVENT_ATTR_STR(_name, v, str) \
710 +@@ -441,7 +429,7 @@ static struct attribute_group rapl_pmu_events_group = {
711 + .attrs = attrs_empty,
712 + };
713 +
714 +-DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
715 ++PMU_FORMAT_ATTR(event, "config:0-7");
716 + static struct attribute *rapl_formats_attr[] = {
717 + &format_attr_event.attr,
718 + NULL,
719 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
720 +index 5303dbc5c9bce..7b54213551c6f 100644
721 +--- a/arch/x86/include/asm/kvm_host.h
722 ++++ b/arch/x86/include/asm/kvm_host.h
723 +@@ -1603,6 +1603,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
724 + int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
725 + int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
726 + int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
727 ++int kvm_cpu_has_extint(struct kvm_vcpu *v);
728 + int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
729 + int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
730 + void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
731 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
732 +index 581fb7223ad0e..d41b70fe4918e 100644
733 +--- a/arch/x86/kernel/cpu/bugs.c
734 ++++ b/arch/x86/kernel/cpu/bugs.c
735 +@@ -739,11 +739,13 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
736 + if (boot_cpu_has(X86_FEATURE_IBPB)) {
737 + setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
738 +
739 ++ spectre_v2_user_ibpb = mode;
740 + switch (cmd) {
741 + case SPECTRE_V2_USER_CMD_FORCE:
742 + case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
743 + case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
744 + static_branch_enable(&switch_mm_always_ibpb);
745 ++ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
746 + break;
747 + case SPECTRE_V2_USER_CMD_PRCTL:
748 + case SPECTRE_V2_USER_CMD_AUTO:
749 +@@ -757,8 +759,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
750 + pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
751 + static_key_enabled(&switch_mm_always_ibpb) ?
752 + "always-on" : "conditional");
753 +-
754 +- spectre_v2_user_ibpb = mode;
755 + }
756 +
757 + /*
758 +diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
759 +index de29c4a267c05..35a6eb03a6a02 100644
760 +--- a/arch/x86/kernel/cpu/mce/core.c
761 ++++ b/arch/x86/kernel/cpu/mce/core.c
762 +@@ -1363,8 +1363,10 @@ noinstr void do_machine_check(struct pt_regs *regs)
763 + * When there's any problem use only local no_way_out state.
764 + */
765 + if (!lmce) {
766 +- if (mce_end(order) < 0)
767 +- no_way_out = worst >= MCE_PANIC_SEVERITY;
768 ++ if (mce_end(order) < 0) {
769 ++ if (!no_way_out)
770 ++ no_way_out = worst >= MCE_PANIC_SEVERITY;
771 ++ }
772 + } else {
773 + /*
774 + * If there was a fatal machine check we should have
775 +diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
776 +index 3f844f14fc0a6..799b60c9f8927 100644
777 +--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
778 ++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
779 +@@ -507,6 +507,24 @@ unlock:
780 + return ret ?: nbytes;
781 + }
782 +
783 ++/**
784 ++ * rdtgroup_remove - the helper to remove resource group safely
785 ++ * @rdtgrp: resource group to remove
786 ++ *
787 ++ * On resource group creation via a mkdir, an extra kernfs_node reference is
788 ++ * taken to ensure that the rdtgroup structure remains accessible for the
789 ++ * rdtgroup_kn_unlock() calls where it is removed.
790 ++ *
791 ++ * Drop the extra reference here, then free the rdtgroup structure.
792 ++ *
793 ++ * Return: void
794 ++ */
795 ++static void rdtgroup_remove(struct rdtgroup *rdtgrp)
796 ++{
797 ++ kernfs_put(rdtgrp->kn);
798 ++ kfree(rdtgrp);
799 ++}
800 ++
801 + struct task_move_callback {
802 + struct callback_head work;
803 + struct rdtgroup *rdtgrp;
804 +@@ -529,7 +547,7 @@ static void move_myself(struct callback_head *head)
805 + (rdtgrp->flags & RDT_DELETED)) {
806 + current->closid = 0;
807 + current->rmid = 0;
808 +- kfree(rdtgrp);
809 ++ rdtgroup_remove(rdtgrp);
810 + }
811 +
812 + if (unlikely(current->flags & PF_EXITING))
813 +@@ -1708,7 +1726,6 @@ static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
814 + if (IS_ERR(kn_subdir))
815 + return PTR_ERR(kn_subdir);
816 +
817 +- kernfs_get(kn_subdir);
818 + ret = rdtgroup_kn_set_ugid(kn_subdir);
819 + if (ret)
820 + return ret;
821 +@@ -1731,7 +1748,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
822 + kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
823 + if (IS_ERR(kn_info))
824 + return PTR_ERR(kn_info);
825 +- kernfs_get(kn_info);
826 +
827 + ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
828 + if (ret)
829 +@@ -1752,12 +1768,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
830 + goto out_destroy;
831 + }
832 +
833 +- /*
834 +- * This extra ref will be put in kernfs_remove() and guarantees
835 +- * that @rdtgrp->kn is always accessible.
836 +- */
837 +- kernfs_get(kn_info);
838 +-
839 + ret = rdtgroup_kn_set_ugid(kn_info);
840 + if (ret)
841 + goto out_destroy;
842 +@@ -1786,12 +1796,6 @@ mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
843 + if (dest_kn)
844 + *dest_kn = kn;
845 +
846 +- /*
847 +- * This extra ref will be put in kernfs_remove() and guarantees
848 +- * that @rdtgrp->kn is always accessible.
849 +- */
850 +- kernfs_get(kn);
851 +-
852 + ret = rdtgroup_kn_set_ugid(kn);
853 + if (ret)
854 + goto out_destroy;
855 +@@ -2018,8 +2022,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
856 + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
857 + rdtgroup_pseudo_lock_remove(rdtgrp);
858 + kernfs_unbreak_active_protection(kn);
859 +- kernfs_put(rdtgrp->kn);
860 +- kfree(rdtgrp);
861 ++ rdtgroup_remove(rdtgrp);
862 + } else {
863 + kernfs_unbreak_active_protection(kn);
864 + }
865 +@@ -2078,13 +2081,11 @@ static int rdt_get_tree(struct fs_context *fc)
866 + &kn_mongrp);
867 + if (ret < 0)
868 + goto out_info;
869 +- kernfs_get(kn_mongrp);
870 +
871 + ret = mkdir_mondata_all(rdtgroup_default.kn,
872 + &rdtgroup_default, &kn_mondata);
873 + if (ret < 0)
874 + goto out_mongrp;
875 +- kernfs_get(kn_mondata);
876 + rdtgroup_default.mon.mon_data_kn = kn_mondata;
877 + }
878 +
879 +@@ -2308,7 +2309,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
880 + if (atomic_read(&sentry->waitcount) != 0)
881 + sentry->flags = RDT_DELETED;
882 + else
883 +- kfree(sentry);
884 ++ rdtgroup_remove(sentry);
885 + }
886 + }
887 +
888 +@@ -2350,7 +2351,7 @@ static void rmdir_all_sub(void)
889 + if (atomic_read(&rdtgrp->waitcount) != 0)
890 + rdtgrp->flags = RDT_DELETED;
891 + else
892 +- kfree(rdtgrp);
893 ++ rdtgroup_remove(rdtgrp);
894 + }
895 + /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
896 + update_closid_rmid(cpu_online_mask, &rdtgroup_default);
897 +@@ -2450,11 +2451,6 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
898 + if (IS_ERR(kn))
899 + return PTR_ERR(kn);
900 +
901 +- /*
902 +- * This extra ref will be put in kernfs_remove() and guarantees
903 +- * that kn is always accessible.
904 +- */
905 +- kernfs_get(kn);
906 + ret = rdtgroup_kn_set_ugid(kn);
907 + if (ret)
908 + goto out_destroy;
909 +@@ -2789,8 +2785,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
910 + /*
911 + * kernfs_remove() will drop the reference count on "kn" which
912 + * will free it. But we still need it to stick around for the
913 +- * rdtgroup_kn_unlock(kn} call below. Take one extra reference
914 +- * here, which will be dropped inside rdtgroup_kn_unlock().
915 ++ * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
916 ++ * which will be dropped by kernfs_put() in rdtgroup_remove().
917 + */
918 + kernfs_get(kn);
919 +
920 +@@ -2831,6 +2827,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
921 + out_idfree:
922 + free_rmid(rdtgrp->mon.rmid);
923 + out_destroy:
924 ++ kernfs_put(rdtgrp->kn);
925 + kernfs_remove(rdtgrp->kn);
926 + out_free_rgrp:
927 + kfree(rdtgrp);
928 +@@ -2843,7 +2840,7 @@ static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
929 + {
930 + kernfs_remove(rgrp->kn);
931 + free_rmid(rgrp->mon.rmid);
932 +- kfree(rgrp);
933 ++ rdtgroup_remove(rgrp);
934 + }
935 +
936 + /*
937 +@@ -3000,11 +2997,6 @@ static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
938 + WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
939 + list_del(&rdtgrp->mon.crdtgrp_list);
940 +
941 +- /*
942 +- * one extra hold on this, will drop when we kfree(rdtgrp)
943 +- * in rdtgroup_kn_unlock()
944 +- */
945 +- kernfs_get(kn);
946 + kernfs_remove(rdtgrp->kn);
947 +
948 + return 0;
949 +@@ -3016,11 +3008,6 @@ static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
950 + rdtgrp->flags = RDT_DELETED;
951 + list_del(&rdtgrp->rdtgroup_list);
952 +
953 +- /*
954 +- * one extra hold on this, will drop when we kfree(rdtgrp)
955 +- * in rdtgroup_kn_unlock()
956 +- */
957 +- kernfs_get(kn);
958 + kernfs_remove(rdtgrp->kn);
959 + return 0;
960 + }
961 +diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
962 +index ea8d51ec251bb..4da8345d34bb0 100644
963 +--- a/arch/x86/kernel/dumpstack.c
964 ++++ b/arch/x86/kernel/dumpstack.c
965 +@@ -77,6 +77,9 @@ static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
966 + if (!user_mode(regs))
967 + return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);
968 +
969 ++ /* The user space code from other tasks cannot be accessed. */
970 ++ if (regs != task_pt_regs(current))
971 ++ return -EPERM;
972 + /*
973 + * Make sure userspace isn't trying to trick us into dumping kernel
974 + * memory by pointing the userspace instruction pointer at it.
975 +@@ -84,6 +87,12 @@ static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
976 + if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
977 + return -EINVAL;
978 +
979 ++ /*
980 ++ * Even if named copy_from_user_nmi() this can be invoked from
981 ++ * other contexts and will not try to resolve a pagefault, which is
982 ++ * the correct thing to do here as this code can be called from any
983 ++ * context.
984 ++ */
985 + return copy_from_user_nmi(buf, (void __user *)src, nbytes);
986 + }
987 +
988 +@@ -114,13 +123,19 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
989 + u8 opcodes[OPCODE_BUFSIZE];
990 + unsigned long prologue = regs->ip - PROLOGUE_SIZE;
991 +
992 +- if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
993 +- printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
994 +- loglvl, prologue);
995 +- } else {
996 ++ switch (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
997 ++ case 0:
998 + printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
999 + __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
1000 + opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
1001 ++ break;
1002 ++ case -EPERM:
1003 ++ /* No access to the user space stack of other tasks. Ignore. */
1004 ++ break;
1005 ++ default:
1006 ++ printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
1007 ++ loglvl, prologue);
1008 ++ break;
1009 + }
1010 + }
1011 +
1012 +diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
1013 +index 420be871d9d45..ae64f98ec2ab6 100644
1014 +--- a/arch/x86/kernel/tboot.c
1015 ++++ b/arch/x86/kernel/tboot.c
1016 +@@ -514,13 +514,10 @@ int tboot_force_iommu(void)
1017 + if (!tboot_enabled())
1018 + return 0;
1019 +
1020 +- if (no_iommu || swiotlb || dmar_disabled)
1021 ++ if (no_iommu || dmar_disabled)
1022 + pr_warn("Forcing Intel-IOMMU to enabled\n");
1023 +
1024 + dmar_disabled = 0;
1025 +-#ifdef CONFIG_SWIOTLB
1026 +- swiotlb = 0;
1027 +-#endif
1028 + no_iommu = 0;
1029 +
1030 + return 1;
1031 +diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
1032 +index 99d118ffc67db..814698e5b1526 100644
1033 +--- a/arch/x86/kvm/irq.c
1034 ++++ b/arch/x86/kvm/irq.c
1035 +@@ -40,29 +40,10 @@ static int pending_userspace_extint(struct kvm_vcpu *v)
1036 + * check if there is pending interrupt from
1037 + * non-APIC source without intack.
1038 + */
1039 +-static int kvm_cpu_has_extint(struct kvm_vcpu *v)
1040 +-{
1041 +- u8 accept = kvm_apic_accept_pic_intr(v);
1042 +-
1043 +- if (accept) {
1044 +- if (irqchip_split(v->kvm))
1045 +- return pending_userspace_extint(v);
1046 +- else
1047 +- return v->kvm->arch.vpic->output;
1048 +- } else
1049 +- return 0;
1050 +-}
1051 +-
1052 +-/*
1053 +- * check if there is injectable interrupt:
1054 +- * when virtual interrupt delivery enabled,
1055 +- * interrupt from apic will handled by hardware,
1056 +- * we don't need to check it here.
1057 +- */
1058 +-int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
1059 ++int kvm_cpu_has_extint(struct kvm_vcpu *v)
1060 + {
1061 + /*
1062 +- * FIXME: interrupt.injected represents an interrupt that it's
1063 ++ * FIXME: interrupt.injected represents an interrupt whose
1064 + * side-effects have already been applied (e.g. bit from IRR
1065 + * already moved to ISR). Therefore, it is incorrect to rely
1066 + * on interrupt.injected to know if there is a pending
1067 +@@ -75,6 +56,23 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
1068 + if (!lapic_in_kernel(v))
1069 + return v->arch.interrupt.injected;
1070 +
1071 ++ if (!kvm_apic_accept_pic_intr(v))
1072 ++ return 0;
1073 ++
1074 ++ if (irqchip_split(v->kvm))
1075 ++ return pending_userspace_extint(v);
1076 ++ else
1077 ++ return v->kvm->arch.vpic->output;
1078 ++}
1079 ++
1080 ++/*
1081 ++ * check if there is injectable interrupt:
1082 ++ * when virtual interrupt delivery enabled,
1083 ++ * interrupt from apic will handled by hardware,
1084 ++ * we don't need to check it here.
1085 ++ */
1086 ++int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
1087 ++{
1088 + if (kvm_cpu_has_extint(v))
1089 + return 1;
1090 +
1091 +@@ -91,20 +89,6 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
1092 + */
1093 + int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
1094 + {
1095 +- /*
1096 +- * FIXME: interrupt.injected represents an interrupt that it's
1097 +- * side-effects have already been applied (e.g. bit from IRR
1098 +- * already moved to ISR). Therefore, it is incorrect to rely
1099 +- * on interrupt.injected to know if there is a pending
1100 +- * interrupt in the user-mode LAPIC.
1101 +- * This leads to nVMX/nSVM not be able to distinguish
1102 +- * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
1103 +- * pending interrupt or should re-inject an injected
1104 +- * interrupt.
1105 +- */
1106 +- if (!lapic_in_kernel(v))
1107 +- return v->arch.interrupt.injected;
1108 +-
1109 + if (kvm_cpu_has_extint(v))
1110 + return 1;
1111 +
1112 +@@ -118,16 +102,21 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
1113 + */
1114 + static int kvm_cpu_get_extint(struct kvm_vcpu *v)
1115 + {
1116 +- if (kvm_cpu_has_extint(v)) {
1117 +- if (irqchip_split(v->kvm)) {
1118 +- int vector = v->arch.pending_external_vector;
1119 +-
1120 +- v->arch.pending_external_vector = -1;
1121 +- return vector;
1122 +- } else
1123 +- return kvm_pic_read_irq(v->kvm); /* PIC */
1124 +- } else
1125 ++ if (!kvm_cpu_has_extint(v)) {
1126 ++ WARN_ON(!lapic_in_kernel(v));
1127 + return -1;
1128 ++ }
1129 ++
1130 ++ if (!lapic_in_kernel(v))
1131 ++ return v->arch.interrupt.nr;
1132 ++
1133 ++ if (irqchip_split(v->kvm)) {
1134 ++ int vector = v->arch.pending_external_vector;
1135 ++
1136 ++ v->arch.pending_external_vector = -1;
1137 ++ return vector;
1138 ++ } else
1139 ++ return kvm_pic_read_irq(v->kvm); /* PIC */
1140 + }
1141 +
1142 + /*
1143 +@@ -135,13 +124,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
1144 + */
1145 + int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
1146 + {
1147 +- int vector;
1148 +-
1149 +- if (!lapic_in_kernel(v))
1150 +- return v->arch.interrupt.nr;
1151 +-
1152 +- vector = kvm_cpu_get_extint(v);
1153 +-
1154 ++ int vector = kvm_cpu_get_extint(v);
1155 + if (vector != -1)
1156 + return vector; /* PIC */
1157 +
1158 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
1159 +index 8055a486d843d..1fc1a8e8cce02 100644
1160 +--- a/arch/x86/kvm/lapic.c
1161 ++++ b/arch/x86/kvm/lapic.c
1162 +@@ -2461,7 +2461,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
1163 + struct kvm_lapic *apic = vcpu->arch.apic;
1164 + u32 ppr;
1165 +
1166 +- if (!kvm_apic_hw_enabled(apic))
1167 ++ if (!kvm_apic_present(vcpu))
1168 + return -1;
1169 +
1170 + __apic_update_ppr(apic, &ppr);
1171 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1172 +index bacfc9e94a62b..6e5ed3dc4f298 100644
1173 +--- a/arch/x86/kvm/x86.c
1174 ++++ b/arch/x86/kvm/x86.c
1175 +@@ -3839,21 +3839,23 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
1176 +
1177 + static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
1178 + {
1179 ++ /*
1180 ++ * We can accept userspace's request for interrupt injection
1181 ++ * as long as we have a place to store the interrupt number.
1182 ++ * The actual injection will happen when the CPU is able to
1183 ++ * deliver the interrupt.
1184 ++ */
1185 ++ if (kvm_cpu_has_extint(vcpu))
1186 ++ return false;
1187 ++
1188 ++ /* Acknowledging ExtINT does not happen if LINT0 is masked. */
1189 + return (!lapic_in_kernel(vcpu) ||
1190 + kvm_apic_accept_pic_intr(vcpu));
1191 + }
1192 +
1193 +-/*
1194 +- * if userspace requested an interrupt window, check that the
1195 +- * interrupt window is open.
1196 +- *
1197 +- * No need to exit to userspace if we already have an interrupt queued.
1198 +- */
1199 + static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
1200 + {
1201 + return kvm_arch_interrupt_allowed(vcpu) &&
1202 +- !kvm_cpu_has_interrupt(vcpu) &&
1203 +- !kvm_event_needs_reinjection(vcpu) &&
1204 + kvm_cpu_accept_dm_intr(vcpu);
1205 + }
1206 +
1207 +diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
1208 +index 799f4eba0a621..043c73dfd2c98 100644
1209 +--- a/arch/x86/xen/spinlock.c
1210 ++++ b/arch/x86/xen/spinlock.c
1211 +@@ -93,10 +93,20 @@ void xen_init_lock_cpu(int cpu)
1212 +
1213 + void xen_uninit_lock_cpu(int cpu)
1214 + {
1215 ++ int irq;
1216 ++
1217 + if (!xen_pvspin)
1218 + return;
1219 +
1220 +- unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
1221 ++ /*
1222 ++ * When booting the kernel with 'mitigations=auto,nosmt', the secondary
1223 ++ * CPUs are not activated, and lock_kicker_irq is not initialized.
1224 ++ */
1225 ++ irq = per_cpu(lock_kicker_irq, cpu);
1226 ++ if (irq == -1)
1227 ++ return;
1228 ++
1229 ++ unbind_from_irqhandler(irq, NULL);
1230 + per_cpu(lock_kicker_irq, cpu) = -1;
1231 + kfree(per_cpu(irq_name, cpu));
1232 + per_cpu(irq_name, cpu) = NULL;
1233 +diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
1234 +index b9758119feca1..5c9fb8005aa89 100644
1235 +--- a/arch/xtensa/include/asm/uaccess.h
1236 ++++ b/arch/xtensa/include/asm/uaccess.h
1237 +@@ -302,7 +302,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
1238 + return -EFAULT;
1239 + }
1240 + #else
1241 +-long strncpy_from_user(char *dst, const char *src, long count);
1242 ++long strncpy_from_user(char *dst, const char __user *src, long count);
1243 + #endif
1244 +
1245 + /*
1246 +diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c
1247 +index 35abcb1ec051d..86f8195d8039e 100644
1248 +--- a/block/keyslot-manager.c
1249 ++++ b/block/keyslot-manager.c
1250 +@@ -103,6 +103,13 @@ int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots)
1251 + spin_lock_init(&ksm->idle_slots_lock);
1252 +
1253 + slot_hashtable_size = roundup_pow_of_two(num_slots);
1254 ++ /*
1255 ++ * hash_ptr() assumes bits != 0, so ensure the hash table has at least 2
1256 ++ * buckets. This only makes a difference when there is only 1 keyslot.
1257 ++ */
1258 ++ if (slot_hashtable_size < 2)
1259 ++ slot_hashtable_size = 2;
1260 ++
1261 + ksm->log_slot_ht_size = ilog2(slot_hashtable_size);
1262 + ksm->slot_hashtable = kvmalloc_array(slot_hashtable_size,
1263 + sizeof(ksm->slot_hashtable[0]),
1264 +diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
1265 +index efb088df12766..92ecf1a78ec73 100644
1266 +--- a/drivers/bus/ti-sysc.c
1267 ++++ b/drivers/bus/ti-sysc.c
1268 +@@ -227,6 +227,9 @@ static int sysc_wait_softreset(struct sysc *ddata)
1269 + u32 sysc_mask, syss_done, rstval;
1270 + int syss_offset, error = 0;
1271 +
1272 ++ if (ddata->cap->regbits->srst_shift < 0)
1273 ++ return 0;
1274 ++
1275 + syss_offset = ddata->offsets[SYSC_SYSSTATUS];
1276 + sysc_mask = BIT(ddata->cap->regbits->srst_shift);
1277 +
1278 +@@ -970,9 +973,15 @@ static int sysc_enable_module(struct device *dev)
1279 + return error;
1280 + }
1281 + }
1282 +- error = sysc_wait_softreset(ddata);
1283 +- if (error)
1284 +- dev_warn(ddata->dev, "OCP softreset timed out\n");
1285 ++ /*
1286 ++ * Some modules like i2c and hdq1w have unusable reset status unless
1287 ++ * the module reset quirk is enabled. Skip status check on enable.
1288 ++ */
1289 ++ if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
1290 ++ error = sysc_wait_softreset(ddata);
1291 ++ if (error)
1292 ++ dev_warn(ddata->dev, "OCP softreset timed out\n");
1293 ++ }
1294 + if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
1295 + sysc_disable_opt_clocks(ddata);
1296 +
1297 +@@ -1373,17 +1382,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
1298 + SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
1299 + SYSC_QUIRK_OPT_CLKS_NEEDED),
1300 + SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
1301 +- SYSC_MODULE_QUIRK_HDQ1W),
1302 ++ SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
1303 + SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
1304 +- SYSC_MODULE_QUIRK_HDQ1W),
1305 ++ SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
1306 + SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
1307 +- SYSC_MODULE_QUIRK_I2C),
1308 ++ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
1309 + SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
1310 +- SYSC_MODULE_QUIRK_I2C),
1311 ++ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
1312 + SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
1313 +- SYSC_MODULE_QUIRK_I2C),
1314 ++ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
1315 + SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
1316 +- SYSC_MODULE_QUIRK_I2C),
1317 ++ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
1318 + SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
1319 + SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
1320 + SYSC_MODULE_QUIRK_SGX),
1321 +@@ -2880,7 +2889,7 @@ static int sysc_check_active_timer(struct sysc *ddata)
1322 +
1323 + if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
1324 + (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
1325 +- return -EBUSY;
1326 ++ return -ENXIO;
1327 +
1328 + return 0;
1329 + }
1330 +diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
1331 +index e8956706a2917..191966dc8d023 100644
1332 +--- a/drivers/cpuidle/cpuidle-tegra.c
1333 ++++ b/drivers/cpuidle/cpuidle-tegra.c
1334 +@@ -189,7 +189,7 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
1335 + }
1336 +
1337 + local_fiq_disable();
1338 +- tegra_pm_set_cpu_in_lp2();
1339 ++ RCU_NONIDLE(tegra_pm_set_cpu_in_lp2());
1340 + cpu_pm_enter();
1341 +
1342 + switch (index) {
1343 +@@ -207,7 +207,7 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
1344 + }
1345 +
1346 + cpu_pm_exit();
1347 +- tegra_pm_clear_cpu_in_lp2();
1348 ++ RCU_NONIDLE(tegra_pm_clear_cpu_in_lp2());
1349 + local_fiq_enable();
1350 +
1351 + return err ?: index;
1352 +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
1353 +index 5274a0704d960..2c3c47e4f7770 100644
1354 +--- a/drivers/dma/pl330.c
1355 ++++ b/drivers/dma/pl330.c
1356 +@@ -2802,7 +2802,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
1357 + * If burst size is smaller than bus width then make sure we only
1358 + * transfer one at a time to avoid a burst stradling an MFIFO entry.
1359 + */
1360 +- if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
1361 ++ if (burst * 8 < pl330->pcfg.data_bus_width)
1362 + desc->rqcfg.brst_len = 1;
1363 +
1364 + desc->bytes_requested = len;
1365 +diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
1366 +index 0fc432567b857..993297d585c01 100644
1367 +--- a/drivers/dma/xilinx/xilinx_dma.c
1368 ++++ b/drivers/dma/xilinx/xilinx_dma.c
1369 +@@ -517,8 +517,8 @@ struct xilinx_dma_device {
1370 + #define to_dma_tx_descriptor(tx) \
1371 + container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
1372 + #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
1373 +- readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
1374 +- cond, delay_us, timeout_us)
1375 ++ readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
1376 ++ val, cond, delay_us, timeout_us)
1377 +
1378 + /* IO accessors */
1379 + static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
1380 +diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
1381 +index 3939699e62fe0..929d6f05b6bb1 100644
1382 +--- a/drivers/firmware/efi/Kconfig
1383 ++++ b/drivers/firmware/efi/Kconfig
1384 +@@ -275,7 +275,7 @@ config EFI_DEV_PATH_PARSER
1385 +
1386 + config EFI_EARLYCON
1387 + def_bool y
1388 +- depends on SERIAL_EARLYCON && !ARM && !IA64
1389 ++ depends on EFI && SERIAL_EARLYCON && !ARM && !IA64
1390 + select FONT_SUPPORT
1391 + select ARCH_USE_MEMREMAP_PROT
1392 +
1393 +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
1394 +index 3aa07c3b51369..8ead4379e6e85 100644
1395 +--- a/drivers/firmware/efi/efi.c
1396 ++++ b/drivers/firmware/efi/efi.c
1397 +@@ -387,10 +387,10 @@ static int __init efisubsys_init(void)
1398 +
1399 + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
1400 + EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
1401 +- efivar_ssdt_load();
1402 + error = generic_ops_register();
1403 + if (error)
1404 + goto err_put;
1405 ++ efivar_ssdt_load();
1406 + platform_device_register_simple("efivars", 0, NULL, 0);
1407 + }
1408 +
1409 +diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
1410 +index efb8a66efc684..d08ac824c993c 100644
1411 +--- a/drivers/firmware/xilinx/zynqmp.c
1412 ++++ b/drivers/firmware/xilinx/zynqmp.c
1413 +@@ -20,12 +20,28 @@
1414 + #include <linux/of_platform.h>
1415 + #include <linux/slab.h>
1416 + #include <linux/uaccess.h>
1417 ++#include <linux/hashtable.h>
1418 +
1419 + #include <linux/firmware/xlnx-zynqmp.h>
1420 + #include "zynqmp-debug.h"
1421 +
1422 ++/* Max HashMap Order for PM API feature check (1<<7 = 128) */
1423 ++#define PM_API_FEATURE_CHECK_MAX_ORDER 7
1424 ++
1425 + static bool feature_check_enabled;
1426 +-static u32 zynqmp_pm_features[PM_API_MAX];
1427 ++DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
1428 ++
1429 ++/**
1430 ++ * struct pm_api_feature_data - PM API Feature data
1431 ++ * @pm_api_id: PM API Id, used as key to index into hashmap
1432 ++ * @feature_status: status of PM API feature: valid, invalid
1433 ++ * @hentry: hlist_node that hooks this entry into hashtable
1434 ++ */
1435 ++struct pm_api_feature_data {
1436 ++ u32 pm_api_id;
1437 ++ int feature_status;
1438 ++ struct hlist_node hentry;
1439 ++};
1440 +
1441 + static const struct mfd_cell firmware_devs[] = {
1442 + {
1443 +@@ -142,29 +158,37 @@ static int zynqmp_pm_feature(u32 api_id)
1444 + int ret;
1445 + u32 ret_payload[PAYLOAD_ARG_CNT];
1446 + u64 smc_arg[2];
1447 ++ struct pm_api_feature_data *feature_data;
1448 +
1449 + if (!feature_check_enabled)
1450 + return 0;
1451 +
1452 +- /* Return value if feature is already checked */
1453 +- if (api_id > ARRAY_SIZE(zynqmp_pm_features))
1454 +- return PM_FEATURE_INVALID;
1455 ++ /* Check for existing entry in hash table for given api */
1456 ++ hash_for_each_possible(pm_api_features_map, feature_data, hentry,
1457 ++ api_id) {
1458 ++ if (feature_data->pm_api_id == api_id)
1459 ++ return feature_data->feature_status;
1460 ++ }
1461 +
1462 +- if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
1463 +- return zynqmp_pm_features[api_id];
1464 ++ /* Add new entry if not present */
1465 ++ feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
1466 ++ if (!feature_data)
1467 ++ return -ENOMEM;
1468 +
1469 ++ feature_data->pm_api_id = api_id;
1470 + smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
1471 + smc_arg[1] = api_id;
1472 +
1473 + ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
1474 +- if (ret) {
1475 +- zynqmp_pm_features[api_id] = PM_FEATURE_INVALID;
1476 +- return PM_FEATURE_INVALID;
1477 +- }
1478 ++ if (ret)
1479 ++ ret = -EOPNOTSUPP;
1480 ++ else
1481 ++ ret = ret_payload[1];
1482 +
1483 +- zynqmp_pm_features[api_id] = ret_payload[1];
1484 ++ feature_data->feature_status = ret;
1485 ++ hash_add(pm_api_features_map, &feature_data->hentry, api_id);
1486 +
1487 +- return zynqmp_pm_features[api_id];
1488 ++ return ret;
1489 + }
1490 +
1491 + /**
1492 +@@ -200,9 +224,12 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
1493 + * Make sure to stay in x0 register
1494 + */
1495 + u64 smc_arg[4];
1496 ++ int ret;
1497 +
1498 +- if (zynqmp_pm_feature(pm_api_id) == PM_FEATURE_INVALID)
1499 +- return -ENOTSUPP;
1500 ++ /* Check if feature is supported or not */
1501 ++ ret = zynqmp_pm_feature(pm_api_id);
1502 ++ if (ret < 0)
1503 ++ return ret;
1504 +
1505 + smc_arg[0] = PM_SIP_SVC | pm_api_id;
1506 + smc_arg[1] = ((u64)arg1 << 32) | arg0;
1507 +@@ -615,7 +642,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
1508 + */
1509 + int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
1510 + {
1511 +- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
1512 ++ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
1513 + type, 0, NULL);
1514 + }
1515 + EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
1516 +@@ -1252,9 +1279,17 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
1517 +
1518 + static int zynqmp_firmware_remove(struct platform_device *pdev)
1519 + {
1520 ++ struct pm_api_feature_data *feature_data;
1521 ++ int i;
1522 ++
1523 + mfd_remove_devices(&pdev->dev);
1524 + zynqmp_pm_api_debugfs_exit();
1525 +
1526 ++ hash_for_each(pm_api_features_map, i, feature_data, hentry) {
1527 ++ hash_del(&feature_data->hentry);
1528 ++ kfree(feature_data);
1529 ++ }
1530 ++
1531 + return 0;
1532 + }
1533 +
1534 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1535 +index b4a8da8fc8fd7..1595b124c1457 100644
1536 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1537 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1538 +@@ -4593,7 +4593,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
1539 + if (!amdgpu_device_supports_baco(adev->ddev))
1540 + return -ENOTSUPP;
1541 +
1542 +- if (ras && ras->supported)
1543 ++ if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
1544 + adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
1545 +
1546 + return amdgpu_dpm_baco_enter(adev);
1547 +@@ -4612,7 +4612,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
1548 + if (ret)
1549 + return ret;
1550 +
1551 +- if (ras && ras->supported)
1552 ++ if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
1553 + adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
1554 +
1555 + return 0;
1556 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
1557 +index 7c787ec598f18..d5e95e4ea5bd2 100644
1558 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
1559 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
1560 +@@ -1571,6 +1571,12 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
1561 + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
1562 + *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
1563 + break;
1564 ++ case AMDGPU_UCODE_ID_RLC_IRAM:
1565 ++ *type = GFX_FW_TYPE_RLC_IRAM;
1566 ++ break;
1567 ++ case AMDGPU_UCODE_ID_RLC_DRAM:
1568 ++ *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
1569 ++ break;
1570 + case AMDGPU_UCODE_ID_SMC:
1571 + *type = GFX_FW_TYPE_SMU;
1572 + break;
1573 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
1574 +index 60bb3e8b31188..aeaaae713c59d 100644
1575 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
1576 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
1577 +@@ -168,12 +168,16 @@ struct amdgpu_rlc {
1578 + u32 save_restore_list_cntl_size_bytes;
1579 + u32 save_restore_list_gpm_size_bytes;
1580 + u32 save_restore_list_srm_size_bytes;
1581 ++ u32 rlc_iram_ucode_size_bytes;
1582 ++ u32 rlc_dram_ucode_size_bytes;
1583 +
1584 + u32 *register_list_format;
1585 + u32 *register_restore;
1586 + u8 *save_restore_list_cntl;
1587 + u8 *save_restore_list_gpm;
1588 + u8 *save_restore_list_srm;
1589 ++ u8 *rlc_iram_ucode;
1590 ++ u8 *rlc_dram_ucode;
1591 +
1592 + bool is_rlc_v2_1;
1593 +
1594 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
1595 +index 183743c5fb7bf..c3cc2e8b24064 100644
1596 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
1597 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
1598 +@@ -500,6 +500,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
1599 + ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
1600 + ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
1601 + ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
1602 ++ ucode->ucode_id != AMDGPU_UCODE_ID_RLC_IRAM &&
1603 ++ ucode->ucode_id != AMDGPU_UCODE_ID_RLC_DRAM &&
1604 + ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
1605 + ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV &&
1606 + ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) {
1607 +@@ -556,6 +558,14 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
1608 + ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
1609 + memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
1610 + ucode->ucode_size);
1611 ++ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_IRAM) {
1612 ++ ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
1613 ++ memcpy(ucode->kaddr, adev->gfx.rlc.rlc_iram_ucode,
1614 ++ ucode->ucode_size);
1615 ++ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_DRAM) {
1616 ++ ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
1617 ++ memcpy(ucode->kaddr, adev->gfx.rlc.rlc_dram_ucode,
1618 ++ ucode->ucode_size);
1619 + } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES) {
1620 + ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
1621 + memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data +
1622 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
1623 +index 12a8bc8fca0b0..97c78d91fc2fa 100644
1624 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
1625 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
1626 +@@ -221,6 +221,15 @@ struct rlc_firmware_header_v2_1 {
1627 + uint32_t save_restore_list_srm_offset_bytes;
1628 + };
1629 +
1630 ++/* version_major=2, version_minor=1 */
1631 ++struct rlc_firmware_header_v2_2 {
1632 ++ struct rlc_firmware_header_v2_1 v2_1;
1633 ++ uint32_t rlc_iram_ucode_size_bytes;
1634 ++ uint32_t rlc_iram_ucode_offset_bytes;
1635 ++ uint32_t rlc_dram_ucode_size_bytes;
1636 ++ uint32_t rlc_dram_ucode_offset_bytes;
1637 ++};
1638 ++
1639 + /* version_major=1, version_minor=0 */
1640 + struct sdma_firmware_header_v1_0 {
1641 + struct common_firmware_header header;
1642 +@@ -338,6 +347,8 @@ enum AMDGPU_UCODE_ID {
1643 + AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
1644 + AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
1645 + AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
1646 ++ AMDGPU_UCODE_ID_RLC_IRAM,
1647 ++ AMDGPU_UCODE_ID_RLC_DRAM,
1648 + AMDGPU_UCODE_ID_RLC_G,
1649 + AMDGPU_UCODE_ID_STORAGE,
1650 + AMDGPU_UCODE_ID_SMC,
1651 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
1652 +index 5eb63288d1574..edbb8194ee81b 100644
1653 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
1654 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
1655 +@@ -67,6 +67,7 @@ struct amdgpu_uvd {
1656 + unsigned harvest_config;
1657 + /* store image width to adjust nb memory state */
1658 + unsigned decode_image_width;
1659 ++ uint32_t keyselect;
1660 + };
1661 +
1662 + int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
1663 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1664 +index 3a2af95f2bf0d..514cb4b1e537a 100644
1665 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1666 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1667 +@@ -3105,6 +3105,8 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
1668 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
1669 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
1670 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
1671 ++ SOC15_REG_GOLDEN_VALUE(GC, 0 ,mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100),
1672 ++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088),
1673 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
1674 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003fffff, 0x00280400),
1675 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
1676 +@@ -3594,6 +3596,17 @@ static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
1677 + le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
1678 + }
1679 +
1680 ++static void gfx_v10_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
1681 ++{
1682 ++ const struct rlc_firmware_header_v2_2 *rlc_hdr;
1683 ++
1684 ++ rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1685 ++ adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
1686 ++ adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
1687 ++ adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
1688 ++ adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
1689 ++}
1690 ++
1691 + static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
1692 + {
1693 + bool ret = false;
1694 +@@ -3709,8 +3722,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
1695 + rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1696 + version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1697 + version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1698 +- if (version_major == 2 && version_minor == 1)
1699 +- adev->gfx.rlc.is_rlc_v2_1 = true;
1700 +
1701 + adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1702 + adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1703 +@@ -3752,8 +3763,12 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
1704 + for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
1705 + adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1706 +
1707 +- if (adev->gfx.rlc.is_rlc_v2_1)
1708 +- gfx_v10_0_init_rlc_ext_microcode(adev);
1709 ++ if (version_major == 2) {
1710 ++ if (version_minor >= 1)
1711 ++ gfx_v10_0_init_rlc_ext_microcode(adev);
1712 ++ if (version_minor == 2)
1713 ++ gfx_v10_0_init_rlc_iram_dram_microcode(adev);
1714 ++ }
1715 + }
1716 +
1717 + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
1718 +@@ -3814,8 +3829,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
1719 + adev->firmware.fw_size +=
1720 + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1721 + }
1722 +- if (adev->gfx.rlc.is_rlc_v2_1 &&
1723 +- adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1724 ++ if (adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1725 + adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
1726 + adev->gfx.rlc.save_restore_list_srm_size_bytes) {
1727 + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
1728 +@@ -3835,6 +3849,21 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
1729 + info->fw = adev->gfx.rlc_fw;
1730 + adev->firmware.fw_size +=
1731 + ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
1732 ++
1733 ++ if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
1734 ++ adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
1735 ++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
1736 ++ info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
1737 ++ info->fw = adev->gfx.rlc_fw;
1738 ++ adev->firmware.fw_size +=
1739 ++ ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
1740 ++
1741 ++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
1742 ++ info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
1743 ++ info->fw = adev->gfx.rlc_fw;
1744 ++ adev->firmware.fw_size +=
1745 ++ ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
1746 ++ }
1747 + }
1748 +
1749 + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1750 +diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
1751 +index cbc04a5c0fe1d..baf994627b0d7 100644
1752 +--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
1753 ++++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
1754 +@@ -214,7 +214,7 @@ enum psp_gfx_fw_type {
1755 + GFX_FW_TYPE_UVD1 = 23, /* UVD1 VG-20 */
1756 + GFX_FW_TYPE_TOC = 24, /* TOC NV-10 */
1757 + GFX_FW_TYPE_RLC_P = 25, /* RLC P NV */
1758 +- GFX_FW_TYPE_RLX6 = 26, /* RLX6 NV */
1759 ++ GFX_FW_TYPE_RLC_IRAM = 26, /* RLC_IRAM NV */
1760 + GFX_FW_TYPE_GLOBAL_TAP_DELAYS = 27, /* GLOBAL TAP DELAYS NV */
1761 + GFX_FW_TYPE_SE0_TAP_DELAYS = 28, /* SE0 TAP DELAYS NV */
1762 + GFX_FW_TYPE_SE1_TAP_DELAYS = 29, /* SE1 TAP DELAYS NV */
1763 +@@ -236,7 +236,7 @@ enum psp_gfx_fw_type {
1764 + GFX_FW_TYPE_ACCUM_CTRL_RAM = 45, /* ACCUM CTRL RAM NV */
1765 + GFX_FW_TYPE_RLCP_CAM = 46, /* RLCP CAM NV */
1766 + GFX_FW_TYPE_RLC_SPP_CAM_EXT = 47, /* RLC SPP CAM EXT NV */
1767 +- GFX_FW_TYPE_RLX6_DRAM_BOOT = 48, /* RLX6 DRAM BOOT NV */
1768 ++ GFX_FW_TYPE_RLC_DRAM_BOOT = 48, /* RLC DRAM BOOT NV */
1769 + GFX_FW_TYPE_VCN0_RAM = 49, /* VCN_RAM NV + RN */
1770 + GFX_FW_TYPE_VCN1_RAM = 50, /* VCN_RAM NV + RN */
1771 + GFX_FW_TYPE_DMUB = 51, /* DMUB RN */
1772 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
1773 +index 7cf4b11a65c5c..41800fcad4102 100644
1774 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
1775 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
1776 +@@ -277,15 +277,8 @@ static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
1777 + */
1778 + static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
1779 + {
1780 +- void *ptr;
1781 +- uint32_t ucode_len, i;
1782 +- uint32_t keysel;
1783 +-
1784 +- ptr = adev->uvd.inst[0].cpu_addr;
1785 +- ptr += 192 + 16;
1786 +- memcpy(&ucode_len, ptr, 4);
1787 +- ptr += ucode_len;
1788 +- memcpy(&keysel, ptr, 4);
1789 ++ int i;
1790 ++ uint32_t keysel = adev->uvd.keyselect;
1791 +
1792 + WREG32(mmUVD_FW_START, keysel);
1793 +
1794 +@@ -550,6 +543,8 @@ static int uvd_v3_1_sw_init(void *handle)
1795 + struct amdgpu_ring *ring;
1796 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1797 + int r;
1798 ++ void *ptr;
1799 ++ uint32_t ucode_len;
1800 +
1801 + /* UVD TRAP */
1802 + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
1803 +@@ -571,6 +566,13 @@ static int uvd_v3_1_sw_init(void *handle)
1804 + if (r)
1805 + return r;
1806 +
1807 ++ /* Retrieval firmware validate key */
1808 ++ ptr = adev->uvd.inst[0].cpu_addr;
1809 ++ ptr += 192 + 16;
1810 ++ memcpy(&ucode_len, ptr, 4);
1811 ++ ptr += ucode_len;
1812 ++ memcpy(&adev->uvd.keyselect, ptr, 4);
1813 ++
1814 + r = amdgpu_uvd_entity_init(adev);
1815 +
1816 + return r;
1817 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1818 +index 6beccd5a0941a..640cbafdde101 100644
1819 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1820 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1821 +@@ -960,7 +960,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
1822 + amdgpu_dm_init_color_mod();
1823 +
1824 + #ifdef CONFIG_DRM_AMD_DC_HDCP
1825 +- if (adev->asic_type >= CHIP_RAVEN) {
1826 ++ if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1827 + adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1828 +
1829 + if (!adev->dm.hdcp_workqueue)
1830 +diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
1831 +index 80b7a082e8740..d6e0a29ea6b28 100644
1832 +--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
1833 ++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
1834 +@@ -444,7 +444,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
1835 + u32 horizontal_sync_active_byte;
1836 + u32 horizontal_backporch_byte;
1837 + u32 horizontal_frontporch_byte;
1838 ++ u32 horizontal_front_back_byte;
1839 ++ u32 data_phy_cycles_byte;
1840 + u32 dsi_tmp_buf_bpp, data_phy_cycles;
1841 ++ u32 delta;
1842 + struct mtk_phy_timing *timing = &dsi->phy_timing;
1843 +
1844 + struct videomode *vm = &dsi->vm;
1845 +@@ -466,50 +469,30 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
1846 + horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
1847 +
1848 + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
1849 +- horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
1850 ++ horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10;
1851 + else
1852 + horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
1853 +- dsi_tmp_buf_bpp;
1854 ++ dsi_tmp_buf_bpp - 10;
1855 +
1856 + data_phy_cycles = timing->lpx + timing->da_hs_prepare +
1857 +- timing->da_hs_zero + timing->da_hs_exit;
1858 +-
1859 +- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
1860 +- if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
1861 +- data_phy_cycles * dsi->lanes + 18) {
1862 +- horizontal_frontporch_byte =
1863 +- vm->hfront_porch * dsi_tmp_buf_bpp -
1864 +- (data_phy_cycles * dsi->lanes + 18) *
1865 +- vm->hfront_porch /
1866 +- (vm->hfront_porch + vm->hback_porch);
1867 +-
1868 +- horizontal_backporch_byte =
1869 +- horizontal_backporch_byte -
1870 +- (data_phy_cycles * dsi->lanes + 18) *
1871 +- vm->hback_porch /
1872 +- (vm->hfront_porch + vm->hback_porch);
1873 +- } else {
1874 +- DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
1875 +- horizontal_frontporch_byte = vm->hfront_porch *
1876 +- dsi_tmp_buf_bpp;
1877 +- }
1878 ++ timing->da_hs_zero + timing->da_hs_exit + 3;
1879 ++
1880 ++ delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
1881 ++
1882 ++ horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
1883 ++ horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
1884 ++ data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta;
1885 ++
1886 ++ if (horizontal_front_back_byte > data_phy_cycles_byte) {
1887 ++ horizontal_frontporch_byte -= data_phy_cycles_byte *
1888 ++ horizontal_frontporch_byte /
1889 ++ horizontal_front_back_byte;
1890 ++
1891 ++ horizontal_backporch_byte -= data_phy_cycles_byte *
1892 ++ horizontal_backporch_byte /
1893 ++ horizontal_front_back_byte;
1894 + } else {
1895 +- if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
1896 +- data_phy_cycles * dsi->lanes + 12) {
1897 +- horizontal_frontporch_byte =
1898 +- vm->hfront_porch * dsi_tmp_buf_bpp -
1899 +- (data_phy_cycles * dsi->lanes + 12) *
1900 +- vm->hfront_porch /
1901 +- (vm->hfront_porch + vm->hback_porch);
1902 +- horizontal_backporch_byte = horizontal_backporch_byte -
1903 +- (data_phy_cycles * dsi->lanes + 12) *
1904 +- vm->hback_porch /
1905 +- (vm->hfront_porch + vm->hback_porch);
1906 +- } else {
1907 +- DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
1908 +- horizontal_frontporch_byte = vm->hfront_porch *
1909 +- dsi_tmp_buf_bpp;
1910 +- }
1911 ++ DRM_WARN("HFP + HBP less than d-phy, FPS will under 60Hz\n");
1912 + }
1913 +
1914 + writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
1915 +diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
1916 +index 124d3dcc5c590..98e99aa8a547e 100644
1917 +--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
1918 ++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
1919 +@@ -570,8 +570,10 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
1920 + NV_PRINTK(err, cli, "validating bo list\n");
1921 + validate_fini(op, chan, NULL, NULL);
1922 + return ret;
1923 ++ } else if (ret > 0) {
1924 ++ *apply_relocs = true;
1925 + }
1926 +- *apply_relocs = ret;
1927 ++
1928 + return 0;
1929 + }
1930 +
1931 +@@ -674,7 +676,6 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
1932 + nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
1933 + }
1934 +
1935 +- u_free(reloc);
1936 + return ret;
1937 + }
1938 +
1939 +@@ -884,9 +885,10 @@ out:
1940 + break;
1941 + }
1942 + }
1943 +- u_free(reloc);
1944 + }
1945 + out_prevalid:
1946 ++ if (!IS_ERR(reloc))
1947 ++ u_free(reloc);
1948 + u_free(bo);
1949 + u_free(push);
1950 +
1951 +diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
1952 +index a50ba4a4a1d71..b88f889b3932e 100644
1953 +--- a/drivers/hid/hid-cypress.c
1954 ++++ b/drivers/hid/hid-cypress.c
1955 +@@ -23,19 +23,17 @@
1956 + #define CP_2WHEEL_MOUSE_HACK 0x02
1957 + #define CP_2WHEEL_MOUSE_HACK_ON 0x04
1958 +
1959 ++#define VA_INVAL_LOGICAL_BOUNDARY 0x08
1960 ++
1961 + /*
1962 + * Some USB barcode readers from cypress have usage min and usage max in
1963 + * the wrong order
1964 + */
1965 +-static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
1966 ++static __u8 *cp_rdesc_fixup(struct hid_device *hdev, __u8 *rdesc,
1967 + unsigned int *rsize)
1968 + {
1969 +- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
1970 + unsigned int i;
1971 +
1972 +- if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
1973 +- return rdesc;
1974 +-
1975 + if (*rsize < 4)
1976 + return rdesc;
1977 +
1978 +@@ -48,6 +46,40 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
1979 + return rdesc;
1980 + }
1981 +
1982 ++static __u8 *va_logical_boundary_fixup(struct hid_device *hdev, __u8 *rdesc,
1983 ++ unsigned int *rsize)
1984 ++{
1985 ++ /*
1986 ++ * Varmilo VA104M (with VID Cypress and device ID 07B1) incorrectly
1987 ++ * reports Logical Minimum of its Consumer Control device as 572
1988 ++ * (0x02 0x3c). Fix this by setting its Logical Minimum to zero.
1989 ++ */
1990 ++ if (*rsize == 25 &&
1991 ++ rdesc[0] == 0x05 && rdesc[1] == 0x0c &&
1992 ++ rdesc[2] == 0x09 && rdesc[3] == 0x01 &&
1993 ++ rdesc[6] == 0x19 && rdesc[7] == 0x00 &&
1994 ++ rdesc[11] == 0x16 && rdesc[12] == 0x3c && rdesc[13] == 0x02) {
1995 ++ hid_info(hdev,
1996 ++ "fixing up varmilo VA104M consumer control report descriptor\n");
1997 ++ rdesc[12] = 0x00;
1998 ++ rdesc[13] = 0x00;
1999 ++ }
2000 ++ return rdesc;
2001 ++}
2002 ++
2003 ++static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
2004 ++ unsigned int *rsize)
2005 ++{
2006 ++ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
2007 ++
2008 ++ if (quirks & CP_RDESC_SWAPPED_MIN_MAX)
2009 ++ rdesc = cp_rdesc_fixup(hdev, rdesc, rsize);
2010 ++ if (quirks & VA_INVAL_LOGICAL_BOUNDARY)
2011 ++ rdesc = va_logical_boundary_fixup(hdev, rdesc, rsize);
2012 ++
2013 ++ return rdesc;
2014 ++}
2015 ++
2016 + static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
2017 + struct hid_field *field, struct hid_usage *usage,
2018 + unsigned long **bit, int *max)
2019 +@@ -128,6 +160,8 @@ static const struct hid_device_id cp_devices[] = {
2020 + .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
2021 + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
2022 + .driver_data = CP_2WHEEL_MOUSE_HACK },
2023 ++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1),
2024 ++ .driver_data = VA_INVAL_LOGICAL_BOUNDARY },
2025 + { }
2026 + };
2027 + MODULE_DEVICE_TABLE(hid, cp_devices);
2028 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2029 +index 79495e218b7fc..a6d63a7590434 100644
2030 +--- a/drivers/hid/hid-ids.h
2031 ++++ b/drivers/hid/hid-ids.h
2032 +@@ -331,6 +331,8 @@
2033 + #define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
2034 + #define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
2035 +
2036 ++#define USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1 0X07b1
2037 ++
2038 + #define USB_VENDOR_ID_DATA_MODUL 0x7374
2039 + #define USB_VENDOR_ID_DATA_MODUL_EASYMAXTOUCH 0x1201
2040 +
2041 +@@ -443,6 +445,10 @@
2042 + #define USB_VENDOR_ID_FRUCTEL 0x25B6
2043 + #define USB_DEVICE_ID_GAMETEL_MT_MODE 0x0002
2044 +
2045 ++#define USB_VENDOR_ID_GAMEVICE 0x27F8
2046 ++#define USB_DEVICE_ID_GAMEVICE_GV186 0x0BBE
2047 ++#define USB_DEVICE_ID_GAMEVICE_KISHI 0x0BBF
2048 ++
2049 + #define USB_VENDOR_ID_GAMERON 0x0810
2050 + #define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001
2051 + #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
2052 +@@ -485,6 +491,7 @@
2053 + #define USB_DEVICE_ID_PENPOWER 0x00f4
2054 +
2055 + #define USB_VENDOR_ID_GREENASIA 0x0e8f
2056 ++#define USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR 0x3010
2057 + #define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013
2058 +
2059 + #define USB_VENDOR_ID_GRETAGMACBETH 0x0971
2060 +@@ -742,6 +749,7 @@
2061 + #define USB_VENDOR_ID_LOGITECH 0x046d
2062 + #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
2063 + #define USB_DEVICE_ID_LOGITECH_T651 0xb00c
2064 ++#define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD 0xb309
2065 + #define USB_DEVICE_ID_LOGITECH_C007 0xc007
2066 + #define USB_DEVICE_ID_LOGITECH_C077 0xc077
2067 + #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
2068 +@@ -1297,6 +1305,7 @@
2069 +
2070 + #define USB_VENDOR_ID_UGTIZER 0x2179
2071 + #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053
2072 ++#define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077
2073 +
2074 + #define USB_VENDOR_ID_VIEWSONIC 0x0543
2075 + #define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621
2076 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
2077 +index 9770db624bfaf..4dca113924593 100644
2078 +--- a/drivers/hid/hid-input.c
2079 ++++ b/drivers/hid/hid-input.c
2080 +@@ -319,6 +319,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
2081 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
2082 + USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD),
2083 + HID_BATTERY_QUIRK_IGNORE },
2084 ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
2085 ++ USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
2086 ++ HID_BATTERY_QUIRK_IGNORE },
2087 + {}
2088 + };
2089 +
2090 +diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
2091 +index 044a93f3c1178..742c052b0110a 100644
2092 +--- a/drivers/hid/hid-ite.c
2093 ++++ b/drivers/hid/hid-ite.c
2094 +@@ -11,6 +11,48 @@
2095 +
2096 + #include "hid-ids.h"
2097 +
2098 ++#define QUIRK_TOUCHPAD_ON_OFF_REPORT BIT(0)
2099 ++
2100 ++static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize)
2101 ++{
2102 ++ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
2103 ++
2104 ++ if (quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) {
2105 ++ if (*rsize == 188 && rdesc[162] == 0x81 && rdesc[163] == 0x02) {
2106 ++ hid_info(hdev, "Fixing up ITE keyboard report descriptor\n");
2107 ++ rdesc[163] = HID_MAIN_ITEM_RELATIVE;
2108 ++ }
2109 ++ }
2110 ++
2111 ++ return rdesc;
2112 ++}
2113 ++
2114 ++static int ite_input_mapping(struct hid_device *hdev,
2115 ++ struct hid_input *hi, struct hid_field *field,
2116 ++ struct hid_usage *usage, unsigned long **bit,
2117 ++ int *max)
2118 ++{
2119 ++
2120 ++ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
2121 ++
2122 ++ if ((quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) &&
2123 ++ (usage->hid & HID_USAGE_PAGE) == 0x00880000) {
2124 ++ if (usage->hid == 0x00880078) {
2125 ++ /* Touchpad on, userspace expects F22 for this */
2126 ++ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F22);
2127 ++ return 1;
2128 ++ }
2129 ++ if (usage->hid == 0x00880079) {
2130 ++ /* Touchpad off, userspace expects F23 for this */
2131 ++ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F23);
2132 ++ return 1;
2133 ++ }
2134 ++ return -1;
2135 ++ }
2136 ++
2137 ++ return 0;
2138 ++}
2139 ++
2140 + static int ite_event(struct hid_device *hdev, struct hid_field *field,
2141 + struct hid_usage *usage, __s32 value)
2142 + {
2143 +@@ -37,13 +79,27 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
2144 + return 0;
2145 + }
2146 +
2147 ++static int ite_probe(struct hid_device *hdev, const struct hid_device_id *id)
2148 ++{
2149 ++ int ret;
2150 ++
2151 ++ hid_set_drvdata(hdev, (void *)id->driver_data);
2152 ++
2153 ++ ret = hid_open_report(hdev);
2154 ++ if (ret)
2155 ++ return ret;
2156 ++
2157 ++ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2158 ++}
2159 ++
2160 + static const struct hid_device_id ite_devices[] = {
2161 + { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
2162 + { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
2163 + /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
2164 + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
2165 + USB_VENDOR_ID_SYNAPTICS,
2166 +- USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
2167 ++ USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012),
2168 ++ .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
2169 + /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
2170 + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
2171 + USB_VENDOR_ID_SYNAPTICS,
2172 +@@ -55,6 +111,9 @@ MODULE_DEVICE_TABLE(hid, ite_devices);
2173 + static struct hid_driver ite_driver = {
2174 + .name = "itetech",
2175 + .id_table = ite_devices,
2176 ++ .probe = ite_probe,
2177 ++ .report_fixup = ite_report_fixup,
2178 ++ .input_mapping = ite_input_mapping,
2179 + .event = ite_event,
2180 + };
2181 + module_hid_driver(ite_driver);
2182 +diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
2183 +index a2991622702ae..0ca7231195473 100644
2184 +--- a/drivers/hid/hid-logitech-hidpp.c
2185 ++++ b/drivers/hid/hid-logitech-hidpp.c
2186 +@@ -3997,6 +3997,9 @@ static const struct hid_device_id hidpp_devices[] = {
2187 + { /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
2188 + LDJ_DEVICE(0xb305),
2189 + .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
2190 ++ { /* Dinovo Edge (Bluetooth-receiver in HID proxy mode) */
2191 ++ LDJ_DEVICE(0xb309),
2192 ++ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
2193 + { /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */
2194 + LDJ_DEVICE(0xb30b),
2195 + .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
2196 +@@ -4039,6 +4042,9 @@ static const struct hid_device_id hidpp_devices[] = {
2197 + { /* MX5000 keyboard over Bluetooth */
2198 + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
2199 + .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
2200 ++ { /* Dinovo Edge keyboard over Bluetooth */
2201 ++ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb309),
2202 ++ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
2203 + { /* MX5500 keyboard over Bluetooth */
2204 + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
2205 + .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
2206 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
2207 +index 7a2be0205dfd1..bf7ecab5d9e5e 100644
2208 +--- a/drivers/hid/hid-quirks.c
2209 ++++ b/drivers/hid/hid-quirks.c
2210 +@@ -83,7 +83,12 @@ static const struct hid_device_id hid_quirks[] = {
2211 + { HID_USB_DEVICE(USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER), HID_QUIRK_NO_INIT_REPORTS },
2212 + { HID_USB_DEVICE(USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28), HID_QUIRK_NOGET },
2213 + { HID_USB_DEVICE(USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY), HID_QUIRK_NO_INIT_REPORTS },
2214 ++ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR), HID_QUIRK_MULTI_INPUT },
2215 + { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD), HID_QUIRK_MULTI_INPUT },
2216 ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_GV186),
2217 ++ HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
2218 ++ { HID_USB_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_KISHI),
2219 ++ HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
2220 + { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
2221 + { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
2222 + { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
2223 +diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
2224 +index 94c7398b5c279..3dd7d32467378 100644
2225 +--- a/drivers/hid/hid-sensor-hub.c
2226 ++++ b/drivers/hid/hid-sensor-hub.c
2227 +@@ -483,7 +483,8 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
2228 + return 1;
2229 +
2230 + ptr = raw_data;
2231 +- ptr++; /* Skip report id */
2232 ++ if (report->id)
2233 ++ ptr++; /* Skip report id */
2234 +
2235 + spin_lock_irqsave(&pdata->lock, flags);
2236 +
2237 +diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
2238 +index 86b568037cb8a..8e9c9e646cb7d 100644
2239 +--- a/drivers/hid/hid-uclogic-core.c
2240 ++++ b/drivers/hid/hid-uclogic-core.c
2241 +@@ -385,6 +385,8 @@ static const struct hid_device_id uclogic_devices[] = {
2242 + USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
2243 + { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER,
2244 + USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
2245 ++ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER,
2246 ++ USB_DEVICE_ID_UGTIZER_TABLET_GT5040) },
2247 + { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
2248 + USB_DEVICE_ID_UGEE_TABLET_G5) },
2249 + { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
2250 +diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
2251 +index 7d20d1fcf8d20..d26d8cd98efcf 100644
2252 +--- a/drivers/hid/hid-uclogic-params.c
2253 ++++ b/drivers/hid/hid-uclogic-params.c
2254 +@@ -997,6 +997,8 @@ int uclogic_params_init(struct uclogic_params *params,
2255 + break;
2256 + case VID_PID(USB_VENDOR_ID_UGTIZER,
2257 + USB_DEVICE_ID_UGTIZER_TABLET_GP0610):
2258 ++ case VID_PID(USB_VENDOR_ID_UGTIZER,
2259 ++ USB_DEVICE_ID_UGTIZER_TABLET_GT5040):
2260 + case VID_PID(USB_VENDOR_ID_UGEE,
2261 + USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540):
2262 + case VID_PID(USB_VENDOR_ID_UGEE,
2263 +diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
2264 +index 8ca51e43cf530..329ee4f48d957 100644
2265 +--- a/drivers/infiniband/hw/hfi1/file_ops.c
2266 ++++ b/drivers/infiniband/hw/hfi1/file_ops.c
2267 +@@ -1,4 +1,5 @@
2268 + /*
2269 ++ * Copyright(c) 2020 Cornelis Networks, Inc.
2270 + * Copyright(c) 2015-2020 Intel Corporation.
2271 + *
2272 + * This file is provided under a dual BSD/GPLv2 license. When using or
2273 +@@ -206,8 +207,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
2274 + spin_lock_init(&fd->tid_lock);
2275 + spin_lock_init(&fd->invalid_lock);
2276 + fd->rec_cpu_num = -1; /* no cpu affinity by default */
2277 +- fd->mm = current->mm;
2278 +- mmgrab(fd->mm);
2279 + fd->dd = dd;
2280 + fp->private_data = fd;
2281 + return 0;
2282 +@@ -711,7 +710,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
2283 +
2284 + deallocate_ctxt(uctxt);
2285 + done:
2286 +- mmdrop(fdata->mm);
2287 +
2288 + if (atomic_dec_and_test(&dd->user_refcount))
2289 + complete(&dd->user_comp);
2290 +diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
2291 +index b4c6bff60a4e8..e09e8244a94c4 100644
2292 +--- a/drivers/infiniband/hw/hfi1/hfi.h
2293 ++++ b/drivers/infiniband/hw/hfi1/hfi.h
2294 +@@ -1,6 +1,7 @@
2295 + #ifndef _HFI1_KERNEL_H
2296 + #define _HFI1_KERNEL_H
2297 + /*
2298 ++ * Copyright(c) 2020 Cornelis Networks, Inc.
2299 + * Copyright(c) 2015-2020 Intel Corporation.
2300 + *
2301 + * This file is provided under a dual BSD/GPLv2 license. When using or
2302 +@@ -1451,7 +1452,6 @@ struct hfi1_filedata {
2303 + u32 invalid_tid_idx;
2304 + /* protect invalid_tids array and invalid_tid_idx */
2305 + spinlock_t invalid_lock;
2306 +- struct mm_struct *mm;
2307 + };
2308 +
2309 + extern struct xarray hfi1_dev_table;
2310 +diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
2311 +index 24ca17b77b72b..f3fb28e3d5d74 100644
2312 +--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
2313 ++++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
2314 +@@ -1,4 +1,5 @@
2315 + /*
2316 ++ * Copyright(c) 2020 Cornelis Networks, Inc.
2317 + * Copyright(c) 2016 - 2017 Intel Corporation.
2318 + *
2319 + * This file is provided under a dual BSD/GPLv2 license. When using or
2320 +@@ -48,23 +49,11 @@
2321 + #include <linux/rculist.h>
2322 + #include <linux/mmu_notifier.h>
2323 + #include <linux/interval_tree_generic.h>
2324 ++#include <linux/sched/mm.h>
2325 +
2326 + #include "mmu_rb.h"
2327 + #include "trace.h"
2328 +
2329 +-struct mmu_rb_handler {
2330 +- struct mmu_notifier mn;
2331 +- struct rb_root_cached root;
2332 +- void *ops_arg;
2333 +- spinlock_t lock; /* protect the RB tree */
2334 +- struct mmu_rb_ops *ops;
2335 +- struct mm_struct *mm;
2336 +- struct list_head lru_list;
2337 +- struct work_struct del_work;
2338 +- struct list_head del_list;
2339 +- struct workqueue_struct *wq;
2340 +-};
2341 +-
2342 + static unsigned long mmu_node_start(struct mmu_rb_node *);
2343 + static unsigned long mmu_node_last(struct mmu_rb_node *);
2344 + static int mmu_notifier_range_start(struct mmu_notifier *,
2345 +@@ -92,37 +81,36 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
2346 + return PAGE_ALIGN(node->addr + node->len) - 1;
2347 + }
2348 +
2349 +-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
2350 ++int hfi1_mmu_rb_register(void *ops_arg,
2351 + struct mmu_rb_ops *ops,
2352 + struct workqueue_struct *wq,
2353 + struct mmu_rb_handler **handler)
2354 + {
2355 +- struct mmu_rb_handler *handlr;
2356 ++ struct mmu_rb_handler *h;
2357 + int ret;
2358 +
2359 +- handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
2360 +- if (!handlr)
2361 ++ h = kmalloc(sizeof(*h), GFP_KERNEL);
2362 ++ if (!h)
2363 + return -ENOMEM;
2364 +
2365 +- handlr->root = RB_ROOT_CACHED;
2366 +- handlr->ops = ops;
2367 +- handlr->ops_arg = ops_arg;
2368 +- INIT_HLIST_NODE(&handlr->mn.hlist);
2369 +- spin_lock_init(&handlr->lock);
2370 +- handlr->mn.ops = &mn_opts;
2371 +- handlr->mm = mm;
2372 +- INIT_WORK(&handlr->del_work, handle_remove);
2373 +- INIT_LIST_HEAD(&handlr->del_list);
2374 +- INIT_LIST_HEAD(&handlr->lru_list);
2375 +- handlr->wq = wq;
2376 +-
2377 +- ret = mmu_notifier_register(&handlr->mn, handlr->mm);
2378 ++ h->root = RB_ROOT_CACHED;
2379 ++ h->ops = ops;
2380 ++ h->ops_arg = ops_arg;
2381 ++ INIT_HLIST_NODE(&h->mn.hlist);
2382 ++ spin_lock_init(&h->lock);
2383 ++ h->mn.ops = &mn_opts;
2384 ++ INIT_WORK(&h->del_work, handle_remove);
2385 ++ INIT_LIST_HEAD(&h->del_list);
2386 ++ INIT_LIST_HEAD(&h->lru_list);
2387 ++ h->wq = wq;
2388 ++
2389 ++ ret = mmu_notifier_register(&h->mn, current->mm);
2390 + if (ret) {
2391 +- kfree(handlr);
2392 ++ kfree(h);
2393 + return ret;
2394 + }
2395 +
2396 +- *handler = handlr;
2397 ++ *handler = h;
2398 + return 0;
2399 + }
2400 +
2401 +@@ -134,7 +122,7 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
2402 + struct list_head del_list;
2403 +
2404 + /* Unregister first so we don't get any more notifications. */
2405 +- mmu_notifier_unregister(&handler->mn, handler->mm);
2406 ++ mmu_notifier_unregister(&handler->mn, handler->mn.mm);
2407 +
2408 + /*
2409 + * Make sure the wq delete handler is finished running. It will not
2410 +@@ -166,6 +154,10 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
2411 + int ret = 0;
2412 +
2413 + trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
2414 ++
2415 ++ if (current->mm != handler->mn.mm)
2416 ++ return -EPERM;
2417 ++
2418 + spin_lock_irqsave(&handler->lock, flags);
2419 + node = __mmu_rb_search(handler, mnode->addr, mnode->len);
2420 + if (node) {
2421 +@@ -180,6 +172,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
2422 + __mmu_int_rb_remove(mnode, &handler->root);
2423 + list_del(&mnode->list); /* remove from LRU list */
2424 + }
2425 ++ mnode->handler = handler;
2426 + unlock:
2427 + spin_unlock_irqrestore(&handler->lock, flags);
2428 + return ret;
2429 +@@ -217,6 +210,9 @@ bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
2430 + unsigned long flags;
2431 + bool ret = false;
2432 +
2433 ++ if (current->mm != handler->mn.mm)
2434 ++ return ret;
2435 ++
2436 + spin_lock_irqsave(&handler->lock, flags);
2437 + node = __mmu_rb_search(handler, addr, len);
2438 + if (node) {
2439 +@@ -239,6 +235,9 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
2440 + unsigned long flags;
2441 + bool stop = false;
2442 +
2443 ++ if (current->mm != handler->mn.mm)
2444 ++ return;
2445 ++
2446 + INIT_LIST_HEAD(&del_list);
2447 +
2448 + spin_lock_irqsave(&handler->lock, flags);
2449 +@@ -272,6 +271,9 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
2450 + {
2451 + unsigned long flags;
2452 +
2453 ++ if (current->mm != handler->mn.mm)
2454 ++ return;
2455 ++
2456 + /* Validity of handler and node pointers has been checked by caller. */
2457 + trace_hfi1_mmu_rb_remove(node->addr, node->len);
2458 + spin_lock_irqsave(&handler->lock, flags);
2459 +diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
2460 +index f04cec1e99d11..423aacc67e948 100644
2461 +--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
2462 ++++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
2463 +@@ -1,4 +1,5 @@
2464 + /*
2465 ++ * Copyright(c) 2020 Cornelis Networks, Inc.
2466 + * Copyright(c) 2016 Intel Corporation.
2467 + *
2468 + * This file is provided under a dual BSD/GPLv2 license. When using or
2469 +@@ -54,6 +55,7 @@ struct mmu_rb_node {
2470 + unsigned long len;
2471 + unsigned long __last;
2472 + struct rb_node node;
2473 ++ struct mmu_rb_handler *handler;
2474 + struct list_head list;
2475 + };
2476 +
2477 +@@ -71,7 +73,19 @@ struct mmu_rb_ops {
2478 + void *evict_arg, bool *stop);
2479 + };
2480 +
2481 +-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
2482 ++struct mmu_rb_handler {
2483 ++ struct mmu_notifier mn;
2484 ++ struct rb_root_cached root;
2485 ++ void *ops_arg;
2486 ++ spinlock_t lock; /* protect the RB tree */
2487 ++ struct mmu_rb_ops *ops;
2488 ++ struct list_head lru_list;
2489 ++ struct work_struct del_work;
2490 ++ struct list_head del_list;
2491 ++ struct workqueue_struct *wq;
2492 ++};
2493 ++
2494 ++int hfi1_mmu_rb_register(void *ops_arg,
2495 + struct mmu_rb_ops *ops,
2496 + struct workqueue_struct *wq,
2497 + struct mmu_rb_handler **handler);
2498 +diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
2499 +index f81ca20f4b693..b94fc7fd75a96 100644
2500 +--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
2501 ++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
2502 +@@ -1,4 +1,5 @@
2503 + /*
2504 ++ * Copyright(c) 2020 Cornelis Networks, Inc.
2505 + * Copyright(c) 2015-2018 Intel Corporation.
2506 + *
2507 + * This file is provided under a dual BSD/GPLv2 license. When using or
2508 +@@ -173,15 +174,18 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
2509 + {
2510 + struct page **pages;
2511 + struct hfi1_devdata *dd = fd->uctxt->dd;
2512 ++ struct mm_struct *mm;
2513 +
2514 + if (mapped) {
2515 + pci_unmap_single(dd->pcidev, node->dma_addr,
2516 + node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
2517 + pages = &node->pages[idx];
2518 ++ mm = mm_from_tid_node(node);
2519 + } else {
2520 + pages = &tidbuf->pages[idx];
2521 ++ mm = current->mm;
2522 + }
2523 +- hfi1_release_user_pages(fd->mm, pages, npages, mapped);
2524 ++ hfi1_release_user_pages(mm, pages, npages, mapped);
2525 + fd->tid_n_pinned -= npages;
2526 + }
2527 +
2528 +@@ -216,12 +220,12 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
2529 + * pages, accept the amount pinned so far and program only that.
2530 + * User space knows how to deal with partially programmed buffers.
2531 + */
2532 +- if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
2533 ++ if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
2534 + kfree(pages);
2535 + return -ENOMEM;
2536 + }
2537 +
2538 +- pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
2539 ++ pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
2540 + if (pinned <= 0) {
2541 + kfree(pages);
2542 + return pinned;
2543 +@@ -756,7 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
2544 +
2545 + if (fd->use_mn) {
2546 + ret = mmu_interval_notifier_insert(
2547 +- &node->notifier, fd->mm,
2548 ++ &node->notifier, current->mm,
2549 + tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
2550 + &tid_mn_ops);
2551 + if (ret)
2552 +diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
2553 +index 332abb446861a..d45c7b6988d4d 100644
2554 +--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
2555 ++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
2556 +@@ -1,6 +1,7 @@
2557 + #ifndef _HFI1_USER_EXP_RCV_H
2558 + #define _HFI1_USER_EXP_RCV_H
2559 + /*
2560 ++ * Copyright(c) 2020 - Cornelis Networks, Inc.
2561 + * Copyright(c) 2015 - 2017 Intel Corporation.
2562 + *
2563 + * This file is provided under a dual BSD/GPLv2 license. When using or
2564 +@@ -95,4 +96,9 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
2565 + int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
2566 + struct hfi1_tid_info *tinfo);
2567 +
2568 ++static inline struct mm_struct *mm_from_tid_node(struct tid_rb_node *node)
2569 ++{
2570 ++ return node->notifier.mm;
2571 ++}
2572 ++
2573 + #endif /* _HFI1_USER_EXP_RCV_H */
2574 +diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
2575 +index a92346e88628b..4a4956f96a7eb 100644
2576 +--- a/drivers/infiniband/hw/hfi1/user_sdma.c
2577 ++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
2578 +@@ -1,4 +1,5 @@
2579 + /*
2580 ++ * Copyright(c) 2020 - Cornelis Networks, Inc.
2581 + * Copyright(c) 2015 - 2018 Intel Corporation.
2582 + *
2583 + * This file is provided under a dual BSD/GPLv2 license. When using or
2584 +@@ -188,7 +189,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
2585 + atomic_set(&pq->n_reqs, 0);
2586 + init_waitqueue_head(&pq->wait);
2587 + atomic_set(&pq->n_locked, 0);
2588 +- pq->mm = fd->mm;
2589 +
2590 + iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
2591 + activate_packet_queue, NULL, NULL);
2592 +@@ -230,7 +230,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
2593 +
2594 + cq->nentries = hfi1_sdma_comp_ring_size;
2595 +
2596 +- ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
2597 ++ ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
2598 + &pq->handler);
2599 + if (ret) {
2600 + dd_dev_err(dd, "Failed to register with MMU %d", ret);
2601 +@@ -980,13 +980,13 @@ static int pin_sdma_pages(struct user_sdma_request *req,
2602 +
2603 + npages -= node->npages;
2604 + retry:
2605 +- if (!hfi1_can_pin_pages(pq->dd, pq->mm,
2606 ++ if (!hfi1_can_pin_pages(pq->dd, current->mm,
2607 + atomic_read(&pq->n_locked), npages)) {
2608 + cleared = sdma_cache_evict(pq, npages);
2609 + if (cleared >= npages)
2610 + goto retry;
2611 + }
2612 +- pinned = hfi1_acquire_user_pages(pq->mm,
2613 ++ pinned = hfi1_acquire_user_pages(current->mm,
2614 + ((unsigned long)iovec->iov.iov_base +
2615 + (node->npages * PAGE_SIZE)), npages, 0,
2616 + pages + node->npages);
2617 +@@ -995,7 +995,7 @@ retry:
2618 + return pinned;
2619 + }
2620 + if (pinned != npages) {
2621 +- unpin_vector_pages(pq->mm, pages, node->npages, pinned);
2622 ++ unpin_vector_pages(current->mm, pages, node->npages, pinned);
2623 + return -EFAULT;
2624 + }
2625 + kfree(node->pages);
2626 +@@ -1008,7 +1008,8 @@ retry:
2627 + static void unpin_sdma_pages(struct sdma_mmu_node *node)
2628 + {
2629 + if (node->npages) {
2630 +- unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
2631 ++ unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
2632 ++ node->npages);
2633 + atomic_sub(node->npages, &node->pq->n_locked);
2634 + }
2635 + }
2636 +diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
2637 +index 9972e0e6545e8..1e8c02fe8ad1d 100644
2638 +--- a/drivers/infiniband/hw/hfi1/user_sdma.h
2639 ++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
2640 +@@ -1,6 +1,7 @@
2641 + #ifndef _HFI1_USER_SDMA_H
2642 + #define _HFI1_USER_SDMA_H
2643 + /*
2644 ++ * Copyright(c) 2020 - Cornelis Networks, Inc.
2645 + * Copyright(c) 2015 - 2018 Intel Corporation.
2646 + *
2647 + * This file is provided under a dual BSD/GPLv2 license. When using or
2648 +@@ -133,7 +134,6 @@ struct hfi1_user_sdma_pkt_q {
2649 + unsigned long unpinned;
2650 + struct mmu_rb_handler *handler;
2651 + atomic_t n_locked;
2652 +- struct mm_struct *mm;
2653 + };
2654 +
2655 + struct hfi1_user_sdma_comp_q {
2656 +@@ -250,4 +250,9 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
2657 + struct iovec *iovec, unsigned long dim,
2658 + unsigned long *count);
2659 +
2660 ++static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
2661 ++{
2662 ++ return node->rb.handler->mn.mm;
2663 ++}
2664 ++
2665 + #endif /* _HFI1_USER_SDMA_H */
2666 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2667 +index cee140920c579..4c02839b7b418 100644
2668 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2669 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2670 +@@ -2738,6 +2738,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
2671 +
2672 + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2673 + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2674 ++ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
2675 +
2676 + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2677 + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
2678 +@@ -4771,11 +4772,11 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
2679 + V2_QPC_BYTE_28_AT_M,
2680 + V2_QPC_BYTE_28_AT_S);
2681 + qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
2682 +- V2_QPC_BYTE_212_RETRY_CNT_M,
2683 +- V2_QPC_BYTE_212_RETRY_CNT_S);
2684 ++ V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
2685 ++ V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
2686 + qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
2687 +- V2_QPC_BYTE_244_RNR_CNT_M,
2688 +- V2_QPC_BYTE_244_RNR_CNT_S);
2689 ++ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
2690 ++ V2_QPC_BYTE_244_RNR_NUM_INIT_S);
2691 +
2692 + done:
2693 + qp_attr->cur_qp_state = qp_attr->qp_state;
2694 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
2695 +index 17f35f91f4ad2..9d27dfe86821b 100644
2696 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
2697 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
2698 +@@ -1639,7 +1639,7 @@ struct hns_roce_query_pf_caps_d {
2699 + __le32 rsv_uars_rsv_qps;
2700 + };
2701 + #define V2_QUERY_PF_CAPS_D_NUM_SRQS_S 0
2702 +-#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(20, 0)
2703 ++#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(19, 0)
2704 +
2705 + #define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S 20
2706 + #define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M GENMASK(21, 20)
2707 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
2708 +index 58a433135a038..9023ad9c30182 100644
2709 +--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
2710 ++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
2711 +@@ -54,10 +54,6 @@
2712 + #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
2713 + __stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
2714 +
2715 +-static int push_mode;
2716 +-module_param(push_mode, int, 0644);
2717 +-MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
2718 +-
2719 + static int debug;
2720 + module_param(debug, int, 0644);
2721 + MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
2722 +@@ -1580,7 +1576,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
2723 + if (status)
2724 + goto exit;
2725 + iwdev->obj_next = iwdev->obj_mem;
2726 +- iwdev->push_mode = push_mode;
2727 +
2728 + init_waitqueue_head(&iwdev->vchnl_waitq);
2729 + init_waitqueue_head(&dev->vf_reqs);
2730 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
2731 +index 09caad228aa4f..e40c505196645 100644
2732 +--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
2733 ++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
2734 +@@ -167,39 +167,16 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
2735 + */
2736 + static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
2737 + {
2738 +- struct i40iw_ucontext *ucontext;
2739 +- u64 db_addr_offset, push_offset, pfn;
2740 +-
2741 +- ucontext = to_ucontext(context);
2742 +- if (ucontext->iwdev->sc_dev.is_pf) {
2743 +- db_addr_offset = I40IW_DB_ADDR_OFFSET;
2744 +- push_offset = I40IW_PUSH_OFFSET;
2745 +- if (vma->vm_pgoff)
2746 +- vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
2747 +- } else {
2748 +- db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
2749 +- push_offset = I40IW_VF_PUSH_OFFSET;
2750 +- if (vma->vm_pgoff)
2751 +- vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
2752 +- }
2753 ++ struct i40iw_ucontext *ucontext = to_ucontext(context);
2754 ++ u64 dbaddr;
2755 +
2756 +- vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
2757 +-
2758 +- if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
2759 +- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2760 +- } else {
2761 +- if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
2762 +- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2763 +- else
2764 +- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
2765 +- }
2766 ++ if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
2767 ++ return -EINVAL;
2768 +
2769 +- pfn = vma->vm_pgoff +
2770 +- (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
2771 +- PAGE_SHIFT);
2772 ++ dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
2773 +
2774 +- return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
2775 +- vma->vm_page_prot, NULL);
2776 ++ return rdma_user_mmap_io(context, vma, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
2777 ++ pgprot_noncached(vma->vm_page_prot), NULL);
2778 + }
2779 +
2780 + /**
2781 +diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
2782 +index c3cfea243af8c..119b2573c9a08 100644
2783 +--- a/drivers/infiniband/hw/mthca/mthca_cq.c
2784 ++++ b/drivers/infiniband/hw/mthca/mthca_cq.c
2785 +@@ -803,8 +803,10 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
2786 + }
2787 +
2788 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
2789 +- if (IS_ERR(mailbox))
2790 ++ if (IS_ERR(mailbox)) {
2791 ++ err = PTR_ERR(mailbox);
2792 + goto err_out_arm;
2793 ++ }
2794 +
2795 + cq_context = mailbox->buf;
2796 +
2797 +@@ -846,9 +848,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
2798 + }
2799 +
2800 + spin_lock_irq(&dev->cq_table.lock);
2801 +- if (mthca_array_set(&dev->cq_table.cq,
2802 +- cq->cqn & (dev->limits.num_cqs - 1),
2803 +- cq)) {
2804 ++ err = mthca_array_set(&dev->cq_table.cq,
2805 ++ cq->cqn & (dev->limits.num_cqs - 1), cq);
2806 ++ if (err) {
2807 + spin_unlock_irq(&dev->cq_table.lock);
2808 + goto err_out_free_mr;
2809 + }
2810 +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
2811 +index d3eda48032e39..944cbb519c6d7 100644
2812 +--- a/drivers/input/serio/i8042.c
2813 ++++ b/drivers/input/serio/i8042.c
2814 +@@ -122,6 +122,7 @@ module_param_named(unmask_kbd_data, i8042_unmask_kbd_data, bool, 0600);
2815 + MODULE_PARM_DESC(unmask_kbd_data, "Unconditional enable (may reveal sensitive data) of normally sanitize-filtered kbd data traffic debug log [pre-condition: i8042.debug=1 enabled]");
2816 + #endif
2817 +
2818 ++static bool i8042_present;
2819 + static bool i8042_bypass_aux_irq_test;
2820 + static char i8042_kbd_firmware_id[128];
2821 + static char i8042_aux_firmware_id[128];
2822 +@@ -343,6 +344,9 @@ int i8042_command(unsigned char *param, int command)
2823 + unsigned long flags;
2824 + int retval;
2825 +
2826 ++ if (!i8042_present)
2827 ++ return -1;
2828 ++
2829 + spin_lock_irqsave(&i8042_lock, flags);
2830 + retval = __i8042_command(param, command);
2831 + spin_unlock_irqrestore(&i8042_lock, flags);
2832 +@@ -1612,12 +1616,15 @@ static int __init i8042_init(void)
2833 +
2834 + err = i8042_platform_init();
2835 + if (err)
2836 +- return err;
2837 ++ return (err == -ENODEV) ? 0 : err;
2838 +
2839 + err = i8042_controller_check();
2840 + if (err)
2841 + goto err_platform_exit;
2842 +
2843 ++ /* Set this before creating the dev to allow i8042_command to work right away */
2844 ++ i8042_present = true;
2845 ++
2846 + pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
2847 + if (IS_ERR(pdev)) {
2848 + err = PTR_ERR(pdev);
2849 +@@ -1636,6 +1643,9 @@ static int __init i8042_init(void)
2850 +
2851 + static void __exit i8042_exit(void)
2852 + {
2853 ++ if (!i8042_present)
2854 ++ return;
2855 ++
2856 + platform_device_unregister(i8042_platform_device);
2857 + platform_driver_unregister(&i8042_driver);
2858 + i8042_platform_exit();
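The i8042 hunks above add a module-wide i8042_present flag: i8042_command() bails out when no controller was ever found, i8042_init() treats -ENODEV from platform init as "no hardware, not an error", and i8042_exit() skips teardown of things that were never registered. A self-contained sketch of that guard pattern, not part of the patch and with invented names:

#include <stdbool.h>
#include <stdio.h>

static bool dev_present;                /* mirrors the added i8042_present flag */

static int dev_command(int cmd)
{
        if (!dev_present)               /* callers get an error instead of touching absent hardware */
                return -1;
        printf("executing command %#x\n", cmd);
        return 0;
}

static int dev_init(bool hw_found)
{
        if (!hw_found)
                return 0;               /* like returning 0 on -ENODEV: absent hardware is not a failure */
        dev_present = true;             /* set only once the controller check has passed */
        return 0;
}

int main(void)
{
        dev_init(false);
        printf("command without hardware: %d\n", dev_command(0x60));
        dev_init(true);
        printf("command with hardware:    %d\n", dev_command(0x60));
        return 0;
}

As the comment added in the hunk notes, the flag is set before the platform device is created because exported helpers such as i8042_command() may be called as soon as the device exists.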
2859 +diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
2860 +index 93e6345f3414f..48cda86f43a2c 100644
2861 +--- a/drivers/iommu/intel/dmar.c
2862 ++++ b/drivers/iommu/intel/dmar.c
2863 +@@ -964,7 +964,8 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
2864 + warn_invalid_dmar(phys_addr, " returns all ones");
2865 + goto unmap;
2866 + }
2867 +- iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
2868 ++ if (ecap_vcs(iommu->ecap))
2869 ++ iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
2870 +
2871 + /* the registers might be more than one page */
2872 + map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
2873 +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
2874 +index f67b7e6ddf1bc..7e790655c1ab5 100644
2875 +--- a/drivers/iommu/intel/iommu.c
2876 ++++ b/drivers/iommu/intel/iommu.c
2877 +@@ -1798,7 +1798,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
2878 + if (ecap_prs(iommu->ecap))
2879 + intel_svm_finish_prq(iommu);
2880 + }
2881 +- if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
2882 ++ if (vccap_pasid(iommu->vccap))
2883 + ioasid_unregister_allocator(&iommu->pasid_allocator);
2884 +
2885 + #endif
2886 +@@ -3177,7 +3177,7 @@ static void register_pasid_allocator(struct intel_iommu *iommu)
2887 + * is active. All vIOMMU allocators will eventually be calling the same
2888 + * host allocator.
2889 + */
2890 +- if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
2891 ++ if (!vccap_pasid(iommu->vccap))
2892 + return;
2893 +
2894 + pr_info("Register custom PASID allocator\n");
2895 +diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
2896 +index 609bd25bf154b..6a0a79e3f5641 100644
2897 +--- a/drivers/iommu/iommu.c
2898 ++++ b/drivers/iommu/iommu.c
2899 +@@ -264,16 +264,18 @@ int iommu_probe_device(struct device *dev)
2900 + */
2901 + iommu_alloc_default_domain(group, dev);
2902 +
2903 +- if (group->default_domain)
2904 ++ if (group->default_domain) {
2905 + ret = __iommu_attach_device(group->default_domain, dev);
2906 ++ if (ret) {
2907 ++ iommu_group_put(group);
2908 ++ goto err_release;
2909 ++ }
2910 ++ }
2911 +
2912 + iommu_create_device_direct_mappings(group, dev);
2913 +
2914 + iommu_group_put(group);
2915 +
2916 +- if (ret)
2917 +- goto err_release;
2918 +-
2919 + if (ops->probe_finalize)
2920 + ops->probe_finalize(dev);
2921 +
2922 +diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
2923 +index 1d027623c7760..abd011fcecf4a 100644
2924 +--- a/drivers/irqchip/irq-sni-exiu.c
2925 ++++ b/drivers/irqchip/irq-sni-exiu.c
2926 +@@ -136,7 +136,7 @@ static int exiu_domain_translate(struct irq_domain *domain,
2927 + if (fwspec->param_count != 2)
2928 + return -EINVAL;
2929 + *hwirq = fwspec->param[0];
2930 +- *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
2931 ++ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
2932 + }
2933 + return 0;
2934 + }
2935 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2936 +index 84ecbc6fa0ff2..47afc5938c26b 100644
2937 +--- a/drivers/net/bonding/bond_main.c
2938 ++++ b/drivers/net/bonding/bond_main.c
2939 +@@ -1460,7 +1460,39 @@ static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
2940 + slave->dev->flags &= ~IFF_SLAVE;
2941 + }
2942 +
2943 +-static struct slave *bond_alloc_slave(struct bonding *bond)
2944 ++static void slave_kobj_release(struct kobject *kobj)
2945 ++{
2946 ++ struct slave *slave = to_slave(kobj);
2947 ++ struct bonding *bond = bond_get_bond_by_slave(slave);
2948 ++
2949 ++ cancel_delayed_work_sync(&slave->notify_work);
2950 ++ if (BOND_MODE(bond) == BOND_MODE_8023AD)
2951 ++ kfree(SLAVE_AD_INFO(slave));
2952 ++
2953 ++ kfree(slave);
2954 ++}
2955 ++
2956 ++static struct kobj_type slave_ktype = {
2957 ++ .release = slave_kobj_release,
2958 ++#ifdef CONFIG_SYSFS
2959 ++ .sysfs_ops = &slave_sysfs_ops,
2960 ++#endif
2961 ++};
2962 ++
2963 ++static int bond_kobj_init(struct slave *slave)
2964 ++{
2965 ++ int err;
2966 ++
2967 ++ err = kobject_init_and_add(&slave->kobj, &slave_ktype,
2968 ++ &(slave->dev->dev.kobj), "bonding_slave");
2969 ++ if (err)
2970 ++ kobject_put(&slave->kobj);
2971 ++
2972 ++ return err;
2973 ++}
2974 ++
2975 ++static struct slave *bond_alloc_slave(struct bonding *bond,
2976 ++ struct net_device *slave_dev)
2977 + {
2978 + struct slave *slave = NULL;
2979 +
2980 +@@ -1468,11 +1500,17 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
2981 + if (!slave)
2982 + return NULL;
2983 +
2984 ++ slave->bond = bond;
2985 ++ slave->dev = slave_dev;
2986 ++
2987 ++ if (bond_kobj_init(slave))
2988 ++ return NULL;
2989 ++
2990 + if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2991 + SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
2992 + GFP_KERNEL);
2993 + if (!SLAVE_AD_INFO(slave)) {
2994 +- kfree(slave);
2995 ++ kobject_put(&slave->kobj);
2996 + return NULL;
2997 + }
2998 + }
2999 +@@ -1481,17 +1519,6 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
3000 + return slave;
3001 + }
3002 +
3003 +-static void bond_free_slave(struct slave *slave)
3004 +-{
3005 +- struct bonding *bond = bond_get_bond_by_slave(slave);
3006 +-
3007 +- cancel_delayed_work_sync(&slave->notify_work);
3008 +- if (BOND_MODE(bond) == BOND_MODE_8023AD)
3009 +- kfree(SLAVE_AD_INFO(slave));
3010 +-
3011 +- kfree(slave);
3012 +-}
3013 +-
3014 + static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
3015 + {
3016 + info->bond_mode = BOND_MODE(bond);
3017 +@@ -1678,14 +1705,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
3018 + goto err_undo_flags;
3019 + }
3020 +
3021 +- new_slave = bond_alloc_slave(bond);
3022 ++ new_slave = bond_alloc_slave(bond, slave_dev);
3023 + if (!new_slave) {
3024 + res = -ENOMEM;
3025 + goto err_undo_flags;
3026 + }
3027 +
3028 +- new_slave->bond = bond;
3029 +- new_slave->dev = slave_dev;
3030 + /* Set the new_slave's queue_id to be zero. Queue ID mapping
3031 + * is set via sysfs or module option if desired.
3032 + */
3033 +@@ -2007,7 +2032,7 @@ err_restore_mtu:
3034 + dev_set_mtu(slave_dev, new_slave->original_mtu);
3035 +
3036 + err_free:
3037 +- bond_free_slave(new_slave);
3038 ++ kobject_put(&new_slave->kobj);
3039 +
3040 + err_undo_flags:
3041 + /* Enslave of first slave has failed and we need to fix master's mac */
3042 +@@ -2187,7 +2212,7 @@ static int __bond_release_one(struct net_device *bond_dev,
3043 + if (!netif_is_bond_master(slave_dev))
3044 + slave_dev->priv_flags &= ~IFF_BONDING;
3045 +
3046 +- bond_free_slave(slave);
3047 ++ kobject_put(&slave->kobj);
3048 +
3049 + return 0;
3050 + }
3051 +diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
3052 +index 9b8346638f697..fd07561da0348 100644
3053 +--- a/drivers/net/bonding/bond_sysfs_slave.c
3054 ++++ b/drivers/net/bonding/bond_sysfs_slave.c
3055 +@@ -121,7 +121,6 @@ static const struct slave_attribute *slave_attrs[] = {
3056 + };
3057 +
3058 + #define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr)
3059 +-#define to_slave(obj) container_of(obj, struct slave, kobj)
3060 +
3061 + static ssize_t slave_show(struct kobject *kobj,
3062 + struct attribute *attr, char *buf)
3063 +@@ -132,28 +131,15 @@ static ssize_t slave_show(struct kobject *kobj,
3064 + return slave_attr->show(slave, buf);
3065 + }
3066 +
3067 +-static const struct sysfs_ops slave_sysfs_ops = {
3068 ++const struct sysfs_ops slave_sysfs_ops = {
3069 + .show = slave_show,
3070 + };
3071 +
3072 +-static struct kobj_type slave_ktype = {
3073 +-#ifdef CONFIG_SYSFS
3074 +- .sysfs_ops = &slave_sysfs_ops,
3075 +-#endif
3076 +-};
3077 +-
3078 + int bond_sysfs_slave_add(struct slave *slave)
3079 + {
3080 + const struct slave_attribute **a;
3081 + int err;
3082 +
3083 +- err = kobject_init_and_add(&slave->kobj, &slave_ktype,
3084 +- &(slave->dev->dev.kobj), "bonding_slave");
3085 +- if (err) {
3086 +- kobject_put(&slave->kobj);
3087 +- return err;
3088 +- }
3089 +-
3090 + for (a = slave_attrs; *a; ++a) {
3091 + err = sysfs_create_file(&slave->kobj, &((*a)->attr));
3092 + if (err) {
3093 +@@ -171,6 +157,4 @@ void bond_sysfs_slave_del(struct slave *slave)
3094 +
3095 + for (a = slave_attrs; *a; ++a)
3096 + sysfs_remove_file(&slave->kobj, &((*a)->attr));
3097 +-
3098 +- kobject_put(&slave->kobj);
3099 + }
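Taken together, the bonding hunks move the slave teardown (cancelling notify_work, freeing the 802.3ad info, freeing the slave itself) into slave_kobj_release(), the kobject release callback, and replace every bond_free_slave() call with kobject_put(). The slave memory is therefore freed only when the last reference is dropped, not while sysfs readers might still hold one. A minimal userspace sketch of that refcount-plus-release-callback pattern, not part of the patch; struct obj and its helpers are illustrative, not kernel API:

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;
        void (*release)(struct obj *obj);
        int payload;
};

static void obj_get(struct obj *obj) { obj->refcount++; }

static void obj_put(struct obj *obj)
{
        if (--obj->refcount == 0)
                obj->release(obj);      /* last reference gone: run the release callback */
}

static void obj_release(struct obj *obj)
{
        printf("releasing payload %d\n", obj->payload);
        free(obj);                      /* the object is only ever freed here */
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        o->refcount = 1;                /* creator's reference */
        o->release = obj_release;
        o->payload = 42;

        obj_get(o);                     /* e.g. a sysfs reader takes a reference */
        obj_put(o);                     /* creator drops its reference: object survives */
        obj_put(o);                     /* reader drops the last reference: freed now */
        return 0;
}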
3100 +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
3101 +index f3fc37e96b087..d4030abad935d 100644
3102 +--- a/drivers/net/can/m_can/m_can.c
3103 ++++ b/drivers/net/can/m_can/m_can.c
3104 +@@ -1033,7 +1033,7 @@ static const struct can_bittiming_const m_can_bittiming_const_31X = {
3105 + .name = KBUILD_MODNAME,
3106 + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
3107 + .tseg1_max = 256,
3108 +- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
3109 ++ .tseg2_min = 2, /* Time segment 2 = phase_seg2 */
3110 + .tseg2_max = 128,
3111 + .sjw_max = 128,
3112 + .brp_min = 1,
3113 +@@ -1653,7 +1653,7 @@ static int m_can_open(struct net_device *dev)
3114 + INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
3115 +
3116 + err = request_threaded_irq(dev->irq, NULL, m_can_isr,
3117 +- IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
3118 ++ IRQF_ONESHOT,
3119 + dev->name, dev);
3120 + } else {
3121 + err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
3122 +diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
3123 +index a4b4b742c80c3..0ad13d78815c5 100644
3124 +--- a/drivers/net/can/usb/gs_usb.c
3125 ++++ b/drivers/net/can/usb/gs_usb.c
3126 +@@ -63,21 +63,27 @@ enum gs_can_identify_mode {
3127 + };
3128 +
3129 + /* data types passed between host and device */
3130 ++
3131 ++/* The firmware on the original USB2CAN by Geschwister Schneider
3132 ++ * Technologie Entwicklungs- und Vertriebs UG exchanges all data
3133 ++ * between the host and the device in host byte order. This is done
3134 ++ * with the struct gs_host_config::byte_order member, which is sent
3135 ++ * first to indicate the desired byte order.
3136 ++ *
3137 ++ * The widely used open source firmware candleLight doesn't support
3138 ++ * this feature and exchanges the data in little endian byte order.
3139 ++ */
3140 + struct gs_host_config {
3141 +- u32 byte_order;
3142 ++ __le32 byte_order;
3143 + } __packed;
3144 +-/* All data exchanged between host and device is exchanged in host byte order,
3145 +- * thanks to the struct gs_host_config byte_order member, which is sent first
3146 +- * to indicate the desired byte order.
3147 +- */
3148 +
3149 + struct gs_device_config {
3150 + u8 reserved1;
3151 + u8 reserved2;
3152 + u8 reserved3;
3153 + u8 icount;
3154 +- u32 sw_version;
3155 +- u32 hw_version;
3156 ++ __le32 sw_version;
3157 ++ __le32 hw_version;
3158 + } __packed;
3159 +
3160 + #define GS_CAN_MODE_NORMAL 0
3161 +@@ -87,26 +93,26 @@ struct gs_device_config {
3162 + #define GS_CAN_MODE_ONE_SHOT BIT(3)
3163 +
3164 + struct gs_device_mode {
3165 +- u32 mode;
3166 +- u32 flags;
3167 ++ __le32 mode;
3168 ++ __le32 flags;
3169 + } __packed;
3170 +
3171 + struct gs_device_state {
3172 +- u32 state;
3173 +- u32 rxerr;
3174 +- u32 txerr;
3175 ++ __le32 state;
3176 ++ __le32 rxerr;
3177 ++ __le32 txerr;
3178 + } __packed;
3179 +
3180 + struct gs_device_bittiming {
3181 +- u32 prop_seg;
3182 +- u32 phase_seg1;
3183 +- u32 phase_seg2;
3184 +- u32 sjw;
3185 +- u32 brp;
3186 ++ __le32 prop_seg;
3187 ++ __le32 phase_seg1;
3188 ++ __le32 phase_seg2;
3189 ++ __le32 sjw;
3190 ++ __le32 brp;
3191 + } __packed;
3192 +
3193 + struct gs_identify_mode {
3194 +- u32 mode;
3195 ++ __le32 mode;
3196 + } __packed;
3197 +
3198 + #define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
3199 +@@ -117,23 +123,23 @@ struct gs_identify_mode {
3200 + #define GS_CAN_FEATURE_IDENTIFY BIT(5)
3201 +
3202 + struct gs_device_bt_const {
3203 +- u32 feature;
3204 +- u32 fclk_can;
3205 +- u32 tseg1_min;
3206 +- u32 tseg1_max;
3207 +- u32 tseg2_min;
3208 +- u32 tseg2_max;
3209 +- u32 sjw_max;
3210 +- u32 brp_min;
3211 +- u32 brp_max;
3212 +- u32 brp_inc;
3213 ++ __le32 feature;
3214 ++ __le32 fclk_can;
3215 ++ __le32 tseg1_min;
3216 ++ __le32 tseg1_max;
3217 ++ __le32 tseg2_min;
3218 ++ __le32 tseg2_max;
3219 ++ __le32 sjw_max;
3220 ++ __le32 brp_min;
3221 ++ __le32 brp_max;
3222 ++ __le32 brp_inc;
3223 + } __packed;
3224 +
3225 + #define GS_CAN_FLAG_OVERFLOW 1
3226 +
3227 + struct gs_host_frame {
3228 + u32 echo_id;
3229 +- u32 can_id;
3230 ++ __le32 can_id;
3231 +
3232 + u8 can_dlc;
3233 + u8 channel;
3234 +@@ -329,13 +335,13 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
3235 + if (!skb)
3236 + return;
3237 +
3238 +- cf->can_id = hf->can_id;
3239 ++ cf->can_id = le32_to_cpu(hf->can_id);
3240 +
3241 + cf->can_dlc = get_can_dlc(hf->can_dlc);
3242 + memcpy(cf->data, hf->data, 8);
3243 +
3244 + /* ERROR frames tell us information about the controller */
3245 +- if (hf->can_id & CAN_ERR_FLAG)
3246 ++ if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
3247 + gs_update_state(dev, cf);
3248 +
3249 + netdev->stats.rx_packets++;
3250 +@@ -418,11 +424,11 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
3251 + if (!dbt)
3252 + return -ENOMEM;
3253 +
3254 +- dbt->prop_seg = bt->prop_seg;
3255 +- dbt->phase_seg1 = bt->phase_seg1;
3256 +- dbt->phase_seg2 = bt->phase_seg2;
3257 +- dbt->sjw = bt->sjw;
3258 +- dbt->brp = bt->brp;
3259 ++ dbt->prop_seg = cpu_to_le32(bt->prop_seg);
3260 ++ dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
3261 ++ dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
3262 ++ dbt->sjw = cpu_to_le32(bt->sjw);
3263 ++ dbt->brp = cpu_to_le32(bt->brp);
3264 +
3265 + /* request bit timings */
3266 + rc = usb_control_msg(interface_to_usbdev(intf),
3267 +@@ -503,7 +509,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
3268 +
3269 + cf = (struct can_frame *)skb->data;
3270 +
3271 +- hf->can_id = cf->can_id;
3272 ++ hf->can_id = cpu_to_le32(cf->can_id);
3273 + hf->can_dlc = cf->can_dlc;
3274 + memcpy(hf->data, cf->data, cf->can_dlc);
3275 +
3276 +@@ -573,6 +579,7 @@ static int gs_can_open(struct net_device *netdev)
3277 + int rc, i;
3278 + struct gs_device_mode *dm;
3279 + u32 ctrlmode;
3280 ++ u32 flags = 0;
3281 +
3282 + rc = open_candev(netdev);
3283 + if (rc)
3284 +@@ -640,24 +647,24 @@ static int gs_can_open(struct net_device *netdev)
3285 +
3286 + /* flags */
3287 + ctrlmode = dev->can.ctrlmode;
3288 +- dm->flags = 0;
3289 +
3290 + if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
3291 +- dm->flags |= GS_CAN_MODE_LOOP_BACK;
3292 ++ flags |= GS_CAN_MODE_LOOP_BACK;
3293 + else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
3294 +- dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
3295 ++ flags |= GS_CAN_MODE_LISTEN_ONLY;
3296 +
3297 + /* Controller is not allowed to retry TX
3298 + * this mode is unavailable on atmels uc3c hardware
3299 + */
3300 + if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
3301 +- dm->flags |= GS_CAN_MODE_ONE_SHOT;
3302 ++ flags |= GS_CAN_MODE_ONE_SHOT;
3303 +
3304 + if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
3305 +- dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
3306 ++ flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
3307 +
3308 + /* finally start device */
3309 +- dm->mode = GS_CAN_MODE_START;
3310 ++ dm->mode = cpu_to_le32(GS_CAN_MODE_START);
3311 ++ dm->flags = cpu_to_le32(flags);
3312 + rc = usb_control_msg(interface_to_usbdev(dev->iface),
3313 + usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
3314 + GS_USB_BREQ_MODE,
3315 +@@ -737,9 +744,9 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
3316 + return -ENOMEM;
3317 +
3318 + if (do_identify)
3319 +- imode->mode = GS_CAN_IDENTIFY_ON;
3320 ++ imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
3321 + else
3322 +- imode->mode = GS_CAN_IDENTIFY_OFF;
3323 ++ imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
3324 +
3325 + rc = usb_control_msg(interface_to_usbdev(dev->iface),
3326 + usb_sndctrlpipe(interface_to_usbdev(dev->iface),
3327 +@@ -790,6 +797,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
3328 + struct net_device *netdev;
3329 + int rc;
3330 + struct gs_device_bt_const *bt_const;
3331 ++ u32 feature;
3332 +
3333 + bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
3334 + if (!bt_const)
3335 +@@ -830,14 +838,14 @@ static struct gs_can *gs_make_candev(unsigned int channel,
3336 +
3337 + /* dev settup */
3338 + strcpy(dev->bt_const.name, "gs_usb");
3339 +- dev->bt_const.tseg1_min = bt_const->tseg1_min;
3340 +- dev->bt_const.tseg1_max = bt_const->tseg1_max;
3341 +- dev->bt_const.tseg2_min = bt_const->tseg2_min;
3342 +- dev->bt_const.tseg2_max = bt_const->tseg2_max;
3343 +- dev->bt_const.sjw_max = bt_const->sjw_max;
3344 +- dev->bt_const.brp_min = bt_const->brp_min;
3345 +- dev->bt_const.brp_max = bt_const->brp_max;
3346 +- dev->bt_const.brp_inc = bt_const->brp_inc;
3347 ++ dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
3348 ++ dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
3349 ++ dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
3350 ++ dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
3351 ++ dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
3352 ++ dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
3353 ++ dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
3354 ++ dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
3355 +
3356 + dev->udev = interface_to_usbdev(intf);
3357 + dev->iface = intf;
3358 +@@ -854,28 +862,29 @@ static struct gs_can *gs_make_candev(unsigned int channel,
3359 +
3360 + /* can settup */
3361 + dev->can.state = CAN_STATE_STOPPED;
3362 +- dev->can.clock.freq = bt_const->fclk_can;
3363 ++ dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
3364 + dev->can.bittiming_const = &dev->bt_const;
3365 + dev->can.do_set_bittiming = gs_usb_set_bittiming;
3366 +
3367 + dev->can.ctrlmode_supported = 0;
3368 +
3369 +- if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
3370 ++ feature = le32_to_cpu(bt_const->feature);
3371 ++ if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
3372 + dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
3373 +
3374 +- if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
3375 ++ if (feature & GS_CAN_FEATURE_LOOP_BACK)
3376 + dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
3377 +
3378 +- if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
3379 ++ if (feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
3380 + dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
3381 +
3382 +- if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
3383 ++ if (feature & GS_CAN_FEATURE_ONE_SHOT)
3384 + dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
3385 +
3386 + SET_NETDEV_DEV(netdev, &intf->dev);
3387 +
3388 +- if (dconf->sw_version > 1)
3389 +- if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY)
3390 ++ if (le32_to_cpu(dconf->sw_version) > 1)
3391 ++ if (feature & GS_CAN_FEATURE_IDENTIFY)
3392 + netdev->ethtool_ops = &gs_usb_ethtool_ops;
3393 +
3394 + kfree(bt_const);
3395 +@@ -910,7 +919,7 @@ static int gs_usb_probe(struct usb_interface *intf,
3396 + if (!hconf)
3397 + return -ENOMEM;
3398 +
3399 +- hconf->byte_order = 0x0000beef;
3400 ++ hconf->byte_order = cpu_to_le32(0x0000beef);
3401 +
3402 + /* send host config */
3403 + rc = usb_control_msg(interface_to_usbdev(intf),
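The comment block added near the top of gs_usb.c explains the rest of this section: the widely used candleLight firmware exchanges data in little-endian byte order, so the fields exchanged with the device become __le32 and every access gains a cpu_to_le32()/le32_to_cpu() conversion at the boundary. Outside the patch, a small standard-C sketch of the same property, encoding a 32-bit value as little-endian bytes regardless of host byte order:

#include <stdint.h>
#include <stdio.h>

/* Fixed little-endian encode/decode, independent of host byte order; this is
 * what the __le32 annotations plus the conversion helpers give the driver on
 * both big- and little-endian hosts.
 */
static void put_le32(uint8_t *buf, uint32_t val)
{
        buf[0] = val & 0xff;
        buf[1] = (val >> 8) & 0xff;
        buf[2] = (val >> 16) & 0xff;
        buf[3] = (val >> 24) & 0xff;
}

static uint32_t get_le32(const uint8_t *buf)
{
        return (uint32_t)buf[0] |
               ((uint32_t)buf[1] << 8) |
               ((uint32_t)buf[2] << 16) |
               ((uint32_t)buf[3] << 24);
}

int main(void)
{
        uint8_t wire[4];

        put_le32(wire, 0x0000beef);     /* host value -> wire bytes */
        printf("wire bytes: %02x %02x %02x %02x\n",
               wire[0], wire[1], wire[2], wire[3]);
        printf("decoded:    0x%08x\n", get_le32(wire));  /* wire bytes -> host value */
        return 0;
}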
3404 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
3405 +index f0dbc05e30a4d..16040b13579ef 100644
3406 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
3407 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
3408 +@@ -2299,6 +2299,8 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
3409 + usleep_range(10000, 20000);
3410 + gpiod_set_value_cansleep(gpiod, 0);
3411 + usleep_range(10000, 20000);
3412 ++
3413 ++ mv88e6xxx_g1_wait_eeprom_done(chip);
3414 + }
3415 + }
3416 +
3417 +diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
3418 +index f62aa83ca08d4..33d443a37efc4 100644
3419 +--- a/drivers/net/dsa/mv88e6xxx/global1.c
3420 ++++ b/drivers/net/dsa/mv88e6xxx/global1.c
3421 +@@ -75,6 +75,37 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
3422 + return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
3423 + }
3424 +
3425 ++void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
3426 ++{
3427 ++ const unsigned long timeout = jiffies + 1 * HZ;
3428 ++ u16 val;
3429 ++ int err;
3430 ++
3431 ++ /* Wait up to 1 second for the switch to finish reading the
3432 ++ * EEPROM.
3433 ++ */
3434 ++ while (time_before(jiffies, timeout)) {
3435 ++ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
3436 ++ if (err) {
3437 ++ dev_err(chip->dev, "Error reading status");
3438 ++ return;
3439 ++ }
3440 ++
3441 ++ /* If the switch is still resetting, it may not
3442 ++ * respond on the bus, and so MDIO read returns
3443 ++ * 0xffff. Differentiate between that, and waiting for
3444 ++ * the EEPROM to be done by bit 0 being set.
3445 ++ */
3446 ++ if (val != 0xffff &&
3447 ++ val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
3448 ++ return;
3449 ++
3450 ++ usleep_range(1000, 2000);
3451 ++ }
3452 ++
3453 ++ dev_err(chip->dev, "Timeout waiting for EEPROM done");
3454 ++}
3455 ++
3456 + /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
3457 + * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
3458 + * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
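The new mv88e6xxx_g1_wait_eeprom_done() above is a classic bounded poll: re-read the global status register for up to a second, treat an all-ones readback as "switch still resetting, bus not answering", and stop once the EEPROM-done bit is set, logging a timeout otherwise. A compilable sketch of that loop shape, not part of the patch, with read_status() as a stand-in for the MDIO read:

#include <stdbool.h>
#include <stdio.h>

#define STATUS_DONE_BIT 0x0001
#define MAX_ATTEMPTS    1000

static unsigned int fake_reads;

/* Pretend the device answers all-ones twice (still resetting), then reports done. */
static unsigned int read_status(void)
{
        return (++fake_reads < 3) ? 0xffff : STATUS_DONE_BIT;
}

static bool wait_done(void)
{
        int attempt;

        for (attempt = 0; attempt < MAX_ATTEMPTS; attempt++) {
                unsigned int val = read_status();

                /* 0xffff means the bus is not responding yet; keep waiting */
                if (val != 0xffff && (val & STATUS_DONE_BIT))
                        return true;
                /* a driver would sleep here between reads, e.g. usleep_range() */
        }
        return false;                   /* bounded: give up after the deadline */
}

int main(void)
{
        printf("EEPROM done: %s\n", wait_done() ? "yes" : "timeout");
        return 0;
}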
3459 +diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
3460 +index 1e3546f8b0727..e05abe61fa114 100644
3461 +--- a/drivers/net/dsa/mv88e6xxx/global1.h
3462 ++++ b/drivers/net/dsa/mv88e6xxx/global1.h
3463 +@@ -278,6 +278,7 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
3464 + int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
3465 + int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
3466 + int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
3467 ++void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
3468 +
3469 + int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
3470 + int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
3471 +diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
3472 +index ccd4405895651..336f115e8091f 100644
3473 +--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
3474 ++++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
3475 +@@ -538,6 +538,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
3476 + {
3477 + struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
3478 + struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
3479 ++ u16 q_depth = io_cq->q_depth;
3480 + u16 cdesc_idx = 0;
3481 + u16 nb_hw_desc;
3482 + u16 i = 0;
3483 +@@ -565,6 +566,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
3484 + do {
3485 + ena_buf[i].len = cdesc->length;
3486 + ena_buf[i].req_id = cdesc->req_id;
3487 ++ if (unlikely(ena_buf[i].req_id >= q_depth))
3488 ++ return -EIO;
3489 +
3490 + if (++i >= nb_hw_desc)
3491 + break;
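The short addition to ena_com_rx_pkt() rejects a completion whose req_id is outside the queue depth with -EIO before that id is ever used to index the buffer ring; the ena_netdev.c hunks that follow then drop the old validate_rx_req_id() helper and its callers. A tiny sketch of validating a device-supplied index before using it, not part of the patch and with made-up names:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define QUEUE_DEPTH 128

static int buffer_ring[QUEUE_DEPTH];

static int handle_completion(uint16_t req_id)
{
        if (req_id >= QUEUE_DEPTH)      /* bogus index reported by the device */
                return -EIO;

        buffer_ring[req_id]++;          /* only indexed after the bounds check */
        return 0;
}

int main(void)
{
        printf("req_id 5:   %d\n", handle_completion(5));
        printf("req_id 500: %d\n", handle_completion(500));
        return 0;
}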
3492 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
3493 +index a3a8edf9a734d..36134fc3e9197 100644
3494 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
3495 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
3496 +@@ -801,24 +801,6 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
3497 + adapter->num_io_queues);
3498 + }
3499 +
3500 +-static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
3501 +-{
3502 +- if (likely(req_id < rx_ring->ring_size))
3503 +- return 0;
3504 +-
3505 +- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
3506 +- "Invalid rx req_id: %hu\n", req_id);
3507 +-
3508 +- u64_stats_update_begin(&rx_ring->syncp);
3509 +- rx_ring->rx_stats.bad_req_id++;
3510 +- u64_stats_update_end(&rx_ring->syncp);
3511 +-
3512 +- /* Trigger device reset */
3513 +- rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
3514 +- set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
3515 +- return -EFAULT;
3516 +-}
3517 +-
3518 + /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
3519 + * @adapter: network interface device structure
3520 + * @qid: queue index
3521 +@@ -938,10 +920,14 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
3522 + static int ena_alloc_rx_page(struct ena_ring *rx_ring,
3523 + struct ena_rx_buffer *rx_info, gfp_t gfp)
3524 + {
3525 ++ int headroom = rx_ring->rx_headroom;
3526 + struct ena_com_buf *ena_buf;
3527 + struct page *page;
3528 + dma_addr_t dma;
3529 +
3530 ++ /* restore page offset value in case it has been changed by device */
3531 ++ rx_info->page_offset = headroom;
3532 ++
3533 + /* if previous allocated page is not used */
3534 + if (unlikely(rx_info->page))
3535 + return 0;
3536 +@@ -971,10 +957,9 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
3537 + "alloc page %p, rx_info %p\n", page, rx_info);
3538 +
3539 + rx_info->page = page;
3540 +- rx_info->page_offset = 0;
3541 + ena_buf = &rx_info->ena_buf;
3542 +- ena_buf->paddr = dma + rx_ring->rx_headroom;
3543 +- ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;
3544 ++ ena_buf->paddr = dma + headroom;
3545 ++ ena_buf->len = ENA_PAGE_SIZE - headroom;
3546 +
3547 + return 0;
3548 + }
3549 +@@ -1368,15 +1353,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
3550 + struct ena_rx_buffer *rx_info;
3551 + u16 len, req_id, buf = 0;
3552 + void *va;
3553 +- int rc;
3554 +
3555 + len = ena_bufs[buf].len;
3556 + req_id = ena_bufs[buf].req_id;
3557 +
3558 +- rc = validate_rx_req_id(rx_ring, req_id);
3559 +- if (unlikely(rc < 0))
3560 +- return NULL;
3561 +-
3562 + rx_info = &rx_ring->rx_buffer_info[req_id];
3563 +
3564 + if (unlikely(!rx_info->page)) {
3565 +@@ -1391,7 +1371,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
3566 +
3567 + /* save virt address of first buffer */
3568 + va = page_address(rx_info->page) + rx_info->page_offset;
3569 +- prefetch(va + NET_IP_ALIGN);
3570 ++
3571 ++ prefetch(va);
3572 +
3573 + if (len <= rx_ring->rx_copybreak) {
3574 + skb = ena_alloc_skb(rx_ring, false);
3575 +@@ -1432,8 +1413,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
3576 +
3577 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
3578 + rx_info->page_offset, len, ENA_PAGE_SIZE);
3579 +- /* The offset is non zero only for the first buffer */
3580 +- rx_info->page_offset = 0;
3581 +
3582 + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
3583 + "rx skb updated. len %d. data_len %d\n",
3584 +@@ -1452,10 +1431,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
3585 + len = ena_bufs[buf].len;
3586 + req_id = ena_bufs[buf].req_id;
3587 +
3588 +- rc = validate_rx_req_id(rx_ring, req_id);
3589 +- if (unlikely(rc < 0))
3590 +- return NULL;
3591 +-
3592 + rx_info = &rx_ring->rx_buffer_info[req_id];
3593 + } while (1);
3594 +
3595 +@@ -1556,8 +1531,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
3596 + int ret;
3597 +
3598 + rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
3599 +- xdp->data = page_address(rx_info->page) +
3600 +- rx_info->page_offset + rx_ring->rx_headroom;
3601 ++ xdp->data = page_address(rx_info->page) + rx_info->page_offset;
3602 + xdp_set_data_meta_invalid(xdp);
3603 + xdp->data_hard_start = page_address(rx_info->page);
3604 + xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
3605 +@@ -1624,8 +1598,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
3606 + if (unlikely(ena_rx_ctx.descs == 0))
3607 + break;
3608 +
3609 ++ /* First descriptor might have an offset set by the device */
3610 + rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
3611 +- rx_info->page_offset = ena_rx_ctx.pkt_offset;
3612 ++ rx_info->page_offset += ena_rx_ctx.pkt_offset;
3613 +
3614 + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
3615 + "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
3616 +@@ -1704,12 +1679,18 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
3617 + error:
3618 + adapter = netdev_priv(rx_ring->netdev);
3619 +
3620 +- u64_stats_update_begin(&rx_ring->syncp);
3621 +- rx_ring->rx_stats.bad_desc_num++;
3622 +- u64_stats_update_end(&rx_ring->syncp);
3623 ++ if (rc == -ENOSPC) {
3624 ++ u64_stats_update_begin(&rx_ring->syncp);
3625 ++ rx_ring->rx_stats.bad_desc_num++;
3626 ++ u64_stats_update_end(&rx_ring->syncp);
3627 ++ adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
3628 ++ } else {
3629 ++ u64_stats_update_begin(&rx_ring->syncp);
3630 ++ rx_ring->rx_stats.bad_req_id++;
3631 ++ u64_stats_update_end(&rx_ring->syncp);
3632 ++ adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
3633 ++ }
3634 +
3635 +- /* Too many desc from the device. Trigger reset */
3636 +- adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
3637 + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3638 +
3639 + return 0;
3640 +@@ -3378,16 +3359,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
3641 + goto err_mmio_read_less;
3642 + }
3643 +
3644 +- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3645 ++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
3646 + if (rc) {
3647 +- dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
3648 +- goto err_mmio_read_less;
3649 +- }
3650 +-
3651 +- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3652 +- if (rc) {
3653 +- dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
3654 +- rc);
3655 ++ dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
3656 + goto err_mmio_read_less;
3657 + }
3658 +
3659 +@@ -4157,6 +4131,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3660 + return rc;
3661 + }
3662 +
3663 ++ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
3664 ++ if (rc) {
3665 ++ dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
3666 ++ goto err_disable_device;
3667 ++ }
3668 ++
3669 + pci_set_master(pdev);
3670 +
3671 + ena_dev = vzalloc(sizeof(*ena_dev));
3672 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
3673 +index 4f913658eea46..24122ccda614c 100644
3674 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
3675 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
3676 +@@ -413,85 +413,63 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
3677 + buff->rxdata.pg_off,
3678 + buff->len, DMA_FROM_DEVICE);
3679 +
3680 +- /* for single fragment packets use build_skb() */
3681 +- if (buff->is_eop &&
3682 +- buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
3683 +- skb = build_skb(aq_buf_vaddr(&buff->rxdata),
3684 ++ skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
3685 ++ if (unlikely(!skb)) {
3686 ++ u64_stats_update_begin(&self->stats.rx.syncp);
3687 ++ self->stats.rx.skb_alloc_fails++;
3688 ++ u64_stats_update_end(&self->stats.rx.syncp);
3689 ++ err = -ENOMEM;
3690 ++ goto err_exit;
3691 ++ }
3692 ++ if (is_ptp_ring)
3693 ++ buff->len -=
3694 ++ aq_ptp_extract_ts(self->aq_nic, skb,
3695 ++ aq_buf_vaddr(&buff->rxdata),
3696 ++ buff->len);
3697 ++
3698 ++ hdr_len = buff->len;
3699 ++ if (hdr_len > AQ_CFG_RX_HDR_SIZE)
3700 ++ hdr_len = eth_get_headlen(skb->dev,
3701 ++ aq_buf_vaddr(&buff->rxdata),
3702 ++ AQ_CFG_RX_HDR_SIZE);
3703 ++
3704 ++ memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
3705 ++ ALIGN(hdr_len, sizeof(long)));
3706 ++
3707 ++ if (buff->len - hdr_len > 0) {
3708 ++ skb_add_rx_frag(skb, 0, buff->rxdata.page,
3709 ++ buff->rxdata.pg_off + hdr_len,
3710 ++ buff->len - hdr_len,
3711 + AQ_CFG_RX_FRAME_MAX);
3712 +- if (unlikely(!skb)) {
3713 +- u64_stats_update_begin(&self->stats.rx.syncp);
3714 +- self->stats.rx.skb_alloc_fails++;
3715 +- u64_stats_update_end(&self->stats.rx.syncp);
3716 +- err = -ENOMEM;
3717 +- goto err_exit;
3718 +- }
3719 +- if (is_ptp_ring)
3720 +- buff->len -=
3721 +- aq_ptp_extract_ts(self->aq_nic, skb,
3722 +- aq_buf_vaddr(&buff->rxdata),
3723 +- buff->len);
3724 +- skb_put(skb, buff->len);
3725 + page_ref_inc(buff->rxdata.page);
3726 +- } else {
3727 +- skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
3728 +- if (unlikely(!skb)) {
3729 +- u64_stats_update_begin(&self->stats.rx.syncp);
3730 +- self->stats.rx.skb_alloc_fails++;
3731 +- u64_stats_update_end(&self->stats.rx.syncp);
3732 +- err = -ENOMEM;
3733 +- goto err_exit;
3734 +- }
3735 +- if (is_ptp_ring)
3736 +- buff->len -=
3737 +- aq_ptp_extract_ts(self->aq_nic, skb,
3738 +- aq_buf_vaddr(&buff->rxdata),
3739 +- buff->len);
3740 +-
3741 +- hdr_len = buff->len;
3742 +- if (hdr_len > AQ_CFG_RX_HDR_SIZE)
3743 +- hdr_len = eth_get_headlen(skb->dev,
3744 +- aq_buf_vaddr(&buff->rxdata),
3745 +- AQ_CFG_RX_HDR_SIZE);
3746 +-
3747 +- memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
3748 +- ALIGN(hdr_len, sizeof(long)));
3749 +-
3750 +- if (buff->len - hdr_len > 0) {
3751 +- skb_add_rx_frag(skb, 0, buff->rxdata.page,
3752 +- buff->rxdata.pg_off + hdr_len,
3753 +- buff->len - hdr_len,
3754 +- AQ_CFG_RX_FRAME_MAX);
3755 +- page_ref_inc(buff->rxdata.page);
3756 +- }
3757 ++ }
3758 +
3759 +- if (!buff->is_eop) {
3760 +- buff_ = buff;
3761 +- i = 1U;
3762 +- do {
3763 +- next_ = buff_->next,
3764 +- buff_ = &self->buff_ring[next_];
3765 ++ if (!buff->is_eop) {
3766 ++ buff_ = buff;
3767 ++ i = 1U;
3768 ++ do {
3769 ++ next_ = buff_->next;
3770 ++ buff_ = &self->buff_ring[next_];
3771 +
3772 +- dma_sync_single_range_for_cpu(
3773 +- aq_nic_get_dev(self->aq_nic),
3774 +- buff_->rxdata.daddr,
3775 +- buff_->rxdata.pg_off,
3776 +- buff_->len,
3777 +- DMA_FROM_DEVICE);
3778 +- skb_add_rx_frag(skb, i++,
3779 +- buff_->rxdata.page,
3780 +- buff_->rxdata.pg_off,
3781 +- buff_->len,
3782 +- AQ_CFG_RX_FRAME_MAX);
3783 +- page_ref_inc(buff_->rxdata.page);
3784 +- buff_->is_cleaned = 1;
3785 +-
3786 +- buff->is_ip_cso &= buff_->is_ip_cso;
3787 +- buff->is_udp_cso &= buff_->is_udp_cso;
3788 +- buff->is_tcp_cso &= buff_->is_tcp_cso;
3789 +- buff->is_cso_err |= buff_->is_cso_err;
3790 ++ dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
3791 ++ buff_->rxdata.daddr,
3792 ++ buff_->rxdata.pg_off,
3793 ++ buff_->len,
3794 ++ DMA_FROM_DEVICE);
3795 ++ skb_add_rx_frag(skb, i++,
3796 ++ buff_->rxdata.page,
3797 ++ buff_->rxdata.pg_off,
3798 ++ buff_->len,
3799 ++ AQ_CFG_RX_FRAME_MAX);
3800 ++ page_ref_inc(buff_->rxdata.page);
3801 ++ buff_->is_cleaned = 1;
3802 +
3803 +- } while (!buff_->is_eop);
3804 +- }
3805 ++ buff->is_ip_cso &= buff_->is_ip_cso;
3806 ++ buff->is_udp_cso &= buff_->is_udp_cso;
3807 ++ buff->is_tcp_cso &= buff_->is_tcp_cso;
3808 ++ buff->is_cso_err |= buff_->is_cso_err;
3809 ++
3810 ++ } while (!buff_->is_eop);
3811 + }
3812 +
3813 + if (buff->is_vlan)
3814 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
3815 +index 2326571e8c84a..50efdcf681083 100644
3816 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
3817 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
3818 +@@ -11273,7 +11273,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
3819 + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
3820 + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
3821 + dev_err(&pdev->dev, "System does not support DMA, aborting\n");
3822 +- goto init_err_disable;
3823 ++ rc = -EIO;
3824 ++ goto init_err_release;
3825 + }
3826 +
3827 + pci_set_master(pdev);
3828 +@@ -12353,6 +12354,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3829 + create_singlethread_workqueue("bnxt_pf_wq");
3830 + if (!bnxt_pf_wq) {
3831 + dev_err(&pdev->dev, "Unable to create workqueue.\n");
3832 ++ rc = -ENOMEM;
3833 + goto init_err_pci_clean;
3834 + }
3835 + }
3836 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
3837 +index 8eb976106d0c8..7e7537eabf000 100644
3838 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
3839 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
3840 +@@ -883,7 +883,8 @@ int set_filter_wr(struct adapter *adapter, int fidx)
3841 + FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
3842 + FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
3843 + FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
3844 +- fwr->smac_sel = f->smt->idx;
3845 ++ if (f->fs.newsmac)
3846 ++ fwr->smac_sel = f->smt->idx;
3847 + fwr->rx_chan_rx_rpl_iq =
3848 + htons(FW_FILTER_WR_RX_CHAN_V(0) |
3849 + FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
3850 +diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
3851 +index feea797cde022..70aabd2343371 100644
3852 +--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
3853 ++++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
3854 +@@ -3,6 +3,7 @@ config FSL_DPAA2_ETH
3855 + tristate "Freescale DPAA2 Ethernet"
3856 + depends on FSL_MC_BUS && FSL_MC_DPIO
3857 + select PHYLINK
3858 ++ select FSL_XGMAC_MDIO
3859 + help
3860 + This is the DPAA2 Ethernet driver supporting Freescale SoCs
3861 + with DPAA2 (DataPath Acceleration Architecture v2).
3862 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
3863 +index 1c4a535890dac..9a91e3568adbf 100644
3864 +--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
3865 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
3866 +@@ -95,18 +95,8 @@ static int enetc_setup_taprio(struct net_device *ndev,
3867 + gcl_config->atc = 0xff;
3868 + gcl_config->acl_len = cpu_to_le16(gcl_len);
3869 +
3870 +- if (!admin_conf->base_time) {
3871 +- gcl_data->btl =
3872 +- cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
3873 +- gcl_data->bth =
3874 +- cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
3875 +- } else {
3876 +- gcl_data->btl =
3877 +- cpu_to_le32(lower_32_bits(admin_conf->base_time));
3878 +- gcl_data->bth =
3879 +- cpu_to_le32(upper_32_bits(admin_conf->base_time));
3880 +- }
3881 +-
3882 ++ gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time));
3883 ++ gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time));
3884 + gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
3885 + gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
3886 +
3887 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
3888 +index c6ee42278fdcf..81ec233926acb 100644
3889 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
3890 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
3891 +@@ -2087,8 +2087,11 @@ static int do_reset(struct ibmvnic_adapter *adapter,
3892 + for (i = 0; i < adapter->req_rx_queues; i++)
3893 + napi_schedule(&adapter->napi[i]);
3894 +
3895 +- if (adapter->reset_reason != VNIC_RESET_FAILOVER)
3896 ++ if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
3897 ++ adapter->reset_reason == VNIC_RESET_MOBILITY) {
3898 + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
3899 ++ call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
3900 ++ }
3901 +
3902 + rc = 0;
3903 +
3904 +@@ -2158,6 +2161,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
3905 + if (rc)
3906 + return IBMVNIC_OPEN_FAILED;
3907 +
3908 ++ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
3909 ++ call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
3910 ++
3911 + return 0;
3912 + }
3913 +
3914 +@@ -2222,7 +2228,6 @@ static void __ibmvnic_reset(struct work_struct *work)
3915 +
3916 + if (!saved_state) {
3917 + reset_state = adapter->state;
3918 +- adapter->state = VNIC_RESETTING;
3919 + saved_state = true;
3920 + }
3921 + spin_unlock_irqrestore(&adapter->state_lock, flags);
3922 +@@ -2881,6 +2886,9 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3923 + {
3924 + int i, rc;
3925 +
3926 ++ if (!adapter->tx_scrq || !adapter->rx_scrq)
3927 ++ return -EINVAL;
3928 ++
3929 + for (i = 0; i < adapter->req_tx_queues; i++) {
3930 + netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3931 + rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3932 +@@ -4910,6 +4918,9 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3933 + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3934 +
3935 + /* Clean out the queue */
3936 ++ if (!crq->msgs)
3937 ++ return -EINVAL;
3938 ++
3939 + memset(crq->msgs, 0, PAGE_SIZE);
3940 + crq->cur = 0;
3941 + crq->active = false;
3942 +@@ -5249,7 +5260,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
3943 + unsigned long flags;
3944 +
3945 + spin_lock_irqsave(&adapter->state_lock, flags);
3946 +- if (adapter->state == VNIC_RESETTING) {
3947 ++ if (test_bit(0, &adapter->resetting)) {
3948 + spin_unlock_irqrestore(&adapter->state_lock, flags);
3949 + return -EBUSY;
3950 + }
3951 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
3952 +index 43feb96b0a68a..31d604fc7bde7 100644
3953 +--- a/drivers/net/ethernet/ibm/ibmvnic.h
3954 ++++ b/drivers/net/ethernet/ibm/ibmvnic.h
3955 +@@ -941,8 +941,7 @@ enum vnic_state {VNIC_PROBING = 1,
3956 + VNIC_CLOSING,
3957 + VNIC_CLOSED,
3958 + VNIC_REMOVING,
3959 +- VNIC_REMOVED,
3960 +- VNIC_RESETTING};
3961 ++ VNIC_REMOVED};
3962 +
3963 + enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
3964 + VNIC_RESET_MOBILITY,
3965 +diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
3966 +index a7e212d1caa22..6c1290137cbba 100644
3967 +--- a/drivers/net/ethernet/intel/i40e/i40e.h
3968 ++++ b/drivers/net/ethernet/intel/i40e/i40e.h
3969 +@@ -140,6 +140,7 @@ enum i40e_state_t {
3970 + __I40E_CLIENT_RESET,
3971 + __I40E_VIRTCHNL_OP_PENDING,
3972 + __I40E_RECOVERY_MODE,
3973 ++ __I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
3974 + /* This must be last as it determines the size of the BITMAP */
3975 + __I40E_STATE_SIZE__,
3976 + };
3977 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
3978 +index 2e433fdbf2c36..da80dccad1dd3 100644
3979 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
3980 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
3981 +@@ -4006,8 +4006,16 @@ static irqreturn_t i40e_intr(int irq, void *data)
3982 + }
3983 +
3984 + if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3985 +- ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3986 +- set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3987 ++ /* disable any further VFLR event notifications */
3988 ++ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
3989 ++ u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3990 ++
3991 ++ reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
3992 ++ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3993 ++ } else {
3994 ++ ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3995 ++ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3996 ++ }
3997 + }
3998 +
3999 + if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4000 +@@ -15466,6 +15474,11 @@ static void i40e_remove(struct pci_dev *pdev)
4001 + while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4002 + usleep_range(1000, 2000);
4003 +
4004 ++ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
4005 ++ set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
4006 ++ i40e_free_vfs(pf);
4007 ++ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
4008 ++ }
4009 + /* no more scheduling of any task */
4010 + set_bit(__I40E_SUSPENDED, pf->state);
4011 + set_bit(__I40E_DOWN, pf->state);
4012 +@@ -15492,11 +15505,6 @@ static void i40e_remove(struct pci_dev *pdev)
4013 + */
4014 + i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
4015 +
4016 +- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
4017 +- i40e_free_vfs(pf);
4018 +- pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
4019 +- }
4020 +-
4021 + i40e_fdir_teardown(pf);
4022 +
4023 + /* If there is a switch structure or any orphans, remove them.
4024 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
4025 +index 343177d71f70a..0d76b8c79f4da 100644
4026 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
4027 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
4028 +@@ -1403,7 +1403,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
4029 + * @vf: pointer to the VF structure
4030 + * @flr: VFLR was issued or not
4031 + *
4032 +- * Returns true if the VF is reset, false otherwise.
4033 ++ * Returns true if the VF is in reset, resets successfully, or resets
4034 ++ * are disabled and false otherwise.
4035 + **/
4036 + bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
4037 + {
4038 +@@ -1413,11 +1414,14 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
4039 + u32 reg;
4040 + int i;
4041 +
4042 ++ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
4043 ++ return true;
4044 ++
4045 + /* If the VFs have been disabled, this means something else is
4046 + * resetting the VF, so we shouldn't continue.
4047 + */
4048 + if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
4049 +- return false;
4050 ++ return true;
4051 +
4052 + i40e_trigger_vf_reset(vf, flr);
4053 +
4054 +@@ -1581,6 +1585,15 @@ void i40e_free_vfs(struct i40e_pf *pf)
4055 +
4056 + i40e_notify_client_of_vf_enable(pf, 0);
4057 +
4058 ++ /* Disable IOV before freeing resources. This lets any VF drivers
4059 ++ * running in the host get themselves cleaned up before we yank
4060 ++ * the carpet out from underneath their feet.
4061 ++ */
4062 ++ if (!pci_vfs_assigned(pf->pdev))
4063 ++ pci_disable_sriov(pf->pdev);
4064 ++ else
4065 ++ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
4066 ++
4067 + /* Amortize wait time by stopping all VFs at the same time */
4068 + for (i = 0; i < pf->num_alloc_vfs; i++) {
4069 + if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
4070 +@@ -1596,15 +1609,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
4071 + i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
4072 + }
4073 +
4074 +- /* Disable IOV before freeing resources. This lets any VF drivers
4075 +- * running in the host get themselves cleaned up before we yank
4076 +- * the carpet out from underneath their feet.
4077 +- */
4078 +- if (!pci_vfs_assigned(pf->pdev))
4079 +- pci_disable_sriov(pf->pdev);
4080 +- else
4081 +- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
4082 +-
4083 + /* free up VF resources */
4084 + tmp = pf->num_alloc_vfs;
4085 + pf->num_alloc_vfs = 0;
4086 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
4087 +index ecd834e0e1216..72a5408a44d61 100644
4088 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
4089 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
4090 +@@ -1160,7 +1160,6 @@ const struct stmmac_ops dwmac4_ops = {
4091 + .pcs_get_adv_lp = dwmac4_get_adv_lp,
4092 + .debug = dwmac4_debug,
4093 + .set_filter = dwmac4_set_filter,
4094 +- .flex_pps_config = dwmac5_flex_pps_config,
4095 + .set_mac_loopback = dwmac4_set_mac_loopback,
4096 + .update_vlan_hash = dwmac4_update_vlan_hash,
4097 + .sarc_configure = dwmac4_sarc_configure,
4098 +@@ -1202,6 +1201,7 @@ const struct stmmac_ops dwmac410_ops = {
4099 + .pcs_get_adv_lp = dwmac4_get_adv_lp,
4100 + .debug = dwmac4_debug,
4101 + .set_filter = dwmac4_set_filter,
4102 ++ .flex_pps_config = dwmac5_flex_pps_config,
4103 + .set_mac_loopback = dwmac4_set_mac_loopback,
4104 + .update_vlan_hash = dwmac4_update_vlan_hash,
4105 + .sarc_configure = dwmac4_sarc_configure,
4106 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
4107 +index cb87d31a99dfb..57a53a600aa55 100644
4108 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
4109 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
4110 +@@ -23,7 +23,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
4111 +
4112 + return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
4113 + !(value & DMA_BUS_MODE_SFT_RESET),
4114 +- 10000, 100000);
4115 ++ 10000, 200000);
4116 + }
4117 +
4118 + /* CSR1 enables the transmit DMA to check for new descriptor */
4119 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
4120 +index a731f28e101a6..53b438d709dbe 100644
4121 +--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
4122 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
4123 +@@ -8,7 +8,7 @@
4124 + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
4125 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
4126 + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
4127 +- * Copyright(c) 2018 - 2019 Intel Corporation
4128 ++ * Copyright(c) 2018 - 2020 Intel Corporation
4129 + *
4130 + * This program is free software; you can redistribute it and/or modify
4131 + * it under the terms of version 2 of the GNU General Public License as
4132 +@@ -31,7 +31,7 @@
4133 + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
4134 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
4135 + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
4136 +- * Copyright(c) 2018 - 2019 Intel Corporation
4137 ++ * Copyright(c) 2018 - 2020 Intel Corporation
4138 + * All rights reserved.
4139 + *
4140 + * Redistribution and use in source and binary forms, with or without
4141 +@@ -421,12 +421,14 @@ struct iwl_hs20_roc_res {
4142 + * able to run the GO Negotiation. Will not be fragmented and not
4143 + * repetitive. Valid only on the P2P Device MAC. Only the duration will
4144 + * be taken into account.
4145 ++ * @SESSION_PROTECT_CONF_MAX_ID: not used
4146 + */
4147 + enum iwl_mvm_session_prot_conf_id {
4148 + SESSION_PROTECT_CONF_ASSOC,
4149 + SESSION_PROTECT_CONF_GO_CLIENT_ASSOC,
4150 + SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV,
4151 + SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION,
4152 ++ SESSION_PROTECT_CONF_MAX_ID,
4153 + }; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */
4154 +
4155 + /**
4156 +@@ -459,7 +461,7 @@ struct iwl_mvm_session_prot_cmd {
4157 + * @mac_id: the mac id for which the session protection started / ended
4158 + * @status: 1 means success, 0 means failure
4159 + * @start: 1 means the session protection started, 0 means it ended
4160 +- * @conf_id: the configuration id of the session that started / eneded
4161 ++ * @conf_id: see &enum iwl_mvm_session_prot_conf_id
4162 + *
4163 + * Note that any session protection will always get two notifications: start
4164 + * and end even the firmware could not schedule it.
4165 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4166 +index c918c0887ed01..34362dc0d4612 100644
4167 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4168 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4169 +@@ -3104,6 +3104,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
4170 + goto out_unlock;
4171 + }
4172 +
4173 ++ if (vif->type == NL80211_IFTYPE_STATION)
4174 ++ vif->bss_conf.he_support = sta->he_cap.has_he;
4175 ++
4176 + if (sta->tdls &&
4177 + (vif->p2p ||
4178 + iwl_mvm_tdls_sta_count(mvm, NULL) ==
4179 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
4180 +index 1babc4bb5194b..6ca45e89a820c 100644
4181 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
4182 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
4183 +@@ -638,11 +638,32 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
4184 + }
4185 + }
4186 +
4187 ++static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
4188 ++ struct iwl_mvm_vif *mvmvif)
4189 ++{
4190 ++ struct iwl_mvm_session_prot_cmd cmd = {
4191 ++ .id_and_color =
4192 ++ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
4193 ++ mvmvif->color)),
4194 ++ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
4195 ++ .conf_id = cpu_to_le32(mvmvif->time_event_data.id),
4196 ++ };
4197 ++ int ret;
4198 ++
4199 ++ ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
4200 ++ MAC_CONF_GROUP, 0),
4201 ++ 0, sizeof(cmd), &cmd);
4202 ++ if (ret)
4203 ++ IWL_ERR(mvm,
4204 ++ "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
4205 ++}
4206 ++
4207 + static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
4208 + struct iwl_mvm_time_event_data *te_data,
4209 + u32 *uid)
4210 + {
4211 + u32 id;
4212 ++ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
4213 +
4214 + /*
4215 + * It is possible that by the time we got to this point the time
4216 +@@ -660,14 +681,29 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
4217 + iwl_mvm_te_clear_data(mvm, te_data);
4218 + spin_unlock_bh(&mvm->time_event_lock);
4219 +
4220 +- /*
4221 +- * It is possible that by the time we try to remove it, the time event
4222 +- * has already ended and removed. In such a case there is no need to
4223 +- * send a removal command.
4224 ++ /* When session protection is supported, the te_data->id field
4225 ++ * is reused to save session protection's configuration.
4226 + */
4227 +- if (id == TE_MAX) {
4228 +- IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
4229 ++ if (fw_has_capa(&mvm->fw->ucode_capa,
4230 ++ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
4231 ++ if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
4232 ++ /* Session protection is still ongoing. Cancel it */
4233 ++ iwl_mvm_cancel_session_protection(mvm, mvmvif);
4234 ++ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
4235 ++ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
4236 ++ iwl_mvm_roc_finished(mvm);
4237 ++ }
4238 ++ }
4239 + return false;
4240 ++ } else {
4241 ++ /* It is possible that by the time we try to remove it, the
4242 ++ * time event has already ended and removed. In such a case
4243 ++ * there is no need to send a removal command.
4244 ++ */
4245 ++ if (id == TE_MAX) {
4246 ++ IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
4247 ++ return false;
4248 ++ }
4249 + }
4250 +
4251 + return true;
4252 +@@ -768,6 +804,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
4253 + struct iwl_rx_packet *pkt = rxb_addr(rxb);
4254 + struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
4255 + struct ieee80211_vif *vif;
4256 ++ struct iwl_mvm_vif *mvmvif;
4257 +
4258 + rcu_read_lock();
4259 + vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id),
4260 +@@ -776,9 +813,10 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
4261 + if (!vif)
4262 + goto out_unlock;
4263 +
4264 ++ mvmvif = iwl_mvm_vif_from_mac80211(vif);
4265 ++
4266 + /* The vif is not a P2P_DEVICE, maintain its time_event_data */
4267 + if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
4268 +- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4269 + struct iwl_mvm_time_event_data *te_data =
4270 + &mvmvif->time_event_data;
4271 +
4272 +@@ -813,10 +851,14 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
4273 +
4274 + if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
4275 + /* End TE, notify mac80211 */
4276 ++ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
4277 + ieee80211_remain_on_channel_expired(mvm->hw);
4278 + set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
4279 + iwl_mvm_roc_finished(mvm);
4280 + } else if (le32_to_cpu(notif->start)) {
4281 ++ if (WARN_ON(mvmvif->time_event_data.id !=
4282 ++ le32_to_cpu(notif->conf_id)))
4283 ++ goto out_unlock;
4284 + set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
4285 + ieee80211_ready_on_channel(mvm->hw); /* Start TE */
4286 + }
4287 +@@ -842,20 +884,24 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
4288 +
4289 + lockdep_assert_held(&mvm->mutex);
4290 +
4291 ++ /* The time_event_data.id field is reused to save session
4292 ++ * protection's configuration.
4293 ++ */
4294 + switch (type) {
4295 + case IEEE80211_ROC_TYPE_NORMAL:
4296 +- cmd.conf_id =
4297 +- cpu_to_le32(SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV);
4298 ++ mvmvif->time_event_data.id =
4299 ++ SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV;
4300 + break;
4301 + case IEEE80211_ROC_TYPE_MGMT_TX:
4302 +- cmd.conf_id =
4303 +- cpu_to_le32(SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION);
4304 ++ mvmvif->time_event_data.id =
4305 ++ SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION;
4306 + break;
4307 + default:
4308 + WARN_ONCE(1, "Got an invalid ROC type\n");
4309 + return -EINVAL;
4310 + }
4311 +
4312 ++ cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
4313 + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
4314 + MAC_CONF_GROUP, 0),
4315 + 0, sizeof(cmd), &cmd);
4316 +@@ -957,25 +1003,6 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
4317 + __iwl_mvm_remove_time_event(mvm, te_data, &uid);
4318 + }
4319 +
4320 +-static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
4321 +- struct iwl_mvm_vif *mvmvif)
4322 +-{
4323 +- struct iwl_mvm_session_prot_cmd cmd = {
4324 +- .id_and_color =
4325 +- cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
4326 +- mvmvif->color)),
4327 +- .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
4328 +- };
4329 +- int ret;
4330 +-
4331 +- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
4332 +- MAC_CONF_GROUP, 0),
4333 +- 0, sizeof(cmd), &cmd);
4334 +- if (ret)
4335 +- IWL_ERR(mvm,
4336 +- "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
4337 +-}
4338 +-
4339 + void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
4340 + {
4341 + struct iwl_mvm_vif *mvmvif;
4342 +@@ -985,10 +1012,13 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
4343 + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
4344 + mvmvif = iwl_mvm_vif_from_mac80211(vif);
4345 +
4346 +- iwl_mvm_cancel_session_protection(mvm, mvmvif);
4347 +-
4348 +- if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
4349 ++ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
4350 ++ iwl_mvm_cancel_session_protection(mvm, mvmvif);
4351 + set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
4352 ++ } else {
4353 ++ iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
4354 ++ &mvmvif->time_event_data);
4355 ++ }
4356 +
4357 + iwl_mvm_roc_finished(mvm);
4358 +
4359 +@@ -1101,10 +1131,15 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
4360 + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
4361 + mvmvif->color)),
4362 + .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
4363 +- .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
4364 + .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
4365 + };
4366 +
4367 ++ /* The time_event_data.id field is reused to save session
4368 ++ * protection's configuration.
4369 ++ */
4370 ++ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_ASSOC;
4371 ++ cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
4372 ++
4373 + lockdep_assert_held(&mvm->mutex);
4374 +
4375 + spin_lock_bh(&mvm->time_event_lock);
4376 +diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
4377 +index b4eb926d220ac..d7ecff0b1c662 100644
4378 +--- a/drivers/nfc/s3fwrn5/i2c.c
4379 ++++ b/drivers/nfc/s3fwrn5/i2c.c
4380 +@@ -26,8 +26,8 @@ struct s3fwrn5_i2c_phy {
4381 + struct i2c_client *i2c_dev;
4382 + struct nci_dev *ndev;
4383 +
4384 +- unsigned int gpio_en;
4385 +- unsigned int gpio_fw_wake;
4386 ++ int gpio_en;
4387 ++ int gpio_fw_wake;
4388 +
4389 + struct mutex mutex;
4390 +
4391 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
4392 +index a6af96aaa0eb7..3448f7ac209a0 100644
4393 +--- a/drivers/nvme/host/pci.c
4394 ++++ b/drivers/nvme/host/pci.c
4395 +@@ -292,9 +292,21 @@ static void nvme_dbbuf_init(struct nvme_dev *dev,
4396 + nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
4397 + }
4398 +
4399 ++static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
4400 ++{
4401 ++ if (!nvmeq->qid)
4402 ++ return;
4403 ++
4404 ++ nvmeq->dbbuf_sq_db = NULL;
4405 ++ nvmeq->dbbuf_cq_db = NULL;
4406 ++ nvmeq->dbbuf_sq_ei = NULL;
4407 ++ nvmeq->dbbuf_cq_ei = NULL;
4408 ++}
4409 ++
4410 + static void nvme_dbbuf_set(struct nvme_dev *dev)
4411 + {
4412 + struct nvme_command c;
4413 ++ unsigned int i;
4414 +
4415 + if (!dev->dbbuf_dbs)
4416 + return;
4417 +@@ -308,6 +320,9 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
4418 + dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
4419 + /* Free memory and continue on */
4420 + nvme_dbbuf_dma_free(dev);
4421 ++
4422 ++ for (i = 1; i <= dev->online_queues; i++)
4423 ++ nvme_dbbuf_free(&dev->queues[i]);
4424 + }
4425 + }
4426 +
4427 +diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
4428 +index 928db510b86c6..7f6fcb8ec5bab 100644
4429 +--- a/drivers/phy/qualcomm/Kconfig
4430 ++++ b/drivers/phy/qualcomm/Kconfig
4431 +@@ -87,7 +87,7 @@ config PHY_QCOM_USB_HSIC
4432 +
4433 + config PHY_QCOM_USB_HS_28NM
4434 + tristate "Qualcomm 28nm High-Speed PHY"
4435 +- depends on ARCH_QCOM || COMPILE_TEST
4436 ++ depends on OF && (ARCH_QCOM || COMPILE_TEST)
4437 + depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
4438 + select GENERIC_PHY
4439 + help
4440 +@@ -98,7 +98,7 @@ config PHY_QCOM_USB_HS_28NM
4441 +
4442 + config PHY_QCOM_USB_SS
4443 + tristate "Qualcomm USB Super-Speed PHY driver"
4444 +- depends on ARCH_QCOM || COMPILE_TEST
4445 ++ depends on OF && (ARCH_QCOM || COMPILE_TEST)
4446 + depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
4447 + select GENERIC_PHY
4448 + help
4449 +diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
4450 +index de4a46fe17630..ad88d74c18842 100644
4451 +--- a/drivers/phy/tegra/xusb.c
4452 ++++ b/drivers/phy/tegra/xusb.c
4453 +@@ -1242,6 +1242,7 @@ power_down:
4454 + reset:
4455 + reset_control_assert(padctl->rst);
4456 + remove:
4457 ++ platform_set_drvdata(pdev, NULL);
4458 + soc->ops->remove(padctl);
4459 + return err;
4460 + }
4461 +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
4462 +index eae3579f106f3..017f090a90f68 100644
4463 +--- a/drivers/platform/x86/thinkpad_acpi.c
4464 ++++ b/drivers/platform/x86/thinkpad_acpi.c
4465 +@@ -4220,6 +4220,7 @@ static void hotkey_resume(void)
4466 + pr_err("error while attempting to reset the event firmware interface\n");
4467 +
4468 + tpacpi_send_radiosw_update();
4469 ++ tpacpi_input_send_tabletsw();
4470 + hotkey_tablet_mode_notify_change();
4471 + hotkey_wakeup_reason_notify_change();
4472 + hotkey_wakeup_hotunplug_complete_notify_change();
4473 +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
4474 +index e557d757c6470..fa7232ad8c395 100644
4475 +--- a/drivers/platform/x86/toshiba_acpi.c
4476 ++++ b/drivers/platform/x86/toshiba_acpi.c
4477 +@@ -1478,7 +1478,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
4478 + struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
4479 + char *buffer;
4480 + char *cmd;
4481 +- int lcd_out, crt_out, tv_out;
4482 ++ int lcd_out = -1, crt_out = -1, tv_out = -1;
4483 + int remain = count;
4484 + int value;
4485 + int ret;
4486 +@@ -1510,7 +1510,6 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
4487 +
4488 + kfree(cmd);
4489 +
4490 +- lcd_out = crt_out = tv_out = -1;
4491 + ret = get_video_status(dev, &video_out);
4492 + if (!ret) {
4493 + unsigned int new_video_out = video_out;
4494 +diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
4495 +index e020faff7da53..663255774c0b0 100644
4496 +--- a/drivers/ptp/ptp_clockmatrix.c
4497 ++++ b/drivers/ptp/ptp_clockmatrix.c
4498 +@@ -103,43 +103,26 @@ static int timespec_to_char_array(struct timespec64 const *ts,
4499 + return 0;
4500 + }
4501 +
4502 +-static int idtcm_strverscmp(const char *ver1, const char *ver2)
4503 ++static int idtcm_strverscmp(const char *version1, const char *version2)
4504 + {
4505 +- u8 num1;
4506 +- u8 num2;
4507 +- int result = 0;
4508 +-
4509 +- /* loop through each level of the version string */
4510 +- while (result == 0) {
4511 +- /* extract leading version numbers */
4512 +- if (kstrtou8(ver1, 10, &num1) < 0)
4513 +- return -1;
4514 ++ u8 ver1[3], ver2[3];
4515 ++ int i;
4516 +
4517 +- if (kstrtou8(ver2, 10, &num2) < 0)
4518 +- return -1;
4519 ++ if (sscanf(version1, "%hhu.%hhu.%hhu",
4520 ++ &ver1[0], &ver1[1], &ver1[2]) != 3)
4521 ++ return -1;
4522 ++ if (sscanf(version2, "%hhu.%hhu.%hhu",
4523 ++ &ver2[0], &ver2[1], &ver2[2]) != 3)
4524 ++ return -1;
4525 +
4526 +- /* if numbers differ, then set the result */
4527 +- if (num1 < num2)
4528 +- result = -1;
4529 +- else if (num1 > num2)
4530 +- result = 1;
4531 +- else {
4532 +- /* if numbers are the same, go to next level */
4533 +- ver1 = strchr(ver1, '.');
4534 +- ver2 = strchr(ver2, '.');
4535 +- if (!ver1 && !ver2)
4536 +- break;
4537 +- else if (!ver1)
4538 +- result = -1;
4539 +- else if (!ver2)
4540 +- result = 1;
4541 +- else {
4542 +- ver1++;
4543 +- ver2++;
4544 +- }
4545 +- }
4546 ++ for (i = 0; i < 3; i++) {
4547 ++ if (ver1[i] > ver2[i])
4548 ++ return 1;
4549 ++ if (ver1[i] < ver2[i])
4550 ++ return -1;
4551 + }
4552 +- return result;
4553 ++
4554 ++ return 0;
4555 + }
4556 +
4557 + static int idtcm_xfer_read(struct idtcm *idtcm,
4558 +diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
4559 +index ed6316992cbb8..07a5630ec841f 100644
4560 +--- a/drivers/rtc/rtc-pcf2127.c
4561 ++++ b/drivers/rtc/rtc-pcf2127.c
4562 +@@ -559,7 +559,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
4563 + pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
4564 + pcf2127->rtc->uie_unsupported = 1;
4565 +
4566 +- if (alarm_irq >= 0) {
4567 ++ if (alarm_irq > 0) {
4568 + ret = devm_request_threaded_irq(dev, alarm_irq, NULL,
4569 + pcf2127_rtc_irq,
4570 + IRQF_TRIGGER_LOW | IRQF_ONESHOT,
4571 +@@ -570,7 +570,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
4572 + }
4573 + }
4574 +
4575 +- if (alarm_irq >= 0 || device_property_read_bool(dev, "wakeup-source")) {
4576 ++ if (alarm_irq > 0 || device_property_read_bool(dev, "wakeup-source")) {
4577 + device_init_wakeup(dev, true);
4578 + pcf2127->rtc->ops = &pcf2127_rtc_alrm_ops;
4579 + }
4580 +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
4581 +index 6b5cf9ba03e5b..757d6ba817ee1 100644
4582 +--- a/drivers/s390/net/qeth_core.h
4583 ++++ b/drivers/s390/net/qeth_core.h
4584 +@@ -397,10 +397,13 @@ enum qeth_qdio_out_buffer_state {
4585 + QETH_QDIO_BUF_EMPTY,
4586 + /* Filled by driver; owned by hardware in order to be sent. */
4587 + QETH_QDIO_BUF_PRIMED,
4588 +- /* Identified to be pending in TPQ. */
4589 ++ /* Discovered by the TX completion code: */
4590 + QETH_QDIO_BUF_PENDING,
4591 +- /* Found in completion queue. */
4592 +- QETH_QDIO_BUF_IN_CQ,
4593 ++ /* Finished by the TX completion code: */
4594 ++ QETH_QDIO_BUF_NEED_QAOB,
4595 ++ /* Received QAOB notification on CQ: */
4596 ++ QETH_QDIO_BUF_QAOB_OK,
4597 ++ QETH_QDIO_BUF_QAOB_ERROR,
4598 + /* Handled via transfer pending / completion queue. */
4599 + QETH_QDIO_BUF_HANDLED_DELAYED,
4600 + };
4601 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
4602 +index 6a73982514237..e3666232a19a8 100644
4603 +--- a/drivers/s390/net/qeth_core_main.c
4604 ++++ b/drivers/s390/net/qeth_core_main.c
4605 +@@ -32,6 +32,7 @@
4606 +
4607 + #include <net/iucv/af_iucv.h>
4608 + #include <net/dsfield.h>
4609 ++#include <net/sock.h>
4610 +
4611 + #include <asm/ebcdic.h>
4612 + #include <asm/chpid.h>
4613 +@@ -500,18 +501,13 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
4614 +
4615 + }
4616 + }
4617 +- if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
4618 +- QETH_QDIO_BUF_HANDLED_DELAYED)) {
4619 +- /* for recovery situations */
4620 +- qeth_init_qdio_out_buf(q, bidx);
4621 +- QETH_CARD_TEXT(q->card, 2, "clprecov");
4622 +- }
4623 + }
4624 +
4625 +
4626 + static void qeth_qdio_handle_aob(struct qeth_card *card,
4627 + unsigned long phys_aob_addr)
4628 + {
4629 ++ enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK;
4630 + struct qaob *aob;
4631 + struct qeth_qdio_out_buffer *buffer;
4632 + enum iucv_tx_notify notification;
4633 +@@ -523,22 +519,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
4634 + buffer = (struct qeth_qdio_out_buffer *) aob->user1;
4635 + QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
4636 +
4637 +- if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
4638 +- QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
4639 +- notification = TX_NOTIFY_OK;
4640 +- } else {
4641 +- WARN_ON_ONCE(atomic_read(&buffer->state) !=
4642 +- QETH_QDIO_BUF_PENDING);
4643 +- atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
4644 +- notification = TX_NOTIFY_DELAYED_OK;
4645 +- }
4646 +-
4647 +- if (aob->aorc != 0) {
4648 +- QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
4649 +- notification = qeth_compute_cq_notification(aob->aorc, 1);
4650 +- }
4651 +- qeth_notify_skbs(buffer->q, buffer, notification);
4652 +-
4653 + /* Free dangling allocations. The attached skbs are handled by
4654 + * qeth_cleanup_handled_pending().
4655 + */
4656 +@@ -550,7 +530,33 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
4657 + if (data && buffer->is_header[i])
4658 + kmem_cache_free(qeth_core_header_cache, data);
4659 + }
4660 +- atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
4661 ++
4662 ++ if (aob->aorc) {
4663 ++ QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
4664 ++ new_state = QETH_QDIO_BUF_QAOB_ERROR;
4665 ++ }
4666 ++
4667 ++ switch (atomic_xchg(&buffer->state, new_state)) {
4668 ++ case QETH_QDIO_BUF_PRIMED:
4669 ++ /* Faster than TX completion code. */
4670 ++ notification = qeth_compute_cq_notification(aob->aorc, 0);
4671 ++ qeth_notify_skbs(buffer->q, buffer, notification);
4672 ++ atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
4673 ++ break;
4674 ++ case QETH_QDIO_BUF_PENDING:
4675 ++ /* TX completion code is active and will handle the async
4676 ++ * completion for us.
4677 ++ */
4678 ++ break;
4679 ++ case QETH_QDIO_BUF_NEED_QAOB:
4680 ++ /* TX completion code is already finished. */
4681 ++ notification = qeth_compute_cq_notification(aob->aorc, 1);
4682 ++ qeth_notify_skbs(buffer->q, buffer, notification);
4683 ++ atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
4684 ++ break;
4685 ++ default:
4686 ++ WARN_ON_ONCE(1);
4687 ++ }
4688 +
4689 + qdio_release_aob(aob);
4690 + }
4691 +@@ -1408,7 +1414,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
4692 + skb_queue_walk(&buf->skb_list, skb) {
4693 + QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
4694 + QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
4695 +- if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
4696 ++ if (skb->sk && skb->sk->sk_family == PF_IUCV)
4697 + iucv_sk(skb->sk)->sk_txnotify(skb, notification);
4698 + }
4699 + }
4700 +@@ -1419,9 +1425,6 @@ static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
4701 + struct qeth_qdio_out_q *queue = buf->q;
4702 + struct sk_buff *skb;
4703 +
4704 +- /* release may never happen from within CQ tasklet scope */
4705 +- WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
4706 +-
4707 + if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
4708 + qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
4709 +
4710 +@@ -5846,9 +5849,32 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
4711 +
4712 + if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
4713 + QETH_QDIO_BUF_PENDING) ==
4714 +- QETH_QDIO_BUF_PRIMED)
4715 ++ QETH_QDIO_BUF_PRIMED) {
4716 + qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
4717 +
4718 ++ /* Handle race with qeth_qdio_handle_aob(): */
4719 ++ switch (atomic_xchg(&buffer->state,
4720 ++ QETH_QDIO_BUF_NEED_QAOB)) {
4721 ++ case QETH_QDIO_BUF_PENDING:
4722 ++ /* No concurrent QAOB notification. */
4723 ++ break;
4724 ++ case QETH_QDIO_BUF_QAOB_OK:
4725 ++ qeth_notify_skbs(queue, buffer,
4726 ++ TX_NOTIFY_DELAYED_OK);
4727 ++ atomic_set(&buffer->state,
4728 ++ QETH_QDIO_BUF_HANDLED_DELAYED);
4729 ++ break;
4730 ++ case QETH_QDIO_BUF_QAOB_ERROR:
4731 ++ qeth_notify_skbs(queue, buffer,
4732 ++ TX_NOTIFY_DELAYED_GENERALERROR);
4733 ++ atomic_set(&buffer->state,
4734 ++ QETH_QDIO_BUF_HANDLED_DELAYED);
4735 ++ break;
4736 ++ default:
4737 ++ WARN_ON_ONCE(1);
4738 ++ }
4739 ++ }
4740 ++
4741 + QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
4742 +
4743 + /* prepare the queue slot for re-use: */
4744 +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
4745 +index 1e9c3171fa9f4..f9314f1393fbd 100644
4746 +--- a/drivers/scsi/libiscsi.c
4747 ++++ b/drivers/scsi/libiscsi.c
4748 +@@ -533,8 +533,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
4749 + if (conn->task == task)
4750 + conn->task = NULL;
4751 +
4752 +- if (conn->ping_task == task)
4753 +- conn->ping_task = NULL;
4754 ++ if (READ_ONCE(conn->ping_task) == task)
4755 ++ WRITE_ONCE(conn->ping_task, NULL);
4756 +
4757 + /* release get from queueing */
4758 + __iscsi_put_task(task);
4759 +@@ -738,6 +738,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
4760 + task->conn->session->age);
4761 + }
4762 +
4763 ++ if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
4764 ++ WRITE_ONCE(conn->ping_task, task);
4765 ++
4766 + if (!ihost->workq) {
4767 + if (iscsi_prep_mgmt_task(conn, task))
4768 + goto free_task;
4769 +@@ -941,8 +944,11 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
4770 + struct iscsi_nopout hdr;
4771 + struct iscsi_task *task;
4772 +
4773 +- if (!rhdr && conn->ping_task)
4774 +- return -EINVAL;
4775 ++ if (!rhdr) {
4776 ++ if (READ_ONCE(conn->ping_task))
4777 ++ return -EINVAL;
4778 ++ WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
4779 ++ }
4780 +
4781 + memset(&hdr, 0, sizeof(struct iscsi_nopout));
4782 + hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
4783 +@@ -957,11 +963,12 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
4784 +
4785 + task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
4786 + if (!task) {
4787 ++ if (!rhdr)
4788 ++ WRITE_ONCE(conn->ping_task, NULL);
4789 + iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
4790 + return -EIO;
4791 + } else if (!rhdr) {
4792 + /* only track our nops */
4793 +- conn->ping_task = task;
4794 + conn->last_ping = jiffies;
4795 + }
4796 +
4797 +@@ -984,7 +991,7 @@ static int iscsi_nop_out_rsp(struct iscsi_task *task,
4798 + struct iscsi_conn *conn = task->conn;
4799 + int rc = 0;
4800 +
4801 +- if (conn->ping_task != task) {
4802 ++ if (READ_ONCE(conn->ping_task) != task) {
4803 + /*
4804 + * If this is not in response to one of our
4805 + * nops then it must be from userspace.
4806 +@@ -1923,7 +1930,7 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
4807 + */
4808 + static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
4809 + {
4810 +- if (conn->ping_task &&
4811 ++ if (READ_ONCE(conn->ping_task) &&
4812 + time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
4813 + (conn->ping_timeout * HZ), jiffies))
4814 + return 1;
4815 +@@ -2058,7 +2065,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
4816 + * Checking the transport already or nop from a cmd timeout still
4817 + * running
4818 + */
4819 +- if (conn->ping_task) {
4820 ++ if (READ_ONCE(conn->ping_task)) {
4821 + task->have_checked_conn = true;
4822 + rc = BLK_EH_RESET_TIMER;
4823 + goto done;
4824 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
4825 +index 54928a837dad0..9dd32bb0ff2be 100644
4826 +--- a/drivers/scsi/ufs/ufshcd.c
4827 ++++ b/drivers/scsi/ufs/ufshcd.c
4828 +@@ -8677,11 +8677,7 @@ int ufshcd_shutdown(struct ufs_hba *hba)
4829 + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
4830 + goto out;
4831 +
4832 +- if (pm_runtime_suspended(hba->dev)) {
4833 +- ret = ufshcd_runtime_resume(hba);
4834 +- if (ret)
4835 +- goto out;
4836 +- }
4837 ++ pm_runtime_get_sync(hba->dev);
4838 +
4839 + ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
4840 + out:
4841 +diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
4842 +index 9cfa15ec8b08c..5743e727b5f78 100644
4843 +--- a/drivers/spi/spi-bcm-qspi.c
4844 ++++ b/drivers/spi/spi-bcm-qspi.c
4845 +@@ -1334,7 +1334,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
4846 +
4847 + data = of_id->data;
4848 +
4849 +- master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
4850 ++ master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi));
4851 + if (!master) {
4852 + dev_err(dev, "error allocating spi_master\n");
4853 + return -ENOMEM;
4854 +@@ -1374,21 +1374,17 @@ int bcm_qspi_probe(struct platform_device *pdev,
4855 +
4856 + if (res) {
4857 + qspi->base[MSPI] = devm_ioremap_resource(dev, res);
4858 +- if (IS_ERR(qspi->base[MSPI])) {
4859 +- ret = PTR_ERR(qspi->base[MSPI]);
4860 +- goto qspi_resource_err;
4861 +- }
4862 ++ if (IS_ERR(qspi->base[MSPI]))
4863 ++ return PTR_ERR(qspi->base[MSPI]);
4864 + } else {
4865 +- goto qspi_resource_err;
4866 ++ return 0;
4867 + }
4868 +
4869 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
4870 + if (res) {
4871 + qspi->base[BSPI] = devm_ioremap_resource(dev, res);
4872 +- if (IS_ERR(qspi->base[BSPI])) {
4873 +- ret = PTR_ERR(qspi->base[BSPI]);
4874 +- goto qspi_resource_err;
4875 +- }
4876 ++ if (IS_ERR(qspi->base[BSPI]))
4877 ++ return PTR_ERR(qspi->base[BSPI]);
4878 + qspi->bspi_mode = true;
4879 + } else {
4880 + qspi->bspi_mode = false;
4881 +@@ -1399,18 +1395,14 @@ int bcm_qspi_probe(struct platform_device *pdev,
4882 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
4883 + if (res) {
4884 + qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
4885 +- if (IS_ERR(qspi->base[CHIP_SELECT])) {
4886 +- ret = PTR_ERR(qspi->base[CHIP_SELECT]);
4887 +- goto qspi_resource_err;
4888 +- }
4889 ++ if (IS_ERR(qspi->base[CHIP_SELECT]))
4890 ++ return PTR_ERR(qspi->base[CHIP_SELECT]);
4891 + }
4892 +
4893 + qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
4894 + GFP_KERNEL);
4895 +- if (!qspi->dev_ids) {
4896 +- ret = -ENOMEM;
4897 +- goto qspi_resource_err;
4898 +- }
4899 ++ if (!qspi->dev_ids)
4900 ++ return -ENOMEM;
4901 +
4902 + for (val = 0; val < num_irqs; val++) {
4903 + irq = -1;
4904 +@@ -1491,7 +1483,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
4905 + qspi->xfer_mode.addrlen = -1;
4906 + qspi->xfer_mode.hp = -1;
4907 +
4908 +- ret = devm_spi_register_master(&pdev->dev, master);
4909 ++ ret = spi_register_master(master);
4910 + if (ret < 0) {
4911 + dev_err(dev, "can't register master\n");
4912 + goto qspi_reg_err;
4913 +@@ -1504,8 +1496,6 @@ qspi_reg_err:
4914 + clk_disable_unprepare(qspi->clk);
4915 + qspi_probe_err:
4916 + kfree(qspi->dev_ids);
4917 +-qspi_resource_err:
4918 +- spi_master_put(master);
4919 + return ret;
4920 + }
4921 + /* probe function to be called by SoC specific platform driver probe */
4922 +@@ -1515,10 +1505,10 @@ int bcm_qspi_remove(struct platform_device *pdev)
4923 + {
4924 + struct bcm_qspi *qspi = platform_get_drvdata(pdev);
4925 +
4926 ++ spi_unregister_master(qspi->master);
4927 + bcm_qspi_hw_uninit(qspi);
4928 + clk_disable_unprepare(qspi->clk);
4929 + kfree(qspi->dev_ids);
4930 +- spi_unregister_master(qspi->master);
4931 +
4932 + return 0;
4933 + }
4934 +diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
4935 +index 9605abaaec670..197485f2c2b22 100644
4936 +--- a/drivers/spi/spi-bcm2835.c
4937 ++++ b/drivers/spi/spi-bcm2835.c
4938 +@@ -1278,7 +1278,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
4939 + struct bcm2835_spi *bs;
4940 + int err;
4941 +
4942 +- ctlr = spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
4943 ++ ctlr = devm_spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
4944 + dma_get_cache_alignment()));
4945 + if (!ctlr)
4946 + return -ENOMEM;
4947 +@@ -1299,26 +1299,17 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
4948 + bs->ctlr = ctlr;
4949 +
4950 + bs->regs = devm_platform_ioremap_resource(pdev, 0);
4951 +- if (IS_ERR(bs->regs)) {
4952 +- err = PTR_ERR(bs->regs);
4953 +- goto out_controller_put;
4954 +- }
4955 ++ if (IS_ERR(bs->regs))
4956 ++ return PTR_ERR(bs->regs);
4957 +
4958 + bs->clk = devm_clk_get(&pdev->dev, NULL);
4959 +- if (IS_ERR(bs->clk)) {
4960 +- err = PTR_ERR(bs->clk);
4961 +- if (err == -EPROBE_DEFER)
4962 +- dev_dbg(&pdev->dev, "could not get clk: %d\n", err);
4963 +- else
4964 +- dev_err(&pdev->dev, "could not get clk: %d\n", err);
4965 +- goto out_controller_put;
4966 +- }
4967 ++ if (IS_ERR(bs->clk))
4968 ++ return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
4969 ++ "could not get clk\n");
4970 +
4971 + bs->irq = platform_get_irq(pdev, 0);
4972 +- if (bs->irq <= 0) {
4973 +- err = bs->irq ? bs->irq : -ENODEV;
4974 +- goto out_controller_put;
4975 +- }
4976 ++ if (bs->irq <= 0)
4977 ++ return bs->irq ? bs->irq : -ENODEV;
4978 +
4979 + clk_prepare_enable(bs->clk);
4980 +
4981 +@@ -1352,8 +1343,6 @@ out_dma_release:
4982 + bcm2835_dma_release(ctlr, bs);
4983 + out_clk_disable:
4984 + clk_disable_unprepare(bs->clk);
4985 +-out_controller_put:
4986 +- spi_controller_put(ctlr);
4987 + return err;
4988 + }
4989 +
4990 +diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
4991 +index fd58547110e68..1a26865c42f83 100644
4992 +--- a/drivers/spi/spi-bcm2835aux.c
4993 ++++ b/drivers/spi/spi-bcm2835aux.c
4994 +@@ -529,8 +529,9 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
4995 +
4996 + bs->clk = devm_clk_get(&pdev->dev, NULL);
4997 + if (IS_ERR(bs->clk)) {
4998 ++ err = PTR_ERR(bs->clk);
4999 + dev_err(&pdev->dev, "could not get clk: %d\n", err);
5000 +- return PTR_ERR(bs->clk);
5001 ++ return err;
5002 + }
5003 +
5004 + bs->irq = platform_get_irq(pdev, 0);
5005 +diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
5006 +index 9aac515b718c8..91578103a3ca9 100644
5007 +--- a/drivers/spi/spi-imx.c
5008 ++++ b/drivers/spi/spi-imx.c
5009 +@@ -1684,6 +1684,7 @@ static int spi_imx_probe(struct platform_device *pdev)
5010 +
5011 + pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
5012 + pm_runtime_use_autosuspend(spi_imx->dev);
5013 ++ pm_runtime_get_noresume(spi_imx->dev);
5014 + pm_runtime_set_active(spi_imx->dev);
5015 + pm_runtime_enable(spi_imx->dev);
5016 +
5017 +diff --git a/drivers/staging/ralink-gdma/Kconfig b/drivers/staging/ralink-gdma/Kconfig
5018 +index 54e8029e6b1af..0017376234e28 100644
5019 +--- a/drivers/staging/ralink-gdma/Kconfig
5020 ++++ b/drivers/staging/ralink-gdma/Kconfig
5021 +@@ -2,6 +2,7 @@
5022 + config DMA_RALINK
5023 + tristate "RALINK DMA support"
5024 + depends on RALINK && !SOC_RT288X
5025 ++ depends on DMADEVICES
5026 + select DMA_ENGINE
5027 + select DMA_VIRTUAL_CHANNELS
5028 +
5029 +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
5030 +index 7b56fe9f10628..2e18ec42c7045 100644
5031 +--- a/drivers/target/iscsi/iscsi_target.c
5032 ++++ b/drivers/target/iscsi/iscsi_target.c
5033 +@@ -483,8 +483,7 @@ EXPORT_SYMBOL(iscsit_queue_rsp);
5034 + void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
5035 + {
5036 + spin_lock_bh(&conn->cmd_lock);
5037 +- if (!list_empty(&cmd->i_conn_node) &&
5038 +- !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
5039 ++ if (!list_empty(&cmd->i_conn_node))
5040 + list_del_init(&cmd->i_conn_node);
5041 + spin_unlock_bh(&conn->cmd_lock);
5042 +
5043 +@@ -4083,12 +4082,22 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
5044 + spin_lock_bh(&conn->cmd_lock);
5045 + list_splice_init(&conn->conn_cmd_list, &tmp_list);
5046 +
5047 +- list_for_each_entry(cmd, &tmp_list, i_conn_node) {
5048 ++ list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
5049 + struct se_cmd *se_cmd = &cmd->se_cmd;
5050 +
5051 + if (se_cmd->se_tfo != NULL) {
5052 + spin_lock_irq(&se_cmd->t_state_lock);
5053 +- se_cmd->transport_state |= CMD_T_FABRIC_STOP;
5054 ++ if (se_cmd->transport_state & CMD_T_ABORTED) {
5055 ++ /*
5056 ++ * LIO's abort path owns the cleanup for this,
5057 ++ * so put it back on the list and let
5058 ++ * aborted_task handle it.
5059 ++ */
5060 ++ list_move_tail(&cmd->i_conn_node,
5061 ++ &conn->conn_cmd_list);
5062 ++ } else {
5063 ++ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
5064 ++ }
5065 + spin_unlock_irq(&se_cmd->t_state_lock);
5066 + }
5067 + }
5068 +diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
5069 +index 20b6fd7383c54..c981757ba0d40 100644
5070 +--- a/drivers/tee/optee/call.c
5071 ++++ b/drivers/tee/optee/call.c
5072 +@@ -534,7 +534,8 @@ void optee_free_pages_list(void *list, size_t num_entries)
5073 + static bool is_normal_memory(pgprot_t p)
5074 + {
5075 + #if defined(CONFIG_ARM)
5076 +- return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
5077 ++ return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
5078 ++ ((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
5079 + #elif defined(CONFIG_ARM64)
5080 + return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
5081 + #else
5082 +diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
5083 +index e0e1cb907ffd8..6af6343c7c65a 100644
5084 +--- a/drivers/usb/cdns3/gadget.c
5085 ++++ b/drivers/usb/cdns3/gadget.c
5086 +@@ -261,8 +261,8 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
5087 + */
5088 + link_trb->control = 0;
5089 + } else {
5090 +- link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
5091 +- link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
5092 ++ link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
5093 ++ link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
5094 + }
5095 + return 0;
5096 + }
5097 +@@ -853,10 +853,10 @@ static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
5098 + priv_ep->wa1_trb_index = 0xFFFF;
5099 + if (priv_ep->wa1_cycle_bit) {
5100 + priv_ep->wa1_trb->control =
5101 +- priv_ep->wa1_trb->control | 0x1;
5102 ++ priv_ep->wa1_trb->control | cpu_to_le32(0x1);
5103 + } else {
5104 + priv_ep->wa1_trb->control =
5105 +- priv_ep->wa1_trb->control & ~0x1;
5106 ++ priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
5107 + }
5108 + }
5109 + }
5110 +@@ -1014,17 +1014,16 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
5111 + TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;
5112 +
5113 + if (!request->num_sgs) {
5114 +- trb->buffer = TRB_BUFFER(trb_dma);
5115 ++ trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
5116 + length = request->length;
5117 + } else {
5118 +- trb->buffer = TRB_BUFFER(request->sg[sg_idx].dma_address);
5119 ++ trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address));
5120 + length = request->sg[sg_idx].length;
5121 + }
5122 +
5123 + tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);
5124 +
5125 +- trb->length = TRB_BURST_LEN(16 /*priv_ep->trb_burst_size*/) |
5126 +- TRB_LEN(length);
5127 ++ trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length));
5128 +
5129 + /*
5130 + * For DEV_VER_V2 controller version we have enabled
5131 +@@ -1033,11 +1032,11 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
5132 + */
5133 + if (priv_dev->dev_ver >= DEV_VER_V2) {
5134 + if (priv_dev->gadget.speed == USB_SPEED_SUPER)
5135 +- trb->length |= TRB_TDL_SS_SIZE(tdl);
5136 ++ trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
5137 + }
5138 + priv_req->flags |= REQUEST_PENDING;
5139 +
5140 +- trb->control = control;
5141 ++ trb->control = cpu_to_le32(control);
5142 +
5143 + trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
5144 +
5145 +@@ -1162,8 +1161,8 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
5146 + TRBS_PER_SEGMENT > 2)
5147 + ch_bit = TRB_CHAIN;
5148 +
5149 +- link_trb->control = ((priv_ep->pcs) ? TRB_CYCLE : 0) |
5150 +- TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit;
5151 ++ link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
5152 ++ TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
5153 + }
5154 +
5155 + if (priv_dev->dev_ver <= DEV_VER_V2)
5156 +@@ -1171,35 +1170,37 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
5157 +
5158 + /* set incorrect Cycle Bit for first trb*/
5159 + control = priv_ep->pcs ? 0 : TRB_CYCLE;
5160 ++ trb->length = 0;
5161 ++ if (priv_dev->dev_ver >= DEV_VER_V2) {
5162 ++ u16 td_size;
5163 ++
5164 ++ td_size = DIV_ROUND_UP(request->length,
5165 ++ priv_ep->endpoint.maxpacket);
5166 ++ if (priv_dev->gadget.speed == USB_SPEED_SUPER)
5167 ++ trb->length = TRB_TDL_SS_SIZE(td_size);
5168 ++ else
5169 ++ control |= TRB_TDL_HS_SIZE(td_size);
5170 ++ }
5171 +
5172 + do {
5173 + u32 length;
5174 +- u16 td_size = 0;
5175 +
5176 + /* fill TRB */
5177 + control |= TRB_TYPE(TRB_NORMAL);
5178 +- trb->buffer = TRB_BUFFER(request->num_sgs == 0
5179 +- ? trb_dma : request->sg[sg_iter].dma_address);
5180 ++ trb->buffer = cpu_to_le32(TRB_BUFFER(request->num_sgs == 0
5181 ++ ? trb_dma : request->sg[sg_iter].dma_address));
5182 +
5183 + if (likely(!request->num_sgs))
5184 + length = request->length;
5185 + else
5186 + length = request->sg[sg_iter].length;
5187 +
5188 +- if (likely(priv_dev->dev_ver >= DEV_VER_V2))
5189 +- td_size = DIV_ROUND_UP(length,
5190 +- priv_ep->endpoint.maxpacket);
5191 +- else if (priv_ep->flags & EP_TDLCHK_EN)
5192 ++ if (priv_ep->flags & EP_TDLCHK_EN)
5193 + total_tdl += DIV_ROUND_UP(length,
5194 + priv_ep->endpoint.maxpacket);
5195 +
5196 +- trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) |
5197 +- TRB_LEN(length);
5198 +- if (priv_dev->gadget.speed == USB_SPEED_SUPER)
5199 +- trb->length |= TRB_TDL_SS_SIZE(td_size);
5200 +- else
5201 +- control |= TRB_TDL_HS_SIZE(td_size);
5202 +-
5203 ++ trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
5204 ++ TRB_LEN(length));
5205 + pcs = priv_ep->pcs ? TRB_CYCLE : 0;
5206 +
5207 + /*
5208 +@@ -1218,9 +1219,9 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
5209 + }
5210 +
5211 + if (sg_iter)
5212 +- trb->control = control;
5213 ++ trb->control = cpu_to_le32(control);
5214 + else
5215 +- priv_req->trb->control = control;
5216 ++ priv_req->trb->control = cpu_to_le32(control);
5217 +
5218 + control = 0;
5219 + ++sg_iter;
5220 +@@ -1234,7 +1235,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
5221 + priv_req->flags |= REQUEST_PENDING;
5222 +
5223 + if (sg_iter == 1)
5224 +- trb->control |= TRB_IOC | TRB_ISP;
5225 ++ trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);
5226 +
5227 + if (priv_dev->dev_ver < DEV_VER_V2 &&
5228 + (priv_ep->flags & EP_TDLCHK_EN)) {
5229 +@@ -1260,7 +1261,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
5230 +
5231 + /* give the TD to the consumer*/
5232 + if (togle_pcs)
5233 +- trb->control = trb->control ^ 1;
5234 ++ trb->control = trb->control ^ cpu_to_le32(1);
5235 +
5236 + if (priv_dev->dev_ver <= DEV_VER_V2)
5237 + cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);
5238 +@@ -1399,7 +1400,7 @@ static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep,
5239 +
5240 + trb = &priv_ep->trb_pool[priv_req->start_trb];
5241 +
5242 +- if ((trb->control & TRB_CYCLE) != priv_ep->ccs)
5243 ++ if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
5244 + goto finish;
5245 +
5246 + if (doorbell == 1 && current_index == priv_ep->dequeue)
5247 +@@ -1448,7 +1449,7 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
5248 + trb = priv_ep->trb_pool + priv_ep->dequeue;
5249 +
5250 + /* Request was dequeued and TRB was changed to TRB_LINK. */
5251 +- if (TRB_FIELD_TO_TYPE(trb->control) == TRB_LINK) {
5252 ++ if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
5253 + trace_cdns3_complete_trb(priv_ep, trb);
5254 + cdns3_move_deq_to_next_trb(priv_req);
5255 + }
5256 +@@ -1580,7 +1581,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
5257 + * that host ignore the ERDY packet and driver has to send it
5258 + * again.
5259 + */
5260 +- if (tdl && (dbusy | !EP_STS_BUFFEMPTY(ep_sts_reg) |
5261 ++ if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
5262 + EP_STS_HOSTPP(ep_sts_reg))) {
5263 + writel(EP_CMD_ERDY |
5264 + EP_CMD_ERDY_SID(priv_ep->last_stream_id),
5265 +@@ -2564,10 +2565,10 @@ found:
5266 +
5267 + /* Update ring only if removed request is on pending_req_list list */
5268 + if (req_on_hw_ring && link_trb) {
5269 +- link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
5270 +- ((priv_req->end_trb + 1) * TRB_SIZE));
5271 +- link_trb->control = (link_trb->control & TRB_CYCLE) |
5272 +- TRB_TYPE(TRB_LINK) | TRB_CHAIN;
5273 ++ link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
5274 ++ ((priv_req->end_trb + 1) * TRB_SIZE)));
5275 ++ link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
5276 ++ TRB_TYPE(TRB_LINK) | TRB_CHAIN);
5277 +
5278 + if (priv_ep->wa1_trb == priv_req->trb)
5279 + cdns3_wa1_restore_cycle_bit(priv_ep);
5280 +@@ -2622,7 +2623,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
5281 + priv_req = to_cdns3_request(request);
5282 + trb = priv_req->trb;
5283 + if (trb)
5284 +- trb->control = trb->control ^ TRB_CYCLE;
5285 ++ trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
5286 + }
5287 +
5288 + writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
5289 +@@ -2637,7 +2638,8 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
5290 +
5291 + if (request) {
5292 + if (trb)
5293 +- trb->control = trb->control ^ TRB_CYCLE;
5294 ++ trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
5295 ++
5296 + cdns3_rearm_transfer(priv_ep, 1);
5297 + }
5298 +
5299 +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
5300 +index e96a858a12185..533236366a03b 100644
5301 +--- a/drivers/usb/core/devio.c
5302 ++++ b/drivers/usb/core/devio.c
5303 +@@ -482,11 +482,11 @@ static void snoop_urb(struct usb_device *udev,
5304 +
5305 + if (userurb) { /* Async */
5306 + if (when == SUBMIT)
5307 +- dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
5308 ++ dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
5309 + "length %u\n",
5310 + userurb, ep, t, d, length);
5311 + else
5312 +- dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
5313 ++ dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
5314 + "actual_length %u status %d\n",
5315 + userurb, ep, t, d, length,
5316 + timeout_or_status);
5317 +@@ -1997,7 +1997,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
5318 + if (as) {
5319 + int retval;
5320 +
5321 +- snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
5322 ++ snoop(&ps->dev->dev, "reap %px\n", as->userurb);
5323 + retval = processcompl(as, (void __user * __user *)arg);
5324 + free_async(as);
5325 + return retval;
5326 +@@ -2014,7 +2014,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
5327 +
5328 + as = async_getcompleted(ps);
5329 + if (as) {
5330 +- snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
5331 ++ snoop(&ps->dev->dev, "reap %px\n", as->userurb);
5332 + retval = processcompl(as, (void __user * __user *)arg);
5333 + free_async(as);
5334 + } else {
5335 +@@ -2142,7 +2142,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
5336 + if (as) {
5337 + int retval;
5338 +
5339 +- snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
5340 ++ snoop(&ps->dev->dev, "reap %px\n", as->userurb);
5341 + retval = processcompl_compat(as, (void __user * __user *)arg);
5342 + free_async(as);
5343 + return retval;
5344 +@@ -2159,7 +2159,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar
5345 +
5346 + as = async_getcompleted(ps);
5347 + if (as) {
5348 +- snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
5349 ++ snoop(&ps->dev->dev, "reap %px\n", as->userurb);
5350 + retval = processcompl_compat(as, (void __user * __user *)arg);
5351 + free_async(as);
5352 + } else {
5353 +@@ -2624,7 +2624,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
5354 + #endif
5355 +
5356 + case USBDEVFS_DISCARDURB:
5357 +- snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
5358 ++ snoop(&dev->dev, "%s: DISCARDURB %px\n", __func__, p);
5359 + ret = proc_unlinkurb(ps, p);
5360 + break;
5361 +
5362 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
5363 +index a1e3a037a2892..fad31ccd1fa83 100644
5364 +--- a/drivers/usb/core/quirks.c
5365 ++++ b/drivers/usb/core/quirks.c
5366 +@@ -348,6 +348,10 @@ static const struct usb_device_id usb_quirk_list[] = {
5367 + /* Guillemot Webcam Hercules Dualpix Exchange*/
5368 + { USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
5369 +
5370 ++ /* Guillemot Hercules DJ Console audio card (BZ 208357) */
5371 ++ { USB_DEVICE(0x06f8, 0xb000), .driver_info =
5372 ++ USB_QUIRK_ENDPOINT_IGNORE },
5373 ++
5374 + /* Midiman M-Audio Keystation 88es */
5375 + { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
5376 +
5377 +@@ -421,6 +425,10 @@ static const struct usb_device_id usb_quirk_list[] = {
5378 + { USB_DEVICE(0x1532, 0x0116), .driver_info =
5379 + USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
5380 +
5381 ++ /* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
5382 ++ { USB_DEVICE(0x17ef, 0xa012), .driver_info =
5383 ++ USB_QUIRK_DISCONNECT_SUSPEND },
5384 ++
5385 + /* BUILDWIN Photo Frame */
5386 + { USB_DEVICE(0x1908, 0x1315), .driver_info =
5387 + USB_QUIRK_HONOR_BNUMINTERFACES },
5388 +@@ -521,6 +529,8 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
5389 + * Matched for devices with USB_QUIRK_ENDPOINT_IGNORE.
5390 + */
5391 + static const struct usb_device_id usb_endpoint_ignore[] = {
5392 ++ { USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x01 },
5393 ++ { USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x81 },
5394 + { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
5395 + { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 },
5396 + { }
5397 +diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
5398 +index 46af0aa07e2e3..b2b5b0689667b 100644
5399 +--- a/drivers/usb/gadget/function/f_midi.c
5400 ++++ b/drivers/usb/gadget/function/f_midi.c
5401 +@@ -1315,7 +1315,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
5402 + midi->id = kstrdup(opts->id, GFP_KERNEL);
5403 + if (opts->id && !midi->id) {
5404 + status = -ENOMEM;
5405 +- goto setup_fail;
5406 ++ goto midi_free;
5407 + }
5408 + midi->in_ports = opts->in_ports;
5409 + midi->out_ports = opts->out_ports;
5410 +@@ -1327,7 +1327,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
5411 +
5412 + status = kfifo_alloc(&midi->in_req_fifo, midi->qlen, GFP_KERNEL);
5413 + if (status)
5414 +- goto setup_fail;
5415 ++ goto midi_free;
5416 +
5417 + spin_lock_init(&midi->transmit_lock);
5418 +
5419 +@@ -1343,9 +1343,13 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
5420 +
5421 + return &midi->func;
5422 +
5423 ++midi_free:
5424 ++ if (midi)
5425 ++ kfree(midi->id);
5426 ++ kfree(midi);
5427 + setup_fail:
5428 + mutex_unlock(&opts->lock);
5429 +- kfree(midi);
5430 ++
5431 + return ERR_PTR(status);
5432 + }
5433 +
5434 +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
5435 +index 1b430b36d0a6b..71e7d10dd76b9 100644
5436 +--- a/drivers/usb/gadget/legacy/inode.c
5437 ++++ b/drivers/usb/gadget/legacy/inode.c
5438 +@@ -2039,6 +2039,9 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
5439 + return 0;
5440 +
5441 + Enomem:
5442 ++ kfree(CHIP);
5443 ++ CHIP = NULL;
5444 ++
5445 + return -ENOMEM;
5446 + }
5447 +
5448 +diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
5449 +index d7d32b6561021..358f6048dd3ce 100644
5450 +--- a/drivers/vdpa/Kconfig
5451 ++++ b/drivers/vdpa/Kconfig
5452 +@@ -13,6 +13,7 @@ config VDPA_SIM
5453 + depends on RUNTIME_TESTING_MENU && HAS_DMA
5454 + select DMA_OPS
5455 + select VHOST_RING
5456 ++ select GENERIC_NET_UTILS
5457 + default n
5458 + help
5459 + vDPA networking device simulator which loop TX traffic back
5460 +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
5461 +index b22adf03f5842..5d8850f5aef16 100644
5462 +--- a/drivers/vhost/scsi.c
5463 ++++ b/drivers/vhost/scsi.c
5464 +@@ -52,7 +52,6 @@
5465 + #define VHOST_SCSI_VERSION "v0.1"
5466 + #define VHOST_SCSI_NAMELEN 256
5467 + #define VHOST_SCSI_MAX_CDB_SIZE 32
5468 +-#define VHOST_SCSI_DEFAULT_TAGS 256
5469 + #define VHOST_SCSI_PREALLOC_SGLS 2048
5470 + #define VHOST_SCSI_PREALLOC_UPAGES 2048
5471 + #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
5472 +@@ -189,6 +188,9 @@ struct vhost_scsi_virtqueue {
5473 + * Writers must also take dev mutex and flush under it.
5474 + */
5475 + int inflight_idx;
5476 ++ struct vhost_scsi_cmd *scsi_cmds;
5477 ++ struct sbitmap scsi_tags;
5478 ++ int max_cmds;
5479 + };
5480 +
5481 + struct vhost_scsi {
5482 +@@ -320,11 +322,13 @@ static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
5483 + return 1;
5484 + }
5485 +
5486 +-static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
5487 ++static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
5488 + {
5489 + struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
5490 + struct vhost_scsi_cmd, tvc_se_cmd);
5491 +- struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
5492 ++ struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
5493 ++ struct vhost_scsi_virtqueue, vq);
5494 ++ struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
5495 + int i;
5496 +
5497 + if (tv_cmd->tvc_sgl_count) {
5498 +@@ -336,8 +340,18 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
5499 + put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
5500 + }
5501 +
5502 +- vhost_scsi_put_inflight(tv_cmd->inflight);
5503 +- target_free_tag(se_sess, se_cmd);
5504 ++ sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
5505 ++ vhost_scsi_put_inflight(inflight);
5506 ++}
5507 ++
5508 ++static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
5509 ++{
5510 ++ struct vhost_scsi_cmd *cmd = container_of(se_cmd,
5511 ++ struct vhost_scsi_cmd, tvc_se_cmd);
5512 ++ struct vhost_scsi *vs = cmd->tvc_vhost;
5513 ++
5514 ++ llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
5515 ++ vhost_work_queue(&vs->dev, &vs->vs_completion_work);
5516 + }
5517 +
5518 + static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
5519 +@@ -362,28 +376,15 @@ static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
5520 + return 0;
5521 + }
5522 +
5523 +-static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
5524 +-{
5525 +- struct vhost_scsi *vs = cmd->tvc_vhost;
5526 +-
5527 +- llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
5528 +-
5529 +- vhost_work_queue(&vs->dev, &vs->vs_completion_work);
5530 +-}
5531 +-
5532 + static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
5533 + {
5534 +- struct vhost_scsi_cmd *cmd = container_of(se_cmd,
5535 +- struct vhost_scsi_cmd, tvc_se_cmd);
5536 +- vhost_scsi_complete_cmd(cmd);
5537 ++ transport_generic_free_cmd(se_cmd, 0);
5538 + return 0;
5539 + }
5540 +
5541 + static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
5542 + {
5543 +- struct vhost_scsi_cmd *cmd = container_of(se_cmd,
5544 +- struct vhost_scsi_cmd, tvc_se_cmd);
5545 +- vhost_scsi_complete_cmd(cmd);
5546 ++ transport_generic_free_cmd(se_cmd, 0);
5547 + return 0;
5548 + }
5549 +
5550 +@@ -429,15 +430,6 @@ vhost_scsi_allocate_evt(struct vhost_scsi *vs,
5551 + return evt;
5552 + }
5553 +
5554 +-static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
5555 +-{
5556 +- struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
5557 +-
5558 +- /* TODO locking against target/backend threads? */
5559 +- transport_generic_free_cmd(se_cmd, 0);
5560 +-
5561 +-}
5562 +-
5563 + static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
5564 + {
5565 + return target_put_sess_cmd(se_cmd);
5566 +@@ -556,7 +548,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
5567 + } else
5568 + pr_err("Faulted on virtio_scsi_cmd_resp\n");
5569 +
5570 +- vhost_scsi_free_cmd(cmd);
5571 ++ vhost_scsi_release_cmd_res(se_cmd);
5572 + }
5573 +
5574 + vq = -1;
5575 +@@ -566,31 +558,31 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
5576 + }
5577 +
5578 + static struct vhost_scsi_cmd *
5579 +-vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
5580 ++vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
5581 + unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
5582 + u32 exp_data_len, int data_direction)
5583 + {
5584 ++ struct vhost_scsi_virtqueue *svq = container_of(vq,
5585 ++ struct vhost_scsi_virtqueue, vq);
5586 + struct vhost_scsi_cmd *cmd;
5587 + struct vhost_scsi_nexus *tv_nexus;
5588 +- struct se_session *se_sess;
5589 + struct scatterlist *sg, *prot_sg;
5590 + struct page **pages;
5591 +- int tag, cpu;
5592 ++ int tag;
5593 +
5594 + tv_nexus = tpg->tpg_nexus;
5595 + if (!tv_nexus) {
5596 + pr_err("Unable to locate active struct vhost_scsi_nexus\n");
5597 + return ERR_PTR(-EIO);
5598 + }
5599 +- se_sess = tv_nexus->tvn_se_sess;
5600 +
5601 +- tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
5602 ++ tag = sbitmap_get(&svq->scsi_tags, 0, false);
5603 + if (tag < 0) {
5604 + pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
5605 + return ERR_PTR(-ENOMEM);
5606 + }
5607 +
5608 +- cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
5609 ++ cmd = &svq->scsi_cmds[tag];
5610 + sg = cmd->tvc_sgl;
5611 + prot_sg = cmd->tvc_prot_sgl;
5612 + pages = cmd->tvc_upages;
5613 +@@ -599,7 +591,6 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
5614 + cmd->tvc_prot_sgl = prot_sg;
5615 + cmd->tvc_upages = pages;
5616 + cmd->tvc_se_cmd.map_tag = tag;
5617 +- cmd->tvc_se_cmd.map_cpu = cpu;
5618 + cmd->tvc_tag = scsi_tag;
5619 + cmd->tvc_lun = lun;
5620 + cmd->tvc_task_attr = task_attr;
5621 +@@ -1065,11 +1056,11 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
5622 + scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
5623 + goto err;
5624 + }
5625 +- cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
5626 ++ cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
5627 + exp_data_len + prot_bytes,
5628 + data_direction);
5629 + if (IS_ERR(cmd)) {
5630 +- vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
5631 ++ vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
5632 + PTR_ERR(cmd));
5633 + goto err;
5634 + }
5635 +@@ -1088,7 +1079,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
5636 + &prot_iter, exp_data_len,
5637 + &data_iter))) {
5638 + vq_err(vq, "Failed to map iov to sgl\n");
5639 +- vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
5640 ++ vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
5641 + goto err;
5642 + }
5643 + }
5644 +@@ -1373,6 +1364,83 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
5645 + wait_for_completion(&old_inflight[i]->comp);
5646 + }
5647 +
5648 ++static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
5649 ++{
5650 ++ struct vhost_scsi_virtqueue *svq = container_of(vq,
5651 ++ struct vhost_scsi_virtqueue, vq);
5652 ++ struct vhost_scsi_cmd *tv_cmd;
5653 ++ unsigned int i;
5654 ++
5655 ++ if (!svq->scsi_cmds)
5656 ++ return;
5657 ++
5658 ++ for (i = 0; i < svq->max_cmds; i++) {
5659 ++ tv_cmd = &svq->scsi_cmds[i];
5660 ++
5661 ++ kfree(tv_cmd->tvc_sgl);
5662 ++ kfree(tv_cmd->tvc_prot_sgl);
5663 ++ kfree(tv_cmd->tvc_upages);
5664 ++ }
5665 ++
5666 ++ sbitmap_free(&svq->scsi_tags);
5667 ++ kfree(svq->scsi_cmds);
5668 ++ svq->scsi_cmds = NULL;
5669 ++}
5670 ++
5671 ++static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
5672 ++{
5673 ++ struct vhost_scsi_virtqueue *svq = container_of(vq,
5674 ++ struct vhost_scsi_virtqueue, vq);
5675 ++ struct vhost_scsi_cmd *tv_cmd;
5676 ++ unsigned int i;
5677 ++
5678 ++ if (svq->scsi_cmds)
5679 ++ return 0;
5680 ++
5681 ++ if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
5682 ++ NUMA_NO_NODE))
5683 ++ return -ENOMEM;
5684 ++ svq->max_cmds = max_cmds;
5685 ++
5686 ++ svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
5687 ++ if (!svq->scsi_cmds) {
5688 ++ sbitmap_free(&svq->scsi_tags);
5689 ++ return -ENOMEM;
5690 ++ }
5691 ++
5692 ++ for (i = 0; i < max_cmds; i++) {
5693 ++ tv_cmd = &svq->scsi_cmds[i];
5694 ++
5695 ++ tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
5696 ++ sizeof(struct scatterlist),
5697 ++ GFP_KERNEL);
5698 ++ if (!tv_cmd->tvc_sgl) {
5699 ++ pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
5700 ++ goto out;
5701 ++ }
5702 ++
5703 ++ tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
5704 ++ sizeof(struct page *),
5705 ++ GFP_KERNEL);
5706 ++ if (!tv_cmd->tvc_upages) {
5707 ++ pr_err("Unable to allocate tv_cmd->tvc_upages\n");
5708 ++ goto out;
5709 ++ }
5710 ++
5711 ++ tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
5712 ++ sizeof(struct scatterlist),
5713 ++ GFP_KERNEL);
5714 ++ if (!tv_cmd->tvc_prot_sgl) {
5715 ++ pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
5716 ++ goto out;
5717 ++ }
5718 ++ }
5719 ++ return 0;
5720 ++out:
5721 ++ vhost_scsi_destroy_vq_cmds(vq);
5722 ++ return -ENOMEM;
5723 ++}
5724 ++
5725 + /*
5726 + * Called from vhost_scsi_ioctl() context to walk the list of available
5727 + * vhost_scsi_tpg with an active struct vhost_scsi_nexus
5728 +@@ -1427,10 +1495,9 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
5729 +
5730 + if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
5731 + if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
5732 +- kfree(vs_tpg);
5733 + mutex_unlock(&tpg->tv_tpg_mutex);
5734 + ret = -EEXIST;
5735 +- goto out;
5736 ++ goto undepend;
5737 + }
5738 + /*
5739 + * In order to ensure individual vhost-scsi configfs
5740 +@@ -1442,9 +1509,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
5741 + ret = target_depend_item(&se_tpg->tpg_group.cg_item);
5742 + if (ret) {
5743 + pr_warn("target_depend_item() failed: %d\n", ret);
5744 +- kfree(vs_tpg);
5745 + mutex_unlock(&tpg->tv_tpg_mutex);
5746 +- goto out;
5747 ++ goto undepend;
5748 + }
5749 + tpg->tv_tpg_vhost_count++;
5750 + tpg->vhost_scsi = vs;
5751 +@@ -1457,6 +1523,16 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
5752 + if (match) {
5753 + memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
5754 + sizeof(vs->vs_vhost_wwpn));
5755 ++
5756 ++ for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
5757 ++ vq = &vs->vqs[i].vq;
5758 ++ if (!vhost_vq_is_setup(vq))
5759 ++ continue;
5760 ++
5761 ++ if (vhost_scsi_setup_vq_cmds(vq, vq->num))
5762 ++ goto destroy_vq_cmds;
5763 ++ }
5764 ++
5765 + for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
5766 + vq = &vs->vqs[i].vq;
5767 + mutex_lock(&vq->mutex);
5768 +@@ -1476,7 +1552,22 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
5769 + vhost_scsi_flush(vs);
5770 + kfree(vs->vs_tpg);
5771 + vs->vs_tpg = vs_tpg;
5772 ++ goto out;
5773 +
5774 ++destroy_vq_cmds:
5775 ++ for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
5776 ++ if (!vhost_vq_get_backend(&vs->vqs[i].vq))
5777 ++ vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
5778 ++ }
5779 ++undepend:
5780 ++ for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
5781 ++ tpg = vs_tpg[i];
5782 ++ if (tpg) {
5783 ++ tpg->tv_tpg_vhost_count--;
5784 ++ target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
5785 ++ }
5786 ++ }
5787 ++ kfree(vs_tpg);
5788 + out:
5789 + mutex_unlock(&vs->dev.mutex);
5790 + mutex_unlock(&vhost_scsi_mutex);
5791 +@@ -1549,6 +1640,12 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
5792 + mutex_lock(&vq->mutex);
5793 + vhost_vq_set_backend(vq, NULL);
5794 + mutex_unlock(&vq->mutex);
5795 ++ /*
5796 ++ * Make sure cmds are not running before tearing them
5797 ++ * down.
5798 ++ */
5799 ++ vhost_scsi_flush(vs);
5800 ++ vhost_scsi_destroy_vq_cmds(vq);
5801 + }
5802 + }
5803 + /*
5804 +@@ -1842,23 +1939,6 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
5805 + mutex_unlock(&vhost_scsi_mutex);
5806 + }
5807 +
5808 +-static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
5809 +-{
5810 +- struct vhost_scsi_cmd *tv_cmd;
5811 +- unsigned int i;
5812 +-
5813 +- if (!se_sess->sess_cmd_map)
5814 +- return;
5815 +-
5816 +- for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
5817 +- tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
5818 +-
5819 +- kfree(tv_cmd->tvc_sgl);
5820 +- kfree(tv_cmd->tvc_prot_sgl);
5821 +- kfree(tv_cmd->tvc_upages);
5822 +- }
5823 +-}
5824 +-
5825 + static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
5826 + struct config_item *item, const char *page, size_t count)
5827 + {
5828 +@@ -1898,45 +1978,6 @@ static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
5829 + NULL,
5830 + };
5831 +
5832 +-static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
5833 +- struct se_session *se_sess, void *p)
5834 +-{
5835 +- struct vhost_scsi_cmd *tv_cmd;
5836 +- unsigned int i;
5837 +-
5838 +- for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
5839 +- tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
5840 +-
5841 +- tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
5842 +- sizeof(struct scatterlist),
5843 +- GFP_KERNEL);
5844 +- if (!tv_cmd->tvc_sgl) {
5845 +- pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
5846 +- goto out;
5847 +- }
5848 +-
5849 +- tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
5850 +- sizeof(struct page *),
5851 +- GFP_KERNEL);
5852 +- if (!tv_cmd->tvc_upages) {
5853 +- pr_err("Unable to allocate tv_cmd->tvc_upages\n");
5854 +- goto out;
5855 +- }
5856 +-
5857 +- tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
5858 +- sizeof(struct scatterlist),
5859 +- GFP_KERNEL);
5860 +- if (!tv_cmd->tvc_prot_sgl) {
5861 +- pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
5862 +- goto out;
5863 +- }
5864 +- }
5865 +- return 0;
5866 +-out:
5867 +- vhost_scsi_free_cmd_map_res(se_sess);
5868 +- return -ENOMEM;
5869 +-}
5870 +-
5871 + static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
5872 + const char *name)
5873 + {
5874 +@@ -1960,12 +2001,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
5875 + * struct se_node_acl for the vhost_scsi struct se_portal_group with
5876 + * the SCSI Initiator port name of the passed configfs group 'name'.
5877 + */
5878 +- tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
5879 +- VHOST_SCSI_DEFAULT_TAGS,
5880 +- sizeof(struct vhost_scsi_cmd),
5881 ++ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
5882 + TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
5883 +- (unsigned char *)name, tv_nexus,
5884 +- vhost_scsi_nexus_cb);
5885 ++ (unsigned char *)name, tv_nexus, NULL);
5886 + if (IS_ERR(tv_nexus->tvn_se_sess)) {
5887 + mutex_unlock(&tpg->tv_tpg_mutex);
5888 + kfree(tv_nexus);
5889 +@@ -2015,7 +2053,6 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
5890 + " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
5891 + tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
5892 +
5893 +- vhost_scsi_free_cmd_map_res(se_sess);
5894 + /*
5895 + * Release the SCSI I_T Nexus to the emulated vhost Target Port
5896 + */
5897 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
5898 +index 9ad45e1d27f0f..23e7b2d624511 100644
5899 +--- a/drivers/vhost/vhost.c
5900 ++++ b/drivers/vhost/vhost.c
5901 +@@ -305,6 +305,12 @@ static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
5902 + spin_lock_init(&call_ctx->ctx_lock);
5903 + }
5904 +
5905 ++bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
5906 ++{
5907 ++ return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
5908 ++}
5909 ++EXPORT_SYMBOL_GPL(vhost_vq_is_setup);
5910 ++
5911 + static void vhost_vq_reset(struct vhost_dev *dev,
5912 + struct vhost_virtqueue *vq)
5913 + {
5914 +diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
5915 +index 9032d3c2a9f48..3d30b3da7bcf5 100644
5916 +--- a/drivers/vhost/vhost.h
5917 ++++ b/drivers/vhost/vhost.h
5918 +@@ -190,6 +190,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
5919 + struct vhost_log *log, unsigned int *log_num);
5920 + void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
5921 +
5922 ++bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
5923 + int vhost_vq_init_access(struct vhost_virtqueue *);
5924 + int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
5925 + int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
5926 +diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
5927 +index e36fb1a0ecdbd..19b3f3416d31c 100644
5928 +--- a/drivers/video/fbdev/hyperv_fb.c
5929 ++++ b/drivers/video/fbdev/hyperv_fb.c
5930 +@@ -1092,7 +1092,12 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
5931 + goto err1;
5932 + }
5933 +
5934 +- fb_virt = ioremap(par->mem->start, screen_fb_size);
5935 ++ /*
5936 ++ * Map the VRAM cacheable for performance. This is also required for
5937 ++ * VM Connect to display properly for ARM64 Linux VM, as the host also
5938 ++ * maps the VRAM cacheable.
5939 ++ */
5940 ++ fb_virt = ioremap_cache(par->mem->start, screen_fb_size);
5941 + if (!fb_virt)
5942 + goto err2;
5943 +
5944 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
5945 +index 4507c3d093994..dcafe09be8dca 100644
5946 +--- a/fs/btrfs/file.c
5947 ++++ b/fs/btrfs/file.c
5948 +@@ -452,46 +452,6 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
5949 + }
5950 + }
5951 +
5952 +-static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
5953 +- const u64 start,
5954 +- const u64 len,
5955 +- struct extent_state **cached_state)
5956 +-{
5957 +- u64 search_start = start;
5958 +- const u64 end = start + len - 1;
5959 +-
5960 +- while (search_start < end) {
5961 +- const u64 search_len = end - search_start + 1;
5962 +- struct extent_map *em;
5963 +- u64 em_len;
5964 +- int ret = 0;
5965 +-
5966 +- em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
5967 +- if (IS_ERR(em))
5968 +- return PTR_ERR(em);
5969 +-
5970 +- if (em->block_start != EXTENT_MAP_HOLE)
5971 +- goto next;
5972 +-
5973 +- em_len = em->len;
5974 +- if (em->start < search_start)
5975 +- em_len -= search_start - em->start;
5976 +- if (em_len > search_len)
5977 +- em_len = search_len;
5978 +-
5979 +- ret = set_extent_bit(&inode->io_tree, search_start,
5980 +- search_start + em_len - 1,
5981 +- EXTENT_DELALLOC_NEW,
5982 +- NULL, cached_state, GFP_NOFS);
5983 +-next:
5984 +- search_start = extent_map_end(em);
5985 +- free_extent_map(em);
5986 +- if (ret)
5987 +- return ret;
5988 +- }
5989 +- return 0;
5990 +-}
5991 +-
5992 + /*
5993 + * after copy_from_user, pages need to be dirtied and we need to make
5994 + * sure holes are created between the current EOF and the start of
5995 +@@ -528,23 +488,6 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
5996 + EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
5997 + 0, 0, cached);
5998 +
5999 +- if (!btrfs_is_free_space_inode(inode)) {
6000 +- if (start_pos >= isize &&
6001 +- !(inode->flags & BTRFS_INODE_PREALLOC)) {
6002 +- /*
6003 +- * There can't be any extents following eof in this case
6004 +- * so just set the delalloc new bit for the range
6005 +- * directly.
6006 +- */
6007 +- extra_bits |= EXTENT_DELALLOC_NEW;
6008 +- } else {
6009 +- err = btrfs_find_new_delalloc_bytes(inode, start_pos,
6010 +- num_bytes, cached);
6011 +- if (err)
6012 +- return err;
6013 +- }
6014 +- }
6015 +-
6016 + err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
6017 + extra_bits, cached);
6018 + if (err)
6019 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
6020 +index 11d132bc2679c..4708ea05449b9 100644
6021 +--- a/fs/btrfs/inode.c
6022 ++++ b/fs/btrfs/inode.c
6023 +@@ -2262,11 +2262,69 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
6024 + return 0;
6025 + }
6026 +
6027 ++static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
6028 ++ const u64 start,
6029 ++ const u64 len,
6030 ++ struct extent_state **cached_state)
6031 ++{
6032 ++ u64 search_start = start;
6033 ++ const u64 end = start + len - 1;
6034 ++
6035 ++ while (search_start < end) {
6036 ++ const u64 search_len = end - search_start + 1;
6037 ++ struct extent_map *em;
6038 ++ u64 em_len;
6039 ++ int ret = 0;
6040 ++
6041 ++ em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
6042 ++ if (IS_ERR(em))
6043 ++ return PTR_ERR(em);
6044 ++
6045 ++ if (em->block_start != EXTENT_MAP_HOLE)
6046 ++ goto next;
6047 ++
6048 ++ em_len = em->len;
6049 ++ if (em->start < search_start)
6050 ++ em_len -= search_start - em->start;
6051 ++ if (em_len > search_len)
6052 ++ em_len = search_len;
6053 ++
6054 ++ ret = set_extent_bit(&inode->io_tree, search_start,
6055 ++ search_start + em_len - 1,
6056 ++ EXTENT_DELALLOC_NEW,
6057 ++ NULL, cached_state, GFP_NOFS);
6058 ++next:
6059 ++ search_start = extent_map_end(em);
6060 ++ free_extent_map(em);
6061 ++ if (ret)
6062 ++ return ret;
6063 ++ }
6064 ++ return 0;
6065 ++}
6066 ++
6067 + int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
6068 + unsigned int extra_bits,
6069 + struct extent_state **cached_state)
6070 + {
6071 + WARN_ON(PAGE_ALIGNED(end));
6072 ++
6073 ++ if (start >= i_size_read(&inode->vfs_inode) &&
6074 ++ !(inode->flags & BTRFS_INODE_PREALLOC)) {
6075 ++ /*
6076 ++ * There can't be any extents following eof in this case so just
6077 ++ * set the delalloc new bit for the range directly.
6078 ++ */
6079 ++ extra_bits |= EXTENT_DELALLOC_NEW;
6080 ++ } else {
6081 ++ int ret;
6082 ++
6083 ++ ret = btrfs_find_new_delalloc_bytes(inode, start,
6084 ++ end + 1 - start,
6085 ++ cached_state);
6086 ++ if (ret)
6087 ++ return ret;
6088 ++ }
6089 ++
6090 + return set_extent_delalloc(&inode->io_tree, start, end, extra_bits,
6091 + cached_state);
6092 + }
6093 +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
6094 +index db953cb947bc4..9205a88f2a881 100644
6095 +--- a/fs/btrfs/qgroup.c
6096 ++++ b/fs/btrfs/qgroup.c
6097 +@@ -497,13 +497,13 @@ next2:
6098 + break;
6099 + }
6100 + out:
6101 ++ btrfs_free_path(path);
6102 + fs_info->qgroup_flags |= flags;
6103 + if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
6104 + clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
6105 + else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
6106 + ret >= 0)
6107 + ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
6108 +- btrfs_free_path(path);
6109 +
6110 + if (ret < 0) {
6111 + ulist_free(fs_info->qgroup_ulist);
6112 +@@ -3516,6 +3516,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
6113 + {
6114 + struct btrfs_trans_handle *trans;
6115 + int ret;
6116 ++ bool can_commit = true;
6117 +
6118 + /*
6119 + * We don't want to run flush again and again, so if there is a running
6120 +@@ -3527,6 +3528,20 @@ static int try_flush_qgroup(struct btrfs_root *root)
6121 + return 0;
6122 + }
6123 +
6124 ++ /*
6125 ++ * If current process holds a transaction, we shouldn't flush, as we
6126 ++ * assume all space reservation happens before a transaction handle is
6127 ++ * held.
6128 ++ *
6129 ++ * But there are cases like btrfs_delayed_item_reserve_metadata() where
6130 ++ * we try to reserve space with one transaction handle already held.
6131 ++ * In that case we can't commit transaction, but at least try to end it
6132 ++ * and hope the started data writes can free some space.
6133 ++ */
6134 ++ if (current->journal_info &&
6135 ++ current->journal_info != BTRFS_SEND_TRANS_STUB)
6136 ++ can_commit = false;
6137 ++
6138 + ret = btrfs_start_delalloc_snapshot(root);
6139 + if (ret < 0)
6140 + goto out;
6141 +@@ -3538,7 +3553,10 @@ static int try_flush_qgroup(struct btrfs_root *root)
6142 + goto out;
6143 + }
6144 +
6145 +- ret = btrfs_commit_transaction(trans);
6146 ++ if (can_commit)
6147 ++ ret = btrfs_commit_transaction(trans);
6148 ++ else
6149 ++ ret = btrfs_end_transaction(trans);
6150 + out:
6151 + clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
6152 + wake_up(&root->qgroup_flush_wait);
6153 +diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
6154 +index 894a63a92236e..a6539500a8828 100644
6155 +--- a/fs/btrfs/tests/inode-tests.c
6156 ++++ b/fs/btrfs/tests/inode-tests.c
6157 +@@ -986,7 +986,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
6158 + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
6159 + BTRFS_MAX_EXTENT_SIZE >> 1,
6160 + (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
6161 +- EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
6162 ++ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
6163 ++ EXTENT_UPTODATE, 0, 0, NULL);
6164 + if (ret) {
6165 + test_err("clear_extent_bit returned %d", ret);
6166 + goto out;
6167 +@@ -1053,7 +1054,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
6168 + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
6169 + BTRFS_MAX_EXTENT_SIZE + sectorsize,
6170 + BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
6171 +- EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
6172 ++ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
6173 ++ EXTENT_UPTODATE, 0, 0, NULL);
6174 + if (ret) {
6175 + test_err("clear_extent_bit returned %d", ret);
6176 + goto out;
6177 +@@ -1085,7 +1087,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
6178 +
6179 + /* Empty */
6180 + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
6181 +- EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
6182 ++ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
6183 ++ EXTENT_UPTODATE, 0, 0, NULL);
6184 + if (ret) {
6185 + test_err("clear_extent_bit returned %d", ret);
6186 + goto out;
6187 +@@ -1100,7 +1103,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
6188 + out:
6189 + if (ret)
6190 + clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
6191 +- EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
6192 ++ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
6193 ++ EXTENT_UPTODATE, 0, 0, NULL);
6194 + iput(inode);
6195 + btrfs_free_dummy_root(root);
6196 + btrfs_free_dummy_fs_info(fs_info);
6197 +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
6198 +index 8784b74f5232e..ea2bb4cb58909 100644
6199 +--- a/fs/btrfs/tree-checker.c
6200 ++++ b/fs/btrfs/tree-checker.c
6201 +@@ -1068,6 +1068,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
6202 + "invalid root item size, have %u expect %zu or %u",
6203 + btrfs_item_size_nr(leaf, slot), sizeof(ri),
6204 + btrfs_legacy_root_item_size());
6205 ++ return -EUCLEAN;
6206 + }
6207 +
6208 + /*
6209 +@@ -1423,6 +1424,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
6210 + "invalid item size, have %u expect aligned to %zu for key type %u",
6211 + btrfs_item_size_nr(leaf, slot),
6212 + sizeof(*dref), key->type);
6213 ++ return -EUCLEAN;
6214 + }
6215 + if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
6216 + generic_err(leaf, slot,
6217 +@@ -1451,6 +1453,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
6218 + extent_err(leaf, slot,
6219 + "invalid extent data backref offset, have %llu expect aligned to %u",
6220 + offset, leaf->fs_info->sectorsize);
6221 ++ return -EUCLEAN;
6222 + }
6223 + }
6224 + return 0;
6225 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
6226 +index 578bbe544c8b5..7c703f9c3eda7 100644
6227 +--- a/fs/btrfs/volumes.c
6228 ++++ b/fs/btrfs/volumes.c
6229 +@@ -941,7 +941,13 @@ static noinline struct btrfs_device *device_list_add(const char *path,
6230 + if (device->bdev != path_bdev) {
6231 + bdput(path_bdev);
6232 + mutex_unlock(&fs_devices->device_list_mutex);
6233 +- btrfs_warn_in_rcu(device->fs_info,
6234 ++ /*
6235 ++ * device->fs_info may not be reliable here, so
6236 ++ * pass in a NULL instead. This avoids a
6237 ++ * possible use-after-free when the fs_info and
6238 ++ * fs_info->sb are already torn down.
6239 ++ */
6240 ++ btrfs_warn_in_rcu(NULL,
6241 + "duplicate device %s devid %llu generation %llu scanned by %s (%d)",
6242 + path, devid, found_transid,
6243 + current->comm,
6244 +diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
6245 +index 23b21e9436528..ef4784e72b1d5 100644
6246 +--- a/fs/cifs/cifsacl.c
6247 ++++ b/fs/cifs/cifsacl.c
6248 +@@ -1266,6 +1266,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
6249 + cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
6250 + } else if (mode_from_special_sid) {
6251 + rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, true);
6252 ++ kfree(pntsd);
6253 + } else {
6254 + /* get approximated mode from ACL */
6255 + rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, false);
6256 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
6257 +index e2e53652193e6..475c2b9e799d8 100644
6258 +--- a/fs/cifs/smb2ops.c
6259 ++++ b/fs/cifs/smb2ops.c
6260 +@@ -262,7 +262,7 @@ smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
6261 + }
6262 +
6263 + static struct mid_q_entry *
6264 +-smb2_find_mid(struct TCP_Server_Info *server, char *buf)
6265 ++__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
6266 + {
6267 + struct mid_q_entry *mid;
6268 + struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
6269 +@@ -279,6 +279,10 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
6270 + (mid->mid_state == MID_REQUEST_SUBMITTED) &&
6271 + (mid->command == shdr->Command)) {
6272 + kref_get(&mid->refcount);
6273 ++ if (dequeue) {
6274 ++ list_del_init(&mid->qhead);
6275 ++ mid->mid_flags |= MID_DELETED;
6276 ++ }
6277 + spin_unlock(&GlobalMid_Lock);
6278 + return mid;
6279 + }
6280 +@@ -287,6 +291,18 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
6281 + return NULL;
6282 + }
6283 +
6284 ++static struct mid_q_entry *
6285 ++smb2_find_mid(struct TCP_Server_Info *server, char *buf)
6286 ++{
6287 ++ return __smb2_find_mid(server, buf, false);
6288 ++}
6289 ++
6290 ++static struct mid_q_entry *
6291 ++smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
6292 ++{
6293 ++ return __smb2_find_mid(server, buf, true);
6294 ++}
6295 ++
6296 + static void
6297 + smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
6298 + {
6299 +@@ -4212,7 +4228,8 @@ init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
6300 + static int
6301 + handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
6302 + char *buf, unsigned int buf_len, struct page **pages,
6303 +- unsigned int npages, unsigned int page_data_size)
6304 ++ unsigned int npages, unsigned int page_data_size,
6305 ++ bool is_offloaded)
6306 + {
6307 + unsigned int data_offset;
6308 + unsigned int data_len;
6309 +@@ -4234,7 +4251,8 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
6310 +
6311 + if (server->ops->is_session_expired &&
6312 + server->ops->is_session_expired(buf)) {
6313 +- cifs_reconnect(server);
6314 ++ if (!is_offloaded)
6315 ++ cifs_reconnect(server);
6316 + return -1;
6317 + }
6318 +
6319 +@@ -4258,7 +4276,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
6320 + cifs_dbg(FYI, "%s: server returned error %d\n",
6321 + __func__, rdata->result);
6322 + /* normal error on read response */
6323 +- dequeue_mid(mid, false);
6324 ++ if (is_offloaded)
6325 ++ mid->mid_state = MID_RESPONSE_RECEIVED;
6326 ++ else
6327 ++ dequeue_mid(mid, false);
6328 + return 0;
6329 + }
6330 +
6331 +@@ -4282,7 +4303,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
6332 + cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
6333 + __func__, data_offset);
6334 + rdata->result = -EIO;
6335 +- dequeue_mid(mid, rdata->result);
6336 ++ if (is_offloaded)
6337 ++ mid->mid_state = MID_RESPONSE_MALFORMED;
6338 ++ else
6339 ++ dequeue_mid(mid, rdata->result);
6340 + return 0;
6341 + }
6342 +
6343 +@@ -4298,21 +4322,30 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
6344 + cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
6345 + __func__, data_offset);
6346 + rdata->result = -EIO;
6347 +- dequeue_mid(mid, rdata->result);
6348 ++ if (is_offloaded)
6349 ++ mid->mid_state = MID_RESPONSE_MALFORMED;
6350 ++ else
6351 ++ dequeue_mid(mid, rdata->result);
6352 + return 0;
6353 + }
6354 +
6355 + if (data_len > page_data_size - pad_len) {
6356 + /* data_len is corrupt -- discard frame */
6357 + rdata->result = -EIO;
6358 +- dequeue_mid(mid, rdata->result);
6359 ++ if (is_offloaded)
6360 ++ mid->mid_state = MID_RESPONSE_MALFORMED;
6361 ++ else
6362 ++ dequeue_mid(mid, rdata->result);
6363 + return 0;
6364 + }
6365 +
6366 + rdata->result = init_read_bvec(pages, npages, page_data_size,
6367 + cur_off, &bvec);
6368 + if (rdata->result != 0) {
6369 +- dequeue_mid(mid, rdata->result);
6370 ++ if (is_offloaded)
6371 ++ mid->mid_state = MID_RESPONSE_MALFORMED;
6372 ++ else
6373 ++ dequeue_mid(mid, rdata->result);
6374 + return 0;
6375 + }
6376 +
6377 +@@ -4327,7 +4360,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
6378 + /* read response payload cannot be in both buf and pages */
6379 + WARN_ONCE(1, "buf can not contain only a part of read data");
6380 + rdata->result = -EIO;
6381 +- dequeue_mid(mid, rdata->result);
6382 ++ if (is_offloaded)
6383 ++ mid->mid_state = MID_RESPONSE_MALFORMED;
6384 ++ else
6385 ++ dequeue_mid(mid, rdata->result);
6386 + return 0;
6387 + }
6388 +
6389 +@@ -4338,7 +4374,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
6390 + if (length < 0)
6391 + return length;
6392 +
6393 +- dequeue_mid(mid, false);
6394 ++ if (is_offloaded)
6395 ++ mid->mid_state = MID_RESPONSE_RECEIVED;
6396 ++ else
6397 ++ dequeue_mid(mid, false);
6398 + return length;
6399 + }
6400 +
6401 +@@ -4367,15 +4406,34 @@ static void smb2_decrypt_offload(struct work_struct *work)
6402 + }
6403 +
6404 + dw->server->lstrp = jiffies;
6405 +- mid = smb2_find_mid(dw->server, dw->buf);
6406 ++ mid = smb2_find_dequeue_mid(dw->server, dw->buf);
6407 + if (mid == NULL)
6408 + cifs_dbg(FYI, "mid not found\n");
6409 + else {
6410 + mid->decrypted = true;
6411 + rc = handle_read_data(dw->server, mid, dw->buf,
6412 + dw->server->vals->read_rsp_size,
6413 +- dw->ppages, dw->npages, dw->len);
6414 +- mid->callback(mid);
6415 ++ dw->ppages, dw->npages, dw->len,
6416 ++ true);
6417 ++ if (rc >= 0) {
6418 ++#ifdef CONFIG_CIFS_STATS2
6419 ++ mid->when_received = jiffies;
6420 ++#endif
6421 ++ mid->callback(mid);
6422 ++ } else {
6423 ++ spin_lock(&GlobalMid_Lock);
6424 ++ if (dw->server->tcpStatus == CifsNeedReconnect) {
6425 ++ mid->mid_state = MID_RETRY_NEEDED;
6426 ++ spin_unlock(&GlobalMid_Lock);
6427 ++ mid->callback(mid);
6428 ++ } else {
6429 ++ mid->mid_state = MID_REQUEST_SUBMITTED;
6430 ++ mid->mid_flags &= ~(MID_DELETED);
6431 ++ list_add_tail(&mid->qhead,
6432 ++ &dw->server->pending_mid_q);
6433 ++ spin_unlock(&GlobalMid_Lock);
6434 ++ }
6435 ++ }
6436 + cifs_mid_q_entry_release(mid);
6437 + }
6438 +
6439 +@@ -4478,7 +4536,7 @@ non_offloaded_decrypt:
6440 + (*mid)->decrypted = true;
6441 + rc = handle_read_data(server, *mid, buf,
6442 + server->vals->read_rsp_size,
6443 +- pages, npages, len);
6444 ++ pages, npages, len, false);
6445 + }
6446 +
6447 + free_pages:
6448 +@@ -4621,7 +4679,7 @@ smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
6449 + char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
6450 +
6451 + return handle_read_data(server, mid, buf, server->pdu_size,
6452 +- NULL, 0, 0);
6453 ++ NULL, 0, 0, false);
6454 + }
6455 +
6456 + static int
6457 +diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
6458 +index 96c0c86f3fffe..0297ad95eb5cc 100644
6459 +--- a/fs/efivarfs/inode.c
6460 ++++ b/fs/efivarfs/inode.c
6461 +@@ -7,6 +7,7 @@
6462 + #include <linux/efi.h>
6463 + #include <linux/fs.h>
6464 + #include <linux/ctype.h>
6465 ++#include <linux/kmemleak.h>
6466 + #include <linux/slab.h>
6467 + #include <linux/uuid.h>
6468 +
6469 +@@ -103,6 +104,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
6470 + var->var.VariableName[i] = '\0';
6471 +
6472 + inode->i_private = var;
6473 ++ kmemleak_ignore(var);
6474 +
6475 + err = efivar_entry_add(var, &efivarfs_list);
6476 + if (err)
6477 +diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
6478 +index f943fd0b0699c..15880a68faadc 100644
6479 +--- a/fs/efivarfs/super.c
6480 ++++ b/fs/efivarfs/super.c
6481 +@@ -21,7 +21,6 @@ LIST_HEAD(efivarfs_list);
6482 + static void efivarfs_evict_inode(struct inode *inode)
6483 + {
6484 + clear_inode(inode);
6485 +- kfree(inode->i_private);
6486 + }
6487 +
6488 + static const struct super_operations efivarfs_ops = {
6489 +diff --git a/fs/io_uring.c b/fs/io_uring.c
6490 +index e74a56f6915c0..6d729a278535e 100644
6491 +--- a/fs/io_uring.c
6492 ++++ b/fs/io_uring.c
6493 +@@ -200,6 +200,7 @@ struct fixed_file_ref_node {
6494 + struct list_head file_list;
6495 + struct fixed_file_data *file_data;
6496 + struct llist_node llist;
6497 ++ bool done;
6498 + };
6499 +
6500 + struct fixed_file_data {
6501 +@@ -435,6 +436,7 @@ struct io_sr_msg {
6502 + struct io_open {
6503 + struct file *file;
6504 + int dfd;
6505 ++ bool ignore_nonblock;
6506 + struct filename *filename;
6507 + struct open_how how;
6508 + unsigned long nofile;
6509 +@@ -2990,7 +2992,7 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
6510 + rw->free_iovec = NULL;
6511 + rw->bytes_done = 0;
6512 + /* can only be fixed buffers, no need to do anything */
6513 +- if (iter->type == ITER_BVEC)
6514 ++ if (iov_iter_is_bvec(iter))
6515 + return;
6516 + if (!iovec) {
6517 + unsigned iov_off = 0;
6518 +@@ -3590,6 +3592,7 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
6519 + return ret;
6520 + }
6521 + req->open.nofile = rlimit(RLIMIT_NOFILE);
6522 ++ req->open.ignore_nonblock = false;
6523 + req->flags |= REQ_F_NEED_CLEANUP;
6524 + return 0;
6525 + }
6526 +@@ -3637,7 +3640,7 @@ static int io_openat2(struct io_kiocb *req, bool force_nonblock)
6527 + struct file *file;
6528 + int ret;
6529 +
6530 +- if (force_nonblock)
6531 ++ if (force_nonblock && !req->open.ignore_nonblock)
6532 + return -EAGAIN;
6533 +
6534 + ret = build_open_flags(&req->open.how, &op);
6535 +@@ -3652,6 +3655,21 @@ static int io_openat2(struct io_kiocb *req, bool force_nonblock)
6536 + if (IS_ERR(file)) {
6537 + put_unused_fd(ret);
6538 + ret = PTR_ERR(file);
6539 ++ /*
6540 ++ * A work-around to ensure that /proc/self works that way
6541 ++ * that it should - if we get -EOPNOTSUPP back, then assume
6542 ++ * that proc_self_get_link() failed us because we're in async
6543 ++ * context. We should be safe to retry this from the task
6544 ++ * itself with force_nonblock == false set, as it should not
6545 ++ * block on lookup. Would be nice to know this upfront and
6546 ++ * avoid the async dance, but doesn't seem feasible.
6547 ++ */
6548 ++ if (ret == -EOPNOTSUPP && io_wq_current_is_worker()) {
6549 ++ req->open.ignore_nonblock = true;
6550 ++ refcount_inc(&req->refs);
6551 ++ io_req_task_queue(req);
6552 ++ return 0;
6553 ++ }
6554 + } else {
6555 + fsnotify_open(file);
6556 + fd_install(ret, file);
6557 +@@ -6854,9 +6872,8 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
6558 + return -ENXIO;
6559 +
6560 + spin_lock(&data->lock);
6561 +- if (!list_empty(&data->ref_list))
6562 +- ref_node = list_first_entry(&data->ref_list,
6563 +- struct fixed_file_ref_node, node);
6564 ++ ref_node = container_of(data->cur_refs, struct fixed_file_ref_node,
6565 ++ refs);
6566 + spin_unlock(&data->lock);
6567 + if (ref_node)
6568 + percpu_ref_kill(&ref_node->refs);
6569 +@@ -7107,10 +7124,6 @@ static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
6570 + kfree(pfile);
6571 + }
6572 +
6573 +- spin_lock(&file_data->lock);
6574 +- list_del(&ref_node->node);
6575 +- spin_unlock(&file_data->lock);
6576 +-
6577 + percpu_ref_exit(&ref_node->refs);
6578 + kfree(ref_node);
6579 + percpu_ref_put(&file_data->refs);
6580 +@@ -7137,17 +7150,33 @@ static void io_file_put_work(struct work_struct *work)
6581 + static void io_file_data_ref_zero(struct percpu_ref *ref)
6582 + {
6583 + struct fixed_file_ref_node *ref_node;
6584 ++ struct fixed_file_data *data;
6585 + struct io_ring_ctx *ctx;
6586 +- bool first_add;
6587 ++ bool first_add = false;
6588 + int delay = HZ;
6589 +
6590 + ref_node = container_of(ref, struct fixed_file_ref_node, refs);
6591 +- ctx = ref_node->file_data->ctx;
6592 ++ data = ref_node->file_data;
6593 ++ ctx = data->ctx;
6594 ++
6595 ++ spin_lock(&data->lock);
6596 ++ ref_node->done = true;
6597 +
6598 +- if (percpu_ref_is_dying(&ctx->file_data->refs))
6599 ++ while (!list_empty(&data->ref_list)) {
6600 ++ ref_node = list_first_entry(&data->ref_list,
6601 ++ struct fixed_file_ref_node, node);
6602 ++ /* recycle ref nodes in order */
6603 ++ if (!ref_node->done)
6604 ++ break;
6605 ++ list_del(&ref_node->node);
6606 ++ first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
6607 ++ }
6608 ++ spin_unlock(&data->lock);
6609 ++
6610 ++
6611 ++ if (percpu_ref_is_dying(&data->refs))
6612 + delay = 0;
6613 +
6614 +- first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
6615 + if (!delay)
6616 + mod_delayed_work(system_wq, &ctx->file_put_work, 0);
6617 + else if (first_add)
6618 +@@ -7171,6 +7200,7 @@ static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
6619 + INIT_LIST_HEAD(&ref_node->node);
6620 + INIT_LIST_HEAD(&ref_node->file_list);
6621 + ref_node->file_data = ctx->file_data;
6622 ++ ref_node->done = false;
6623 + return ref_node;
6624 + }
6625 +
6626 +@@ -7298,7 +7328,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
6627 +
6628 + ctx->file_data->cur_refs = &ref_node->refs;
6629 + spin_lock(&ctx->file_data->lock);
6630 +- list_add(&ref_node->node, &ctx->file_data->ref_list);
6631 ++ list_add_tail(&ref_node->node, &ctx->file_data->ref_list);
6632 + spin_unlock(&ctx->file_data->lock);
6633 + percpu_ref_get(&ctx->file_data->refs);
6634 + return ret;
6635 +@@ -7443,7 +7473,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
6636 + if (needs_switch) {
6637 + percpu_ref_kill(data->cur_refs);
6638 + spin_lock(&data->lock);
6639 +- list_add(&ref_node->node, &data->ref_list);
6640 ++ list_add_tail(&ref_node->node, &data->ref_list);
6641 + data->cur_refs = &ref_node->refs;
6642 + spin_unlock(&data->lock);
6643 + percpu_ref_get(&ctx->file_data->refs);
6644 +@@ -8877,14 +8907,16 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
6645 + * to a power-of-two, if it isn't already. We do NOT impose
6646 + * any cq vs sq ring sizing.
6647 + */
6648 +- p->cq_entries = roundup_pow_of_two(p->cq_entries);
6649 +- if (p->cq_entries < p->sq_entries)
6650 ++ if (!p->cq_entries)
6651 + return -EINVAL;
6652 + if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
6653 + if (!(p->flags & IORING_SETUP_CLAMP))
6654 + return -EINVAL;
6655 + p->cq_entries = IORING_MAX_CQ_ENTRIES;
6656 + }
6657 ++ p->cq_entries = roundup_pow_of_two(p->cq_entries);
6658 ++ if (p->cq_entries < p->sq_entries)
6659 ++ return -EINVAL;
6660 + } else {
6661 + p->cq_entries = 2 * p->sq_entries;
6662 + }
6663 +diff --git a/fs/proc/self.c b/fs/proc/self.c
6664 +index 72cd69bcaf4ad..cc71ce3466dc0 100644
6665 +--- a/fs/proc/self.c
6666 ++++ b/fs/proc/self.c
6667 +@@ -16,6 +16,13 @@ static const char *proc_self_get_link(struct dentry *dentry,
6668 + pid_t tgid = task_tgid_nr_ns(current, ns);
6669 + char *name;
6670 +
6671 ++ /*
6672 ++ * Not currently supported. Once we can inherit all of struct pid,
6673 ++ * we can allow this.
6674 ++ */
6675 ++ if (current->flags & PF_KTHREAD)
6676 ++ return ERR_PTR(-EOPNOTSUPP);
6677 ++
6678 + if (!tgid)
6679 + return ERR_PTR(-ENOENT);
6680 + /* max length of unsigned int in decimal + NULL term */
6681 +diff --git a/include/kunit/test.h b/include/kunit/test.h
6682 +index 59f3144f009a5..b68ba33c16937 100644
6683 +--- a/include/kunit/test.h
6684 ++++ b/include/kunit/test.h
6685 +@@ -1064,7 +1064,7 @@ do { \
6686 + KUNIT_ASSERTION(test, \
6687 + strcmp(__left, __right) op 0, \
6688 + kunit_binary_str_assert, \
6689 +- KUNIT_INIT_BINARY_ASSERT_STRUCT(test, \
6690 ++ KUNIT_INIT_BINARY_STR_ASSERT_STRUCT(test, \
6691 + assert_type, \
6692 + #op, \
6693 + #left, \
6694 +diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
6695 +index 5968df82b9912..41a1bab98b7e1 100644
6696 +--- a/include/linux/firmware/xlnx-zynqmp.h
6697 ++++ b/include/linux/firmware/xlnx-zynqmp.h
6698 +@@ -50,10 +50,6 @@
6699 + #define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U
6700 + #define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U
6701 +
6702 +-/* Feature check status */
6703 +-#define PM_FEATURE_INVALID -1
6704 +-#define PM_FEATURE_UNCHECKED 0
6705 +-
6706 + /*
6707 + * Firmware FPGA Manager flags
6708 + * XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration
6709 +diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
6710 +index 0cb5fe3afd164..cd5aa875245b4 100644
6711 +--- a/include/linux/pgtable.h
6712 ++++ b/include/linux/pgtable.h
6713 +@@ -1399,6 +1399,19 @@ typedef unsigned int pgtbl_mod_mask;
6714 +
6715 + #endif /* !__ASSEMBLY__ */
6716 +
6717 ++#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
6718 ++#ifdef CONFIG_PHYS_ADDR_T_64BIT
6719 ++/*
6720 ++ * ZSMALLOC needs to know the highest PFN on 32-bit architectures
6721 ++ * with physical address space extension, but falls back to
6722 ++ * BITS_PER_LONG otherwise.
6723 ++ */
6724 ++#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
6725 ++#else
6726 ++#define MAX_POSSIBLE_PHYSMEM_BITS 32
6727 ++#endif
6728 ++#endif
6729 ++
6730 + #ifndef has_transparent_hugepage
6731 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6732 + #define has_transparent_hugepage() 1
6733 +diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
6734 +index c59999ce044e5..240dce553a0bd 100644
6735 +--- a/include/linux/platform_data/ti-sysc.h
6736 ++++ b/include/linux/platform_data/ti-sysc.h
6737 +@@ -50,6 +50,7 @@ struct sysc_regbits {
6738 + s8 emufree_shift;
6739 + };
6740 +
6741 ++#define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25)
6742 + #define SYSC_MODULE_QUIRK_PRUSS BIT(24)
6743 + #define SYSC_MODULE_QUIRK_DSS_RESET BIT(23)
6744 + #define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22)
6745 +diff --git a/include/net/bonding.h b/include/net/bonding.h
6746 +index 7d132cc1e5848..d9d0ff3b0ad32 100644
6747 +--- a/include/net/bonding.h
6748 ++++ b/include/net/bonding.h
6749 +@@ -185,6 +185,11 @@ struct slave {
6750 + struct rtnl_link_stats64 slave_stats;
6751 + };
6752 +
6753 ++static inline struct slave *to_slave(struct kobject *kobj)
6754 ++{
6755 ++ return container_of(kobj, struct slave, kobj);
6756 ++}
6757 ++
6758 + struct bond_up_slave {
6759 + unsigned int count;
6760 + struct rcu_head rcu;
6761 +@@ -750,6 +755,9 @@ extern struct bond_parm_tbl ad_select_tbl[];
6762 + /* exported from bond_netlink.c */
6763 + extern struct rtnl_link_ops bond_link_ops;
6764 +
6765 ++/* exported from bond_sysfs_slave.c */
6766 ++extern const struct sysfs_ops slave_sysfs_ops;
6767 ++
6768 + static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
6769 + {
6770 + atomic_long_inc(&dev->tx_dropped);
6771 +diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
6772 +index c25fb86ffae95..b3bbd10eb3f07 100644
6773 +--- a/include/scsi/libiscsi.h
6774 ++++ b/include/scsi/libiscsi.h
6775 +@@ -132,6 +132,9 @@ struct iscsi_task {
6776 + void *dd_data; /* driver/transport data */
6777 + };
6778 +
6779 ++/* invalid scsi_task pointer */
6780 ++#define INVALID_SCSI_TASK (struct iscsi_task *)-1l
6781 ++
6782 + static inline int iscsi_task_has_unsol_data(struct iscsi_task *task)
6783 + {
6784 + return task->unsol_r2t.data_length > task->unsol_r2t.sent;
6785 +diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
6786 +index e7cbccc7c14cc..57d795365987d 100644
6787 +--- a/include/trace/events/writeback.h
6788 ++++ b/include/trace/events/writeback.h
6789 +@@ -190,7 +190,7 @@ TRACE_EVENT(inode_foreign_history,
6790 + ),
6791 +
6792 + TP_fast_assign(
6793 +- strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
6794 ++ strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
6795 + __entry->ino = inode->i_ino;
6796 + __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
6797 + __entry->history = history;
6798 +@@ -219,7 +219,7 @@ TRACE_EVENT(inode_switch_wbs,
6799 + ),
6800 +
6801 + TP_fast_assign(
6802 +- strncpy(__entry->name, bdi_dev_name(old_wb->bdi), 32);
6803 ++ strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
6804 + __entry->ino = inode->i_ino;
6805 + __entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
6806 + __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
6807 +@@ -252,7 +252,7 @@ TRACE_EVENT(track_foreign_dirty,
6808 + struct address_space *mapping = page_mapping(page);
6809 + struct inode *inode = mapping ? mapping->host : NULL;
6810 +
6811 +- strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
6812 ++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
6813 + __entry->bdi_id = wb->bdi->id;
6814 + __entry->ino = inode ? inode->i_ino : 0;
6815 + __entry->memcg_id = wb->memcg_css->id;
6816 +@@ -285,7 +285,7 @@ TRACE_EVENT(flush_foreign,
6817 + ),
6818 +
6819 + TP_fast_assign(
6820 +- strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
6821 ++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
6822 + __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
6823 + __entry->frn_bdi_id = frn_bdi_id;
6824 + __entry->frn_memcg_id = frn_memcg_id;
6825 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
6826 +index 3eb35ad1b5241..04134a242f3d5 100644
6827 +--- a/kernel/locking/lockdep.c
6828 ++++ b/kernel/locking/lockdep.c
6829 +@@ -108,19 +108,21 @@ static inline void lockdep_lock(void)
6830 + {
6831 + DEBUG_LOCKS_WARN_ON(!irqs_disabled());
6832 +
6833 ++ __this_cpu_inc(lockdep_recursion);
6834 + arch_spin_lock(&__lock);
6835 + __owner = current;
6836 +- __this_cpu_inc(lockdep_recursion);
6837 + }
6838 +
6839 + static inline void lockdep_unlock(void)
6840 + {
6841 ++ DEBUG_LOCKS_WARN_ON(!irqs_disabled());
6842 ++
6843 + if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
6844 + return;
6845 +
6846 +- __this_cpu_dec(lockdep_recursion);
6847 + __owner = NULL;
6848 + arch_spin_unlock(&__lock);
6849 ++ __this_cpu_dec(lockdep_recursion);
6850 + }
6851 +
6852 + static inline bool lockdep_assert_locked(void)
6853 +diff --git a/mm/filemap.c b/mm/filemap.c
6854 +index 6024d15998a43..abc1a1dcce97b 100644
6855 +--- a/mm/filemap.c
6856 ++++ b/mm/filemap.c
6857 +@@ -1464,11 +1464,19 @@ void end_page_writeback(struct page *page)
6858 + rotate_reclaimable_page(page);
6859 + }
6860 +
6861 ++ /*
6862 ++ * Writeback does not hold a page reference of its own, relying
6863 ++ * on truncation to wait for the clearing of PG_writeback.
6864 ++ * But here we must make sure that the page is not freed and
6865 ++ * reused before the wake_up_page().
6866 ++ */
6867 ++ get_page(page);
6868 + if (!test_clear_page_writeback(page))
6869 + BUG();
6870 +
6871 + smp_mb__after_atomic();
6872 + wake_up_page(page, PG_writeback);
6873 ++ put_page(page);
6874 + }
6875 + EXPORT_SYMBOL(end_page_writeback);
6876 +
6877 +diff --git a/mm/page-writeback.c b/mm/page-writeback.c
6878 +index 4e4ddd67b71e5..a28dcf672e81a 100644
6879 +--- a/mm/page-writeback.c
6880 ++++ b/mm/page-writeback.c
6881 +@@ -2754,12 +2754,6 @@ int test_clear_page_writeback(struct page *page)
6882 + } else {
6883 + ret = TestClearPageWriteback(page);
6884 + }
6885 +- /*
6886 +- * NOTE: Page might be free now! Writeback doesn't hold a page
6887 +- * reference on its own, it relies on truncation to wait for
6888 +- * the clearing of PG_writeback. The below can only access
6889 +- * page state that is static across allocation cycles.
6890 +- */
6891 + if (ret) {
6892 + dec_lruvec_state(lruvec, NR_WRITEBACK);
6893 + dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
6894 +diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
6895 +index a67b2b0914478..c0ca5fbe5b081 100644
6896 +--- a/net/batman-adv/log.c
6897 ++++ b/net/batman-adv/log.c
6898 +@@ -180,6 +180,7 @@ static const struct file_operations batadv_log_fops = {
6899 + .read = batadv_log_read,
6900 + .poll = batadv_log_poll,
6901 + .llseek = no_llseek,
6902 ++ .owner = THIS_MODULE,
6903 + };
6904 +
6905 + /**
6906 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
6907 +index 86a23e4a6a50f..b87140a1fa284 100644
6908 +--- a/net/ipv4/fib_frontend.c
6909 ++++ b/net/ipv4/fib_frontend.c
6910 +@@ -696,7 +696,7 @@ int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
6911 + cfg->fc_gw4 = *((__be32 *)via->rtvia_addr);
6912 + break;
6913 + case AF_INET6:
6914 +-#ifdef CONFIG_IPV6
6915 ++#if IS_ENABLED(CONFIG_IPV6)
6916 + if (alen != sizeof(struct in6_addr)) {
6917 + NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_VIA");
6918 + return -EINVAL;
6919 +diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
6920 +index aa898014ad12f..03c1a39c312a8 100644
6921 +--- a/tools/perf/util/dwarf-aux.c
6922 ++++ b/tools/perf/util/dwarf-aux.c
6923 +@@ -373,6 +373,7 @@ bool die_is_func_def(Dwarf_Die *dw_die)
6924 + int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
6925 + {
6926 + Dwarf_Addr base, end;
6927 ++ Dwarf_Attribute attr;
6928 +
6929 + if (!addr)
6930 + return -EINVAL;
6931 +@@ -380,6 +381,13 @@ int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
6932 + if (dwarf_entrypc(dw_die, addr) == 0)
6933 + return 0;
6934 +
6935 ++ /*
6936 ++ * Since the dwarf_ranges() will return 0 if there is no
6937 ++ * DW_AT_ranges attribute, we should check it first.
6938 ++ */
6939 ++ if (!dwarf_attr(dw_die, DW_AT_ranges, &attr))
6940 ++ return -ENOENT;
6941 ++
6942 + return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
6943 + }
6944 +
6945 +diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
6946 +index 493ec372fdec4..f2709879bad96 100644
6947 +--- a/tools/perf/util/stat-display.c
6948 ++++ b/tools/perf/util/stat-display.c
6949 +@@ -324,13 +324,10 @@ static int first_shadow_cpu(struct perf_stat_config *config,
6950 + struct evlist *evlist = evsel->evlist;
6951 + int i;
6952 +
6953 +- if (!config->aggr_get_id)
6954 +- return 0;
6955 +-
6956 + if (config->aggr_mode == AGGR_NONE)
6957 + return id;
6958 +
6959 +- if (config->aggr_mode == AGGR_GLOBAL)
6960 ++ if (!config->aggr_get_id)
6961 + return 0;
6962 +
6963 + for (i = 0; i < evsel__nr_cpus(evsel); i++) {
6964 +diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
6965 +index 89b390623b63d..54ca751a2b3b3 100644
6966 +--- a/tools/perf/util/synthetic-events.c
6967 ++++ b/tools/perf/util/synthetic-events.c
6968 +@@ -563,6 +563,9 @@ int perf_event__synthesize_cgroups(struct perf_tool *tool,
6969 + char cgrp_root[PATH_MAX];
6970 + size_t mount_len; /* length of mount point in the path */
6971 +
6972 ++ if (!tool || !tool->cgroup_events)
6973 ++ return 0;
6974 ++
6975 + if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
6976 + pr_debug("cannot find cgroup mount point\n");
6977 + return -1;