From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 10 Feb 2021 09:53:26
Message-Id: 1612950791.112ba4e426a6455dc1bc2c655f07c18dc3e887ac.alicef@gentoo
commit: 112ba4e426a6455dc1bc2c655f07c18dc3e887ac
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 10 09:53:05 2021 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Feb 10 09:53:11 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=112ba4e4

Linux patch 5.4.97

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

0000_README | 4 +
1096_linux-5.4.97.patch | 2250 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2254 insertions(+)

diff --git a/0000_README b/0000_README
index 8aa848a..1182cab 100644
--- a/0000_README
+++ b/0000_README
@@ -427,6 +427,10 @@ Patch: 1095_linux-5.4.96.patch
 From: http://www.kernel.org
 Desc: Linux 5.4.96

+Patch: 1096_linux-5.4.97.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.97
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1096_linux-5.4.97.patch b/1096_linux-5.4.97.patch
new file mode 100644
index 0000000..eefa48f
--- /dev/null
+++ b/1096_linux-5.4.97.patch
@@ -0,0 +1,2250 @@
+diff --git a/Makefile b/Makefile
+index 7a47a2594f957..032751f6be0c1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 96
++SUBLEVEL = 97
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+@@ -920,12 +920,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
+ # change __FILE__ to the relative path from the srctree
+ KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+
+-# ensure -fcf-protection is disabled when using retpoline as it is
+-# incompatible with -mindirect-branch=thunk-extern
+-ifdef CONFIG_RETPOLINE
+-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+-endif
+-
+ include scripts/Makefile.kasan
+ include scripts/Makefile.extrawarn
+ include scripts/Makefile.ubsan
+diff --git a/arch/arm/boot/dts/sun7i-a20-bananapro.dts b/arch/arm/boot/dts/sun7i-a20-bananapro.dts
+index 01ccff756996d..5740f9442705c 100644
+--- a/arch/arm/boot/dts/sun7i-a20-bananapro.dts
++++ b/arch/arm/boot/dts/sun7i-a20-bananapro.dts
+@@ -110,7 +110,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac_rgmii_pins>;
+ phy-handle = <&phy1>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
+ phy-supply = <&reg_gmac_3v3>;
+ status = "okay";
+ };
+diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
+index 8b81a17f675d9..e17ec92b90dd8 100644
+--- a/arch/arm/mach-footbridge/dc21285.c
++++ b/arch/arm/mach-footbridge/dc21285.c
+@@ -66,15 +66,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ if (addr)
+ switch (size) {
+ case 1:
+- asm("ldrb %0, [%1, %2]"
++ asm volatile("ldrb %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ case 2:
+- asm("ldrh %0, [%1, %2]"
++ asm volatile("ldrh %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ case 4:
+- asm("ldr %0, [%1, %2]"
++ asm volatile("ldr %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ }
+@@ -100,17 +100,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ if (addr)
+ switch (size) {
+ case 1:
+- asm("strb %0, [%1, %2]"
++ asm volatile("strb %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+ case 2:
+- asm("strh %0, [%1, %2]"
++ asm volatile("strh %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+ case 4:
+- asm("str %0, [%1, %2]"
++ asm volatile("str %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+index 354ef2f3eac67..9533c85fb0a30 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -2382,7 +2382,7 @@
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ dr_mode = "host";
+ snps,dis_u2_susphy_quirk;
+- snps,quirk-frame-length-adjustment;
++ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,parkmode-disable-ss-quirk;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+index d4c1da3d4bde2..04d4b1b11a00a 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+@@ -304,7 +304,7 @@
+
+ dcfg: dcfg@1ee0000 {
+ compatible = "fsl,ls1046a-dcfg", "syscon";
+- reg = <0x0 0x1ee0000 0x0 0x10000>;
++ reg = <0x0 0x1ee0000 0x0 0x1000>;
+ big-endian;
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+index f539b3655f6b9..e638f216dbfb3 100644
+--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+@@ -243,6 +243,8 @@
+ &i2c3 {
+ status = "okay";
+ clock-frequency = <400000>;
++ /* Overwrite pinctrl-0 from sdm845.dtsi */
++ pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>;
+
+ tsel: hid@15 {
+ compatible = "hid-over-i2c";
+@@ -250,9 +252,6 @@
+ hid-descr-addr = <0x1>;
+
+ interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
+-
+- pinctrl-names = "default";
+- pinctrl-0 = <&i2c3_hid_active>;
+ };
+
+ tsc2: hid@2c {
+@@ -261,11 +260,6 @@
+ hid-descr-addr = <0x20>;
+
+ interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
+-
+- pinctrl-names = "default";
+- pinctrl-0 = <&i2c3_hid_active>;
+-
+- status = "disabled";
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
+index 9e09909a510a1..98b014a8f9165 100644
+--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
+@@ -860,7 +860,7 @@
+ vopl_mmu: iommu@ff470f00 {
+ compatible = "rockchip,iommu";
+ reg = <0x0 0xff470f00 0x0 0x100>;
+- interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "vopl_mmu";
+ clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>;
+ clock-names = "aclk", "hclk";
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index 179b41ad63baf..18618af3835f9 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -959,6 +959,7 @@ static void virtio_uml_release_dev(struct device *d)
+ }
+
+ os_close_file(vu_dev->sock);
++ kfree(vu_dev);
+ }
+
+ /* Platform device */
+@@ -977,7 +978,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
+ if (!pdata)
+ return -EINVAL;
+
+- vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL);
++ vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
+ if (!vu_dev)
+ return -ENOMEM;
+
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 94df0868804bc..b5e3bfd4facea 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -131,6 +131,9 @@ else
+
+ KBUILD_CFLAGS += -mno-red-zone
+ KBUILD_CFLAGS += -mcmodel=kernel
++
++ # Intel CET isn't enabled in the kernel
++ KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+ endif
+
+ ifdef CONFIG_X86_X32
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 6016559ed1713..5bef1575708dc 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -197,16 +197,6 @@ static inline bool apic_needs_pit(void) { return true; }
+ #endif /* !CONFIG_X86_LOCAL_APIC */
+
+ #ifdef CONFIG_X86_X2APIC
+-/*
+- * Make previous memory operations globally visible before
+- * sending the IPI through x2apic wrmsr. We need a serializing instruction or
+- * mfence for this.
+- */
+-static inline void x2apic_wrmsr_fence(void)
+-{
+- asm volatile("mfence" : : : "memory");
+-}
+-
+ static inline void native_apic_msr_write(u32 reg, u32 v)
+ {
+ if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index 7f828fe497978..4819d5e5a3353 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -84,4 +84,22 @@ do { \
+
+ #include <asm-generic/barrier.h>
+
++/*
++ * Make previous memory operations globally visible before
++ * a WRMSR.
++ *
++ * MFENCE makes writes visible, but only affects load/store
++ * instructions. WRMSR is unfortunately not a load/store
++ * instruction and is unaffected by MFENCE. The LFENCE ensures
++ * that the WRMSR is not reordered.
++ *
++ * Most WRMSRs are full serializing instructions themselves and
++ * do not require this barrier. This is only required for the
++ * IA32_TSC_DEADLINE and X2APIC MSRs.
++ */
++static inline void weak_wrmsr_fence(void)
++{
++ asm volatile("mfence; lfence" : : : "memory");
++}
++
+ #endif /* _ASM_X86_BARRIER_H */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 06fa808d72032..3dca7b8642e9c 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -42,6 +42,7 @@
+ #include <asm/x86_init.h>
+ #include <asm/pgalloc.h>
+ #include <linux/atomic.h>
++#include <asm/barrier.h>
+ #include <asm/mpspec.h>
+ #include <asm/i8259.h>
+ #include <asm/proto.h>
+@@ -472,6 +473,9 @@ static int lapic_next_deadline(unsigned long delta,
+ {
+ u64 tsc;
+
++ /* This MSR is special and need a special fence: */
++ weak_wrmsr_fence();
++
+ tsc = rdtsc();
+ wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
+ return 0;
+diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
+index b0889c48a2ac5..7eec3c154fa24 100644
+--- a/arch/x86/kernel/apic/x2apic_cluster.c
++++ b/arch/x86/kernel/apic/x2apic_cluster.c
+@@ -29,7 +29,8 @@ static void x2apic_send_IPI(int cpu, int vector)
+ {
+ u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
+ }
+
+@@ -41,7 +42,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
+ unsigned long flags;
+ u32 dest;
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ local_irq_save(flags);
+
+ tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index e14eae6d6ea71..032a00e5d9fa6 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -43,7 +43,8 @@ static void x2apic_send_IPI(int cpu, int vector)
+ {
+ u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
+ }
+
+@@ -54,7 +55,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
+ unsigned long this_cpu;
+ unsigned long flags;
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+
+ local_irq_save(flags);
+
+@@ -125,7 +127,8 @@ void __x2apic_send_IPI_shorthand(int vector, u32 which)
+ {
+ unsigned long cfg = __prepare_ICR(which, vector, 0);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ native_x2apic_icr_write(cfg, 0);
+ }
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 39265b55929d2..60c8dcb907a50 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2890,6 +2890,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
+ ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
+ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
+ (u32)msr_data;
++ if (efer & EFER_LMA)
++ ctxt->mode = X86EMUL_MODE_PROT64;
+
+ return X86EMUL_CONTINUE;
+ }
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2b506904be024..4906e480b5bb6 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -889,6 +889,11 @@ static int has_svm(void)
+ return 0;
+ }
+
++ if (sev_active()) {
++ pr_info("KVM is unsupported when running as an SEV guest\n");
++ return 0;
++ }
++
+ return 1;
+ }
+
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index 9268c12458c84..dfa01bcdc3694 100644
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -375,6 +375,7 @@ bool force_dma_unencrypted(struct device *dev)
+
+ return false;
+ }
++EXPORT_SYMBOL_GPL(sev_active);
+
+ /* Architecture __weak replacement functions */
+ void __init mem_encrypt_free_decrypted_mem(void)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index d2dd387c95d86..de06ee7d2ad46 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1434,8 +1434,6 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+
+ drm_connector_update_edid_property(connector,
+ aconnector->edid);
+- drm_add_edid_modes(connector, aconnector->edid);
+-
+ if (aconnector->dc_link->aux_mode)
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+ aconnector->edid);
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 8c73377ac82ca..3d004ca76b6ed 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -215,9 +215,17 @@ static const struct xpad_device {
+ { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
+@@ -296,6 +304,9 @@ static const struct xpad_device {
+ { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
++ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
++ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+@@ -429,8 +440,12 @@ static const struct usb_device_id xpad_table[] = {
+ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
++ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
++ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
++ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
++ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ { }
+ };
+
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index eca931da76c3a..b7dbcbac3a1a5 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ },
++ },
++ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 984c7a6ea4fe8..953d86ca6d2b2 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3285,6 +3285,12 @@ static int __init init_dmars(void)
+
+ if (!ecap_pass_through(iommu->ecap))
+ hw_pass_through = 0;
++
++ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
++ pr_info("Disable batched IOTLB flush due to virtualization");
++ intel_iommu_strict = 1;
++ }
++
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_supported(iommu))
+ intel_svm_init(iommu);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index ec5dfb7ae4e16..cc38530804c90 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -538,8 +538,10 @@ static void md_submit_flush_data(struct work_struct *ws)
+ * could wait for this and below md_handle_request could wait for those
+ * bios because of suspend check
+ */
++ spin_lock_irq(&mddev->lock);
+ mddev->last_flush = mddev->start_flush;
+ mddev->flush_bio = NULL;
++ spin_unlock_irq(&mddev->lock);
+ wake_up(&mddev->sb_wait);
+
+ if (bio->bi_iter.bi_size == 0) {
+diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
+index 3efaa9534a777..9a5aaac29099b 100644
+--- a/drivers/mmc/core/sdio_cis.c
++++ b/drivers/mmc/core/sdio_cis.c
+@@ -20,6 +20,8 @@
+ #include "sdio_cis.h"
+ #include "sdio_ops.h"
+
++#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
++
+ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+ {
+@@ -266,6 +268,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
+
+ do {
+ unsigned char tpl_code, tpl_link;
++ unsigned long timeout = jiffies +
++ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
+
+ ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
+ if (ret)
+@@ -318,6 +322,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
+ prev = &this->next;
+
+ if (ret == -ENOENT) {
++ if (time_after(jiffies, timeout))
++ break;
+ /* warn about unknown tuples */
+ pr_warn_ratelimited("%s: queuing unknown"
+ " CIS tuple 0x%02x (%u bytes)\n",
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 469b155df4885..1af09fd3fed1c 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -1517,7 +1517,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ if (!entry.portvec)
+ entry.state = 0;
+ } else {
+- entry.portvec |= BIT(port);
++ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
++ entry.portvec = BIT(port);
++ else
++ entry.portvec |= BIT(port);
++
+ entry.state = state;
+ }
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index c20dc689698ed..5acd599d6b9af 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -55,12 +55,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+-
+- /* Always report link is down if the VF queues aren't enabled */
+- if (!vf->queues_enabled) {
+- pfe.event_data.link_event.link_status = false;
+- pfe.event_data.link_event.link_speed = 0;
+- } else if (vf->link_forced) {
++ if (vf->link_forced) {
+ pfe.event_data.link_event.link_status = vf->link_up;
+ pfe.event_data.link_event.link_speed =
+ (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
+@@ -70,7 +65,6 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
+ pfe.event_data.link_event.link_speed =
+ i40e_virtchnl_link_speed(ls->link_speed);
+ }
+-
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe, sizeof(pfe), NULL);
+ }
+@@ -2393,8 +2387,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
+ }
+ }
+
+- vf->queues_enabled = true;
+-
+ error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
+@@ -2416,9 +2408,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
+ struct i40e_pf *pf = vf->pf;
+ i40e_status aq_ret = 0;
+
+- /* Immediately mark queues as disabled */
+- vf->queues_enabled = false;
+-
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+index 7164b9bb294ff..f65cc0c165502 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+@@ -99,7 +99,6 @@ struct i40e_vf {
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ bool link_forced;
+ bool link_up; /* only valid if VF link is forced */
+- bool queues_enabled; /* true if the VF queues are enabled */
+ bool spoofchk;
+ u16 num_mac;
+ u16 num_vlan;
+diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+index 0303eeb760505..0365bf2b480e3 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+@@ -1709,7 +1709,8 @@ static int igc_get_link_ksettings(struct net_device *netdev,
+ Asym_Pause);
+ }
+
+- status = rd32(IGC_STATUS);
++ status = pm_runtime_suspended(&adapter->pdev->dev) ?
++ 0 : rd32(IGC_STATUS);
+
+ if (status & IGC_STATUS_LU) {
+ if (status & IGC_STATUS_SPEED_1000) {
+diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
+index c25f555aaf822..ed5d09c11c389 100644
+--- a/drivers/net/ethernet/intel/igc/igc_i225.c
++++ b/drivers/net/ethernet/intel/igc/igc_i225.c
+@@ -219,9 +219,9 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data)
+ {
+ struct igc_nvm_info *nvm = &hw->nvm;
++ s32 ret_val = -IGC_ERR_NVM;
+ u32 attempts = 100000;
+ u32 i, k, eewr = 0;
+- s32 ret_val = 0;
+
+ /* A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+@@ -229,7 +229,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
+ if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
+ words == 0) {
+ hw_dbg("nvm parameter(s) out of bounds\n");
+- ret_val = -IGC_ERR_NVM;
+ goto out;
+ }
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
+index 5eeb4c8caf4ae..08adf103e90b4 100644
+--- a/drivers/net/ethernet/intel/igc/igc_mac.c
++++ b/drivers/net/ethernet/intel/igc/igc_mac.c
+@@ -647,7 +647,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
+ }
+
+ out:
+- return 0;
++ return ret_val;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+index a30eb90ba3d28..dd590086fe6a5 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+@@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+ /* Clear entry invalidation bit */
+ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+
+- /* Write tcam index - indirect access */
+- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
+-
+ /* Write sram index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
+
++ /* Write tcam index - indirect access */
++ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
++ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
++ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 4944c40436f08..11e12761b0a6e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1697,6 +1697,7 @@ search_again_locked:
+ if (!fte_tmp)
+ continue;
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
++ /* No error check needed here, because insert_fte() is not called */
+ up_write_ref_node(&fte_tmp->node, false);
+ tree_put_node(&fte_tmp->node, false);
+ kmem_cache_free(steering->ftes_cache, fte);
+@@ -1745,6 +1746,8 @@ skip_search:
+ up_write_ref_node(&g->node, false);
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
+ up_write_ref_node(&fte->node, false);
++ if (IS_ERR(rule))
++ tree_put_node(&fte->node, false);
+ return rule;
+ }
+ rule = ERR_PTR(-ENOENT);
+@@ -1844,6 +1847,8 @@ search_again_locked:
+ up_write_ref_node(&g->node, false);
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
+ up_write_ref_node(&fte->node, false);
++ if (IS_ERR(rule))
++ tree_put_node(&fte->node, false);
+ tree_put_node(&g->node, false);
+ return rule;
+
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 366ca1b5da5cc..1e8244ec5b332 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -6419,10 +6419,10 @@ static int rtl8169_close(struct net_device *dev)
+
+ cancel_work_sync(&tp->wk.work);
+
+- phy_disconnect(tp->phydev);
+-
+ free_irq(pci_irq_vector(pdev, 0), tp);
+
++ phy_disconnect(tp->phydev);
++
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index c54fe6650018e..7272d8522a9e9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -134,7 +134,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm)
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+- /* Do not configure default queue, it is configured via context info */
++ /*
++ * The default queue is configured via context info, so if we
++ * have a single queue, there's nothing to do here.
++ */
++ if (mvm->trans->num_rx_queues == 1)
++ return 0;
++
++ /* skip the default queue */
+ num_queues = mvm->trans->num_rx_queues - 1;
+
+ size = struct_size(cmd, data, num_queues);
+diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
+index 196aa44c4936a..e0f411021c59d 100644
+--- a/drivers/nvdimm/dimm_devs.c
++++ b/drivers/nvdimm/dimm_devs.c
+@@ -344,16 +344,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR_RO(state);
+
+-static ssize_t available_slots_show(struct device *dev,
+- struct device_attribute *attr, char *buf)
++static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
+ {
+- struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
++ struct device *dev;
+ ssize_t rc;
+ u32 nfree;
+
+ if (!ndd)
+ return -ENXIO;
+
++ dev = ndd->dev;
+ nvdimm_bus_lock(dev);
+ nfree = nd_label_nfree(ndd);
+ if (nfree - 1 > nfree) {
+@@ -365,6 +365,18 @@ static ssize_t available_slots_show(struct device *dev,
+ nvdimm_bus_unlock(dev);
+ return rc;
+ }
++
++static ssize_t available_slots_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ ssize_t rc;
++
++ nd_device_lock(dev);
++ rc = __available_slots_show(dev_get_drvdata(dev), buf);
++ nd_device_unlock(dev);
++
++ return rc;
++}
+ static DEVICE_ATTR_RO(available_slots);
+
+ __weak ssize_t security_show(struct device *dev,
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index ef93bd3ed339c..434d3f21f0e13 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3161,6 +3161,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
++ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
++ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
+ .driver_data = NVME_QUIRK_SINGLE_VECTOR },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index e31823f19a0fa..9242224156f5b 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -292,7 +292,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+ length = cmd->pdu_len;
+ cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
+ offset = cmd->rbytes_done;
+- cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
++ cmd->sg_idx = offset / PAGE_SIZE;
+ sg_offset = offset % PAGE_SIZE;
+ sg = &cmd->req.sg[cmd->sg_idx];
+
+@@ -305,6 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+ length -= iov_len;
+ sg = sg_next(sg);
+ iov++;
++ sg_offset = 0;
+ }
+
+ iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
+diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
+index 134dc2005ce97..c9f6e97582885 100644
+--- a/drivers/usb/class/usblp.c
++++ b/drivers/usb/class/usblp.c
+@@ -1329,14 +1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
+ if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
+ return -EINVAL;
+
+- alts = usblp->protocol[protocol].alt_setting;
+- if (alts < 0)
+- return -EINVAL;
+- r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+- if (r < 0) {
+- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+- alts, usblp->ifnum);
+- return r;
++ /* Don't unnecessarily set the interface if there's a single alt. */
++ if (usblp->intf->num_altsetting > 1) {
++ alts = usblp->protocol[protocol].alt_setting;
++ if (alts < 0)
++ return -EINVAL;
++ r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
++ if (r < 0) {
++ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
++ alts, usblp->ifnum);
++ return r;
++ }
+ }
+
+ usblp->bidir = (usblp->protocol[protocol].epread != NULL);
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index 70ac47a341ac2..e3f1f20c49221 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
+ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
+ u32 windex)
+ {
+- struct dwc2_hsotg_ep *ep;
+ int dir = (windex & USB_DIR_IN) ? 1 : 0;
+ int idx = windex & 0x7F;
+
+@@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
+ if (idx > hsotg->num_of_eps)
+ return NULL;
+
+- ep = index_to_ep(hsotg, idx, dir);
+-
+- if (idx && ep->dir_in != dir)
+- return NULL;
+-
+- return ep;
++ return index_to_ep(hsotg, idx, dir);
+ }
+
+ /**
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 440dbf55ddf70..90ec65d31059f 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1718,7 +1718,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+ if (PMSG_IS_AUTO(msg))
+ break;
+
+- ret = dwc3_core_init(dwc);
++ ret = dwc3_core_init_for_resume(dwc);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
+index 30313b233680d..99c7fc0d1d597 100644
+--- a/drivers/usb/gadget/legacy/ether.c
++++ b/drivers/usb/gadget/legacy/ether.c
+@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(gadget);
+- if (!usb_desc)
++ if (!usb_desc) {
++ status = -ENOMEM;
+ goto fail1;
++ }
+ usb_otg_descriptor_init(gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
+index 45c54d56ecbd5..b45e5bf089979 100644
+--- a/drivers/usb/host/xhci-mtk-sch.c
++++ b/drivers/usb/host/xhci-mtk-sch.c
+@@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
+
+ sch_ep->sch_tt = tt;
+ sch_ep->ep = ep;
++ INIT_LIST_HEAD(&sch_ep->endpoint);
++ INIT_LIST_HEAD(&sch_ep->tt_endpoint);
+
+ return sch_ep;
+ }
+@@ -373,6 +375,7 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
+ sch_ep->bw_budget_table[j];
+ }
+ }
++ sch_ep->allocated = used;
+ }
+
+ static int check_sch_tt(struct usb_device *udev,
+@@ -541,6 +544,22 @@ static int check_sch_bw(struct usb_device *udev,
+ return 0;
+ }
+
++static void destroy_sch_ep(struct usb_device *udev,
++ struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
++{
++ /* only release ep bw check passed by check_sch_bw() */
++ if (sch_ep->allocated)
++ update_bus_bw(sch_bw, sch_ep, 0);
++
++ list_del(&sch_ep->endpoint);
++
++ if (sch_ep->sch_tt) {
++ list_del(&sch_ep->tt_endpoint);
++ drop_tt(udev);
++ }
++ kfree(sch_ep);
++}
++
+ static bool need_bw_sch(struct usb_host_endpoint *ep,
+ enum usb_device_speed speed, int has_tt)
+ {
+@@ -583,6 +602,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
+
+ mtk->sch_array = sch_array;
+
++ INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
+@@ -601,19 +622,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_slot_ctx *slot_ctx;
+ struct xhci_virt_device *virt_dev;
+- struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep;
+- struct mu3h_sch_bw_info *sch_array;
+ unsigned int ep_index;
+- int bw_index;
+- int ret = 0;
+
+ xhci = hcd_to_xhci(hcd);
+ virt_dev = xhci->devs[udev->slot_id];
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+- sch_array = mtk->sch_array;
+
+ xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
+ __func__, usb_endpoint_type(&ep->desc), udev->speed,
+@@ -632,35 +648,13 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ return 0;
+ }
+
+- bw_index = get_bw_index(xhci, udev, ep);
+- sch_bw = &sch_array[bw_index];
+-
+ sch_ep = create_sch_ep(udev, ep, ep_ctx);
+ if (IS_ERR_OR_NULL(sch_ep))
+ return -ENOMEM;
+
+ setup_sch_info(udev, ep_ctx, sch_ep);
+
+- ret = check_sch_bw(udev, sch_bw, sch_ep);
+- if (ret) {
+- xhci_err(xhci, "Not enough bandwidth!\n");
+- if (is_fs_or_ls(udev->speed))
+- drop_tt(udev);
+-
+- kfree(sch_ep);
+- return -ENOSPC;
+- }
+-
+- list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+-
+- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+- | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
+- ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+- | EP_BREPEAT(sch_ep->repeat));
+-
+- xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+- sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
+- sch_ep->offset, sch_ep->repeat);
++ list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
+
+ return 0;
+ }
+@@ -675,7 +669,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ struct xhci_virt_device *virt_dev;
+ struct mu3h_sch_bw_info *sch_array;
+ struct mu3h_sch_bw_info *sch_bw;
+- struct mu3h_sch_ep_info *sch_ep;
++ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index;
+
+ xhci = hcd_to_xhci(hcd);
+@@ -694,17 +688,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ bw_index = get_bw_index(xhci, udev, ep);
+ sch_bw = &sch_array[bw_index];
+
+- list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
++ list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) {
+ if (sch_ep->ep == ep) {
+- update_bus_bw(sch_bw, sch_ep, 0);
+- list_del(&sch_ep->endpoint);
+- if (is_fs_or_ls(udev->speed)) {
+- list_del(&sch_ep->tt_endpoint);
+- drop_tt(udev);
+- }
+- kfree(sch_ep);
++ destroy_sch_ep(udev, sch_bw, sch_ep);
+ break;
+ }
+ }
+ }
+ EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk);
++
++int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
++{
++ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
++ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
++ struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
++ struct mu3h_sch_bw_info *sch_bw;
++ struct mu3h_sch_ep_info *sch_ep, *tmp;
++ int bw_index, ret;
++
++ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
++
++ list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) {
++ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
++ sch_bw = &mtk->sch_array[bw_index];
++
++ ret = check_sch_bw(udev, sch_bw, sch_ep);
++ if (ret) {
++ xhci_err(xhci, "Not enough bandwidth!\n");
++ return -ENOSPC;
++ }
++ }
++
++ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
++ struct xhci_ep_ctx *ep_ctx;
++ struct usb_host_endpoint *ep = sch_ep->ep;
++ unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
++
++ bw_index = get_bw_index(xhci, udev, ep);
++ sch_bw = &mtk->sch_array[bw_index];
++
++ list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
++
++ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
++ ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
++ | EP_BCSCOUNT(sch_ep->cs_count)
++ | EP_BBM(sch_ep->burst_mode));
++ ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
++ | EP_BREPEAT(sch_ep->repeat));
++
++ xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
++ sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
++ sch_ep->offset, sch_ep->repeat);
++ }
++
++ return xhci_check_bandwidth(hcd, udev);
++}
++EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth);
++
++void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
++{
++ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
++ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
++ struct mu3h_sch_bw_info *sch_bw;
++ struct mu3h_sch_ep_info *sch_ep, *tmp;
++ int bw_index;
++
++ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
++
++ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
++ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
++ sch_bw = &mtk->sch_array[bw_index];
++ destroy_sch_ep(udev, sch_bw, sch_ep);
++ }
++
++ xhci_reset_bandwidth(hcd, udev);
++}
++EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth);
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index 85f1ff0399a9c..09b67219fd146 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable)
+ static int xhci_mtk_setup(struct usb_hcd *hcd);
+ static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
+ .reset = xhci_mtk_setup,
++ .check_bandwidth = xhci_mtk_check_bandwidth,
++ .reset_bandwidth = xhci_mtk_reset_bandwidth,
+ };
+
+ static struct hc_driver __read_mostly xhci_mtk_hc_driver;
+diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
+index 5ac458b7d2e0e..734c5513aa1bf 100644
+--- a/drivers/usb/host/xhci-mtk.h
++++ b/drivers/usb/host/xhci-mtk.h
+@@ -59,6 +59,7 @@ struct mu3h_sch_bw_info {
+ * @ep_type: endpoint type
+ * @maxpkt: max packet size of endpoint
+ * @ep: address of usb_host_endpoint struct
++ * @allocated: the bandwidth is aready allocated from bus_bw
+ * @offset: which uframe of the interval that transfer should be
+ * scheduled first time within the interval
+ * @repeat: the time gap between two uframes that transfers are
+@@ -86,6 +87,7 @@ struct mu3h_sch_ep_info {
+ u32 ep_type;
+ u32 maxpkt;
+ void *ep;
++ bool allocated;
+ /*
+ * mtk xHCI scheduling information put into reserved DWs
+ * in ep context
+@@ -131,6 +133,7 @@ struct xhci_hcd_mtk {
+ struct device *dev;
+ struct usb_hcd *hcd;
+ struct mu3h_sch_bw_info *sch_array;
++ struct list_head bw_ep_chk_list;
+ struct mu3c_ippc_regs __iomem *ippc_regs;
+ bool has_ippc;
+ int num_u2_ports;
+@@ -166,6 +169,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep);
+ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep);
++int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
++void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+
+ #else
+ static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd,
+@@ -179,6 +184,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd,
+ {
+ }
+
++static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd,
++ struct usb_device *udev)
++{
++ return 0;
++}
++
++static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd,
++ struct usb_device *udev)
++{
++}
+ #endif
+
+ #endif /* _XHCI_MTK_H_ */
+diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
+index 60651a50770f9..f27d5c2c42f31 100644
+--- a/drivers/usb/host/xhci-mvebu.c
++++ b/drivers/usb/host/xhci-mvebu.c
+@@ -8,6 +8,7 @@
+ #include <linux/mbus.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/phy/phy.h>
+
+ #include <linux/usb.h>
+ #include <linux/usb/hcd.h>
+@@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
+ return 0;
+ }
+
++int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
++{
++ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
++ struct device *dev = hcd->self.controller;
++ struct phy *phy;
++ int ret;
++
++ /* Old bindings miss the PHY handle */
++ phy = of_phy_get(dev->of_node, "usb3-phy");
++ if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER)
++ return -EPROBE_DEFER;
++ else if (IS_ERR(phy))
++ goto phy_out;
++
++ ret = phy_init(phy);
++ if (ret)
++ goto phy_put;
++
++ ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS);
++ if (ret)
++ goto phy_exit;
++
++ ret = phy_power_on(phy);
++ if (ret == -EOPNOTSUPP) {
++ /* Skip initializatin of XHCI PHY when it is unsupported by firmware */
++ dev_warn(dev, "PHY unsupported by firmware\n");
++ xhci->quirks |= XHCI_SKIP_PHY_INIT;
++ }
++ if (ret)
++ goto phy_exit;
++
++ phy_power_off(phy);
++phy_exit:
++ phy_exit(phy);
++phy_put:
++ phy_put(phy);
++phy_out:
++
++ return 0;
++}
++
+ int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
+index ca0a3a5721dd7..74b4d21a498a0 100644
+--- a/drivers/usb/host/xhci-mvebu.h
++++ b/drivers/usb/host/xhci-mvebu.h
+@@ -12,6 +12,7 @@ struct usb_hcd;
+
+ #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
+ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
++int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd);
+ int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
+ #else
+ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
+@@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
+ return 0;
+ }
+
++static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
++{
++ return 0;
++}
++
+ static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
+ {
+ return 0;
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 52c625c023410..84cfa85442852 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd)
+ priv->plat_start(hcd);
+ }
+
++static int xhci_priv_plat_setup(struct usb_hcd *hcd)
++{
++ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
++
++ if (!priv->plat_setup)
++ return 0;
++
++ return priv->plat_setup(hcd);
++}
++
+ static int xhci_priv_init_quirk(struct usb_hcd *hcd)
+ {
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+@@ -101,6 +111,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = {
+ };
+
+ static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
++ .plat_setup = xhci_mvebu_a3700_plat_setup,
+ .init_quirk = xhci_mvebu_a3700_init_quirk,
+ };
+
+@@ -163,6 +174,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ struct usb_hcd *hcd;
+ int ret;
+ int irq;
++ struct xhci_plat_priv *priv = NULL;
++
+
+ if (usb_disabled())
+ return -ENODEV;
+@@ -257,8 +270,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
+
+ priv_match = of_device_get_match_data(&pdev->dev);
+ if (priv_match) {
+- struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+-
++ priv = hcd_to_xhci_priv(hcd);
+ /* Just copy data for now */
+ if (priv_match)
+ *priv = *priv_match;
+@@ -307,6 +319,16 @@ static int xhci_plat_probe(struct platform_device *pdev)
+
+ hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
+ xhci->shared_hcd->tpl_support = hcd->tpl_support;
++
++ if (priv) {
++ ret = xhci_priv_plat_setup(hcd);
++ if (ret)
++ goto disable_usb_phy;
++ }
++
++ if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)))
++ hcd->skip_phy_initialization = 1;
++
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret)
+ goto disable_usb_phy;
+diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
+index 5681723fc9cd7..b7749151bdfb8 100644
+--- a/drivers/usb/host/xhci-plat.h
++++ b/drivers/usb/host/xhci-plat.h
+@@ -13,6 +13,7 @@
+ struct xhci_plat_priv {
+ const char *firmware_name;
+ unsigned long long quirks;
++ int (*plat_setup)(struct usb_hcd *);
+ void (*plat_start)(struct usb_hcd *);
+ int (*init_quirk)(struct usb_hcd *);
+ int (*resume_quirk)(struct usb_hcd *);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 52e156c018042..900ea91fb3c6b 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -695,11 +695,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
+ dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+ DMA_FROM_DEVICE);
+ /* for in tranfers we need to copy the data from bounce to sg */
+- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+- seg->bounce_len, seg->bounce_offs);
+- if (len != seg->bounce_len)
+- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+- len, seg->bounce_len);
++ if (urb->num_sgs) {
++ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
++ seg->bounce_len, seg->bounce_offs);
++ if (len != seg->bounce_len)
++ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
++ len, seg->bounce_len);
++ } else {
++ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
++ seg->bounce_len);
++ }
+ seg->bounce_len = 0;
+ seg->bounce_offs = 0;
+ }
+@@ -3263,12 +3268,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
+
+ /* create a max max_pkt sized bounce buffer pointed to by last trb */
+ if (usb_urb_dir_out(urb)) {
+- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+- seg->bounce_buf, new_buff_len, enqd_len);
+- if (len != new_buff_len)
+- xhci_warn(xhci,
+- "WARN Wrong bounce buffer write length: %zu != %d\n",
+- len, new_buff_len);
++ if (urb->num_sgs) {
++ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
++ seg->bounce_buf, new_buff_len, enqd_len);
++ if (len != new_buff_len)
++ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
++ len, new_buff_len);
++ } else {
++ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
++ }
++
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_TO_DEVICE);
+ } else {
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 70aa3055c41e7..91330517444e7 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -2861,7 +2861,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
+ * else should be touching the xhci->devs[slot_id] structure, so we
+ * don't need to take the xhci->lock for manipulating that.
+ */
+-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
++int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+ {
+ int i;
+ int ret = 0;
+@@ -2959,7 +2959,7 @@ command_cleanup:
+ return ret;
+ }
+
+-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
++void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+ {
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+@@ -5380,6 +5380,10 @@ void xhci_init_driver(struct hc_driver *drv,
+ drv->reset = over->reset;
+ if (over->start)
+ drv->start = over->start;
++ if (over->check_bandwidth)
++ drv->check_bandwidth = over->check_bandwidth;
++ if (over->reset_bandwidth)
++ drv->reset_bandwidth = over->reset_bandwidth;
+ }
+ }
+ EXPORT_SYMBOL_GPL(xhci_init_driver);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index b483317bcb17b..1ad1d6e9e9979 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1873,6 +1873,7 @@ struct xhci_hcd {
+ #define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
+ #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
+ #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
++#define XHCI_SKIP_PHY_INIT BIT_ULL(37)
+ #define XHCI_DISABLE_SPARSE BIT_ULL(38)
+
+ unsigned int num_active_eps;
+@@ -1911,6 +1912,8 @@ struct xhci_driver_overrides {
+ size_t extra_priv_size;
+ int (*reset)(struct usb_hcd *hcd);
+ int (*start)(struct usb_hcd *hcd);
++ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
++ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
+ };
+
+ #define XHCI_CFC_DELAY 10
+@@ -2063,6 +2066,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
+ void xhci_shutdown(struct usb_hcd *hcd);
+ void xhci_init_driver(struct hc_driver *drv,
+ const struct xhci_driver_overrides *over);
++int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
++void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
+ int xhci_ext_cap_init(struct xhci_hcd *xhci);
+
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index 05cdad13933b1..cfc16943979d5 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
+ }
+
+ usbhs_pipe_clear_without_sequence(pipe, 0, 0);
++ usbhs_pipe_running(pipe, 0);
+
+ __usbhsf_pkt_del(pkt);
+ }
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index a90801ef00554..361a2e3ccad8d 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
++ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
+ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
+ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
+@@ -201,6 +202,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
+ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
+ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
++ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
+ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index fd41b07b5aaf1..f49eae18500cc 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb);
+ #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
+ #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
+ #define CINTERION_PRODUCT_CLS8 0x00b0
++#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
++#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
+
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+@@ -1914,6 +1916,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
++ .driver_info = RSVD(3)},
++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
++ .driver_info = RSVD(0)},
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
+diff --git a/fs/afs/main.c b/fs/afs/main.c
+index c9c45d7078bd1..5cd26af2464c9 100644
+--- a/fs/afs/main.c
++++ b/fs/afs/main.c
+@@ -186,7 +186,7 @@ static int __init afs_init(void)
+ goto error_cache;
+ #endif
+
+- ret = register_pernet_subsys(&afs_net_ops);
++ ret = register_pernet_device(&afs_net_ops);
1526 + if (ret < 0)
1527 + goto error_net;
1528 +
1529 +@@ -206,7 +206,7 @@ static int __init afs_init(void)
1530 + error_proc:
1531 + afs_fs_exit();
1532 + error_fs:
1533 +- unregister_pernet_subsys(&afs_net_ops);
1534 ++ unregister_pernet_device(&afs_net_ops);
1535 + error_net:
1536 + #ifdef CONFIG_AFS_FSCACHE
1537 + fscache_unregister_netfs(&afs_cache_netfs);
1538 +@@ -237,7 +237,7 @@ static void __exit afs_exit(void)
1539 +
1540 + proc_remove(afs_proc_symlink);
1541 + afs_fs_exit();
1542 +- unregister_pernet_subsys(&afs_net_ops);
1543 ++ unregister_pernet_device(&afs_net_ops);
1544 + #ifdef CONFIG_AFS_FSCACHE
1545 + fscache_unregister_netfs(&afs_cache_netfs);
1546 + #endif
1547 +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
1548 +index 5a35850ccb1ab..9ae9a514676c3 100644
1549 +--- a/fs/cifs/dir.c
1550 ++++ b/fs/cifs/dir.c
1551 +@@ -738,6 +738,7 @@ static int
1552 + cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
1553 + {
1554 + struct inode *inode;
1555 ++ int rc;
1556 +
1557 + if (flags & LOOKUP_RCU)
1558 + return -ECHILD;
1559 +@@ -747,8 +748,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
1560 + if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
1561 + CIFS_I(inode)->time = 0; /* force reval */
1562 +
1563 +- if (cifs_revalidate_dentry(direntry))
1564 +- return 0;
1565 ++ rc = cifs_revalidate_dentry(direntry);
1566 ++ if (rc) {
1567 ++ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d\n", rc);
1568 ++ switch (rc) {
1569 ++ case -ENOENT:
1570 ++ case -ESTALE:
1571 ++ /*
1572 ++ * Those errors mean the dentry is invalid
1573 ++ * (file was deleted or recreated)
1574 ++ */
1575 ++ return 0;
1576 ++ default:
1577 ++ /*
1578 ++ * Otherwise some unexpected error happened;
1579 ++ * report it as-is to the VFS layer
1580 ++ */
1581 ++ return rc;
1582 ++ }
1583 ++ }
1584 + else {
1585 + /*
1586 + * If the inode wasn't known to be a dfs entry when
1587 +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
1588 +index 2482978f09486..739556e385be8 100644
1589 +--- a/fs/cifs/smb2pdu.h
1590 ++++ b/fs/cifs/smb2pdu.h
1591 +@@ -227,7 +227,7 @@ struct smb2_negotiate_req {
1592 + __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
1593 + __le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */
1594 + __le16 Reserved2;
1595 +- __le16 Dialects[1]; /* One dialect (vers=) at a time for now */
1596 ++ __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
1597 + } __packed;
1598 +
1599 + /* Dialects */
1600 +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
1601 +index 4ffbf8f965814..eab7940bfebef 100644
1602 +--- a/fs/cifs/transport.c
1603 ++++ b/fs/cifs/transport.c
1604 +@@ -659,10 +659,22 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num,
1605 + spin_lock(&server->req_lock);
1606 + if (*credits < num) {
1607 + /*
1608 +- * Return immediately if not too many requests in flight since
1609 +- * we will likely be stuck on waiting for credits.
1610 ++ * If the server is tight on resources or just gives us fewer
1611 ++ * credits for other reasons (e.g. requests are coming out of
1612 ++ * order and the server delays granting more credits until it
1613 ++ * processes a missing mid) and we exhausted most available
1614 ++ * credits, there may be situations when we try to send
1615 ++ * a compound request but we don't have enough credits. At this
1616 ++ * point the client needs to decide if it should wait for
1617 ++ * additional credits or fail the request. If at least one
1618 ++ * request is in flight there is a high probability that the
1619 ++ * server will return enough credits to satisfy this compound
1620 ++ * request.
1621 ++ *
1622 ++ * Return immediately if no requests are in flight, since we
1623 ++ * would be stuck waiting for credits.
1624 + */
1625 +- if (server->in_flight < num - *credits) {
1626 ++ if (server->in_flight == 0) {
1627 + spin_unlock(&server->req_lock);
1628 + return -ENOTSUPP;
1629 + }
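The transport.c hunk above changes when a compound request fails fast: only a server with zero requests in flight can never return credits, so that is the only case worth failing immediately. A rough user-space sketch of that decision (should_fail_compound is hypothetical; ENOTSUPP is defined locally because it is a kernel-internal errno value):

#include <stdio.h>

#define ENOTSUPP 524   /* kernel-internal errno value, local to the sketch */

/* 0: enough credits, send now.  1: wait, an in-flight request should
 * return credits.  -ENOTSUPP: nothing in flight, waiting cannot help. */
static int should_fail_compound(int credits, int needed, int in_flight)
{
	if (credits >= needed)
		return 0;
	if (in_flight == 0)
		return -ENOTSUPP;
	return 1;
}

int main(void)
{
	printf("%d\n", should_fail_compound(1, 3, 0));   /* -524 */
	printf("%d\n", should_fail_compound(1, 3, 2));   /* 1 */
	printf("%d\n", should_fail_compound(4, 3, 0));   /* 0 */
	return 0;
}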
1630 +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
1631 +index 5fff7cb3582f0..cf3af2140c3d8 100644
1632 +--- a/fs/hugetlbfs/inode.c
1633 ++++ b/fs/hugetlbfs/inode.c
1634 +@@ -675,9 +675,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
1635 +
1636 + mutex_unlock(&hugetlb_fault_mutex_table[hash]);
1637 +
1638 ++ set_page_huge_active(page);
1639 + /*
1640 + * unlock_page because locked by add_to_page_cache()
1641 +- * page_put due to reference from alloc_huge_page()
1642 ++ * put_page() due to reference from alloc_huge_page()
1643 + */
1644 + unlock_page(page);
1645 + put_page(page);
1646 +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
1647 +index 29abdb1d3b5c6..6509ec3cb3730 100644
1648 +--- a/fs/overlayfs/dir.c
1649 ++++ b/fs/overlayfs/dir.c
1650 +@@ -940,8 +940,8 @@ static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect)
1651 +
1652 + buflen -= thislen;
1653 + memcpy(&buf[buflen], name, thislen);
1654 +- tmp = dget_dlock(d->d_parent);
1655 + spin_unlock(&d->d_lock);
1656 ++ tmp = dget_parent(d);
1657 +
1658 + dput(d);
1659 + d = tmp;
1660 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
1661 +index 8a03f392f3680..0e080ba5efbcc 100644
1662 +--- a/include/linux/hugetlb.h
1663 ++++ b/include/linux/hugetlb.h
1664 +@@ -590,6 +590,8 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
1665 + }
1666 + #endif
1667 +
1668 ++void set_page_huge_active(struct page *page);
1669 ++
1670 + #else /* CONFIG_HUGETLB_PAGE */
1671 + struct hstate {};
1672 +
1673 +diff --git a/include/linux/msi.h b/include/linux/msi.h
1674 +index 8ad679e9d9c04..d695e2eb2092d 100644
1675 +--- a/include/linux/msi.h
1676 ++++ b/include/linux/msi.h
1677 +@@ -139,6 +139,12 @@ struct msi_desc {
1678 + list_for_each_entry((desc), dev_to_msi_list((dev)), list)
1679 + #define for_each_msi_entry_safe(desc, tmp, dev) \
1680 + list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
1681 ++#define for_each_msi_vector(desc, __irq, dev) \
1682 ++ for_each_msi_entry((desc), (dev)) \
1683 ++ if ((desc)->irq) \
1684 ++ for (__irq = (desc)->irq; \
1685 ++ __irq < ((desc)->irq + (desc)->nvec_used); \
1686 ++ __irq++)
1687 +
1688 + #ifdef CONFIG_IRQ_MSI_IOMMU
1689 + static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
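The for_each_msi_vector() macro added above composes an outer loop over descriptors, a guard on desc->irq, and an inner loop over each descriptor's vector range. A self-contained user-space rendering of the same macro shape (struct desc and N_DESCS are inventions of the sketch):

#include <stdio.h>

/* Toy descriptor: a base irq plus how many consecutive vectors it uses. */
struct desc { int irq; int nvec_used; };

#define N_DESCS 3

/* Same shape as the patch's macro: the dangling-if binds the inner for
 * to the guard, so descriptors with irq == 0 are skipped without
 * breaking the outer loop. */
#define for_each_msi_vector(d, v, descs)				\
	for ((d) = (descs); (d) < (descs) + N_DESCS; (d)++)		\
		if ((d)->irq)						\
			for ((v) = (d)->irq;				\
			     (v) < (d)->irq + (d)->nvec_used; (v)++)

int main(void)
{
	struct desc descs[N_DESCS] = { { 32, 2 }, { 0, 0 }, { 40, 3 } };
	struct desc *d;
	int v;

	for_each_msi_vector(d, v, descs)
		printf("vector %d (base %d)\n", v, d->irq);
	return 0;
}

This prints vectors 32, 33, 40, 41 and 42; the middle descriptor is skipped.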
1690 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
1691 +index 3d03756e10699..b2ceec7b280d4 100644
1692 +--- a/include/net/sch_generic.h
1693 ++++ b/include/net/sch_generic.h
1694 +@@ -1158,7 +1158,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
1695 + old = *pold;
1696 + *pold = new;
1697 + if (old != NULL)
1698 +- qdisc_tree_flush_backlog(old);
1699 ++ qdisc_purge_queue(old);
1700 + sch_tree_unlock(sch);
1701 +
1702 + return old;
1703 +diff --git a/init/init_task.c b/init/init_task.c
1704 +index df7041be96fca..5d8359c44564a 100644
1705 +--- a/init/init_task.c
1706 ++++ b/init/init_task.c
1707 +@@ -171,7 +171,8 @@ struct task_struct init_task
1708 + .lockdep_recursion = 0,
1709 + #endif
1710 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1711 +- .ret_stack = NULL,
1712 ++ .ret_stack = NULL,
1713 ++ .tracing_graph_pause = ATOMIC_INIT(0),
1714 + #endif
1715 + #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
1716 + .trace_recursion = 0,
1717 +diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
1718 +index 5a8b4dfdb1419..c2f0aa818b7af 100644
1719 +--- a/kernel/bpf/cgroup.c
1720 ++++ b/kernel/bpf/cgroup.c
1721 +@@ -1109,6 +1109,11 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
1722 + goto out;
1723 + }
1724 +
1725 ++ if (ctx.optlen < 0) {
1726 ++ ret = -EFAULT;
1727 ++ goto out;
1728 ++ }
1729 ++
1730 + if (copy_from_user(ctx.optval, optval,
1731 + min(ctx.optlen, max_optlen)) != 0) {
1732 + ret = -EFAULT;
1733 +@@ -1126,7 +1131,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
1734 + goto out;
1735 + }
1736 +
1737 +- if (ctx.optlen > max_optlen) {
1738 ++ if (ctx.optlen > max_optlen || ctx.optlen < 0) {
1739 + ret = -EFAULT;
1740 + goto out;
1741 + }
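Both cgroup.c hunks harden the same invariant: ctx.optlen is user-influenced and signed, so it must be rejected when negative as well as when larger than the buffer, before it feeds min()/copy operations. A tiny sketch of that bounds check in isolation (copy_opt is hypothetical):

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* A user-influenced, signed length must be rejected when negative as
 * well as when it exceeds the destination, before any bounded copy. */
static int copy_opt(char *dst, int max_optlen, const char *src, int optlen)
{
	if (optlen < 0 || optlen > max_optlen)
		return -EFAULT;         /* reject, as both hunks now do */
	memcpy(dst, src, (size_t)optlen);
	return optlen;
}

int main(void)
{
	char buf[16];

	printf("%d\n", copy_opt(buf, sizeof(buf), "abc", 3));    /* 3 */
	printf("%d\n", copy_opt(buf, sizeof(buf), "abc", -1));   /* -14 */
	return 0;
}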
1742 +diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
1743 +index eb95f6106a1ee..5d3da0db092ff 100644
1744 +--- a/kernel/irq/msi.c
1745 ++++ b/kernel/irq/msi.c
1746 +@@ -437,22 +437,22 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
1747 +
1748 + can_reserve = msi_check_reservation_mode(domain, info, dev);
1749 +
1750 +- for_each_msi_entry(desc, dev) {
1751 +- virq = desc->irq;
1752 +- if (desc->nvec_used == 1)
1753 +- dev_dbg(dev, "irq %d for MSI\n", virq);
1754 +- else
1755 ++ /*
1756 ++ * This flag is set by the PCI layer as we need to activate
1757 ++ * the MSI entries before the PCI layer enables MSI in the
1758 ++ * card. Otherwise the card latches a random msi message.
1759 ++ */
1760 ++ if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
1761 ++ goto skip_activate;
1762 ++
1763 ++ for_each_msi_vector(desc, i, dev) {
1764 ++ if (desc->irq == i) {
1765 ++ virq = desc->irq;
1766 + dev_dbg(dev, "irq [%d-%d] for MSI\n",
1767 + virq, virq + desc->nvec_used - 1);
1768 +- /*
1769 +- * This flag is set by the PCI layer as we need to activate
1770 +- * the MSI entries before the PCI layer enables MSI in the
1771 +- * card. Otherwise the card latches a random msi message.
1772 +- */
1773 +- if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
1774 +- continue;
1775 ++ }
1776 +
1777 +- irq_data = irq_domain_get_irq_data(domain, desc->irq);
1778 ++ irq_data = irq_domain_get_irq_data(domain, i);
1779 + if (!can_reserve) {
1780 + irqd_clr_can_reserve(irq_data);
1781 + if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
1782 +@@ -463,28 +463,24 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
1783 + goto cleanup;
1784 + }
1785 +
1786 ++skip_activate:
1787 + /*
1788 + * If these interrupts use reservation mode, clear the activated bit
1789 + * so request_irq() will assign the final vector.
1790 + */
1791 + if (can_reserve) {
1792 +- for_each_msi_entry(desc, dev) {
1793 +- irq_data = irq_domain_get_irq_data(domain, desc->irq);
1794 ++ for_each_msi_vector(desc, i, dev) {
1795 ++ irq_data = irq_domain_get_irq_data(domain, i);
1796 + irqd_clr_activated(irq_data);
1797 + }
1798 + }
1799 + return 0;
1800 +
1801 + cleanup:
1802 +- for_each_msi_entry(desc, dev) {
1803 +- struct irq_data *irqd;
1804 +-
1805 +- if (desc->irq == virq)
1806 +- break;
1807 +-
1808 +- irqd = irq_domain_get_irq_data(domain, desc->irq);
1809 +- if (irqd_is_activated(irqd))
1810 +- irq_domain_deactivate_irq(irqd);
1811 ++ for_each_msi_vector(desc, i, dev) {
1812 ++ irq_data = irq_domain_get_irq_data(domain, i);
1813 ++ if (irqd_is_activated(irq_data))
1814 ++ irq_domain_deactivate_irq(irq_data);
1815 + }
1816 + msi_domain_free_irqs(domain, dev);
1817 + return ret;
1818 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
1819 +index 283c8b01ce789..26ae92c12fc22 100644
1820 +--- a/kernel/kprobes.c
1821 ++++ b/kernel/kprobes.c
1822 +@@ -1972,6 +1972,10 @@ int register_kretprobe(struct kretprobe *rp)
1823 + if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
1824 + return -EINVAL;
1825 +
1826 ++ /* If only rp->kp.addr is specified, check reregistering kprobes */
1827 ++ if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
1828 ++ return -EINVAL;
1829 ++
1830 + if (kretprobe_blacklist_size) {
1831 + addr = kprobe_addr(&rp->kp);
1832 + if (IS_ERR(addr))
1833 +diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
1834 +index 7950a0356042a..888cd00174fe3 100644
1835 +--- a/kernel/trace/fgraph.c
1836 ++++ b/kernel/trace/fgraph.c
1837 +@@ -367,7 +367,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
1838 + }
1839 +
1840 + if (t->ret_stack == NULL) {
1841 +- atomic_set(&t->tracing_graph_pause, 0);
1842 + atomic_set(&t->trace_overrun, 0);
1843 + t->curr_ret_stack = -1;
1844 + t->curr_ret_depth = -1;
1845 +@@ -462,7 +461,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
1846 + static void
1847 + graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
1848 + {
1849 +- atomic_set(&t->tracing_graph_pause, 0);
1850 + atomic_set(&t->trace_overrun, 0);
1851 + t->ftrace_timestamp = 0;
1852 + /* make curr_ret_stack visible before we add the ret_stack */
1853 +diff --git a/mm/compaction.c b/mm/compaction.c
1854 +index 92470625f0b1e..88c3f6bad1aba 100644
1855 +--- a/mm/compaction.c
1856 ++++ b/mm/compaction.c
1857 +@@ -1276,7 +1276,7 @@ fast_isolate_freepages(struct compact_control *cc)
1858 + {
1859 + unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
1860 + unsigned int nr_scanned = 0;
1861 +- unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0;
1862 ++ unsigned long low_pfn, min_pfn, highest = 0;
1863 + unsigned long nr_isolated = 0;
1864 + unsigned long distance;
1865 + struct page *page = NULL;
1866 +@@ -1321,6 +1321,7 @@ fast_isolate_freepages(struct compact_control *cc)
1867 + struct page *freepage;
1868 + unsigned long flags;
1869 + unsigned int order_scanned = 0;
1870 ++ unsigned long high_pfn = 0;
1871 +
1872 + if (!area->nr_free)
1873 + continue;
1874 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1875 +index 11aa763a31440..7bbf419bb86d6 100644
1876 +--- a/mm/huge_memory.c
1877 ++++ b/mm/huge_memory.c
1878 +@@ -2306,7 +2306,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1879 + {
1880 + spinlock_t *ptl;
1881 + struct mmu_notifier_range range;
1882 +- bool was_locked = false;
1883 ++ bool do_unlock_page = false;
1884 + pmd_t _pmd;
1885 +
1886 + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1887 +@@ -2322,7 +2322,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1888 + VM_BUG_ON(freeze && !page);
1889 + if (page) {
1890 + VM_WARN_ON_ONCE(!PageLocked(page));
1891 +- was_locked = true;
1892 + if (page != pmd_page(*pmd))
1893 + goto out;
1894 + }
1895 +@@ -2331,19 +2330,29 @@ repeat:
1896 + if (pmd_trans_huge(*pmd)) {
1897 + if (!page) {
1898 + page = pmd_page(*pmd);
1899 +- if (unlikely(!trylock_page(page))) {
1900 +- get_page(page);
1901 +- _pmd = *pmd;
1902 +- spin_unlock(ptl);
1903 +- lock_page(page);
1904 +- spin_lock(ptl);
1905 +- if (unlikely(!pmd_same(*pmd, _pmd))) {
1906 +- unlock_page(page);
1907 ++ /*
1908 ++ * An anonymous page must be locked, to ensure that a
1909 ++ * concurrent reuse_swap_page() sees stable mapcount;
1910 ++ * but reuse_swap_page() is not used on shmem or file,
1911 ++ * and page lock must not be taken when zap_pmd_range()
1912 ++ * calls __split_huge_pmd() while i_mmap_lock is held.
1913 ++ */
1914 ++ if (PageAnon(page)) {
1915 ++ if (unlikely(!trylock_page(page))) {
1916 ++ get_page(page);
1917 ++ _pmd = *pmd;
1918 ++ spin_unlock(ptl);
1919 ++ lock_page(page);
1920 ++ spin_lock(ptl);
1921 ++ if (unlikely(!pmd_same(*pmd, _pmd))) {
1922 ++ unlock_page(page);
1923 ++ put_page(page);
1924 ++ page = NULL;
1925 ++ goto repeat;
1926 ++ }
1927 + put_page(page);
1928 +- page = NULL;
1929 +- goto repeat;
1930 + }
1931 +- put_page(page);
1932 ++ do_unlock_page = true;
1933 + }
1934 + }
1935 + if (PageMlocked(page))
1936 +@@ -2353,7 +2362,7 @@ repeat:
1937 + __split_huge_pmd_locked(vma, pmd, range.start, freeze);
1938 + out:
1939 + spin_unlock(ptl);
1940 +- if (!was_locked && page)
1941 ++ if (do_unlock_page)
1942 + unlock_page(page);
1943 + /*
1944 + * No need to double call mmu_notifier->invalidate_range() callback.
1945 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1946 +index 3bc33fa838177..d5b03b9262d4f 100644
1947 +--- a/mm/hugetlb.c
1948 ++++ b/mm/hugetlb.c
1949 +@@ -71,6 +71,21 @@ DEFINE_SPINLOCK(hugetlb_lock);
1950 + static int num_fault_mutexes;
1951 + struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
1952 +
1953 ++static inline bool PageHugeFreed(struct page *head)
1954 ++{
1955 ++ return page_private(head + 4) == -1UL;
1956 ++}
1957 ++
1958 ++static inline void SetPageHugeFreed(struct page *head)
1959 ++{
1960 ++ set_page_private(head + 4, -1UL);
1961 ++}
1962 ++
1963 ++static inline void ClearPageHugeFreed(struct page *head)
1964 ++{
1965 ++ set_page_private(head + 4, 0);
1966 ++}
1967 ++
1968 + /* Forward declaration */
1969 + static int hugetlb_acct_memory(struct hstate *h, long delta);
1970 +
1971 +@@ -869,6 +884,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
1972 + list_move(&page->lru, &h->hugepage_freelists[nid]);
1973 + h->free_huge_pages++;
1974 + h->free_huge_pages_node[nid]++;
1975 ++ SetPageHugeFreed(page);
1976 + }
1977 +
1978 + static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
1979 +@@ -886,6 +902,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
1980 + return NULL;
1981 + list_move(&page->lru, &h->hugepage_activelist);
1982 + set_page_refcounted(page);
1983 ++ ClearPageHugeFreed(page);
1984 + h->free_huge_pages--;
1985 + h->free_huge_pages_node[nid]--;
1986 + return page;
1987 +@@ -1217,12 +1234,11 @@ struct hstate *size_to_hstate(unsigned long size)
1988 + */
1989 + bool page_huge_active(struct page *page)
1990 + {
1991 +- VM_BUG_ON_PAGE(!PageHuge(page), page);
1992 +- return PageHead(page) && PagePrivate(&page[1]);
1993 ++ return PageHeadHuge(page) && PagePrivate(&page[1]);
1994 + }
1995 +
1996 + /* never called for tail page */
1997 +-static void set_page_huge_active(struct page *page)
1998 ++void set_page_huge_active(struct page *page)
1999 + {
2000 + VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
2001 + SetPagePrivate(&page[1]);
2002 +@@ -1375,6 +1391,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
2003 + set_hugetlb_cgroup(page, NULL);
2004 + h->nr_huge_pages++;
2005 + h->nr_huge_pages_node[nid]++;
2006 ++ ClearPageHugeFreed(page);
2007 + spin_unlock(&hugetlb_lock);
2008 + }
2009 +
2010 +@@ -1602,6 +1619,7 @@ int dissolve_free_huge_page(struct page *page)
2011 + {
2012 + int rc = -EBUSY;
2013 +
2014 ++retry:
2015 + /* Not to disrupt normal path by vainly holding hugetlb_lock */
2016 + if (!PageHuge(page))
2017 + return 0;
2018 +@@ -1618,6 +1636,26 @@ int dissolve_free_huge_page(struct page *page)
2019 + int nid = page_to_nid(head);
2020 + if (h->free_huge_pages - h->resv_huge_pages == 0)
2021 + goto out;
2022 ++
2023 ++ /*
2024 ++ * We should make sure that the page is already on the free list
2025 ++ * when it is dissolved.
2026 ++ */
2027 ++ if (unlikely(!PageHugeFreed(head))) {
2028 ++ spin_unlock(&hugetlb_lock);
2029 ++ cond_resched();
2030 ++
2031 ++ /*
2032 ++ * Theoretically, we should return -EBUSY when we
2033 ++ * encounter this race. In fact, we have a chance
2034 ++ * to successfully dissolve the page if we retry,
2035 ++ * because the race window is quite small. Seizing
2036 ++ * this opportunity is an optimization that increases
2037 ++ * the success rate of dissolving the page.
2038 ++ */
2039 ++ goto retry;
2040 ++ }
2041 ++
2042 + /*
2043 + * Move PageHWPoison flag from head page to the raw error page,
2044 + * which makes any subpages rather than the error page reusable.
2045 +@@ -5136,9 +5174,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
2046 + {
2047 + bool ret = true;
2048 +
2049 +- VM_BUG_ON_PAGE(!PageHead(page), page);
2050 + spin_lock(&hugetlb_lock);
2051 +- if (!page_huge_active(page) || !get_page_unless_zero(page)) {
2052 ++ if (!PageHeadHuge(page) || !page_huge_active(page) ||
2053 ++ !get_page_unless_zero(page)) {
2054 + ret = false;
2055 + goto unlock;
2056 + }
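The hugetlb.c change above tracks whether a huge page sits on the free list by stashing a sentinel in the otherwise-unused page_private field of the fifth subpage (head + 4), and dissolve_free_huge_page() retries until the flag is visible. A toy model of the flag storage (struct page here is a one-field stand-in, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

/* A huge page is a run of struct page entries; the flag lives in an
 * otherwise unused field ("private") of the 5th subpage (head + 4). */
struct page { unsigned long private; };

static bool PageHugeFreed(struct page *head)
{
	return head[4].private == (unsigned long)-1;
}

static void SetPageHugeFreed(struct page *head)
{
	head[4].private = (unsigned long)-1;
}

static void ClearPageHugeFreed(struct page *head)
{
	head[4].private = 0;
}

int main(void)
{
	struct page huge[512] = { { 0 } };   /* 2 MiB page = 512 subpages */

	SetPageHugeFreed(huge);
	printf("freed? %d\n", PageHugeFreed(huge));   /* 1 */
	ClearPageHugeFreed(huge);
	printf("freed? %d\n", PageHugeFreed(huge));   /* 0 */
	return 0;
}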
2057 +diff --git a/mm/memblock.c b/mm/memblock.c
2058 +index c4b16cae2bc9b..11f6ae37d6699 100644
2059 +--- a/mm/memblock.c
2060 ++++ b/mm/memblock.c
2061 +@@ -257,14 +257,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
2062 + *
2063 + * Find @size free area aligned to @align in the specified range and node.
2064 + *
2065 +- * When allocation direction is bottom-up, the @start should be greater
2066 +- * than the end of the kernel image. Otherwise, it will be trimmed. The
2067 +- * reason is that we want the bottom-up allocation just near the kernel
2068 +- * image so it is highly likely that the allocated memory and the kernel
2069 +- * will reside in the same node.
2070 +- *
2071 +- * If bottom-up allocation failed, will try to allocate memory top-down.
2072 +- *
2073 + * Return:
2074 + * Found address on success, 0 on failure.
2075 + */
2076 +@@ -273,8 +265,6 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
2077 + phys_addr_t end, int nid,
2078 + enum memblock_flags flags)
2079 + {
2080 +- phys_addr_t kernel_end, ret;
2081 +-
2082 + /* pump up @end */
2083 + if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
2084 + end == MEMBLOCK_ALLOC_KASAN)
2085 +@@ -283,40 +273,13 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
2086 + /* avoid allocating the first page */
2087 + start = max_t(phys_addr_t, start, PAGE_SIZE);
2088 + end = max(start, end);
2089 +- kernel_end = __pa_symbol(_end);
2090 +-
2091 +- /*
2092 +- * try bottom-up allocation only when bottom-up mode
2093 +- * is set and @end is above the kernel image.
2094 +- */
2095 +- if (memblock_bottom_up() && end > kernel_end) {
2096 +- phys_addr_t bottom_up_start;
2097 +-
2098 +- /* make sure we will allocate above the kernel */
2099 +- bottom_up_start = max(start, kernel_end);
2100 +
2101 +- /* ok, try bottom-up allocation first */
2102 +- ret = __memblock_find_range_bottom_up(bottom_up_start, end,
2103 +- size, align, nid, flags);
2104 +- if (ret)
2105 +- return ret;
2106 +-
2107 +- /*
2108 +- * we always limit bottom-up allocation above the kernel,
2109 +- * but top-down allocation doesn't have the limit, so
2110 +- * retrying top-down allocation may succeed when bottom-up
2111 +- * allocation failed.
2112 +- *
2113 +- * bottom-up allocation is expected to be fail very rarely,
2114 +- * so we use WARN_ONCE() here to see the stack trace if
2115 +- * fail happens.
2116 +- */
2117 +- WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
2118 +- "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
2119 +- }
2120 +-
2121 +- return __memblock_find_range_top_down(start, end, size, align, nid,
2122 +- flags);
2123 ++ if (memblock_bottom_up())
2124 ++ return __memblock_find_range_bottom_up(start, end, size, align,
2125 ++ nid, flags);
2126 ++ else
2127 ++ return __memblock_find_range_top_down(start, end, size, align,
2128 ++ nid, flags);
2129 + }
2130 +
2131 + /**
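After the memblock.c simplification above, memblock_find_in_range_node() is a plain either/or: bottom-up when memblock_bottom_up() is set, top-down otherwise, with no special case for the kernel image. A toy free-slot finder showing the two search directions (NSLOTS and the used[] map are invented for the sketch):

#include <stdio.h>

#define NSLOTS 16
static int used[NSLOTS] = { [0] = 1, [1] = 1, [7] = 1, [15] = 1 };

static int range_free(int s, int size)
{
	for (int i = s; i < s + size; i++)
		if (used[i])
			return 0;
	return 1;
}

static int find_bottom_up(int size)
{
	for (int s = 0; s + size <= NSLOTS; s++)
		if (range_free(s, size))
			return s;
	return -1;
}

static int find_top_down(int size)
{
	for (int s = NSLOTS - size; s >= 0; s--)
		if (range_free(s, size))
			return s;
	return -1;
}

/* The simplified either/or dispatch. */
static int find_range(int size, int bottom_up)
{
	return bottom_up ? find_bottom_up(size) : find_top_down(size);
}

int main(void)
{
	printf("bottom-up: %d\n", find_range(3, 1));   /* 2 */
	printf("top-down:  %d\n", find_range(3, 0));   /* 12 */
	return 0;
}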
2132 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2133 +index 6c270fce200f4..7080d708b7d08 100644
2134 +--- a/net/core/neighbour.c
2135 ++++ b/net/core/neighbour.c
2136 +@@ -1244,13 +1244,14 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
2137 + old = neigh->nud_state;
2138 + err = -EPERM;
2139 +
2140 +- if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
2141 +- (old & (NUD_NOARP | NUD_PERMANENT)))
2142 +- goto out;
2143 + if (neigh->dead) {
2144 + NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
2145 ++ new = old;
2146 + goto out;
2147 + }
2148 ++ if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
2149 ++ (old & (NUD_NOARP | NUD_PERMANENT)))
2150 ++ goto out;
2151 +
2152 + ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
2153 +
2154 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
2155 +index ca525cf681a4e..f64d1743b86d6 100644
2156 +--- a/net/ipv4/ip_tunnel.c
2157 ++++ b/net/ipv4/ip_tunnel.c
2158 +@@ -317,7 +317,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
2159 + }
2160 +
2161 + dev->needed_headroom = t_hlen + hlen;
2162 +- mtu -= (dev->hard_header_len + t_hlen);
2163 ++ mtu -= t_hlen;
2164 +
2165 + if (mtu < IPV4_MIN_MTU)
2166 + mtu = IPV4_MIN_MTU;
2167 +@@ -347,7 +347,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
2168 + nt = netdev_priv(dev);
2169 + t_hlen = nt->hlen + sizeof(struct iphdr);
2170 + dev->min_mtu = ETH_MIN_MTU;
2171 +- dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
2172 ++ dev->max_mtu = IP_MAX_MTU - t_hlen;
2173 + ip_tunnel_add(itn, nt);
2174 + return nt;
2175 +
2176 +@@ -494,11 +494,10 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
2177 + int mtu;
2178 +
2179 + tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
2180 +- pkt_size = skb->len - tunnel_hlen - dev->hard_header_len;
2181 ++ pkt_size = skb->len - tunnel_hlen;
2182 +
2183 + if (df)
2184 +- mtu = dst_mtu(&rt->dst) - dev->hard_header_len
2185 +- - sizeof(struct iphdr) - tunnel_hlen;
2186 ++ mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen);
2187 + else
2188 + mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
2189 +
2190 +@@ -964,7 +963,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
2191 + {
2192 + struct ip_tunnel *tunnel = netdev_priv(dev);
2193 + int t_hlen = tunnel->hlen + sizeof(struct iphdr);
2194 +- int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
2195 ++ int max_mtu = IP_MAX_MTU - t_hlen;
2196 +
2197 + if (new_mtu < ETH_MIN_MTU)
2198 + return -EINVAL;
2199 +@@ -1141,10 +1140,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
2200 +
2201 + mtu = ip_tunnel_bind_dev(dev);
2202 + if (tb[IFLA_MTU]) {
2203 +- unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
2204 ++ unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
2205 +
2206 +- mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
2207 +- (unsigned int)(max - sizeof(struct iphdr)));
2208 ++ mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
2209 + }
2210 +
2211 + err = dev_set_mtu(dev, mtu);
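The ip_tunnel.c hunks above all fix the same arithmetic: the usable tunnel MTU is bounded by IP_MAX_MTU minus only the tunnel header t_hlen (encap header plus outer IPv4 header); dev->hard_header_len was being subtracted where it does not belong. A small sketch of the resulting clamp (clamp_tunnel_mtu and the header sizes are illustrative, not taken from a real device):

#include <stdio.h>

#define IP_MAX_MTU   0xFFFFU
#define ETH_MIN_MTU  68U

/* Clamp a requested MTU to [ETH_MIN_MTU, IP_MAX_MTU - t_hlen]. */
static unsigned int clamp_tunnel_mtu(unsigned int req, unsigned int t_hlen)
{
	unsigned int max = IP_MAX_MTU - t_hlen;

	if (req < ETH_MIN_MTU)
		return ETH_MIN_MTU;
	return req > max ? max : req;
}

int main(void)
{
	unsigned int t_hlen = 8 + 20;   /* e.g. GRE header + outer IPv4 */

	printf("%u\n", clamp_tunnel_mtu(1500, t_hlen));    /* 1500  */
	printf("%u\n", clamp_tunnel_mtu(70000, t_hlen));   /* 65507 */
	printf("%u\n", clamp_tunnel_mtu(10, t_hlen));      /* 68    */
	return 0;
}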
2212 +diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
2213 +index 7a4d0715d1c32..a966d29c772d9 100644
2214 +--- a/net/lapb/lapb_out.c
2215 ++++ b/net/lapb/lapb_out.c
2216 +@@ -82,7 +82,8 @@ void lapb_kick(struct lapb_cb *lapb)
2217 + skb = skb_dequeue(&lapb->write_queue);
2218 +
2219 + do {
2220 +- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
2221 ++ skbn = skb_copy(skb, GFP_ATOMIC);
2222 ++ if (!skbn) {
2223 + skb_queue_head(&lapb->write_queue, skb);
2224 + break;
2225 + }
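The LAPB fix above swaps skb_clone() for skb_copy(): a clone shares the underlying data buffer with the frame that stays on the write queue, so modifying the transmitted copy would corrupt the queued original, while skb_copy() duplicates the data. A user-space analogue of that aliasing difference (struct buf, clone_buf and copy_buf are inventions of the sketch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A clone shares the underlying data, so later writes alias the queued
 * frame; a copy owns private data that may safely be modified. */
struct buf { char *data; int shared; };

static struct buf clone_buf(struct buf *b)   /* like skb_clone() */
{
	return (struct buf){ .data = b->data, .shared = 1 };
}

static struct buf copy_buf(struct buf *b)    /* like skb_copy() */
{
	return (struct buf){ .data = strdup(b->data), .shared = 0 };
}

int main(void)
{
	struct buf orig = { .data = strdup("frame"), .shared = 0 };
	struct buf c1 = clone_buf(&orig);
	struct buf c2 = copy_buf(&orig);

	c1.data[0] = 'X';   /* aliases orig: the queued frame is changed */
	printf("orig=%s copy=%s\n", orig.data, c2.data);   /* Xrame frame */

	free(orig.data);
	free(c2.data);
	return 0;
}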
2226 +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
2227 +index c9a8a2433e8ac..48322e45e7ddb 100644
2228 +--- a/net/mac80211/driver-ops.c
2229 ++++ b/net/mac80211/driver-ops.c
2230 +@@ -125,8 +125,11 @@ int drv_sta_state(struct ieee80211_local *local,
2231 + } else if (old_state == IEEE80211_STA_AUTH &&
2232 + new_state == IEEE80211_STA_ASSOC) {
2233 + ret = drv_sta_add(local, sdata, &sta->sta);
2234 +- if (ret == 0)
2235 ++ if (ret == 0) {
2236 + sta->uploaded = true;
2237 ++ if (rcu_access_pointer(sta->sta.rates))
2238 ++ drv_sta_rate_tbl_update(local, sdata, &sta->sta);
2239 ++ }
2240 + } else if (old_state == IEEE80211_STA_ASSOC &&
2241 + new_state == IEEE80211_STA_AUTH) {
2242 + drv_sta_remove(local, sdata, &sta->sta);
2243 +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
2244 +index b051f125d3af2..9841db84bce0a 100644
2245 +--- a/net/mac80211/rate.c
2246 ++++ b/net/mac80211/rate.c
2247 +@@ -934,7 +934,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
2248 + if (old)
2249 + kfree_rcu(old, rcu_head);
2250 +
2251 +- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
2252 ++ if (sta->uploaded)
2253 ++ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
2254 +
2255 + ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta));
2256 +
2257 +diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
2258 +index 2921fc2767134..9bacec6653bac 100644
2259 +--- a/net/rxrpc/af_rxrpc.c
2260 ++++ b/net/rxrpc/af_rxrpc.c
2261 +@@ -976,7 +976,7 @@ static int __init af_rxrpc_init(void)
2262 + goto error_security;
2263 + }
2264 +
2265 +- ret = register_pernet_subsys(&rxrpc_net_ops);
2266 ++ ret = register_pernet_device(&rxrpc_net_ops);
2267 + if (ret)
2268 + goto error_pernet;
2269 +
2270 +@@ -1021,7 +1021,7 @@ error_key_type:
2271 + error_sock:
2272 + proto_unregister(&rxrpc_proto);
2273 + error_proto:
2274 +- unregister_pernet_subsys(&rxrpc_net_ops);
2275 ++ unregister_pernet_device(&rxrpc_net_ops);
2276 + error_pernet:
2277 + rxrpc_exit_security();
2278 + error_security:
2279 +@@ -1043,7 +1043,7 @@ static void __exit af_rxrpc_exit(void)
2280 + unregister_key_type(&key_type_rxrpc);
2281 + sock_unregister(PF_RXRPC);
2282 + proto_unregister(&rxrpc_proto);
2283 +- unregister_pernet_subsys(&rxrpc_net_ops);
2284 ++ unregister_pernet_device(&rxrpc_net_ops);
2285 + ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
2286 + ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
2287 +