
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.17 commit in: /
Date: Wed, 27 Apr 2022 13:10:44
Message-Id: 1651065012.b08b8be59e2765bb2d36321ab9f026eeb6c43e87.mpagano@gentoo
commit: b08b8be59e2765bb2d36321ab9f026eeb6c43e87
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 27 13:10:12 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 27 13:10:12 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b08b8be5

Linux patch 5.17.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 6 +-
1004_linux-5.17.5.patch | 5187 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5192 insertions(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index 1f4d8246..d851c5b7 100644
--- a/0000_README
+++ b/0000_README
@@ -55,10 +55,14 @@ Patch: 1002_linux-5.17.3.patch
From: http://www.kernel.org
Desc: Linux 5.17.3

-Patch: 1003_linux-5.17.3.patch
+Patch: 1003_linux-5.17.4.patch
From: http://www.kernel.org
Desc: Linux 5.17.4

+Patch: 1004_linux-5.17.5.patch
+From: http://www.kernel.org
+Desc: Linux 5.17.5
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-5.17.5.patch b/1004_linux-5.17.5.patch
new file mode 100644
index 00000000..024660ad
--- /dev/null
+++ b/1004_linux-5.17.5.patch
@@ -0,0 +1,5187 @@
+diff --git a/Documentation/filesystems/ext4/attributes.rst b/Documentation/filesystems/ext4/attributes.rst
+index 54386a010a8d7..871d2da7a0a91 100644
+--- a/Documentation/filesystems/ext4/attributes.rst
++++ b/Documentation/filesystems/ext4/attributes.rst
+@@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
+ - Checksum of the extended attribute block.
+ * - 0x14
+ - \_\_u32
+- - h\_reserved[2]
++ - h\_reserved[3]
+ - Zero.
+
+ The checksum is calculated against the FS UUID, the 64-bit block number
+diff --git a/Makefile b/Makefile
+index d7747e4c216e4..3ad5dc6be3930 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 17
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Superb Owl
+
+diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
+index dd77a0c8f740b..66ba549b520fc 100644
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -196,6 +196,7 @@ tracesys_exit:
+ st r0, [sp, PT_r0] ; sys call return value in pt_regs
+
+ ;POST Sys Call Ptrace Hook
++ mov r0, sp ; pt_regs needed
+ bl @syscall_trace_exit
+ b ret_from_exception ; NOT ret_from_system_call at is saves r0 which
+ ; we'd done before calling post hook above
+diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
+index 1da11bdb1dfbd..1c6500c4e6a17 100644
+--- a/arch/arm/mach-vexpress/spc.c
++++ b/arch/arm/mach-vexpress/spc.c
+@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
+ }
+
+ cluster = topology_physical_package_id(cpu_dev->id);
+- if (init_opp_table[cluster])
++ if (cluster < 0 || init_opp_table[cluster])
+ continue;
+
+ if (ve_init_opp_table(cpu_dev))
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index ec5b082f3de6e..07eb69f9e7df3 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -337,12 +337,15 @@ int __init arch_xen_unpopulated_init(struct resource **res)
+
+ if (!nr_reg) {
+ pr_err("No extended regions are found\n");
++ of_node_put(np);
+ return -EINVAL;
+ }
+
+ regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
+- if (!regs)
++ if (!regs) {
++ of_node_put(np);
+ return -ENOMEM;
++ }
+
+ /*
+ * Create resource from extended regions provided by the hypervisor to be
+@@ -403,8 +406,8 @@ int __init arch_xen_unpopulated_init(struct resource **res)
+ *res = &xen_resource;
+
+ err:
++ of_node_put(np);
+ kfree(regs);
+-
+ return rc;
+ }
+ #endif
+@@ -424,8 +427,10 @@ static void __init xen_dt_guest_init(void)
+
+ if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
+ pr_err("Xen grant table region is not found\n");
++ of_node_put(xen_node);
+ return;
+ }
++ of_node_put(xen_node);
+ xen_grant_frames = res.start;
+ }
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
+index 1dc9d187601c5..a0bd540f27d3d 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
+@@ -89,12 +89,12 @@
+ pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
+
+ ti,x-min = /bits/ 16 <125>;
+- touchscreen-size-x = /bits/ 16 <4008>;
++ touchscreen-size-x = <4008>;
+ ti,y-min = /bits/ 16 <282>;
+- touchscreen-size-y = /bits/ 16 <3864>;
++ touchscreen-size-y = <3864>;
+ ti,x-plate-ohms = /bits/ 16 <180>;
+- touchscreen-max-pressure = /bits/ 16 <255>;
+- touchscreen-average-samples = /bits/ 16 <10>;
++ touchscreen-max-pressure = <255>;
++ touchscreen-average-samples = <10>;
+ ti,debounce-tol = /bits/ 16 <3>;
+ ti,debounce-rep = /bits/ 16 <1>;
+ ti,settle-delay-usec = /bits/ 16 <150>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+index b16c7caf34c11..87b5e23c766f7 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+@@ -70,12 +70,12 @@
+ pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
+
+ ti,x-min = /bits/ 16 <125>;
+- touchscreen-size-x = /bits/ 16 <4008>;
++ touchscreen-size-x = <4008>;
+ ti,y-min = /bits/ 16 <282>;
+- touchscreen-size-y = /bits/ 16 <3864>;
++ touchscreen-size-y = <3864>;
+ ti,x-plate-ohms = /bits/ 16 <180>;
+- touchscreen-max-pressure = /bits/ 16 <255>;
+- touchscreen-average-samples = /bits/ 16 <10>;
++ touchscreen-max-pressure = <255>;
++ touchscreen-average-samples = <10>;
+ ti,debounce-tol = /bits/ 16 <3>;
+ ti,debounce-rep = /bits/ 16 <1>;
+ ti,settle-delay-usec = /bits/ 16 <150>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index 2151cd8c8c7ab..e1c46b80f14a0 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -1459,6 +1459,8 @@
+ "imem",
+ "config";
+
++ qcom,qmp = <&aoss_qmp>;
++
+ qcom,smem-states = <&ipa_smp2p_out 0>,
+ <&ipa_smp2p_out 1>;
+ qcom,smem-state-names = "ipa-clock-enabled-valid",
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index eab7a85050531..d66865131ef90 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -1714,6 +1714,8 @@
+ interconnect-names = "memory",
+ "config";
+
++ qcom,qmp = <&aoss_qmp>;
++
+ qcom,smem-states = <&ipa_smp2p_out 0>,
+ <&ipa_smp2p_out 1>;
+ qcom,smem-state-names = "ipa-clock-enabled-valid",
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 765d018e6306c..0bde6bbb3bc74 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -1443,6 +1443,8 @@
+ interconnect-names = "memory",
+ "config";
+
++ qcom,qmp = <&aoss_qmp>;
++
+ qcom,smem-states = <&ipa_smp2p_out 0>,
+ <&ipa_smp2p_out 1>;
+ qcom,smem-state-names = "ipa-clock-enabled-valid",
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 94e147e5456ca..dff2b483ea509 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -535,7 +535,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ PMD_TYPE_TABLE)
+ #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_SECT)
+-#define pmd_leaf(pmd) pmd_sect(pmd)
++#define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd))
+ #define pmd_bad(pmd) (!pmd_table(pmd))
+
+ #define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
+@@ -625,7 +625,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+ #define pud_none(pud) (!pud_val(pud))
+ #define pud_bad(pud) (!pud_table(pud))
+ #define pud_present(pud) pte_present(pud_pte(pud))
+-#define pud_leaf(pud) pud_sect(pud)
++#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
+ #define pud_valid(pud) pte_valid(pud_pte(pud))
+
+ static inline void set_pud(pud_t *pudp, pud_t pud)
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 384f58a3f373f..5f8933aec75ce 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -610,23 +610,22 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
+ return;
+ }
+
+- /* Conditionally hard-enable interrupts. */
+- if (should_hard_irq_enable()) {
+- /*
+- * Ensure a positive value is written to the decrementer, or
+- * else some CPUs will continue to take decrementer exceptions.
+- * When the PPC_WATCHDOG (decrementer based) is configured,
+- * keep this at most 31 bits, which is about 4 seconds on most
+- * systems, which gives the watchdog a chance of catching timer
+- * interrupt hard lockups.
+- */
+- if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
+- set_dec(0x7fffffff);
+- else
+- set_dec(decrementer_max);
++ /*
++ * Ensure a positive value is written to the decrementer, or
++ * else some CPUs will continue to take decrementer exceptions.
++ * When the PPC_WATCHDOG (decrementer based) is configured,
++ * keep this at most 31 bits, which is about 4 seconds on most
++ * systems, which gives the watchdog a chance of catching timer
++ * interrupt hard lockups.
++ */
++ if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
++ set_dec(0x7fffffff);
++ else
++ set_dec(decrementer_max);
+
++ /* Conditionally hard-enable interrupts. */
++ if (should_hard_irq_enable())
+ do_hard_irq_enable();
+- }
+
+ #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
+ if (atomic_read(&ppc_n_lost_interrupts) != 0)
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index d42b4b6d4a791..85cfa6328222b 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -420,13 +420,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
+ tbl[idx % TCES_PER_PAGE] = tce;
+ }
+
+-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
+- unsigned long entry)
++static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
++ struct iommu_table *tbl, unsigned long entry)
+ {
+- unsigned long hpa = 0;
+- enum dma_data_direction dir = DMA_NONE;
++ unsigned long i;
++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
++ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
++
++ for (i = 0; i < subpages; ++i) {
++ unsigned long hpa = 0;
++ enum dma_data_direction dir = DMA_NONE;
+
+- iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
++ iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
++ }
+ }
+
+ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
+@@ -485,6 +491,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
+ break;
+ }
+
++ iommu_tce_kill(tbl, io_entry, subpages);
++
+ return ret;
+ }
+
+@@ -544,6 +552,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
+ break;
+ }
+
++ iommu_tce_kill(tbl, io_entry, subpages);
++
+ return ret;
+ }
+
+@@ -590,10 +600,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
+ entry, ua, dir);
+
+- iommu_tce_kill(stit->tbl, entry, 1);
+
+ if (ret != H_SUCCESS) {
+- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
+ goto unlock_exit;
+ }
+ }
+@@ -669,13 +678,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ */
+ if (get_user(tce, tces + i)) {
+ ret = H_TOO_HARD;
+- goto invalidate_exit;
++ goto unlock_exit;
+ }
+ tce = be64_to_cpu(tce);
+
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
+ ret = H_PARAMETER;
+- goto invalidate_exit;
++ goto unlock_exit;
+ }
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+@@ -684,19 +693,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ iommu_tce_direction(tce));
+
+ if (ret != H_SUCCESS) {
+- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
+- entry);
+- goto invalidate_exit;
++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
++ entry + i);
++ goto unlock_exit;
+ }
+ }
+
+ kvmppc_tce_put(stt, entry + i, tce);
+ }
+
+-invalidate_exit:
+- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+- iommu_tce_kill(stit->tbl, entry, npages);
+-
+ unlock_exit:
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+@@ -735,20 +740,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+ continue;
+
+ if (ret == H_TOO_HARD)
+- goto invalidate_exit;
++ return ret;
+
+ WARN_ON_ONCE(1);
+- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
+ }
+ }
+
+ for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
+ kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
+
+-invalidate_exit:
+- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+- iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
+-
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
+diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
+index 870b7f0c7ea56..fdeda6a9cff44 100644
+--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
+@@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl,
+ tbl->it_ops->tce_kill(tbl, entry, pages, true);
+ }
+
+-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
+- unsigned long entry)
++static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
++ struct iommu_table *tbl, unsigned long entry)
+ {
+- unsigned long hpa = 0;
+- enum dma_data_direction dir = DMA_NONE;
++ unsigned long i;
++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
++ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
++
++ for (i = 0; i < subpages; ++i) {
++ unsigned long hpa = 0;
++ enum dma_data_direction dir = DMA_NONE;
+
+- iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
++ iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
++ }
+ }
+
+ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
+@@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+ break;
+ }
+
++ iommu_tce_kill_rm(tbl, io_entry, subpages);
++
+ return ret;
+ }
+
+@@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
+ break;
+ }
+
++ iommu_tce_kill_rm(tbl, io_entry, subpages);
++
+ return ret;
+ }
+
+@@ -420,10 +430,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
+ stit->tbl, entry, ua, dir);
+
+- iommu_tce_kill_rm(stit->tbl, entry, 1);
+-
+ if (ret != H_SUCCESS) {
+- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
+ return ret;
+ }
+ }
+@@ -561,7 +569,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ ua = 0;
+ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
+ ret = H_PARAMETER;
+- goto invalidate_exit;
++ goto unlock_exit;
+ }
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+@@ -570,19 +578,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ iommu_tce_direction(tce));
+
+ if (ret != H_SUCCESS) {
+- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
+- entry);
+- goto invalidate_exit;
++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
++ entry + i);
++ goto unlock_exit;
+ }
+ }
+
+ kvmppc_rm_tce_put(stt, entry + i, tce);
+ }
+
+-invalidate_exit:
+- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+- iommu_tce_kill_rm(stit->tbl, entry, npages);
+-
+ unlock_exit:
+ if (!prereg)
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+@@ -620,20 +624,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
+ continue;
+
+ if (ret == H_TOO_HARD)
+- goto invalidate_exit;
++ return ret;
+
+ WARN_ON_ONCE_RM(1);
+- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
+ }
+ }
+
+ for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
+ kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
+
+-invalidate_exit:
+- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+- iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
+-
+ return ret;
+ }
+
+diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
+index 0975ad0b42c42..69b4565d1a8f0 100644
+--- a/arch/powerpc/perf/power10-pmu.c
++++ b/arch/powerpc/perf/power10-pmu.c
+@@ -91,8 +91,8 @@ extern u64 PERF_REG_EXTENDED_MASK;
+
+ /* Table of alternatives, sorted by column 0 */
+ static const unsigned int power10_event_alternatives[][MAX_ALT] = {
+- { PM_CYC_ALT, PM_CYC },
+ { PM_INST_CMPL_ALT, PM_INST_CMPL },
++ { PM_CYC_ALT, PM_CYC },
+ };
+
+ static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
+index 4b7c17e361003..37b2860db4833 100644
+--- a/arch/powerpc/perf/power9-pmu.c
++++ b/arch/powerpc/perf/power9-pmu.c
+@@ -133,11 +133,11 @@ int p9_dd22_bl_ev[] = {
+
+ /* Table of alternatives, sorted by column 0 */
+ static const unsigned int power9_event_alternatives[][MAX_ALT] = {
+- { PM_INST_DISP, PM_INST_DISP_ALT },
+- { PM_RUN_CYC_ALT, PM_RUN_CYC },
+- { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
+- { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
+ { PM_BR_2PATH, PM_BR_2PATH_ALT },
++ { PM_INST_DISP, PM_INST_DISP_ALT },
++ { PM_RUN_CYC_ALT, PM_RUN_CYC },
++ { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
++ { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
+ };
+
+ static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
+index 6785aef4cbd46..aad430668bb4d 100644
+--- a/arch/riscv/kvm/vcpu.c
++++ b/arch/riscv/kvm/vcpu.c
+@@ -38,14 +38,16 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
+ sizeof(kvm_vcpu_stats_desc),
+ };
+
+-#define KVM_RISCV_ISA_ALLOWED (riscv_isa_extension_mask(a) | \
+- riscv_isa_extension_mask(c) | \
+- riscv_isa_extension_mask(d) | \
+- riscv_isa_extension_mask(f) | \
+- riscv_isa_extension_mask(i) | \
+- riscv_isa_extension_mask(m) | \
+- riscv_isa_extension_mask(s) | \
+- riscv_isa_extension_mask(u))
++#define KVM_RISCV_ISA_DISABLE_ALLOWED (riscv_isa_extension_mask(d) | \
++ riscv_isa_extension_mask(f))
++
++#define KVM_RISCV_ISA_DISABLE_NOT_ALLOWED (riscv_isa_extension_mask(a) | \
++ riscv_isa_extension_mask(c) | \
++ riscv_isa_extension_mask(i) | \
++ riscv_isa_extension_mask(m))
++
++#define KVM_RISCV_ISA_ALLOWED (KVM_RISCV_ISA_DISABLE_ALLOWED | \
++ KVM_RISCV_ISA_DISABLE_NOT_ALLOWED)
+
+ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
+ {
+@@ -219,7 +221,8 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
+ switch (reg_num) {
+ case KVM_REG_RISCV_CONFIG_REG(isa):
+ if (!vcpu->arch.ran_atleast_once) {
+- vcpu->arch.isa = reg_val;
++ /* Ignore the disable request for these extensions */
++ vcpu->arch.isa = reg_val | KVM_RISCV_ISA_DISABLE_NOT_ALLOWED;
+ vcpu->arch.isa &= riscv_isa_extension_base(NULL);
+ vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
+ kvm_riscv_vcpu_fp_reset(vcpu);
+diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
+index 7516e4199b3c6..20fd0acd7d800 100644
+--- a/arch/x86/include/asm/compat.h
++++ b/arch/x86/include/asm/compat.h
+@@ -28,15 +28,13 @@ typedef u16 compat_ipc_pid_t;
+ typedef __kernel_fsid_t compat_fsid_t;
+
+ struct compat_stat {
+- compat_dev_t st_dev;
+- u16 __pad1;
++ u32 st_dev;
+ compat_ino_t st_ino;
+ compat_mode_t st_mode;
+ compat_nlink_t st_nlink;
+ __compat_uid_t st_uid;
+ __compat_gid_t st_gid;
+- compat_dev_t st_rdev;
+- u16 __pad2;
++ u32 st_rdev;
+ u32 st_size;
+ u32 st_blksize;
+ u32 st_blocks;
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 85ee96abba806..c4b4c0839dbdb 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -969,12 +969,10 @@ enum hv_tsc_page_status {
+ HV_TSC_PAGE_UNSET = 0,
+ /* TSC page MSR was written by the guest, update pending */
+ HV_TSC_PAGE_GUEST_CHANGED,
+- /* TSC page MSR was written by KVM userspace, update pending */
++ /* TSC page update was triggered from the host side */
+ HV_TSC_PAGE_HOST_CHANGED,
+ /* TSC page was properly set up and is currently active */
+ HV_TSC_PAGE_SET,
+- /* TSC page is currently being updated and therefore is inactive */
+- HV_TSC_PAGE_UPDATING,
+ /* TSC page was set up with an inaccessible GPA */
+ HV_TSC_PAGE_BROKEN,
+ };
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 10bc257d3803b..247ac71b7a10f 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -1128,11 +1128,13 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
+ BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
+ BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
+
++ mutex_lock(&hv->hv_lock);
++
+ if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
++ hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
+ hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
+- return;
++ goto out_unlock;
+
+- mutex_lock(&hv->hv_lock);
+ if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+ goto out_unlock;
+
+@@ -1194,45 +1196,19 @@ out_unlock:
+ mutex_unlock(&hv->hv_lock);
+ }
+
+-void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
++void kvm_hv_request_tsc_page_update(struct kvm *kvm)
+ {
+ struct kvm_hv *hv = to_kvm_hv(kvm);
+- u64 gfn;
+- int idx;
+-
+- if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+- hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
+- tsc_page_update_unsafe(hv))
+- return;
+
+ mutex_lock(&hv->hv_lock);
+
+- if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+- goto out_unlock;
+-
+- /* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
+- if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
+- hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
++ if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
++ !tsc_page_update_unsafe(hv))
++ hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
+
+- gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
+-
+- hv->tsc_ref.tsc_sequence = 0;
+-
+- /*
+- * Take the srcu lock as memslots will be accessed to check the gfn
+- * cache generation against the memslots generation.
+- */
+- idx = srcu_read_lock(&kvm->srcu);
+- if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+- &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+- hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
+- srcu_read_unlock(&kvm->srcu, idx);
+-
+-out_unlock:
+ mutex_unlock(&hv->hv_lock);
+ }
+
+-
+ static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
+ {
+ if (!hv_vcpu->enforce_cpuid)
+diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
+index ed1c4e546d049..3e79b4a9ed4ef 100644
+--- a/arch/x86/kvm/hyperv.h
++++ b/arch/x86/kvm/hyperv.h
+@@ -133,7 +133,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
+
+ void kvm_hv_setup_tsc_page(struct kvm *kvm,
+ struct pvclock_vcpu_time_info *hv_clock);
+-void kvm_hv_invalidate_tsc_page(struct kvm *kvm);
++void kvm_hv_request_tsc_page_update(struct kvm *kvm);
+
+ void kvm_hv_init_vm(struct kvm *kvm);
+ void kvm_hv_destroy_vm(struct kvm *kvm);
+diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
+index 7a7b8d5b775e9..5e7e8d163b985 100644
+--- a/arch/x86/kvm/pmu.h
++++ b/arch/x86/kvm/pmu.h
+@@ -140,6 +140,15 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
+ return sample_period;
+ }
+
++static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
++{
++ if (!pmc->perf_event || pmc->is_paused)
++ return;
++
++ perf_event_period(pmc->perf_event,
++ get_sample_period(pmc, pmc->counter));
++}
++
+ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
+ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
+ void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
+diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
+index ba40b7fced5ae..b5b0837df0d11 100644
+--- a/arch/x86/kvm/svm/pmu.c
++++ b/arch/x86/kvm/svm/pmu.c
+@@ -257,6 +257,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
+ if (pmc) {
+ pmc->counter += data - pmc_read_counter(pmc);
++ pmc_update_sample_period(pmc);
+ return 0;
+ }
+ /* MSR_EVNTSELn */
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index fef9758525826..e5cecd4ad2d44 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2204,51 +2204,39 @@ int sev_cpu_init(struct svm_cpu_data *sd)
+ * Pages used by hardware to hold guest encrypted state must be flushed before
+ * returning them to the system.
+ */
+-static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
+- unsigned long len)
++static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
+ {
++ int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
++
+ /*
+- * If hardware enforced cache coherency for encrypted mappings of the
+- * same physical page is supported, nothing to do.
++ * Note! The address must be a kernel address, as regular page walk
++ * checks are performed by VM_PAGE_FLUSH, i.e. operating on a user
++ * address is non-deterministic and unsafe. This function deliberately
++ * takes a pointer to deter passing in a user address.
+ */
+- if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
+- return;
++ unsigned long addr = (unsigned long)va;
+
+ /*
+- * If the VM Page Flush MSR is supported, use it to flush the page
+- * (using the page virtual address and the guest ASID).
++ * If CPU enforced cache coherency for encrypted mappings of the
++ * same physical page is supported, use CLFLUSHOPT instead. NOTE: cache
++ * flush is still needed in order to work properly with DMA devices.
+ */
+- if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
+- struct kvm_sev_info *sev;
+- unsigned long va_start;
+- u64 start, stop;
+-
+- /* Align start and stop to page boundaries. */
+- va_start = (unsigned long)va;
+- start = (u64)va_start & PAGE_MASK;
+- stop = PAGE_ALIGN((u64)va_start + len);
+-
+- if (start < stop) {
+- sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
+-
+- while (start < stop) {
+- wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
+- start | sev->asid);
+-
+- start += PAGE_SIZE;
+- }
+-
+- return;
+- }
+-
+- WARN(1, "Address overflow, using WBINVD\n");
++ if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
++ clflush_cache_range(va, PAGE_SIZE);
++ return;
+ }
+
+ /*
+- * Hardware should always have one of the above features,
+- * but if not, use WBINVD and issue a warning.
++ * VM Page Flush takes a host virtual address and a guest ASID. Fall
++ * back to WBINVD if this faults so as not to make any problems worse
++ * by leaving stale encrypted data in the cache.
+ */
+- WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
++ if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
++ goto do_wbinvd;
++
++ return;
++
++do_wbinvd:
+ wbinvd_on_all_cpus();
+ }
+
+@@ -2262,7 +2250,8 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
+ svm = to_svm(vcpu);
+
+ if (vcpu->arch.guest_state_protected)
+- sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
++ sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
++
+ __free_page(virt_to_page(svm->sev_es.vmsa));
+
+ if (svm->sev_es.ghcb_sa_free)
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index dc822a1d403d3..896ddf7392365 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4618,6 +4618,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
+ }
+
++ if (vmx->nested.update_vmcs01_apicv_status) {
++ vmx->nested.update_vmcs01_apicv_status = false;
++ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
++ }
++
+ if ((vm_exit_reason != -1) &&
+ (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
+ vmx->nested.need_vmcs12_to_shadow_sync = true;
+diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
+index 5fa3870b89881..a0c84761c9382 100644
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -431,15 +431,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ !(msr & MSR_PMC_FULL_WIDTH_BIT))
+ data = (s64)(s32)data;
+ pmc->counter += data - pmc_read_counter(pmc);
+- if (pmc->perf_event && !pmc->is_paused)
+- perf_event_period(pmc->perf_event,
+- get_sample_period(pmc, data));
++ pmc_update_sample_period(pmc);
+ return 0;
+ } else if ((pmc = get_fixed_pmc(pmu, msr))) {
+ pmc->counter += data - pmc_read_counter(pmc);
+- if (pmc->perf_event && !pmc->is_paused)
+- perf_event_period(pmc->perf_event,
+- get_sample_period(pmc, data));
++ pmc_update_sample_period(pmc);
+ return 0;
+ } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
+ if (data == pmc->eventsel)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index b730d799c26ed..ef63cfd57029a 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -4182,6 +4182,11 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
++ if (is_guest_mode(vcpu)) {
++ vmx->nested.update_vmcs01_apicv_status = true;
++ return;
++ }
++
+ pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
+ if (cpu_has_secondary_exec_ctrls()) {
+ if (kvm_vcpu_apicv_active(vcpu))
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index 9c6bfcd84008b..b98c7e96697a9 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -183,6 +183,7 @@ struct nested_vmx {
+ bool change_vmcs01_virtual_apic_mode;
+ bool reload_vmcs01_apic_access_page;
+ bool update_vmcs01_cpu_dirty_logging;
++ bool update_vmcs01_apicv_status;
+
+ /*
+ * Enlightened VMCS has been enabled. It does not mean that L1 has to
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 05128162ebd58..23d176cd12a4f 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2874,7 +2874,7 @@ static void kvm_end_pvclock_update(struct kvm *kvm)
+
+ static void kvm_update_masterclock(struct kvm *kvm)
+ {
+- kvm_hv_invalidate_tsc_page(kvm);
++ kvm_hv_request_tsc_page_update(kvm);
+ kvm_start_pvclock_update(kvm);
+ pvclock_update_vm_gtod_copy(kvm);
+ kvm_end_pvclock_update(kvm);
+@@ -3086,8 +3086,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+ offsetof(struct compat_vcpu_info, time));
+ if (vcpu->xen.vcpu_time_info_set)
+ kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
+- if (!v->vcpu_idx)
+- kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
++ kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
+ return 0;
+ }
+
+@@ -6190,7 +6189,7 @@ static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
+ if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
+ return -EINVAL;
+
+- kvm_hv_invalidate_tsc_page(kvm);
++ kvm_hv_request_tsc_page_update(kvm);
+ kvm_start_pvclock_update(kvm);
+ pvclock_update_vm_gtod_copy(kvm);
+
+@@ -10297,12 +10296,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
+
+ static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
+ {
+- int r;
+-
+- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+- r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
+- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+- return r;
++ return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
+ }
+
+ static int complete_emulated_pio(struct kvm_vcpu *vcpu)
+@@ -11119,8 +11113,21 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+ r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
+ if (r < 0)
+ goto fail_mmu_destroy;
+- if (kvm_apicv_activated(vcpu->kvm))
++
++ /*
++ * Defer evaluating inhibits until the vCPU is first run, as
++ * this vCPU will not get notified of any changes until this
++ * vCPU is visible to other vCPUs (marked online and added to
++ * the set of vCPUs). Opportunistically mark APICv active as
++ * VMX in particularly is highly unlikely to have inhibits.
++ * Ignore the current per-VM APICv state so that vCPU creation
++ * is guaranteed to run with a deterministic value, the request
++ * will ensure the vCPU gets the correct state before VM-Entry.
++ */
++ if (enable_apicv) {
+ vcpu->arch.apicv_active = true;
++ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
++ }
+ } else
+ static_branch_inc(&kvm_has_noapic_vcpu);
+
+diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
+index 45cc0ae0af6f9..c7b9f12896f20 100644
+--- a/arch/xtensa/kernel/coprocessor.S
++++ b/arch/xtensa/kernel/coprocessor.S
+@@ -29,7 +29,7 @@
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ .align 4; \
+ .Lsave_cp_regs_cp##x: \
+- xchal_cp##x##_store a2 a4 a5 a6 a7; \
++ xchal_cp##x##_store a2 a3 a4 a5 a6; \
+ jx a0; \
+ .endif
+
+@@ -46,7 +46,7 @@
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ .align 4; \
+ .Lload_cp_regs_cp##x: \
+- xchal_cp##x##_load a2 a4 a5 a6 a7; \
++ xchal_cp##x##_load a2 a3 a4 a5 a6; \
+ jx a0; \
+ .endif
+
+diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
+index 0dde21e0d3de4..ad1841cecdfb7 100644
+--- a/arch/xtensa/kernel/jump_label.c
++++ b/arch/xtensa/kernel/jump_label.c
+@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
+ {
+ struct patch *patch = data;
+
+- if (atomic_inc_return(&patch->cpu_count) == 1) {
++ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
+ local_patch_text(patch->addr, patch->data, patch->sz);
+ atomic_inc(&patch->cpu_count);
+ } else {
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 4a86340133e46..f8703db99c734 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -629,7 +629,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ return compat_put_long(argp,
+ (bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
+ case BLKGETSIZE:
+- if (bdev_nr_sectors(bdev) > ~0UL)
++ if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
+ return -EFBIG;
+ return compat_put_ulong(argp, bdev_nr_sectors(bdev));
+
+diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
+index 0c5a51970fbf5..014ccb0f45dc4 100644
+--- a/drivers/ata/pata_marvell.c
++++ b/drivers/ata/pata_marvell.c
+@@ -77,6 +77,8 @@ static int marvell_cable_detect(struct ata_port *ap)
+ switch(ap->port_no)
+ {
+ case 0:
++ if (!ap->ioaddr.bmdma_addr)
++ return ATA_CBL_PATA_UNK;
+ if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index 1476156af74b4..def564d1e8faf 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1453,7 +1453,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ {
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+- struct at_xdmac_desc *desc, *_desc;
++ struct at_xdmac_desc *desc, *_desc, *iter;
+ struct list_head *descs_list;
+ enum dma_status ret;
+ int residue, retry;
+@@ -1568,11 +1568,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ * microblock.
+ */
+ descs_list = &desc->descs_list;
+- list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
+- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+- residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
+- if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
++ list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
++ dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
++ residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
++ if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
++ desc = iter;
+ break;
++ }
+ }
+ residue += cur_ubc << dwidth;
+
+diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
+index 329fc2e57b703..b5b8f8181e776 100644
+--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
++++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
+@@ -415,8 +415,11 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+ (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
+ /* Linked list */
+ #ifdef CONFIG_64BIT
+- SET_CH_64(dw, chan->dir, chan->id, llp.reg,
+- chunk->ll_region.paddr);
++ /* llp is not aligned on 64bit -> keep 32bit accesses */
++ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
++ lower_32_bits(chunk->ll_region.paddr));
++ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
++ upper_32_bits(chunk->ll_region.paddr));
+ #else /* CONFIG_64BIT */
+ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+ lower_32_bits(chunk->ll_region.paddr));
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 3061fe857d69f..f652da6ab47df 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -373,7 +373,6 @@ static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
+ {
+ lockdep_assert_held(&wq->wq_lock);
+
+- idxd_wq_disable_cleanup(wq);
+ wq->size = 0;
+ wq->group = NULL;
+ }
+@@ -701,14 +700,17 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ idxd_wq_disable_cleanup(wq);
+- idxd_wq_device_reset_cleanup(wq);
+ wq->state = IDXD_WQ_DISABLED;
+ }
++ idxd_wq_device_reset_cleanup(wq);
+ }
+ }
+
+ void idxd_device_clear_state(struct idxd_device *idxd)
+ {
++ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
++ return;
++
+ idxd_groups_clear_state(idxd);
+ idxd_engines_clear_state(idxd);
+ idxd_device_wqs_clear_state(idxd);
+diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
+index e289fd48711ad..c01db23e3333f 100644
+--- a/drivers/dma/idxd/submit.c
++++ b/drivers/dma/idxd/submit.c
+@@ -150,14 +150,15 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+ */
+ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
+ {
+- int rc, retries = 0;
++ unsigned int retries = wq->enqcmds_retries;
++ int rc;
+
+ do {
+ rc = enqcmds(portal, desc);
+ if (rc == 0)
+ break;
+ cpu_relax();
+- } while (retries++ < wq->enqcmds_retries);
++ } while (retries--);
+
+ return rc;
+ }
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 7e19ab92b61a8..dfd549685c467 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -905,6 +905,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr
+ u64 xfer_size;
+ int rc;
+
++ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
++ return -EPERM;
++
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+@@ -939,6 +942,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
+ u64 batch_size;
+ int rc;
+
++ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
++ return -EPERM;
++
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index 75ec0754d4ad4..b1e6173fcc271 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -198,12 +198,12 @@ struct sdma_script_start_addrs {
+ s32 per_2_firi_addr;
+ s32 mcu_2_firi_addr;
+ s32 uart_2_per_addr;
+- s32 uart_2_mcu_ram_addr;
++ s32 uart_2_mcu_addr;
+ s32 per_2_app_addr;
+ s32 mcu_2_app_addr;
+ s32 per_2_per_addr;
+ s32 uartsh_2_per_addr;
+- s32 uartsh_2_mcu_ram_addr;
++ s32 uartsh_2_mcu_addr;
+ s32 per_2_shp_addr;
+ s32 mcu_2_shp_addr;
+ s32 ata_2_mcu_addr;
+@@ -232,8 +232,8 @@ struct sdma_script_start_addrs {
+ s32 mcu_2_ecspi_addr;
+ s32 mcu_2_sai_addr;
+ s32 sai_2_mcu_addr;
+- s32 uart_2_mcu_addr;
+- s32 uartsh_2_mcu_addr;
++ s32 uart_2_mcu_rom_addr;
++ s32 uartsh_2_mcu_rom_addr;
+ /* End of v3 array */
+ s32 mcu_2_zqspi_addr;
+ /* End of v4 array */
+@@ -1780,17 +1780,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
+ saddr_arr[i] = addr_arr[i];
+
+ /*
+- * get uart_2_mcu_addr/uartsh_2_mcu_addr rom script specially because
+- * they are now replaced by uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr
+- * to be compatible with legacy freescale/nxp sdma firmware, and they
+- * are located in the bottom part of sdma_script_start_addrs which are
+- * beyond the SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1.
++ * For compatibility with NXP internal legacy kernel before 4.19 which
++ * is based on uart ram script and mainline kernel based on uart rom
++ * script, both uart ram/rom scripts are present in newer sdma
++ * firmware. Use the rom versions if they are present (V3 or newer).
+ */
+- if (addr->uart_2_mcu_addr)
+- sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr;
+- if (addr->uartsh_2_mcu_addr)
+- sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr;
+-
++ if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
++ if (addr->uart_2_mcu_rom_addr)
++ sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
++ if (addr->uartsh_2_mcu_rom_addr)
++ sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
++ }
+ }
+
+ static void sdma_load_firmware(const struct firmware *fw, void *context)
+@@ -1869,7 +1869,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
+ u32 reg, val, shift, num_map, i;
+ int ret = 0;
+
+- if (IS_ERR(np) || IS_ERR(gpr_np))
++ if (IS_ERR(np) || !gpr_np)
+ goto out;
+
+ event_remap = of_find_property(np, propname, NULL);
+@@ -1917,7 +1917,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
+ }
+
+ out:
+- if (!IS_ERR(gpr_np))
++ if (gpr_np)
+ of_node_put(gpr_np);
+
+ return ret;
+diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
+index 375e7e647df6b..a1517ef1f4a01 100644
+--- a/drivers/dma/mediatek/mtk-uart-apdma.c
++++ b/drivers/dma/mediatek/mtk-uart-apdma.c
+@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
+ unsigned int status;
+ int ret;
+
+- ret = pm_runtime_get_sync(mtkd->ddev.dev);
++ ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(chan->device->dev);
+ return ret;
+@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
+ ret = readx_poll_timeout(readl, c->base + VFF_EN,
+ status, !status, 10, 100);
+ if (ret)
+- return ret;
++ goto err_pm;
+
+ ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
+ IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
+ if (ret < 0) {
+ dev_err(chan->device->dev, "Can't request dma IRQ\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err_pm;
+ }
+
+ if (mtkd->support_33bits)
+ mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
+
++err_pm:
++ pm_runtime_put_noidle(mtkd->ddev.dev);
+ return ret;
+ }
+
+diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
+index f05ff02c0656e..40b1abeca8562 100644
+--- a/drivers/edac/synopsys_edac.c
++++ b/drivers/edac/synopsys_edac.c
+@@ -164,6 +164,11 @@
+ #define ECC_STAT_CECNT_SHIFT 8
+ #define ECC_STAT_BITNUM_MASK 0x7F
+
++/* ECC error count register definitions */
++#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
++#define ECC_ERRCNT_UECNT_SHIFT 16
++#define ECC_ERRCNT_CECNT_MASK 0xFFFF
++
+ /* DDR QOS Interrupt register definitions */
+ #define DDR_QOS_IRQ_STAT_OFST 0x20200
+ #define DDR_QOSUE_MASK 0x4
+@@ -423,15 +428,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
+ base = priv->baseaddr;
+ p = &priv->stat;
+
++ regval = readl(base + ECC_ERRCNT_OFST);
++ p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
++ p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
++ if (!p->ce_cnt)
++ goto ue_err;
++
+ regval = readl(base + ECC_STAT_OFST);
+ if (!regval)
+ return 1;
+
+- p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
+- p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
+- if (!p->ce_cnt)
+- goto ue_err;
+-
+ p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
+
+ regval = readl(base + ECC_CEADDR0_OFST);
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index e48108e694f8d..7dad6f57d9704 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -955,8 +955,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
+ ctl->alg_region = *alg_region;
+ if (subname && dsp->fw_ver >= 2) {
+ ctl->subname_len = subname_len;
+- ctl->subname = kmemdup(subname,
+- strlen(subname) + 1, GFP_KERNEL);
++ ctl->subname = kasprintf(GFP_KERNEL, "%.*s", subname_len, subname);
+ if (!ctl->subname) {
+ ret = -ENOMEM;
+ goto err_ctl;
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 344e376b2ee99..d5a5cf2691026 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1601,8 +1601,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
+
+ gpiochip_set_irq_hooks(gc);
+
+- acpi_gpiochip_request_interrupts(gc);
+-
+ /*
+ * Using barrier() here to prevent compiler from reordering
+ * gc->irq.initialized before initialization of above
+@@ -1612,6 +1610,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
+
+ gc->irq.initialized = true;
+
++ acpi_gpiochip_request_interrupts(gc);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+index 87ed48d5530dc..8bd265b408470 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+@@ -138,6 +138,10 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
+ cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED;
+ break;
+ }
++
++ if (cmd.psr_set_version.psr_set_version_data.version == PSR_VERSION_UNSUPPORTED)
++ return false;
++
+ cmd.psr_set_version.psr_set_version_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
+ cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst;
+ cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index b00de57cc957e..cd32e1470b3cb 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -887,6 +887,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ return false;
+ }
+
++ /* Wa_16011303918:adl-p */
++ if (crtc_state->vrr.enable &&
++ IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
++ drm_dbg_kms(&dev_priv->drm,
++ "PSR2 not enabled, not compatible with HW stepping + VRR\n");
++ return false;
++ }
++
++ if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
++ drm_dbg_kms(&dev_priv->drm,
++ "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
++ return false;
++ }
++
+ if (HAS_PSR2_SEL_FETCH(dev_priv)) {
+ if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
+ !HAS_PSR_HW_TRACKING(dev_priv)) {
+@@ -900,12 +914,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ if (!crtc_state->enable_psr2_sel_fetch &&
+ IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
+- return false;
++ goto unsupported;
+ }
+
+ if (!psr2_granularity_check(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
+- return false;
++ goto unsupported;
+ }
+
+ if (!crtc_state->enable_psr2_sel_fetch &&
+@@ -914,25 +928,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
+ crtc_hdisplay, crtc_vdisplay,
+ psr_max_h, psr_max_v);
+- return false;
+- }
+-
+- if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
+- drm_dbg_kms(&dev_priv->drm,
+- "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
+- return false;
+- }
+-
+- /* Wa_16011303918:adl-p */
+- if (crtc_state->vrr.enable &&
+- IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+- drm_dbg_kms(&dev_priv->drm,
+- "PSR2 not enabled, not compatible with HW stepping + VRR\n");
+- return false;
++ goto unsupported;
+ }
+
+ tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
+ return true;
+
++unsupported:
++ crtc_state->enable_psr2_sel_fetch = false;
++ return false;
+ }
+
+ void intel_psr_compute_config(struct intel_dp *intel_dp,
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index fb261930ad1c7..e8a8240a68686 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -601,29 +601,20 @@ static const struct of_device_id dt_match[] = {
+ };
+
+ #ifdef CONFIG_PM
+-static int adreno_resume(struct device *dev)
++static int adreno_runtime_resume(struct device *dev)
+ {
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+
+ return gpu->funcs->pm_resume(gpu);
+ }
+
+-static int active_submits(struct msm_gpu *gpu)
+-{
+- int active_submits;
+- mutex_lock(&gpu->active_lock);
+- active_submits = gpu->active_submits;
+- mutex_unlock(&gpu->active_lock);
+- return active_submits;
+-}
+-
+-static int adreno_suspend(struct device *dev)
++static int adreno_runtime_suspend(struct device *dev)
+ {
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+ int remaining;
+
+ remaining = wait_event_timeout(gpu->retire_event,
+- active_submits(gpu) == 0,
++ gpu->active_submits == 0,
+ msecs_to_jiffies(1000));
+ if (remaining == 0) {
+ dev_err(dev, "Timeout waiting for GPU to suspend\n");
+@@ -636,7 +627,7 @@ static int adreno_suspend(struct device *dev)
+
+ static const struct dev_pm_ops adreno_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+- SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
++ SET_RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
+ };
+
+ static struct platform_driver adreno_driver = {
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+index c6b69afcbac89..50e854207c70a 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+@@ -90,7 +90,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(to_mdp5_plane_state(plane->state));
++ plane->state = NULL;
+ mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
++ if (!mdp5_state)
++ return;
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ mdp5_state->base.zpos = STAGE_BASE;
+diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+index 5d2ff67910586..acfe1b31e0792 100644
+--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
++++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+@@ -176,6 +176,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
+ va_list va;
+
+ new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
++ if (!new_blk)
++ return;
+
+ va_start(va, fmt);
+
+diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+index 46029c5610c80..145047e193946 100644
+--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+@@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
+
+ ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
+ if (ret)
+- dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
++ dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
+ }
+
+ static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
+@@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
+ return 0;
+ }
+
+-static int rpi_touchscreen_enable(struct drm_panel *panel)
++static int rpi_touchscreen_prepare(struct drm_panel *panel)
+ {
+ struct rpi_touchscreen *ts = panel_to_ts(panel);
+ int i;
+@@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
+ rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
+ msleep(100);
+
++ return 0;
++}
++
++static int rpi_touchscreen_enable(struct drm_panel *panel)
++{
++ struct rpi_touchscreen *ts = panel_to_ts(panel);
++
+ /* Turn on the backlight. */
+ rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
+
+@@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
+ static const struct drm_panel_funcs rpi_touchscreen_funcs = {
+ .disable = rpi_touchscreen_disable,
+ .unprepare = rpi_touchscreen_noop,
+- .prepare = rpi_touchscreen_noop,
++ .prepare = rpi_touchscreen_prepare,
+ .enable = rpi_touchscreen_enable,
+ .get_modes = rpi_touchscreen_get_modes,
+ };
+diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
+index b991ba1bcd513..f63efd8d5e524 100644
+--- a/drivers/gpu/drm/radeon/radeon_sync.c
++++ b/drivers/gpu/drm/radeon/radeon_sync.c
+@@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
+ struct dma_fence *f;
+ int r = 0;
+
+- dma_resv_for_each_fence(&cursor, resv, shared, f) {
++ dma_resv_for_each_fence(&cursor, resv, !shared, f) {
+ fence = to_radeon_fence(f);
+ if (fence && fence->rdev == rdev)
+ radeon_sync_fence(sync, fence);
+diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
+index 9300d3354c512..64dfefeb03f5e 100644
+--- a/drivers/gpu/drm/vc4/vc4_dsi.c
++++ b/drivers/gpu/drm/vc4/vc4_dsi.c
+@@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
+ unsigned long phy_clock;
+ int ret;
+
+- ret = pm_runtime_get_sync(dev);
++ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
+ return;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+index 31aecc46624b3..04c8a378aeed6 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -46,6 +46,21 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
+ return container_of(bo, struct vmw_buffer_object, base);
1579 + }
1580 +
1581 ++/**
1582 ++ * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
1583 ++ * @bo: ttm buffer object to be checked
1584 ++ *
1585 ++ * Uses destroy function associated with the object to determine if this is
1586 ++ * a &vmw_buffer_object.
1587 ++ *
1588 ++ * Returns:
1589 ++ * true if the object is of &vmw_buffer_object type, false if not.
1590 ++ */
1591 ++static bool bo_is_vmw(struct ttm_buffer_object *bo)
1592 ++{
1593 ++ return bo->destroy == &vmw_bo_bo_free ||
1594 ++ bo->destroy == &vmw_gem_destroy;
1595 ++}
1596 +
1597 + /**
1598 + * vmw_bo_pin_in_placement - Validate a buffer to placement.
1599 +@@ -615,8 +630,9 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
1600 +
1601 + ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
1602 + vmw_bo_unreference(&vbo);
1603 +- if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
1604 +- ret != -EBUSY)) {
1605 ++ if (unlikely(ret != 0)) {
1606 ++ if (ret == -ERESTARTSYS || ret == -EBUSY)
1607 ++ return -EBUSY;
1608 + DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
1609 + (unsigned int) arg->handle);
1610 + return ret;
1611 +@@ -798,7 +814,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
1612 + void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
1613 + {
1614 + /* Is @bo embedded in a struct vmw_buffer_object? */
1615 +- if (vmw_bo_is_vmw_bo(bo))
1616 ++ if (!bo_is_vmw(bo))
1617 + return;
1618 +
1619 + /* Kill any cached kernel maps before swapout */
1620 +@@ -822,7 +838,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
1621 + struct vmw_buffer_object *vbo;
1622 +
1623 + /* Make sure @bo is embedded in a struct vmw_buffer_object? */
1624 +- if (vmw_bo_is_vmw_bo(bo))
1625 ++ if (!bo_is_vmw(bo))
1626 + return;
1627 +
1628 + vbo = container_of(bo, struct vmw_buffer_object, base);
1629 +@@ -843,22 +859,3 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
1630 + if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
1631 + vmw_resource_unbind_list(vbo);
1632 + }
1633 +-
1634 +-/**
1635 +- * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
1636 +- * @bo: buffer object to be checked
1637 +- *
1638 +- * Uses destroy function associated with the object to determine if this is
1639 +- * a &vmw_buffer_object.
1640 +- *
1641 +- * Returns:
1642 +- * true if the object is of &vmw_buffer_object type, false if not.
1643 +- */
1644 +-bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
1645 +-{
1646 +- if (bo->destroy == &vmw_bo_bo_free ||
1647 +- bo->destroy == &vmw_gem_destroy)
1648 +- return true;
1649 +-
1650 +- return false;
1651 +-}
1652 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1653 +index fe36efdb7ff52..f685d426af7e3 100644
1654 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1655 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1656 +@@ -997,13 +997,10 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
1657 + goto out_no_fman;
1658 + }
1659 +
1660 +- drm_vma_offset_manager_init(&dev_priv->vma_manager,
1661 +- DRM_FILE_PAGE_OFFSET_START,
1662 +- DRM_FILE_PAGE_OFFSET_SIZE);
1663 + ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
1664 + dev_priv->drm.dev,
1665 + dev_priv->drm.anon_inode->i_mapping,
1666 +- &dev_priv->vma_manager,
1667 ++ dev_priv->drm.vma_offset_manager,
1668 + dev_priv->map_mode == vmw_dma_alloc_coherent,
1669 + false);
1670 + if (unlikely(ret != 0)) {
1671 +@@ -1173,7 +1170,6 @@ static void vmw_driver_unload(struct drm_device *dev)
1672 + vmw_devcaps_destroy(dev_priv);
1673 + vmw_vram_manager_fini(dev_priv);
1674 + ttm_device_fini(&dev_priv->bdev);
1675 +- drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
1676 + vmw_release_device_late(dev_priv);
1677 + vmw_fence_manager_takedown(dev_priv->fman);
1678 + if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
1679 +@@ -1397,7 +1393,7 @@ vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
1680 + struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
1681 +
1682 + return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
1683 +- &dev_priv->vma_manager);
1684 ++ dev_priv->drm.vma_offset_manager);
1685 + }
1686 +
1687 + static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
1688 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
1689 +index 00e8e27e48846..ace7ca150b036 100644
1690 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
1691 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
1692 +@@ -683,6 +683,9 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
1693 + container_of(base, struct vmw_user_surface, prime.base);
1694 + struct vmw_resource *res = &user_srf->srf.res;
1695 +
1696 ++ if (base->shareable && res && res->backup)
1697 ++ drm_gem_object_put(&res->backup->base.base);
1698 ++
1699 + *p_base = NULL;
1700 + vmw_resource_unreference(&res);
1701 + }
1702 +@@ -857,6 +860,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1703 + goto out_unlock;
1704 + }
1705 + vmw_bo_reference(res->backup);
1706 ++ drm_gem_object_get(&res->backup->base.base);
1707 + }
1708 +
1709 + tmp = vmw_resource_reference(&srf->res);
1710 +@@ -1513,7 +1517,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
1711 + &res->backup);
1712 + if (ret == 0)
1713 + vmw_bo_reference(res->backup);
1714 +-
1715 + }
1716 +
1717 + if (unlikely(ret != 0)) {
1718 +@@ -1561,6 +1564,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
1719 + drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
1720 + rep->buffer_size = res->backup->base.base.size;
1721 + rep->buffer_handle = backup_handle;
1722 ++ if (user_srf->prime.base.shareable)
1723 ++ drm_gem_object_get(&res->backup->base.base);
1724 + } else {
1725 + rep->buffer_map_handle = 0;
1726 + rep->buffer_size = 0;
1727 +diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
1728 +index 43375b38ee592..8a7ce41b8c56e 100644
1729 +--- a/drivers/input/keyboard/omap4-keypad.c
1730 ++++ b/drivers/input/keyboard/omap4-keypad.c
1731 +@@ -393,7 +393,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
1732 + * revision register.
1733 + */
1734 + error = pm_runtime_get_sync(dev);
1735 +- if (error) {
1736 ++ if (error < 0) {
1737 + dev_err(dev, "pm_runtime_get_sync() failed\n");
1738 + pm_runtime_put_noidle(dev);
1739 + return error;
1740 +diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
1741 +index db3ec47681596..7a730c9d4bdf6 100644
1742 +--- a/drivers/net/ethernet/Kconfig
1743 ++++ b/drivers/net/ethernet/Kconfig
1744 +@@ -35,15 +35,6 @@ source "drivers/net/ethernet/aquantia/Kconfig"
1745 + source "drivers/net/ethernet/arc/Kconfig"
1746 + source "drivers/net/ethernet/asix/Kconfig"
1747 + source "drivers/net/ethernet/atheros/Kconfig"
1748 +-source "drivers/net/ethernet/broadcom/Kconfig"
1749 +-source "drivers/net/ethernet/brocade/Kconfig"
1750 +-source "drivers/net/ethernet/cadence/Kconfig"
1751 +-source "drivers/net/ethernet/calxeda/Kconfig"
1752 +-source "drivers/net/ethernet/cavium/Kconfig"
1753 +-source "drivers/net/ethernet/chelsio/Kconfig"
1754 +-source "drivers/net/ethernet/cirrus/Kconfig"
1755 +-source "drivers/net/ethernet/cisco/Kconfig"
1756 +-source "drivers/net/ethernet/cortina/Kconfig"
1757 +
1758 + config CX_ECAT
1759 + tristate "Beckhoff CX5020 EtherCAT master support"
1760 +@@ -57,6 +48,14 @@ config CX_ECAT
1761 + To compile this driver as a module, choose M here. The module
1762 + will be called ec_bhf.
1763 +
1764 ++source "drivers/net/ethernet/broadcom/Kconfig"
1765 ++source "drivers/net/ethernet/cadence/Kconfig"
1766 ++source "drivers/net/ethernet/calxeda/Kconfig"
1767 ++source "drivers/net/ethernet/cavium/Kconfig"
1768 ++source "drivers/net/ethernet/chelsio/Kconfig"
1769 ++source "drivers/net/ethernet/cirrus/Kconfig"
1770 ++source "drivers/net/ethernet/cisco/Kconfig"
1771 ++source "drivers/net/ethernet/cortina/Kconfig"
1772 + source "drivers/net/ethernet/davicom/Kconfig"
1773 +
1774 + config DNET
1775 +@@ -84,7 +83,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
1776 + source "drivers/net/ethernet/i825xx/Kconfig"
1777 + source "drivers/net/ethernet/ibm/Kconfig"
1778 + source "drivers/net/ethernet/intel/Kconfig"
1779 +-source "drivers/net/ethernet/microsoft/Kconfig"
1780 + source "drivers/net/ethernet/xscale/Kconfig"
1781 +
1782 + config JME
1783 +@@ -127,8 +125,9 @@ source "drivers/net/ethernet/mediatek/Kconfig"
1784 + source "drivers/net/ethernet/mellanox/Kconfig"
1785 + source "drivers/net/ethernet/micrel/Kconfig"
1786 + source "drivers/net/ethernet/microchip/Kconfig"
1787 +-source "drivers/net/ethernet/moxa/Kconfig"
1788 + source "drivers/net/ethernet/mscc/Kconfig"
1789 ++source "drivers/net/ethernet/microsoft/Kconfig"
1790 ++source "drivers/net/ethernet/moxa/Kconfig"
1791 + source "drivers/net/ethernet/myricom/Kconfig"
1792 +
1793 + config FEALNX
1794 +@@ -140,10 +139,10 @@ config FEALNX
1795 + Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
1796 + cards. <http://www.myson.com.tw/>
1797 +
1798 ++source "drivers/net/ethernet/ni/Kconfig"
1799 + source "drivers/net/ethernet/natsemi/Kconfig"
1800 + source "drivers/net/ethernet/neterion/Kconfig"
1801 + source "drivers/net/ethernet/netronome/Kconfig"
1802 +-source "drivers/net/ethernet/ni/Kconfig"
1803 + source "drivers/net/ethernet/8390/Kconfig"
1804 + source "drivers/net/ethernet/nvidia/Kconfig"
1805 + source "drivers/net/ethernet/nxp/Kconfig"
1806 +@@ -163,6 +162,7 @@ source "drivers/net/ethernet/packetengines/Kconfig"
1807 + source "drivers/net/ethernet/pasemi/Kconfig"
1808 + source "drivers/net/ethernet/pensando/Kconfig"
1809 + source "drivers/net/ethernet/qlogic/Kconfig"
1810 ++source "drivers/net/ethernet/brocade/Kconfig"
1811 + source "drivers/net/ethernet/qualcomm/Kconfig"
1812 + source "drivers/net/ethernet/rdc/Kconfig"
1813 + source "drivers/net/ethernet/realtek/Kconfig"
1814 +@@ -170,10 +170,10 @@ source "drivers/net/ethernet/renesas/Kconfig"
1815 + source "drivers/net/ethernet/rocker/Kconfig"
1816 + source "drivers/net/ethernet/samsung/Kconfig"
1817 + source "drivers/net/ethernet/seeq/Kconfig"
1818 +-source "drivers/net/ethernet/sfc/Kconfig"
1819 + source "drivers/net/ethernet/sgi/Kconfig"
1820 + source "drivers/net/ethernet/silan/Kconfig"
1821 + source "drivers/net/ethernet/sis/Kconfig"
1822 ++source "drivers/net/ethernet/sfc/Kconfig"
1823 + source "drivers/net/ethernet/smsc/Kconfig"
1824 + source "drivers/net/ethernet/socionext/Kconfig"
1825 + source "drivers/net/ethernet/stmicro/Kconfig"
1826 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1827 +index 33f1a1377588b..24d715c28a355 100644
1828 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1829 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1830 +@@ -486,8 +486,8 @@ int aq_nic_start(struct aq_nic_s *self)
1831 + if (err < 0)
1832 + goto err_exit;
1833 +
1834 +- for (i = 0U, aq_vec = self->aq_vec[0];
1835 +- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
1836 ++ for (i = 0U; self->aq_vecs > i; ++i) {
1837 ++ aq_vec = self->aq_vec[i];
1838 + err = aq_vec_start(aq_vec);
1839 + if (err < 0)
1840 + goto err_exit;
1841 +@@ -517,8 +517,8 @@ int aq_nic_start(struct aq_nic_s *self)
1842 + mod_timer(&self->polling_timer, jiffies +
1843 + AQ_CFG_POLLING_TIMER_INTERVAL);
1844 + } else {
1845 +- for (i = 0U, aq_vec = self->aq_vec[0];
1846 +- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
1847 ++ for (i = 0U; self->aq_vecs > i; ++i) {
1848 ++ aq_vec = self->aq_vec[i];
1849 + err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
1850 + aq_vec_isr, aq_vec,
1851 + aq_vec_get_affinity_mask(aq_vec));
1852 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
1853 +index 797a95142d1f4..3a529ee8c8340 100644
1854 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
1855 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
1856 +@@ -444,22 +444,22 @@ err_exit:
1857 +
1858 + static int aq_pm_freeze(struct device *dev)
1859 + {
1860 +- return aq_suspend_common(dev, false);
1861 ++ return aq_suspend_common(dev, true);
1862 + }
1863 +
1864 + static int aq_pm_suspend_poweroff(struct device *dev)
1865 + {
1866 +- return aq_suspend_common(dev, true);
1867 ++ return aq_suspend_common(dev, false);
1868 + }
1869 +
1870 + static int aq_pm_thaw(struct device *dev)
1871 + {
1872 +- return atl_resume_common(dev, false);
1873 ++ return atl_resume_common(dev, true);
1874 + }
1875 +
1876 + static int aq_pm_resume_restore(struct device *dev)
1877 + {
1878 +- return atl_resume_common(dev, true);
1879 ++ return atl_resume_common(dev, false);
1880 + }
1881 +
1882 + static const struct dev_pm_ops aq_pm_ops = {
1883 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
1884 +index f4774cf051c97..6ab1f3212d246 100644
1885 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
1886 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
1887 +@@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
1888 + if (!self) {
1889 + err = -EINVAL;
1890 + } else {
1891 +- for (i = 0U, ring = self->ring[0];
1892 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
1893 ++ for (i = 0U; self->tx_rings > i; ++i) {
1894 ++ ring = self->ring[i];
1895 + u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
1896 + ring[AQ_VEC_RX_ID].stats.rx.polls++;
1897 + u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
1898 +@@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
1899 + self->aq_hw_ops = aq_hw_ops;
1900 + self->aq_hw = aq_hw;
1901 +
1902 +- for (i = 0U, ring = self->ring[0];
1903 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
1904 ++ for (i = 0U; self->tx_rings > i; ++i) {
1905 ++ ring = self->ring[i];
1906 + err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
1907 + if (err < 0)
1908 + goto err_exit;
1909 +@@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self)
1910 + unsigned int i = 0U;
1911 + int err = 0;
1912 +
1913 +- for (i = 0U, ring = self->ring[0];
1914 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
1915 ++ for (i = 0U; self->tx_rings > i; ++i) {
1916 ++ ring = self->ring[i];
1917 + err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
1918 + &ring[AQ_VEC_TX_ID]);
1919 + if (err < 0)
1920 +@@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self)
1921 + struct aq_ring_s *ring = NULL;
1922 + unsigned int i = 0U;
1923 +
1924 +- for (i = 0U, ring = self->ring[0];
1925 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
1926 ++ for (i = 0U; self->tx_rings > i; ++i) {
1927 ++ ring = self->ring[i];
1928 + self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
1929 + &ring[AQ_VEC_TX_ID]);
1930 +
1931 +@@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
1932 + if (!self)
1933 + goto err_exit;
1934 +
1935 +- for (i = 0U, ring = self->ring[0];
1936 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
1937 ++ for (i = 0U; self->tx_rings > i; ++i) {
1938 ++ ring = self->ring[i];
1939 + aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
1940 + aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
1941 + }
1942 +@@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self)
1943 + if (!self)
1944 + goto err_exit;
1945 +
1946 +- for (i = 0U, ring = self->ring[0];
1947 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
1948 ++ for (i = 0U; self->tx_rings > i; ++i) {
1949 ++ ring = self->ring[i];
1950 + aq_ring_free(&ring[AQ_VEC_TX_ID]);
1951 + if (i < self->rx_rings)
1952 + aq_ring_free(&ring[AQ_VEC_RX_ID]);
1953 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1954 +index d13f06cf0308a..c4f4b13ac4691 100644
1955 +--- a/drivers/net/ethernet/cadence/macb_main.c
1956 ++++ b/drivers/net/ethernet/cadence/macb_main.c
1957 +@@ -1656,6 +1656,7 @@ static void macb_tx_restart(struct macb_queue *queue)
1958 + unsigned int head = queue->tx_head;
1959 + unsigned int tail = queue->tx_tail;
1960 + struct macb *bp = queue->bp;
1961 ++ unsigned int head_idx, tbqp;
1962 +
1963 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1964 + queue_writel(queue, ISR, MACB_BIT(TXUBR));
1965 +@@ -1663,6 +1664,13 @@ static void macb_tx_restart(struct macb_queue *queue)
1966 + if (head == tail)
1967 + return;
1968 +
1969 ++ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
1970 ++ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
1971 ++ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
1972 ++
1973 ++ if (tbqp == head_idx)
1974 ++ return;
1975 ++
1976 + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1977 + }
1978 +
1979 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
1980 +index 763d2c7b5fb1a..5750f9a56393a 100644
1981 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
1982 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
1983 +@@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
1984 + info->phc_index = -1;
1985 +
1986 + fman_node = of_get_parent(mac_node);
1987 +- if (fman_node)
1988 ++ if (fman_node) {
1989 + ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
1990 ++ of_node_put(fman_node);
1991 ++ }
1992 +
1993 +- if (ptp_node)
1994 ++ if (ptp_node) {
1995 + ptp_dev = of_find_device_by_node(ptp_node);
1996 ++ of_node_put(ptp_node);
1997 ++ }
1998 +
1999 + if (ptp_dev)
2000 + ptp = platform_get_drvdata(ptp_dev);
2001 +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
2002 +index d60e2016d03c6..e6c8e6d5234f8 100644
2003 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
2004 ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
2005 +@@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
2006 + {
2007 + u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
2008 + link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
2009 +- u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
2010 +- u16 lat_enc_d = 0; /* latency decoded */
2011 ++ u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
2012 ++ u32 lat_enc_d = 0; /* latency decoded */
2013 + u16 lat_enc = 0; /* latency encoded */
2014 +
2015 + if (link) {
2016 +diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
2017 +index 73edc24d81d54..c54b72f9fd345 100644
2018 +--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
2019 ++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
2020 +@@ -342,7 +342,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2021 + np = netdev_priv(netdev);
2022 + vsi = np->vsi;
2023 +
2024 +- if (ice_is_reset_in_progress(vsi->back->state))
2025 ++ if (ice_is_reset_in_progress(vsi->back->state) ||
2026 ++ test_bit(ICE_VF_DIS, vsi->back->state))
2027 + return NETDEV_TX_BUSY;
2028 +
2029 + repr = ice_netdev_to_repr(netdev);
2030 +diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
2031 +index bd58d9d2e5653..6a413331572b6 100644
2032 +--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
2033 ++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
2034 +@@ -52,7 +52,7 @@ static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
2035 +
2036 + static inline int ice_eswitch_configure(struct ice_pf *pf)
2037 + {
2038 +- return -EOPNOTSUPP;
2039 ++ return 0;
2040 + }
2041 +
2042 + static inline int ice_eswitch_rebuild(struct ice_pf *pf)
2043 +diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
2044 +index 4eb0599714f43..13cdb5ea594d2 100644
2045 +--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
2046 ++++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
2047 +@@ -641,6 +641,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
2048 + status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
2049 + orom_data, hw->flash.banks.orom_size);
2050 + if (status) {
2051 ++ vfree(orom_data);
2052 + ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
2053 + return status;
2054 + }
2055 +diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
2056 +index 66ea566488d12..59d5c467ea6e3 100644
2057 +--- a/drivers/net/ethernet/intel/igc/igc_i225.c
2058 ++++ b/drivers/net/ethernet/intel/igc/igc_i225.c
2059 +@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
2060 + {
2061 + u32 swfw_sync;
2062 +
2063 +- while (igc_get_hw_semaphore_i225(hw))
2064 +- ; /* Empty */
2065 ++ /* Releasing the resource requires first getting the HW semaphore.
2066 ++ * If we fail to get the semaphore, there is nothing we can do,
2067 ++ * except log an error and quit. We are not allowed to hang here
2068 ++ * indefinitely, as it may cause denial of service or system crash.
2069 ++ */
2070 ++ if (igc_get_hw_semaphore_i225(hw)) {
2071 ++ hw_dbg("Failed to release SW_FW_SYNC.\n");
2072 ++ return;
2073 ++ }
2074 +
2075 + swfw_sync = rd32(IGC_SW_FW_SYNC);
2076 + swfw_sync &= ~mask;
2077 +diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
2078 +index 40dbf4b432345..6961f65d36b9a 100644
2079 +--- a/drivers/net/ethernet/intel/igc/igc_phy.c
2080 ++++ b/drivers/net/ethernet/intel/igc/igc_phy.c
2081 +@@ -581,7 +581,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
2082 + * the lower time out
2083 + */
2084 + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
2085 +- usleep_range(500, 1000);
2086 ++ udelay(50);
2087 + mdic = rd32(IGC_MDIC);
2088 + if (mdic & IGC_MDIC_READY)
2089 + break;
2090 +@@ -638,7 +638,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
2091 + * the lower time out
2092 + */
2093 + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
2094 +- usleep_range(500, 1000);
2095 ++ udelay(50);
2096 + mdic = rd32(IGC_MDIC);
2097 + if (mdic & IGC_MDIC_READY)
2098 + break;
2099 +diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
2100 +index 0d6e3215e98f5..653e9f1e35b5c 100644
2101 +--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
2102 ++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
2103 +@@ -992,6 +992,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter)
2104 + igc_ptp_write_i225(adapter, &ts);
2105 + }
2106 +
2107 ++static void igc_ptm_stop(struct igc_adapter *adapter)
2108 ++{
2109 ++ struct igc_hw *hw = &adapter->hw;
2110 ++ u32 ctrl;
2111 ++
2112 ++ ctrl = rd32(IGC_PTM_CTRL);
2113 ++ ctrl &= ~IGC_PTM_CTRL_EN;
2114 ++
2115 ++ wr32(IGC_PTM_CTRL, ctrl);
2116 ++}
2117 ++
2118 + /**
2119 + * igc_ptp_suspend - Disable PTP work items and prepare for suspend
2120 + * @adapter: Board private structure
2121 +@@ -1009,8 +1020,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
2122 + adapter->ptp_tx_skb = NULL;
2123 + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
2124 +
2125 +- if (pci_device_is_present(adapter->pdev))
2126 ++ if (pci_device_is_present(adapter->pdev)) {
2127 + igc_ptp_time_save(adapter);
2128 ++ igc_ptm_stop(adapter);
2129 ++ }
2130 + }
2131 +
2132 + /**
2133 +diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
2134 +index fd3ceb74620d5..a314040c1a6af 100644
2135 +--- a/drivers/net/ethernet/mscc/ocelot.c
2136 ++++ b/drivers/net/ethernet/mscc/ocelot.c
2137 +@@ -2508,6 +2508,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
2138 + val = BIT(port);
2139 +
2140 + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
2141 ++ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
2142 ++ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
2143 + }
2144 +
2145 + static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
2146 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
2147 +index a7ec9f4d46ced..d68ef72dcdde0 100644
2148 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
2149 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
2150 +@@ -71,9 +71,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
2151 + writel(value, ioaddr + PTP_TCR);
2152 +
2153 + /* wait for present system time initialize to complete */
2154 +- return readl_poll_timeout(ioaddr + PTP_TCR, value,
2155 ++ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
2156 + !(value & PTP_TCR_TSINIT),
2157 +- 10000, 100000);
2158 ++ 10, 100000);
2159 + }
2160 +
2161 + static int config_addend(void __iomem *ioaddr, u32 addend)
2162 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
2163 +index 359d16780dbbc..1bf8f7c35b7d2 100644
2164 +--- a/drivers/net/vxlan.c
2165 ++++ b/drivers/net/vxlan.c
2166 +@@ -712,11 +712,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
2167 +
2168 + rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
2169 + if (rd == NULL)
2170 +- return -ENOBUFS;
2171 ++ return -ENOMEM;
2172 +
2173 + if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
2174 + kfree(rd);
2175 +- return -ENOBUFS;
2176 ++ return -ENOMEM;
2177 + }
2178 +
2179 + rd->remote_ip = *ip;
2180 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2181 +index 5d156e591b35c..f7961b22e0518 100644
2182 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2183 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2184 +@@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
2185 + BRCMF_SDIO_FT_SUB,
2186 + };
2187 +
2188 +-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
2189 ++#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
2190 +
2191 + /* SDIO Pad drive strength to select value mappings */
2192 + struct sdiod_drive_str {
2193 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
2194 +index 8a22ee5816748..df85ebc6e1df0 100644
2195 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
2196 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
2197 +@@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2198 + mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
2199 +
2200 + /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
2201 +- mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
2202 ++ mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
2203 +
2204 + /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
2205 + mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
2206 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2207 +index 6215d50ed3e7d..10f7c79caac2d 100644
2208 +--- a/drivers/nvme/host/core.c
2209 ++++ b/drivers/nvme/host/core.c
2210 +@@ -1363,6 +1363,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
2211 + warn_str, cur->nidl);
2212 + return -1;
2213 + }
2214 ++ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
2215 ++ return NVME_NIDT_EUI64_LEN;
2216 + memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
2217 + return NVME_NIDT_EUI64_LEN;
2218 + case NVME_NIDT_NGUID:
2219 +@@ -1371,6 +1373,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
2220 + warn_str, cur->nidl);
2221 + return -1;
2222 + }
2223 ++ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
2224 ++ return NVME_NIDT_NGUID_LEN;
2225 + memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
2226 + return NVME_NIDT_NGUID_LEN;
2227 + case NVME_NIDT_UUID:
2228 +@@ -1379,6 +1383,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
2229 + warn_str, cur->nidl);
2230 + return -1;
2231 + }
2232 ++ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
2233 ++ return NVME_NIDT_UUID_LEN;
2234 + uuid_copy(&ids->uuid, data + sizeof(*cur));
2235 + return NVME_NIDT_UUID_LEN;
2236 + case NVME_NIDT_CSI:
2237 +@@ -1475,12 +1481,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
2238 + if ((*id)->ncap == 0) /* namespace not allocated or attached */
2239 + goto out_free_id;
2240 +
2241 +- if (ctrl->vs >= NVME_VS(1, 1, 0) &&
2242 +- !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
2243 +- memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
2244 +- if (ctrl->vs >= NVME_VS(1, 2, 0) &&
2245 +- !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
2246 +- memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
2247 ++
2248 ++ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
2249 ++ dev_info(ctrl->device,
2250 ++ "Ignoring bogus Namespace Identifiers\n");
2251 ++ } else {
2252 ++ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
2253 ++ !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
2254 ++ memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
2255 ++ if (ctrl->vs >= NVME_VS(1, 2, 0) &&
2256 ++ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
2257 ++ memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
2258 ++ }
2259 +
2260 + return 0;
2261 +
2262 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
2263 +index 730cc80d84ff7..68c42e8311172 100644
2264 +--- a/drivers/nvme/host/nvme.h
2265 ++++ b/drivers/nvme/host/nvme.h
2266 +@@ -144,6 +144,11 @@ enum nvme_quirks {
2267 + * encoding the generation sequence number.
2268 + */
2269 + NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
2270 ++
2271 ++ /*
2272 ++ * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
2273 ++ */
2274 ++ NVME_QUIRK_BOGUS_NID = (1 << 18),
2275 + };
2276 +
2277 + /*
2278 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2279 +index 6a99ed6809158..e4b79bee62068 100644
2280 +--- a/drivers/nvme/host/pci.c
2281 ++++ b/drivers/nvme/host/pci.c
2282 +@@ -3405,7 +3405,10 @@ static const struct pci_device_id nvme_id_table[] = {
2283 + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
2284 + { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
2285 + .driver_data = NVME_QUIRK_IDENTIFY_CNS |
2286 +- NVME_QUIRK_DISABLE_WRITE_ZEROES, },
2287 ++ NVME_QUIRK_DISABLE_WRITE_ZEROES |
2288 ++ NVME_QUIRK_BOGUS_NID, },
2289 ++ { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
2290 ++ .driver_data = NVME_QUIRK_BOGUS_NID, },
2291 + { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
2292 + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
2293 + { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
2294 +@@ -3443,6 +3446,10 @@ static const struct pci_device_id nvme_id_table[] = {
2295 + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
2296 + { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
2297 + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
2298 ++ { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
2299 ++ .driver_data = NVME_QUIRK_BOGUS_NID, },
2300 ++ { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
2301 ++ .driver_data = NVME_QUIRK_BOGUS_NID, },
2302 + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
2303 + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
2304 + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
2305 +diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
2306 +index 295cc7952d0ed..57d20cf3da7a3 100644
2307 +--- a/drivers/perf/arm_pmu.c
2308 ++++ b/drivers/perf/arm_pmu.c
2309 +@@ -398,6 +398,9 @@ validate_group(struct perf_event *event)
2310 + if (!validate_event(event->pmu, &fake_pmu, leader))
2311 + return -EINVAL;
2312 +
2313 ++ if (event == leader)
2314 ++ return 0;
2315 ++
2316 + for_each_sibling_event(sibling, leader) {
2317 + if (!validate_event(event->pmu, &fake_pmu, sibling))
2318 + return -EINVAL;
2319 +@@ -487,12 +490,7 @@ __hw_perf_event_init(struct perf_event *event)
2320 + local64_set(&hwc->period_left, hwc->sample_period);
2321 + }
2322 +
2323 +- if (event->group_leader != event) {
2324 +- if (validate_group(event) != 0)
2325 +- return -EINVAL;
2326 +- }
2327 +-
2328 +- return 0;
2329 ++ return validate_group(event);
2330 + }
2331 +
2332 + static int armpmu_event_init(struct perf_event *event)
2333 +diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
2334 +index c1d9ed9b7b672..19f6b456234f8 100644
2335 +--- a/drivers/platform/x86/samsung-laptop.c
2336 ++++ b/drivers/platform/x86/samsung-laptop.c
2337 +@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
2338 +
2339 + if (value > samsung->kbd_led.max_brightness)
2340 + value = samsung->kbd_led.max_brightness;
2341 +- else if (value < 0)
2342 +- value = 0;
2343 +
2344 + samsung->kbd_led_wk = value;
2345 + queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
2346 +diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
2347 +index 1e83150388506..a8dde46063602 100644
2348 +--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
2349 ++++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
2350 +@@ -121,7 +121,9 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
2351 + return dev_err_probe(dev, PTR_ERR(priv->rstc),
2352 + "failed to get reset\n");
2353 +
2354 +- reset_control_deassert(priv->rstc);
2355 ++ error = reset_control_deassert(priv->rstc);
2356 ++ if (error)
2357 ++ return error;
2358 +
2359 + priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
2360 + priv->rcdev.of_reset_n_cells = 1;
2361 +diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
2362 +index 24d3395964cc4..4c5bba52b1059 100644
2363 +--- a/drivers/reset/tegra/reset-bpmp.c
2364 ++++ b/drivers/reset/tegra/reset-bpmp.c
2365 +@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
2366 + struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
2367 + struct mrq_reset_request request;
2368 + struct tegra_bpmp_message msg;
2369 ++ int err;
2370 +
2371 + memset(&request, 0, sizeof(request));
2372 + request.cmd = command;
2373 +@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
2374 + msg.tx.data = &request;
2375 + msg.tx.size = sizeof(request);
2376 +
2377 +- return tegra_bpmp_transfer(bpmp, &msg);
2378 ++ err = tegra_bpmp_transfer(bpmp, &msg);
2379 ++ if (err)
2380 ++ return err;
2381 ++ if (msg.rx.ret)
2382 ++ return -EINVAL;
2383 ++
2384 ++ return 0;
2385 + }
2386 +
2387 + static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
2388 +diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
2389 +index 5521469ce678b..e16327a4b4c96 100644
2390 +--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
2391 ++++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
2392 +@@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
2393 + if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
2394 + break;
2395 +
2396 +- if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
2397 ++ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
2398 + if (nopin->op_code == ISCSI_OP_NOOP_IN &&
2399 + nopin->itt == (u16) RESERVED_ITT) {
2400 + printk(KERN_ALERT "bnx2i: Unsolicited "
2401 +diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
2402 +index e21b053b4f3e1..a592ca8602f9f 100644
2403 +--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
2404 ++++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
2405 +@@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
2406 + struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
2407 +
2408 + /* Must suspend all rx queue activity for this ep */
2409 +- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2410 ++ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
2411 + }
2412 + /* CONN_DISCONNECT timeout may or may not be an issue depending
2413 + * on what transcribed in TCP layer, different targets behave
2414 +diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
2415 +index 8c7d4dda4cf29..4365d52c6430e 100644
2416 +--- a/drivers/scsi/cxgbi/libcxgbi.c
2417 ++++ b/drivers/scsi/cxgbi/libcxgbi.c
2418 +@@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
2419 + log_debug(1 << CXGBI_DBG_PDU_RX,
2420 + "csk 0x%p, conn 0x%p.\n", csk, conn);
2421 +
2422 +- if (unlikely(!conn || conn->suspend_rx)) {
2423 ++ if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
2424 + log_debug(1 << CXGBI_DBG_PDU_RX,
2425 +- "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
2426 ++ "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
2427 + csk, conn, conn ? conn->id : 0xFF,
2428 +- conn ? conn->suspend_rx : 0xFF);
2429 ++ conn ? conn->flags : 0xFF);
2430 + return;
2431 + }
2432 +
2433 +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
2434 +index 059dae8909ee5..f228d991038a2 100644
2435 +--- a/drivers/scsi/libiscsi.c
2436 ++++ b/drivers/scsi/libiscsi.c
2437 +@@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
2438 + struct iscsi_task *task;
2439 + itt_t itt;
2440 +
2441 +- if (session->state == ISCSI_STATE_TERMINATE)
2442 ++ if (session->state == ISCSI_STATE_TERMINATE ||
2443 ++ !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
2444 + return NULL;
2445 +
2446 + if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
2447 +@@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
2448 + if (conn->stop_stage == 0)
2449 + session->state = ISCSI_STATE_FAILED;
2450 +
2451 +- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
2452 +- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2453 ++ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
2454 ++ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
2455 + return true;
2456 + }
2457 +
2458 +@@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
2459 + * Do this after dropping the extra ref because if this was a requeue
2460 + * it's removed from that list and cleanup_queued_task would miss it.
2461 + */
2462 +- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
2463 ++ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
2464 + /*
2465 + * Save the task and ref in case we weren't cleaning up this
2466 + * task and get woken up again.
2467 +@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
2468 + int rc = 0;
2469 +
2470 + spin_lock_bh(&conn->session->frwd_lock);
2471 +- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
2472 ++ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
2473 + ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
2474 + spin_unlock_bh(&conn->session->frwd_lock);
2475 + return -ENODATA;
2476 +@@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
2477 + goto fault;
2478 + }
2479 +
2480 +- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
2481 ++ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
2482 + reason = FAILURE_SESSION_IN_RECOVERY;
2483 + sc->result = DID_REQUEUE << 16;
2484 + goto fault;
2485 +@@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
2486 + void iscsi_suspend_queue(struct iscsi_conn *conn)
2487 + {
2488 + spin_lock_bh(&conn->session->frwd_lock);
2489 +- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
2490 ++ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
2491 + spin_unlock_bh(&conn->session->frwd_lock);
2492 + }
2493 + EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
2494 +@@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
2495 + struct Scsi_Host *shost = conn->session->host;
2496 + struct iscsi_host *ihost = shost_priv(shost);
2497 +
2498 +- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
2499 ++ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
2500 + if (ihost->workq)
2501 + flush_workqueue(ihost->workq);
2502 + }
2503 +@@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
2504 +
2505 + static void iscsi_start_tx(struct iscsi_conn *conn)
2506 + {
2507 +- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
2508 ++ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
2509 + iscsi_conn_queue_work(conn);
2510 + }
2511 +
2512 +@@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
2513 + iscsi_suspend_tx(conn);
2514 +
2515 + spin_lock_bh(&session->frwd_lock);
2516 ++ clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
2517 ++
2518 + if (!is_active) {
2519 + /*
2520 + * if logout timed out before userspace could even send a PDU
2521 +@@ -3311,6 +3314,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2522 + spin_lock_bh(&session->frwd_lock);
2523 + if (is_leading)
2524 + session->leadconn = conn;
2525 ++
2526 ++ set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
2527 + spin_unlock_bh(&session->frwd_lock);
2528 +
2529 + /*
2530 +@@ -3323,8 +3328,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2531 + /*
2532 + * Unblock xmitworker(), Login Phase will pass through.
2533 + */
2534 +- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2535 +- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
2536 ++ clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
2537 ++ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
2538 + return 0;
2539 + }
2540 + EXPORT_SYMBOL_GPL(iscsi_conn_bind);
2541 +diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
2542 +index 2e9ffe3d1a55e..883005757ddb8 100644
2543 +--- a/drivers/scsi/libiscsi_tcp.c
2544 ++++ b/drivers/scsi/libiscsi_tcp.c
2545 +@@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
2546 + */
2547 + conn->last_recv = jiffies;
2548 +
2549 +- if (unlikely(conn->suspend_rx)) {
2550 ++ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
2551 + ISCSI_DBG_TCP(conn, "Rx suspended!\n");
2552 + *status = ISCSI_TCP_SUSPENDED;
2553 + return 0;
2554 +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
2555 +index 282ecb4e39bbd..e1fe989ad7b33 100644
2556 +--- a/drivers/scsi/qedi/qedi_iscsi.c
2557 ++++ b/drivers/scsi/qedi/qedi_iscsi.c
2558 +@@ -859,6 +859,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
2559 + return qedi_iscsi_send_ioreq(task);
2560 + }
2561 +
2562 ++static void qedi_offload_work(struct work_struct *work)
2563 ++{
2564 ++ struct qedi_endpoint *qedi_ep =
2565 ++ container_of(work, struct qedi_endpoint, offload_work);
2566 ++ struct qedi_ctx *qedi;
2567 ++ int wait_delay = 5 * HZ;
2568 ++ int ret;
2569 ++
2570 ++ qedi = qedi_ep->qedi;
2571 ++
2572 ++ ret = qedi_iscsi_offload_conn(qedi_ep);
2573 ++ if (ret) {
2574 ++ QEDI_ERR(&qedi->dbg_ctx,
2575 ++ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
2576 ++ qedi_ep->iscsi_cid, qedi_ep, ret);
2577 ++ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
2578 ++ return;
2579 ++ }
2580 ++
2581 ++ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
2582 ++ (qedi_ep->state ==
2583 ++ EP_STATE_OFLDCONN_COMPL),
2584 ++ wait_delay);
2585 ++ if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
2586 ++ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
2587 ++ QEDI_ERR(&qedi->dbg_ctx,
2588 ++ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
2589 ++ qedi_ep->iscsi_cid, qedi_ep);
2590 ++ }
2591 ++}
2592 ++
2593 + static struct iscsi_endpoint *
2594 + qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
2595 + int non_blocking)
2596 +@@ -907,6 +938,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
2597 + }
2598 + qedi_ep = ep->dd_data;
2599 + memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
2600 ++ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
2601 + qedi_ep->state = EP_STATE_IDLE;
2602 + qedi_ep->iscsi_cid = (u32)-1;
2603 + qedi_ep->qedi = qedi;
2604 +@@ -1055,12 +1087,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
2605 + qedi_ep = ep->dd_data;
2606 + qedi = qedi_ep->qedi;
2607 +
2608 ++ flush_work(&qedi_ep->offload_work);
2609 ++
2610 + if (qedi_ep->state == EP_STATE_OFLDCONN_START)
2611 + goto ep_exit_recover;
2612 +
2613 +- if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
2614 +- flush_work(&qedi_ep->offload_work);
2615 +-
2616 + if (qedi_ep->conn) {
2617 + qedi_conn = qedi_ep->conn;
2618 + abrt_conn = qedi_conn->abrt_conn;
2619 +@@ -1234,37 +1265,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
2620 + return rc;
2621 + }
2622 +
2623 +-static void qedi_offload_work(struct work_struct *work)
2624 +-{
2625 +- struct qedi_endpoint *qedi_ep =
2626 +- container_of(work, struct qedi_endpoint, offload_work);
2627 +- struct qedi_ctx *qedi;
2628 +- int wait_delay = 5 * HZ;
2629 +- int ret;
2630 +-
2631 +- qedi = qedi_ep->qedi;
2632 +-
2633 +- ret = qedi_iscsi_offload_conn(qedi_ep);
2634 +- if (ret) {
2635 +- QEDI_ERR(&qedi->dbg_ctx,
2636 +- "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
2637 +- qedi_ep->iscsi_cid, qedi_ep, ret);
2638 +- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
2639 +- return;
2640 +- }
2641 +-
2642 +- ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
2643 +- (qedi_ep->state ==
2644 +- EP_STATE_OFLDCONN_COMPL),
2645 +- wait_delay);
2646 +- if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
2647 +- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
2648 +- QEDI_ERR(&qedi->dbg_ctx,
2649 +- "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
2650 +- qedi_ep->iscsi_cid, qedi_ep);
2651 +- }
2652 +-}
2653 +-
2654 + static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
2655 + {
2656 + struct qedi_ctx *qedi;
2657 +@@ -1380,7 +1380,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
2658 + qedi_ep->dst_addr, qedi_ep->dst_port);
2659 + }
2660 +
2661 +- INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
2662 + queue_work(qedi->offload_thread, &qedi_ep->offload_work);
2663 +
2664 + ret = 0;
2665 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
2666 +index c7b1b2e8bb02f..bcdfcb25349ad 100644
2667 +--- a/drivers/scsi/scsi_transport_iscsi.c
2668 ++++ b/drivers/scsi/scsi_transport_iscsi.c
2669 +@@ -86,6 +86,9 @@ struct iscsi_internal {
2670 + struct transport_container session_cont;
2671 + };
2672 +
2673 ++static DEFINE_IDR(iscsi_ep_idr);
2674 ++static DEFINE_MUTEX(iscsi_ep_idr_mutex);
2675 ++
2676 + static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
2677 + static struct workqueue_struct *iscsi_eh_timer_workq;
2678 +
2679 +@@ -169,6 +172,11 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
2680 + static void iscsi_endpoint_release(struct device *dev)
2681 + {
2682 + struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
2683 ++
2684 ++ mutex_lock(&iscsi_ep_idr_mutex);
2685 ++ idr_remove(&iscsi_ep_idr, ep->id);
2686 ++ mutex_unlock(&iscsi_ep_idr_mutex);
2687 ++
2688 + kfree(ep);
2689 + }
2690 +
2691 +@@ -181,7 +189,7 @@ static ssize_t
2692 + show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
2693 + {
2694 + struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
2695 +- return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
2696 ++ return sysfs_emit(buf, "%d\n", ep->id);
2697 + }
2698 + static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
2699 +
2700 +@@ -194,48 +202,32 @@ static struct attribute_group iscsi_endpoint_group = {
2701 + .attrs = iscsi_endpoint_attrs,
2702 + };
2703 +
2704 +-#define ISCSI_MAX_EPID -1
2705 +-
2706 +-static int iscsi_match_epid(struct device *dev, const void *data)
2707 +-{
2708 +- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
2709 +- const uint64_t *epid = data;
2710 +-
2711 +- return *epid == ep->id;
2712 +-}
2713 +-
2714 + struct iscsi_endpoint *
2715 + iscsi_create_endpoint(int dd_size)
2716 + {
2717 +- struct device *dev;
2718 + struct iscsi_endpoint *ep;
2719 +- uint64_t id;
2720 +- int err;
2721 +-
2722 +- for (id = 1; id < ISCSI_MAX_EPID; id++) {
2723 +- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
2724 +- iscsi_match_epid);
2725 +- if (!dev)
2726 +- break;
2727 +- else
2728 +- put_device(dev);
2729 +- }
2730 +- if (id == ISCSI_MAX_EPID) {
2731 +- printk(KERN_ERR "Too many connections. Max supported %u\n",
2732 +- ISCSI_MAX_EPID - 1);
2733 +- return NULL;
2734 +- }
2735 ++ int err, id;
2736 +
2737 + ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
2738 + if (!ep)
2739 + return NULL;
2740 +
2741 ++ mutex_lock(&iscsi_ep_idr_mutex);
2742 ++ id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
2743 ++ if (id < 0) {
2744 ++ mutex_unlock(&iscsi_ep_idr_mutex);
2745 ++ printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
2746 ++ id);
2747 ++ goto free_ep;
2748 ++ }
2749 ++ mutex_unlock(&iscsi_ep_idr_mutex);
2750 ++
2751 + ep->id = id;
2752 + ep->dev.class = &iscsi_endpoint_class;
2753 +- dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
2754 ++ dev_set_name(&ep->dev, "ep-%d", id);
2755 + err = device_register(&ep->dev);
2756 + if (err)
2757 +- goto free_ep;
2758 ++ goto free_id;
2759 +
2760 + err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
2761 + if (err)
2762 +@@ -249,6 +241,10 @@ unregister_dev:
2763 + device_unregister(&ep->dev);
2764 + return NULL;
2765 +
2766 ++free_id:
2767 ++ mutex_lock(&iscsi_ep_idr_mutex);
2768 ++ idr_remove(&iscsi_ep_idr, id);
2769 ++ mutex_unlock(&iscsi_ep_idr_mutex);
2770 + free_ep:
2771 + kfree(ep);
2772 + return NULL;
2773 +@@ -276,14 +272,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
2774 + */
2775 + struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
2776 + {
2777 +- struct device *dev;
2778 ++ struct iscsi_endpoint *ep;
2779 +
2780 +- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
2781 +- iscsi_match_epid);
2782 +- if (!dev)
2783 +- return NULL;
2784 ++ mutex_lock(&iscsi_ep_idr_mutex);
2785 ++ ep = idr_find(&iscsi_ep_idr, handle);
2786 ++ if (!ep)
2787 ++ goto unlock;
2788 +
2789 +- return iscsi_dev_to_endpoint(dev);
2790 ++ get_device(&ep->dev);
2791 ++unlock:
2792 ++ mutex_unlock(&iscsi_ep_idr_mutex);
2793 ++ return ep;
2794 + }
2795 + EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
2796 +
2797 +diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
2798 +index ddd00efc48825..fbdb5124d7f7d 100644
2799 +--- a/drivers/scsi/sr_ioctl.c
2800 ++++ b/drivers/scsi/sr_ioctl.c
2801 +@@ -41,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
2802 + int result;
2803 + unsigned char *buffer;
2804 +
2805 +- buffer = kmalloc(32, GFP_KERNEL);
2806 ++ buffer = kzalloc(32, GFP_KERNEL);
2807 + if (!buffer)
2808 + return -ENOMEM;
2809 +
2810 +@@ -55,10 +55,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
2811 + cgc.data_direction = DMA_FROM_DEVICE;
2812 +
2813 + result = sr_do_ioctl(cd, &cgc);
2814 ++ if (result)
2815 ++ goto err;
2816 +
2817 + tochdr->cdth_trk0 = buffer[2];
2818 + tochdr->cdth_trk1 = buffer[3];
2819 +
2820 ++err:
2821 + kfree(buffer);
2822 + return result;
2823 + }
2824 +@@ -71,7 +74,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
2825 + int result;
2826 + unsigned char *buffer;
2827 +
2828 +- buffer = kmalloc(32, GFP_KERNEL);
2829 ++ buffer = kzalloc(32, GFP_KERNEL);
2830 + if (!buffer)
2831 + return -ENOMEM;
2832 +
2833 +@@ -86,6 +89,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
2834 + cgc.data_direction = DMA_FROM_DEVICE;
2835 +
2836 + result = sr_do_ioctl(cd, &cgc);
2837 ++ if (result)
2838 ++ goto err;
2839 +
2840 + tocentry->cdte_ctrl = buffer[5] & 0xf;
2841 + tocentry->cdte_adr = buffer[5] >> 4;
2842 +@@ -98,6 +103,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
2843 + tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
2844 + + buffer[10]) << 8) + buffer[11];
2845 +
2846 ++err:
2847 + kfree(buffer);
2848 + return result;
2849 + }
2850 +@@ -384,7 +390,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
2851 + {
2852 + Scsi_CD *cd = cdi->handle;
2853 + struct packet_command cgc;
2854 +- char *buffer = kmalloc(32, GFP_KERNEL);
2855 ++ char *buffer = kzalloc(32, GFP_KERNEL);
2856 + int result;
2857 +
2858 + if (!buffer)
2859 +@@ -400,10 +406,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
2860 + cgc.data_direction = DMA_FROM_DEVICE;
2861 + cgc.timeout = IOCTL_TIMEOUT;
2862 + result = sr_do_ioctl(cd, &cgc);
2863 ++ if (result)
2864 ++ goto err;
2865 +
2866 + memcpy(mcn->medium_catalog_number, buffer + 9, 13);
2867 + mcn->medium_catalog_number[13] = 0;
2868 +
2869 ++err:
2870 + kfree(buffer);
2871 + return result;
2872 + }
2873 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2874 +index cb285d277201c..5696e52c76e9d 100644
2875 +--- a/drivers/scsi/ufs/ufshcd.c
2876 ++++ b/drivers/scsi/ufs/ufshcd.c
2877 +@@ -367,7 +367,7 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
2878 + static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
2879 + enum ufs_trace_str_t str_t)
2880 + {
2881 +- u64 lba;
2882 ++ u64 lba = 0;
2883 + u8 opcode = 0, group_id = 0;
2884 + u32 intr, doorbell;
2885 + struct ufshcd_lrb *lrbp = &hba->lrb[tag];
2886 +@@ -384,7 +384,6 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
2887 + return;
2888 +
2889 + opcode = cmd->cmnd[0];
2890 +- lba = scsi_get_lba(cmd);
2891 +
2892 + if (opcode == READ_10 || opcode == WRITE_10) {
2893 + /*
2894 +@@ -392,6 +391,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
2895 + */
2896 + transfer_len =
2897 + be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
2898 ++ lba = scsi_get_lba(cmd);
2899 + if (opcode == WRITE_10)
2900 + group_id = lrbp->cmd->cmnd[6];
2901 + } else if (opcode == UNMAP) {
2902 +@@ -399,6 +399,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
2903 + * The number of Bytes to be unmapped beginning with the lba.
2904 + */
2905 + transfer_len = blk_rq_bytes(rq);
2906 ++ lba = scsi_get_lba(cmd);
2907 + }
2908 +
2909 + intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
2910 +diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
2911 +index 92d9610df1fd8..938017a60c8ed 100644
2912 +--- a/drivers/spi/atmel-quadspi.c
2913 ++++ b/drivers/spi/atmel-quadspi.c
2914 +@@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
2915 + static bool atmel_qspi_supports_op(struct spi_mem *mem,
2916 + const struct spi_mem_op *op)
2917 + {
2918 ++ if (!spi_mem_default_supports_op(mem, op))
2919 ++ return false;
2920 ++
2921 + if (atmel_qspi_find_mode(op) < 0)
2922 + return false;
2923 +
2924 +diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
2925 +index 75f3560411386..b8ac24318cb3a 100644
2926 +--- a/drivers/spi/spi-cadence-quadspi.c
2927 ++++ b/drivers/spi/spi-cadence-quadspi.c
2928 +@@ -1415,9 +1415,24 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
2929 + all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
2930 + !op->data.dtr;
2931 +
2932 +- /* Mixed DTR modes not supported. */
2933 +- if (!(all_true || all_false))
2934 ++ if (all_true) {
2935 ++ /* Right now we only support 8-8-8 DTR mode. */
2936 ++ if (op->cmd.nbytes && op->cmd.buswidth != 8)
2937 ++ return false;
2938 ++ if (op->addr.nbytes && op->addr.buswidth != 8)
2939 ++ return false;
2940 ++ if (op->data.nbytes && op->data.buswidth != 8)
2941 ++ return false;
2942 ++ } else if (all_false) {
2943 ++ /* Only 1-1-X ops are supported without DTR */
2944 ++ if (op->cmd.nbytes && op->cmd.buswidth > 1)
2945 ++ return false;
2946 ++ if (op->addr.nbytes && op->addr.buswidth > 1)
2947 ++ return false;
2948 ++ } else {
2949 ++ /* Mixed DTR modes are not supported. */
2950 + return false;
2951 ++ }
2952 +
2953 + if (all_true)
2954 + return spi_mem_dtr_supports_op(mem, op);
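
The cqspi_supports_mem_op() rewrite above replaces a lone "mixed DTR" rejection with an explicit whitelist: 8-8-8 when every phase is DTR, 1-1-X when none is. A standalone sketch of that predicate, using a hypothetical flattened op layout instead of the real struct spi_mem_op, and folding the trailing spi_mem_dtr_supports_op()/spi_mem_default_supports_op() calls into plain true:

    #include <stdbool.h>
    #include <stdio.h>

    struct phase { int nbytes; int buswidth; bool dtr; };
    struct op { struct phase cmd, addr, dummy, data; };

    static bool supports_op(const struct op *op)
    {
        bool all_true = op->cmd.dtr && op->addr.dtr &&
                        op->dummy.dtr && op->data.dtr;
        bool all_false = !op->cmd.dtr && !op->addr.dtr &&
                         !op->dummy.dtr && !op->data.dtr;

        if (all_true) {                 /* only 8-8-8 DTR */
            if (op->cmd.nbytes && op->cmd.buswidth != 8)
                return false;
            if (op->addr.nbytes && op->addr.buswidth != 8)
                return false;
            if (op->data.nbytes && op->data.buswidth != 8)
                return false;
            return true;
        }
        if (all_false) {                /* only 1-1-X without DTR */
            if (op->cmd.nbytes && op->cmd.buswidth > 1)
                return false;
            if (op->addr.nbytes && op->addr.buswidth > 1)
                return false;
            return true;
        }
        return false;                   /* mixed DTR modes rejected */
    }

    int main(void)
    {
        struct op mixed = { .cmd = { 1, 1, true } };    /* cmd DTR, rest not */

        printf("mixed DTR supported: %d\n", supports_op(&mixed));
        return 0;
    }
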
2955 +diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
2956 +index 5c93730615f8d..6d203477c04b1 100644
2957 +--- a/drivers/spi/spi-mtk-nor.c
2958 ++++ b/drivers/spi/spi-mtk-nor.c
2959 +@@ -909,7 +909,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev)
2960 +
2961 + static int __maybe_unused mtk_nor_resume(struct device *dev)
2962 + {
2963 +- return pm_runtime_force_resume(dev);
2964 ++ struct spi_controller *ctlr = dev_get_drvdata(dev);
2965 ++ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
2966 ++ int ret;
2967 ++
2968 ++ ret = pm_runtime_force_resume(dev);
2969 ++ if (ret)
2970 ++ return ret;
2971 ++
2972 ++ mtk_nor_init(sp);
2973 ++
2974 ++ return 0;
2975 + }
2976 +
2977 + static const struct dev_pm_ops mtk_nor_pm_ops = {
2978 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2979 +index 792fdcfdc6add..10aa0fb946138 100644
2980 +--- a/fs/cifs/cifsfs.c
2981 ++++ b/fs/cifs/cifsfs.c
2982 +@@ -946,7 +946,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2983 + ssize_t rc;
2984 + struct inode *inode = file_inode(iocb->ki_filp);
2985 +
2986 +- if (iocb->ki_filp->f_flags & O_DIRECT)
2987 ++ if (iocb->ki_flags & IOCB_DIRECT)
2988 + return cifs_user_readv(iocb, iter);
2989 +
2990 + rc = cifs_revalidate_mapping(inode);
2991 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
2992 +index c3be6a541c8fc..532770c30415d 100644
2993 +--- a/fs/cifs/connect.c
2994 ++++ b/fs/cifs/connect.c
2995 +@@ -534,12 +534,19 @@ int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
2996 + {
2997 + /* If tcp session is not an dfs connection, then reconnect to last target server */
2998 + spin_lock(&cifs_tcp_ses_lock);
2999 +- if (!server->is_dfs_conn || !server->origin_fullpath || !server->leaf_fullpath) {
3000 ++ if (!server->is_dfs_conn) {
3001 + spin_unlock(&cifs_tcp_ses_lock);
3002 + return __cifs_reconnect(server, mark_smb_session);
3003 + }
3004 + spin_unlock(&cifs_tcp_ses_lock);
3005 +
3006 ++ mutex_lock(&server->refpath_lock);
3007 ++ if (!server->origin_fullpath || !server->leaf_fullpath) {
3008 ++ mutex_unlock(&server->refpath_lock);
3009 ++ return __cifs_reconnect(server, mark_smb_session);
3010 ++ }
3011 ++ mutex_unlock(&server->refpath_lock);
3012 ++
3013 + return reconnect_dfs_server(server);
3014 + }
3015 + #else
3016 +@@ -3675,9 +3682,11 @@ static void setup_server_referral_paths(struct mount_ctx *mnt_ctx)
3017 + {
3018 + struct TCP_Server_Info *server = mnt_ctx->server;
3019 +
3020 ++ mutex_lock(&server->refpath_lock);
3021 + server->origin_fullpath = mnt_ctx->origin_fullpath;
3022 + server->leaf_fullpath = mnt_ctx->leaf_fullpath;
3023 + server->current_fullpath = mnt_ctx->leaf_fullpath;
3024 ++ mutex_unlock(&server->refpath_lock);
3025 + mnt_ctx->origin_fullpath = mnt_ctx->leaf_fullpath = NULL;
3026 + }
3027 +
3028 +diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
3029 +index 30e040da4f096..956f8e5cf3e74 100644
3030 +--- a/fs/cifs/dfs_cache.c
3031 ++++ b/fs/cifs/dfs_cache.c
3032 +@@ -1422,12 +1422,14 @@ static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool
3033 + struct TCP_Server_Info *server = tcon->ses->server;
3034 +
3035 + mutex_lock(&server->refpath_lock);
3036 +- if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
3037 +- __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
3038 ++ if (server->origin_fullpath) {
3039 ++ if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
3040 ++ server->origin_fullpath))
3041 ++ __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
3042 ++ __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
3043 ++ }
3044 + mutex_unlock(&server->refpath_lock);
3045 +
3046 +- __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
3047 +-
3048 + return 0;
3049 + }
3050 +
3051 +@@ -1530,11 +1532,14 @@ static void refresh_mounts(struct cifs_ses **sessions)
3052 + list_del_init(&tcon->ulist);
3053 +
3054 + mutex_lock(&server->refpath_lock);
3055 +- if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
3056 +- __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
3057 ++ if (server->origin_fullpath) {
3058 ++ if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
3059 ++ server->origin_fullpath))
3060 ++ __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
3061 ++ __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
3062 ++ }
3063 + mutex_unlock(&server->refpath_lock);
3064 +
3065 +- __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
3066 + cifs_put_tcon(tcon);
3067 + }
3068 + }
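
A recurring shape in the cifs hunks above: origin_fullpath and leaf_fullpath are shared, mutable pointers, so both the NULL check and every dereference now sit inside refpath_lock rather than checking first and locking later. The same check-then-use discipline in a userspace sketch, with a pthread mutex standing in for the kernel mutex and illustrative names:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t refpath_lock = PTHREAD_MUTEX_INITIALIZER;
    static char *origin_fullpath;       /* may be swapped or freed concurrently */

    static void refresh(void)
    {
        pthread_mutex_lock(&refpath_lock);
        if (origin_fullpath)            /* check and use under one critical section */
            printf("refreshing %s\n", origin_fullpath);
        pthread_mutex_unlock(&refpath_lock);
    }

    int main(void)
    {
        pthread_mutex_lock(&refpath_lock);
        origin_fullpath = strdup("\\\\server\\share");
        pthread_mutex_unlock(&refpath_lock);

        refresh();
        free(origin_fullpath);
        return 0;
    }
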
3069 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3070 +index bcd3b9bf8069b..9b80693224957 100644
3071 +--- a/fs/ext4/ext4.h
3072 ++++ b/fs/ext4/ext4.h
3073 +@@ -2271,6 +2271,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
3074 + * Structure of a directory entry
3075 + */
3076 + #define EXT4_NAME_LEN 255
3077 ++/*
3078 ++ * Base length of the ext4 directory entry, excluding the name field
3079 ++ */
3080 ++#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
3081 +
3082 + struct ext4_dir_entry {
3083 + __le32 inode; /* Inode number */
3084 +@@ -3030,7 +3034,7 @@ extern int ext4_inode_attach_jinode(struct inode *inode);
3085 + extern int ext4_can_truncate(struct inode *inode);
3086 + extern int ext4_truncate(struct inode *);
3087 + extern int ext4_break_layouts(struct inode *);
3088 +-extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
3089 ++extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
3090 + extern void ext4_set_inode_flags(struct inode *, bool init);
3091 + extern int ext4_alloc_da_blocks(struct inode *inode);
3092 + extern void ext4_set_aops(struct inode *inode);
3093 +@@ -3062,6 +3066,7 @@ int ext4_fileattr_set(struct user_namespace *mnt_userns,
3094 + struct dentry *dentry, struct fileattr *fa);
3095 + int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa);
3096 + extern void ext4_reset_inode_seed(struct inode *inode);
3097 ++int ext4_update_overhead(struct super_block *sb);
3098 +
3099 + /* migrate.c */
3100 + extern int ext4_ext_migrate(struct inode *);
3101 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3102 +index c0f3f83e0c1b1..488d7c1de941e 100644
3103 +--- a/fs/ext4/extents.c
3104 ++++ b/fs/ext4/extents.c
3105 +@@ -4501,9 +4501,9 @@ retry:
3106 + return ret > 0 ? ret2 : ret;
3107 + }
3108 +
3109 +-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
3110 ++static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
3111 +
3112 +-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
3113 ++static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
3114 +
3115 + static long ext4_zero_range(struct file *file, loff_t offset,
3116 + loff_t len, int mode)
3117 +@@ -4575,6 +4575,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
3118 + /* Wait all existing dio workers, newcomers will block on i_rwsem */
3119 + inode_dio_wait(inode);
3120 +
3121 ++ ret = file_modified(file);
3122 ++ if (ret)
3123 ++ goto out_mutex;
3124 ++
3125 + /* Preallocate the range including the unaligned edges */
3126 + if (partial_begin || partial_end) {
3127 + ret = ext4_alloc_file_blocks(file,
3128 +@@ -4691,7 +4695,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3129 + return -EOPNOTSUPP;
3130 +
3131 + if (mode & FALLOC_FL_PUNCH_HOLE) {
3132 +- ret = ext4_punch_hole(inode, offset, len);
3133 ++ ret = ext4_punch_hole(file, offset, len);
3134 + goto exit;
3135 + }
3136 +
3137 +@@ -4700,12 +4704,12 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3138 + goto exit;
3139 +
3140 + if (mode & FALLOC_FL_COLLAPSE_RANGE) {
3141 +- ret = ext4_collapse_range(inode, offset, len);
3142 ++ ret = ext4_collapse_range(file, offset, len);
3143 + goto exit;
3144 + }
3145 +
3146 + if (mode & FALLOC_FL_INSERT_RANGE) {
3147 +- ret = ext4_insert_range(inode, offset, len);
3148 ++ ret = ext4_insert_range(file, offset, len);
3149 + goto exit;
3150 + }
3151 +
3152 +@@ -4741,6 +4745,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3153 + /* Wait all existing dio workers, newcomers will block on i_rwsem */
3154 + inode_dio_wait(inode);
3155 +
3156 ++ ret = file_modified(file);
3157 ++ if (ret)
3158 ++ goto out;
3159 ++
3160 + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
3161 + if (ret)
3162 + goto out;
3163 +@@ -5242,8 +5250,9 @@ out:
3164 + * This implements the fallocate's collapse range functionality for ext4
3165 + * Returns: 0 and non-zero on error.
3166 + */
3167 +-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
3168 ++static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
3169 + {
3170 ++ struct inode *inode = file_inode(file);
3171 + struct super_block *sb = inode->i_sb;
3172 + struct address_space *mapping = inode->i_mapping;
3173 + ext4_lblk_t punch_start, punch_stop;
3174 +@@ -5295,6 +5304,10 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
3175 + /* Wait for existing dio to complete */
3176 + inode_dio_wait(inode);
3177 +
3178 ++ ret = file_modified(file);
3179 ++ if (ret)
3180 ++ goto out_mutex;
3181 ++
3182 + /*
3183 + * Prevent page faults from reinstantiating pages we have released from
3184 + * page cache.
3185 +@@ -5388,8 +5401,9 @@ out_mutex:
3186 + * by len bytes.
3187 + * Returns 0 on success, error otherwise.
3188 + */
3189 +-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
3190 ++static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
3191 + {
3192 ++ struct inode *inode = file_inode(file);
3193 + struct super_block *sb = inode->i_sb;
3194 + struct address_space *mapping = inode->i_mapping;
3195 + handle_t *handle;
3196 +@@ -5446,6 +5460,10 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
3197 + /* Wait for existing dio to complete */
3198 + inode_dio_wait(inode);
3199 +
3200 ++ ret = file_modified(file);
3201 ++ if (ret)
3202 ++ goto out_mutex;
3203 ++
3204 + /*
3205 + * Prevent page faults from reinstantiating pages we have released from
3206 + * page cache.
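
Each fallocate-family path touched above (zero range, punch hole via ext4_punch_hole(), collapse, insert, and plain allocation) now takes the struct file and calls file_modified() after inode_dio_wait(), so these operations strip setuid/setgid and update c/mtime like an ordinary write. A runnable userspace probe of that semantic; whether the bit is actually cleared depends on the filesystem and the caller's privileges, and the path below is illustrative:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/tmp/falloc-demo";  /* illustrative path */
        struct stat st;
        int fd = open(path, O_CREAT | O_RDWR, 0755);

        if (fd < 0)
            return 1;
        fchmod(fd, 02755);                  /* setgid + group exec */
        posix_fallocate(fd, 0, 4096);       /* an allocating "modification" */
        fstat(fd, &st);
        printf("setgid still set: %s\n", (st.st_mode & S_ISGID) ? "yes" : "no");
        close(fd);
        unlink(path);
        return 0;
    }
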
3207 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3208 +index 531a94f48637c..d8ff93a4b1b90 100644
3209 +--- a/fs/ext4/inode.c
3210 ++++ b/fs/ext4/inode.c
3211 +@@ -3944,12 +3944,14 @@ int ext4_break_layouts(struct inode *inode)
3212 + * Returns: 0 on success or negative on failure
3213 + */
3214 +
3215 +-int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
3216 ++int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3217 + {
3218 ++ struct inode *inode = file_inode(file);
3219 + struct super_block *sb = inode->i_sb;
3220 + ext4_lblk_t first_block, stop_block;
3221 + struct address_space *mapping = inode->i_mapping;
3222 +- loff_t first_block_offset, last_block_offset;
3223 ++ loff_t first_block_offset, last_block_offset, max_length;
3224 ++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3225 + handle_t *handle;
3226 + unsigned int credits;
3227 + int ret = 0, ret2 = 0;
3228 +@@ -3992,6 +3994,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
3229 + offset;
3230 + }
3231 +
3232 ++ /*
3233 ++ * For punch hole, offset + length must lie at least one block before
3234 ++ * the maximum file size. Trim the length if it goes beyond that limit.
3235 ++ */
3236 ++ max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
3237 ++ if (offset + length > max_length)
3238 ++ length = max_length - offset;
3239 ++
3240 + if (offset & (sb->s_blocksize - 1) ||
3241 + (offset + length) & (sb->s_blocksize - 1)) {
3242 + /*
3243 +@@ -4007,6 +4017,10 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
3244 + /* Wait all existing dio workers, newcomers will block on i_rwsem */
3245 + inode_dio_wait(inode);
3246 +
3247 ++ ret = file_modified(file);
3248 ++ if (ret)
3249 ++ goto out_mutex;
3250 ++
3251 + /*
3252 + * Prevent page faults from reinstantiating pages we have released from
3253 + * page cache.
3254 +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
3255 +index a8022c2c6a582..da0aefe67673d 100644
3256 +--- a/fs/ext4/ioctl.c
3257 ++++ b/fs/ext4/ioctl.c
3258 +@@ -1652,3 +1652,19 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3259 + return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
3260 + }
3261 + #endif
3262 ++
3263 ++static void set_overhead(struct ext4_super_block *es, const void *arg)
3264 ++{
3265 ++ es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg));
3266 ++}
3267 ++
3268 ++int ext4_update_overhead(struct super_block *sb)
3269 ++{
3270 ++ struct ext4_sb_info *sbi = EXT4_SB(sb);
3271 ++
3272 ++ if (sb_rdonly(sb) || sbi->s_overhead == 0 ||
3273 ++ sbi->s_overhead == le32_to_cpu(sbi->s_es->s_overhead_clusters))
3274 ++ return 0;
3275 ++
3276 ++ return ext4_update_superblocks_fn(sb, set_overhead, &sbi->s_overhead);
3277 ++}
3278 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
3279 +index 39e223f7bf64d..f62260264f68c 100644
3280 +--- a/fs/ext4/namei.c
3281 ++++ b/fs/ext4/namei.c
3282 +@@ -1466,10 +1466,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
3283 +
3284 + de = (struct ext4_dir_entry_2 *)search_buf;
3285 + dlimit = search_buf + buf_size;
3286 +- while ((char *) de < dlimit) {
3287 ++ while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) {
3288 + /* this code is executed quadratically often */
3289 + /* do minimal checking `by hand' */
3290 +- if ((char *) de + de->name_len <= dlimit &&
3291 ++ if (de->name + de->name_len <= dlimit &&
3292 + ext4_match(dir, fname, de)) {
3293 + /* found a match - just to be sure, do
3294 + * a full check */
3295 +diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
3296 +index 1d370364230e8..40b7d8485b445 100644
3297 +--- a/fs/ext4/page-io.c
3298 ++++ b/fs/ext4/page-io.c
3299 +@@ -134,8 +134,10 @@ static void ext4_finish_bio(struct bio *bio)
3300 + continue;
3301 + }
3302 + clear_buffer_async_write(bh);
3303 +- if (bio->bi_status)
3304 ++ if (bio->bi_status) {
3305 ++ set_buffer_write_io_error(bh);
3306 + buffer_io_error(bh);
3307 ++ }
3308 + } while ((bh = bh->b_this_page) != head);
3309 + spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
3310 + if (!under_io) {
3311 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3312 +index bed29f96ccc7e..ba6530c2d2711 100644
3313 +--- a/fs/ext4/super.c
3314 ++++ b/fs/ext4/super.c
3315 +@@ -4156,9 +4156,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
3316 + ext4_fsblk_t first_block, last_block, b;
3317 + ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3318 + int s, j, count = 0;
3319 ++ int has_super = ext4_bg_has_super(sb, grp);
3320 +
3321 + if (!ext4_has_feature_bigalloc(sb))
3322 +- return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
3323 ++ return (has_super + ext4_bg_num_gdb(sb, grp) +
3324 ++ (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
3325 + sbi->s_itb_per_group + 2);
3326 +
3327 + first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
3328 +@@ -5266,9 +5268,18 @@ no_journal:
3329 + * Get the # of file system overhead blocks from the
3330 + * superblock if present.
3331 + */
3332 +- if (es->s_overhead_clusters)
3333 +- sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
3334 +- else {
3335 ++ sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
3336 ++ /* ignore the precalculated value if it is ridiculous */
3337 ++ if (sbi->s_overhead > ext4_blocks_count(es))
3338 ++ sbi->s_overhead = 0;
3339 ++ /*
3340 ++ * If the bigalloc feature is not enabled, recalculating the
3341 ++ * overhead doesn't take long, so we might as well just redo
3342 ++ * it to make sure we are using the correct value.
3343 ++ */
3344 ++ if (!ext4_has_feature_bigalloc(sb))
3345 ++ sbi->s_overhead = 0;
3346 ++ if (sbi->s_overhead == 0) {
3347 + err = ext4_calculate_overhead(sb);
3348 + if (err)
3349 + goto failed_mount_wq;
3350 +@@ -5586,6 +5597,8 @@ static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
3351 + ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
3352 + "Quota mode: %s.", descr, ext4_quota_mode(sb));
3353 +
3354 ++ /* Update the s_overhead_clusters if necessary */
3355 ++ ext4_update_overhead(sb);
3356 + return 0;
3357 +
3358 + free_sbi:
3359 +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
3360 +index 3b34bb24d0af4..801ad9f4f2bef 100644
3361 +--- a/fs/gfs2/rgrp.c
3362 ++++ b/fs/gfs2/rgrp.c
3363 +@@ -923,15 +923,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
3364 + spin_lock_init(&rgd->rd_rsspin);
3365 + mutex_init(&rgd->rd_mutex);
3366 +
3367 +- error = compute_bitstructs(rgd);
3368 +- if (error)
3369 +- goto fail;
3370 +-
3371 + error = gfs2_glock_get(sdp, rgd->rd_addr,
3372 + &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
3373 + if (error)
3374 + goto fail;
3375 +
3376 ++ error = compute_bitstructs(rgd);
3377 ++ if (error)
3378 ++ goto fail_glock;
3379 ++
3380 + rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
3381 + rgd->rd_flags &= ~GFS2_RDF_PREFERRED;
3382 + if (rgd->rd_data > sdp->sd_max_rg_data)
3383 +@@ -945,6 +945,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
3384 + }
3385 +
3386 + error = 0; /* someone else read in the rgrp; free it and ignore it */
3387 ++fail_glock:
3388 + gfs2_glock_put(rgd->rd_gl);
3389 +
3390 + fail:
3391 +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
3392 +index a7c6c7498be0b..ed85051b12754 100644
3393 +--- a/fs/hugetlbfs/inode.c
3394 ++++ b/fs/hugetlbfs/inode.c
3395 +@@ -206,7 +206,7 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
3396 + info.flags = 0;
3397 + info.length = len;
3398 + info.low_limit = current->mm->mmap_base;
3399 +- info.high_limit = TASK_SIZE;
3400 ++ info.high_limit = arch_get_mmap_end(addr);
3401 + info.align_mask = PAGE_MASK & ~huge_page_mask(h);
3402 + info.align_offset = 0;
3403 + return vm_unmapped_area(&info);
3404 +@@ -222,7 +222,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
3405 + info.flags = VM_UNMAPPED_AREA_TOPDOWN;
3406 + info.length = len;
3407 + info.low_limit = max(PAGE_SIZE, mmap_min_addr);
3408 +- info.high_limit = current->mm->mmap_base;
3409 ++ info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
3410 + info.align_mask = PAGE_MASK & ~huge_page_mask(h);
3411 + info.align_offset = 0;
3412 + addr = vm_unmapped_area(&info);
3413 +@@ -237,7 +237,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
3414 + VM_BUG_ON(addr != -ENOMEM);
3415 + info.flags = 0;
3416 + info.low_limit = current->mm->mmap_base;
3417 +- info.high_limit = TASK_SIZE;
3418 ++ info.high_limit = arch_get_mmap_end(addr);
3419 + addr = vm_unmapped_area(&info);
3420 + }
3421 +
3422 +@@ -251,6 +251,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
3423 + struct mm_struct *mm = current->mm;
3424 + struct vm_area_struct *vma;
3425 + struct hstate *h = hstate_file(file);
3426 ++ const unsigned long mmap_end = arch_get_mmap_end(addr);
3427 +
3428 + if (len & ~huge_page_mask(h))
3429 + return -EINVAL;
3430 +@@ -266,7 +267,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
3431 + if (addr) {
3432 + addr = ALIGN(addr, huge_page_size(h));
3433 + vma = find_vma(mm, addr);
3434 +- if (TASK_SIZE - len >= addr &&
3435 ++ if (mmap_end - len >= addr &&
3436 + (!vma || addr + len <= vm_start_gap(vma)))
3437 + return addr;
3438 + }
3439 +diff --git a/fs/io_uring.c b/fs/io_uring.c
3440 +index 619c67fd456dd..fbba8342172a0 100644
3441 +--- a/fs/io_uring.c
3442 ++++ b/fs/io_uring.c
3443 +@@ -2612,11 +2612,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
3444 + /* order with io_complete_rw_iopoll(), e.g. ->result updates */
3445 + if (!smp_load_acquire(&req->iopoll_completed))
3446 + break;
3447 ++ nr_events++;
3448 + if (unlikely(req->flags & REQ_F_CQE_SKIP))
3449 + continue;
3450 +-
3451 + __io_fill_cqe(ctx, req->user_data, req->result, io_put_kbuf(req));
3452 +- nr_events++;
3453 + }
3454 +
3455 + if (unlikely(!nr_events))
3456 +@@ -3622,8 +3621,10 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
3457 + iovec = NULL;
3458 + }
3459 + ret = io_rw_init_file(req, FMODE_READ);
3460 +- if (unlikely(ret))
3461 ++ if (unlikely(ret)) {
3462 ++ kfree(iovec);
3463 + return ret;
3464 ++ }
3465 + req->result = iov_iter_count(&s->iter);
3466 +
3467 + if (force_nonblock) {
3468 +@@ -3742,8 +3743,10 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
3469 + iovec = NULL;
3470 + }
3471 + ret = io_rw_init_file(req, FMODE_WRITE);
3472 +- if (unlikely(ret))
3473 ++ if (unlikely(ret)) {
3474 ++ kfree(iovec);
3475 + return ret;
3476 ++ }
3477 + req->result = iov_iter_count(&s->iter);
3478 +
3479 + if (force_nonblock) {
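
Both io_read() and io_write() above gain a kfree(iovec) before the early return: once io_rw_init_file() fails, no later code would ever release the iovec allocated just above it. The classic leak-on-early-return shape in miniature; init_file() is a stand-in for the failing call:

    #include <stdio.h>
    #include <stdlib.h>

    static int init_file(void) { return -1; }  /* simulated failure */

    static int do_rw(void)
    {
        void *iovec = malloc(64);
        int ret;

        if (!iovec)
            return -1;

        ret = init_file();
        if (ret) {
            free(iovec);            /* the fix: release before bailing out */
            return ret;
        }

        /* ... issue the I/O using iovec ... */
        free(iovec);
        return 0;
    }

    int main(void)
    {
        printf("do_rw: %d (no leak either way)\n", do_rw());
        return 0;
    }
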
3480 +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
3481 +index 5b9408e3b370d..ac7f067b7bddb 100644
3482 +--- a/fs/jbd2/commit.c
3483 ++++ b/fs/jbd2/commit.c
3484 +@@ -488,7 +488,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
3485 + jbd2_journal_wait_updates(journal);
3486 +
3487 + commit_transaction->t_state = T_SWITCH;
3488 +- write_unlock(&journal->j_state_lock);
3489 +
3490 + J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
3491 + journal->j_max_transaction_buffers);
3492 +@@ -508,6 +507,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
3493 + * has reserved. This is consistent with the existing behaviour
3494 + * that multiple jbd2_journal_get_write_access() calls to the same
3495 + * buffer are perfectly permissible.
3496 ++ * We use journal->j_state_lock here to serialize processing of
3497 ++ * t_reserved_list with eviction of buffers from journal_unmap_buffer().
3498 + */
3499 + while (commit_transaction->t_reserved_list) {
3500 + jh = commit_transaction->t_reserved_list;
3501 +@@ -527,6 +528,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
3502 + jbd2_journal_refile_buffer(journal, jh);
3503 + }
3504 +
3505 ++ write_unlock(&journal->j_state_lock);
3506 + /*
3507 + * Now try to drop any written-back buffers from the journal's
3508 + * checkpoint lists. We do this *before* commit because it potentially
3509 +diff --git a/fs/namei.c b/fs/namei.c
3510 +index 3f1829b3ab5b7..509657fdf4f56 100644
3511 +--- a/fs/namei.c
3512 ++++ b/fs/namei.c
3513 +@@ -3673,18 +3673,14 @@ static struct dentry *filename_create(int dfd, struct filename *name,
3514 + {
3515 + struct dentry *dentry = ERR_PTR(-EEXIST);
3516 + struct qstr last;
3517 ++ bool want_dir = lookup_flags & LOOKUP_DIRECTORY;
3518 ++ unsigned int reval_flag = lookup_flags & LOOKUP_REVAL;
3519 ++ unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;
3520 + int type;
3521 + int err2;
3522 + int error;
3523 +- bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
3524 +
3525 +- /*
3526 +- * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
3527 +- * other flags passed in are ignored!
3528 +- */
3529 +- lookup_flags &= LOOKUP_REVAL;
3530 +-
3531 +- error = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
3532 ++ error = filename_parentat(dfd, name, reval_flag, path, &last, &type);
3533 + if (error)
3534 + return ERR_PTR(error);
3535 +
3536 +@@ -3698,11 +3694,13 @@ static struct dentry *filename_create(int dfd, struct filename *name,
3537 + /* don't fail immediately if it's r/o, at least try to report other errors */
3538 + err2 = mnt_want_write(path->mnt);
3539 + /*
3540 +- * Do the final lookup.
3541 ++ * Do the final lookup. Suppress 'create' if there is a trailing
3542 ++ * '/', and a directory wasn't requested.
3543 + */
3544 +- lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
3545 ++ if (last.name[last.len] && !want_dir)
3546 ++ create_flags = 0;
3547 + inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
3548 +- dentry = __lookup_hash(&last, path->dentry, lookup_flags);
3549 ++ dentry = __lookup_hash(&last, path->dentry, reval_flag | create_flags);
3550 + if (IS_ERR(dentry))
3551 + goto unlock;
3552 +
3553 +@@ -3716,7 +3714,7 @@ static struct dentry *filename_create(int dfd, struct filename *name,
3554 + * all is fine. Let's be bastards - you had / on the end, you've
3555 + * been asking for (non-existent) directory. -ENOENT for you.
3556 + */
3557 +- if (unlikely(!is_dir && last.name[last.len])) {
3558 ++ if (unlikely(!create_flags)) {
3559 + error = -ENOENT;
3560 + goto fail;
3561 + }
3562 +diff --git a/fs/posix_acl.c b/fs/posix_acl.c
3563 +index 80acb6885cf90..962d32468eb48 100644
3564 +--- a/fs/posix_acl.c
3565 ++++ b/fs/posix_acl.c
3566 +@@ -759,9 +759,14 @@ static void posix_acl_fix_xattr_userns(
3567 + }
3568 +
3569 + void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
3570 ++ struct inode *inode,
3571 + void *value, size_t size)
3572 + {
3573 + struct user_namespace *user_ns = current_user_ns();
3574 ++
3575 ++ /* Leave ids untouched on non-idmapped mounts. */
3576 ++ if (no_idmapping(mnt_userns, i_user_ns(inode)))
3577 ++ mnt_userns = &init_user_ns;
3578 + if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
3579 + return;
3580 + posix_acl_fix_xattr_userns(&init_user_ns, user_ns, mnt_userns, value,
3581 +@@ -769,9 +774,14 @@ void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
3582 + }
3583 +
3584 + void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
3585 ++ struct inode *inode,
3586 + void *value, size_t size)
3587 + {
3588 + struct user_namespace *user_ns = current_user_ns();
3589 ++
3590 ++ /* Leave ids untouched on non-idmapped mounts. */
3591 ++ if (no_idmapping(mnt_userns, i_user_ns(inode)))
3592 ++ mnt_userns = &init_user_ns;
3593 + if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
3594 + return;
3595 + posix_acl_fix_xattr_userns(user_ns, &init_user_ns, mnt_userns, value,
3596 +diff --git a/fs/stat.c b/fs/stat.c
3597 +index 28d2020ba1f42..246d138ec0669 100644
3598 +--- a/fs/stat.c
3599 ++++ b/fs/stat.c
3600 +@@ -334,9 +334,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
3601 + # define choose_32_64(a,b) b
3602 + #endif
3603 +
3604 +-#define valid_dev(x) choose_32_64(old_valid_dev(x),true)
3605 +-#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
3606 +-
3607 + #ifndef INIT_STRUCT_STAT_PADDING
3608 + # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
3609 + #endif
3610 +@@ -345,7 +342,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
3611 + {
3612 + struct stat tmp;
3613 +
3614 +- if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
3615 ++ if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
3616 ++ return -EOVERFLOW;
3617 ++ if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
3618 + return -EOVERFLOW;
3619 + #if BITS_PER_LONG == 32
3620 + if (stat->size > MAX_NON_LFS)
3621 +@@ -353,7 +352,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
3622 + #endif
3623 +
3624 + INIT_STRUCT_STAT_PADDING(tmp);
3625 +- tmp.st_dev = encode_dev(stat->dev);
3626 ++ tmp.st_dev = new_encode_dev(stat->dev);
3627 + tmp.st_ino = stat->ino;
3628 + if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
3629 + return -EOVERFLOW;
3630 +@@ -363,7 +362,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
3631 + return -EOVERFLOW;
3632 + SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
3633 + SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
3634 +- tmp.st_rdev = encode_dev(stat->rdev);
3635 ++ tmp.st_rdev = new_encode_dev(stat->rdev);
3636 + tmp.st_size = stat->size;
3637 + tmp.st_atime = stat->atime.tv_sec;
3638 + tmp.st_mtime = stat->mtime.tv_sec;
3639 +@@ -644,11 +643,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
3640 + {
3641 + struct compat_stat tmp;
3642 +
3643 +- if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
3644 ++ if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
3645 ++ return -EOVERFLOW;
3646 ++ if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
3647 + return -EOVERFLOW;
3648 +
3649 + memset(&tmp, 0, sizeof(tmp));
3650 +- tmp.st_dev = old_encode_dev(stat->dev);
3651 ++ tmp.st_dev = new_encode_dev(stat->dev);
3652 + tmp.st_ino = stat->ino;
3653 + if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
3654 + return -EOVERFLOW;
3655 +@@ -658,7 +659,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
3656 + return -EOVERFLOW;
3657 + SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
3658 + SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
3659 +- tmp.st_rdev = old_encode_dev(stat->rdev);
3660 ++ tmp.st_rdev = new_encode_dev(stat->rdev);
3661 + if ((u64) stat->size > MAX_NON_LFS)
3662 + return -EOVERFLOW;
3663 + tmp.st_size = stat->size;
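
The fs/stat.c hunks stop funneling dev_t through the old 16-bit encoding: the overflow check now applies only when the target field is actually narrow (sizeof < 4), and both cp_new_stat() and cp_compat_stat() emit new_encode_dev(). Userspace restatements of the two encodings show why; the formulas mirror old_encode_dev()/new_encode_dev() but treat them as illustrative:

    #include <stdio.h>

    static unsigned short old_encode(unsigned int maj, unsigned int min)
    {
        return (maj << 8) | min;    /* 8-bit major, 8-bit minor */
    }

    static unsigned int new_encode(unsigned int maj, unsigned int min)
    {
        return (min & 0xff) | (maj << 8) | ((min & ~0xffu) << 12);
    }

    int main(void)
    {
        unsigned int maj = 259, min = 7;    /* e.g. an NVMe partition */

        /* old_valid_dev() analogue: both numbers must fit in 8 bits */
        if (maj > 0xff || min > 0xff)
            printf("old encoding would overflow -> -EOVERFLOW\n");
        printf("new encoding: %#x\n", new_encode(maj, min));
        printf("old encoding (truncated!): %#x\n", old_encode(maj, min));
        return 0;
    }
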
3664 +diff --git a/fs/xattr.c b/fs/xattr.c
3665 +index 5c8c5175b385c..998045165916e 100644
3666 +--- a/fs/xattr.c
3667 ++++ b/fs/xattr.c
3668 +@@ -569,7 +569,8 @@ setxattr(struct user_namespace *mnt_userns, struct dentry *d,
3669 + }
3670 + if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
3671 + (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
3672 +- posix_acl_fix_xattr_from_user(mnt_userns, kvalue, size);
3673 ++ posix_acl_fix_xattr_from_user(mnt_userns, d_inode(d),
3674 ++ kvalue, size);
3675 + }
3676 +
3677 + error = vfs_setxattr(mnt_userns, d, kname, kvalue, size, flags);
3678 +@@ -667,7 +668,8 @@ getxattr(struct user_namespace *mnt_userns, struct dentry *d,
3679 + if (error > 0) {
3680 + if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
3681 + (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
3682 +- posix_acl_fix_xattr_to_user(mnt_userns, kvalue, error);
3683 ++ posix_acl_fix_xattr_to_user(mnt_userns, d_inode(d),
3684 ++ kvalue, error);
3685 + if (size && copy_to_user(value, kvalue, error))
3686 + error = -EFAULT;
3687 + } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
3688 +diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
3689 +index 2ad71cc90b37d..92b10e67d5f87 100644
3690 +--- a/include/linux/etherdevice.h
3691 ++++ b/include/linux/etherdevice.h
3692 +@@ -134,7 +134,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
3693 + #endif
3694 + }
3695 +
3696 +-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
3697 ++static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
3698 + {
3699 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3700 + #ifdef __BIG_ENDIAN
3701 +@@ -372,8 +372,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
3702 + * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
3703 + */
3704 +
3705 +-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
3706 +- const u8 addr2[6+2])
3707 ++static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
3708 + {
3709 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3710 + u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
3711 +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
3712 +index 0abbd685703b9..a1fcf57493479 100644
3713 +--- a/include/linux/memcontrol.h
3714 ++++ b/include/linux/memcontrol.h
3715 +@@ -999,6 +999,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
3716 + }
3717 +
3718 + void mem_cgroup_flush_stats(void);
3719 ++void mem_cgroup_flush_stats_delayed(void);
3720 +
3721 + void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
3722 + int val);
3723 +@@ -1442,6 +1443,10 @@ static inline void mem_cgroup_flush_stats(void)
3724 + {
3725 + }
3726 +
3727 ++static inline void mem_cgroup_flush_stats_delayed(void)
3728 ++{
3729 ++}
3730 ++
3731 + static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
3732 + enum node_stat_item idx, int val)
3733 + {
3734 +diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
3735 +index 060e8d2031814..1766e1de69560 100644
3736 +--- a/include/linux/posix_acl_xattr.h
3737 ++++ b/include/linux/posix_acl_xattr.h
3738 +@@ -34,15 +34,19 @@ posix_acl_xattr_count(size_t size)
3739 +
3740 + #ifdef CONFIG_FS_POSIX_ACL
3741 + void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
3742 ++ struct inode *inode,
3743 + void *value, size_t size);
3744 + void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
3745 ++ struct inode *inode,
3746 + void *value, size_t size);
3747 + #else
3748 + static inline void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
3749 ++ struct inode *inode,
3750 + void *value, size_t size)
3751 + {
3752 + }
3753 + static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
3754 ++ struct inode *inode,
3755 + void *value, size_t size)
3756 + {
3757 + }
3758 +diff --git a/include/linux/sched.h b/include/linux/sched.h
3759 +index e806326eca723..4b4cc633b2665 100644
3760 +--- a/include/linux/sched.h
3761 ++++ b/include/linux/sched.h
3762 +@@ -1440,6 +1440,7 @@ struct task_struct {
3763 + int pagefault_disabled;
3764 + #ifdef CONFIG_MMU
3765 + struct task_struct *oom_reaper_list;
3766 ++ struct timer_list oom_reaper_timer;
3767 + #endif
3768 + #ifdef CONFIG_VMAP_STACK
3769 + struct vm_struct *stack_vm_area;
3770 +diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
3771 +index aa5f09ca5bcf4..0ee0515d5a175 100644
3772 +--- a/include/linux/sched/mm.h
3773 ++++ b/include/linux/sched/mm.h
3774 +@@ -135,6 +135,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
3775 + #endif /* CONFIG_MEMCG */
3776 +
3777 + #ifdef CONFIG_MMU
3778 ++#ifndef arch_get_mmap_end
3779 ++#define arch_get_mmap_end(addr) (TASK_SIZE)
3780 ++#endif
3781 ++
3782 ++#ifndef arch_get_mmap_base
3783 ++#define arch_get_mmap_base(addr, base) (base)
3784 ++#endif
3785 ++
3786 + extern void arch_pick_mmap_layout(struct mm_struct *mm,
3787 + struct rlimit *rlim_stack);
3788 + extern unsigned long
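
The sched/mm.h hunk relocates the #ifndef fallbacks for arch_get_mmap_end()/arch_get_mmap_base() out of mm/mmap.c (removed further below) into a shared header, so the hugetlbfs hunks above see the same per-arch limits as the generic mmap code. The header-default pattern in isolation, with made-up macro names:

    #include <stdio.h>

    /* An "arch" header may provide its own limit before this point: */
    /* #define ARCH_MMAP_END 0x7000000000000ULL */

    #ifndef ARCH_MMAP_END                   /* generic fallback, as in sched/mm.h */
    #define ARCH_MMAP_END 0x800000000000ULL /* TASK_SIZE stand-in */
    #endif

    int main(void)
    {
        printf("mmap end: %#llx\n", (unsigned long long)ARCH_MMAP_END);
        return 0;
    }
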
3789 +diff --git a/include/net/esp.h b/include/net/esp.h
3790 +index 90cd02ff77ef6..9c5637d41d951 100644
3791 +--- a/include/net/esp.h
3792 ++++ b/include/net/esp.h
3793 +@@ -4,8 +4,6 @@
3794 +
3795 + #include <linux/skbuff.h>
3796 +
3797 +-#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
3798 +-
3799 + struct ip_esp_hdr;
3800 +
3801 + static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
3802 +diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
3803 +index 6bd7e5a85ce76..ff82983b7ab41 100644
3804 +--- a/include/net/netns/ipv6.h
3805 ++++ b/include/net/netns/ipv6.h
3806 +@@ -75,8 +75,8 @@ struct netns_ipv6 {
3807 + struct list_head fib6_walkers;
3808 + rwlock_t fib6_walker_lock;
3809 + spinlock_t fib6_gc_lock;
3810 +- unsigned int ip6_rt_gc_expire;
3811 +- unsigned long ip6_rt_last_gc;
3812 ++ atomic_t ip6_rt_gc_expire;
3813 ++ unsigned long ip6_rt_last_gc;
3814 + unsigned char flowlabel_has_excl;
3815 + #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3816 + bool fib6_has_custom_rules;
3817 +diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
3818 +index 4ee233e5a6ffa..d1e282f0d6f18 100644
3819 +--- a/include/scsi/libiscsi.h
3820 ++++ b/include/scsi/libiscsi.h
3821 +@@ -52,8 +52,10 @@ enum {
3822 +
3823 + #define ISID_SIZE 6
3824 +
3825 +-/* Connection suspend "bit" */
3826 +-#define ISCSI_SUSPEND_BIT 1
3827 ++/* Connection flags */
3828 ++#define ISCSI_CONN_FLAG_SUSPEND_TX BIT(0)
3829 ++#define ISCSI_CONN_FLAG_SUSPEND_RX BIT(1)
3830 ++#define ISCSI_CONN_FLAG_BOUND BIT(2)
3831 +
3832 + #define ISCSI_ITT_MASK 0x1fff
3833 + #define ISCSI_TOTAL_CMDS_MAX 4096
3834 +@@ -199,8 +201,7 @@ struct iscsi_conn {
3835 + struct list_head cmdqueue; /* data-path cmd queue */
3836 + struct list_head requeue; /* tasks needing another run */
3837 + struct work_struct xmitwork; /* per-conn. xmit workqueue */
3838 +- unsigned long suspend_tx; /* suspend Tx */
3839 +- unsigned long suspend_rx; /* suspend Rx */
3840 ++ unsigned long flags; /* ISCSI_CONN_FLAGs */
3841 +
3842 + /* negotiated params */
3843 + unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
3844 +diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
3845 +index 037c77fb5dc55..3ecf9702287be 100644
3846 +--- a/include/scsi/scsi_transport_iscsi.h
3847 ++++ b/include/scsi/scsi_transport_iscsi.h
3848 +@@ -296,7 +296,7 @@ extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
3849 + struct iscsi_endpoint {
3850 + void *dd_data; /* LLD private data */
3851 + struct device dev;
3852 +- uint64_t id;
3853 ++ int id;
3854 + struct iscsi_cls_conn *conn;
3855 + };
3856 +
3857 +diff --git a/kernel/events/core.c b/kernel/events/core.c
3858 +index 0ee9ffceb9764..baa0fe350246f 100644
3859 +--- a/kernel/events/core.c
3860 ++++ b/kernel/events/core.c
3861 +@@ -6352,7 +6352,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3862 + again:
3863 + mutex_lock(&event->mmap_mutex);
3864 + if (event->rb) {
3865 +- if (event->rb->nr_pages != nr_pages) {
3866 ++ if (data_page_nr(event->rb) != nr_pages) {
3867 + ret = -EINVAL;
3868 + goto unlock;
3869 + }
3870 +diff --git a/kernel/events/internal.h b/kernel/events/internal.h
3871 +index 082832738c8fd..5150d5f84c033 100644
3872 +--- a/kernel/events/internal.h
3873 ++++ b/kernel/events/internal.h
3874 +@@ -116,6 +116,11 @@ static inline int page_order(struct perf_buffer *rb)
3875 + }
3876 + #endif
3877 +
3878 ++static inline int data_page_nr(struct perf_buffer *rb)
3879 ++{
3880 ++ return rb->nr_pages << page_order(rb);
3881 ++}
3882 ++
3883 + static inline unsigned long perf_data_size(struct perf_buffer *rb)
3884 + {
3885 + return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
3886 +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
3887 +index 52868716ec358..fb35b926024ca 100644
3888 +--- a/kernel/events/ring_buffer.c
3889 ++++ b/kernel/events/ring_buffer.c
3890 +@@ -859,11 +859,6 @@ void rb_free(struct perf_buffer *rb)
3891 + }
3892 +
3893 + #else
3894 +-static int data_page_nr(struct perf_buffer *rb)
3895 +-{
3896 +- return rb->nr_pages << page_order(rb);
3897 +-}
3898 +-
3899 + static struct page *
3900 + __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
3901 + {
3902 +diff --git a/kernel/irq_work.c b/kernel/irq_work.c
3903 +index f7df715ec28e6..7afa40fe5cc43 100644
3904 +--- a/kernel/irq_work.c
3905 ++++ b/kernel/irq_work.c
3906 +@@ -137,7 +137,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
3907 + if (!irq_work_claim(work))
3908 + return false;
3909 +
3910 +- kasan_record_aux_stack(work);
3911 ++ kasan_record_aux_stack_noalloc(work);
3912 +
3913 + preempt_disable();
3914 + if (cpu != smp_processor_id()) {
3915 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
3916 +index cddcf2f4f5251..2f461f0592789 100644
3917 +--- a/kernel/sched/fair.c
3918 ++++ b/kernel/sched/fair.c
3919 +@@ -3776,11 +3776,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
3920 +
3921 + se->avg.runnable_sum = se->avg.runnable_avg * divider;
3922 +
3923 +- se->avg.load_sum = divider;
3924 +- if (se_weight(se)) {
3925 +- se->avg.load_sum =
3926 +- div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
3927 +- }
3928 ++ se->avg.load_sum = se->avg.load_avg * divider;
3929 ++ if (se_weight(se) < se->avg.load_sum)
3930 ++ se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
3931 ++ else
3932 ++ se->avg.load_sum = 1;
3933 +
3934 + enqueue_load_avg(cfs_rq, se);
3935 + cfs_rq->avg.util_avg += se->avg.util_avg;
3936 +diff --git a/lib/xarray.c b/lib/xarray.c
3937 +index 88ca87435e3da..32e1669d5b649 100644
3938 +--- a/lib/xarray.c
3939 ++++ b/lib/xarray.c
3940 +@@ -207,6 +207,8 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
3941 + if (xa_is_sibling(entry)) {
3942 + offset = xa_to_sibling(entry);
3943 + entry = xa_entry(xas->xa, node, offset);
3944 ++ if (node->shift && xa_is_node(entry))
3945 ++ entry = XA_RETRY_ENTRY;
3946 + }
3947 +
3948 + xas->xa_offset = offset;
3949 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3950 +index 9b89a340a6629..563100f2a693e 100644
3951 +--- a/mm/memcontrol.c
3952 ++++ b/mm/memcontrol.c
3953 +@@ -628,6 +628,9 @@ static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
3954 + static DEFINE_SPINLOCK(stats_flush_lock);
3955 + static DEFINE_PER_CPU(unsigned int, stats_updates);
3956 + static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
3957 ++static u64 flush_next_time;
3958 ++
3959 ++#define FLUSH_TIME (2UL*HZ)
3960 +
3961 + static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
3962 + {
3963 +@@ -649,6 +652,7 @@ static void __mem_cgroup_flush_stats(void)
3964 + if (!spin_trylock_irqsave(&stats_flush_lock, flag))
3965 + return;
3966 +
3967 ++ flush_next_time = jiffies_64 + 2*FLUSH_TIME;
3968 + cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
3969 + atomic_set(&stats_flush_threshold, 0);
3970 + spin_unlock_irqrestore(&stats_flush_lock, flag);
3971 +@@ -660,10 +664,16 @@ void mem_cgroup_flush_stats(void)
3972 + __mem_cgroup_flush_stats();
3973 + }
3974 +
3975 ++void mem_cgroup_flush_stats_delayed(void)
3976 ++{
3977 ++ if (time_after64(jiffies_64, flush_next_time))
3978 ++ mem_cgroup_flush_stats();
3979 ++}
3980 ++
3981 + static void flush_memcg_stats_dwork(struct work_struct *w)
3982 + {
3983 + __mem_cgroup_flush_stats();
3984 +- queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
3985 ++ queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
3986 + }
3987 +
3988 + /**
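
mem_cgroup_flush_stats_delayed() above flushes only once flush_next_time has passed, so the hot workingset_refault() path (switched over further below) triggers at most one full rstat flush per window. The time-gating idea as a runnable userspace sketch; the names and the 2-second window are illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define FLUSH_WINDOW_NS (2ULL * 1000 * 1000 * 1000) /* FLUSH_TIME analogue */

    static uint64_t flush_next;

    static uint64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    static void do_flush(void)
    {
        flush_next = now_ns() + 2 * FLUSH_WINDOW_NS;    /* flush_next_time */
        puts("flushed");
    }

    static void flush_delayed(void)
    {
        if (now_ns() > flush_next)      /* time_after64() analogue */
            do_flush();
    }

    int main(void)
    {
        flush_delayed();    /* flushes: deadline (0) already passed */
        flush_delayed();    /* skipped: still inside the window */
        return 0;
    }
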
3989 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
3990 +index 97a9ed8f87a96..15dcedbc17306 100644
3991 +--- a/mm/memory-failure.c
3992 ++++ b/mm/memory-failure.c
3993 +@@ -1779,6 +1779,19 @@ try_again:
3994 + }
3995 +
3996 + if (PageTransHuge(hpage)) {
3997 ++ /*
3998 ++ * Bail out before SetPageHasHWPoisoned() if hpage is
3999 ++ * huge_zero_page, although PG_has_hwpoisoned is not
4000 ++ * checked in set_huge_zero_page().
4001 ++ *
4002 ++ * TODO: Handle memory failure of huge_zero_page thoroughly.
4003 ++ */
4004 ++ if (is_huge_zero_page(hpage)) {
4005 ++ action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
4006 ++ res = -EBUSY;
4007 ++ goto unlock_mutex;
4008 ++ }
4009 ++
4010 + /*
4011 + * The flag must be set after the refcount is bumped
4012 + * otherwise it may race with THP split.
4013 +diff --git a/mm/mmap.c b/mm/mmap.c
4014 +index 18875c216f8db..eb39f17cb86eb 100644
4015 +--- a/mm/mmap.c
4016 ++++ b/mm/mmap.c
4017 +@@ -2119,14 +2119,6 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
4018 + return addr;
4019 + }
4020 +
4021 +-#ifndef arch_get_mmap_end
4022 +-#define arch_get_mmap_end(addr) (TASK_SIZE)
4023 +-#endif
4024 +-
4025 +-#ifndef arch_get_mmap_base
4026 +-#define arch_get_mmap_base(addr, base) (base)
4027 +-#endif
4028 +-
4029 + /* Get an address range which is currently unmapped.
4030 + * For shmat() with addr=0.
4031 + *
4032 +diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
4033 +index 459d195d2ff64..f45ff1b7626a6 100644
4034 +--- a/mm/mmu_notifier.c
4035 ++++ b/mm/mmu_notifier.c
4036 +@@ -1036,6 +1036,18 @@ int mmu_interval_notifier_insert_locked(
4037 + }
4038 + EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
4039 +
4040 ++static bool
4041 ++mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
4042 ++ unsigned long seq)
4043 ++{
4044 ++ bool ret;
4045 ++
4046 ++ spin_lock(&subscriptions->lock);
4047 ++ ret = subscriptions->invalidate_seq != seq;
4048 ++ spin_unlock(&subscriptions->lock);
4049 ++ return ret;
4050 ++}
4051 ++
4052 + /**
4053 + * mmu_interval_notifier_remove - Remove a interval notifier
4054 + * @interval_sub: Interval subscription to unregister
4055 +@@ -1083,7 +1095,7 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
4056 + lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4057 + if (seq)
4058 + wait_event(subscriptions->wq,
4059 +- READ_ONCE(subscriptions->invalidate_seq) != seq);
4060 ++ mmu_interval_seq_released(subscriptions, seq));
4061 +
4062 + /* pairs with mmgrab in mmu_interval_notifier_insert() */
4063 + mmdrop(mm);
4064 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
4065 +index 832fb330376ef..a6bc4a6786ece 100644
4066 +--- a/mm/oom_kill.c
4067 ++++ b/mm/oom_kill.c
4068 +@@ -635,7 +635,7 @@ done:
4069 + */
4070 + set_bit(MMF_OOM_SKIP, &mm->flags);
4071 +
4072 +- /* Drop a reference taken by wake_oom_reaper */
4073 ++ /* Drop a reference taken by queue_oom_reaper */
4074 + put_task_struct(tsk);
4075 + }
4076 +
4077 +@@ -647,12 +647,12 @@ static int oom_reaper(void *unused)
4078 + struct task_struct *tsk = NULL;
4079 +
4080 + wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
4081 +- spin_lock(&oom_reaper_lock);
4082 ++ spin_lock_irq(&oom_reaper_lock);
4083 + if (oom_reaper_list != NULL) {
4084 + tsk = oom_reaper_list;
4085 + oom_reaper_list = tsk->oom_reaper_list;
4086 + }
4087 +- spin_unlock(&oom_reaper_lock);
4088 ++ spin_unlock_irq(&oom_reaper_lock);
4089 +
4090 + if (tsk)
4091 + oom_reap_task(tsk);
4092 +@@ -661,22 +661,48 @@ static int oom_reaper(void *unused)
4093 + return 0;
4094 + }
4095 +
4096 +-static void wake_oom_reaper(struct task_struct *tsk)
4097 ++static void wake_oom_reaper(struct timer_list *timer)
4098 + {
4099 +- /* mm is already queued? */
4100 +- if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
4101 +- return;
4102 ++ struct task_struct *tsk = container_of(timer, struct task_struct,
4103 ++ oom_reaper_timer);
4104 ++ struct mm_struct *mm = tsk->signal->oom_mm;
4105 ++ unsigned long flags;
4106 +
4107 +- get_task_struct(tsk);
4108 ++ /* The victim managed to terminate on its own - see exit_mmap */
4109 ++ if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
4110 ++ put_task_struct(tsk);
4111 ++ return;
4112 ++ }
4113 +
4114 +- spin_lock(&oom_reaper_lock);
4115 ++ spin_lock_irqsave(&oom_reaper_lock, flags);
4116 + tsk->oom_reaper_list = oom_reaper_list;
4117 + oom_reaper_list = tsk;
4118 +- spin_unlock(&oom_reaper_lock);
4119 ++ spin_unlock_irqrestore(&oom_reaper_lock, flags);
4120 + trace_wake_reaper(tsk->pid);
4121 + wake_up(&oom_reaper_wait);
4122 + }
4123 +
4124 ++/*
4125 ++ * Give the OOM victim time to exit naturally before invoking oom reaping.
4126 ++ * The timer's timeout is arbitrary... the longer it is, the longer the
4127 ++ * worst-case OOM scenario can take. If it is too short, the oom_reaper can
4128 ++ * get in the way and release resources needed by the process exit path,
4129 ++ * e.g. the futex robust list can sit in Anon|Private memory that gets reaped
4130 ++ * before the exit path is able to wake the futex waiters.
4131 ++ */
4132 ++#define OOM_REAPER_DELAY (2*HZ)
4133 ++static void queue_oom_reaper(struct task_struct *tsk)
4134 ++{
4135 ++ /* mm is already queued? */
4136 ++ if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
4137 ++ return;
4138 ++
4139 ++ get_task_struct(tsk);
4140 ++ timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
4141 ++ tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
4142 ++ add_timer(&tsk->oom_reaper_timer);
4143 ++}
4144 ++
4145 + static int __init oom_init(void)
4146 + {
4147 + oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
4148 +@@ -684,7 +710,7 @@ static int __init oom_init(void)
4149 + }
4150 + subsys_initcall(oom_init)
4151 + #else
4152 +-static inline void wake_oom_reaper(struct task_struct *tsk)
4153 ++static inline void queue_oom_reaper(struct task_struct *tsk)
4154 + {
4155 + }
4156 + #endif /* CONFIG_MMU */
4157 +@@ -935,7 +961,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
4158 + rcu_read_unlock();
4159 +
4160 + if (can_oom_reap)
4161 +- wake_oom_reaper(victim);
4162 ++ queue_oom_reaper(victim);
4163 +
4164 + mmdrop(mm);
4165 + put_task_struct(victim);
4166 +@@ -971,7 +997,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
4167 + task_lock(victim);
4168 + if (task_will_free_mem(victim)) {
4169 + mark_oom_victim(victim);
4170 +- wake_oom_reaper(victim);
4171 ++ queue_oom_reaper(victim);
4172 + task_unlock(victim);
4173 + put_task_struct(victim);
4174 + return;
4175 +@@ -1070,7 +1096,7 @@ bool out_of_memory(struct oom_control *oc)
4176 + */
4177 + if (task_will_free_mem(current)) {
4178 + mark_oom_victim(current);
4179 +- wake_oom_reaper(current);
4180 ++ queue_oom_reaper(current);
4181 + return true;
4182 + }
4183 +
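
queue_oom_reaper() above arms a timer instead of waking the reaper immediately, giving the victim OOM_REAPER_DELAY to exit on its own (and its futex robust list a chance to be walked) before its memory is torn down. A userspace analogue using a POSIX timer; victim_exited and reap() are illustrative, and older glibc needs -lrt:

    #include <signal.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static volatile bool victim_exited;

    static void reap(union sigval sv)
    {
        (void)sv;
        if (victim_exited)          /* MMF_OOM_SKIP analogue: nothing to do */
            puts("victim already exited, skip reaping");
        else
            puts("reaping victim");
    }

    int main(void)
    {
        struct sigevent sev = {
            .sigev_notify = SIGEV_THREAD,
            .sigev_notify_function = reap,
        };
        struct itimerspec its = { .it_value = { .tv_sec = 2 } };
        timer_t t;

        timer_create(CLOCK_MONOTONIC, &sev, &t);    /* timer_setup() analogue */
        timer_settime(t, 0, &its, NULL);            /* add_timer() analogue */

        victim_exited = true;   /* victim terminates within the grace period */
        sleep(3);               /* keep the process alive past the timer */
        return 0;
    }
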
4184 +diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
4185 +index 0780c2a57ff11..885e5adb0168d 100644
4186 +--- a/mm/userfaultfd.c
4187 ++++ b/mm/userfaultfd.c
4188 +@@ -72,12 +72,15 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
4189 + _dst_pte = pte_mkdirty(_dst_pte);
4190 + if (page_in_cache && !vm_shared)
4191 + writable = false;
4192 +- if (writable) {
4193 +- if (wp_copy)
4194 +- _dst_pte = pte_mkuffd_wp(_dst_pte);
4195 +- else
4196 +- _dst_pte = pte_mkwrite(_dst_pte);
4197 +- }
4198 ++
4199 ++ /*
4200 ++ * Always mark a PTE as write-protected when needed, regardless of
4201 ++ * VM_WRITE, which the user might change.
4202 ++ */
4203 ++ if (wp_copy)
4204 ++ _dst_pte = pte_mkuffd_wp(_dst_pte);
4205 ++ else if (writable)
4206 ++ _dst_pte = pte_mkwrite(_dst_pte);
4207 +
4208 + dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
4209 +
4210 +diff --git a/mm/workingset.c b/mm/workingset.c
4211 +index 8c03afe1d67cb..f66a18d1deaad 100644
4212 +--- a/mm/workingset.c
4213 ++++ b/mm/workingset.c
4214 +@@ -354,7 +354,7 @@ void workingset_refault(struct folio *folio, void *shadow)
4215 +
4216 + mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
4217 +
4218 +- mem_cgroup_flush_stats();
4219 ++ mem_cgroup_flush_stats_delayed();
4220 + /*
4221 + * Compare the distance to the existing workingset size. We
4222 + * don't activate pages that couldn't stay resident even if
4223 +diff --git a/net/can/isotp.c b/net/can/isotp.c
4224 +index 5bce7c66c1219..8c753dcefe7fc 100644
4225 +--- a/net/can/isotp.c
4226 ++++ b/net/can/isotp.c
4227 +@@ -866,6 +866,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
4228 + struct canfd_frame *cf;
4229 + int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
4230 + int wait_tx_done = (so->opt.flags & CAN_ISOTP_WAIT_TX_DONE) ? 1 : 0;
4231 ++ s64 hrtimer_sec = 0;
4232 + int off;
4233 + int err;
4234 +
4235 +@@ -964,7 +965,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
4236 + isotp_create_fframe(cf, so, ae);
4237 +
4238 + /* start timeout for FC */
4239 +- hrtimer_start(&so->txtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
4240 ++ hrtimer_sec = 1;
4241 ++ hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0),
4242 ++ HRTIMER_MODE_REL_SOFT);
4243 + }
4244 +
4245 + /* send the first or only CAN frame */
4246 +@@ -977,6 +980,11 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
4247 + if (err) {
4248 + pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
4249 + __func__, ERR_PTR(err));
4250 ++
4251 ++ /* no transmission -> no timeout monitoring */
4252 ++ if (hrtimer_sec)
4253 ++ hrtimer_cancel(&so->txtimer);
4254 ++
4255 + goto err_out_drop;
4256 + }
4257 +
4258 +diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
4259 +index f64b805303cd7..eb204ad36eeec 100644
4260 +--- a/net/dsa/tag_hellcreek.c
4261 ++++ b/net/dsa/tag_hellcreek.c
4262 +@@ -21,6 +21,14 @@ static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
4263 + struct dsa_port *dp = dsa_slave_to_port(dev);
4264 + u8 *tag;
4265 +
4266 ++ /* Calculate checksums (if required) before adding the trailer tag to
4267 ++ * avoid including it in calculations. That would lead to wrong
4268 ++ * checksums after the switch strips the tag.
4269 ++ */
4270 ++ if (skb->ip_summed == CHECKSUM_PARTIAL &&
4271 ++ skb_checksum_help(skb))
4272 ++ return NULL;
4273 ++
4274 + /* Tag encoding */
4275 + tag = skb_put(skb, HELLCREEK_TAG_LEN);
4276 + *tag = BIT(dp->index);
4277 +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
4278 +index 70e6c87fbe3df..d747166bb291c 100644
4279 +--- a/net/ipv4/esp4.c
4280 ++++ b/net/ipv4/esp4.c
4281 +@@ -446,7 +446,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
4282 + struct page *page;
4283 + struct sk_buff *trailer;
4284 + int tailen = esp->tailen;
4285 +- unsigned int allocsz;
4286 +
4287 + /* this is non-NULL only with TCP/UDP Encapsulation */
4288 + if (x->encap) {
4289 +@@ -456,8 +455,8 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
4290 + return err;
4291 + }
4292 +
4293 +- allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
4294 +- if (allocsz > ESP_SKB_FRAG_MAXSIZE)
4295 ++ if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
4296 ++ ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
4297 + goto cow;
4298 +
4299 + if (!skb_cloned(skb)) {
4300 +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
4301 +index 55d604c9b3b3e..f2120e92caf15 100644
4302 +--- a/net/ipv6/esp6.c
4303 ++++ b/net/ipv6/esp6.c
4304 +@@ -482,7 +482,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
4305 + struct page *page;
4306 + struct sk_buff *trailer;
4307 + int tailen = esp->tailen;
4308 +- unsigned int allocsz;
4309 +
4310 + if (x->encap) {
4311 + int err = esp6_output_encap(x, skb, esp);
4312 +@@ -491,8 +490,8 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
4313 + return err;
4314 + }
4315 +
4316 +- allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
4317 +- if (allocsz > ESP_SKB_FRAG_MAXSIZE)
4318 ++ if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
4319 ++ ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
4320 + goto cow;
4321 +
4322 + if (!skb_cloned(skb)) {
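
The esp4/esp6 hunks drop the combined ESP_SKB_FRAG_MAXSIZE test (the now-unused macro is deleted from include/net/esp.h above) in favour of checking tailen and data_len against a single page each, matching what the page-frag refill below the check can safely supply. The arithmetic difference, runnable; the 32 KiB old limit assumes the usual order-3 SKB_FRAG_PAGE_ORDER:

    #include <stdio.h>

    #define L1_CACHE_BYTES 64UL
    #define PAGE_SIZE 4096UL
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long data_len = 100, tailen = 5000;

        /* old: one combined check against PAGE_SIZE << 3 (32 KiB) */
        printf("old check cows: %d\n",
               ALIGN(data_len + tailen, L1_CACHE_BYTES) > (PAGE_SIZE << 3));

        /* new: each component must fit a single page on its own */
        printf("new check cows: %d\n",
               ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
               ALIGN(data_len, L1_CACHE_BYTES) > PAGE_SIZE);
        return 0;
    }
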
4323 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
4324 +index 8753e9cec3264..9762367361463 100644
4325 +--- a/net/ipv6/ip6_gre.c
4326 ++++ b/net/ipv6/ip6_gre.c
4327 +@@ -733,9 +733,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
4328 + else
4329 + fl6->daddr = tunnel->parms.raddr;
4330 +
4331 +- if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
4332 +- return -ENOMEM;
4333 +-
4334 + /* Push GRE header. */
4335 + protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
4336 +
4337 +@@ -743,6 +740,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
4338 + struct ip_tunnel_info *tun_info;
4339 + const struct ip_tunnel_key *key;
4340 + __be16 flags;
4341 ++ int tun_hlen;
4342 +
4343 + tun_info = skb_tunnel_info_txcheck(skb);
4344 + if (IS_ERR(tun_info) ||
4345 +@@ -760,9 +758,12 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
4346 + dsfield = key->tos;
4347 + flags = key->tun_flags &
4348 + (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
4349 +- tunnel->tun_hlen = gre_calc_hlen(flags);
4350 ++ tun_hlen = gre_calc_hlen(flags);
4351 +
4352 +- gre_build_header(skb, tunnel->tun_hlen,
4353 ++ if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
4354 ++ return -ENOMEM;
4355 ++
4356 ++ gre_build_header(skb, tun_hlen,
4357 + flags, protocol,
4358 + tunnel_id_to_key32(tun_info->key.tun_id),
4359 + (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
4360 +@@ -772,6 +773,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
4361 + if (tunnel->parms.o_flags & TUNNEL_SEQ)
4362 + tunnel->o_seqno++;
4363 +
4364 ++ if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
4365 ++ return -ENOMEM;
4366 ++
4367 + gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
4368 + protocol, tunnel->parms.o_key,
4369 + htonl(tunnel->o_seqno));
4370 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4371 +index da1bf48e79370..1caeb1ef20956 100644
4372 +--- a/net/ipv6/route.c
4373 ++++ b/net/ipv6/route.c
4374 +@@ -3303,6 +3303,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
4375 + int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
4376 + int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
4377 + unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
4378 ++ unsigned int val;
4379 + int entries;
4380 +
4381 + entries = dst_entries_get_fast(ops);
4382 +@@ -3313,13 +3314,13 @@ static int ip6_dst_gc(struct dst_ops *ops)
4383 + entries <= rt_max_size)
4384 + goto out;
4385 +
4386 +- net->ipv6.ip6_rt_gc_expire++;
4387 +- fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
4388 ++ fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
4389 + entries = dst_entries_get_slow(ops);
4390 + if (entries < ops->gc_thresh)
4391 +- net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
4392 ++ atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
4393 + out:
4394 +- net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
4395 ++ val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
4396 ++ atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
4397 + return entries > rt_max_size;
4398 + }
4399 +
4400 +@@ -6514,7 +6515,7 @@ static int __net_init ip6_route_net_init(struct net *net)
4401 + net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
4402 + net->ipv6.sysctl.skip_notify_on_dev_down = 0;
4403 +
4404 +- net->ipv6.ip6_rt_gc_expire = 30*HZ;
4405 ++ atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
4406 +
4407 + ret = 0;
4408 + out:
4409 +diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
4410 +index 17927966abb33..8b14a24f10404 100644
4411 +--- a/net/l3mdev/l3mdev.c
4412 ++++ b/net/l3mdev/l3mdev.c
4413 +@@ -147,7 +147,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
4414 +
4415 + dev = dev_get_by_index_rcu(net, ifindex);
4416 + while (dev && !netif_is_l3_master(dev))
4417 +- dev = netdev_master_upper_dev_get(dev);
4418 ++ dev = netdev_master_upper_dev_get_rcu(dev);
4419 +
4420 + return dev ? dev->ifindex : 0;
4421 + }
4422 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
4423 +index 47a876ccd2881..05a3795eac8e9 100644
4424 +--- a/net/netlink/af_netlink.c
4425 ++++ b/net/netlink/af_netlink.c
4426 +@@ -2263,6 +2263,13 @@ static int netlink_dump(struct sock *sk)
4427 + * single netdev. The outcome is MSG_TRUNC error.
4428 + */
4429 + skb_reserve(skb, skb_tailroom(skb) - alloc_size);
4430 ++
4431 ++ /* Make sure malicious BPF programs cannot read uninitialized memory
4432 ++ * from skb->head -> skb->data
4433 ++ */
4434 ++ skb_reset_network_header(skb);
4435 ++ skb_reset_mac_header(skb);
4436 ++
4437 + netlink_skb_set_owner_r(skb, sk);
4438 +
4439 + if (nlk->dump_done_errno > 0) {
4440 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
4441 +index c591b923016a6..d77c21ff066c9 100644
4442 +--- a/net/openvswitch/flow_netlink.c
4443 ++++ b/net/openvswitch/flow_netlink.c
4444 +@@ -2436,7 +2436,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
4445 + new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
4446 +
4447 + if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
4448 +- if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
4449 ++ if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
4450 + OVS_NLERR(log, "Flow action size exceeds max %u",
4451 + MAX_ACTIONS_BUFSIZE);
4452 + return ERR_PTR(-EMSGSIZE);
4453 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
4454 +index a7273af2d9009..e3c60251e708a 100644
4455 +--- a/net/packet/af_packet.c
4456 ++++ b/net/packet/af_packet.c
4457 +@@ -2856,8 +2856,9 @@ tpacket_error:
4458 +
4459 + status = TP_STATUS_SEND_REQUEST;
4460 + err = po->xmit(skb);
4461 +- if (unlikely(err > 0)) {
4462 +- err = net_xmit_errno(err);
4463 ++ if (unlikely(err != 0)) {
4464 ++ if (err > 0)
4465 ++ err = net_xmit_errno(err);
4466 + if (err && __packet_get_status(po, ph) ==
4467 + TP_STATUS_AVAILABLE) {
4468 + /* skb was destructed already */
4469 +@@ -3058,8 +3059,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
4470 + skb->no_fcs = 1;
4471 +
4472 + err = po->xmit(skb);
4473 +- if (err > 0 && (err = net_xmit_errno(err)) != 0)
4474 +- goto out_unlock;
4475 ++ if (unlikely(err != 0)) {
4476 ++ if (err > 0)
4477 ++ err = net_xmit_errno(err);
4478 ++ if (err)
4479 ++ goto out_unlock;
4480 ++ }
4481 +
4482 + dev_put(dev);
4483 +
4484 +diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
4485 +index f15d6942da453..cc7e30733feb0 100644
4486 +--- a/net/rxrpc/net_ns.c
4487 ++++ b/net/rxrpc/net_ns.c
4488 +@@ -113,7 +113,9 @@ static __net_exit void rxrpc_exit_net(struct net *net)
4489 + struct rxrpc_net *rxnet = rxrpc_net(net);
4490 +
4491 + rxnet->live = false;
4492 ++ del_timer_sync(&rxnet->peer_keepalive_timer);
4493 + cancel_work_sync(&rxnet->peer_keepalive_work);
4494 ++ /* Remove the timer again as the worker may have restarted it. */
4495 + del_timer_sync(&rxnet->peer_keepalive_timer);
4496 + rxrpc_destroy_all_calls(rxnet);
4497 + rxrpc_destroy_all_connections(rxnet);
4498 +diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
4499 +index cf5649292ee00..4d27300c287c4 100644
4500 +--- a/net/sched/cls_u32.c
4501 ++++ b/net/sched/cls_u32.c
4502 +@@ -386,14 +386,19 @@ static int u32_init(struct tcf_proto *tp)
4503 + return 0;
4504 + }
4505 +
4506 +-static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
4507 ++static void __u32_destroy_key(struct tc_u_knode *n)
4508 + {
4509 + struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
4510 +
4511 + tcf_exts_destroy(&n->exts);
4512 +- tcf_exts_put_net(&n->exts);
4513 + if (ht && --ht->refcnt == 0)
4514 + kfree(ht);
4515 ++ kfree(n);
4516 ++}
4517 ++
4518 ++static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
4519 ++{
4520 ++ tcf_exts_put_net(&n->exts);
4521 + #ifdef CONFIG_CLS_U32_PERF
4522 + if (free_pf)
4523 + free_percpu(n->pf);
4524 +@@ -402,8 +407,7 @@ static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
4525 + if (free_pf)
4526 + free_percpu(n->pcpu_success);
4527 + #endif
4528 +- kfree(n);
4529 +- return 0;
4530 ++ __u32_destroy_key(n);
4531 + }
4532 +
4533 + /* u32_delete_key_rcu should be called when free'ing a copied
4534 +@@ -811,10 +815,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
4535 + new->flags = n->flags;
4536 + RCU_INIT_POINTER(new->ht_down, ht);
4537 +
4538 +- /* bump reference count as long as we hold pointer to structure */
4539 +- if (ht)
4540 +- ht->refcnt++;
4541 +-
4542 + #ifdef CONFIG_CLS_U32_PERF
4543 + /* Statistics may be incremented by readers during update
4544 + * so we must keep them intact. When the node is later destroyed
4545 +@@ -836,6 +836,10 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
4546 + return NULL;
4547 + }
4548 +
4549 ++ /* bump reference count as long as we hold pointer to structure */
4550 ++ if (ht)
4551 ++ ht->refcnt++;
4552 ++
4553 + return new;
4554 + }
4555 +
4556 +@@ -900,13 +904,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
4557 + extack);
4558 +
4559 + if (err) {
4560 +- u32_destroy_key(new, false);
4561 ++ __u32_destroy_key(new);
4562 + return err;
4563 + }
4564 +
4565 + err = u32_replace_hw_knode(tp, new, flags, extack);
4566 + if (err) {
4567 +- u32_destroy_key(new, false);
4568 ++ __u32_destroy_key(new);
4569 + return err;
4570 + }
4571 +
4572 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
4573 +index 303c5e56e4df4..68cd110722a4a 100644
4574 +--- a/net/smc/af_smc.c
4575 ++++ b/net/smc/af_smc.c
4576 +@@ -2538,8 +2538,10 @@ static int smc_shutdown(struct socket *sock, int how)
4577 + if (smc->use_fallback) {
4578 + rc = kernel_sock_shutdown(smc->clcsock, how);
4579 + sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
4580 +- if (sk->sk_shutdown == SHUTDOWN_MASK)
4581 ++ if (sk->sk_shutdown == SHUTDOWN_MASK) {
4582 + sk->sk_state = SMC_CLOSED;
4583 ++ sock_put(sk);
4584 ++ }
4585 + goto out;
4586 + }
4587 + switch (how) {
4588 +diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
4589 +index 70fd8b13938ed..8b0a16ba27d39 100644
4590 +--- a/sound/hda/intel-dsp-config.c
4591 ++++ b/sound/hda/intel-dsp-config.c
4592 +@@ -390,22 +390,36 @@ static const struct config_entry config_table[] = {
4593 +
4594 + /* Alder Lake */
4595 + #if IS_ENABLED(CONFIG_SND_SOC_SOF_ALDERLAKE)
4596 ++ /* Alderlake-S */
4597 + {
4598 + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
4599 + .device = 0x7ad0,
4600 + },
4601 ++ /* RaptorLake-S */
4602 + {
4603 + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
4604 +- .device = 0x51c8,
4605 ++ .device = 0x7a50,
4606 + },
4607 ++ /* Alderlake-P */
4608 + {
4609 + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
4610 +- .device = 0x51cc,
4611 ++ .device = 0x51c8,
4612 + },
4613 + {
4614 + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
4615 + .device = 0x51cd,
4616 + },
4617 ++ /* Alderlake-PS */
4618 ++ {
4619 ++ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
4620 ++ .device = 0x51c9,
4621 ++ },
4622 ++ /* Alderlake-M */
4623 ++ {
4624 ++ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
4625 ++ .device = 0x51cc,
4626 ++ },
4627 ++ /* Alderlake-N */
4628 + {
4629 + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
4630 + .device = 0x54c8,
4631 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4632 +index cf4f277dccdda..26637a6959792 100644
4633 +--- a/sound/pci/hda/patch_hdmi.c
4634 ++++ b/sound/pci/hda/patch_hdmi.c
4635 +@@ -1387,7 +1387,7 @@ static int hdmi_find_pcm_slot(struct hdmi_spec *spec,
4636 +
4637 + last_try:
4638 + /* the last try; check the empty slots in pins */
4639 +- for (i = 0; i < spec->num_nids; i++) {
4640 ++ for (i = 0; i < spec->pcm_used; i++) {
4641 + if (!test_bit(i, &spec->pcm_bitmap))
4642 + return i;
4643 + }
4644 +@@ -2263,7 +2263,9 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
4645 + * dev_num is the device entry number in a pin
4646 + */
4647 +
4648 +- if (codec->mst_no_extra_pcms)
4649 ++ if (spec->dyn_pcm_no_legacy && codec->mst_no_extra_pcms)
4650 ++ pcm_num = spec->num_cvts;
4651 ++ else if (codec->mst_no_extra_pcms)
4652 + pcm_num = spec->num_nids;
4653 + else
4654 + pcm_num = spec->num_nids + spec->dev_num - 1;
4655 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4656 +index ca40c2bd8ba62..c66d31d8a498c 100644
4657 +--- a/sound/pci/hda/patch_realtek.c
4658 ++++ b/sound/pci/hda/patch_realtek.c
4659 +@@ -9116,6 +9116,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4660 + SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
4661 + SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4662 + SND_PCI_QUIRK(0x1558, 0x866d, "Clevo NP5[05]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4663 ++ SND_PCI_QUIRK(0x1558, 0x867c, "Clevo NP7[01]PNP", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4664 + SND_PCI_QUIRK(0x1558, 0x867d, "Clevo NP7[01]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4665 + SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4666 + SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME),
4667 +diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
4668 +index 33e43013ff770..0d639a33ad969 100644
4669 +--- a/sound/soc/atmel/sam9g20_wm8731.c
4670 ++++ b/sound/soc/atmel/sam9g20_wm8731.c
4671 +@@ -46,35 +46,6 @@
4672 + */
4673 + #undef ENABLE_MIC_INPUT
4674 +
4675 +-static struct clk *mclk;
4676 +-
4677 +-static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
4678 +- struct snd_soc_dapm_context *dapm,
4679 +- enum snd_soc_bias_level level)
4680 +-{
4681 +- static int mclk_on;
4682 +- int ret = 0;
4683 +-
4684 +- switch (level) {
4685 +- case SND_SOC_BIAS_ON:
4686 +- case SND_SOC_BIAS_PREPARE:
4687 +- if (!mclk_on)
4688 +- ret = clk_enable(mclk);
4689 +- if (ret == 0)
4690 +- mclk_on = 1;
4691 +- break;
4692 +-
4693 +- case SND_SOC_BIAS_OFF:
4694 +- case SND_SOC_BIAS_STANDBY:
4695 +- if (mclk_on)
4696 +- clk_disable(mclk);
4697 +- mclk_on = 0;
4698 +- break;
4699 +- }
4700 +-
4701 +- return ret;
4702 +-}
4703 +-
4704 + static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
4705 + SND_SOC_DAPM_MIC("Int Mic", NULL),
4706 + SND_SOC_DAPM_SPK("Ext Spk", NULL),
4707 +@@ -135,7 +106,6 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
4708 + .owner = THIS_MODULE,
4709 + .dai_link = &at91sam9g20ek_dai,
4710 + .num_links = 1,
4711 +- .set_bias_level = at91sam9g20ek_set_bias_level,
4712 +
4713 + .dapm_widgets = at91sam9g20ek_dapm_widgets,
4714 + .num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
4715 +@@ -148,7 +118,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
4716 + {
4717 + struct device_node *np = pdev->dev.of_node;
4718 + struct device_node *codec_np, *cpu_np;
4719 +- struct clk *pllb;
4720 + struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
4721 + int ret;
4722 +
4723 +@@ -162,31 +131,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
4724 + return -EINVAL;
4725 + }
4726 +
4727 +- /*
4728 +- * Codec MCLK is supplied by PCK0 - set it up.
4729 +- */
4730 +- mclk = clk_get(NULL, "pck0");
4731 +- if (IS_ERR(mclk)) {
4732 +- dev_err(&pdev->dev, "Failed to get MCLK\n");
4733 +- ret = PTR_ERR(mclk);
4734 +- goto err;
4735 +- }
4736 +-
4737 +- pllb = clk_get(NULL, "pllb");
4738 +- if (IS_ERR(pllb)) {
4739 +- dev_err(&pdev->dev, "Failed to get PLLB\n");
4740 +- ret = PTR_ERR(pllb);
4741 +- goto err_mclk;
4742 +- }
4743 +- ret = clk_set_parent(mclk, pllb);
4744 +- clk_put(pllb);
4745 +- if (ret != 0) {
4746 +- dev_err(&pdev->dev, "Failed to set MCLK parent\n");
4747 +- goto err_mclk;
4748 +- }
4749 +-
4750 +- clk_set_rate(mclk, MCLK_RATE);
4751 +-
4752 + card->dev = &pdev->dev;
4753 +
4754 + /* Parse device node info */
4755 +@@ -230,9 +174,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
4756 +
4757 + return ret;
4758 +
4759 +-err_mclk:
4760 +- clk_put(mclk);
4761 +- mclk = NULL;
4762 + err:
4763 + atmel_ssc_put_audio(0);
4764 + return ret;
4765 +@@ -242,8 +183,6 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
4766 + {
4767 + struct snd_soc_card *card = platform_get_drvdata(pdev);
4768 +
4769 +- clk_disable(mclk);
4770 +- mclk = NULL;
4771 + snd_soc_unregister_card(card);
4772 + atmel_ssc_put_audio(0);
4773 +
4774 +diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
4775 +index 9ad7fc0baf072..20a07c92b2fc2 100644
4776 +--- a/sound/soc/codecs/msm8916-wcd-digital.c
4777 ++++ b/sound/soc/codecs/msm8916-wcd-digital.c
4778 +@@ -1206,9 +1206,16 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
4779 +
4780 + dev_set_drvdata(dev, priv);
4781 +
4782 +- return devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
4783 ++ ret = devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
4784 + msm8916_wcd_digital_dai,
4785 + ARRAY_SIZE(msm8916_wcd_digital_dai));
4786 ++ if (ret)
4787 ++ goto err_mclk;
4788 ++
4789 ++ return 0;
4790 ++
4791 ++err_mclk:
4792 ++ clk_disable_unprepare(priv->mclk);
4793 + err_clk:
4794 + clk_disable_unprepare(priv->ahbclk);
4795 + return ret;
4796 +diff --git a/sound/soc/codecs/rk817_codec.c b/sound/soc/codecs/rk817_codec.c
4797 +index 8fffe378618d0..cce6f4e7992f5 100644
4798 +--- a/sound/soc/codecs/rk817_codec.c
4799 ++++ b/sound/soc/codecs/rk817_codec.c
4800 +@@ -489,7 +489,7 @@ static int rk817_platform_probe(struct platform_device *pdev)
4801 +
4802 + rk817_codec_parse_dt_property(&pdev->dev, rk817_codec_data);
4803 +
4804 +- rk817_codec_data->mclk = clk_get(pdev->dev.parent, "mclk");
4805 ++ rk817_codec_data->mclk = devm_clk_get(pdev->dev.parent, "mclk");
4806 + if (IS_ERR(rk817_codec_data->mclk)) {
4807 + dev_dbg(&pdev->dev, "Unable to get mclk\n");
4808 + ret = -ENXIO;
4809 +diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
4810 +index be68d573a4906..c9ff9c89adf70 100644
4811 +--- a/sound/soc/codecs/rt5682.c
4812 ++++ b/sound/soc/codecs/rt5682.c
4813 +@@ -2822,14 +2822,11 @@ static int rt5682_bclk_set_rate(struct clk_hw *hw, unsigned long rate,
4814 +
4815 + for_each_component_dais(component, dai)
4816 + if (dai->id == RT5682_AIF1)
4817 +- break;
4818 +- if (!dai) {
4819 +- dev_err(rt5682->i2c_dev, "dai %d not found in component\n",
4820 +- RT5682_AIF1);
4821 +- return -ENODEV;
4822 +- }
4823 ++ return rt5682_set_bclk1_ratio(dai, factor);
4824 +
4825 +- return rt5682_set_bclk1_ratio(dai, factor);
4826 ++ dev_err(rt5682->i2c_dev, "dai %d not found in component\n",
4827 ++ RT5682_AIF1);
4828 ++ return -ENODEV;
4829 + }
4830 +
4831 + static const struct clk_ops rt5682_dai_clk_ops[RT5682_DAI_NUM_CLKS] = {
4832 +diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c
4833 +index 92b8753f1267b..f2296090716f3 100644
4834 +--- a/sound/soc/codecs/rt5682s.c
4835 ++++ b/sound/soc/codecs/rt5682s.c
4836 +@@ -2679,14 +2679,11 @@ static int rt5682s_bclk_set_rate(struct clk_hw *hw, unsigned long rate,
4837 +
4838 + for_each_component_dais(component, dai)
4839 + if (dai->id == RT5682S_AIF1)
4840 +- break;
4841 +- if (!dai) {
4842 +- dev_err(component->dev, "dai %d not found in component\n",
4843 +- RT5682S_AIF1);
4844 +- return -ENODEV;
4845 +- }
4846 ++ return rt5682s_set_bclk1_ratio(dai, factor);
4847 +
4848 +- return rt5682s_set_bclk1_ratio(dai, factor);
4849 ++ dev_err(component->dev, "dai %d not found in component\n",
4850 ++ RT5682S_AIF1);
4851 ++ return -ENODEV;
4852 + }
4853 +
4854 + static const struct clk_ops rt5682s_dai_clk_ops[RT5682S_DAI_NUM_CLKS] = {
4855 +diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
4856 +index 1e75e93cf28f2..6298ebe96e941 100644
4857 +--- a/sound/soc/codecs/wcd934x.c
4858 ++++ b/sound/soc/codecs/wcd934x.c
4859 +@@ -1274,29 +1274,7 @@ static int wcd934x_set_sido_input_src(struct wcd934x_codec *wcd, int sido_src)
4860 + if (sido_src == wcd->sido_input_src)
4861 + return 0;
4862 +
4863 +- if (sido_src == SIDO_SOURCE_INTERNAL) {
4864 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
4865 +- WCD934X_ANA_BUCK_HI_ACCU_EN_MASK, 0);
4866 +- usleep_range(100, 110);
4867 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
4868 +- WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK, 0x0);
4869 +- usleep_range(100, 110);
4870 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
4871 +- WCD934X_ANA_RCO_BG_EN_MASK, 0);
4872 +- usleep_range(100, 110);
4873 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
4874 +- WCD934X_ANA_BUCK_PRE_EN1_MASK,
4875 +- WCD934X_ANA_BUCK_PRE_EN1_ENABLE);
4876 +- usleep_range(100, 110);
4877 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
4878 +- WCD934X_ANA_BUCK_PRE_EN2_MASK,
4879 +- WCD934X_ANA_BUCK_PRE_EN2_ENABLE);
4880 +- usleep_range(100, 110);
4881 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
4882 +- WCD934X_ANA_BUCK_HI_ACCU_EN_MASK,
4883 +- WCD934X_ANA_BUCK_HI_ACCU_ENABLE);
4884 +- usleep_range(100, 110);
4885 +- } else if (sido_src == SIDO_SOURCE_RCO_BG) {
4886 ++ if (sido_src == SIDO_SOURCE_RCO_BG) {
4887 + regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
4888 + WCD934X_ANA_RCO_BG_EN_MASK,
4889 + WCD934X_ANA_RCO_BG_ENABLE);
4890 +@@ -1382,8 +1360,6 @@ static int wcd934x_disable_ana_bias_and_syclk(struct wcd934x_codec *wcd)
4891 + regmap_update_bits(wcd->regmap, WCD934X_CLK_SYS_MCLK_PRG,
4892 + WCD934X_EXT_CLK_BUF_EN_MASK |
4893 + WCD934X_MCLK_EN_MASK, 0x0);
4894 +- wcd934x_set_sido_input_src(wcd, SIDO_SOURCE_INTERNAL);
4895 +-
4896 + regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
4897 + WCD934X_ANA_BIAS_EN_MASK, 0);
4898 + regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
4899 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
4900 +index b06c5682445c0..fb43b331a36e8 100644
4901 +--- a/sound/soc/soc-dapm.c
4902 ++++ b/sound/soc/soc-dapm.c
4903 +@@ -1687,8 +1687,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
4904 + switch (w->id) {
4905 + case snd_soc_dapm_pre:
4906 + if (!w->event)
4907 +- list_for_each_entry_safe_continue(w, n, list,
4908 +- power_list);
4909 ++ continue;
4910 +
4911 + if (event == SND_SOC_DAPM_STREAM_START)
4912 + ret = w->event(w,
4913 +@@ -1700,8 +1699,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
4914 +
4915 + case snd_soc_dapm_post:
4916 + if (!w->event)
4917 +- list_for_each_entry_safe_continue(w, n, list,
4918 +- power_list);
4919 ++ continue;
4920 +
4921 + if (event == SND_SOC_DAPM_STREAM_START)
4922 + ret = w->event(w,
4923 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
4924 +index cb24805668bd8..f413238117af7 100644
4925 +--- a/sound/soc/soc-topology.c
4926 ++++ b/sound/soc/soc-topology.c
4927 +@@ -1479,12 +1479,12 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
4928 + template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
4929 + kc = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(*kc), GFP_KERNEL);
4930 + if (!kc)
4931 +- goto err;
4932 ++ goto hdr_err;
4933 +
4934 + kcontrol_type = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(unsigned int),
4935 + GFP_KERNEL);
4936 + if (!kcontrol_type)
4937 +- goto err;
4938 ++ goto hdr_err;
4939 +
4940 + for (i = 0; i < le32_to_cpu(w->num_kcontrols); i++) {
4941 + control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
4942 +diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
4943 +index e72dcae5e7ee7..0db11ba559d65 100644
4944 +--- a/sound/soc/sof/topology.c
4945 ++++ b/sound/soc/sof/topology.c
4946 +@@ -1569,6 +1569,46 @@ static int sof_widget_load_buffer(struct snd_soc_component *scomp, int index,
4947 + return 0;
4948 + }
4949 +
4950 ++static void sof_disconnect_dai_widget(struct snd_soc_component *scomp,
4951 ++ struct snd_soc_dapm_widget *w)
4952 ++{
4953 ++ struct snd_soc_card *card = scomp->card;
4954 ++ struct snd_soc_pcm_runtime *rtd;
4955 ++ struct snd_soc_dai *cpu_dai;
4956 ++ int i;
4957 ++
4958 ++ if (!w->sname)
4959 ++ return;
4960 ++
4961 ++ list_for_each_entry(rtd, &card->rtd_list, list) {
4962 ++ /* does stream match DAI link? */
4963 ++ if (!rtd->dai_link->stream_name ||
4964 ++ strcmp(w->sname, rtd->dai_link->stream_name))
4965 ++ continue;
4966 ++
4967 ++ switch (w->id) {
4968 ++ case snd_soc_dapm_dai_out:
4969 ++ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
4970 ++ if (cpu_dai->capture_widget == w) {
4971 ++ cpu_dai->capture_widget = NULL;
4972 ++ break;
4973 ++ }
4974 ++ }
4975 ++ break;
4976 ++ case snd_soc_dapm_dai_in:
4977 ++ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
4978 ++ if (cpu_dai->playback_widget == w) {
4979 ++ cpu_dai->playback_widget = NULL;
4980 ++ break;
4981 ++ }
4982 ++ }
4983 ++ break;
4984 ++ default:
4985 ++ break;
4986 ++ }
4987 ++ }
4988 ++}
4989 ++
4990 + /* bind PCM ID to host component ID */
4991 + static int spcm_bind(struct snd_soc_component *scomp, struct snd_sof_pcm *spcm,
4992 + int dir)
4993 +@@ -2449,6 +2489,9 @@ static int sof_widget_unload(struct snd_soc_component *scomp,
4994 + kfree(dai->dai_config);
4995 + list_del(&dai->list);
4996 + }
4997 ++
4998 ++ sof_disconnect_dai_widget(scomp, widget);
4999 ++
5000 + break;
5001 + default:
5002 + break;
5003 +diff --git a/sound/usb/midi.c b/sound/usb/midi.c
5004 +index 2c01649c70f61..7c6ca2b433a53 100644
5005 +--- a/sound/usb/midi.c
5006 ++++ b/sound/usb/midi.c
5007 +@@ -1194,6 +1194,7 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
5008 + } while (drain_urbs && timeout);
5009 + finish_wait(&ep->drain_wait, &wait);
5010 + }
5011 ++ port->active = 0;
5012 + spin_unlock_irq(&ep->buffer_lock);
5013 + }
5014 +
5015 +diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
5016 +index 64f5544d0a0aa..7ef7a8abcc2b1 100644
5017 +--- a/sound/usb/mixer_maps.c
5018 ++++ b/sound/usb/mixer_maps.c
5019 +@@ -599,6 +599,10 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
5020 + .id = USB_ID(0x0db0, 0x419c),
5021 + .map = msi_mpg_x570s_carbon_max_wifi_alc4080_map,
5022 + },
5023 ++ { /* MSI MAG X570S Torpedo Max */
5024 ++ .id = USB_ID(0x0db0, 0xa073),
5025 ++ .map = msi_mpg_x570s_carbon_max_wifi_alc4080_map,
5026 ++ },
5027 + { /* MSI TRX40 */
5028 + .id = USB_ID(0x0db0, 0x543d),
5029 + .map = trx40_mobo_map,
5030 +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
5031 +index 167834133b9bc..b8359a0aa008a 100644
5032 +--- a/sound/usb/usbaudio.h
5033 ++++ b/sound/usb/usbaudio.h
5034 +@@ -8,7 +8,7 @@
5035 + */
5036 +
5037 + /* handling of USB vendor/product ID pairs as 32-bit numbers */
5038 +-#define USB_ID(vendor, product) (((vendor) << 16) | (product))
5039 ++#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))
5040 + #define USB_ID_VENDOR(id) ((id) >> 16)
5041 + #define USB_ID_PRODUCT(id) ((u16)(id))
5042 +
5043 +diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
5044 +index 9a770bfdc8042..15d42d871b3e6 100644
5045 +--- a/tools/lib/perf/evlist.c
5046 ++++ b/tools/lib/perf/evlist.c
5047 +@@ -577,7 +577,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
5048 + {
5049 + struct perf_evsel *evsel;
5050 + const struct perf_cpu_map *cpus = evlist->cpus;
5051 +- const struct perf_thread_map *threads = evlist->threads;
5052 +
5053 + if (!ops || !ops->get || !ops->mmap)
5054 + return -EINVAL;
5055 +@@ -589,7 +588,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
5056 + perf_evlist__for_each_entry(evlist, evsel) {
5057 + if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
5058 + evsel->sample_id == NULL &&
5059 +- perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
5060 ++ perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
5061 + return -ENOMEM;
5062 + }
5063 +
5064 +diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
5065 +index 1dd92d8c92799..a6bb35b0af9f9 100644
5066 +--- a/tools/perf/builtin-report.c
5067 ++++ b/tools/perf/builtin-report.c
5068 +@@ -349,6 +349,7 @@ static int report__setup_sample_type(struct report *rep)
5069 + struct perf_session *session = rep->session;
5070 + u64 sample_type = evlist__combined_sample_type(session->evlist);
5071 + bool is_pipe = perf_data__is_pipe(session->data);
5072 ++ struct evsel *evsel;
5073 +
5074 + if (session->itrace_synth_opts->callchain ||
5075 + session->itrace_synth_opts->add_callchain ||
5076 +@@ -403,6 +404,19 @@ static int report__setup_sample_type(struct report *rep)
5077 + }
5078 +
5079 + if (sort__mode == SORT_MODE__MEMORY) {
5080 ++ /*
5081 ++ * FIXUP: prior to kernel 5.18, Arm SPE failed to set
5082 ++ * PERF_SAMPLE_DATA_SRC bit in sample type. For backward
5083 ++ * compatibility, set the bit if it's an old perf data file.
5084 ++ */
5085 ++ evlist__for_each_entry(session->evlist, evsel) {
5086 ++ if (strstr(evsel->name, "arm_spe") &&
5087 ++ !(sample_type & PERF_SAMPLE_DATA_SRC)) {
5088 ++ evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
5089 ++ sample_type |= PERF_SAMPLE_DATA_SRC;
5090 ++ }
5091 ++ }
5092 ++
5093 + if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
5094 + ui__error("Selected --mem-mode but no mem data. "
5095 + "Did you call perf record without -d?\n");
5096 +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
5097 +index fa478ddcd18ae..537a552fe6b3b 100644
5098 +--- a/tools/perf/builtin-script.c
5099 ++++ b/tools/perf/builtin-script.c
5100 +@@ -459,7 +459,7 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
5101 + return -EINVAL;
5102 +
5103 + if (PRINT_FIELD(DATA_SRC) &&
5104 +- evsel__check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC))
5105 ++ evsel__do_check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC, allow_user_set))
5106 + return -EINVAL;
5107 +
5108 + if (PRINT_FIELD(WEIGHT) &&
5109 +diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh
5110 +index 429f7ee735cf4..fd23c80eba315 100755
5111 +--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh
5112 ++++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh
5113 +@@ -159,6 +159,17 @@ flooding_remotes_add()
5114 + local lsb
5115 + local i
5116 +
5117 ++ # Prevent unwanted packets from entering the bridge and interfering
5118 ++ # with the test.
5119 ++ tc qdisc add dev br0 clsact
5120 ++ tc filter add dev br0 egress protocol all pref 1 handle 1 \
5121 ++ matchall skip_hw action drop
5122 ++ tc qdisc add dev $h1 clsact
5123 ++ tc filter add dev $h1 egress protocol all pref 1 handle 1 \
5124 ++ flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
5125 ++ tc filter add dev $h1 egress protocol all pref 2 handle 2 \
5126 ++ matchall skip_hw action drop
5127 ++
5128 + for i in $(eval echo {1..$num_remotes}); do
5129 + lsb=$((i + 1))
5130 +
5131 +@@ -195,6 +206,12 @@ flooding_filters_del()
5132 + done
5133 +
5134 + tc qdisc del dev $rp2 clsact
5135 ++
5136 ++ tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
5137 ++ tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
5138 ++ tc qdisc del dev $h1 clsact
5139 ++ tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
5140 ++ tc qdisc del dev br0 clsact
5141 + }
5142 +
5143 + flooding_check_packets()
5144 +diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
5145 +index fedcb7b35af9f..af5ea50ed5c0e 100755
5146 +--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
5147 ++++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
5148 +@@ -172,6 +172,17 @@ flooding_filters_add()
5149 + local lsb
5150 + local i
5151 +
5152 ++ # Prevent unwanted packets from entering the bridge and interfering
5153 ++ # with the test.
5154 ++ tc qdisc add dev br0 clsact
5155 ++ tc filter add dev br0 egress protocol all pref 1 handle 1 \
5156 ++ matchall skip_hw action drop
5157 ++ tc qdisc add dev $h1 clsact
5158 ++ tc filter add dev $h1 egress protocol all pref 1 handle 1 \
5159 ++ flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
5160 ++ tc filter add dev $h1 egress protocol all pref 2 handle 2 \
5161 ++ matchall skip_hw action drop
5162 ++
5163 + tc qdisc add dev $rp2 clsact
5164 +
5165 + for i in $(eval echo {1..$num_remotes}); do
5166 +@@ -194,6 +205,12 @@ flooding_filters_del()
5167 + done
5168 +
5169 + tc qdisc del dev $rp2 clsact
5170 ++
5171 ++ tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
5172 ++ tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
5173 ++ tc qdisc del dev $h1 clsact
5174 ++ tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
5175 ++ tc qdisc del dev br0 clsact
5176 + }
5177 +
5178 + flooding_check_packets()
5179 +diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
5180 +index b08d30bf71c51..3b940a101bc07 100644
5181 +--- a/tools/testing/selftests/kvm/aarch64/arch_timer.c
5182 ++++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c
5183 +@@ -362,11 +362,12 @@ static void test_init_timer_irq(struct kvm_vm *vm)
5184 + pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
5185 + }
5186 +
5187 ++static int gic_fd;
5188 ++
5189 + static struct kvm_vm *test_vm_create(void)
5190 + {
5191 + struct kvm_vm *vm;
5192 + unsigned int i;
5193 +- int ret;
5194 + int nr_vcpus = test_args.nr_vcpus;
5195 +
5196 + vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
5197 +@@ -383,8 +384,8 @@ static struct kvm_vm *test_vm_create(void)
5198 +
5199 + ucall_init(vm, NULL);
5200 + test_init_timer_irq(vm);
5201 +- ret = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
5202 +- if (ret < 0) {
5203 ++ gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
5204 ++ if (gic_fd < 0) {
5205 + print_skip("Failed to create vgic-v3");
5206 + exit(KSFT_SKIP);
5207 + }
5208 +@@ -395,6 +396,12 @@ static struct kvm_vm *test_vm_create(void)
5209 + return vm;
5210 + }
5211 +
5212 ++static void test_vm_cleanup(struct kvm_vm *vm)
5213 ++{
5214 ++ close(gic_fd);
5215 ++ kvm_vm_free(vm);
5216 ++}
5217 ++
5218 + static void test_print_help(char *name)
5219 + {
5220 + pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
5221 +@@ -478,7 +485,7 @@ int main(int argc, char *argv[])
5222 +
5223 + vm = test_vm_create();
5224 + test_run(vm);
5225 +- kvm_vm_free(vm);
5226 ++ test_vm_cleanup(vm);
5227 +
5228 + return 0;
5229 + }