From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Wed, 27 Apr 2022 12:20:31
Message-Id: 1651062007.7a8071f7fc7e8d3ba7cb7e27a7175f45de7cefe2.mpagano@gentoo
1 commit: 7a8071f7fc7e8d3ba7cb7e27a7175f45de7cefe2
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Apr 27 12:20:07 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Apr 27 12:20:07 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7a8071f7
7
8 Linux patch 5.10.113
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1112_linux-5.10.113.patch | 2842 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2846 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 79bdf239..e6bddb24 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -491,6 +491,10 @@ Patch: 1111_linux-5.10.112.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.112
23
24 +Patch: 1112_linux-5.10.113.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.113
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1112_linux-5.10.113.patch b/1112_linux-5.10.113.patch
33 new file mode 100644
34 index 00000000..3839f947
35 --- /dev/null
36 +++ b/1112_linux-5.10.113.patch
37 @@ -0,0 +1,2842 @@
38 +diff --git a/Documentation/filesystems/ext4/attributes.rst b/Documentation/filesystems/ext4/attributes.rst
39 +index 54386a010a8d7..871d2da7a0a91 100644
40 +--- a/Documentation/filesystems/ext4/attributes.rst
41 ++++ b/Documentation/filesystems/ext4/attributes.rst
42 +@@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
43 + - Checksum of the extended attribute block.
44 + * - 0x14
45 + - \_\_u32
46 +- - h\_reserved[2]
47 ++ - h\_reserved[3]
48 + - Zero.
49 +
50 + The checksum is calculated against the FS UUID, the 64-bit block number
51 +diff --git a/Makefile b/Makefile
52 +index 05013bf5a469b..99bbaa9f54f4c 100644
53 +--- a/Makefile
54 ++++ b/Makefile
55 +@@ -1,7 +1,7 @@
56 + # SPDX-License-Identifier: GPL-2.0
57 + VERSION = 5
58 + PATCHLEVEL = 10
59 +-SUBLEVEL = 112
60 ++SUBLEVEL = 113
61 + EXTRAVERSION =
62 + NAME = Dare mighty things
63 +
64 +diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
65 +index ae656bfc31c3d..301ade4d0b943 100644
66 +--- a/arch/arc/kernel/entry.S
67 ++++ b/arch/arc/kernel/entry.S
68 +@@ -199,6 +199,7 @@ tracesys_exit:
69 + st r0, [sp, PT_r0] ; sys call return value in pt_regs
70 +
71 + ;POST Sys Call Ptrace Hook
72 ++ mov r0, sp ; pt_regs needed
73 + bl @syscall_trace_exit
74 + b ret_from_exception ; NOT ret_from_system_call at is saves r0 which
75 + ; we'd done before calling post hook above
76 +diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
77 +index 1da11bdb1dfbd..1c6500c4e6a17 100644
78 +--- a/arch/arm/mach-vexpress/spc.c
79 ++++ b/arch/arm/mach-vexpress/spc.c
80 +@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
81 + }
82 +
83 + cluster = topology_physical_package_id(cpu_dev->id);
84 +- if (init_opp_table[cluster])
85 ++ if (cluster < 0 || init_opp_table[cluster])
86 + continue;
87 +
88 + if (ve_init_opp_table(cpu_dev))
89 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
90 +index 49082529764f0..0fac1f3f7f478 100644
91 +--- a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
92 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
93 +@@ -89,12 +89,12 @@
94 + pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
95 +
96 + ti,x-min = /bits/ 16 <125>;
97 +- touchscreen-size-x = /bits/ 16 <4008>;
98 ++ touchscreen-size-x = <4008>;
99 + ti,y-min = /bits/ 16 <282>;
100 +- touchscreen-size-y = /bits/ 16 <3864>;
101 ++ touchscreen-size-y = <3864>;
102 + ti,x-plate-ohms = /bits/ 16 <180>;
103 +- touchscreen-max-pressure = /bits/ 16 <255>;
104 +- touchscreen-average-samples = /bits/ 16 <10>;
105 ++ touchscreen-max-pressure = <255>;
106 ++ touchscreen-average-samples = <10>;
107 + ti,debounce-tol = /bits/ 16 <3>;
108 + ti,debounce-rep = /bits/ 16 <1>;
109 + ti,settle-delay-usec = /bits/ 16 <150>;
110 +diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
111 +index 7f356edf9f916..f6287f174355c 100644
112 +--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
113 ++++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
114 +@@ -70,12 +70,12 @@
115 + pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
116 +
117 + ti,x-min = /bits/ 16 <125>;
118 +- touchscreen-size-x = /bits/ 16 <4008>;
119 ++ touchscreen-size-x = <4008>;
120 + ti,y-min = /bits/ 16 <282>;
121 +- touchscreen-size-y = /bits/ 16 <3864>;
122 ++ touchscreen-size-y = <3864>;
123 + ti,x-plate-ohms = /bits/ 16 <180>;
124 +- touchscreen-max-pressure = /bits/ 16 <255>;
125 +- touchscreen-average-samples = /bits/ 16 <10>;
126 ++ touchscreen-max-pressure = <255>;
127 ++ touchscreen-average-samples = <10>;
128 + ti,debounce-tol = /bits/ 16 <3>;
129 + ti,debounce-rep = /bits/ 16 <1>;
130 + ti,settle-delay-usec = /bits/ 16 <150>;
131 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
132 +index f3a70dc7c5942..3f74db7b0a31d 100644
133 +--- a/arch/arm64/include/asm/pgtable.h
134 ++++ b/arch/arm64/include/asm/pgtable.h
135 +@@ -512,13 +512,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
136 +
137 + #define pmd_none(pmd) (!pmd_val(pmd))
138 +
139 +-#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT))
140 +-
141 + #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
142 + PMD_TYPE_TABLE)
143 + #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
144 + PMD_TYPE_SECT)
145 +-#define pmd_leaf(pmd) pmd_sect(pmd)
146 ++#define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd))
147 ++#define pmd_bad(pmd) (!pmd_table(pmd))
148 +
149 + #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
150 + static inline bool pud_sect(pud_t pud) { return false; }
151 +@@ -602,9 +601,9 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
152 + pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
153 +
154 + #define pud_none(pud) (!pud_val(pud))
155 +-#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
156 ++#define pud_bad(pud) (!pud_table(pud))
157 + #define pud_present(pud) pte_present(pud_pte(pud))
158 +-#define pud_leaf(pud) pud_sect(pud)
159 ++#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
160 + #define pud_valid(pud) pte_valid(pud_pte(pud))
161 +
162 + static inline void set_pud(pud_t *pudp, pud_t pud)
163 +diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
164 +index 8da93fdfa59e9..c640053ab03f2 100644
165 +--- a/arch/powerpc/kvm/book3s_64_vio.c
166 ++++ b/arch/powerpc/kvm/book3s_64_vio.c
167 +@@ -421,13 +421,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
168 + tbl[idx % TCES_PER_PAGE] = tce;
169 + }
170 +
171 +-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
172 +- unsigned long entry)
173 ++static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
174 ++ struct iommu_table *tbl, unsigned long entry)
175 + {
176 +- unsigned long hpa = 0;
177 +- enum dma_data_direction dir = DMA_NONE;
178 ++ unsigned long i;
179 ++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
180 ++ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
181 ++
182 ++ for (i = 0; i < subpages; ++i) {
183 ++ unsigned long hpa = 0;
184 ++ enum dma_data_direction dir = DMA_NONE;
185 +
186 +- iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
187 ++ iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
188 ++ }
189 + }
190 +
191 + static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
192 +@@ -486,6 +492,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
193 + break;
194 + }
195 +
196 ++ iommu_tce_kill(tbl, io_entry, subpages);
197 ++
198 + return ret;
199 + }
200 +
201 +@@ -545,6 +553,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
202 + break;
203 + }
204 +
205 ++ iommu_tce_kill(tbl, io_entry, subpages);
206 ++
207 + return ret;
208 + }
209 +
210 +@@ -591,10 +601,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
211 + ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
212 + entry, ua, dir);
213 +
214 +- iommu_tce_kill(stit->tbl, entry, 1);
215 +
216 + if (ret != H_SUCCESS) {
217 +- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
218 ++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
219 + goto unlock_exit;
220 + }
221 + }
222 +@@ -670,13 +679,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
223 + */
224 + if (get_user(tce, tces + i)) {
225 + ret = H_TOO_HARD;
226 +- goto invalidate_exit;
227 ++ goto unlock_exit;
228 + }
229 + tce = be64_to_cpu(tce);
230 +
231 + if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
232 + ret = H_PARAMETER;
233 +- goto invalidate_exit;
234 ++ goto unlock_exit;
235 + }
236 +
237 + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
238 +@@ -685,19 +694,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
239 + iommu_tce_direction(tce));
240 +
241 + if (ret != H_SUCCESS) {
242 +- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
243 +- entry);
244 +- goto invalidate_exit;
245 ++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
246 ++ entry + i);
247 ++ goto unlock_exit;
248 + }
249 + }
250 +
251 + kvmppc_tce_put(stt, entry + i, tce);
252 + }
253 +
254 +-invalidate_exit:
255 +- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
256 +- iommu_tce_kill(stit->tbl, entry, npages);
257 +-
258 + unlock_exit:
259 + srcu_read_unlock(&vcpu->kvm->srcu, idx);
260 +
261 +@@ -736,20 +741,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
262 + continue;
263 +
264 + if (ret == H_TOO_HARD)
265 +- goto invalidate_exit;
266 ++ return ret;
267 +
268 + WARN_ON_ONCE(1);
269 +- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
270 ++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
271 + }
272 + }
273 +
274 + for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
275 + kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
276 +
277 +-invalidate_exit:
278 +- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
279 +- iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
280 +-
281 + return ret;
282 + }
283 + EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
284 +diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
285 +index e5ba96c41f3fc..57af53a6a2d84 100644
286 +--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
287 ++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
288 +@@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl,
289 + tbl->it_ops->tce_kill(tbl, entry, pages, true);
290 + }
291 +
292 +-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
293 +- unsigned long entry)
294 ++static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
295 ++ struct iommu_table *tbl, unsigned long entry)
296 + {
297 +- unsigned long hpa = 0;
298 +- enum dma_data_direction dir = DMA_NONE;
299 ++ unsigned long i;
300 ++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
301 ++ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
302 ++
303 ++ for (i = 0; i < subpages; ++i) {
304 ++ unsigned long hpa = 0;
305 ++ enum dma_data_direction dir = DMA_NONE;
306 +
307 +- iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
308 ++ iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
309 ++ }
310 + }
311 +
312 + static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
313 +@@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
314 + break;
315 + }
316 +
317 ++ iommu_tce_kill_rm(tbl, io_entry, subpages);
318 ++
319 + return ret;
320 + }
321 +
322 +@@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
323 + break;
324 + }
325 +
326 ++ iommu_tce_kill_rm(tbl, io_entry, subpages);
327 ++
328 + return ret;
329 + }
330 +
331 +@@ -424,10 +434,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
332 + ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
333 + stit->tbl, entry, ua, dir);
334 +
335 +- iommu_tce_kill_rm(stit->tbl, entry, 1);
336 +-
337 + if (ret != H_SUCCESS) {
338 +- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
339 ++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
340 + return ret;
341 + }
342 + }
343 +@@ -569,7 +577,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
344 + ua = 0;
345 + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
346 + ret = H_PARAMETER;
347 +- goto invalidate_exit;
348 ++ goto unlock_exit;
349 + }
350 +
351 + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
352 +@@ -578,19 +586,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
353 + iommu_tce_direction(tce));
354 +
355 + if (ret != H_SUCCESS) {
356 +- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
357 +- entry);
358 +- goto invalidate_exit;
359 ++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
360 ++ entry + i);
361 ++ goto unlock_exit;
362 + }
363 + }
364 +
365 + kvmppc_rm_tce_put(stt, entry + i, tce);
366 + }
367 +
368 +-invalidate_exit:
369 +- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
370 +- iommu_tce_kill_rm(stit->tbl, entry, npages);
371 +-
372 + unlock_exit:
373 + if (!prereg)
374 + arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
375 +@@ -632,20 +636,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
376 + continue;
377 +
378 + if (ret == H_TOO_HARD)
379 +- goto invalidate_exit;
380 ++ return ret;
381 +
382 + WARN_ON_ONCE_RM(1);
383 +- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
384 ++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
385 + }
386 + }
387 +
388 + for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
389 + kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
390 +
391 +-invalidate_exit:
392 +- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
393 +- iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
394 +-
395 + return ret;
396 + }
397 +
398 +diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
399 +index 2a57e93a79dcf..7245355bee28b 100644
400 +--- a/arch/powerpc/perf/power9-pmu.c
401 ++++ b/arch/powerpc/perf/power9-pmu.c
402 +@@ -133,11 +133,11 @@ int p9_dd22_bl_ev[] = {
403 +
404 + /* Table of alternatives, sorted by column 0 */
405 + static const unsigned int power9_event_alternatives[][MAX_ALT] = {
406 +- { PM_INST_DISP, PM_INST_DISP_ALT },
407 +- { PM_RUN_CYC_ALT, PM_RUN_CYC },
408 +- { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
409 +- { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
410 + { PM_BR_2PATH, PM_BR_2PATH_ALT },
411 ++ { PM_INST_DISP, PM_INST_DISP_ALT },
412 ++ { PM_RUN_CYC_ALT, PM_RUN_CYC },
413 ++ { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
414 ++ { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
415 + };
416 +
417 + static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
418 +diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
419 +index 0e327a01f50fb..46a067bd7e0ba 100644
420 +--- a/arch/x86/include/asm/compat.h
421 ++++ b/arch/x86/include/asm/compat.h
422 +@@ -29,15 +29,13 @@ typedef u32 compat_caddr_t;
423 + typedef __kernel_fsid_t compat_fsid_t;
424 +
425 + struct compat_stat {
426 +- compat_dev_t st_dev;
427 +- u16 __pad1;
428 ++ u32 st_dev;
429 + compat_ino_t st_ino;
430 + compat_mode_t st_mode;
431 + compat_nlink_t st_nlink;
432 + __compat_uid_t st_uid;
433 + __compat_gid_t st_gid;
434 +- compat_dev_t st_rdev;
435 +- u16 __pad2;
436 ++ u32 st_rdev;
437 + u32 st_size;
438 + u32 st_blksize;
439 + u32 st_blocks;
440 +diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
441 +index 45cc0ae0af6f9..c7b9f12896f20 100644
442 +--- a/arch/xtensa/kernel/coprocessor.S
443 ++++ b/arch/xtensa/kernel/coprocessor.S
444 +@@ -29,7 +29,7 @@
445 + .if XTENSA_HAVE_COPROCESSOR(x); \
446 + .align 4; \
447 + .Lsave_cp_regs_cp##x: \
448 +- xchal_cp##x##_store a2 a4 a5 a6 a7; \
449 ++ xchal_cp##x##_store a2 a3 a4 a5 a6; \
450 + jx a0; \
451 + .endif
452 +
453 +@@ -46,7 +46,7 @@
454 + .if XTENSA_HAVE_COPROCESSOR(x); \
455 + .align 4; \
456 + .Lload_cp_regs_cp##x: \
457 +- xchal_cp##x##_load a2 a4 a5 a6 a7; \
458 ++ xchal_cp##x##_load a2 a3 a4 a5 a6; \
459 + jx a0; \
460 + .endif
461 +
462 +diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
463 +index 0dde21e0d3de4..ad1841cecdfb7 100644
464 +--- a/arch/xtensa/kernel/jump_label.c
465 ++++ b/arch/xtensa/kernel/jump_label.c
466 +@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
467 + {
468 + struct patch *patch = data;
469 +
470 +- if (atomic_inc_return(&patch->cpu_count) == 1) {
471 ++ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
472 + local_patch_text(patch->addr, patch->data, patch->sz);
473 + atomic_inc(&patch->cpu_count);
474 + } else {
475 +diff --git a/block/ioctl.c b/block/ioctl.c
476 +index ed240e170e148..e7eed7dadb5cf 100644
477 +--- a/block/ioctl.c
478 ++++ b/block/ioctl.c
479 +@@ -679,7 +679,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
480 + (bdev->bd_bdi->ra_pages * PAGE_SIZE) / 512);
481 + case BLKGETSIZE:
482 + size = i_size_read(bdev->bd_inode);
483 +- if ((size >> 9) > ~0UL)
484 ++ if ((size >> 9) > ~(compat_ulong_t)0)
485 + return -EFBIG;
486 + return compat_put_ulong(argp, size >> 9);
487 +
488 +diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
489 +index b066809ba9a11..c56f4043b0cc0 100644
490 +--- a/drivers/ata/pata_marvell.c
491 ++++ b/drivers/ata/pata_marvell.c
492 +@@ -83,6 +83,8 @@ static int marvell_cable_detect(struct ata_port *ap)
493 + switch(ap->port_no)
494 + {
495 + case 0:
496 ++ if (!ap->ioaddr.bmdma_addr)
497 ++ return ATA_CBL_PATA_UNK;
498 + if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
499 + return ATA_CBL_PATA40;
500 + return ATA_CBL_PATA80;
501 +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
502 +index 90afba0b36fe9..47552db6b8dc3 100644
503 +--- a/drivers/dma/at_xdmac.c
504 ++++ b/drivers/dma/at_xdmac.c
505 +@@ -1390,7 +1390,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
506 + {
507 + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
508 + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
509 +- struct at_xdmac_desc *desc, *_desc;
510 ++ struct at_xdmac_desc *desc, *_desc, *iter;
511 + struct list_head *descs_list;
512 + enum dma_status ret;
513 + int residue, retry;
514 +@@ -1505,11 +1505,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
515 + * microblock.
516 + */
517 + descs_list = &desc->descs_list;
518 +- list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
519 +- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
520 +- residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
521 +- if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
522 ++ list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
523 ++ dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
524 ++ residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
525 ++ if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
526 ++ desc = iter;
527 + break;
528 ++ }
529 + }
530 + residue += cur_ubc << dwidth;
531 +
532 +diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
533 +index 7b41cdff1a2ce..51af0dfc3c63e 100644
534 +--- a/drivers/dma/idxd/sysfs.c
535 ++++ b/drivers/dma/idxd/sysfs.c
536 +@@ -1098,6 +1098,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr
537 + u64 xfer_size;
538 + int rc;
539 +
540 ++ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
541 ++ return -EPERM;
542 ++
543 + if (wq->state != IDXD_WQ_DISABLED)
544 + return -EPERM;
545 +
546 +@@ -1132,6 +1135,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
547 + u64 batch_size;
548 + int rc;
549 +
550 ++ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
551 ++ return -EPERM;
552 ++
553 + if (wq->state != IDXD_WQ_DISABLED)
554 + return -EPERM;
555 +
556 +diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
557 +index 306f93e4b26a8..792c91cd16080 100644
558 +--- a/drivers/dma/imx-sdma.c
559 ++++ b/drivers/dma/imx-sdma.c
560 +@@ -1789,7 +1789,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
561 + u32 reg, val, shift, num_map, i;
562 + int ret = 0;
563 +
564 +- if (IS_ERR(np) || IS_ERR(gpr_np))
565 ++ if (IS_ERR(np) || !gpr_np)
566 + goto out;
567 +
568 + event_remap = of_find_property(np, propname, NULL);
569 +@@ -1837,7 +1837,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
570 + }
571 +
572 + out:
573 +- if (!IS_ERR(gpr_np))
574 ++ if (gpr_np)
575 + of_node_put(gpr_np);
576 +
577 + return ret;
578 +diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
579 +index 375e7e647df6b..a1517ef1f4a01 100644
580 +--- a/drivers/dma/mediatek/mtk-uart-apdma.c
581 ++++ b/drivers/dma/mediatek/mtk-uart-apdma.c
582 +@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
583 + unsigned int status;
584 + int ret;
585 +
586 +- ret = pm_runtime_get_sync(mtkd->ddev.dev);
587 ++ ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
588 + if (ret < 0) {
589 + pm_runtime_put_noidle(chan->device->dev);
590 + return ret;
591 +@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
592 + ret = readx_poll_timeout(readl, c->base + VFF_EN,
593 + status, !status, 10, 100);
594 + if (ret)
595 +- return ret;
596 ++ goto err_pm;
597 +
598 + ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
599 + IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
600 + if (ret < 0) {
601 + dev_err(chan->device->dev, "Can't request dma IRQ\n");
602 +- return -EINVAL;
603 ++ ret = -EINVAL;
604 ++ goto err_pm;
605 + }
606 +
607 + if (mtkd->support_33bits)
608 + mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
609 +
610 ++err_pm:
611 ++ pm_runtime_put_noidle(mtkd->ddev.dev);
612 + return ret;
613 + }
614 +
615 +diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
616 +index 92906b56b1a2b..fea44dc0484b5 100644
617 +--- a/drivers/edac/synopsys_edac.c
618 ++++ b/drivers/edac/synopsys_edac.c
619 +@@ -163,6 +163,11 @@
620 + #define ECC_STAT_CECNT_SHIFT 8
621 + #define ECC_STAT_BITNUM_MASK 0x7F
622 +
623 ++/* ECC error count register definitions */
624 ++#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
625 ++#define ECC_ERRCNT_UECNT_SHIFT 16
626 ++#define ECC_ERRCNT_CECNT_MASK 0xFFFF
627 ++
628 + /* DDR QOS Interrupt register definitions */
629 + #define DDR_QOS_IRQ_STAT_OFST 0x20200
630 + #define DDR_QOSUE_MASK 0x4
631 +@@ -418,15 +423,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
632 + base = priv->baseaddr;
633 + p = &priv->stat;
634 +
635 ++ regval = readl(base + ECC_ERRCNT_OFST);
636 ++ p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
637 ++ p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
638 ++ if (!p->ce_cnt)
639 ++ goto ue_err;
640 ++
641 + regval = readl(base + ECC_STAT_OFST);
642 + if (!regval)
643 + return 1;
644 +
645 +- p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
646 +- p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
647 +- if (!p->ce_cnt)
648 +- goto ue_err;
649 +-
650 + p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
651 +
652 + regval = readl(base + ECC_CEADDR0_OFST);
653 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
654 +index d180787482009..59d8affad343a 100644
655 +--- a/drivers/gpio/gpiolib.c
656 ++++ b/drivers/gpio/gpiolib.c
657 +@@ -1612,8 +1612,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
658 +
659 + gpiochip_set_irq_hooks(gc);
660 +
661 +- acpi_gpiochip_request_interrupts(gc);
662 +-
663 + /*
664 + * Using barrier() here to prevent compiler from reordering
665 + * gc->irq.initialized before initialization of above
666 +@@ -1623,6 +1621,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
667 +
668 + gc->irq.initialized = true;
669 +
670 ++ acpi_gpiochip_request_interrupts(gc);
671 ++
672 + return 0;
673 + }
674 +
675 +diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
676 +index 83423092de2ff..da07993339702 100644
677 +--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
678 ++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
679 +@@ -179,7 +179,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
680 + drm_framebuffer_put(plane->state->fb);
681 +
682 + kfree(to_mdp5_plane_state(plane->state));
683 ++ plane->state = NULL;
684 + mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
685 ++ if (!mdp5_state)
686 ++ return;
687 +
688 + /* assign default blend parameters */
689 + mdp5_state->alpha = 255;
690 +diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
691 +index bbdd086be7f59..4b92c63414905 100644
692 +--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
693 ++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
694 +@@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
695 +
696 + ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
697 + if (ret)
698 +- dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
699 ++ dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
700 + }
701 +
702 + static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
703 +@@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
704 + return 0;
705 + }
706 +
707 +-static int rpi_touchscreen_enable(struct drm_panel *panel)
708 ++static int rpi_touchscreen_prepare(struct drm_panel *panel)
709 + {
710 + struct rpi_touchscreen *ts = panel_to_ts(panel);
711 + int i;
712 +@@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
713 + rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
714 + msleep(100);
715 +
716 ++ return 0;
717 ++}
718 ++
719 ++static int rpi_touchscreen_enable(struct drm_panel *panel)
720 ++{
721 ++ struct rpi_touchscreen *ts = panel_to_ts(panel);
722 ++
723 + /* Turn on the backlight. */
724 + rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
725 +
726 +@@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
727 + static const struct drm_panel_funcs rpi_touchscreen_funcs = {
728 + .disable = rpi_touchscreen_disable,
729 + .unprepare = rpi_touchscreen_noop,
730 +- .prepare = rpi_touchscreen_noop,
731 ++ .prepare = rpi_touchscreen_prepare,
732 + .enable = rpi_touchscreen_enable,
733 + .get_modes = rpi_touchscreen_get_modes,
734 + };
735 +diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
736 +index eaf276978ee7f..ad84b56f4091d 100644
737 +--- a/drivers/gpu/drm/vc4/vc4_dsi.c
738 ++++ b/drivers/gpu/drm/vc4/vc4_dsi.c
739 +@@ -835,7 +835,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
740 + unsigned long phy_clock;
741 + int ret;
742 +
743 +- ret = pm_runtime_get_sync(dev);
744 ++ ret = pm_runtime_resume_and_get(dev);
745 + if (ret) {
746 + DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
747 + return;
748 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
749 +index 2836d44094aba..b3d8d9e0e6f6e 100644
750 +--- a/drivers/md/dm.c
751 ++++ b/drivers/md/dm.c
752 +@@ -607,18 +607,17 @@ static void start_io_acct(struct dm_io *io)
753 + false, 0, &io->stats_aux);
754 + }
755 +
756 +-static void end_io_acct(struct dm_io *io)
757 ++static void end_io_acct(struct mapped_device *md, struct bio *bio,
758 ++ unsigned long start_time, struct dm_stats_aux *stats_aux)
759 + {
760 +- struct mapped_device *md = io->md;
761 +- struct bio *bio = io->orig_bio;
762 +- unsigned long duration = jiffies - io->start_time;
763 ++ unsigned long duration = jiffies - start_time;
764 +
765 +- bio_end_io_acct(bio, io->start_time);
766 ++ bio_end_io_acct(bio, start_time);
767 +
768 + if (unlikely(dm_stats_used(&md->stats)))
769 + dm_stats_account_io(&md->stats, bio_data_dir(bio),
770 + bio->bi_iter.bi_sector, bio_sectors(bio),
771 +- true, duration, &io->stats_aux);
772 ++ true, duration, stats_aux);
773 +
774 + /* nudge anyone waiting on suspend queue */
775 + if (unlikely(wq_has_sleeper(&md->wait)))
776 +@@ -903,6 +902,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
777 + blk_status_t io_error;
778 + struct bio *bio;
779 + struct mapped_device *md = io->md;
780 ++ unsigned long start_time = 0;
781 ++ struct dm_stats_aux stats_aux;
782 +
783 + /* Push-back supersedes any I/O errors */
784 + if (unlikely(error)) {
785 +@@ -929,8 +930,10 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
786 +
787 + io_error = io->status;
788 + bio = io->orig_bio;
789 +- end_io_acct(io);
790 ++ start_time = io->start_time;
791 ++ stats_aux = io->stats_aux;
792 + free_io(md, io);
793 ++ end_io_acct(md, bio, start_time, &stats_aux);
794 +
795 + if (io_error == BLK_STS_DM_REQUEUE)
796 + return;
797 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
798 +index 0cf8ae8aeac83..2fb4126ae8d8a 100644
799 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
800 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
801 +@@ -480,8 +480,8 @@ int aq_nic_start(struct aq_nic_s *self)
802 + if (err < 0)
803 + goto err_exit;
804 +
805 +- for (i = 0U, aq_vec = self->aq_vec[0];
806 +- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
807 ++ for (i = 0U; self->aq_vecs > i; ++i) {
808 ++ aq_vec = self->aq_vec[i];
809 + err = aq_vec_start(aq_vec);
810 + if (err < 0)
811 + goto err_exit;
812 +@@ -511,8 +511,8 @@ int aq_nic_start(struct aq_nic_s *self)
813 + mod_timer(&self->polling_timer, jiffies +
814 + AQ_CFG_POLLING_TIMER_INTERVAL);
815 + } else {
816 +- for (i = 0U, aq_vec = self->aq_vec[0];
817 +- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
818 ++ for (i = 0U; self->aq_vecs > i; ++i) {
819 ++ aq_vec = self->aq_vec[i];
820 + err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
821 + aq_vec_isr, aq_vec,
822 + aq_vec_get_affinity_mask(aq_vec));
823 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
824 +index 1826253f97dc4..bdfd462c74db9 100644
825 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
826 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
827 +@@ -450,22 +450,22 @@ err_exit:
828 +
829 + static int aq_pm_freeze(struct device *dev)
830 + {
831 +- return aq_suspend_common(dev, false);
832 ++ return aq_suspend_common(dev, true);
833 + }
834 +
835 + static int aq_pm_suspend_poweroff(struct device *dev)
836 + {
837 +- return aq_suspend_common(dev, true);
838 ++ return aq_suspend_common(dev, false);
839 + }
840 +
841 + static int aq_pm_thaw(struct device *dev)
842 + {
843 +- return atl_resume_common(dev, false);
844 ++ return atl_resume_common(dev, true);
845 + }
846 +
847 + static int aq_pm_resume_restore(struct device *dev)
848 + {
849 +- return atl_resume_common(dev, true);
850 ++ return atl_resume_common(dev, false);
851 + }
852 +
853 + static const struct dev_pm_ops aq_pm_ops = {
854 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
855 +index f4774cf051c97..6ab1f3212d246 100644
856 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
857 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
858 +@@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
859 + if (!self) {
860 + err = -EINVAL;
861 + } else {
862 +- for (i = 0U, ring = self->ring[0];
863 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
864 ++ for (i = 0U; self->tx_rings > i; ++i) {
865 ++ ring = self->ring[i];
866 + u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
867 + ring[AQ_VEC_RX_ID].stats.rx.polls++;
868 + u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
869 +@@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
870 + self->aq_hw_ops = aq_hw_ops;
871 + self->aq_hw = aq_hw;
872 +
873 +- for (i = 0U, ring = self->ring[0];
874 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
875 ++ for (i = 0U; self->tx_rings > i; ++i) {
876 ++ ring = self->ring[i];
877 + err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
878 + if (err < 0)
879 + goto err_exit;
880 +@@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self)
881 + unsigned int i = 0U;
882 + int err = 0;
883 +
884 +- for (i = 0U, ring = self->ring[0];
885 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
886 ++ for (i = 0U; self->tx_rings > i; ++i) {
887 ++ ring = self->ring[i];
888 + err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
889 + &ring[AQ_VEC_TX_ID]);
890 + if (err < 0)
891 +@@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self)
892 + struct aq_ring_s *ring = NULL;
893 + unsigned int i = 0U;
894 +
895 +- for (i = 0U, ring = self->ring[0];
896 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
897 ++ for (i = 0U; self->tx_rings > i; ++i) {
898 ++ ring = self->ring[i];
899 + self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
900 + &ring[AQ_VEC_TX_ID]);
901 +
902 +@@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
903 + if (!self)
904 + goto err_exit;
905 +
906 +- for (i = 0U, ring = self->ring[0];
907 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
908 ++ for (i = 0U; self->tx_rings > i; ++i) {
909 ++ ring = self->ring[i];
910 + aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
911 + aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
912 + }
913 +@@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self)
914 + if (!self)
915 + goto err_exit;
916 +
917 +- for (i = 0U, ring = self->ring[0];
918 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
919 ++ for (i = 0U; self->tx_rings > i; ++i) {
920 ++ ring = self->ring[i];
921 + aq_ring_free(&ring[AQ_VEC_TX_ID]);
922 + if (i < self->rx_rings)
923 + aq_ring_free(&ring[AQ_VEC_RX_ID]);
924 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
925 +index f29ec765d684a..bd13f91efe7c5 100644
926 +--- a/drivers/net/ethernet/cadence/macb_main.c
927 ++++ b/drivers/net/ethernet/cadence/macb_main.c
928 +@@ -1531,6 +1531,7 @@ static void macb_tx_restart(struct macb_queue *queue)
929 + unsigned int head = queue->tx_head;
930 + unsigned int tail = queue->tx_tail;
931 + struct macb *bp = queue->bp;
932 ++ unsigned int head_idx, tbqp;
933 +
934 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
935 + queue_writel(queue, ISR, MACB_BIT(TXUBR));
936 +@@ -1538,6 +1539,13 @@ static void macb_tx_restart(struct macb_queue *queue)
937 + if (head == tail)
938 + return;
939 +
940 ++ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
941 ++ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
942 ++ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
943 ++
944 ++ if (tbqp == head_idx)
945 ++ return;
946 ++
947 + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
948 + }
949 +
950 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
951 +index 1268996b70301..2f9075429c43e 100644
952 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
953 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
954 +@@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
955 + info->phc_index = -1;
956 +
957 + fman_node = of_get_parent(mac_node);
958 +- if (fman_node)
959 ++ if (fman_node) {
960 + ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
961 ++ of_node_put(fman_node);
962 ++ }
963 +
964 +- if (ptp_node)
965 ++ if (ptp_node) {
966 + ptp_dev = of_find_device_by_node(ptp_node);
967 ++ of_node_put(ptp_node);
968 ++ }
969 +
970 + if (ptp_dev)
971 + ptp = platform_get_drvdata(ptp_dev);
972 +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
973 +index 15b1503d5b6ca..1f51252b465a6 100644
974 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
975 ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
976 +@@ -1006,8 +1006,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
977 + {
978 + u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
979 + link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
980 +- u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
981 +- u16 lat_enc_d = 0; /* latency decoded */
982 ++ u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
983 ++ u32 lat_enc_d = 0; /* latency decoded */
984 + u16 lat_enc = 0; /* latency encoded */
985 +
986 + if (link) {
987 +diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
988 +index 553d6bc78e6bd..624236a4202e5 100644
989 +--- a/drivers/net/ethernet/intel/igc/igc_i225.c
990 ++++ b/drivers/net/ethernet/intel/igc/igc_i225.c
991 +@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
992 + {
993 + u32 swfw_sync;
994 +
995 +- while (igc_get_hw_semaphore_i225(hw))
996 +- ; /* Empty */
997 ++ /* Releasing the resource requires first getting the HW semaphore.
998 ++ * If we fail to get the semaphore, there is nothing we can do,
999 ++ * except log an error and quit. We are not allowed to hang here
1000 ++ * indefinitely, as it may cause denial of service or system crash.
1001 ++ */
1002 ++ if (igc_get_hw_semaphore_i225(hw)) {
1003 ++ hw_dbg("Failed to release SW_FW_SYNC.\n");
1004 ++ return;
1005 ++ }
1006 +
1007 + swfw_sync = rd32(IGC_SW_FW_SYNC);
1008 + swfw_sync &= ~mask;
1009 +diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
1010 +index e380b7a3ea63b..8de4de2e56362 100644
1011 +--- a/drivers/net/ethernet/intel/igc/igc_phy.c
1012 ++++ b/drivers/net/ethernet/intel/igc/igc_phy.c
1013 +@@ -583,7 +583,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
1014 + * the lower time out
1015 + */
1016 + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
1017 +- usleep_range(500, 1000);
1018 ++ udelay(50);
1019 + mdic = rd32(IGC_MDIC);
1020 + if (mdic & IGC_MDIC_READY)
1021 + break;
1022 +@@ -640,7 +640,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
1023 + * the lower time out
1024 + */
1025 + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
1026 +- usleep_range(500, 1000);
1027 ++ udelay(50);
1028 + mdic = rd32(IGC_MDIC);
1029 + if (mdic & IGC_MDIC_READY)
1030 + break;
1031 +diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
1032 +index 9ceb7e1fb1696..42bc014136fe3 100644
1033 +--- a/drivers/net/ethernet/micrel/Kconfig
1034 ++++ b/drivers/net/ethernet/micrel/Kconfig
1035 +@@ -37,7 +37,6 @@ config KS8851
1036 + config KS8851_MLL
1037 + tristate "Micrel KS8851 MLL"
1038 + depends on HAS_IOMEM
1039 +- depends on PTP_1588_CLOCK_OPTIONAL
1040 + select MII
1041 + select CRC32
1042 + select EEPROM_93CX6
1043 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
1044 +index 07b1b8374cd26..53efcc9c40e28 100644
1045 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
1046 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
1047 +@@ -68,9 +68,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
1048 + writel(value, ioaddr + PTP_TCR);
1049 +
1050 + /* wait for present system time initialize to complete */
1051 +- return readl_poll_timeout(ioaddr + PTP_TCR, value,
1052 ++ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
1053 + !(value & PTP_TCR_TSINIT),
1054 +- 10000, 100000);
1055 ++ 10, 100000);
1056 + }
1057 +
1058 + static int config_addend(void __iomem *ioaddr, u32 addend)
1059 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1060 +index 48fbdce6a70e7..72d670667f64f 100644
1061 +--- a/drivers/net/vxlan.c
1062 ++++ b/drivers/net/vxlan.c
1063 +@@ -710,11 +710,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
1064 +
1065 + rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
1066 + if (rd == NULL)
1067 +- return -ENOBUFS;
1068 ++ return -ENOMEM;
1069 +
1070 + if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
1071 + kfree(rd);
1072 +- return -ENOBUFS;
1073 ++ return -ENOMEM;
1074 + }
1075 +
1076 + rd->remote_ip = *ip;
1077 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1078 +index 6d5d5c39c6359..9929e90866f04 100644
1079 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1080 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1081 +@@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
1082 + BRCMF_SDIO_FT_SUB,
1083 + };
1084 +
1085 +-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
1086 ++#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
1087 +
1088 + /* SDIO Pad drive strength to select value mappings */
1089 + struct sdiod_drive_str {
1090 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
1091 +index ecaf85b483ac3..e57e49a722dc0 100644
1092 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
1093 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
1094 +@@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1095 + mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
1096 +
1097 + /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
1098 +- mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
1099 ++ mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
1100 +
1101 + /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
1102 + mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
1103 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1104 +index 853b9a24f744e..ad4f1cfbad2e0 100644
1105 +--- a/drivers/nvme/host/core.c
1106 ++++ b/drivers/nvme/host/core.c
1107 +@@ -1270,6 +1270,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1108 + warn_str, cur->nidl);
1109 + return -1;
1110 + }
1111 ++ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1112 ++ return NVME_NIDT_EUI64_LEN;
1113 + memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1114 + return NVME_NIDT_EUI64_LEN;
1115 + case NVME_NIDT_NGUID:
1116 +@@ -1278,6 +1280,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1117 + warn_str, cur->nidl);
1118 + return -1;
1119 + }
1120 ++ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1121 ++ return NVME_NIDT_NGUID_LEN;
1122 + memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1123 + return NVME_NIDT_NGUID_LEN;
1124 + case NVME_NIDT_UUID:
1125 +@@ -1286,6 +1290,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1126 + warn_str, cur->nidl);
1127 + return -1;
1128 + }
1129 ++ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1130 ++ return NVME_NIDT_UUID_LEN;
1131 + uuid_copy(&ids->uuid, data + sizeof(*cur));
1132 + return NVME_NIDT_UUID_LEN;
1133 + case NVME_NIDT_CSI:
1134 +@@ -1381,12 +1387,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
1135 + if ((*id)->ncap == 0) /* namespace not allocated or attached */
1136 + goto out_free_id;
1137 +
1138 +- if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1139 +- !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
1140 +- memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
1141 +- if (ctrl->vs >= NVME_VS(1, 2, 0) &&
1142 +- !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
1143 +- memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
1144 ++
1145 ++ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
1146 ++ dev_info(ctrl->device,
1147 ++ "Ignoring bogus Namespace Identifiers\n");
1148 ++ } else {
1149 ++ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1150 ++ !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
1151 ++ memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
1152 ++ if (ctrl->vs >= NVME_VS(1, 2, 0) &&
1153 ++ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
1154 ++ memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
1155 ++ }
1156 +
1157 + return 0;
1158 +
1159 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
1160 +index 5dd1dd8021ba1..10e5ae3a8c0df 100644
1161 +--- a/drivers/nvme/host/nvme.h
1162 ++++ b/drivers/nvme/host/nvme.h
1163 +@@ -150,6 +150,11 @@ enum nvme_quirks {
1164 + * encoding the generation sequence number.
1165 + */
1166 + NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
1167 ++
1168 ++ /*
1169 ++ * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
1170 ++ */
1171 ++ NVME_QUIRK_BOGUS_NID = (1 << 18),
1172 + };
1173 +
1174 + /*
1175 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
1176 +index 97afeb898b253..6939b03a16c58 100644
1177 +--- a/drivers/nvme/host/pci.c
1178 ++++ b/drivers/nvme/host/pci.c
1179 +@@ -3212,7 +3212,10 @@ static const struct pci_device_id nvme_id_table[] = {
1180 + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
1181 + { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
1182 + .driver_data = NVME_QUIRK_IDENTIFY_CNS |
1183 +- NVME_QUIRK_DISABLE_WRITE_ZEROES, },
1184 ++ NVME_QUIRK_DISABLE_WRITE_ZEROES |
1185 ++ NVME_QUIRK_BOGUS_NID, },
1186 ++ { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
1187 ++ .driver_data = NVME_QUIRK_BOGUS_NID, },
1188 + { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
1189 + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
1190 + { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
1191 +diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
1192 +index cb2f55f450e4a..7fd11ef5cb8a2 100644
1193 +--- a/drivers/perf/arm_pmu.c
1194 ++++ b/drivers/perf/arm_pmu.c
1195 +@@ -398,6 +398,9 @@ validate_group(struct perf_event *event)
1196 + if (!validate_event(event->pmu, &fake_pmu, leader))
1197 + return -EINVAL;
1198 +
1199 ++ if (event == leader)
1200 ++ return 0;
1201 ++
1202 + for_each_sibling_event(sibling, leader) {
1203 + if (!validate_event(event->pmu, &fake_pmu, sibling))
1204 + return -EINVAL;
1205 +@@ -487,12 +490,7 @@ __hw_perf_event_init(struct perf_event *event)
1206 + local64_set(&hwc->period_left, hwc->sample_period);
1207 + }
1208 +
1209 +- if (event->group_leader != event) {
1210 +- if (validate_group(event) != 0)
1211 +- return -EINVAL;
1212 +- }
1213 +-
1214 +- return 0;
1215 ++ return validate_group(event);
1216 + }
1217 +
1218 + static int armpmu_event_init(struct perf_event *event)
1219 +diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
1220 +index d5cec6e35bb83..0e456c39a603d 100644
1221 +--- a/drivers/platform/x86/samsung-laptop.c
1222 ++++ b/drivers/platform/x86/samsung-laptop.c
1223 +@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
1224 +
1225 + if (value > samsung->kbd_led.max_brightness)
1226 + value = samsung->kbd_led.max_brightness;
1227 +- else if (value < 0)
1228 +- value = 0;
1229 +
1230 + samsung->kbd_led_wk = value;
1231 + queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
1232 +diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
1233 +index 24d3395964cc4..4c5bba52b1059 100644
1234 +--- a/drivers/reset/tegra/reset-bpmp.c
1235 ++++ b/drivers/reset/tegra/reset-bpmp.c
1236 +@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
1237 + struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
1238 + struct mrq_reset_request request;
1239 + struct tegra_bpmp_message msg;
1240 ++ int err;
1241 +
1242 + memset(&request, 0, sizeof(request));
1243 + request.cmd = command;
1244 +@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
1245 + msg.tx.data = &request;
1246 + msg.tx.size = sizeof(request);
1247 +
1248 +- return tegra_bpmp_transfer(bpmp, &msg);
1249 ++ err = tegra_bpmp_transfer(bpmp, &msg);
1250 ++ if (err)
1251 ++ return err;
1252 ++ if (msg.rx.ret)
1253 ++ return -EINVAL;
1254 ++
1255 ++ return 0;
1256 + }
1257 +
1258 + static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
1259 +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
1260 +index 5f7e62f19d83a..3bcadb3dd40d2 100644
1261 +--- a/drivers/scsi/qedi/qedi_iscsi.c
1262 ++++ b/drivers/scsi/qedi/qedi_iscsi.c
1263 +@@ -828,6 +828,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
1264 + return qedi_iscsi_send_ioreq(task);
1265 + }
1266 +
1267 ++static void qedi_offload_work(struct work_struct *work)
1268 ++{
1269 ++ struct qedi_endpoint *qedi_ep =
1270 ++ container_of(work, struct qedi_endpoint, offload_work);
1271 ++ struct qedi_ctx *qedi;
1272 ++ int wait_delay = 5 * HZ;
1273 ++ int ret;
1274 ++
1275 ++ qedi = qedi_ep->qedi;
1276 ++
1277 ++ ret = qedi_iscsi_offload_conn(qedi_ep);
1278 ++ if (ret) {
1279 ++ QEDI_ERR(&qedi->dbg_ctx,
1280 ++ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
1281 ++ qedi_ep->iscsi_cid, qedi_ep, ret);
1282 ++ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
1283 ++ return;
1284 ++ }
1285 ++
1286 ++ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
1287 ++ (qedi_ep->state ==
1288 ++ EP_STATE_OFLDCONN_COMPL),
1289 ++ wait_delay);
1290 ++ if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
1291 ++ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
1292 ++ QEDI_ERR(&qedi->dbg_ctx,
1293 ++ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
1294 ++ qedi_ep->iscsi_cid, qedi_ep);
1295 ++ }
1296 ++}
1297 ++
1298 + static struct iscsi_endpoint *
1299 + qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1300 + int non_blocking)
1301 +@@ -876,6 +907,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1302 + }
1303 + qedi_ep = ep->dd_data;
1304 + memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
1305 ++ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
1306 + qedi_ep->state = EP_STATE_IDLE;
1307 + qedi_ep->iscsi_cid = (u32)-1;
1308 + qedi_ep->qedi = qedi;
1309 +@@ -1026,12 +1058,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
1310 + qedi_ep = ep->dd_data;
1311 + qedi = qedi_ep->qedi;
1312 +
1313 ++ flush_work(&qedi_ep->offload_work);
1314 ++
1315 + if (qedi_ep->state == EP_STATE_OFLDCONN_START)
1316 + goto ep_exit_recover;
1317 +
1318 +- if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
1319 +- flush_work(&qedi_ep->offload_work);
1320 +-
1321 + if (qedi_ep->conn) {
1322 + qedi_conn = qedi_ep->conn;
1323 + conn = qedi_conn->cls_conn->dd_data;
1324 +@@ -1196,37 +1227,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
1325 + return rc;
1326 + }
1327 +
1328 +-static void qedi_offload_work(struct work_struct *work)
1329 +-{
1330 +- struct qedi_endpoint *qedi_ep =
1331 +- container_of(work, struct qedi_endpoint, offload_work);
1332 +- struct qedi_ctx *qedi;
1333 +- int wait_delay = 5 * HZ;
1334 +- int ret;
1335 +-
1336 +- qedi = qedi_ep->qedi;
1337 +-
1338 +- ret = qedi_iscsi_offload_conn(qedi_ep);
1339 +- if (ret) {
1340 +- QEDI_ERR(&qedi->dbg_ctx,
1341 +- "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
1342 +- qedi_ep->iscsi_cid, qedi_ep, ret);
1343 +- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
1344 +- return;
1345 +- }
1346 +-
1347 +- ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
1348 +- (qedi_ep->state ==
1349 +- EP_STATE_OFLDCONN_COMPL),
1350 +- wait_delay);
1351 +- if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
1352 +- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
1353 +- QEDI_ERR(&qedi->dbg_ctx,
1354 +- "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
1355 +- qedi_ep->iscsi_cid, qedi_ep);
1356 +- }
1357 +-}
1358 +-
1359 + static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
1360 + {
1361 + struct qedi_ctx *qedi;
1362 +@@ -1342,7 +1342,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
1363 + qedi_ep->dst_addr, qedi_ep->dst_port);
1364 + }
1365 +
1366 +- INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
1367 + queue_work(qedi->offload_thread, &qedi_ep->offload_work);
1368 +
1369 + ret = 0;
1370 +diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
1371 +index 1e63fd4821f96..8aa89d93db118 100644
1372 +--- a/drivers/spi/atmel-quadspi.c
1373 ++++ b/drivers/spi/atmel-quadspi.c
1374 +@@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
1375 + static bool atmel_qspi_supports_op(struct spi_mem *mem,
1376 + const struct spi_mem_op *op)
1377 + {
1378 ++ if (!spi_mem_default_supports_op(mem, op))
1379 ++ return false;
1380 ++
1381 + if (atmel_qspi_find_mode(op) < 0)
1382 + return false;
1383 +
1384 +diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
1385 +index 288f6c2bbd573..106e3cacba4c3 100644
1386 +--- a/drivers/spi/spi-mtk-nor.c
1387 ++++ b/drivers/spi/spi-mtk-nor.c
1388 +@@ -895,7 +895,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev)
1389 +
1390 + static int __maybe_unused mtk_nor_resume(struct device *dev)
1391 + {
1392 +- return pm_runtime_force_resume(dev);
1393 ++ struct spi_controller *ctlr = dev_get_drvdata(dev);
1394 ++ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
1395 ++ int ret;
1396 ++
1397 ++ ret = pm_runtime_force_resume(dev);
1398 ++ if (ret)
1399 ++ return ret;
1400 ++
1401 ++ mtk_nor_init(sp);
1402 ++
1403 ++ return 0;
1404 + }
1405 +
1406 + static const struct dev_pm_ops mtk_nor_pm_ops = {
1407 +diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
1408 +index e1fe03ceb7f13..e6d4a3ee6cda5 100644
1409 +--- a/drivers/staging/android/ion/ion.c
1410 ++++ b/drivers/staging/android/ion/ion.c
1411 +@@ -114,6 +114,9 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
1412 + void *vaddr;
1413 +
1414 + if (buffer->kmap_cnt) {
1415 ++ if (buffer->kmap_cnt == INT_MAX)
1416 ++ return ERR_PTR(-EOVERFLOW);
1417 ++
1418 + buffer->kmap_cnt++;
1419 + return buffer->vaddr;
1420 + }
1421 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
1422 +index aa5a4d759ca23..370188b2a55d2 100644
1423 +--- a/fs/cifs/cifsfs.c
1424 ++++ b/fs/cifs/cifsfs.c
1425 +@@ -898,7 +898,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1426 + ssize_t rc;
1427 + struct inode *inode = file_inode(iocb->ki_filp);
1428 +
1429 +- if (iocb->ki_filp->f_flags & O_DIRECT)
1430 ++ if (iocb->ki_flags & IOCB_DIRECT)
1431 + return cifs_user_readv(iocb, iter);
1432 +
1433 + rc = cifs_revalidate_mapping(inode);
1434 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1435 +index 455eb349c76f8..8329961546b58 100644
1436 +--- a/fs/ext4/ext4.h
1437 ++++ b/fs/ext4/ext4.h
1438 +@@ -2159,6 +2159,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
1439 + * Structure of a directory entry
1440 + */
1441 + #define EXT4_NAME_LEN 255
1442 ++/*
1443 ++ * Base length of the ext4 directory entry excluding the name length
1444 ++ */
1445 ++#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
1446 +
1447 + struct ext4_dir_entry {
1448 + __le32 inode; /* Inode number */
1449 +@@ -2870,7 +2874,7 @@ extern int ext4_inode_attach_jinode(struct inode *inode);
1450 + extern int ext4_can_truncate(struct inode *inode);
1451 + extern int ext4_truncate(struct inode *);
1452 + extern int ext4_break_layouts(struct inode *);
1453 +-extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
1454 ++extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
1455 + extern void ext4_set_inode_flags(struct inode *, bool init);
1456 + extern int ext4_alloc_da_blocks(struct inode *inode);
1457 + extern void ext4_set_aops(struct inode *inode);
1458 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1459 +index 0fda3051760d1..80b876ab6b1fe 100644
1460 +--- a/fs/ext4/extents.c
1461 ++++ b/fs/ext4/extents.c
1462 +@@ -4498,9 +4498,9 @@ retry:
1463 + return ret > 0 ? ret2 : ret;
1464 + }
1465 +
1466 +-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
1467 ++static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
1468 +
1469 +-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
1470 ++static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
1471 +
1472 + static long ext4_zero_range(struct file *file, loff_t offset,
1473 + loff_t len, int mode)
1474 +@@ -4571,6 +4571,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
1475 + /* Wait all existing dio workers, newcomers will block on i_mutex */
1476 + inode_dio_wait(inode);
1477 +
1478 ++ ret = file_modified(file);
1479 ++ if (ret)
1480 ++ goto out_mutex;
1481 ++
1482 + /* Preallocate the range including the unaligned edges */
1483 + if (partial_begin || partial_end) {
1484 + ret = ext4_alloc_file_blocks(file,
1485 +@@ -4689,7 +4693,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
1486 + ext4_fc_start_update(inode);
1487 +
1488 + if (mode & FALLOC_FL_PUNCH_HOLE) {
1489 +- ret = ext4_punch_hole(inode, offset, len);
1490 ++ ret = ext4_punch_hole(file, offset, len);
1491 + goto exit;
1492 + }
1493 +
1494 +@@ -4698,12 +4702,12 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
1495 + goto exit;
1496 +
1497 + if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1498 +- ret = ext4_collapse_range(inode, offset, len);
1499 ++ ret = ext4_collapse_range(file, offset, len);
1500 + goto exit;
1501 + }
1502 +
1503 + if (mode & FALLOC_FL_INSERT_RANGE) {
1504 +- ret = ext4_insert_range(inode, offset, len);
1505 ++ ret = ext4_insert_range(file, offset, len);
1506 + goto exit;
1507 + }
1508 +
1509 +@@ -4739,6 +4743,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
1510 + /* Wait all existing dio workers, newcomers will block on i_mutex */
1511 + inode_dio_wait(inode);
1512 +
1513 ++ ret = file_modified(file);
1514 ++ if (ret)
1515 ++ goto out;
1516 ++
1517 + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
1518 + if (ret)
1519 + goto out;
1520 +@@ -5241,8 +5249,9 @@ out:
1521 + * This implements the fallocate's collapse range functionality for ext4
1522 + * Returns: 0 on success and non-zero on error.
1523 + */
1524 +-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1525 ++static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
1526 + {
1527 ++ struct inode *inode = file_inode(file);
1528 + struct super_block *sb = inode->i_sb;
1529 + ext4_lblk_t punch_start, punch_stop;
1530 + handle_t *handle;
1531 +@@ -5293,6 +5302,10 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1532 + /* Wait for existing dio to complete */
1533 + inode_dio_wait(inode);
1534 +
1535 ++ ret = file_modified(file);
1536 ++ if (ret)
1537 ++ goto out_mutex;
1538 ++
1539 + /*
1540 + * Prevent page faults from reinstantiating pages we have released from
1541 + * page cache.
1542 +@@ -5387,8 +5400,9 @@ out_mutex:
1543 + * by len bytes.
1544 + * Returns 0 on success, error otherwise.
1545 + */
1546 +-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
1547 ++static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
1548 + {
1549 ++ struct inode *inode = file_inode(file);
1550 + struct super_block *sb = inode->i_sb;
1551 + handle_t *handle;
1552 + struct ext4_ext_path *path;
1553 +@@ -5444,6 +5458,10 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
1554 + /* Wait for existing dio to complete */
1555 + inode_dio_wait(inode);
1556 +
1557 ++ ret = file_modified(file);
1558 ++ if (ret)
1559 ++ goto out_mutex;
1560 ++
1561 + /*
1562 + * Prevent page faults from reinstantiating pages we have released from
1563 + * page cache.
1564 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1565 +index 96546df39bcf9..31ab73c4b07e7 100644
1566 +--- a/fs/ext4/inode.c
1567 ++++ b/fs/ext4/inode.c
1568 +@@ -4028,12 +4028,14 @@ int ext4_break_layouts(struct inode *inode)
1569 + * Returns: 0 on success or negative on failure
1570 + */
1571 +
1572 +-int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
1573 ++int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
1574 + {
1575 ++ struct inode *inode = file_inode(file);
1576 + struct super_block *sb = inode->i_sb;
1577 + ext4_lblk_t first_block, stop_block;
1578 + struct address_space *mapping = inode->i_mapping;
1579 +- loff_t first_block_offset, last_block_offset;
1580 ++ loff_t first_block_offset, last_block_offset, max_length;
1581 ++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1582 + handle_t *handle;
1583 + unsigned int credits;
1584 + int ret = 0, ret2 = 0;
1585 +@@ -4076,6 +4078,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
1586 + offset;
1587 + }
1588 +
1589 ++ /*
1590 ++ * For punch hole, the length + offset needs to be within one block
1591 ++ * before the last range. Adjust the length if it goes beyond that limit.
1592 ++ */
1593 ++ max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
1594 ++ if (offset + length > max_length)
1595 ++ length = max_length - offset;
1596 ++
1597 + if (offset & (sb->s_blocksize - 1) ||
1598 + (offset + length) & (sb->s_blocksize - 1)) {
1599 + /*
1600 +@@ -4091,6 +4101,10 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
1601 + /* Wait all existing dio workers, newcomers will block on i_mutex */
1602 + inode_dio_wait(inode);
1603 +
1604 ++ ret = file_modified(file);
1605 ++ if (ret)
1606 ++ goto out_mutex;
1607 ++
1608 + /*
1609 + * Prevent page faults from reinstantiating pages we have released from
1610 + * page cache.
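
[Editor's note] The inode.c hunk above clamps offset + length against the bitmap-addressable maximum before doing any block arithmetic. A hedged sketch of just the clamp, with invented numbers standing in for s_bitmap_maxbytes and the block size:

#include <stdint.h>
#include <stdio.h>

/* Trim a punch-hole request so offset+length stays one block below the
 * format's maximum byte range -- mirrors the hunk above with made-up
 * values for the maximum and the block size. */
static uint64_t clamp_punch_len(uint64_t offset, uint64_t length,
                                uint64_t maxbytes, uint64_t blocksize)
{
    uint64_t max_length = maxbytes - blocksize;

    if (offset + length > max_length)
        length = max_length - offset;
    return length;
}

int main(void)
{
    uint64_t max = UINT64_C(4) << 30;   /* pretend 4 GiB limit */

    /* A request running past the end is trimmed, not rejected. */
    printf("clamped length = %llu\n", (unsigned long long)
           clamp_punch_len(max - 8192, 16384, max, 4096));
    return 0;
}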
1611 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1612 +index a622e186b7ee1..47ea35e98ffe9 100644
1613 +--- a/fs/ext4/namei.c
1614 ++++ b/fs/ext4/namei.c
1615 +@@ -1388,10 +1388,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
1616 +
1617 + de = (struct ext4_dir_entry_2 *)search_buf;
1618 + dlimit = search_buf + buf_size;
1619 +- while ((char *) de < dlimit) {
1620 ++ while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) {
1621 + /* this code is executed quadratically often */
1622 + /* do minimal checking `by hand' */
1623 +- if ((char *) de + de->name_len <= dlimit &&
1624 ++ if (de->name + de->name_len <= dlimit &&
1625 + ext4_match(dir, fname, de)) {
1626 + /* found a match - just to be sure, do
1627 + * a full check */
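
[Editor's note] The namei.c hunk stops the directory scan once fewer bytes remain than a fixed entry header, and bounds the name itself against the end of the buffer. A standalone sketch of that record-walk pattern; the struct layout is invented for illustration and is not ext4's on-disk format:

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy variable-length record: fixed header, then name_len name bytes. */
struct dirent_rec {
    unsigned short rec_len;     /* distance to the next record */
    unsigned char  name_len;
    char           name[];
};

#define BASE_LEN offsetof(struct dirent_rec, name)

static void walk(const char *buf, size_t size)
{
    const char *p = buf, *limit = buf + size;

    /* Only dereference a header while one fully fits before 'limit';
     * reading header fields past the end is the bug the hunk closes. */
    while (p + BASE_LEN <= limit) {
        const struct dirent_rec *de = (const struct dirent_rec *)p;

        if (de->name + de->name_len > limit || de->rec_len < BASE_LEN)
            break;                      /* truncated or corrupt record */
        printf("entry: %.*s\n", (int)de->name_len, de->name);
        p += de->rec_len;
    }
}

int main(void)
{
    alignas(struct dirent_rec) char buf[32] = {0};
    struct dirent_rec *de = (struct dirent_rec *)buf;

    de->rec_len = 16;
    de->name_len = 3;
    memcpy(de->name, "foo", 3);
    walk(buf, sizeof(buf));
    return 0;
}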
1628 +diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
1629 +index defd2e10dfd10..4569075a7da0c 100644
1630 +--- a/fs/ext4/page-io.c
1631 ++++ b/fs/ext4/page-io.c
1632 +@@ -137,8 +137,10 @@ static void ext4_finish_bio(struct bio *bio)
1633 + continue;
1634 + }
1635 + clear_buffer_async_write(bh);
1636 +- if (bio->bi_status)
1637 ++ if (bio->bi_status) {
1638 ++ set_buffer_write_io_error(bh);
1639 + buffer_io_error(bh);
1640 ++ }
1641 + } while ((bh = bh->b_this_page) != head);
1642 + spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
1643 + if (!under_io) {
1644 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1645 +index 9e210bc85c817..5e6c034583176 100644
1646 +--- a/fs/ext4/super.c
1647 ++++ b/fs/ext4/super.c
1648 +@@ -3870,9 +3870,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
1649 + ext4_fsblk_t first_block, last_block, b;
1650 + ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1651 + int s, j, count = 0;
1652 ++ int has_super = ext4_bg_has_super(sb, grp);
1653 +
1654 + if (!ext4_has_feature_bigalloc(sb))
1655 +- return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
1656 ++ return (has_super + ext4_bg_num_gdb(sb, grp) +
1657 ++ (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
1658 + sbi->s_itb_per_group + 2);
1659 +
1660 + first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
1661 +@@ -4931,9 +4933,18 @@ no_journal:
1662 + * Get the # of file system overhead blocks from the
1663 + * superblock if present.
1664 + */
1665 +- if (es->s_overhead_clusters)
1666 +- sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
1667 +- else {
1668 ++ sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
1669 ++ /* ignore the precalculated value if it is ridiculous */
1670 ++ if (sbi->s_overhead > ext4_blocks_count(es))
1671 ++ sbi->s_overhead = 0;
1672 ++ /*
1673 ++ * If the bigalloc feature is not enabled recalculating the
1674 ++ * overhead doesn't take long, so we might as well just redo
1675 ++ * it to make sure we are using the correct value.
1676 ++ */
1677 ++ if (!ext4_has_feature_bigalloc(sb))
1678 ++ sbi->s_overhead = 0;
1679 ++ if (sbi->s_overhead == 0) {
1680 + err = ext4_calculate_overhead(sb);
1681 + if (err)
1682 + goto failed_mount_wq;
1683 +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
1684 +index dc55b029afaa4..c5bde789a16db 100644
1685 +--- a/fs/gfs2/rgrp.c
1686 ++++ b/fs/gfs2/rgrp.c
1687 +@@ -906,15 +906,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
1688 + rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
1689 + spin_lock_init(&rgd->rd_rsspin);
1690 +
1691 +- error = compute_bitstructs(rgd);
1692 +- if (error)
1693 +- goto fail;
1694 +-
1695 + error = gfs2_glock_get(sdp, rgd->rd_addr,
1696 + &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
1697 + if (error)
1698 + goto fail;
1699 +
1700 ++ error = compute_bitstructs(rgd);
1701 ++ if (error)
1702 ++ goto fail_glock;
1703 ++
1704 + rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
1705 + rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
1706 + if (rgd->rd_data > sdp->sd_max_rg_data)
1707 +@@ -928,6 +928,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
1708 + }
1709 +
1710 + error = 0; /* someone else read in the rgrp; free it and ignore it */
1711 ++fail_glock:
1712 + gfs2_glock_put(rgd->rd_gl);
1713 +
1714 + fail:
1715 +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
1716 +index 5fc9ccab907c3..a2f43f1a85f8d 100644
1717 +--- a/fs/hugetlbfs/inode.c
1718 ++++ b/fs/hugetlbfs/inode.c
1719 +@@ -206,7 +206,7 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
1720 + info.flags = 0;
1721 + info.length = len;
1722 + info.low_limit = current->mm->mmap_base;
1723 +- info.high_limit = TASK_SIZE;
1724 ++ info.high_limit = arch_get_mmap_end(addr);
1725 + info.align_mask = PAGE_MASK & ~huge_page_mask(h);
1726 + info.align_offset = 0;
1727 + return vm_unmapped_area(&info);
1728 +@@ -222,7 +222,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
1729 + info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1730 + info.length = len;
1731 + info.low_limit = max(PAGE_SIZE, mmap_min_addr);
1732 +- info.high_limit = current->mm->mmap_base;
1733 ++ info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
1734 + info.align_mask = PAGE_MASK & ~huge_page_mask(h);
1735 + info.align_offset = 0;
1736 + addr = vm_unmapped_area(&info);
1737 +@@ -237,7 +237,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
1738 + VM_BUG_ON(addr != -ENOMEM);
1739 + info.flags = 0;
1740 + info.low_limit = current->mm->mmap_base;
1741 +- info.high_limit = TASK_SIZE;
1742 ++ info.high_limit = arch_get_mmap_end(addr);
1743 + addr = vm_unmapped_area(&info);
1744 + }
1745 +
1746 +@@ -251,6 +251,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
1747 + struct mm_struct *mm = current->mm;
1748 + struct vm_area_struct *vma;
1749 + struct hstate *h = hstate_file(file);
1750 ++ const unsigned long mmap_end = arch_get_mmap_end(addr);
1751 +
1752 + if (len & ~huge_page_mask(h))
1753 + return -EINVAL;
1754 +@@ -266,7 +267,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
1755 + if (addr) {
1756 + addr = ALIGN(addr, huge_page_size(h));
1757 + vma = find_vma(mm, addr);
1758 +- if (TASK_SIZE - len >= addr &&
1759 ++ if (mmap_end - len >= addr &&
1760 + (!vma || addr + len <= vm_start_gap(vma)))
1761 + return addr;
1762 + }
1763 +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
1764 +index b121d7d434c67..867362f45cf63 100644
1765 +--- a/fs/jbd2/commit.c
1766 ++++ b/fs/jbd2/commit.c
1767 +@@ -501,7 +501,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
1768 + }
1769 + spin_unlock(&commit_transaction->t_handle_lock);
1770 + commit_transaction->t_state = T_SWITCH;
1771 +- write_unlock(&journal->j_state_lock);
1772 +
1773 + J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
1774 + journal->j_max_transaction_buffers);
1775 +@@ -521,6 +520,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
1776 + * has reserved. This is consistent with the existing behaviour
1777 + * that multiple jbd2_journal_get_write_access() calls to the same
1778 + * buffer are perfectly permissible.
1779 ++ * We use journal->j_state_lock here to serialize processing of
1780 ++ * t_reserved_list with eviction of buffers from journal_unmap_buffer().
1781 + */
1782 + while (commit_transaction->t_reserved_list) {
1783 + jh = commit_transaction->t_reserved_list;
1784 +@@ -540,6 +541,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
1785 + jbd2_journal_refile_buffer(journal, jh);
1786 + }
1787 +
1788 ++ write_unlock(&journal->j_state_lock);
1789 + /*
1790 + * Now try to drop any written-back buffers from the journal's
1791 + * checkpoint lists. We do this *before* commit because it potentially
1792 +diff --git a/fs/stat.c b/fs/stat.c
1793 +index 1196af4d1ea03..04550c0ba5407 100644
1794 +--- a/fs/stat.c
1795 ++++ b/fs/stat.c
1796 +@@ -306,9 +306,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
1797 + # define choose_32_64(a,b) b
1798 + #endif
1799 +
1800 +-#define valid_dev(x) choose_32_64(old_valid_dev(x),true)
1801 +-#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
1802 +-
1803 + #ifndef INIT_STRUCT_STAT_PADDING
1804 + # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
1805 + #endif
1806 +@@ -317,7 +314,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
1807 + {
1808 + struct stat tmp;
1809 +
1810 +- if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
1811 ++ if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
1812 ++ return -EOVERFLOW;
1813 ++ if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
1814 + return -EOVERFLOW;
1815 + #if BITS_PER_LONG == 32
1816 + if (stat->size > MAX_NON_LFS)
1817 +@@ -325,7 +324,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
1818 + #endif
1819 +
1820 + INIT_STRUCT_STAT_PADDING(tmp);
1821 +- tmp.st_dev = encode_dev(stat->dev);
1822 ++ tmp.st_dev = new_encode_dev(stat->dev);
1823 + tmp.st_ino = stat->ino;
1824 + if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
1825 + return -EOVERFLOW;
1826 +@@ -335,7 +334,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
1827 + return -EOVERFLOW;
1828 + SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
1829 + SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
1830 +- tmp.st_rdev = encode_dev(stat->rdev);
1831 ++ tmp.st_rdev = new_encode_dev(stat->rdev);
1832 + tmp.st_size = stat->size;
1833 + tmp.st_atime = stat->atime.tv_sec;
1834 + tmp.st_mtime = stat->mtime.tv_sec;
1835 +@@ -616,11 +615,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
1836 + {
1837 + struct compat_stat tmp;
1838 +
1839 +- if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
1840 ++ if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
1841 ++ return -EOVERFLOW;
1842 ++ if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
1843 + return -EOVERFLOW;
1844 +
1845 + memset(&tmp, 0, sizeof(tmp));
1846 +- tmp.st_dev = old_encode_dev(stat->dev);
1847 ++ tmp.st_dev = new_encode_dev(stat->dev);
1848 + tmp.st_ino = stat->ino;
1849 + if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
1850 + return -EOVERFLOW;
1851 +@@ -630,7 +631,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
1852 + return -EOVERFLOW;
1853 + SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
1854 + SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
1855 +- tmp.st_rdev = old_encode_dev(stat->rdev);
1856 ++ tmp.st_rdev = new_encode_dev(stat->rdev);
1857 + if ((u64) stat->size > MAX_NON_LFS)
1858 + return -EOVERFLOW;
1859 + tmp.st_size = stat->size;
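
[Editor's note] The fs/stat.c hunks replace the blanket valid_dev() test with per-field width checks and always encode with the "new" scheme. A userspace sketch of the two encodings follows; the bit layout mirrors the kernel's kdev_t helpers from memory, so treat it as an assumption rather than a reference:

#include <stdint.h>
#include <stdio.h>

#define MAJOR(dev)  ((unsigned)((dev) >> 20))      /* 12-bit major */
#define MINOR(dev)  ((unsigned)((dev) & 0xfffff))  /* 20-bit minor */

/* Old 16-bit encoding: 8-bit major, 8-bit minor. */
static int old_valid_dev(uint32_t dev)
{
    return MAJOR(dev) < 256 && MINOR(dev) < 256;
}

static uint16_t old_encode_dev(uint32_t dev)
{
    return (uint16_t)((MAJOR(dev) << 8) | MINOR(dev));
}

/* New 32-bit encoding keeps the low byte of the minor where old
 * programs expect it and spills the rest above the major. */
static uint32_t new_encode_dev(uint32_t dev)
{
    unsigned major = MAJOR(dev), minor = MINOR(dev);

    return (minor & 0xff) | (major << 8) | ((minor & ~0xffu) << 12);
}

int main(void)
{
    uint32_t dev = (259u << 20) | 7;    /* major 259, minor 7 */

    printf("old_valid=%d old=%#x new=%#x\n",
           old_valid_dev(dev), old_encode_dev(dev), new_encode_dev(dev));
    return 0;
}

The per-field checks in the hunk only reject values that genuinely cannot fit the destination field, instead of rejecting every large dev_t outright.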
1860 +diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
1861 +index 2e5debc0373c5..99209f50915f4 100644
1862 +--- a/include/linux/etherdevice.h
1863 ++++ b/include/linux/etherdevice.h
1864 +@@ -127,7 +127,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
1865 + #endif
1866 + }
1867 +
1868 +-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
1869 ++static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
1870 + {
1871 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
1872 + #ifdef __BIG_ENDIAN
1873 +@@ -352,8 +352,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
1874 + * Please note that alignment of addr1 & addr2 is only guaranteed to be 16 bits.
1875 + */
1876 +
1877 +-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
1878 +- const u8 addr2[6+2])
1879 ++static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
1880 + {
1881 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
1882 + u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
1883 +diff --git a/include/linux/sched.h b/include/linux/sched.h
1884 +index f996d1f343bb7..4bca80c9931fb 100644
1885 +--- a/include/linux/sched.h
1886 ++++ b/include/linux/sched.h
1887 +@@ -1325,6 +1325,7 @@ struct task_struct {
1888 + int pagefault_disabled;
1889 + #ifdef CONFIG_MMU
1890 + struct task_struct *oom_reaper_list;
1891 ++ struct timer_list oom_reaper_timer;
1892 + #endif
1893 + #ifdef CONFIG_VMAP_STACK
1894 + struct vm_struct *stack_vm_area;
1895 +diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
1896 +index dc1f4dcd9a825..e3e5e149b00e6 100644
1897 +--- a/include/linux/sched/mm.h
1898 ++++ b/include/linux/sched/mm.h
1899 +@@ -106,6 +106,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
1900 + #endif /* CONFIG_MEMCG */
1901 +
1902 + #ifdef CONFIG_MMU
1903 ++#ifndef arch_get_mmap_end
1904 ++#define arch_get_mmap_end(addr) (TASK_SIZE)
1905 ++#endif
1906 ++
1907 ++#ifndef arch_get_mmap_base
1908 ++#define arch_get_mmap_base(addr, base) (base)
1909 ++#endif
1910 ++
1911 + extern void arch_pick_mmap_layout(struct mm_struct *mm,
1912 + struct rlimit *rlim_stack);
1913 + extern unsigned long
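
[Editor's note] Moving the arch_get_mmap_end()/arch_get_mmap_base() defaults into sched/mm.h lets hugetlbfs (above) and mm/mmap.c (whose private copies are removed further down) share one override point. The underlying idiom is a weak default that an architecture header can pre-define; a tiny sketch with an invented macro name:

#include <stdio.h>

/* An arch header may define its own limit before this point; otherwise
 * fall back to a generic default.  Compile with e.g.
 * -DARCH_MMAP_END=0x1000000000ULL to emulate an arch override. */
#ifndef ARCH_MMAP_END
#define ARCH_MMAP_END 0x800000000000ULL   /* illustrative default */
#endif

int main(void)
{
    printf("mmap end = %#llx\n", (unsigned long long)ARCH_MMAP_END);
    return 0;
}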
1914 +diff --git a/include/net/esp.h b/include/net/esp.h
1915 +index 90cd02ff77ef6..9c5637d41d951 100644
1916 +--- a/include/net/esp.h
1917 ++++ b/include/net/esp.h
1918 +@@ -4,8 +4,6 @@
1919 +
1920 + #include <linux/skbuff.h>
1921 +
1922 +-#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
1923 +-
1924 + struct ip_esp_hdr;
1925 +
1926 + static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
1927 +diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
1928 +index 1c0fbe3abf247..f179996c61844 100644
1929 +--- a/include/net/netns/ipv6.h
1930 ++++ b/include/net/netns/ipv6.h
1931 +@@ -78,8 +78,8 @@ struct netns_ipv6 {
1932 + struct dst_ops ip6_dst_ops;
1933 + rwlock_t fib6_walker_lock;
1934 + spinlock_t fib6_gc_lock;
1935 +- unsigned int ip6_rt_gc_expire;
1936 +- unsigned long ip6_rt_last_gc;
1937 ++ atomic_t ip6_rt_gc_expire;
1938 ++ unsigned long ip6_rt_last_gc;
1939 + unsigned char flowlabel_has_excl;
1940 + #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1941 + bool fib6_has_custom_rules;
1942 +diff --git a/kernel/events/core.c b/kernel/events/core.c
1943 +index 79d8b27cf2fc6..9aa6563587d88 100644
1944 +--- a/kernel/events/core.c
1945 ++++ b/kernel/events/core.c
1946 +@@ -6221,7 +6221,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1947 + again:
1948 + mutex_lock(&event->mmap_mutex);
1949 + if (event->rb) {
1950 +- if (event->rb->nr_pages != nr_pages) {
1951 ++ if (data_page_nr(event->rb) != nr_pages) {
1952 + ret = -EINVAL;
1953 + goto unlock;
1954 + }
1955 +diff --git a/kernel/events/internal.h b/kernel/events/internal.h
1956 +index 228801e207886..aa23ffdaf819f 100644
1957 +--- a/kernel/events/internal.h
1958 ++++ b/kernel/events/internal.h
1959 +@@ -116,6 +116,11 @@ static inline int page_order(struct perf_buffer *rb)
1960 + }
1961 + #endif
1962 +
1963 ++static inline int data_page_nr(struct perf_buffer *rb)
1964 ++{
1965 ++ return rb->nr_pages << page_order(rb);
1966 ++}
1967 ++
1968 + static inline unsigned long perf_data_size(struct perf_buffer *rb)
1969 + {
1970 + return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
1971 +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
1972 +index ef91ae75ca56f..4032cd4750001 100644
1973 +--- a/kernel/events/ring_buffer.c
1974 ++++ b/kernel/events/ring_buffer.c
1975 +@@ -856,11 +856,6 @@ void rb_free(struct perf_buffer *rb)
1976 + }
1977 +
1978 + #else
1979 +-static int data_page_nr(struct perf_buffer *rb)
1980 +-{
1981 +- return rb->nr_pages << page_order(rb);
1982 +-}
1983 +-
1984 + static struct page *
1985 + __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
1986 + {
1987 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1988 +index acd9833b8ec22..1a306ef51bbe5 100644
1989 +--- a/kernel/sched/fair.c
1990 ++++ b/kernel/sched/fair.c
1991 +@@ -3748,11 +3748,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
1992 +
1993 + se->avg.runnable_sum = se->avg.runnable_avg * divider;
1994 +
1995 +- se->avg.load_sum = divider;
1996 +- if (se_weight(se)) {
1997 +- se->avg.load_sum =
1998 +- div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
1999 +- }
2000 ++ se->avg.load_sum = se->avg.load_avg * divider;
2001 ++ if (se_weight(se) < se->avg.load_sum)
2002 ++ se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
2003 ++ else
2004 ++ se->avg.load_sum = 1;
2005 +
2006 + enqueue_load_avg(cfs_rq, se);
2007 + cfs_rq->avg.util_avg += se->avg.util_avg;
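
[Editor's note] The fair.c hunk recomputes load_sum as load_avg * divider and divides by the entity weight only when the division cannot round to zero, flooring the result at 1. The same arithmetic in a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the hunk above: multiply first, then divide by the weight
 * only when that cannot collapse the sum to zero. */
static uint64_t attach_load_sum(uint64_t load_avg, uint64_t divider,
                                uint64_t weight)
{
    uint64_t load_sum = load_avg * divider;

    if (weight < load_sum)
        return load_sum / weight;
    return 1;   /* floor: a runnable entity never carries a zero sum */
}

int main(void)
{
    printf("%llu\n", (unsigned long long)attach_load_sum(1024, 47742, 1048576));
    printf("%llu\n", (unsigned long long)attach_load_sum(0, 47742, 1048576));
    return 0;
}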
2008 +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
2009 +index d0309de2f84fe..4bc90965abb25 100644
2010 +--- a/kernel/trace/trace_events_trigger.c
2011 ++++ b/kernel/trace/trace_events_trigger.c
2012 +@@ -1219,7 +1219,14 @@ static void
2013 + stacktrace_trigger(struct event_trigger_data *data, void *rec,
2014 + struct ring_buffer_event *event)
2015 + {
2016 +- trace_dump_stack(STACK_SKIP);
2017 ++ struct trace_event_file *file = data->private_data;
2018 ++ unsigned long flags;
2019 ++
2020 ++ if (file) {
2021 ++ local_save_flags(flags);
2022 ++ __trace_stack(file->tr, flags, STACK_SKIP, preempt_count());
2023 ++ } else
2024 ++ trace_dump_stack(STACK_SKIP);
2025 + }
2026 +
2027 + static void
2028 +diff --git a/mm/mmap.c b/mm/mmap.c
2029 +index 46c160d4eac14..102f73ed4b1b9 100644
2030 +--- a/mm/mmap.c
2031 ++++ b/mm/mmap.c
2032 +@@ -2140,14 +2140,6 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
2033 + return addr;
2034 + }
2035 +
2036 +-#ifndef arch_get_mmap_end
2037 +-#define arch_get_mmap_end(addr) (TASK_SIZE)
2038 +-#endif
2039 +-
2040 +-#ifndef arch_get_mmap_base
2041 +-#define arch_get_mmap_base(addr, base) (base)
2042 +-#endif
2043 +-
2044 + /* Get an address range which is currently unmapped.
2045 + * For shmat() with addr=0.
2046 + *
2047 +diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
2048 +index 07f42a7a60657..9165ca619c8cf 100644
2049 +--- a/mm/mmu_notifier.c
2050 ++++ b/mm/mmu_notifier.c
2051 +@@ -1043,6 +1043,18 @@ int mmu_interval_notifier_insert_locked(
2052 + }
2053 + EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
2054 +
2055 ++static bool
2056 ++mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
2057 ++ unsigned long seq)
2058 ++{
2059 ++ bool ret;
2060 ++
2061 ++ spin_lock(&subscriptions->lock);
2062 ++ ret = subscriptions->invalidate_seq != seq;
2063 ++ spin_unlock(&subscriptions->lock);
2064 ++ return ret;
2065 ++}
2066 ++
2067 + /**
2068 + * mmu_interval_notifier_remove - Remove an interval notifier
2069 + * @interval_sub: Interval subscription to unregister
2070 +@@ -1090,7 +1102,7 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
2071 + lock_map_release(&__mmu_notifier_invalidate_range_start_map);
2072 + if (seq)
2073 + wait_event(subscriptions->wq,
2074 +- READ_ONCE(subscriptions->invalidate_seq) != seq);
2075 ++ mmu_interval_seq_released(subscriptions, seq));
2076 +
2077 + /* pairs with mmgrab in mmu_interval_notifier_insert() */
2078 + mmdrop(mm);
2079 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
2080 +index 419a814f467e0..3d7c557fb70c9 100644
2081 +--- a/mm/oom_kill.c
2082 ++++ b/mm/oom_kill.c
2083 +@@ -633,7 +633,7 @@ done:
2084 + */
2085 + set_bit(MMF_OOM_SKIP, &mm->flags);
2086 +
2087 +- /* Drop a reference taken by wake_oom_reaper */
2088 ++ /* Drop a reference taken by queue_oom_reaper */
2089 + put_task_struct(tsk);
2090 + }
2091 +
2092 +@@ -643,12 +643,12 @@ static int oom_reaper(void *unused)
2093 + struct task_struct *tsk = NULL;
2094 +
2095 + wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
2096 +- spin_lock(&oom_reaper_lock);
2097 ++ spin_lock_irq(&oom_reaper_lock);
2098 + if (oom_reaper_list != NULL) {
2099 + tsk = oom_reaper_list;
2100 + oom_reaper_list = tsk->oom_reaper_list;
2101 + }
2102 +- spin_unlock(&oom_reaper_lock);
2103 ++ spin_unlock_irq(&oom_reaper_lock);
2104 +
2105 + if (tsk)
2106 + oom_reap_task(tsk);
2107 +@@ -657,22 +657,48 @@ static int oom_reaper(void *unused)
2108 + return 0;
2109 + }
2110 +
2111 +-static void wake_oom_reaper(struct task_struct *tsk)
2112 ++static void wake_oom_reaper(struct timer_list *timer)
2113 + {
2114 +- /* mm is already queued? */
2115 +- if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
2116 +- return;
2117 ++ struct task_struct *tsk = container_of(timer, struct task_struct,
2118 ++ oom_reaper_timer);
2119 ++ struct mm_struct *mm = tsk->signal->oom_mm;
2120 ++ unsigned long flags;
2121 +
2122 +- get_task_struct(tsk);
2123 ++ /* The victim managed to terminate on its own - see exit_mmap */
2124 ++ if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
2125 ++ put_task_struct(tsk);
2126 ++ return;
2127 ++ }
2128 +
2129 +- spin_lock(&oom_reaper_lock);
2130 ++ spin_lock_irqsave(&oom_reaper_lock, flags);
2131 + tsk->oom_reaper_list = oom_reaper_list;
2132 + oom_reaper_list = tsk;
2133 +- spin_unlock(&oom_reaper_lock);
2134 ++ spin_unlock_irqrestore(&oom_reaper_lock, flags);
2135 + trace_wake_reaper(tsk->pid);
2136 + wake_up(&oom_reaper_wait);
2137 + }
2138 +
2139 ++/*
2140 ++ * Give the OOM victim time to exit naturally before invoking the oom reaper.
2141 ++ * The timer's timeout is arbitrary... the longer it is, the longer the worst-
2142 ++ * case scenario for the OOM can take. If it is too small, the oom_reaper can
2143 ++ * get in the way and release resources needed by the process exit path.
2144 ++ * e.g. The futex robust list can sit in Anon|Private memory that gets reaped
2145 ++ * before the exit path is able to wake the futex waiters.
2146 ++ */
2147 ++#define OOM_REAPER_DELAY (2*HZ)
2148 ++static void queue_oom_reaper(struct task_struct *tsk)
2149 ++{
2150 ++ /* mm is already queued? */
2151 ++ if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
2152 ++ return;
2153 ++
2154 ++ get_task_struct(tsk);
2155 ++ timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
2156 ++ tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
2157 ++ add_timer(&tsk->oom_reaper_timer);
2158 ++}
2159 ++
2160 + static int __init oom_init(void)
2161 + {
2162 + oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
2163 +@@ -680,7 +706,7 @@ static int __init oom_init(void)
2164 + }
2165 + subsys_initcall(oom_init)
2166 + #else
2167 +-static inline void wake_oom_reaper(struct task_struct *tsk)
2168 ++static inline void queue_oom_reaper(struct task_struct *tsk)
2169 + {
2170 + }
2171 + #endif /* CONFIG_MMU */
2172 +@@ -931,7 +957,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
2173 + rcu_read_unlock();
2174 +
2175 + if (can_oom_reap)
2176 +- wake_oom_reaper(victim);
2177 ++ queue_oom_reaper(victim);
2178 +
2179 + mmdrop(mm);
2180 + put_task_struct(victim);
2181 +@@ -967,7 +993,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
2182 + task_lock(victim);
2183 + if (task_will_free_mem(victim)) {
2184 + mark_oom_victim(victim);
2185 +- wake_oom_reaper(victim);
2186 ++ queue_oom_reaper(victim);
2187 + task_unlock(victim);
2188 + put_task_struct(victim);
2189 + return;
2190 +@@ -1065,7 +1091,7 @@ bool out_of_memory(struct oom_control *oc)
2191 + */
2192 + if (task_will_free_mem(current)) {
2193 + mark_oom_victim(current);
2194 +- wake_oom_reaper(current);
2195 ++ queue_oom_reaper(current);
2196 + return true;
2197 + }
2198 +
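
[Editor's note] The oom_kill.c changes replace the immediate reaper wakeup with a two-second timer, so the victim's own exit path gets a chance to run first. A userspace sketch of the deferred-reap idea, using a plain deadline where the kernel uses a per-task timer_list (names invented):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define REAPER_DELAY_SEC 2          /* mirrors OOM_REAPER_DELAY = 2*HZ */

struct victim {
    time_t reap_after;              /* stand-in for the per-task timer */
    bool   exited;
};

/* Arm the grace period once; the real code uses timer_setup()/add_timer(). */
static void queue_reaper(struct victim *v)
{
    v->reap_after = time(NULL) + REAPER_DELAY_SEC;
}

/* Reap only if the victim is still around after the grace period, so
 * exit paths (e.g. futex robust-list cleanup) can run first. */
static bool should_reap(const struct victim *v)
{
    return !v->exited && time(NULL) >= v->reap_after;
}

int main(void)
{
    struct victim v = { .exited = false };

    queue_reaper(&v);
    printf("reap now? %d (grace period still running)\n", should_reap(&v));
    return 0;
}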
2199 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2200 +index f022e0024e8db..f3418edb136be 100644
2201 +--- a/mm/page_alloc.c
2202 ++++ b/mm/page_alloc.c
2203 +@@ -7678,7 +7678,7 @@ void __init mem_init_print_info(const char *str)
2204 + */
2205 + #define adj_init_size(start, end, size, pos, adj) \
2206 + do { \
2207 +- if (start <= pos && pos < end && size > adj) \
2208 ++ if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
2209 + size -= adj; \
2210 + } while (0)
2211 +
2212 +diff --git a/net/can/isotp.c b/net/can/isotp.c
2213 +index 9a4a9c5a9f24c..c515bbd46c679 100644
2214 +--- a/net/can/isotp.c
2215 ++++ b/net/can/isotp.c
2216 +@@ -864,6 +864,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
2217 + struct canfd_frame *cf;
2218 + int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
2219 + int wait_tx_done = (so->opt.flags & CAN_ISOTP_WAIT_TX_DONE) ? 1 : 0;
2220 ++ s64 hrtimer_sec = 0;
2221 + int off;
2222 + int err;
2223 +
2224 +@@ -962,7 +963,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
2225 + isotp_create_fframe(cf, so, ae);
2226 +
2227 + /* start timeout for FC */
2228 +- hrtimer_start(&so->txtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
2229 ++ hrtimer_sec = 1;
2230 ++ hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0),
2231 ++ HRTIMER_MODE_REL_SOFT);
2232 + }
2233 +
2234 + /* send the first or only CAN frame */
2235 +@@ -975,6 +978,11 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
2236 + if (err) {
2237 + pr_notice_once("can-isotp: %s: can_send_ret %d\n",
2238 + __func__, err);
2239 ++
2240 ++ /* no transmission -> no timeout monitoring */
2241 ++ if (hrtimer_sec)
2242 ++ hrtimer_cancel(&so->txtimer);
2243 ++
2244 + goto err_out_drop;
2245 + }
2246 +
2247 +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
2248 +index 9aae82145bc16..20d7381378418 100644
2249 +--- a/net/ipv4/esp4.c
2250 ++++ b/net/ipv4/esp4.c
2251 +@@ -448,7 +448,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
2252 + struct page *page;
2253 + struct sk_buff *trailer;
2254 + int tailen = esp->tailen;
2255 +- unsigned int allocsz;
2256 +
2257 + /* this is non-NULL only with TCP/UDP Encapsulation */
2258 + if (x->encap) {
2259 +@@ -458,8 +457,8 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
2260 + return err;
2261 + }
2262 +
2263 +- allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
2264 +- if (allocsz > ESP_SKB_FRAG_MAXSIZE)
2265 ++ if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
2266 ++ ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
2267 + goto cow;
2268 +
2269 + if (!skb_cloned(skb)) {
2270 +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
2271 +index 20c7bef6829e1..cb28f8928f9ee 100644
2272 +--- a/net/ipv6/esp6.c
2273 ++++ b/net/ipv6/esp6.c
2274 +@@ -483,7 +483,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
2275 + struct page *page;
2276 + struct sk_buff *trailer;
2277 + int tailen = esp->tailen;
2278 +- unsigned int allocsz;
2279 +
2280 + if (x->encap) {
2281 + int err = esp6_output_encap(x, skb, esp);
2282 +@@ -492,8 +491,8 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
2283 + return err;
2284 + }
2285 +
2286 +- allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
2287 +- if (allocsz > ESP_SKB_FRAG_MAXSIZE)
2288 ++ if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
2289 ++ ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
2290 + goto cow;
2291 +
2292 + if (!skb_cloned(skb)) {
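
[Editor's note] Both ESP hunks drop the summed allocsz test, which a moderate total could satisfy even when one component alone exceeded a page, and instead bound each length separately -- consistent with a per-fragment allocator that refills one page at a time (my reading of the hunks). A sketch of the new-style check with the usual ALIGN macro:

#include <stdbool.h>
#include <stdio.h>

#define L1_CACHE_BYTES 64
#define PAGE_SIZE 4096u
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1u))

/* Each component must fit a page on its own, so no sum can overflow
 * or let one oversized part hide behind a small other. */
static bool fits_in_frag(unsigned int data_len, unsigned int tailen)
{
    return ALIGN(tailen, L1_CACHE_BYTES) <= PAGE_SIZE &&
           ALIGN(data_len, L1_CACHE_BYTES) <= PAGE_SIZE;
}

int main(void)
{
    printf("%d\n", fits_in_frag(3000, 64));   /* 1: both fit */
    printf("%d\n", fits_in_frag(5000, 64));   /* 0: data too big */
    return 0;
}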
2293 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
2294 +index 9a0263f252323..1f6c752f13b40 100644
2295 +--- a/net/ipv6/ip6_gre.c
2296 ++++ b/net/ipv6/ip6_gre.c
2297 +@@ -733,9 +733,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
2298 + else
2299 + fl6->daddr = tunnel->parms.raddr;
2300 +
2301 +- if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
2302 +- return -ENOMEM;
2303 +-
2304 + /* Push GRE header. */
2305 + protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
2306 +
2307 +@@ -743,6 +740,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
2308 + struct ip_tunnel_info *tun_info;
2309 + const struct ip_tunnel_key *key;
2310 + __be16 flags;
2311 ++ int tun_hlen;
2312 +
2313 + tun_info = skb_tunnel_info_txcheck(skb);
2314 + if (IS_ERR(tun_info) ||
2315 +@@ -760,9 +758,12 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
2316 + dsfield = key->tos;
2317 + flags = key->tun_flags &
2318 + (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
2319 +- tunnel->tun_hlen = gre_calc_hlen(flags);
2320 ++ tun_hlen = gre_calc_hlen(flags);
2321 +
2322 +- gre_build_header(skb, tunnel->tun_hlen,
2323 ++ if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
2324 ++ return -ENOMEM;
2325 ++
2326 ++ gre_build_header(skb, tun_hlen,
2327 + flags, protocol,
2328 + tunnel_id_to_key32(tun_info->key.tun_id),
2329 + (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
2330 +@@ -772,6 +773,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
2331 + if (tunnel->parms.o_flags & TUNNEL_SEQ)
2332 + tunnel->o_seqno++;
2333 +
2334 ++ if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
2335 ++ return -ENOMEM;
2336 ++
2337 + gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
2338 + protocol, tunnel->parms.o_key,
2339 + htonl(tunnel->o_seqno));
2340 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2341 +index 776b1b58c5dc6..6ace9f0ac22f3 100644
2342 +--- a/net/ipv6/route.c
2343 ++++ b/net/ipv6/route.c
2344 +@@ -3192,6 +3192,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
2345 + int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
2346 + int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
2347 + unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
2348 ++ unsigned int val;
2349 + int entries;
2350 +
2351 + entries = dst_entries_get_fast(ops);
2352 +@@ -3202,13 +3203,13 @@ static int ip6_dst_gc(struct dst_ops *ops)
2353 + entries <= rt_max_size)
2354 + goto out;
2355 +
2356 +- net->ipv6.ip6_rt_gc_expire++;
2357 +- fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
2358 ++ fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
2359 + entries = dst_entries_get_slow(ops);
2360 + if (entries < ops->gc_thresh)
2361 +- net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
2362 ++ atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
2363 + out:
2364 +- net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
2365 ++ val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
2366 ++ atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
2367 + return entries > rt_max_size;
2368 + }
2369 +
2370 +@@ -6363,7 +6364,7 @@ static int __net_init ip6_route_net_init(struct net *net)
2371 + net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
2372 + net->ipv6.sysctl.skip_notify_on_dev_down = 0;
2373 +
2374 +- net->ipv6.ip6_rt_gc_expire = 30*HZ;
2375 ++ atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
2376 +
2377 + ret = 0;
2378 + out:
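
[Editor's note] The route.c hunk turns ip6_rt_gc_expire into an atomic_t so concurrent GC invocations neither lose increments nor tear the decay update. The increment-then-decay shape in C11 stdatomic form, as a sketch:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint gc_expire;

static void run_gc(unsigned elasticity)
{
    /* Bump atomically and use the post-increment value, as the
     * patched fib6_run_gc() call does. */
    unsigned expire = atomic_fetch_add(&gc_expire, 1) + 1;

    printf("gc with expire=%u\n", expire);

    /* Exponential decay; a racing reader may observe either the old
     * or the new value, which is fine for a heuristic. */
    unsigned val = atomic_load(&gc_expire);
    atomic_store(&gc_expire, val - (val >> elasticity));
}

int main(void)
{
    atomic_store(&gc_expire, 30);
    run_gc(9);
    run_gc(9);
    return 0;
}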
2379 +diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
2380 +index 864326f150e2f..f2c3a61ad134b 100644
2381 +--- a/net/l3mdev/l3mdev.c
2382 ++++ b/net/l3mdev/l3mdev.c
2383 +@@ -147,7 +147,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
2384 +
2385 + dev = dev_get_by_index_rcu(net, ifindex);
2386 + while (dev && !netif_is_l3_master(dev))
2387 +- dev = netdev_master_upper_dev_get(dev);
2388 ++ dev = netdev_master_upper_dev_get_rcu(dev);
2389 +
2390 + return dev ? dev->ifindex : 0;
2391 + }
2392 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2393 +index f37916156ca52..cbfb601c4ee98 100644
2394 +--- a/net/netlink/af_netlink.c
2395 ++++ b/net/netlink/af_netlink.c
2396 +@@ -2276,6 +2276,13 @@ static int netlink_dump(struct sock *sk)
2397 + * single netdev. The outcome is MSG_TRUNC error.
2398 + */
2399 + skb_reserve(skb, skb_tailroom(skb) - alloc_size);
2400 ++
2401 ++ /* Make sure malicious BPF programs cannot read uninitialized memory
2402 ++ * from skb->head -> skb->data
2403 ++ */
2404 ++ skb_reset_network_header(skb);
2405 ++ skb_reset_mac_header(skb);
2406 ++
2407 + netlink_skb_set_owner_r(skb, sk);
2408 +
2409 + if (nlk->dump_done_errno > 0) {
2410 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
2411 +index 98a7e6f64ab0b..293a798e89f42 100644
2412 +--- a/net/openvswitch/flow_netlink.c
2413 ++++ b/net/openvswitch/flow_netlink.c
2414 +@@ -2436,7 +2436,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
2415 + new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
2416 +
2417 + if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
2418 +- if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
2419 ++ if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
2420 + OVS_NLERR(log, "Flow action size exceeds max %u",
2421 + MAX_ACTIONS_BUFSIZE);
2422 + return ERR_PTR(-EMSGSIZE);
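
[Editor's note] The flow_netlink hunk rewrites the size check from subtraction to addition. With unsigned arithmetic, MAX - next_offset wraps to a huge value whenever next_offset already exceeds the limit, so the old comparison could wrongly pass. A two-line demonstration:

#include <stdio.h>

#define MAX_BUFSIZE 32768u

int main(void)
{
    unsigned int next_offset = 40000;   /* already past the limit */
    unsigned int req_size = 16;

    /* Old form: 32768u - 40000 wraps to ~4.29e9, so the check fails
     * to reject the request (prints 0). */
    printf("old check rejects: %d\n",
           (MAX_BUFSIZE - next_offset) < req_size);
    /* New form catches it (prints 1); it still assumes the sum itself
     * cannot wrap, which holds for these bounded inputs. */
    printf("new check rejects: %d\n",
           (next_offset + req_size) > MAX_BUFSIZE);
    return 0;
}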
2423 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2424 +index d0c95d7dd292d..5ee600d108a0a 100644
2425 +--- a/net/packet/af_packet.c
2426 ++++ b/net/packet/af_packet.c
2427 +@@ -2817,8 +2817,9 @@ tpacket_error:
2428 +
2429 + status = TP_STATUS_SEND_REQUEST;
2430 + err = po->xmit(skb);
2431 +- if (unlikely(err > 0)) {
2432 +- err = net_xmit_errno(err);
2433 ++ if (unlikely(err != 0)) {
2434 ++ if (err > 0)
2435 ++ err = net_xmit_errno(err);
2436 + if (err && __packet_get_status(po, ph) ==
2437 + TP_STATUS_AVAILABLE) {
2438 + /* skb was destructed already */
2439 +@@ -3019,8 +3020,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2440 + skb->no_fcs = 1;
2441 +
2442 + err = po->xmit(skb);
2443 +- if (err > 0 && (err = net_xmit_errno(err)) != 0)
2444 +- goto out_unlock;
2445 ++ if (unlikely(err != 0)) {
2446 ++ if (err > 0)
2447 ++ err = net_xmit_errno(err);
2448 ++ if (err)
2449 ++ goto out_unlock;
2450 ++ }
2451 +
2452 + dev_put(dev);
2453 +
2454 +diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
2455 +index f15d6942da453..cc7e30733feb0 100644
2456 +--- a/net/rxrpc/net_ns.c
2457 ++++ b/net/rxrpc/net_ns.c
2458 +@@ -113,7 +113,9 @@ static __net_exit void rxrpc_exit_net(struct net *net)
2459 + struct rxrpc_net *rxnet = rxrpc_net(net);
2460 +
2461 + rxnet->live = false;
2462 ++ del_timer_sync(&rxnet->peer_keepalive_timer);
2463 + cancel_work_sync(&rxnet->peer_keepalive_work);
2464 ++ /* Remove the timer again as the worker may have restarted it. */
2465 + del_timer_sync(&rxnet->peer_keepalive_timer);
2466 + rxrpc_destroy_all_calls(rxnet);
2467 + rxrpc_destroy_all_connections(rxnet);
2468 +diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
2469 +index 54209a18d7fec..da042bc8b239d 100644
2470 +--- a/net/sched/cls_u32.c
2471 ++++ b/net/sched/cls_u32.c
2472 +@@ -386,14 +386,19 @@ static int u32_init(struct tcf_proto *tp)
2473 + return 0;
2474 + }
2475 +
2476 +-static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
2477 ++static void __u32_destroy_key(struct tc_u_knode *n)
2478 + {
2479 + struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
2480 +
2481 + tcf_exts_destroy(&n->exts);
2482 +- tcf_exts_put_net(&n->exts);
2483 + if (ht && --ht->refcnt == 0)
2484 + kfree(ht);
2485 ++ kfree(n);
2486 ++}
2487 ++
2488 ++static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
2489 ++{
2490 ++ tcf_exts_put_net(&n->exts);
2491 + #ifdef CONFIG_CLS_U32_PERF
2492 + if (free_pf)
2493 + free_percpu(n->pf);
2494 +@@ -402,8 +407,7 @@ static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
2495 + if (free_pf)
2496 + free_percpu(n->pcpu_success);
2497 + #endif
2498 +- kfree(n);
2499 +- return 0;
2500 ++ __u32_destroy_key(n);
2501 + }
2502 +
2503 + /* u32_delete_key_rcu should be called when free'ing a copied
2504 +@@ -810,10 +814,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
2505 + new->flags = n->flags;
2506 + RCU_INIT_POINTER(new->ht_down, ht);
2507 +
2508 +- /* bump reference count as long as we hold pointer to structure */
2509 +- if (ht)
2510 +- ht->refcnt++;
2511 +-
2512 + #ifdef CONFIG_CLS_U32_PERF
2513 + /* Statistics may be incremented by readers during update
2514 + * so we must keep them intact. When the node is later destroyed
2515 +@@ -835,6 +835,10 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
2516 + return NULL;
2517 + }
2518 +
2519 ++ /* bump reference count as long as we hold pointer to structure */
2520 ++ if (ht)
2521 ++ ht->refcnt++;
2522 ++
2523 + return new;
2524 + }
2525 +
2526 +@@ -898,13 +902,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
2527 + tca[TCA_RATE], ovr, extack);
2528 +
2529 + if (err) {
2530 +- u32_destroy_key(new, false);
2531 ++ __u32_destroy_key(new);
2532 + return err;
2533 + }
2534 +
2535 + err = u32_replace_hw_knode(tp, new, flags, extack);
2536 + if (err) {
2537 +- u32_destroy_key(new, false);
2538 ++ __u32_destroy_key(new);
2539 + return err;
2540 + }
2541 +
2542 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
2543 +index 4f16d406ad8ea..1b98f3241150b 100644
2544 +--- a/net/smc/af_smc.c
2545 ++++ b/net/smc/af_smc.c
2546 +@@ -2144,8 +2144,10 @@ static int smc_shutdown(struct socket *sock, int how)
2547 + if (smc->use_fallback) {
2548 + rc = kernel_sock_shutdown(smc->clcsock, how);
2549 + sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
2550 +- if (sk->sk_shutdown == SHUTDOWN_MASK)
2551 ++ if (sk->sk_shutdown == SHUTDOWN_MASK) {
2552 + sk->sk_state = SMC_CLOSED;
2553 ++ sock_put(sk);
2554 ++ }
2555 + goto out;
2556 + }
2557 + switch (how) {
2558 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2559 +index 11d653190e6ea..b5168959fcf63 100644
2560 +--- a/sound/pci/hda/patch_realtek.c
2561 ++++ b/sound/pci/hda/patch_realtek.c
2562 +@@ -8897,6 +8897,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2563 + SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[5|7][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
2564 + SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
2565 + SND_PCI_QUIRK(0x1558, 0x866d, "Clevo NP5[05]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
2566 ++ SND_PCI_QUIRK(0x1558, 0x867c, "Clevo NP7[01]PNP", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
2567 + SND_PCI_QUIRK(0x1558, 0x867d, "Clevo NP7[01]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
2568 + SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
2569 + SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME),
2570 +diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
2571 +index 8a55d59a6c2aa..d243de5f23dc1 100644
2572 +--- a/sound/soc/atmel/sam9g20_wm8731.c
2573 ++++ b/sound/soc/atmel/sam9g20_wm8731.c
2574 +@@ -46,35 +46,6 @@
2575 + */
2576 + #undef ENABLE_MIC_INPUT
2577 +
2578 +-static struct clk *mclk;
2579 +-
2580 +-static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
2581 +- struct snd_soc_dapm_context *dapm,
2582 +- enum snd_soc_bias_level level)
2583 +-{
2584 +- static int mclk_on;
2585 +- int ret = 0;
2586 +-
2587 +- switch (level) {
2588 +- case SND_SOC_BIAS_ON:
2589 +- case SND_SOC_BIAS_PREPARE:
2590 +- if (!mclk_on)
2591 +- ret = clk_enable(mclk);
2592 +- if (ret == 0)
2593 +- mclk_on = 1;
2594 +- break;
2595 +-
2596 +- case SND_SOC_BIAS_OFF:
2597 +- case SND_SOC_BIAS_STANDBY:
2598 +- if (mclk_on)
2599 +- clk_disable(mclk);
2600 +- mclk_on = 0;
2601 +- break;
2602 +- }
2603 +-
2604 +- return ret;
2605 +-}
2606 +-
2607 + static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
2608 + SND_SOC_DAPM_MIC("Int Mic", NULL),
2609 + SND_SOC_DAPM_SPK("Ext Spk", NULL),
2610 +@@ -135,7 +106,6 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
2611 + .owner = THIS_MODULE,
2612 + .dai_link = &at91sam9g20ek_dai,
2613 + .num_links = 1,
2614 +- .set_bias_level = at91sam9g20ek_set_bias_level,
2615 +
2616 + .dapm_widgets = at91sam9g20ek_dapm_widgets,
2617 + .num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
2618 +@@ -148,7 +118,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
2619 + {
2620 + struct device_node *np = pdev->dev.of_node;
2621 + struct device_node *codec_np, *cpu_np;
2622 +- struct clk *pllb;
2623 + struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
2624 + int ret;
2625 +
2626 +@@ -162,31 +131,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
2627 + return -EINVAL;
2628 + }
2629 +
2630 +- /*
2631 +- * Codec MCLK is supplied by PCK0 - set it up.
2632 +- */
2633 +- mclk = clk_get(NULL, "pck0");
2634 +- if (IS_ERR(mclk)) {
2635 +- dev_err(&pdev->dev, "Failed to get MCLK\n");
2636 +- ret = PTR_ERR(mclk);
2637 +- goto err;
2638 +- }
2639 +-
2640 +- pllb = clk_get(NULL, "pllb");
2641 +- if (IS_ERR(pllb)) {
2642 +- dev_err(&pdev->dev, "Failed to get PLLB\n");
2643 +- ret = PTR_ERR(pllb);
2644 +- goto err_mclk;
2645 +- }
2646 +- ret = clk_set_parent(mclk, pllb);
2647 +- clk_put(pllb);
2648 +- if (ret != 0) {
2649 +- dev_err(&pdev->dev, "Failed to set MCLK parent\n");
2650 +- goto err_mclk;
2651 +- }
2652 +-
2653 +- clk_set_rate(mclk, MCLK_RATE);
2654 +-
2655 + card->dev = &pdev->dev;
2656 +
2657 + /* Parse device node info */
2658 +@@ -230,9 +174,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
2659 +
2660 + return ret;
2661 +
2662 +-err_mclk:
2663 +- clk_put(mclk);
2664 +- mclk = NULL;
2665 + err:
2666 + atmel_ssc_put_audio(0);
2667 + return ret;
2668 +@@ -242,8 +183,6 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
2669 + {
2670 + struct snd_soc_card *card = platform_get_drvdata(pdev);
2671 +
2672 +- clk_disable(mclk);
2673 +- mclk = NULL;
2674 + snd_soc_unregister_card(card);
2675 + atmel_ssc_put_audio(0);
2676 +
2677 +diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
2678 +index 9ad7fc0baf072..20a07c92b2fc2 100644
2679 +--- a/sound/soc/codecs/msm8916-wcd-digital.c
2680 ++++ b/sound/soc/codecs/msm8916-wcd-digital.c
2681 +@@ -1206,9 +1206,16 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
2682 +
2683 + dev_set_drvdata(dev, priv);
2684 +
2685 +- return devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
2686 ++ ret = devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
2687 + msm8916_wcd_digital_dai,
2688 + ARRAY_SIZE(msm8916_wcd_digital_dai));
2689 ++ if (ret)
2690 ++ goto err_mclk;
2691 ++
2692 ++ return 0;
2693 ++
2694 ++err_mclk:
2695 ++ clk_disable_unprepare(priv->mclk);
2696 + err_clk:
2697 + clk_disable_unprepare(priv->ahbclk);
2698 + return ret;
2699 +diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
2700 +index 8540ac230d0ed..fd704df9b1758 100644
2701 +--- a/sound/soc/codecs/wcd934x.c
2702 ++++ b/sound/soc/codecs/wcd934x.c
2703 +@@ -1188,29 +1188,7 @@ static int wcd934x_set_sido_input_src(struct wcd934x_codec *wcd, int sido_src)
2704 + if (sido_src == wcd->sido_input_src)
2705 + return 0;
2706 +
2707 +- if (sido_src == SIDO_SOURCE_INTERNAL) {
2708 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
2709 +- WCD934X_ANA_BUCK_HI_ACCU_EN_MASK, 0);
2710 +- usleep_range(100, 110);
2711 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
2712 +- WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK, 0x0);
2713 +- usleep_range(100, 110);
2714 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
2715 +- WCD934X_ANA_RCO_BG_EN_MASK, 0);
2716 +- usleep_range(100, 110);
2717 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
2718 +- WCD934X_ANA_BUCK_PRE_EN1_MASK,
2719 +- WCD934X_ANA_BUCK_PRE_EN1_ENABLE);
2720 +- usleep_range(100, 110);
2721 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
2722 +- WCD934X_ANA_BUCK_PRE_EN2_MASK,
2723 +- WCD934X_ANA_BUCK_PRE_EN2_ENABLE);
2724 +- usleep_range(100, 110);
2725 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
2726 +- WCD934X_ANA_BUCK_HI_ACCU_EN_MASK,
2727 +- WCD934X_ANA_BUCK_HI_ACCU_ENABLE);
2728 +- usleep_range(100, 110);
2729 +- } else if (sido_src == SIDO_SOURCE_RCO_BG) {
2730 ++ if (sido_src == SIDO_SOURCE_RCO_BG) {
2731 + regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
2732 + WCD934X_ANA_RCO_BG_EN_MASK,
2733 + WCD934X_ANA_RCO_BG_ENABLE);
2734 +@@ -1296,8 +1274,6 @@ static int wcd934x_disable_ana_bias_and_syclk(struct wcd934x_codec *wcd)
2735 + regmap_update_bits(wcd->regmap, WCD934X_CLK_SYS_MCLK_PRG,
2736 + WCD934X_EXT_CLK_BUF_EN_MASK |
2737 + WCD934X_MCLK_EN_MASK, 0x0);
2738 +- wcd934x_set_sido_input_src(wcd, SIDO_SOURCE_INTERNAL);
2739 +-
2740 + regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
2741 + WCD934X_ANA_BIAS_EN_MASK, 0);
2742 + regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
2743 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2744 +index 2924d89bf0daf..417732bdf2860 100644
2745 +--- a/sound/soc/soc-dapm.c
2746 ++++ b/sound/soc/soc-dapm.c
2747 +@@ -1683,8 +1683,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
2748 + switch (w->id) {
2749 + case snd_soc_dapm_pre:
2750 + if (!w->event)
2751 +- list_for_each_entry_safe_continue(w, n, list,
2752 +- power_list);
2753 ++ continue;
2754 +
2755 + if (event == SND_SOC_DAPM_STREAM_START)
2756 + ret = w->event(w,
2757 +@@ -1696,8 +1695,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
2758 +
2759 + case snd_soc_dapm_post:
2760 + if (!w->event)
2761 +- list_for_each_entry_safe_continue(w, n, list,
2762 +- power_list);
2763 ++ continue;
2764 +
2765 + if (event == SND_SOC_DAPM_STREAM_START)
2766 + ret = w->event(w,
2767 +diff --git a/sound/usb/midi.c b/sound/usb/midi.c
2768 +index fa91290ad89db..84676a8fb60dc 100644
2769 +--- a/sound/usb/midi.c
2770 ++++ b/sound/usb/midi.c
2771 +@@ -1210,6 +1210,7 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
2772 + } while (drain_urbs && timeout);
2773 + finish_wait(&ep->drain_wait, &wait);
2774 + }
2775 ++ port->active = 0;
2776 + spin_unlock_irq(&ep->buffer_lock);
2777 + }
2778 +
2779 +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
2780 +index e54a98f465490..d8e31ee03b9d0 100644
2781 +--- a/sound/usb/usbaudio.h
2782 ++++ b/sound/usb/usbaudio.h
2783 +@@ -8,7 +8,7 @@
2784 + */
2785 +
2786 + /* handling of USB vendor/product ID pairs as 32-bit numbers */
2787 +-#define USB_ID(vendor, product) (((vendor) << 16) | (product))
2788 ++#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))
2789 + #define USB_ID_VENDOR(id) ((id) >> 16)
2790 + #define USB_ID_PRODUCT(id) ((u16)(id))
2791 +
2792 +diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
2793 +index 17465d454a0e3..f76b1a9d5a6e1 100644
2794 +--- a/tools/lib/perf/evlist.c
2795 ++++ b/tools/lib/perf/evlist.c
2796 +@@ -571,7 +571,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
2797 + {
2798 + struct perf_evsel *evsel;
2799 + const struct perf_cpu_map *cpus = evlist->cpus;
2800 +- const struct perf_thread_map *threads = evlist->threads;
2801 +
2802 + if (!ops || !ops->get || !ops->mmap)
2803 + return -EINVAL;
2804 +@@ -583,7 +582,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
2805 + perf_evlist__for_each_entry(evlist, evsel) {
2806 + if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
2807 + evsel->sample_id == NULL &&
2808 +- perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
2809 ++ perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
2810 + return -ENOMEM;
2811 + }
2812 +
2813 +diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
2814 +index 91cab5cdfbc16..b55ee073c2f72 100644
2815 +--- a/tools/perf/builtin-report.c
2816 ++++ b/tools/perf/builtin-report.c
2817 +@@ -340,6 +340,7 @@ static int report__setup_sample_type(struct report *rep)
2818 + struct perf_session *session = rep->session;
2819 + u64 sample_type = evlist__combined_sample_type(session->evlist);
2820 + bool is_pipe = perf_data__is_pipe(session->data);
2821 ++ struct evsel *evsel;
2822 +
2823 + if (session->itrace_synth_opts->callchain ||
2824 + session->itrace_synth_opts->add_callchain ||
2825 +@@ -394,6 +395,19 @@ static int report__setup_sample_type(struct report *rep)
2826 + }
2827 +
2828 + if (sort__mode == SORT_MODE__MEMORY) {
2829 ++ /*
2830 ++ * FIXUP: prior to kernel 5.18, Arm SPE missed to set
2831 ++ * PERF_SAMPLE_DATA_SRC bit in sample type. For backward
2832 ++ * compatibility, set the bit if it's an old perf data file.
2833 ++ */
2834 ++ evlist__for_each_entry(session->evlist, evsel) {
2835 ++ if (strstr(evsel->name, "arm_spe") &&
2836 ++ !(sample_type & PERF_SAMPLE_DATA_SRC)) {
2837 ++ evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
2838 ++ sample_type |= PERF_SAMPLE_DATA_SRC;
2839 ++ }
2840 ++ }
2841 ++
2842 + if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
2843 + ui__error("Selected --mem-mode but no mem data. "
2844 + "Did you call perf record without -d?\n");
2845 +diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
2846 +index fedcb7b35af9f..af5ea50ed5c0e 100755
2847 +--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
2848 ++++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
2849 +@@ -172,6 +172,17 @@ flooding_filters_add()
2850 + local lsb
2851 + local i
2852 +
2853 ++ # Prevent unwanted packets from entering the bridge and interfering
2854 ++ # with the test.
2855 ++ tc qdisc add dev br0 clsact
2856 ++ tc filter add dev br0 egress protocol all pref 1 handle 1 \
2857 ++ matchall skip_hw action drop
2858 ++ tc qdisc add dev $h1 clsact
2859 ++ tc filter add dev $h1 egress protocol all pref 1 handle 1 \
2860 ++ flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
2861 ++ tc filter add dev $h1 egress protocol all pref 2 handle 2 \
2862 ++ matchall skip_hw action drop
2863 ++
2864 + tc qdisc add dev $rp2 clsact
2865 +
2866 + for i in $(eval echo {1..$num_remotes}); do
2867 +@@ -194,6 +205,12 @@ flooding_filters_del()
2868 + done
2869 +
2870 + tc qdisc del dev $rp2 clsact
2871 ++
2872 ++ tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
2873 ++ tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
2874 ++ tc qdisc del dev $h1 clsact
2875 ++ tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
2876 ++ tc qdisc del dev br0 clsact
2877 + }
2878 +
2879 + flooding_check_packets()