From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Wed, 27 Apr 2022 13:11:34
Message-Id: 1651065078.7e3e69e091451f1eb1f39105e5b2cc1597a483ef.mpagano@gentoo
1 commit: 7e3e69e091451f1eb1f39105e5b2cc1597a483ef
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Apr 27 13:11:18 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Apr 27 13:11:18 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7e3e69e0
7
8 Linux patch 5.15.36
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1035_linux-5.15.36.patch | 5013 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 5017 insertions(+)
15
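A note on mechanics before the diff itself: each entry added to 0000_README (see the first hunk below) registers a patch with a Patch:/From:/Desc: triplet, and the registered patches are applied to the kernel source tree in the order listed, conventionally with -p1 to match the a/ and b/ path prefixes. The sketch below is illustrative only; the real application logic lives in Gentoo's kernel build machinery, and the kernel_src/patch_dir paths and file names are assumptions for demonstration.

#!/usr/bin/env python3
# Illustrative sketch only: collects 0000_README "Patch:" entries and applies
# them in listed order. Gentoo's actual kernel build machinery does this
# differently; the linux-5.15/linux-patches paths here are hypothetical.
import re
import subprocess
from pathlib import Path

def list_patches(readme: Path) -> list[str]:
    # Pull the file name out of each "Patch: <name>" line, preserving order.
    return [m.group(1)
            for line in readme.read_text().splitlines()
            if (m := re.match(r"Patch:\s+(\S+)", line))]

def apply_patches(kernel_src: Path, patch_dir: Path) -> None:
    # Apply each registered patch from the kernel source root with -p1,
    # matching the a/ and b/ prefixes used in the diff below.
    for name in list_patches(patch_dir / "0000_README"):
        subprocess.run(["patch", "-p1", "-i", str(patch_dir / name)],
                       cwd=kernel_src, check=True)

if __name__ == "__main__":
    apply_patches(Path("linux-5.15"), Path("linux-patches"))

Under those assumptions, running this against a pristine 5.15 tree would take SUBLEVEL from 35 to 36 exactly as the Makefile hunk below records.
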
16 diff --git a/0000_README b/0000_README
17 index d98d254b..f44dec35 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -183,6 +183,10 @@ Patch: 1034_linux-5.15.35.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.15.35
23
24 +Patch: 1035_linux-5.15.36.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.15.36
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1035_linux-5.15.36.patch b/1035_linux-5.15.36.patch
33 new file mode 100644
34 index 00000000..9beabe36
35 --- /dev/null
36 +++ b/1035_linux-5.15.36.patch
37 @@ -0,0 +1,5013 @@
38 +diff --git a/Documentation/filesystems/ext4/attributes.rst b/Documentation/filesystems/ext4/attributes.rst
39 +index 54386a010a8d7..871d2da7a0a91 100644
40 +--- a/Documentation/filesystems/ext4/attributes.rst
41 ++++ b/Documentation/filesystems/ext4/attributes.rst
42 +@@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
43 + - Checksum of the extended attribute block.
44 + * - 0x14
45 + - \_\_u32
46 +- - h\_reserved[2]
47 ++ - h\_reserved[3]
48 + - Zero.
49 +
50 + The checksum is calculated against the FS UUID, the 64-bit block number
51 +diff --git a/Makefile b/Makefile
52 +index e5440c513f5ac..e0710f9837847 100644
53 +--- a/Makefile
54 ++++ b/Makefile
55 +@@ -1,7 +1,7 @@
56 + # SPDX-License-Identifier: GPL-2.0
57 + VERSION = 5
58 + PATCHLEVEL = 15
59 +-SUBLEVEL = 35
60 ++SUBLEVEL = 36
61 + EXTRAVERSION =
62 + NAME = Trick or Treat
63 +
64 +diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
65 +index dd77a0c8f740b..66ba549b520fc 100644
66 +--- a/arch/arc/kernel/entry.S
67 ++++ b/arch/arc/kernel/entry.S
68 +@@ -196,6 +196,7 @@ tracesys_exit:
69 + st r0, [sp, PT_r0] ; sys call return value in pt_regs
70 +
71 + ;POST Sys Call Ptrace Hook
72 ++ mov r0, sp ; pt_regs needed
73 + bl @syscall_trace_exit
74 + b ret_from_exception ; NOT ret_from_system_call at is saves r0 which
75 + ; we'd done before calling post hook above
76 +diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
77 +index 1da11bdb1dfbd..1c6500c4e6a17 100644
78 +--- a/arch/arm/mach-vexpress/spc.c
79 ++++ b/arch/arm/mach-vexpress/spc.c
80 +@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
81 + }
82 +
83 + cluster = topology_physical_package_id(cpu_dev->id);
84 +- if (init_opp_table[cluster])
85 ++ if (cluster < 0 || init_opp_table[cluster])
86 + continue;
87 +
88 + if (ve_init_opp_table(cpu_dev))
89 +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
90 +index 1a18c90457738..8b6f090e0364c 100644
91 +--- a/arch/arm64/Kconfig
92 ++++ b/arch/arm64/Kconfig
93 +@@ -154,7 +154,6 @@ config ARM64
94 + select HAVE_ARCH_KGDB
95 + select HAVE_ARCH_MMAP_RND_BITS
96 + select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
97 +- select HAVE_ARCH_PFN_VALID
98 + select HAVE_ARCH_PREL32_RELOCATIONS
99 + select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
100 + select HAVE_ARCH_SECCOMP_FILTER
101 +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
102 +index 1dc9d187601c5..a0bd540f27d3d 100644
103 +--- a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
104 ++++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
105 +@@ -89,12 +89,12 @@
106 + pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
107 +
108 + ti,x-min = /bits/ 16 <125>;
109 +- touchscreen-size-x = /bits/ 16 <4008>;
110 ++ touchscreen-size-x = <4008>;
111 + ti,y-min = /bits/ 16 <282>;
112 +- touchscreen-size-y = /bits/ 16 <3864>;
113 ++ touchscreen-size-y = <3864>;
114 + ti,x-plate-ohms = /bits/ 16 <180>;
115 +- touchscreen-max-pressure = /bits/ 16 <255>;
116 +- touchscreen-average-samples = /bits/ 16 <10>;
117 ++ touchscreen-max-pressure = <255>;
118 ++ touchscreen-average-samples = <10>;
119 + ti,debounce-tol = /bits/ 16 <3>;
120 + ti,debounce-rep = /bits/ 16 <1>;
121 + ti,settle-delay-usec = /bits/ 16 <150>;
122 +diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
123 +index b16c7caf34c11..87b5e23c766f7 100644
124 +--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
125 ++++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
126 +@@ -70,12 +70,12 @@
127 + pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
128 +
129 + ti,x-min = /bits/ 16 <125>;
130 +- touchscreen-size-x = /bits/ 16 <4008>;
131 ++ touchscreen-size-x = <4008>;
132 + ti,y-min = /bits/ 16 <282>;
133 +- touchscreen-size-y = /bits/ 16 <3864>;
134 ++ touchscreen-size-y = <3864>;
135 + ti,x-plate-ohms = /bits/ 16 <180>;
136 +- touchscreen-max-pressure = /bits/ 16 <255>;
137 +- touchscreen-average-samples = /bits/ 16 <10>;
138 ++ touchscreen-max-pressure = <255>;
139 ++ touchscreen-average-samples = <10>;
140 + ti,debounce-tol = /bits/ 16 <3>;
141 + ti,debounce-rep = /bits/ 16 <1>;
142 + ti,settle-delay-usec = /bits/ 16 <150>;
143 +diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
144 +index 495c15deacb7d..de86ae3a7fd27 100644
145 +--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
146 ++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
147 +@@ -1460,6 +1460,8 @@
148 + "imem",
149 + "config";
150 +
151 ++ qcom,qmp = <&aoss_qmp>;
152 ++
153 + qcom,smem-states = <&ipa_smp2p_out 0>,
154 + <&ipa_smp2p_out 1>;
155 + qcom,smem-state-names = "ipa-clock-enabled-valid",
156 +diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
157 +index 692973c4f4344..b795a9993cc1b 100644
158 +--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
159 ++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
160 +@@ -615,6 +615,8 @@
161 + interconnect-names = "memory",
162 + "config";
163 +
164 ++ qcom,qmp = <&aoss_qmp>;
165 ++
166 + qcom,smem-states = <&ipa_smp2p_out 0>,
167 + <&ipa_smp2p_out 1>;
168 + qcom,smem-state-names = "ipa-clock-enabled-valid",
169 +diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
170 +index 3d32d55818168..9ffb7355850c7 100644
171 +--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
172 ++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
173 +@@ -736,6 +736,8 @@
174 + interconnect-names = "memory",
175 + "config";
176 +
177 ++ qcom,qmp = <&aoss_qmp>;
178 ++
179 + qcom,smem-states = <&ipa_smp2p_out 0>,
180 + <&ipa_smp2p_out 1>;
181 + qcom,smem-state-names = "ipa-clock-enabled-valid",
182 +diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
183 +index f98c91bbd7c17..993a27ea6f543 100644
184 +--- a/arch/arm64/include/asm/page.h
185 ++++ b/arch/arm64/include/asm/page.h
186 +@@ -41,7 +41,6 @@ void tag_clear_highpage(struct page *to);
187 +
188 + typedef struct page *pgtable_t;
189 +
190 +-int pfn_valid(unsigned long pfn);
191 + int pfn_is_map_memory(unsigned long pfn);
192 +
193 + #include <asm/memory.h>
194 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
195 +index 08363d3cc1da5..ed57717cd0040 100644
196 +--- a/arch/arm64/include/asm/pgtable.h
197 ++++ b/arch/arm64/include/asm/pgtable.h
198 +@@ -535,7 +535,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
199 + PMD_TYPE_TABLE)
200 + #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
201 + PMD_TYPE_SECT)
202 +-#define pmd_leaf(pmd) pmd_sect(pmd)
203 ++#define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd))
204 + #define pmd_bad(pmd) (!pmd_table(pmd))
205 +
206 + #define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
207 +@@ -625,7 +625,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
208 + #define pud_none(pud) (!pud_val(pud))
209 + #define pud_bad(pud) (!pud_table(pud))
210 + #define pud_present(pud) pte_present(pud_pte(pud))
211 +-#define pud_leaf(pud) pud_sect(pud)
212 ++#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
213 + #define pud_valid(pud) pte_valid(pud_pte(pud))
214 +
215 + static inline void set_pud(pud_t *pudp, pud_t pud)
216 +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
217 +index c59cb2efd5542..3b269c7567984 100644
218 +--- a/arch/arm64/mm/init.c
219 ++++ b/arch/arm64/mm/init.c
220 +@@ -184,43 +184,6 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
221 + free_area_init(max_zone_pfns);
222 + }
223 +
224 +-int pfn_valid(unsigned long pfn)
225 +-{
226 +- phys_addr_t addr = PFN_PHYS(pfn);
227 +- struct mem_section *ms;
228 +-
229 +- /*
230 +- * Ensure the upper PAGE_SHIFT bits are clear in the
231 +- * pfn. Else it might lead to false positives when
232 +- * some of the upper bits are set, but the lower bits
233 +- * match a valid pfn.
234 +- */
235 +- if (PHYS_PFN(addr) != pfn)
236 +- return 0;
237 +-
238 +- if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
239 +- return 0;
240 +-
241 +- ms = __pfn_to_section(pfn);
242 +- if (!valid_section(ms))
243 +- return 0;
244 +-
245 +- /*
246 +- * ZONE_DEVICE memory does not have the memblock entries.
247 +- * memblock_is_map_memory() check for ZONE_DEVICE based
248 +- * addresses will always fail. Even the normal hotplugged
249 +- * memory will never have MEMBLOCK_NOMAP flag set in their
250 +- * memblock entries. Skip memblock search for all non early
251 +- * memory sections covering all of hotplug memory including
252 +- * both normal and ZONE_DEVICE based.
253 +- */
254 +- if (!early_section(ms))
255 +- return pfn_section_valid(ms, pfn);
256 +-
257 +- return memblock_is_memory(addr);
258 +-}
259 +-EXPORT_SYMBOL(pfn_valid);
260 +-
261 + int pfn_is_map_memory(unsigned long pfn)
262 + {
263 + phys_addr_t addr = PFN_PHYS(pfn);
264 +diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
265 +index 6365087f31602..3cb2e05a7ee83 100644
266 +--- a/arch/powerpc/kvm/book3s_64_vio.c
267 ++++ b/arch/powerpc/kvm/book3s_64_vio.c
268 +@@ -421,13 +421,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
269 + tbl[idx % TCES_PER_PAGE] = tce;
270 + }
271 +
272 +-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
273 +- unsigned long entry)
274 ++static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
275 ++ struct iommu_table *tbl, unsigned long entry)
276 + {
277 +- unsigned long hpa = 0;
278 +- enum dma_data_direction dir = DMA_NONE;
279 ++ unsigned long i;
280 ++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
281 ++ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
282 ++
283 ++ for (i = 0; i < subpages; ++i) {
284 ++ unsigned long hpa = 0;
285 ++ enum dma_data_direction dir = DMA_NONE;
286 +
287 +- iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
288 ++ iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
289 ++ }
290 + }
291 +
292 + static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
293 +@@ -486,6 +492,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
294 + break;
295 + }
296 +
297 ++ iommu_tce_kill(tbl, io_entry, subpages);
298 ++
299 + return ret;
300 + }
301 +
302 +@@ -545,6 +553,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
303 + break;
304 + }
305 +
306 ++ iommu_tce_kill(tbl, io_entry, subpages);
307 ++
308 + return ret;
309 + }
310 +
311 +@@ -591,10 +601,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
312 + ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
313 + entry, ua, dir);
314 +
315 +- iommu_tce_kill(stit->tbl, entry, 1);
316 +
317 + if (ret != H_SUCCESS) {
318 +- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
319 ++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
320 + goto unlock_exit;
321 + }
322 + }
323 +@@ -670,13 +679,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
324 + */
325 + if (get_user(tce, tces + i)) {
326 + ret = H_TOO_HARD;
327 +- goto invalidate_exit;
328 ++ goto unlock_exit;
329 + }
330 + tce = be64_to_cpu(tce);
331 +
332 + if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
333 + ret = H_PARAMETER;
334 +- goto invalidate_exit;
335 ++ goto unlock_exit;
336 + }
337 +
338 + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
339 +@@ -685,19 +694,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
340 + iommu_tce_direction(tce));
341 +
342 + if (ret != H_SUCCESS) {
343 +- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
344 +- entry);
345 +- goto invalidate_exit;
346 ++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
347 ++ entry + i);
348 ++ goto unlock_exit;
349 + }
350 + }
351 +
352 + kvmppc_tce_put(stt, entry + i, tce);
353 + }
354 +
355 +-invalidate_exit:
356 +- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
357 +- iommu_tce_kill(stit->tbl, entry, npages);
358 +-
359 + unlock_exit:
360 + srcu_read_unlock(&vcpu->kvm->srcu, idx);
361 +
362 +@@ -736,20 +741,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
363 + continue;
364 +
365 + if (ret == H_TOO_HARD)
366 +- goto invalidate_exit;
367 ++ return ret;
368 +
369 + WARN_ON_ONCE(1);
370 +- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
371 ++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
372 + }
373 + }
374 +
375 + for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
376 + kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
377 +
378 +-invalidate_exit:
379 +- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
380 +- iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
381 +-
382 + return ret;
383 + }
384 + EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
385 +diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
386 +index 870b7f0c7ea56..fdeda6a9cff44 100644
387 +--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
388 ++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
389 +@@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl,
390 + tbl->it_ops->tce_kill(tbl, entry, pages, true);
391 + }
392 +
393 +-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
394 +- unsigned long entry)
395 ++static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
396 ++ struct iommu_table *tbl, unsigned long entry)
397 + {
398 +- unsigned long hpa = 0;
399 +- enum dma_data_direction dir = DMA_NONE;
400 ++ unsigned long i;
401 ++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
402 ++ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
403 ++
404 ++ for (i = 0; i < subpages; ++i) {
405 ++ unsigned long hpa = 0;
406 ++ enum dma_data_direction dir = DMA_NONE;
407 +
408 +- iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
409 ++ iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
410 ++ }
411 + }
412 +
413 + static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
414 +@@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
415 + break;
416 + }
417 +
418 ++ iommu_tce_kill_rm(tbl, io_entry, subpages);
419 ++
420 + return ret;
421 + }
422 +
423 +@@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
424 + break;
425 + }
426 +
427 ++ iommu_tce_kill_rm(tbl, io_entry, subpages);
428 ++
429 + return ret;
430 + }
431 +
432 +@@ -420,10 +430,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
433 + ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
434 + stit->tbl, entry, ua, dir);
435 +
436 +- iommu_tce_kill_rm(stit->tbl, entry, 1);
437 +-
438 + if (ret != H_SUCCESS) {
439 +- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
440 ++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
441 + return ret;
442 + }
443 + }
444 +@@ -561,7 +569,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
445 + ua = 0;
446 + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
447 + ret = H_PARAMETER;
448 +- goto invalidate_exit;
449 ++ goto unlock_exit;
450 + }
451 +
452 + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
453 +@@ -570,19 +578,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
454 + iommu_tce_direction(tce));
455 +
456 + if (ret != H_SUCCESS) {
457 +- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
458 +- entry);
459 +- goto invalidate_exit;
460 ++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
461 ++ entry + i);
462 ++ goto unlock_exit;
463 + }
464 + }
465 +
466 + kvmppc_rm_tce_put(stt, entry + i, tce);
467 + }
468 +
469 +-invalidate_exit:
470 +- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
471 +- iommu_tce_kill_rm(stit->tbl, entry, npages);
472 +-
473 + unlock_exit:
474 + if (!prereg)
475 + arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
476 +@@ -620,20 +624,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
477 + continue;
478 +
479 + if (ret == H_TOO_HARD)
480 +- goto invalidate_exit;
481 ++ return ret;
482 +
483 + WARN_ON_ONCE_RM(1);
484 +- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
485 ++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
486 + }
487 + }
488 +
489 + for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
490 + kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
491 +
492 +-invalidate_exit:
493 +- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
494 +- iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
495 +-
496 + return ret;
497 + }
498 +
499 +diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
500 +index 9dd75f3858372..07ca62d084d9d 100644
501 +--- a/arch/powerpc/perf/power10-pmu.c
502 ++++ b/arch/powerpc/perf/power10-pmu.c
503 +@@ -91,8 +91,8 @@ extern u64 PERF_REG_EXTENDED_MASK;
504 +
505 + /* Table of alternatives, sorted by column 0 */
506 + static const unsigned int power10_event_alternatives[][MAX_ALT] = {
507 +- { PM_CYC_ALT, PM_CYC },
508 + { PM_INST_CMPL_ALT, PM_INST_CMPL },
509 ++ { PM_CYC_ALT, PM_CYC },
510 + };
511 +
512 + static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
513 +diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
514 +index ff3382140d7e6..cbdd074ee2a70 100644
515 +--- a/arch/powerpc/perf/power9-pmu.c
516 ++++ b/arch/powerpc/perf/power9-pmu.c
517 +@@ -133,11 +133,11 @@ int p9_dd22_bl_ev[] = {
518 +
519 + /* Table of alternatives, sorted by column 0 */
520 + static const unsigned int power9_event_alternatives[][MAX_ALT] = {
521 +- { PM_INST_DISP, PM_INST_DISP_ALT },
522 +- { PM_RUN_CYC_ALT, PM_RUN_CYC },
523 +- { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
524 +- { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
525 + { PM_BR_2PATH, PM_BR_2PATH_ALT },
526 ++ { PM_INST_DISP, PM_INST_DISP_ALT },
527 ++ { PM_RUN_CYC_ALT, PM_RUN_CYC },
528 ++ { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
529 ++ { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
530 + };
531 +
532 + static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
533 +diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
534 +index 7516e4199b3c6..20fd0acd7d800 100644
535 +--- a/arch/x86/include/asm/compat.h
536 ++++ b/arch/x86/include/asm/compat.h
537 +@@ -28,15 +28,13 @@ typedef u16 compat_ipc_pid_t;
538 + typedef __kernel_fsid_t compat_fsid_t;
539 +
540 + struct compat_stat {
541 +- compat_dev_t st_dev;
542 +- u16 __pad1;
543 ++ u32 st_dev;
544 + compat_ino_t st_ino;
545 + compat_mode_t st_mode;
546 + compat_nlink_t st_nlink;
547 + __compat_uid_t st_uid;
548 + __compat_gid_t st_gid;
549 +- compat_dev_t st_rdev;
550 +- u16 __pad2;
551 ++ u32 st_rdev;
552 + u32 st_size;
553 + u32 st_blksize;
554 + u32 st_blocks;
555 +diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
556 +index a06d95165ac7c..c206decb39fab 100644
557 +--- a/arch/x86/kvm/pmu.h
558 ++++ b/arch/x86/kvm/pmu.h
559 +@@ -141,6 +141,15 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
560 + return sample_period;
561 + }
562 +
563 ++static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
564 ++{
565 ++ if (!pmc->perf_event || pmc->is_paused)
566 ++ return;
567 ++
568 ++ perf_event_period(pmc->perf_event,
569 ++ get_sample_period(pmc, pmc->counter));
570 ++}
571 ++
572 + void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
573 + void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
574 + void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
575 +diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
576 +index 3faf1d9c6c91c..f337ce7e898e3 100644
577 +--- a/arch/x86/kvm/svm/pmu.c
578 ++++ b/arch/x86/kvm/svm/pmu.c
579 +@@ -256,6 +256,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
580 + pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
581 + if (pmc) {
582 + pmc->counter += data - pmc_read_counter(pmc);
583 ++ pmc_update_sample_period(pmc);
584 + return 0;
585 + }
586 + /* MSR_EVNTSELn */
587 +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
588 +index 134c4ea5e6ad8..c8c3212250618 100644
589 +--- a/arch/x86/kvm/svm/sev.c
590 ++++ b/arch/x86/kvm/svm/sev.c
591 +@@ -1990,11 +1990,14 @@ static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
592 + unsigned long len)
593 + {
594 + /*
595 +- * If hardware enforced cache coherency for encrypted mappings of the
596 +- * same physical page is supported, nothing to do.
597 ++ * If CPU enforced cache coherency for encrypted mappings of the
598 ++ * same physical page is supported, use CLFLUSHOPT instead. NOTE: cache
599 ++ * flush is still needed in order to work properly with DMA devices.
600 + */
601 +- if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
602 ++ if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
603 ++ clflush_cache_range(va, PAGE_SIZE);
604 + return;
605 ++ }
606 +
607 + /*
608 + * If the VM Page Flush MSR is supported, use it to flush the page
609 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
610 +index a0193b11c381d..1546a10ecb564 100644
611 +--- a/arch/x86/kvm/vmx/nested.c
612 ++++ b/arch/x86/kvm/vmx/nested.c
613 +@@ -4601,6 +4601,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
614 + kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
615 + }
616 +
617 ++ if (vmx->nested.update_vmcs01_apicv_status) {
618 ++ vmx->nested.update_vmcs01_apicv_status = false;
619 ++ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
620 ++ }
621 ++
622 + if ((vm_exit_reason != -1) &&
623 + (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
624 + vmx->nested.need_vmcs12_to_shadow_sync = true;
625 +diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
626 +index 7abe77c8b5d03..e7275ce15a8b0 100644
627 +--- a/arch/x86/kvm/vmx/pmu_intel.c
628 ++++ b/arch/x86/kvm/vmx/pmu_intel.c
629 +@@ -439,15 +439,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
630 + !(msr & MSR_PMC_FULL_WIDTH_BIT))
631 + data = (s64)(s32)data;
632 + pmc->counter += data - pmc_read_counter(pmc);
633 +- if (pmc->perf_event && !pmc->is_paused)
634 +- perf_event_period(pmc->perf_event,
635 +- get_sample_period(pmc, data));
636 ++ pmc_update_sample_period(pmc);
637 + return 0;
638 + } else if ((pmc = get_fixed_pmc(pmu, msr))) {
639 + pmc->counter += data - pmc_read_counter(pmc);
640 +- if (pmc->perf_event && !pmc->is_paused)
641 +- perf_event_period(pmc->perf_event,
642 +- get_sample_period(pmc, data));
643 ++ pmc_update_sample_period(pmc);
644 + return 0;
645 + } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
646 + if (data == pmc->eventsel)
647 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
648 +index 322485ab9271c..16a660a0ed5f7 100644
649 +--- a/arch/x86/kvm/vmx/vmx.c
650 ++++ b/arch/x86/kvm/vmx/vmx.c
651 +@@ -4098,6 +4098,11 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
652 + {
653 + struct vcpu_vmx *vmx = to_vmx(vcpu);
654 +
655 ++ if (is_guest_mode(vcpu)) {
656 ++ vmx->nested.update_vmcs01_apicv_status = true;
657 ++ return;
658 ++ }
659 ++
660 + pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
661 + if (cpu_has_secondary_exec_ctrls()) {
662 + if (kvm_vcpu_apicv_active(vcpu))
663 +diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
664 +index 3f9c8548625d6..460c7bd8158cc 100644
665 +--- a/arch/x86/kvm/vmx/vmx.h
666 ++++ b/arch/x86/kvm/vmx/vmx.h
667 +@@ -164,6 +164,7 @@ struct nested_vmx {
668 + bool change_vmcs01_virtual_apic_mode;
669 + bool reload_vmcs01_apic_access_page;
670 + bool update_vmcs01_cpu_dirty_logging;
671 ++ bool update_vmcs01_apicv_status;
672 +
673 + /*
674 + * Enlightened VMCS has been enabled. It does not mean that L1 has to
675 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
676 +index 5b1d2f656b45a..75da9c0d5ae37 100644
677 +--- a/arch/x86/kvm/x86.c
678 ++++ b/arch/x86/kvm/x86.c
679 +@@ -10813,8 +10813,21 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
680 + r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
681 + if (r < 0)
682 + goto fail_mmu_destroy;
683 +- if (kvm_apicv_activated(vcpu->kvm))
684 ++
685 ++ /*
686 ++ * Defer evaluating inhibits until the vCPU is first run, as
687 ++ * this vCPU will not get notified of any changes until this
688 ++ * vCPU is visible to other vCPUs (marked online and added to
689 ++ * the set of vCPUs). Opportunistically mark APICv active as
690 ++ * VMX in particularly is highly unlikely to have inhibits.
691 ++ * Ignore the current per-VM APICv state so that vCPU creation
692 ++ * is guaranteed to run with a deterministic value, the request
693 ++ * will ensure the vCPU gets the correct state before VM-Entry.
694 ++ */
695 ++ if (enable_apicv) {
696 + vcpu->arch.apicv_active = true;
697 ++ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
698 ++ }
699 + } else
700 + static_branch_inc(&kvm_has_noapic_vcpu);
701 +
702 +diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
703 +index 45cc0ae0af6f9..c7b9f12896f20 100644
704 +--- a/arch/xtensa/kernel/coprocessor.S
705 ++++ b/arch/xtensa/kernel/coprocessor.S
706 +@@ -29,7 +29,7 @@
707 + .if XTENSA_HAVE_COPROCESSOR(x); \
708 + .align 4; \
709 + .Lsave_cp_regs_cp##x: \
710 +- xchal_cp##x##_store a2 a4 a5 a6 a7; \
711 ++ xchal_cp##x##_store a2 a3 a4 a5 a6; \
712 + jx a0; \
713 + .endif
714 +
715 +@@ -46,7 +46,7 @@
716 + .if XTENSA_HAVE_COPROCESSOR(x); \
717 + .align 4; \
718 + .Lload_cp_regs_cp##x: \
719 +- xchal_cp##x##_load a2 a4 a5 a6 a7; \
720 ++ xchal_cp##x##_load a2 a3 a4 a5 a6; \
721 + jx a0; \
722 + .endif
723 +
724 +diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
725 +index 0dde21e0d3de4..ad1841cecdfb7 100644
726 +--- a/arch/xtensa/kernel/jump_label.c
727 ++++ b/arch/xtensa/kernel/jump_label.c
728 +@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
729 + {
730 + struct patch *patch = data;
731 +
732 +- if (atomic_inc_return(&patch->cpu_count) == 1) {
733 ++ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
734 + local_patch_text(patch->addr, patch->data, patch->sz);
735 + atomic_inc(&patch->cpu_count);
736 + } else {
737 +diff --git a/block/bdev.c b/block/bdev.c
738 +index 485a258b0ab37..18abafb135e0b 100644
739 +--- a/block/bdev.c
740 ++++ b/block/bdev.c
741 +@@ -184,14 +184,13 @@ int sb_min_blocksize(struct super_block *sb, int size)
742 +
743 + EXPORT_SYMBOL(sb_min_blocksize);
744 +
745 +-int __sync_blockdev(struct block_device *bdev, int wait)
746 ++int sync_blockdev_nowait(struct block_device *bdev)
747 + {
748 + if (!bdev)
749 + return 0;
750 +- if (!wait)
751 +- return filemap_flush(bdev->bd_inode->i_mapping);
752 +- return filemap_write_and_wait(bdev->bd_inode->i_mapping);
753 ++ return filemap_flush(bdev->bd_inode->i_mapping);
754 + }
755 ++EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
756 +
757 + /*
758 + * Write out and wait upon all the dirty data associated with a block
759 +@@ -199,7 +198,9 @@ int __sync_blockdev(struct block_device *bdev, int wait)
760 + */
761 + int sync_blockdev(struct block_device *bdev)
762 + {
763 +- return __sync_blockdev(bdev, 1);
764 ++ if (!bdev)
765 ++ return 0;
766 ++ return filemap_write_and_wait(bdev->bd_inode->i_mapping);
767 + }
768 + EXPORT_SYMBOL(sync_blockdev);
769 +
770 +@@ -1016,7 +1017,7 @@ int __invalidate_device(struct block_device *bdev, bool kill_dirty)
771 + }
772 + EXPORT_SYMBOL(__invalidate_device);
773 +
774 +-void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
775 ++void sync_bdevs(bool wait)
776 + {
777 + struct inode *inode, *old_inode = NULL;
778 +
779 +@@ -1047,8 +1048,19 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
780 + bdev = I_BDEV(inode);
781 +
782 + mutex_lock(&bdev->bd_disk->open_mutex);
783 +- if (bdev->bd_openers)
784 +- func(bdev, arg);
785 ++ if (!bdev->bd_openers) {
786 ++ ; /* skip */
787 ++ } else if (wait) {
788 ++ /*
789 ++ * We keep the error status of individual mapping so
790 ++ * that applications can catch the writeback error using
791 ++ * fsync(2). See filemap_fdatawait_keep_errors() for
792 ++ * details.
793 ++ */
794 ++ filemap_fdatawait_keep_errors(inode->i_mapping);
795 ++ } else {
796 ++ filemap_fdatawrite(inode->i_mapping);
797 ++ }
798 + mutex_unlock(&bdev->bd_disk->open_mutex);
799 +
800 + spin_lock(&blockdev_superblock->s_inode_list_lock);
801 +diff --git a/block/ioctl.c b/block/ioctl.c
802 +index a31be7fa31a51..cd506a9029630 100644
803 +--- a/block/ioctl.c
804 ++++ b/block/ioctl.c
805 +@@ -645,7 +645,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
806 + (bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
807 + case BLKGETSIZE:
808 + size = i_size_read(bdev->bd_inode);
809 +- if ((size >> 9) > ~0UL)
810 ++ if ((size >> 9) > ~(compat_ulong_t)0)
811 + return -EFBIG;
812 + return compat_put_ulong(argp, size >> 9);
813 +
814 +diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
815 +index 361597d14c569..d45a75bfc0169 100644
816 +--- a/drivers/ata/pata_marvell.c
817 ++++ b/drivers/ata/pata_marvell.c
818 +@@ -83,6 +83,8 @@ static int marvell_cable_detect(struct ata_port *ap)
819 + switch(ap->port_no)
820 + {
821 + case 0:
822 ++ if (!ap->ioaddr.bmdma_addr)
823 ++ return ATA_CBL_PATA_UNK;
824 + if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
825 + return ATA_CBL_PATA40;
826 + return ATA_CBL_PATA80;
827 +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
828 +index 8177aed160060..177a537971a1b 100644
829 +--- a/drivers/dma/at_xdmac.c
830 ++++ b/drivers/dma/at_xdmac.c
831 +@@ -1450,7 +1450,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
832 + {
833 + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
834 + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
835 +- struct at_xdmac_desc *desc, *_desc;
836 ++ struct at_xdmac_desc *desc, *_desc, *iter;
837 + struct list_head *descs_list;
838 + enum dma_status ret;
839 + int residue, retry;
840 +@@ -1565,11 +1565,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
841 + * microblock.
842 + */
843 + descs_list = &desc->descs_list;
844 +- list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
845 +- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
846 +- residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
847 +- if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
848 ++ list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
849 ++ dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
850 ++ residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
851 ++ if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
852 ++ desc = iter;
853 + break;
854 ++ }
855 + }
856 + residue += cur_ubc << dwidth;
857 +
858 +diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
859 +index 329fc2e57b703..b5b8f8181e776 100644
860 +--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
861 ++++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
862 +@@ -415,8 +415,11 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
863 + (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
864 + /* Linked list */
865 + #ifdef CONFIG_64BIT
866 +- SET_CH_64(dw, chan->dir, chan->id, llp.reg,
867 +- chunk->ll_region.paddr);
868 ++ /* llp is not aligned on 64bit -> keep 32bit accesses */
869 ++ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
870 ++ lower_32_bits(chunk->ll_region.paddr));
871 ++ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
872 ++ upper_32_bits(chunk->ll_region.paddr));
873 + #else /* CONFIG_64BIT */
874 + SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
875 + lower_32_bits(chunk->ll_region.paddr));
876 +diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
877 +index 7bd9ac1e93b29..e622245c93804 100644
878 +--- a/drivers/dma/idxd/device.c
879 ++++ b/drivers/dma/idxd/device.c
880 +@@ -406,7 +406,6 @@ static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
881 + {
882 + lockdep_assert_held(&wq->wq_lock);
883 +
884 +- idxd_wq_disable_cleanup(wq);
885 + wq->size = 0;
886 + wq->group = NULL;
887 + }
888 +@@ -723,14 +722,17 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
889 +
890 + if (wq->state == IDXD_WQ_ENABLED) {
891 + idxd_wq_disable_cleanup(wq);
892 +- idxd_wq_device_reset_cleanup(wq);
893 + wq->state = IDXD_WQ_DISABLED;
894 + }
895 ++ idxd_wq_device_reset_cleanup(wq);
896 + }
897 + }
898 +
899 + void idxd_device_clear_state(struct idxd_device *idxd)
900 + {
901 ++ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
902 ++ return;
903 ++
904 + idxd_groups_clear_state(idxd);
905 + idxd_engines_clear_state(idxd);
906 + idxd_device_wqs_clear_state(idxd);
907 +diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
908 +index 999ce13a93adc..33d94c67fedb9 100644
909 +--- a/drivers/dma/idxd/sysfs.c
910 ++++ b/drivers/dma/idxd/sysfs.c
911 +@@ -842,6 +842,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr
912 + u64 xfer_size;
913 + int rc;
914 +
915 ++ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
916 ++ return -EPERM;
917 ++
918 + if (wq->state != IDXD_WQ_DISABLED)
919 + return -EPERM;
920 +
921 +@@ -876,6 +879,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
922 + u64 batch_size;
923 + int rc;
924 +
925 ++ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
926 ++ return -EPERM;
927 ++
928 + if (wq->state != IDXD_WQ_DISABLED)
929 + return -EPERM;
930 +
931 +diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
932 +index cacc725ca5459..2300d965a3f44 100644
933 +--- a/drivers/dma/imx-sdma.c
934 ++++ b/drivers/dma/imx-sdma.c
935 +@@ -198,12 +198,12 @@ struct sdma_script_start_addrs {
936 + s32 per_2_firi_addr;
937 + s32 mcu_2_firi_addr;
938 + s32 uart_2_per_addr;
939 +- s32 uart_2_mcu_ram_addr;
940 ++ s32 uart_2_mcu_addr;
941 + s32 per_2_app_addr;
942 + s32 mcu_2_app_addr;
943 + s32 per_2_per_addr;
944 + s32 uartsh_2_per_addr;
945 +- s32 uartsh_2_mcu_ram_addr;
946 ++ s32 uartsh_2_mcu_addr;
947 + s32 per_2_shp_addr;
948 + s32 mcu_2_shp_addr;
949 + s32 ata_2_mcu_addr;
950 +@@ -232,8 +232,8 @@ struct sdma_script_start_addrs {
951 + s32 mcu_2_ecspi_addr;
952 + s32 mcu_2_sai_addr;
953 + s32 sai_2_mcu_addr;
954 +- s32 uart_2_mcu_addr;
955 +- s32 uartsh_2_mcu_addr;
956 ++ s32 uart_2_mcu_rom_addr;
957 ++ s32 uartsh_2_mcu_rom_addr;
958 + /* End of v3 array */
959 + s32 mcu_2_zqspi_addr;
960 + /* End of v4 array */
961 +@@ -1780,17 +1780,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
962 + saddr_arr[i] = addr_arr[i];
963 +
964 + /*
965 +- * get uart_2_mcu_addr/uartsh_2_mcu_addr rom script specially because
966 +- * they are now replaced by uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr
967 +- * to be compatible with legacy freescale/nxp sdma firmware, and they
968 +- * are located in the bottom part of sdma_script_start_addrs which are
969 +- * beyond the SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1.
970 ++ * For compatibility with NXP internal legacy kernel before 4.19 which
971 ++ * is based on uart ram script and mainline kernel based on uart rom
972 ++ * script, both uart ram/rom scripts are present in newer sdma
973 ++ * firmware. Use the rom versions if they are present (V3 or newer).
974 + */
975 +- if (addr->uart_2_mcu_addr)
976 +- sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr;
977 +- if (addr->uartsh_2_mcu_addr)
978 +- sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr;
979 +-
980 ++ if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
981 ++ if (addr->uart_2_mcu_rom_addr)
982 ++ sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
983 ++ if (addr->uartsh_2_mcu_rom_addr)
984 ++ sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
985 ++ }
986 + }
987 +
988 + static void sdma_load_firmware(const struct firmware *fw, void *context)
989 +@@ -1869,7 +1869,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
990 + u32 reg, val, shift, num_map, i;
991 + int ret = 0;
992 +
993 +- if (IS_ERR(np) || IS_ERR(gpr_np))
994 ++ if (IS_ERR(np) || !gpr_np)
995 + goto out;
996 +
997 + event_remap = of_find_property(np, propname, NULL);
998 +@@ -1917,7 +1917,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
999 + }
1000 +
1001 + out:
1002 +- if (!IS_ERR(gpr_np))
1003 ++ if (gpr_np)
1004 + of_node_put(gpr_np);
1005 +
1006 + return ret;
1007 +diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
1008 +index 375e7e647df6b..a1517ef1f4a01 100644
1009 +--- a/drivers/dma/mediatek/mtk-uart-apdma.c
1010 ++++ b/drivers/dma/mediatek/mtk-uart-apdma.c
1011 +@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
1012 + unsigned int status;
1013 + int ret;
1014 +
1015 +- ret = pm_runtime_get_sync(mtkd->ddev.dev);
1016 ++ ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
1017 + if (ret < 0) {
1018 + pm_runtime_put_noidle(chan->device->dev);
1019 + return ret;
1020 +@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
1021 + ret = readx_poll_timeout(readl, c->base + VFF_EN,
1022 + status, !status, 10, 100);
1023 + if (ret)
1024 +- return ret;
1025 ++ goto err_pm;
1026 +
1027 + ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
1028 + IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
1029 + if (ret < 0) {
1030 + dev_err(chan->device->dev, "Can't request dma IRQ\n");
1031 +- return -EINVAL;
1032 ++ ret = -EINVAL;
1033 ++ goto err_pm;
1034 + }
1035 +
1036 + if (mtkd->support_33bits)
1037 + mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
1038 +
1039 ++err_pm:
1040 ++ pm_runtime_put_noidle(mtkd->ddev.dev);
1041 + return ret;
1042 + }
1043 +
1044 +diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
1045 +index a5486d86fdd2f..8557781bb8dce 100644
1046 +--- a/drivers/edac/synopsys_edac.c
1047 ++++ b/drivers/edac/synopsys_edac.c
1048 +@@ -163,6 +163,11 @@
1049 + #define ECC_STAT_CECNT_SHIFT 8
1050 + #define ECC_STAT_BITNUM_MASK 0x7F
1051 +
1052 ++/* ECC error count register definitions */
1053 ++#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
1054 ++#define ECC_ERRCNT_UECNT_SHIFT 16
1055 ++#define ECC_ERRCNT_CECNT_MASK 0xFFFF
1056 ++
1057 + /* DDR QOS Interrupt register definitions */
1058 + #define DDR_QOS_IRQ_STAT_OFST 0x20200
1059 + #define DDR_QOSUE_MASK 0x4
1060 +@@ -418,15 +423,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
1061 + base = priv->baseaddr;
1062 + p = &priv->stat;
1063 +
1064 ++ regval = readl(base + ECC_ERRCNT_OFST);
1065 ++ p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
1066 ++ p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
1067 ++ if (!p->ce_cnt)
1068 ++ goto ue_err;
1069 ++
1070 + regval = readl(base + ECC_STAT_OFST);
1071 + if (!regval)
1072 + return 1;
1073 +
1074 +- p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
1075 +- p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
1076 +- if (!p->ce_cnt)
1077 +- goto ue_err;
1078 +-
1079 + p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
1080 +
1081 + regval = readl(base + ECC_CEADDR0_OFST);
1082 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1083 +index 91628edad2c67..320baed949ee8 100644
1084 +--- a/drivers/gpio/gpiolib.c
1085 ++++ b/drivers/gpio/gpiolib.c
1086 +@@ -1560,8 +1560,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
1087 +
1088 + gpiochip_set_irq_hooks(gc);
1089 +
1090 +- acpi_gpiochip_request_interrupts(gc);
1091 +-
1092 + /*
1093 + * Using barrier() here to prevent compiler from reordering
1094 + * gc->irq.initialized before initialization of above
1095 +@@ -1571,6 +1569,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
1096 +
1097 + gc->irq.initialized = true;
1098 +
1099 ++ acpi_gpiochip_request_interrupts(gc);
1100 ++
1101 + return 0;
1102 + }
1103 +
1104 +diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
1105 +index 1b0daf649e823..a3d0c57ec0f0b 100644
1106 +--- a/drivers/gpu/drm/i915/display/intel_psr.c
1107 ++++ b/drivers/gpu/drm/i915/display/intel_psr.c
1108 +@@ -936,6 +936,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1109 + return false;
1110 + }
1111 +
1112 ++ /* Wa_16011303918:adl-p */
1113 ++ if (crtc_state->vrr.enable &&
1114 ++ IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1115 ++ drm_dbg_kms(&dev_priv->drm,
1116 ++ "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1117 ++ return false;
1118 ++ }
1119 ++
1120 ++ if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1121 ++ drm_dbg_kms(&dev_priv->drm,
1122 ++ "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
1123 ++ return false;
1124 ++ }
1125 ++
1126 + if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1127 + if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1128 + !HAS_PSR_HW_TRACKING(dev_priv)) {
1129 +@@ -949,12 +963,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1130 + if (!crtc_state->enable_psr2_sel_fetch &&
1131 + IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
1132 + drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
1133 +- return false;
1134 ++ goto unsupported;
1135 + }
1136 +
1137 + if (!psr2_granularity_check(intel_dp, crtc_state)) {
1138 + drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1139 +- return false;
1140 ++ goto unsupported;
1141 + }
1142 +
1143 + if (!crtc_state->enable_psr2_sel_fetch &&
1144 +@@ -963,25 +977,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1145 + "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1146 + crtc_hdisplay, crtc_vdisplay,
1147 + psr_max_h, psr_max_v);
1148 +- return false;
1149 +- }
1150 +-
1151 +- if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1152 +- drm_dbg_kms(&dev_priv->drm,
1153 +- "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
1154 +- return false;
1155 +- }
1156 +-
1157 +- /* Wa_16011303918:adl-p */
1158 +- if (crtc_state->vrr.enable &&
1159 +- IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1160 +- drm_dbg_kms(&dev_priv->drm,
1161 +- "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1162 +- return false;
1163 ++ goto unsupported;
1164 + }
1165 +
1166 + tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1167 + return true;
1168 ++
1169 ++unsupported:
1170 ++ crtc_state->enable_psr2_sel_fetch = false;
1171 ++ return false;
1172 + }
1173 +
1174 + void intel_psr_compute_config(struct intel_dp *intel_dp,
1175 +diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
1176 +index c6b69afcbac89..50e854207c70a 100644
1177 +--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
1178 ++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
1179 +@@ -90,7 +90,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
1180 + __drm_atomic_helper_plane_destroy_state(plane->state);
1181 +
1182 + kfree(to_mdp5_plane_state(plane->state));
1183 ++ plane->state = NULL;
1184 + mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
1185 ++ if (!mdp5_state)
1186 ++ return;
1187 +
1188 + if (plane->type == DRM_PLANE_TYPE_PRIMARY)
1189 + mdp5_state->base.zpos = STAGE_BASE;
1190 +diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
1191 +index cabe15190ec18..369e57f73a470 100644
1192 +--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
1193 ++++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
1194 +@@ -169,6 +169,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
1195 + va_list va;
1196 +
1197 + new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
1198 ++ if (!new_blk)
1199 ++ return;
1200 +
1201 + va_start(va, fmt);
1202 +
1203 +diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
1204 +index 46029c5610c80..145047e193946 100644
1205 +--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
1206 ++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
1207 +@@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
1208 +
1209 + ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
1210 + if (ret)
1211 +- dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
1212 ++ dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
1213 + }
1214 +
1215 + static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
1216 +@@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
1217 + return 0;
1218 + }
1219 +
1220 +-static int rpi_touchscreen_enable(struct drm_panel *panel)
1221 ++static int rpi_touchscreen_prepare(struct drm_panel *panel)
1222 + {
1223 + struct rpi_touchscreen *ts = panel_to_ts(panel);
1224 + int i;
1225 +@@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
1226 + rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
1227 + msleep(100);
1228 +
1229 ++ return 0;
1230 ++}
1231 ++
1232 ++static int rpi_touchscreen_enable(struct drm_panel *panel)
1233 ++{
1234 ++ struct rpi_touchscreen *ts = panel_to_ts(panel);
1235 ++
1236 + /* Turn on the backlight. */
1237 + rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
1238 +
1239 +@@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
1240 + static const struct drm_panel_funcs rpi_touchscreen_funcs = {
1241 + .disable = rpi_touchscreen_disable,
1242 + .unprepare = rpi_touchscreen_noop,
1243 +- .prepare = rpi_touchscreen_noop,
1244 ++ .prepare = rpi_touchscreen_prepare,
1245 + .enable = rpi_touchscreen_enable,
1246 + .get_modes = rpi_touchscreen_get_modes,
1247 + };
1248 +diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
1249 +index d09c1ea60c04e..ca85063166609 100644
1250 +--- a/drivers/gpu/drm/vc4/vc4_dsi.c
1251 ++++ b/drivers/gpu/drm/vc4/vc4_dsi.c
1252 +@@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
1253 + unsigned long phy_clock;
1254 + int ret;
1255 +
1256 +- ret = pm_runtime_get_sync(dev);
1257 ++ ret = pm_runtime_resume_and_get(dev);
1258 + if (ret) {
1259 + DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
1260 + return;
1261 +diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
1262 +index 43375b38ee592..8a7ce41b8c56e 100644
1263 +--- a/drivers/input/keyboard/omap4-keypad.c
1264 ++++ b/drivers/input/keyboard/omap4-keypad.c
1265 +@@ -393,7 +393,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
1266 + * revision register.
1267 + */
1268 + error = pm_runtime_get_sync(dev);
1269 +- if (error) {
1270 ++ if (error < 0) {
1271 + dev_err(dev, "pm_runtime_get_sync() failed\n");
1272 + pm_runtime_put_noidle(dev);
1273 + return error;
1274 +diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
1275 +index 412ae3e43ffb7..35ac6fe7529c5 100644
1276 +--- a/drivers/net/ethernet/Kconfig
1277 ++++ b/drivers/net/ethernet/Kconfig
1278 +@@ -34,15 +34,6 @@ source "drivers/net/ethernet/apple/Kconfig"
1279 + source "drivers/net/ethernet/aquantia/Kconfig"
1280 + source "drivers/net/ethernet/arc/Kconfig"
1281 + source "drivers/net/ethernet/atheros/Kconfig"
1282 +-source "drivers/net/ethernet/broadcom/Kconfig"
1283 +-source "drivers/net/ethernet/brocade/Kconfig"
1284 +-source "drivers/net/ethernet/cadence/Kconfig"
1285 +-source "drivers/net/ethernet/calxeda/Kconfig"
1286 +-source "drivers/net/ethernet/cavium/Kconfig"
1287 +-source "drivers/net/ethernet/chelsio/Kconfig"
1288 +-source "drivers/net/ethernet/cirrus/Kconfig"
1289 +-source "drivers/net/ethernet/cisco/Kconfig"
1290 +-source "drivers/net/ethernet/cortina/Kconfig"
1291 +
1292 + config CX_ECAT
1293 + tristate "Beckhoff CX5020 EtherCAT master support"
1294 +@@ -56,6 +47,14 @@ config CX_ECAT
1295 + To compile this driver as a module, choose M here. The module
1296 + will be called ec_bhf.
1297 +
1298 ++source "drivers/net/ethernet/broadcom/Kconfig"
1299 ++source "drivers/net/ethernet/cadence/Kconfig"
1300 ++source "drivers/net/ethernet/calxeda/Kconfig"
1301 ++source "drivers/net/ethernet/cavium/Kconfig"
1302 ++source "drivers/net/ethernet/chelsio/Kconfig"
1303 ++source "drivers/net/ethernet/cirrus/Kconfig"
1304 ++source "drivers/net/ethernet/cisco/Kconfig"
1305 ++source "drivers/net/ethernet/cortina/Kconfig"
1306 + source "drivers/net/ethernet/davicom/Kconfig"
1307 +
1308 + config DNET
1309 +@@ -82,7 +81,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
1310 + source "drivers/net/ethernet/i825xx/Kconfig"
1311 + source "drivers/net/ethernet/ibm/Kconfig"
1312 + source "drivers/net/ethernet/intel/Kconfig"
1313 +-source "drivers/net/ethernet/microsoft/Kconfig"
1314 + source "drivers/net/ethernet/xscale/Kconfig"
1315 +
1316 + config JME
1317 +@@ -125,8 +123,9 @@ source "drivers/net/ethernet/mediatek/Kconfig"
1318 + source "drivers/net/ethernet/mellanox/Kconfig"
1319 + source "drivers/net/ethernet/micrel/Kconfig"
1320 + source "drivers/net/ethernet/microchip/Kconfig"
1321 +-source "drivers/net/ethernet/moxa/Kconfig"
1322 + source "drivers/net/ethernet/mscc/Kconfig"
1323 ++source "drivers/net/ethernet/microsoft/Kconfig"
1324 ++source "drivers/net/ethernet/moxa/Kconfig"
1325 + source "drivers/net/ethernet/myricom/Kconfig"
1326 +
1327 + config FEALNX
1328 +@@ -138,10 +137,10 @@ config FEALNX
1329 + Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
1330 + cards. <http://www.myson.com.tw/>
1331 +
1332 ++source "drivers/net/ethernet/ni/Kconfig"
1333 + source "drivers/net/ethernet/natsemi/Kconfig"
1334 + source "drivers/net/ethernet/neterion/Kconfig"
1335 + source "drivers/net/ethernet/netronome/Kconfig"
1336 +-source "drivers/net/ethernet/ni/Kconfig"
1337 + source "drivers/net/ethernet/8390/Kconfig"
1338 + source "drivers/net/ethernet/nvidia/Kconfig"
1339 + source "drivers/net/ethernet/nxp/Kconfig"
1340 +@@ -161,6 +160,7 @@ source "drivers/net/ethernet/packetengines/Kconfig"
1341 + source "drivers/net/ethernet/pasemi/Kconfig"
1342 + source "drivers/net/ethernet/pensando/Kconfig"
1343 + source "drivers/net/ethernet/qlogic/Kconfig"
1344 ++source "drivers/net/ethernet/brocade/Kconfig"
1345 + source "drivers/net/ethernet/qualcomm/Kconfig"
1346 + source "drivers/net/ethernet/rdc/Kconfig"
1347 + source "drivers/net/ethernet/realtek/Kconfig"
1348 +@@ -168,10 +168,10 @@ source "drivers/net/ethernet/renesas/Kconfig"
1349 + source "drivers/net/ethernet/rocker/Kconfig"
1350 + source "drivers/net/ethernet/samsung/Kconfig"
1351 + source "drivers/net/ethernet/seeq/Kconfig"
1352 +-source "drivers/net/ethernet/sfc/Kconfig"
1353 + source "drivers/net/ethernet/sgi/Kconfig"
1354 + source "drivers/net/ethernet/silan/Kconfig"
1355 + source "drivers/net/ethernet/sis/Kconfig"
1356 ++source "drivers/net/ethernet/sfc/Kconfig"
1357 + source "drivers/net/ethernet/smsc/Kconfig"
1358 + source "drivers/net/ethernet/socionext/Kconfig"
1359 + source "drivers/net/ethernet/stmicro/Kconfig"
1360 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1361 +index 9de0065f89b90..fbb1e05d58783 100644
1362 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1363 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1364 +@@ -480,8 +480,8 @@ int aq_nic_start(struct aq_nic_s *self)
1365 + if (err < 0)
1366 + goto err_exit;
1367 +
1368 +- for (i = 0U, aq_vec = self->aq_vec[0];
1369 +- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
1370 ++ for (i = 0U; self->aq_vecs > i; ++i) {
1371 ++ aq_vec = self->aq_vec[i];
1372 + err = aq_vec_start(aq_vec);
1373 + if (err < 0)
1374 + goto err_exit;
1375 +@@ -511,8 +511,8 @@ int aq_nic_start(struct aq_nic_s *self)
1376 + mod_timer(&self->polling_timer, jiffies +
1377 + AQ_CFG_POLLING_TIMER_INTERVAL);
1378 + } else {
1379 +- for (i = 0U, aq_vec = self->aq_vec[0];
1380 +- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
1381 ++ for (i = 0U; self->aq_vecs > i; ++i) {
1382 ++ aq_vec = self->aq_vec[i];
1383 + err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
1384 + aq_vec_isr, aq_vec,
1385 + aq_vec_get_affinity_mask(aq_vec));
1386 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
1387 +index 797a95142d1f4..3a529ee8c8340 100644
1388 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
1389 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
1390 +@@ -444,22 +444,22 @@ err_exit:
1391 +
1392 + static int aq_pm_freeze(struct device *dev)
1393 + {
1394 +- return aq_suspend_common(dev, false);
1395 ++ return aq_suspend_common(dev, true);
1396 + }
1397 +
1398 + static int aq_pm_suspend_poweroff(struct device *dev)
1399 + {
1400 +- return aq_suspend_common(dev, true);
1401 ++ return aq_suspend_common(dev, false);
1402 + }
1403 +
1404 + static int aq_pm_thaw(struct device *dev)
1405 + {
1406 +- return atl_resume_common(dev, false);
1407 ++ return atl_resume_common(dev, true);
1408 + }
1409 +
1410 + static int aq_pm_resume_restore(struct device *dev)
1411 + {
1412 +- return atl_resume_common(dev, true);
1413 ++ return atl_resume_common(dev, false);
1414 + }
1415 +
1416 + static const struct dev_pm_ops aq_pm_ops = {
1417 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
1418 +index f4774cf051c97..6ab1f3212d246 100644
1419 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
1420 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
1421 +@@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
1422 + if (!self) {
1423 + err = -EINVAL;
1424 + } else {
1425 +- for (i = 0U, ring = self->ring[0];
1426 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
1427 ++ for (i = 0U; self->tx_rings > i; ++i) {
1428 ++ ring = self->ring[i];
1429 + u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
1430 + ring[AQ_VEC_RX_ID].stats.rx.polls++;
1431 + u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
1432 +@@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
1433 + self->aq_hw_ops = aq_hw_ops;
1434 + self->aq_hw = aq_hw;
1435 +
1436 +- for (i = 0U, ring = self->ring[0];
1437 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
1438 ++ for (i = 0U; self->tx_rings > i; ++i) {
1439 ++ ring = self->ring[i];
1440 + err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
1441 + if (err < 0)
1442 + goto err_exit;
1443 +@@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self)
1444 + unsigned int i = 0U;
1445 + int err = 0;
1446 +
1447 +- for (i = 0U, ring = self->ring[0];
1448 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
1449 ++ for (i = 0U; self->tx_rings > i; ++i) {
1450 ++ ring = self->ring[i];
1451 + err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
1452 + &ring[AQ_VEC_TX_ID]);
1453 + if (err < 0)
1454 +@@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self)
1455 + struct aq_ring_s *ring = NULL;
1456 + unsigned int i = 0U;
1457 +
1458 +- for (i = 0U, ring = self->ring[0];
1459 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
1460 ++ for (i = 0U; self->tx_rings > i; ++i) {
1461 ++ ring = self->ring[i];
1462 + self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
1463 + &ring[AQ_VEC_TX_ID]);
1464 +
1465 +@@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
1466 + if (!self)
1467 + goto err_exit;
1468 +
1469 +- for (i = 0U, ring = self->ring[0];
1470 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
1471 ++ for (i = 0U; self->tx_rings > i; ++i) {
1472 ++ ring = self->ring[i];
1473 + aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
1474 + aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
1475 + }
1476 +@@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self)
1477 + if (!self)
1478 + goto err_exit;
1479 +
1480 +- for (i = 0U, ring = self->ring[0];
1481 +- self->tx_rings > i; ++i, ring = self->ring[i]) {
1482 ++ for (i = 0U; self->tx_rings > i; ++i) {
1483 ++ ring = self->ring[i];
1484 + aq_ring_free(&ring[AQ_VEC_TX_ID]);
1485 + if (i < self->rx_rings)
1486 + aq_ring_free(&ring[AQ_VEC_RX_ID]);
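The six aq_vec.c hunks above all repair the same loop shape: the old comma-operator form computed `ring = self->ring[i]` in the increment expression, which C evaluates before the `self->tx_rings > i` test, so the final pass indexed the ring array with `i` equal to the ring count. A minimal standalone sketch of the corrected shape, with the buggy form quoted in a comment (array names and sizes here are illustrative, not the driver's):

    #include <stdio.h>

    #define NRINGS 4

    int main(void)
    {
        int storage[NRINGS] = { 10, 20, 30, 40 };
        int *rings[NRINGS];
        int *ring;
        unsigned int i;

        for (i = 0U; i < NRINGS; i++)
            rings[i] = &storage[i];

        /* Buggy shape (do not use):
         *   for (i = 0U, ring = rings[0];
         *        NRINGS > i; ++i, ring = rings[i])
         * The increment clause loads rings[i] with i == NRINGS before
         * the condition terminates the loop: an out-of-bounds read. */

        /* Fixed shape, as in the patch: test the index, then load. */
        for (i = 0U; NRINGS > i; ++i) {
            ring = rings[i];
            printf("ring %u -> %d\n", i, *ring);
        }
        return 0;
    }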
1487 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1488 +index 9705c49655ad3..217c1a0f8940b 100644
1489 +--- a/drivers/net/ethernet/cadence/macb_main.c
1490 ++++ b/drivers/net/ethernet/cadence/macb_main.c
1491 +@@ -1689,6 +1689,7 @@ static void macb_tx_restart(struct macb_queue *queue)
1492 + unsigned int head = queue->tx_head;
1493 + unsigned int tail = queue->tx_tail;
1494 + struct macb *bp = queue->bp;
1495 ++ unsigned int head_idx, tbqp;
1496 +
1497 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1498 + queue_writel(queue, ISR, MACB_BIT(TXUBR));
1499 +@@ -1696,6 +1697,13 @@ static void macb_tx_restart(struct macb_queue *queue)
1500 + if (head == tail)
1501 + return;
1502 +
1503 ++ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
1504 ++ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
1505 ++ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
1506 ++
1507 ++ if (tbqp == head_idx)
1508 ++ return;
1509 ++
1510 + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1511 + }
1512 +
1513 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
1514 +index 763d2c7b5fb1a..5750f9a56393a 100644
1515 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
1516 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
1517 +@@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
1518 + info->phc_index = -1;
1519 +
1520 + fman_node = of_get_parent(mac_node);
1521 +- if (fman_node)
1522 ++ if (fman_node) {
1523 + ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
1524 ++ of_node_put(fman_node);
1525 ++ }
1526 +
1527 +- if (ptp_node)
1528 ++ if (ptp_node) {
1529 + ptp_dev = of_find_device_by_node(ptp_node);
1530 ++ of_node_put(ptp_node);
1531 ++ }
1532 +
1533 + if (ptp_dev)
1534 + ptp = platform_get_drvdata(ptp_dev);
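The two hunks above plug device-node reference leaks: of_get_parent() and of_parse_phandle() each return a node with its refcount raised, and every successful lookup must be paired with of_node_put(). A hedged kernel-style sketch of the balanced pattern (function and property names follow the hunk; the wrapper itself is illustrative):

    #include <linux/of.h>
    #include <linux/of_platform.h>

    static struct platform_device *find_ptp_device(struct device_node *mac_node)
    {
        struct device_node *fman_node, *ptp_node = NULL;
        struct platform_device *ptp_dev = NULL;

        fman_node = of_get_parent(mac_node);          /* takes a reference */
        if (fman_node) {
            ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
            of_node_put(fman_node);                   /* balance of_get_parent() */
        }

        if (ptp_node) {
            ptp_dev = of_find_device_by_node(ptp_node);
            of_node_put(ptp_node);                    /* balance of_parse_phandle() */
        }

        return ptp_dev;
    }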
1535 +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
1536 +index d60e2016d03c6..e6c8e6d5234f8 100644
1537 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
1538 ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
1539 +@@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1540 + {
1541 + u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1542 + link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1543 +- u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
1544 +- u16 lat_enc_d = 0; /* latency decoded */
1545 ++ u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
1546 ++ u32 lat_enc_d = 0; /* latency decoded */
1547 + u16 lat_enc = 0; /* latency encoded */
1548 +
1549 + if (link) {
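Widening max_ltr_enc_d and lat_enc_d from u16 to u32 matters because the PCIe LTR decode multiplies a 10-bit value by a power-of-two scale (value << (5 * scale)), and the product easily exceeds 65535; stored in a u16 it wraps, so the driver could compare truncated latencies. A standalone sketch of the truncation (field layout per the LTR encoding; the constants are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Encoded latency: value in the low 10 bits, scale in bits
         * 10-12, decoded as value << (5 * scale). */
        uint16_t lat_enc = (2 << 10) | 1023;   /* scale=2, value=1023 */

        uint16_t dec16 = (lat_enc & 0x3ff) << (5 * ((lat_enc >> 10) & 0x7));
        uint32_t dec32 = (uint32_t)(lat_enc & 0x3ff)
                         << (5 * ((lat_enc >> 10) & 0x7));

        /* 1023 << 10 = 1047552 does not fit in 16 bits. */
        printf("u16: %u\nu32: %u\n", dec16, dec32);
        return 0;
    }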
1550 +diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
1551 +index b6807e16eea93..a0e2a404d5355 100644
1552 +--- a/drivers/net/ethernet/intel/igc/igc_i225.c
1553 ++++ b/drivers/net/ethernet/intel/igc/igc_i225.c
1554 +@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
1555 + {
1556 + u32 swfw_sync;
1557 +
1558 +- while (igc_get_hw_semaphore_i225(hw))
1559 +- ; /* Empty */
1560 ++ /* Releasing the resource requires first getting the HW semaphore.
1561 ++ * If we fail to get the semaphore, there is nothing we can do,
1562 ++ * except log an error and quit. We are not allowed to hang here
1563 ++ * indefinitely, as it may cause denial of service or system crash.
1564 ++ */
1565 ++ if (igc_get_hw_semaphore_i225(hw)) {
1566 ++ hw_dbg("Failed to release SW_FW_SYNC.\n");
1567 ++ return;
1568 ++ }
1569 +
1570 + swfw_sync = rd32(IGC_SW_FW_SYNC);
1571 + swfw_sync &= ~mask;
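The release path above used to spin forever (`while (igc_get_hw_semaphore_i225(hw)) ;`) waiting for the hardware semaphore; if firmware never surrenders it, that busy loop wedges the CPU. The fix tries once, logs, and returns, on the assumption (hedged here) that the getter already bounds its own polling internally. A standalone sketch of the shape:

    #include <stdbool.h>
    #include <stdio.h>

    /* try_acquire() stands in for igc_get_hw_semaphore_i225(), which
     * is assumed to bound its own polling with a timeout. */
    static bool try_acquire(void)
    {
        return false;   /* simulate wedged firmware */
    }

    static void release_resource(void)
    {
        if (!try_acquire()) {
            /* Old: while (!try_acquire()) ; -- unbounded spin.
             * New: log once and bail; a leaked semaphore beats a
             * hung or crashed system. */
            fprintf(stderr, "failed to release SW_FW_SYNC\n");
            return;
        }
        /* ...clear the mask bits and drop the semaphore... */
    }

    int main(void)
    {
        release_resource();
        return 0;
    }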
1572 +diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
1573 +index 40dbf4b432345..6961f65d36b9a 100644
1574 +--- a/drivers/net/ethernet/intel/igc/igc_phy.c
1575 ++++ b/drivers/net/ethernet/intel/igc/igc_phy.c
1576 +@@ -581,7 +581,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
1577 + * the lower time out
1578 + */
1579 + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
1580 +- usleep_range(500, 1000);
1581 ++ udelay(50);
1582 + mdic = rd32(IGC_MDIC);
1583 + if (mdic & IGC_MDIC_READY)
1584 + break;
1585 +@@ -638,7 +638,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
1586 + * the lower time out
1587 + */
1588 + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
1589 +- usleep_range(500, 1000);
1590 ++ udelay(50);
1591 + mdic = rd32(IGC_MDIC);
1592 + if (mdic & IGC_MDIC_READY)
1593 + break;
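Both MDIC polling loops trade usleep_range(500, 1000) for udelay(50): usleep_range() can schedule and therefore must not be called from atomic context, while udelay() busy-waits, so the poll becomes safe regardless of the caller's context, at the cost of spinning up to 50 microseconds per iteration. A hedged kernel-style sketch of the resulting loop (the helper and its callback are illustrative, not driver code):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Poll a ready bit without ever sleeping; read_ready() stands in
     * for the MDIC register read. Safe even under a spinlock. */
    static int poll_ready_atomic(bool (*read_ready)(void), unsigned int tries)
    {
        unsigned int i;

        for (i = 0; i < tries; i++) {
            udelay(50);             /* was: usleep_range(500, 1000) */
            if (read_ready())
                return 0;
        }
        return -ETIMEDOUT;
    }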
1594 +diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
1595 +index 4f9245aa79a18..8e521f99b80ae 100644
1596 +--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
1597 ++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
1598 +@@ -996,6 +996,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter)
1599 + igc_ptp_write_i225(adapter, &ts);
1600 + }
1601 +
1602 ++static void igc_ptm_stop(struct igc_adapter *adapter)
1603 ++{
1604 ++ struct igc_hw *hw = &adapter->hw;
1605 ++ u32 ctrl;
1606 ++
1607 ++ ctrl = rd32(IGC_PTM_CTRL);
1608 ++ ctrl &= ~IGC_PTM_CTRL_EN;
1609 ++
1610 ++ wr32(IGC_PTM_CTRL, ctrl);
1611 ++}
1612 ++
1613 + /**
1614 + * igc_ptp_suspend - Disable PTP work items and prepare for suspend
1615 + * @adapter: Board private structure
1616 +@@ -1013,8 +1024,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
1617 + adapter->ptp_tx_skb = NULL;
1618 + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
1619 +
1620 +- if (pci_device_is_present(adapter->pdev))
1621 ++ if (pci_device_is_present(adapter->pdev)) {
1622 + igc_ptp_time_save(adapter);
1623 ++ igc_ptm_stop(adapter);
1624 ++ }
1625 + }
1626 +
1627 + /**
1628 +diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
1629 +index 6aad0953e8fe5..a59300d9e0000 100644
1630 +--- a/drivers/net/ethernet/mscc/ocelot.c
1631 ++++ b/drivers/net/ethernet/mscc/ocelot.c
1632 +@@ -1932,6 +1932,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
1633 + val = BIT(port);
1634 +
1635 + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
1636 ++ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
1637 ++ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
1638 + }
1639 +
1640 + static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
1641 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
1642 +index a7ec9f4d46ced..d68ef72dcdde0 100644
1643 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
1644 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
1645 +@@ -71,9 +71,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
1646 + writel(value, ioaddr + PTP_TCR);
1647 +
1648 + /* wait for present system time initialize to complete */
1649 +- return readl_poll_timeout(ioaddr + PTP_TCR, value,
1650 ++ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
1651 + !(value & PTP_TCR_TSINIT),
1652 +- 10000, 100000);
1653 ++ 10, 100000);
1654 + }
1655 +
1656 + static int config_addend(void __iomem *ioaddr, u32 addend)
1657 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1658 +index 141635a35c28a..129e270e9a7cd 100644
1659 +--- a/drivers/net/vxlan.c
1660 ++++ b/drivers/net/vxlan.c
1661 +@@ -711,11 +711,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
1662 +
1663 + rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
1664 + if (rd == NULL)
1665 +- return -ENOBUFS;
1666 ++ return -ENOMEM;
1667 +
1668 + if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
1669 + kfree(rd);
1670 +- return -ENOBUFS;
1671 ++ return -ENOMEM;
1672 + }
1673 +
1674 + rd->remote_ip = *ip;
1675 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1676 +index 5d156e591b35c..f7961b22e0518 100644
1677 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1678 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1679 +@@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
1680 + BRCMF_SDIO_FT_SUB,
1681 + };
1682 +
1683 +-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
1684 ++#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
1685 +
1686 + /* SDIO Pad drive strength to select value mappings */
1687 + struct sdiod_drive_str {
1688 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
1689 +index adf288e50e212..5cd0379d86de8 100644
1690 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
1691 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
1692 +@@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1693 + mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
1694 +
1695 + /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
1696 +- mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
1697 ++ mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
1698 +
1699 + /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
1700 + mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
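The 0xfU fix here, like the SDIOD_DRVSTR_KEY() cast in the brcmfmac hunk above, removes undefined behaviour: 0xf has type int, and 0xf << 28 shifts a set bit into the sign bit of a signed int, which C leaves undefined (and UBSan reports). Making the operand unsigned keeps the shift well-defined. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
        /* Undefined: 0xf << 28  -- signed int shifted into bit 31.
         * Defined:   0xfU << 28 -- unsigned shift, modulo 2^32. */
        unsigned int mask = 0xfU << 28;

        printf("mask = %#x\n", mask);   /* 0xf0000000 */
        return 0;
    }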
1701 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1702 +index 4c35e9acf8eed..f2bb576157625 100644
1703 +--- a/drivers/nvme/host/core.c
1704 ++++ b/drivers/nvme/host/core.c
1705 +@@ -1354,6 +1354,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1706 + warn_str, cur->nidl);
1707 + return -1;
1708 + }
1709 ++ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1710 ++ return NVME_NIDT_EUI64_LEN;
1711 + memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1712 + return NVME_NIDT_EUI64_LEN;
1713 + case NVME_NIDT_NGUID:
1714 +@@ -1362,6 +1364,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1715 + warn_str, cur->nidl);
1716 + return -1;
1717 + }
1718 ++ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1719 ++ return NVME_NIDT_NGUID_LEN;
1720 + memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1721 + return NVME_NIDT_NGUID_LEN;
1722 + case NVME_NIDT_UUID:
1723 +@@ -1370,6 +1374,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1724 + warn_str, cur->nidl);
1725 + return -1;
1726 + }
1727 ++ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1728 ++ return NVME_NIDT_UUID_LEN;
1729 + uuid_copy(&ids->uuid, data + sizeof(*cur));
1730 + return NVME_NIDT_UUID_LEN;
1731 + case NVME_NIDT_CSI:
1732 +@@ -1466,12 +1472,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
1733 + if ((*id)->ncap == 0) /* namespace not allocated or attached */
1734 + goto out_free_id;
1735 +
1736 +- if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1737 +- !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
1738 +- memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
1739 +- if (ctrl->vs >= NVME_VS(1, 2, 0) &&
1740 +- !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
1741 +- memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
1742 ++
1743 ++ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
1744 ++ dev_info(ctrl->device,
1745 ++ "Ignoring bogus Namespace Identifiers\n");
1746 ++ } else {
1747 ++ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1748 ++ !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
1749 ++ memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
1750 ++ if (ctrl->vs >= NVME_VS(1, 2, 0) &&
1751 ++ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
1752 ++ memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
1753 ++ }
1754 +
1755 + return 0;
1756 +
1757 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
1758 +index 0628e2d802e73..f1e5c7564cae6 100644
1759 +--- a/drivers/nvme/host/nvme.h
1760 ++++ b/drivers/nvme/host/nvme.h
1761 +@@ -144,6 +144,11 @@ enum nvme_quirks {
1762 + * encoding the generation sequence number.
1763 + */
1764 + NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
1765 ++
1766 ++ /*
1767 ++ * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
1768 ++ */
1769 ++ NVME_QUIRK_BOGUS_NID = (1 << 18),
1770 + };
1771 +
1772 + /*
1773 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
1774 +index b925a5f4afc3a..d7695bdbde8d3 100644
1775 +--- a/drivers/nvme/host/pci.c
1776 ++++ b/drivers/nvme/host/pci.c
1777 +@@ -3314,7 +3314,10 @@ static const struct pci_device_id nvme_id_table[] = {
1778 + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
1779 + { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
1780 + .driver_data = NVME_QUIRK_IDENTIFY_CNS |
1781 +- NVME_QUIRK_DISABLE_WRITE_ZEROES, },
1782 ++ NVME_QUIRK_DISABLE_WRITE_ZEROES |
1783 ++ NVME_QUIRK_BOGUS_NID, },
1784 ++ { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
1785 ++ .driver_data = NVME_QUIRK_BOGUS_NID, },
1786 + { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
1787 + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
1788 + { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
1789 +@@ -3352,6 +3355,10 @@ static const struct pci_device_id nvme_id_table[] = {
1790 + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
1791 + { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
1792 + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
1793 ++ { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
1794 ++ .driver_data = NVME_QUIRK_BOGUS_NID, },
1795 ++ { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
1796 ++ .driver_data = NVME_QUIRK_BOGUS_NID, },
1797 + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
1798 + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
1799 + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
1800 +diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
1801 +index 295cc7952d0ed..57d20cf3da7a3 100644
1802 +--- a/drivers/perf/arm_pmu.c
1803 ++++ b/drivers/perf/arm_pmu.c
1804 +@@ -398,6 +398,9 @@ validate_group(struct perf_event *event)
1805 + if (!validate_event(event->pmu, &fake_pmu, leader))
1806 + return -EINVAL;
1807 +
1808 ++ if (event == leader)
1809 ++ return 0;
1810 ++
1811 + for_each_sibling_event(sibling, leader) {
1812 + if (!validate_event(event->pmu, &fake_pmu, sibling))
1813 + return -EINVAL;
1814 +@@ -487,12 +490,7 @@ __hw_perf_event_init(struct perf_event *event)
1815 + local64_set(&hwc->period_left, hwc->sample_period);
1816 + }
1817 +
1818 +- if (event->group_leader != event) {
1819 +- if (validate_group(event) != 0)
1820 +- return -EINVAL;
1821 +- }
1822 +-
1823 +- return 0;
1824 ++ return validate_group(event);
1825 + }
1826 +
1827 + static int armpmu_event_init(struct perf_event *event)
1828 +diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
1829 +index 7ee010aa740aa..404bdb4cbfae4 100644
1830 +--- a/drivers/platform/x86/samsung-laptop.c
1831 ++++ b/drivers/platform/x86/samsung-laptop.c
1832 +@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
1833 +
1834 + if (value > samsung->kbd_led.max_brightness)
1835 + value = samsung->kbd_led.max_brightness;
1836 +- else if (value < 0)
1837 +- value = 0;
1838 +
1839 + samsung->kbd_led_wk = value;
1840 + queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
1841 +diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
1842 +index 1e83150388506..a8dde46063602 100644
1843 +--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
1844 ++++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
1845 +@@ -121,7 +121,9 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
1846 + return dev_err_probe(dev, PTR_ERR(priv->rstc),
1847 + "failed to get reset\n");
1848 +
1849 +- reset_control_deassert(priv->rstc);
1850 ++ error = reset_control_deassert(priv->rstc);
1851 ++ if (error)
1852 ++ return error;
1853 +
1854 + priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
1855 + priv->rcdev.of_reset_n_cells = 1;
1856 +diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
1857 +index 24d3395964cc4..4c5bba52b1059 100644
1858 +--- a/drivers/reset/tegra/reset-bpmp.c
1859 ++++ b/drivers/reset/tegra/reset-bpmp.c
1860 +@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
1861 + struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
1862 + struct mrq_reset_request request;
1863 + struct tegra_bpmp_message msg;
1864 ++ int err;
1865 +
1866 + memset(&request, 0, sizeof(request));
1867 + request.cmd = command;
1868 +@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
1869 + msg.tx.data = &request;
1870 + msg.tx.size = sizeof(request);
1871 +
1872 +- return tegra_bpmp_transfer(bpmp, &msg);
1873 ++ err = tegra_bpmp_transfer(bpmp, &msg);
1874 ++ if (err)
1875 ++ return err;
1876 ++ if (msg.rx.ret)
1877 ++ return -EINVAL;
1878 ++
1879 ++ return 0;
1880 + }
1881 +
1882 + static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
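tegra_bpmp_transfer() returning zero only says the message reached the BPMP firmware; the firmware's verdict on the request travels back in msg.rx.ret and was previously dropped. The hunk checks both layers. A standalone sketch of the two error domains (struct and function names are stand-ins):

    #include <errno.h>
    #include <stdio.h>

    struct msg {
        int rx_ret;
    };

    /* Stand-in for tegra_bpmp_transfer(): returns 0 when the message
     * was delivered, and fills in the firmware's own status. */
    static int transfer(struct msg *msg)
    {
        msg->rx_ret = -1;   /* firmware rejected the request... */
        return 0;           /* ...but delivery itself succeeded */
    }

    static int do_reset(struct msg *msg)
    {
        int err = transfer(msg);

        if (err)
            return err;        /* transport-level failure */
        if (msg->rx_ret)
            return -EINVAL;    /* payload-level failure, as in the patch */
        return 0;
    }

    int main(void)
    {
        struct msg msg = { 0 };

        printf("do_reset: %d\n", do_reset(&msg));
        return 0;
    }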
1883 +diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
1884 +index 5521469ce678b..e16327a4b4c96 100644
1885 +--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
1886 ++++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
1887 +@@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1888 + if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
1889 + break;
1890 +
1891 +- if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
1892 ++ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
1893 + if (nopin->op_code == ISCSI_OP_NOOP_IN &&
1894 + nopin->itt == (u16) RESERVED_ITT) {
1895 + printk(KERN_ALERT "bnx2i: Unsolicited "
1896 +diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
1897 +index 1b5f3e143f071..2e5241d12dc3a 100644
1898 +--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
1899 ++++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
1900 +@@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1901 + struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
1902 +
1903 + /* Must suspend all rx queue activity for this ep */
1904 +- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1905 ++ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
1906 + }
1907 + /* CONN_DISCONNECT timeout may or may not be an issue depending
1908 + * on what transcribed in TCP layer, different targets behave
1909 +diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
1910 +index 8c7d4dda4cf29..4365d52c6430e 100644
1911 +--- a/drivers/scsi/cxgbi/libcxgbi.c
1912 ++++ b/drivers/scsi/cxgbi/libcxgbi.c
1913 +@@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
1914 + log_debug(1 << CXGBI_DBG_PDU_RX,
1915 + "csk 0x%p, conn 0x%p.\n", csk, conn);
1916 +
1917 +- if (unlikely(!conn || conn->suspend_rx)) {
1918 ++ if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
1919 + log_debug(1 << CXGBI_DBG_PDU_RX,
1920 +- "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
1921 ++ "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
1922 + csk, conn, conn ? conn->id : 0xFF,
1923 +- conn ? conn->suspend_rx : 0xFF);
1924 ++ conn ? conn->flags : 0xFF);
1925 + return;
1926 + }
1927 +
1928 +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
1929 +index cbc263ec9d661..0f2c7098f9d6e 100644
1930 +--- a/drivers/scsi/libiscsi.c
1931 ++++ b/drivers/scsi/libiscsi.c
1932 +@@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1933 + struct iscsi_task *task;
1934 + itt_t itt;
1935 +
1936 +- if (session->state == ISCSI_STATE_TERMINATE)
1937 ++ if (session->state == ISCSI_STATE_TERMINATE ||
1938 ++ !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
1939 + return NULL;
1940 +
1941 + if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
1942 +@@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
1943 + if (conn->stop_stage == 0)
1944 + session->state = ISCSI_STATE_FAILED;
1945 +
1946 +- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1947 +- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1948 ++ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
1949 ++ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
1950 + return true;
1951 + }
1952 +
1953 +@@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
1954 + * Do this after dropping the extra ref because if this was a requeue
1955 + * it's removed from that list and cleanup_queued_task would miss it.
1956 + */
1957 +- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1958 ++ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
1959 + /*
1960 + * Save the task and ref in case we weren't cleaning up this
1961 + * task and get woken up again.
1962 +@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
1963 + int rc = 0;
1964 +
1965 + spin_lock_bh(&conn->session->frwd_lock);
1966 +- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1967 ++ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
1968 + ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
1969 + spin_unlock_bh(&conn->session->frwd_lock);
1970 + return -ENODATA;
1971 +@@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1972 + goto fault;
1973 + }
1974 +
1975 +- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1976 ++ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
1977 + reason = FAILURE_SESSION_IN_RECOVERY;
1978 + sc->result = DID_REQUEUE << 16;
1979 + goto fault;
1980 +@@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
1981 + void iscsi_suspend_queue(struct iscsi_conn *conn)
1982 + {
1983 + spin_lock_bh(&conn->session->frwd_lock);
1984 +- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1985 ++ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
1986 + spin_unlock_bh(&conn->session->frwd_lock);
1987 + }
1988 + EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
1989 +@@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
1990 + struct Scsi_Host *shost = conn->session->host;
1991 + struct iscsi_host *ihost = shost_priv(shost);
1992 +
1993 +- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1994 ++ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
1995 + if (ihost->workq)
1996 + flush_workqueue(ihost->workq);
1997 + }
1998 +@@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1999 +
2000 + static void iscsi_start_tx(struct iscsi_conn *conn)
2001 + {
2002 +- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
2003 ++ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
2004 + iscsi_conn_queue_work(conn);
2005 + }
2006 +
2007 +@@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
2008 + iscsi_suspend_tx(conn);
2009 +
2010 + spin_lock_bh(&session->frwd_lock);
2011 ++ clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
2012 ++
2013 + if (!is_active) {
2014 + /*
2015 + * if logout timed out before userspace could even send a PDU
2016 +@@ -3312,6 +3315,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2017 + spin_lock_bh(&session->frwd_lock);
2018 + if (is_leading)
2019 + session->leadconn = conn;
2020 ++
2021 ++ set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
2022 + spin_unlock_bh(&session->frwd_lock);
2023 +
2024 + /*
2025 +@@ -3324,8 +3329,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2026 + /*
2027 + * Unblock xmitworker(), Login Phase will pass through.
2028 + */
2029 +- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2030 +- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
2031 ++ clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
2032 ++ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
2033 + return 0;
2034 + }
2035 + EXPORT_SYMBOL_GPL(iscsi_conn_bind);
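This libiscsi series collapses the old per-purpose words (conn->suspend_tx and conn->suspend_rx, each holding the single ISCSI_SUSPEND_BIT) into one conn->flags word with distinct bit numbers, and adds ISCSI_CONN_FLAG_BOUND so __iscsi_conn_send_pdu() can refuse to queue PDUs once iscsi_conn_unbind() has run. A hedged kernel-style sketch of the flags idiom (the enum and struct are illustrative, not the libiscsi definitions):

    #include <linux/bitops.h>
    #include <linux/types.h>

    enum {
        CONN_FLAG_SUSPEND_TX,   /* bit numbers, not masks */
        CONN_FLAG_SUSPEND_RX,
        CONN_FLAG_BOUND,
    };

    struct conn {
        unsigned long flags;    /* one word, atomic bitops */
    };

    static void conn_bind(struct conn *c)
    {
        set_bit(CONN_FLAG_BOUND, &c->flags);
        clear_bit(CONN_FLAG_SUSPEND_RX, &c->flags);
        clear_bit(CONN_FLAG_SUSPEND_TX, &c->flags);
    }

    static bool conn_may_queue(struct conn *c)
    {
        return test_bit(CONN_FLAG_BOUND, &c->flags) &&
               !test_bit(CONN_FLAG_SUSPEND_TX, &c->flags);
    }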
2036 +diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
2037 +index 2e9ffe3d1a55e..883005757ddb8 100644
2038 +--- a/drivers/scsi/libiscsi_tcp.c
2039 ++++ b/drivers/scsi/libiscsi_tcp.c
2040 +@@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
2041 + */
2042 + conn->last_recv = jiffies;
2043 +
2044 +- if (unlikely(conn->suspend_rx)) {
2045 ++ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
2046 + ISCSI_DBG_TCP(conn, "Rx suspended!\n");
2047 + *status = ISCSI_TCP_SUSPENDED;
2048 + return 0;
2049 +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
2050 +index c5260429c637e..04b40a6c1afff 100644
2051 +--- a/drivers/scsi/qedi/qedi_iscsi.c
2052 ++++ b/drivers/scsi/qedi/qedi_iscsi.c
2053 +@@ -859,6 +859,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
2054 + return qedi_iscsi_send_ioreq(task);
2055 + }
2056 +
2057 ++static void qedi_offload_work(struct work_struct *work)
2058 ++{
2059 ++ struct qedi_endpoint *qedi_ep =
2060 ++ container_of(work, struct qedi_endpoint, offload_work);
2061 ++ struct qedi_ctx *qedi;
2062 ++ int wait_delay = 5 * HZ;
2063 ++ int ret;
2064 ++
2065 ++ qedi = qedi_ep->qedi;
2066 ++
2067 ++ ret = qedi_iscsi_offload_conn(qedi_ep);
2068 ++ if (ret) {
2069 ++ QEDI_ERR(&qedi->dbg_ctx,
2070 ++ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
2071 ++ qedi_ep->iscsi_cid, qedi_ep, ret);
2072 ++ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
2073 ++ return;
2074 ++ }
2075 ++
2076 ++ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
2077 ++ (qedi_ep->state ==
2078 ++ EP_STATE_OFLDCONN_COMPL),
2079 ++ wait_delay);
2080 ++ if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
2081 ++ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
2082 ++ QEDI_ERR(&qedi->dbg_ctx,
2083 ++ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
2084 ++ qedi_ep->iscsi_cid, qedi_ep);
2085 ++ }
2086 ++}
2087 ++
2088 + static struct iscsi_endpoint *
2089 + qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
2090 + int non_blocking)
2091 +@@ -907,6 +938,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
2092 + }
2093 + qedi_ep = ep->dd_data;
2094 + memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
2095 ++ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
2096 + qedi_ep->state = EP_STATE_IDLE;
2097 + qedi_ep->iscsi_cid = (u32)-1;
2098 + qedi_ep->qedi = qedi;
2099 +@@ -1055,12 +1087,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
2100 + qedi_ep = ep->dd_data;
2101 + qedi = qedi_ep->qedi;
2102 +
2103 ++ flush_work(&qedi_ep->offload_work);
2104 ++
2105 + if (qedi_ep->state == EP_STATE_OFLDCONN_START)
2106 + goto ep_exit_recover;
2107 +
2108 +- if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
2109 +- flush_work(&qedi_ep->offload_work);
2110 +-
2111 + if (qedi_ep->conn) {
2112 + qedi_conn = qedi_ep->conn;
2113 + abrt_conn = qedi_conn->abrt_conn;
2114 +@@ -1234,37 +1265,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
2115 + return rc;
2116 + }
2117 +
2118 +-static void qedi_offload_work(struct work_struct *work)
2119 +-{
2120 +- struct qedi_endpoint *qedi_ep =
2121 +- container_of(work, struct qedi_endpoint, offload_work);
2122 +- struct qedi_ctx *qedi;
2123 +- int wait_delay = 5 * HZ;
2124 +- int ret;
2125 +-
2126 +- qedi = qedi_ep->qedi;
2127 +-
2128 +- ret = qedi_iscsi_offload_conn(qedi_ep);
2129 +- if (ret) {
2130 +- QEDI_ERR(&qedi->dbg_ctx,
2131 +- "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
2132 +- qedi_ep->iscsi_cid, qedi_ep, ret);
2133 +- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
2134 +- return;
2135 +- }
2136 +-
2137 +- ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
2138 +- (qedi_ep->state ==
2139 +- EP_STATE_OFLDCONN_COMPL),
2140 +- wait_delay);
2141 +- if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
2142 +- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
2143 +- QEDI_ERR(&qedi->dbg_ctx,
2144 +- "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
2145 +- qedi_ep->iscsi_cid, qedi_ep);
2146 +- }
2147 +-}
2148 +-
2149 + static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
2150 + {
2151 + struct qedi_ctx *qedi;
2152 +@@ -1380,7 +1380,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
2153 + qedi_ep->dst_addr, qedi_ep->dst_port);
2154 + }
2155 +
2156 +- INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
2157 + queue_work(qedi->offload_thread, &qedi_ep->offload_work);
2158 +
2159 + ret = 0;
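The qedi change is purely about ordering: qedi_offload_work() moves up unchanged, and INIT_WORK() moves from qedi_set_path() into qedi_ep_connect(), so the work struct is initialized the moment the endpoint exists. qedi_ep_disconnect() can then call flush_work() unconditionally instead of inferring from the endpoint state whether the work was ever set up. A sketch of the allocate-init/flush-free pairing (types and names are illustrative):

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_ep {
        struct work_struct offload_work;
    };

    static void offload_fn(struct work_struct *work)
    {
        /* ...offload the connection... */
    }

    static struct my_ep *ep_connect(void)
    {
        struct my_ep *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

        if (!ep)
            return NULL;
        /* Initialize before the ep is reachable by any teardown path. */
        INIT_WORK(&ep->offload_work, offload_fn);
        return ep;
    }

    static void ep_disconnect(struct my_ep *ep)
    {
        flush_work(&ep->offload_work);  /* now always initialized */
        kfree(ep);
    }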
2160 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
2161 +index c7b1b2e8bb02f..bcdfcb25349ad 100644
2162 +--- a/drivers/scsi/scsi_transport_iscsi.c
2163 ++++ b/drivers/scsi/scsi_transport_iscsi.c
2164 +@@ -86,6 +86,9 @@ struct iscsi_internal {
2165 + struct transport_container session_cont;
2166 + };
2167 +
2168 ++static DEFINE_IDR(iscsi_ep_idr);
2169 ++static DEFINE_MUTEX(iscsi_ep_idr_mutex);
2170 ++
2171 + static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
2172 + static struct workqueue_struct *iscsi_eh_timer_workq;
2173 +
2174 +@@ -169,6 +172,11 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
2175 + static void iscsi_endpoint_release(struct device *dev)
2176 + {
2177 + struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
2178 ++
2179 ++ mutex_lock(&iscsi_ep_idr_mutex);
2180 ++ idr_remove(&iscsi_ep_idr, ep->id);
2181 ++ mutex_unlock(&iscsi_ep_idr_mutex);
2182 ++
2183 + kfree(ep);
2184 + }
2185 +
2186 +@@ -181,7 +189,7 @@ static ssize_t
2187 + show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
2188 + {
2189 + struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
2190 +- return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
2191 ++ return sysfs_emit(buf, "%d\n", ep->id);
2192 + }
2193 + static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
2194 +
2195 +@@ -194,48 +202,32 @@ static struct attribute_group iscsi_endpoint_group = {
2196 + .attrs = iscsi_endpoint_attrs,
2197 + };
2198 +
2199 +-#define ISCSI_MAX_EPID -1
2200 +-
2201 +-static int iscsi_match_epid(struct device *dev, const void *data)
2202 +-{
2203 +- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
2204 +- const uint64_t *epid = data;
2205 +-
2206 +- return *epid == ep->id;
2207 +-}
2208 +-
2209 + struct iscsi_endpoint *
2210 + iscsi_create_endpoint(int dd_size)
2211 + {
2212 +- struct device *dev;
2213 + struct iscsi_endpoint *ep;
2214 +- uint64_t id;
2215 +- int err;
2216 +-
2217 +- for (id = 1; id < ISCSI_MAX_EPID; id++) {
2218 +- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
2219 +- iscsi_match_epid);
2220 +- if (!dev)
2221 +- break;
2222 +- else
2223 +- put_device(dev);
2224 +- }
2225 +- if (id == ISCSI_MAX_EPID) {
2226 +- printk(KERN_ERR "Too many connections. Max supported %u\n",
2227 +- ISCSI_MAX_EPID - 1);
2228 +- return NULL;
2229 +- }
2230 ++ int err, id;
2231 +
2232 + ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
2233 + if (!ep)
2234 + return NULL;
2235 +
2236 ++ mutex_lock(&iscsi_ep_idr_mutex);
2237 ++ id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
2238 ++ if (id < 0) {
2239 ++ mutex_unlock(&iscsi_ep_idr_mutex);
2240 ++ printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
2241 ++ id);
2242 ++ goto free_ep;
2243 ++ }
2244 ++ mutex_unlock(&iscsi_ep_idr_mutex);
2245 ++
2246 + ep->id = id;
2247 + ep->dev.class = &iscsi_endpoint_class;
2248 +- dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
2249 ++ dev_set_name(&ep->dev, "ep-%d", id);
2250 + err = device_register(&ep->dev);
2251 + if (err)
2252 +- goto free_ep;
2253 ++ goto free_id;
2254 +
2255 + err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
2256 + if (err)
2257 +@@ -249,6 +241,10 @@ unregister_dev:
2258 + device_unregister(&ep->dev);
2259 + return NULL;
2260 +
2261 ++free_id:
2262 ++ mutex_lock(&iscsi_ep_idr_mutex);
2263 ++ idr_remove(&iscsi_ep_idr, id);
2264 ++ mutex_unlock(&iscsi_ep_idr_mutex);
2265 + free_ep:
2266 + kfree(ep);
2267 + return NULL;
2268 +@@ -276,14 +272,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
2269 + */
2270 + struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
2271 + {
2272 +- struct device *dev;
2273 ++ struct iscsi_endpoint *ep;
2274 +
2275 +- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
2276 +- iscsi_match_epid);
2277 +- if (!dev)
2278 +- return NULL;
2279 ++ mutex_lock(&iscsi_ep_idr_mutex);
2280 ++ ep = idr_find(&iscsi_ep_idr, handle);
2281 ++ if (!ep)
2282 ++ goto unlock;
2283 +
2284 +- return iscsi_dev_to_endpoint(dev);
2285 ++ get_device(&ep->dev);
2286 ++unlock:
2287 ++ mutex_unlock(&iscsi_ep_idr_mutex);
2288 ++ return ep;
2289 + }
2290 + EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
2291 +
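The endpoint-ID rework above swaps a linear class_find_device() probe over every candidate ID for an IDR: idr_alloc() hands out the lowest free ID, idr_find() resolves a handle in one lookup, and idr_remove() in the release callback recycles it. The mutex covers all three operations, and the lookup pins a reference before unlocking so the endpoint cannot be freed under the caller. A hedged sketch of the triad (struct my_ep and get_ep_ref() are stand-ins; the patch itself uses GFP_NOIO for the allocation):

    #include <linux/idr.h>
    #include <linux/mutex.h>

    static DEFINE_IDR(ep_idr);
    static DEFINE_MUTEX(ep_idr_mutex);

    struct my_ep {
        int id;
    };

    static void get_ep_ref(struct my_ep *ep);  /* stand-in for get_device() */

    static int ep_register(struct my_ep *ep)
    {
        int id;

        mutex_lock(&ep_idr_mutex);
        id = idr_alloc(&ep_idr, ep, 0, -1, GFP_KERNEL); /* end <= 0: no cap */
        mutex_unlock(&ep_idr_mutex);
        if (id < 0)
            return id;

        ep->id = id;
        return 0;
    }

    static struct my_ep *ep_lookup(int id)
    {
        struct my_ep *ep;

        mutex_lock(&ep_idr_mutex);
        ep = idr_find(&ep_idr, id);
        if (ep)
            get_ep_ref(ep);         /* pin before dropping the lock */
        mutex_unlock(&ep_idr_mutex);

        return ep;
    }

    static void ep_unregister(struct my_ep *ep)
    {
        mutex_lock(&ep_idr_mutex);
        idr_remove(&ep_idr, ep->id);
        mutex_unlock(&ep_idr_mutex);
    }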
2292 +diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
2293 +index ddd00efc48825..fbdb5124d7f7d 100644
2294 +--- a/drivers/scsi/sr_ioctl.c
2295 ++++ b/drivers/scsi/sr_ioctl.c
2296 +@@ -41,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
2297 + int result;
2298 + unsigned char *buffer;
2299 +
2300 +- buffer = kmalloc(32, GFP_KERNEL);
2301 ++ buffer = kzalloc(32, GFP_KERNEL);
2302 + if (!buffer)
2303 + return -ENOMEM;
2304 +
2305 +@@ -55,10 +55,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
2306 + cgc.data_direction = DMA_FROM_DEVICE;
2307 +
2308 + result = sr_do_ioctl(cd, &cgc);
2309 ++ if (result)
2310 ++ goto err;
2311 +
2312 + tochdr->cdth_trk0 = buffer[2];
2313 + tochdr->cdth_trk1 = buffer[3];
2314 +
2315 ++err:
2316 + kfree(buffer);
2317 + return result;
2318 + }
2319 +@@ -71,7 +74,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
2320 + int result;
2321 + unsigned char *buffer;
2322 +
2323 +- buffer = kmalloc(32, GFP_KERNEL);
2324 ++ buffer = kzalloc(32, GFP_KERNEL);
2325 + if (!buffer)
2326 + return -ENOMEM;
2327 +
2328 +@@ -86,6 +89,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
2329 + cgc.data_direction = DMA_FROM_DEVICE;
2330 +
2331 + result = sr_do_ioctl(cd, &cgc);
2332 ++ if (result)
2333 ++ goto err;
2334 +
2335 + tocentry->cdte_ctrl = buffer[5] & 0xf;
2336 + tocentry->cdte_adr = buffer[5] >> 4;
2337 +@@ -98,6 +103,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
2338 + tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
2339 + + buffer[10]) << 8) + buffer[11];
2340 +
2341 ++err:
2342 + kfree(buffer);
2343 + return result;
2344 + }
2345 +@@ -384,7 +390,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
2346 + {
2347 + Scsi_CD *cd = cdi->handle;
2348 + struct packet_command cgc;
2349 +- char *buffer = kmalloc(32, GFP_KERNEL);
2350 ++ char *buffer = kzalloc(32, GFP_KERNEL);
2351 + int result;
2352 +
2353 + if (!buffer)
2354 +@@ -400,10 +406,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
2355 + cgc.data_direction = DMA_FROM_DEVICE;
2356 + cgc.timeout = IOCTL_TIMEOUT;
2357 + result = sr_do_ioctl(cd, &cgc);
2358 ++ if (result)
2359 ++ goto err;
2360 +
2361 + memcpy(mcn->medium_catalog_number, buffer + 9, 13);
2362 + mcn->medium_catalog_number[13] = 0;
2363 +
2364 ++err:
2365 + kfree(buffer);
2366 + return result;
2367 + }
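Each sr_ioctl helper gets the same two hardening steps: the 32-byte reply buffer is now zero-filled (kzalloc), so a short or failed transfer can never leak stale heap bytes to userspace, and sr_do_ioctl()'s result is checked before the reply is parsed, so failed commands return the error instead of decoding garbage. A sketch of the shape (the struct and issue_command() are stand-ins for the sr internals):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct toc_hdr {
        u8 trk0;
        u8 trk1;
    };

    static int issue_command(void *cd, unsigned char *buf);  /* stand-in */

    static int read_toc_header(void *cd, struct toc_hdr *out)
    {
        unsigned char *buffer;
        int result;

        buffer = kzalloc(32, GFP_KERNEL);    /* zeroed, unlike kmalloc() */
        if (!buffer)
            return -ENOMEM;

        result = issue_command(cd, buffer);  /* cf. sr_do_ioctl() */
        if (result)
            goto err;                        /* never parse a failed reply */

        out->trk0 = buffer[2];
        out->trk1 = buffer[3];
    err:
        kfree(buffer);
        return result;
    }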
2368 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2369 +index 0e4c04d3b0236..b55e0a07363f2 100644
2370 +--- a/drivers/scsi/ufs/ufshcd.c
2371 ++++ b/drivers/scsi/ufs/ufshcd.c
2372 +@@ -358,7 +358,7 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
2373 + static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
2374 + enum ufs_trace_str_t str_t)
2375 + {
2376 +- u64 lba;
2377 ++ u64 lba = 0;
2378 + u8 opcode = 0, group_id = 0;
2379 + u32 intr, doorbell;
2380 + struct ufshcd_lrb *lrbp = &hba->lrb[tag];
2381 +@@ -375,7 +375,6 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
2382 + return;
2383 +
2384 + opcode = cmd->cmnd[0];
2385 +- lba = scsi_get_lba(cmd);
2386 +
2387 + if (opcode == READ_10 || opcode == WRITE_10) {
2388 + /*
2389 +@@ -383,6 +382,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
2390 + */
2391 + transfer_len =
2392 + be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
2393 ++ lba = scsi_get_lba(cmd);
2394 + if (opcode == WRITE_10)
2395 + group_id = lrbp->cmd->cmnd[6];
2396 + } else if (opcode == UNMAP) {
2397 +@@ -390,6 +390,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
2398 + * The number of Bytes to be unmapped beginning with the lba.
2399 + */
2400 + transfer_len = blk_rq_bytes(rq);
2401 ++ lba = scsi_get_lba(cmd);
2402 + }
2403 +
2404 + intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
2405 +diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
2406 +index 92d9610df1fd8..938017a60c8ed 100644
2407 +--- a/drivers/spi/atmel-quadspi.c
2408 ++++ b/drivers/spi/atmel-quadspi.c
2409 +@@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
2410 + static bool atmel_qspi_supports_op(struct spi_mem *mem,
2411 + const struct spi_mem_op *op)
2412 + {
2413 ++ if (!spi_mem_default_supports_op(mem, op))
2414 ++ return false;
2415 ++
2416 + if (atmel_qspi_find_mode(op) < 0)
2417 + return false;
2418 +
2419 +diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
2420 +index 1a6294a06e728..75680eecd2f7d 100644
2421 +--- a/drivers/spi/spi-cadence-quadspi.c
2422 ++++ b/drivers/spi/spi-cadence-quadspi.c
2423 +@@ -1226,9 +1226,24 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
2424 + all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
2425 + !op->data.dtr;
2426 +
2427 +- /* Mixed DTR modes not supported. */
2428 +- if (!(all_true || all_false))
2429 ++ if (all_true) {
2430 ++ /* Right now we only support 8-8-8 DTR mode. */
2431 ++ if (op->cmd.nbytes && op->cmd.buswidth != 8)
2432 ++ return false;
2433 ++ if (op->addr.nbytes && op->addr.buswidth != 8)
2434 ++ return false;
2435 ++ if (op->data.nbytes && op->data.buswidth != 8)
2436 ++ return false;
2437 ++ } else if (all_false) {
2438 ++ /* Only 1-1-X ops are supported without DTR */
2439 ++ if (op->cmd.nbytes && op->cmd.buswidth > 1)
2440 ++ return false;
2441 ++ if (op->addr.nbytes && op->addr.buswidth > 1)
2442 ++ return false;
2443 ++ } else {
2444 ++ /* Mixed DTR modes are not supported. */
2445 + return false;
2446 ++ }
2447 +
2448 + if (all_true)
2449 + return spi_mem_dtr_supports_op(mem, op);
2450 +diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
2451 +index 5c93730615f8d..6d203477c04b1 100644
2452 +--- a/drivers/spi/spi-mtk-nor.c
2453 ++++ b/drivers/spi/spi-mtk-nor.c
2454 +@@ -909,7 +909,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev)
2455 +
2456 + static int __maybe_unused mtk_nor_resume(struct device *dev)
2457 + {
2458 +- return pm_runtime_force_resume(dev);
2459 ++ struct spi_controller *ctlr = dev_get_drvdata(dev);
2460 ++ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
2461 ++ int ret;
2462 ++
2463 ++ ret = pm_runtime_force_resume(dev);
2464 ++ if (ret)
2465 ++ return ret;
2466 ++
2467 ++ mtk_nor_init(sp);
2468 ++
2469 ++ return 0;
2470 + }
2471 +
2472 + static const struct dev_pm_ops mtk_nor_pm_ops = {
2473 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2474 +index 29a019cf1d5f8..8f8d281e31510 100644
2475 +--- a/fs/cifs/cifsfs.c
2476 ++++ b/fs/cifs/cifsfs.c
2477 +@@ -936,7 +936,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2478 + ssize_t rc;
2479 + struct inode *inode = file_inode(iocb->ki_filp);
2480 +
2481 +- if (iocb->ki_filp->f_flags & O_DIRECT)
2482 ++ if (iocb->ki_flags & IOCB_DIRECT)
2483 + return cifs_user_readv(iocb, iter);
2484 +
2485 + rc = cifs_revalidate_mapping(inode);
2486 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
2487 +index db981619f6c8e..a0a9878578949 100644
2488 +--- a/fs/ext4/ext4.h
2489 ++++ b/fs/ext4/ext4.h
2490 +@@ -2267,6 +2267,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
2491 + * Structure of a directory entry
2492 + */
2493 + #define EXT4_NAME_LEN 255
2494 ++/*
2495 ++ * Base length of the ext4 directory entry excluding the name length
2496 ++ */
2497 ++#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
2498 +
2499 + struct ext4_dir_entry {
2500 + __le32 inode; /* Inode number */
2501 +@@ -3027,7 +3031,7 @@ extern int ext4_inode_attach_jinode(struct inode *inode);
2502 + extern int ext4_can_truncate(struct inode *inode);
2503 + extern int ext4_truncate(struct inode *);
2504 + extern int ext4_break_layouts(struct inode *);
2505 +-extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
2506 ++extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
2507 + extern void ext4_set_inode_flags(struct inode *, bool init);
2508 + extern int ext4_alloc_da_blocks(struct inode *inode);
2509 + extern void ext4_set_aops(struct inode *inode);
2510 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
2511 +index b81c008e66755..44d00951e6092 100644
2512 +--- a/fs/ext4/extents.c
2513 ++++ b/fs/ext4/extents.c
2514 +@@ -4504,9 +4504,9 @@ retry:
2515 + return ret > 0 ? ret2 : ret;
2516 + }
2517 +
2518 +-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
2519 ++static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
2520 +
2521 +-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
2522 ++static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
2523 +
2524 + static long ext4_zero_range(struct file *file, loff_t offset,
2525 + loff_t len, int mode)
2526 +@@ -4578,6 +4578,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
2527 + /* Wait all existing dio workers, newcomers will block on i_mutex */
2528 + inode_dio_wait(inode);
2529 +
2530 ++ ret = file_modified(file);
2531 ++ if (ret)
2532 ++ goto out_mutex;
2533 ++
2534 + /* Preallocate the range including the unaligned edges */
2535 + if (partial_begin || partial_end) {
2536 + ret = ext4_alloc_file_blocks(file,
2537 +@@ -4696,7 +4700,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
2538 + ext4_fc_start_update(inode);
2539 +
2540 + if (mode & FALLOC_FL_PUNCH_HOLE) {
2541 +- ret = ext4_punch_hole(inode, offset, len);
2542 ++ ret = ext4_punch_hole(file, offset, len);
2543 + goto exit;
2544 + }
2545 +
2546 +@@ -4705,12 +4709,12 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
2547 + goto exit;
2548 +
2549 + if (mode & FALLOC_FL_COLLAPSE_RANGE) {
2550 +- ret = ext4_collapse_range(inode, offset, len);
2551 ++ ret = ext4_collapse_range(file, offset, len);
2552 + goto exit;
2553 + }
2554 +
2555 + if (mode & FALLOC_FL_INSERT_RANGE) {
2556 +- ret = ext4_insert_range(inode, offset, len);
2557 ++ ret = ext4_insert_range(file, offset, len);
2558 + goto exit;
2559 + }
2560 +
2561 +@@ -4746,6 +4750,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
2562 + /* Wait all existing dio workers, newcomers will block on i_mutex */
2563 + inode_dio_wait(inode);
2564 +
2565 ++ ret = file_modified(file);
2566 ++ if (ret)
2567 ++ goto out;
2568 ++
2569 + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
2570 + if (ret)
2571 + goto out;
2572 +@@ -5248,8 +5256,9 @@ out:
2573 + * This implements the fallocate's collapse range functionality for ext4
2574 + * Returns: 0 and non-zero on error.
2575 + */
2576 +-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
2577 ++static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
2578 + {
2579 ++ struct inode *inode = file_inode(file);
2580 + struct super_block *sb = inode->i_sb;
2581 + struct address_space *mapping = inode->i_mapping;
2582 + ext4_lblk_t punch_start, punch_stop;
2583 +@@ -5301,6 +5310,10 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
2584 + /* Wait for existing dio to complete */
2585 + inode_dio_wait(inode);
2586 +
2587 ++ ret = file_modified(file);
2588 ++ if (ret)
2589 ++ goto out_mutex;
2590 ++
2591 + /*
2592 + * Prevent page faults from reinstantiating pages we have released from
2593 + * page cache.
2594 +@@ -5394,8 +5407,9 @@ out_mutex:
2595 + * by len bytes.
2596 + * Returns 0 on success, error otherwise.
2597 + */
2598 +-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
2599 ++static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
2600 + {
2601 ++ struct inode *inode = file_inode(file);
2602 + struct super_block *sb = inode->i_sb;
2603 + struct address_space *mapping = inode->i_mapping;
2604 + handle_t *handle;
2605 +@@ -5452,6 +5466,10 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
2606 + /* Wait for existing dio to complete */
2607 + inode_dio_wait(inode);
2608 +
2609 ++ ret = file_modified(file);
2610 ++ if (ret)
2611 ++ goto out_mutex;
2612 ++
2613 + /*
2614 + * Prevent page faults from reinstantiating pages we have released from
2615 + * page cache.
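A single theme runs through the ext4 fallocate hunks: every path about to manipulate blocks (zero-range, plain preallocation, collapse, insert, and punch-hole over in inode.c) now calls file_modified() right after inode_dio_wait(). That helper strips setuid/setgid bits and updates the inode's c/mtime, which is also why these entry points were changed to take the file rather than the inode. A hedged sketch of the common prologue (not a verbatim excerpt; the real paths jump to their own error labels):

    #include <linux/fs.h>

    /* Shared shape of the fallocate prologue; inode_lock() is assumed
     * to be held by the caller, as in ext4. */
    static int fallocate_prologue(struct file *file)
    {
        struct inode *inode = file_inode(file);
        int ret;

        inode_dio_wait(inode);       /* drain in-flight direct I/O */

        ret = file_modified(file);   /* drop suid/sgid, bump c/mtime */
        if (ret)
            return ret;              /* callers goto out_mutex */

        /* ...zero / collapse / insert / punch the range... */
        return 0;
    }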
2616 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2617 +index fff52292c01ec..db73b49bd9795 100644
2618 +--- a/fs/ext4/inode.c
2619 ++++ b/fs/ext4/inode.c
2620 +@@ -3939,12 +3939,14 @@ int ext4_break_layouts(struct inode *inode)
2621 + * Returns: 0 on success or negative on failure
2622 + */
2623 +
2624 +-int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
2625 ++int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
2626 + {
2627 ++ struct inode *inode = file_inode(file);
2628 + struct super_block *sb = inode->i_sb;
2629 + ext4_lblk_t first_block, stop_block;
2630 + struct address_space *mapping = inode->i_mapping;
2631 +- loff_t first_block_offset, last_block_offset;
2632 ++ loff_t first_block_offset, last_block_offset, max_length;
2633 ++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2634 + handle_t *handle;
2635 + unsigned int credits;
2636 + int ret = 0, ret2 = 0;
2637 +@@ -3987,6 +3989,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
2638 + offset;
2639 + }
2640 +
2641 ++ /*
2642 ++ * For punch hole the length + offset needs to be within one block
2643 ++ * before last range. Adjust the length if it goes beyond that limit.
2644 ++ */
2645 ++ max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
2646 ++ if (offset + length > max_length)
2647 ++ length = max_length - offset;
2648 ++
2649 + if (offset & (sb->s_blocksize - 1) ||
2650 + (offset + length) & (sb->s_blocksize - 1)) {
2651 + /*
2652 +@@ -4002,6 +4012,10 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
2653 + /* Wait all existing dio workers, newcomers will block on i_mutex */
2654 + inode_dio_wait(inode);
2655 +
2656 ++ ret = file_modified(file);
2657 ++ if (ret)
2658 ++ goto out_mutex;
2659 ++
2660 + /*
2661 + * Prevent page faults from reinstantiating pages we have released from
2662 + * page cache.
2663 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
2664 +index 8cb5ea7ee506c..19c620118e628 100644
2665 +--- a/fs/ext4/namei.c
2666 ++++ b/fs/ext4/namei.c
2667 +@@ -1466,10 +1466,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
2668 +
2669 + de = (struct ext4_dir_entry_2 *)search_buf;
2670 + dlimit = search_buf + buf_size;
2671 +- while ((char *) de < dlimit) {
2672 ++ while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) {
2673 + /* this code is executed quadratically often */
2674 + /* do minimal checking `by hand' */
2675 +- if ((char *) de + de->name_len <= dlimit &&
2676 ++ if (de->name + de->name_len <= dlimit &&
2677 + ext4_match(dir, fname, de)) {
2678 + /* found a match - just to be sure, do
2679 + * a full check */
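The ext4_search_dir() fix tightens the walk in two ways: the cursor may not come within EXT4_BASE_DIR_LEN (the fixed header: inode, rec_len, name_len, file_type) of the buffer end, so name_len is always read from in-bounds bytes, and the name-fit test measures from de->name instead of the struct start, which previously under-counted by the header size. A standalone sketch of the header-then-name bounds discipline (the layout mirrors ext4_dir_entry_2; rec_len validation is assumed to happen elsewhere, as it does in ext4):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct dirent2 {                /* mirrors ext4_dir_entry_2 */
        uint32_t inode;
        uint16_t rec_len;
        uint8_t  name_len;
        uint8_t  file_type;
        char     name[255];
    };

    #define BASE_DIR_LEN offsetof(struct dirent2, name)

    static const struct dirent2 *search(const char *buf, size_t size,
                                        const char *want, uint8_t want_len)
    {
        const char *p = buf;
        const char *dlimit = buf + size;

        /* The fixed header must fit before name_len is dereferenced. */
        while (p < dlimit - BASE_DIR_LEN) {
            const struct dirent2 *de = (const struct dirent2 *)p;

            /* The name must fit too -- measured from de->name, not
             * from the struct start as the old code did. */
            if (de->name + de->name_len <= dlimit &&
                de->name_len == want_len &&
                memcmp(de->name, want, want_len) == 0)
                return de;

            p += de->rec_len;   /* assumed pre-validated, as in ext4 */
        }
        return NULL;
    }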
2680 +diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
2681 +index f038d578d8d8f..18977ff8e4939 100644
2682 +--- a/fs/ext4/page-io.c
2683 ++++ b/fs/ext4/page-io.c
2684 +@@ -134,8 +134,10 @@ static void ext4_finish_bio(struct bio *bio)
2685 + continue;
2686 + }
2687 + clear_buffer_async_write(bh);
2688 +- if (bio->bi_status)
2689 ++ if (bio->bi_status) {
2690 ++ set_buffer_write_io_error(bh);
2691 + buffer_io_error(bh);
2692 ++ }
2693 + } while ((bh = bh->b_this_page) != head);
2694 + spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
2695 + if (!under_io) {
2696 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2697 +index fd4d34deb9fc6..fa21d81803190 100644
2698 +--- a/fs/ext4/super.c
2699 ++++ b/fs/ext4/super.c
2700 +@@ -3697,9 +3697,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
2701 + ext4_fsblk_t first_block, last_block, b;
2702 + ext4_group_t i, ngroups = ext4_get_groups_count(sb);
2703 + int s, j, count = 0;
2704 ++ int has_super = ext4_bg_has_super(sb, grp);
2705 +
2706 + if (!ext4_has_feature_bigalloc(sb))
2707 +- return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
2708 ++ return (has_super + ext4_bg_num_gdb(sb, grp) +
2709 ++ (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
2710 + sbi->s_itb_per_group + 2);
2711 +
2712 + first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
2713 +@@ -4786,9 +4788,18 @@ no_journal:
2714 + * Get the # of file system overhead blocks from the
2715 + * superblock if present.
2716 + */
2717 +- if (es->s_overhead_clusters)
2718 +- sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
2719 +- else {
2720 ++ sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
2721 ++ /* ignore the precalculated value if it is ridiculous */
2722 ++ if (sbi->s_overhead > ext4_blocks_count(es))
2723 ++ sbi->s_overhead = 0;
2724 ++ /*
2725 ++ * If the bigalloc feature is not enabled recalculating the
2726 ++ * overhead doesn't take long, so we might as well just redo
2727 ++ * it to make sure we are using the correct value.
2728 ++ */
2729 ++ if (!ext4_has_feature_bigalloc(sb))
2730 ++ sbi->s_overhead = 0;
2731 ++ if (sbi->s_overhead == 0) {
2732 + err = ext4_calculate_overhead(sb);
2733 + if (err)
2734 + goto failed_mount_wq;
2735 +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
2736 +index 403cf6f1eb8c5..6901cd85f1df7 100644
2737 +--- a/fs/gfs2/rgrp.c
2738 ++++ b/fs/gfs2/rgrp.c
2739 +@@ -923,15 +923,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
2740 + spin_lock_init(&rgd->rd_rsspin);
2741 + mutex_init(&rgd->rd_mutex);
2742 +
2743 +- error = compute_bitstructs(rgd);
2744 +- if (error)
2745 +- goto fail;
2746 +-
2747 + error = gfs2_glock_get(sdp, rgd->rd_addr,
2748 + &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
2749 + if (error)
2750 + goto fail;
2751 +
2752 ++ error = compute_bitstructs(rgd);
2753 ++ if (error)
2754 ++ goto fail_glock;
2755 ++
2756 + rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
2757 + rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
2758 + if (rgd->rd_data > sdp->sd_max_rg_data)
2759 +@@ -945,6 +945,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
2760 + }
2761 +
2762 + error = 0; /* someone else read in the rgrp; free it and ignore it */
2763 ++fail_glock:
2764 + gfs2_glock_put(rgd->rd_gl);
2765 +
2766 + fail:
2767 +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
2768 +index 54c4e0b0dda4a..bb0651a4a1287 100644
2769 +--- a/fs/hugetlbfs/inode.c
2770 ++++ b/fs/hugetlbfs/inode.c
2771 +@@ -206,7 +206,7 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
2772 + info.flags = 0;
2773 + info.length = len;
2774 + info.low_limit = current->mm->mmap_base;
2775 +- info.high_limit = TASK_SIZE;
2776 ++ info.high_limit = arch_get_mmap_end(addr);
2777 + info.align_mask = PAGE_MASK & ~huge_page_mask(h);
2778 + info.align_offset = 0;
2779 + return vm_unmapped_area(&info);
2780 +@@ -222,7 +222,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
2781 + info.flags = VM_UNMAPPED_AREA_TOPDOWN;
2782 + info.length = len;
2783 + info.low_limit = max(PAGE_SIZE, mmap_min_addr);
2784 +- info.high_limit = current->mm->mmap_base;
2785 ++ info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
2786 + info.align_mask = PAGE_MASK & ~huge_page_mask(h);
2787 + info.align_offset = 0;
2788 + addr = vm_unmapped_area(&info);
2789 +@@ -237,7 +237,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
2790 + VM_BUG_ON(addr != -ENOMEM);
2791 + info.flags = 0;
2792 + info.low_limit = current->mm->mmap_base;
2793 +- info.high_limit = TASK_SIZE;
2794 ++ info.high_limit = arch_get_mmap_end(addr);
2795 + addr = vm_unmapped_area(&info);
2796 + }
2797 +
2798 +@@ -251,6 +251,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
2799 + struct mm_struct *mm = current->mm;
2800 + struct vm_area_struct *vma;
2801 + struct hstate *h = hstate_file(file);
2802 ++ const unsigned long mmap_end = arch_get_mmap_end(addr);
2803 +
2804 + if (len & ~huge_page_mask(h))
2805 + return -EINVAL;
2806 +@@ -266,7 +267,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
2807 + if (addr) {
2808 + addr = ALIGN(addr, huge_page_size(h));
2809 + vma = find_vma(mm, addr);
2810 +- if (TASK_SIZE - len >= addr &&
2811 ++ if (mmap_end - len >= addr &&
2812 + (!vma || addr + len <= vm_start_gap(vma)))
2813 + return addr;
2814 + }
2815 +diff --git a/fs/internal.h b/fs/internal.h
2816 +index 3cd065c8a66b4..cdd83d4899bb3 100644
2817 +--- a/fs/internal.h
2818 ++++ b/fs/internal.h
2819 +@@ -23,22 +23,11 @@ struct pipe_inode_info;
2820 + #ifdef CONFIG_BLOCK
2821 + extern void __init bdev_cache_init(void);
2822 +
2823 +-extern int __sync_blockdev(struct block_device *bdev, int wait);
2824 +-void iterate_bdevs(void (*)(struct block_device *, void *), void *);
2825 + void emergency_thaw_bdev(struct super_block *sb);
2826 + #else
2827 + static inline void bdev_cache_init(void)
2828 + {
2829 + }
2830 +-
2831 +-static inline int __sync_blockdev(struct block_device *bdev, int wait)
2832 +-{
2833 +- return 0;
2834 +-}
2835 +-static inline void iterate_bdevs(void (*f)(struct block_device *, void *),
2836 +- void *arg)
2837 +-{
2838 +-}
2839 + static inline int emergency_thaw_bdev(struct super_block *sb)
2840 + {
2841 + return 0;
2842 +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
2843 +index d188fa913a075..34b1406c06fde 100644
2844 +--- a/fs/jbd2/commit.c
2845 ++++ b/fs/jbd2/commit.c
2846 +@@ -501,7 +501,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
2847 + }
2848 + spin_unlock(&commit_transaction->t_handle_lock);
2849 + commit_transaction->t_state = T_SWITCH;
2850 +- write_unlock(&journal->j_state_lock);
2851 +
2852 + J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
2853 + journal->j_max_transaction_buffers);
2854 +@@ -521,6 +520,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
2855 + * has reserved. This is consistent with the existing behaviour
2856 + * that multiple jbd2_journal_get_write_access() calls to the same
2857 + * buffer are perfectly permissible.
2858 ++ * We use journal->j_state_lock here to serialize processing of
2859 ++ * t_reserved_list with eviction of buffers from journal_unmap_buffer().
2860 + */
2861 + while (commit_transaction->t_reserved_list) {
2862 + jh = commit_transaction->t_reserved_list;
2863 +@@ -540,6 +541,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
2864 + jbd2_journal_refile_buffer(journal, jh);
2865 + }
2866 +
2867 ++ write_unlock(&journal->j_state_lock);
2868 + /*
2869 + * Now try to drop any written-back buffers from the journal's
2870 + * checkpoint lists. We do this *before* commit because it potentially
2871 +diff --git a/fs/namei.c b/fs/namei.c
2872 +index 3bb65f48fe1da..8882a70dc1192 100644
2873 +--- a/fs/namei.c
2874 ++++ b/fs/namei.c
2875 +@@ -3625,18 +3625,14 @@ static struct dentry *filename_create(int dfd, struct filename *name,
2876 + {
2877 + struct dentry *dentry = ERR_PTR(-EEXIST);
2878 + struct qstr last;
2879 ++ bool want_dir = lookup_flags & LOOKUP_DIRECTORY;
2880 ++ unsigned int reval_flag = lookup_flags & LOOKUP_REVAL;
2881 ++ unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;
2882 + int type;
2883 + int err2;
2884 + int error;
2885 +- bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
2886 +
2887 +- /*
2888 +- * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
2889 +- * other flags passed in are ignored!
2890 +- */
2891 +- lookup_flags &= LOOKUP_REVAL;
2892 +-
2893 +- error = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
2894 ++ error = filename_parentat(dfd, name, reval_flag, path, &last, &type);
2895 + if (error)
2896 + return ERR_PTR(error);
2897 +
2898 +@@ -3650,11 +3646,13 @@ static struct dentry *filename_create(int dfd, struct filename *name,
2899 + /* don't fail immediately if it's r/o, at least try to report other errors */
2900 + err2 = mnt_want_write(path->mnt);
2901 + /*
2902 +- * Do the final lookup.
2903 ++ * Do the final lookup. Suppress 'create' if there is a trailing
2904 ++ * '/' and a directory wasn't requested.
2905 + */
2906 +- lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
2907 ++ if (last.name[last.len] && !want_dir)
2908 ++ create_flags = 0;
2909 + inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
2910 +- dentry = __lookup_hash(&last, path->dentry, lookup_flags);
2911 ++ dentry = __lookup_hash(&last, path->dentry, reval_flag | create_flags);
2912 + if (IS_ERR(dentry))
2913 + goto unlock;
2914 +
2915 +@@ -3668,7 +3666,7 @@ static struct dentry *filename_create(int dfd, struct filename *name,
2916 + * all is fine. Let's be bastards - you had / on the end, you've
2917 + * been asking for (non-existent) directory. -ENOENT for you.
2918 + */
2919 +- if (unlikely(!is_dir && last.name[last.len])) {
2920 ++ if (unlikely(!create_flags)) {
2921 + error = -ENOENT;
2922 + goto fail;
2923 + }
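
To restate the filename_create() change above: LOOKUP_CREATE|LOOKUP_EXCL is now suppressed up front when the name has a trailing '/' but the caller didn't ask for a directory, and the old trailing-slash check collapses into !create_flags. A hedged userspace model of just the flag decision (flag values here are illustrative, not the kernel's):

    #include <stdio.h>

    #define LOOKUP_DIRECTORY 0x01   /* illustrative values, not the kernel's */
    #define LOOKUP_CREATE    0x02
    #define LOOKUP_EXCL      0x04

    static unsigned int final_lookup_flags(int trailing_slash, unsigned int flags)
    {
            unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;

            /* "mkdir foo/" keeps create; O_CREAT on "foo/" must not */
            if (trailing_slash && !(flags & LOOKUP_DIRECTORY))
                    create_flags = 0;   /* later turned into -ENOENT */
            return create_flags;
    }

    int main(void)
    {
            printf("%#x\n", final_lookup_flags(1, 0));                /* 0 */
            printf("%#x\n", final_lookup_flags(1, LOOKUP_DIRECTORY)); /* 0x6 */
            return 0;
    }
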
2924 +diff --git a/fs/stat.c b/fs/stat.c
2925 +index 28d2020ba1f42..246d138ec0669 100644
2926 +--- a/fs/stat.c
2927 ++++ b/fs/stat.c
2928 +@@ -334,9 +334,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
2929 + # define choose_32_64(a,b) b
2930 + #endif
2931 +
2932 +-#define valid_dev(x) choose_32_64(old_valid_dev(x),true)
2933 +-#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
2934 +-
2935 + #ifndef INIT_STRUCT_STAT_PADDING
2936 + # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
2937 + #endif
2938 +@@ -345,7 +342,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
2939 + {
2940 + struct stat tmp;
2941 +
2942 +- if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
2943 ++ if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
2944 ++ return -EOVERFLOW;
2945 ++ if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
2946 + return -EOVERFLOW;
2947 + #if BITS_PER_LONG == 32
2948 + if (stat->size > MAX_NON_LFS)
2949 +@@ -353,7 +352,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
2950 + #endif
2951 +
2952 + INIT_STRUCT_STAT_PADDING(tmp);
2953 +- tmp.st_dev = encode_dev(stat->dev);
2954 ++ tmp.st_dev = new_encode_dev(stat->dev);
2955 + tmp.st_ino = stat->ino;
2956 + if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
2957 + return -EOVERFLOW;
2958 +@@ -363,7 +362,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
2959 + return -EOVERFLOW;
2960 + SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
2961 + SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
2962 +- tmp.st_rdev = encode_dev(stat->rdev);
2963 ++ tmp.st_rdev = new_encode_dev(stat->rdev);
2964 + tmp.st_size = stat->size;
2965 + tmp.st_atime = stat->atime.tv_sec;
2966 + tmp.st_mtime = stat->mtime.tv_sec;
2967 +@@ -644,11 +643,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
2968 + {
2969 + struct compat_stat tmp;
2970 +
2971 +- if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
2972 ++ if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
2973 ++ return -EOVERFLOW;
2974 ++ if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
2975 + return -EOVERFLOW;
2976 +
2977 + memset(&tmp, 0, sizeof(tmp));
2978 +- tmp.st_dev = old_encode_dev(stat->dev);
2979 ++ tmp.st_dev = new_encode_dev(stat->dev);
2980 + tmp.st_ino = stat->ino;
2981 + if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
2982 + return -EOVERFLOW;
2983 +@@ -658,7 +659,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
2984 + return -EOVERFLOW;
2985 + SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
2986 + SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
2987 +- tmp.st_rdev = old_encode_dev(stat->rdev);
2988 ++ tmp.st_rdev = new_encode_dev(stat->rdev);
2989 + if ((u64) stat->size > MAX_NON_LFS)
2990 + return -EOVERFLOW;
2991 + tmp.st_size = stat->size;
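
The fs/stat.c hunks drop the choose_32_64() helpers: validity is now keyed to the width of the stat field (only sub-32-bit st_dev/st_rdev still need the old 8:8 range check), and the encoding is always new_encode_dev(). A minimal userspace sketch of the two device-number formats, mirroring include/linux/kdev_t.h from memory; treat the constants as illustrative:

    #include <stdio.h>
    #include <stdbool.h>

    #define MINORBITS 20
    #define MINORMASK ((1U << MINORBITS) - 1)
    #define MAJOR(dev) ((unsigned int)((dev) >> MINORBITS))
    #define MINOR(dev) ((unsigned int)((dev) & MINORMASK))

    /* old 16-bit format: 8-bit major, 8-bit minor -- only valid if both fit */
    static bool old_valid_dev(unsigned int dev)
    {
            return MAJOR(dev) < 256 && MINOR(dev) < 256;
    }

    /* new 32-bit format: low minor byte, 12-bit major, high minor bits */
    static unsigned int new_encode_dev(unsigned int dev)
    {
            unsigned int major = MAJOR(dev), minor = MINOR(dev);

            return (minor & 0xff) | (major << 8) | ((minor & ~0xffU) << 12);
    }

    int main(void)
    {
            unsigned int dev = (259U << MINORBITS) | 5; /* major 259, minor 5 */

            printf("old_valid=%d new=%#x\n", old_valid_dev(dev), new_encode_dev(dev));
            return 0;
    }
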
2992 +diff --git a/fs/sync.c b/fs/sync.c
2993 +index 1373a610dc784..c7690016453e4 100644
2994 +--- a/fs/sync.c
2995 ++++ b/fs/sync.c
2996 +@@ -3,6 +3,7 @@
2997 + * High-level sync()-related operations
2998 + */
2999 +
3000 ++#include <linux/blkdev.h>
3001 + #include <linux/kernel.h>
3002 + #include <linux/file.h>
3003 + #include <linux/fs.h>
3004 +@@ -21,25 +22,6 @@
3005 + #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
3006 + SYNC_FILE_RANGE_WAIT_AFTER)
3007 +
3008 +-/*
3009 +- * Do the filesystem syncing work. For simple filesystems
3010 +- * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to
3011 +- * submit IO for these buffers via __sync_blockdev(). This also speeds up the
3012 +- * wait == 1 case since in that case write_inode() functions do
3013 +- * sync_dirty_buffer() and thus effectively write one block at a time.
3014 +- */
3015 +-static int __sync_filesystem(struct super_block *sb, int wait)
3016 +-{
3017 +- if (wait)
3018 +- sync_inodes_sb(sb);
3019 +- else
3020 +- writeback_inodes_sb(sb, WB_REASON_SYNC);
3021 +-
3022 +- if (sb->s_op->sync_fs)
3023 +- sb->s_op->sync_fs(sb, wait);
3024 +- return __sync_blockdev(sb->s_bdev, wait);
3025 +-}
3026 +-
3027 + /*
3028 + * Write out and wait upon all dirty data associated with this
3029 + * superblock. Filesystem data as well as the underlying block
3030 +@@ -47,7 +29,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
3031 + */
3032 + int sync_filesystem(struct super_block *sb)
3033 + {
3034 +- int ret;
3035 ++ int ret = 0;
3036 +
3037 + /*
3038 + * We need to be protected against the filesystem going from
3039 +@@ -61,10 +43,31 @@ int sync_filesystem(struct super_block *sb)
3040 + if (sb_rdonly(sb))
3041 + return 0;
3042 +
3043 +- ret = __sync_filesystem(sb, 0);
3044 +- if (ret < 0)
3045 ++ /*
3046 ++ * Do the filesystem syncing work. For simple filesystems
3047 ++ * writeback_inodes_sb(sb) just dirties buffers with inodes so we have
3048 ++ * to submit I/O for these buffers via sync_blockdev(). This also
3049 ++ * speeds up the wait == 1 case since in that case write_inode()
3050 ++ * methods call sync_dirty_buffer() and thus effectively write one block
3051 ++ * at a time.
3052 ++ */
3053 ++ writeback_inodes_sb(sb, WB_REASON_SYNC);
3054 ++ if (sb->s_op->sync_fs) {
3055 ++ ret = sb->s_op->sync_fs(sb, 0);
3056 ++ if (ret)
3057 ++ return ret;
3058 ++ }
3059 ++ ret = sync_blockdev_nowait(sb->s_bdev);
3060 ++ if (ret)
3061 + return ret;
3062 +- return __sync_filesystem(sb, 1);
3063 ++
3064 ++ sync_inodes_sb(sb);
3065 ++ if (sb->s_op->sync_fs) {
3066 ++ ret = sb->s_op->sync_fs(sb, 1);
3067 ++ if (ret)
3068 ++ return ret;
3069 ++ }
3070 ++ return sync_blockdev(sb->s_bdev);
3071 + }
3072 + EXPORT_SYMBOL(sync_filesystem);
3073 +
3074 +@@ -81,21 +84,6 @@ static void sync_fs_one_sb(struct super_block *sb, void *arg)
3075 + sb->s_op->sync_fs(sb, *(int *)arg);
3076 + }
3077 +
3078 +-static void fdatawrite_one_bdev(struct block_device *bdev, void *arg)
3079 +-{
3080 +- filemap_fdatawrite(bdev->bd_inode->i_mapping);
3081 +-}
3082 +-
3083 +-static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
3084 +-{
3085 +- /*
3086 +- * We keep the error status of individual mapping so that
3087 +- * applications can catch the writeback error using fsync(2).
3088 +- * See filemap_fdatawait_keep_errors() for details.
3089 +- */
3090 +- filemap_fdatawait_keep_errors(bdev->bd_inode->i_mapping);
3091 +-}
3092 +-
3093 + /*
3094 + * Sync everything. We start by waking flusher threads so that most of
3095 + * writeback runs on all devices in parallel. Then we sync all inodes reliably
3096 +@@ -114,8 +102,8 @@ void ksys_sync(void)
3097 + iterate_supers(sync_inodes_one_sb, NULL);
3098 + iterate_supers(sync_fs_one_sb, &nowait);
3099 + iterate_supers(sync_fs_one_sb, &wait);
3100 +- iterate_bdevs(fdatawrite_one_bdev, NULL);
3101 +- iterate_bdevs(fdatawait_one_bdev, NULL);
3102 ++ sync_bdevs(false);
3103 ++ sync_bdevs(true);
3104 + if (unlikely(laptop_mode))
3105 + laptop_sync_completion();
3106 + }
3107 +@@ -136,10 +124,10 @@ static void do_sync_work(struct work_struct *work)
3108 + */
3109 + iterate_supers(sync_inodes_one_sb, &nowait);
3110 + iterate_supers(sync_fs_one_sb, &nowait);
3111 +- iterate_bdevs(fdatawrite_one_bdev, NULL);
3112 ++ sync_bdevs(false);
3113 + iterate_supers(sync_inodes_one_sb, &nowait);
3114 + iterate_supers(sync_fs_one_sb, &nowait);
3115 +- iterate_bdevs(fdatawrite_one_bdev, NULL);
3116 ++ sync_bdevs(false);
3117 + printk("Emergency Sync complete\n");
3118 + kfree(work);
3119 + }
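
The rewritten sync_filesystem() above inlines the old __sync_filesystem(sb, 0)/__sync_filesystem(sb, 1) pair as two explicit passes and, unlike before, returns the first error from ->sync_fs() rather than ignoring it. A compilable sketch of the shape, with hypothetical stub helpers standing in for the kernel functions:

    #include <stdio.h>
    #include <stdbool.h>

    static int  sync_fs(int wait)      { printf("sync_fs(wait=%d)\n", wait); return 0; }
    static void writeback_inodes(void) { puts("writeback_inodes_sb"); }
    static void wait_inodes(void)      { puts("sync_inodes_sb"); }
    static int  sync_bdev(bool wait)   { printf("sync_blockdev%s\n", wait ? "" : "_nowait"); return 0; }

    static int sync_filesystem_sketch(void)
    {
            int ret;

            writeback_inodes();             /* pass 1: start I/O, don't wait */
            if ((ret = sync_fs(0)))
                    return ret;             /* previously dropped, now returned */
            if ((ret = sync_bdev(false)))
                    return ret;

            wait_inodes();                  /* pass 2: wait for everything */
            if ((ret = sync_fs(1)))
                    return ret;
            return sync_bdev(true);
    }

    int main(void) { return sync_filesystem_sketch(); }
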
3120 +diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
3121 +index c4e0cd1c1c8ca..170fee98c45c7 100644
3122 +--- a/fs/xfs/xfs_super.c
3123 ++++ b/fs/xfs/xfs_super.c
3124 +@@ -729,6 +729,7 @@ xfs_fs_sync_fs(
3125 + int wait)
3126 + {
3127 + struct xfs_mount *mp = XFS_M(sb);
3128 ++ int error;
3129 +
3130 + trace_xfs_fs_sync_fs(mp, __return_address);
3131 +
3132 +@@ -738,7 +739,10 @@ xfs_fs_sync_fs(
3133 + if (!wait)
3134 + return 0;
3135 +
3136 +- xfs_log_force(mp, XFS_LOG_SYNC);
3137 ++ error = xfs_log_force(mp, XFS_LOG_SYNC);
3138 ++ if (error)
3139 ++ return error;
3140 ++
3141 + if (laptop_mode) {
3142 + /*
3143 + * The disk must be active because we're syncing.
3144 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
3145 +index 413c0148c0ce5..aebe67ed7a73e 100644
3146 +--- a/include/linux/blkdev.h
3147 ++++ b/include/linux/blkdev.h
3148 +@@ -1999,6 +1999,8 @@ int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
3149 + #ifdef CONFIG_BLOCK
3150 + void invalidate_bdev(struct block_device *bdev);
3151 + int sync_blockdev(struct block_device *bdev);
3152 ++int sync_blockdev_nowait(struct block_device *bdev);
3153 ++void sync_bdevs(bool wait);
3154 + #else
3155 + static inline void invalidate_bdev(struct block_device *bdev)
3156 + {
3157 +@@ -2007,6 +2009,13 @@ static inline int sync_blockdev(struct block_device *bdev)
3158 + {
3159 + return 0;
3160 + }
3161 ++static inline int sync_blockdev_nowait(struct block_device *bdev)
3162 ++{
3163 ++ return 0;
3164 ++}
3165 ++static inline void sync_bdevs(bool wait)
3166 ++{
3167 ++}
3168 + #endif
3169 + int fsync_bdev(struct block_device *bdev);
3170 +
3171 +diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
3172 +index c58d504514854..7f28fa702bb72 100644
3173 +--- a/include/linux/etherdevice.h
3174 ++++ b/include/linux/etherdevice.h
3175 +@@ -127,7 +127,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
3176 + #endif
3177 + }
3178 +
3179 +-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
3180 ++static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
3181 + {
3182 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3183 + #ifdef __BIG_ENDIAN
3184 +@@ -364,8 +364,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
3185 + * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
3186 + */
3187 +
3188 +-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
3189 +- const u8 addr2[6+2])
3190 ++static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
3191 + {
3192 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3193 + u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
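
The prototype change above only relaxes the array-parameter notation; the trick itself is unchanged: on 64-bit hosts with cheap unaligned loads, a 6-byte MAC plus its guaranteed 2 bytes of padding is compared as one 8-byte word. A hedged little-endian userspace analogue (memcpy stands in for the kernel's direct u64 loads):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    static bool ether_addr_equal_64bits(const uint8_t *a1, const uint8_t *a2)
    {
            uint64_t w1, w2;

            memcpy(&w1, a1, 8);           /* callers guarantee 8 readable bytes */
            memcpy(&w2, a2, 8);
            /* little-endian: the 2 padding bytes are the top 16 bits */
            return ((w1 ^ w2) << 16) == 0;
    }

    int main(void)
    {
            uint8_t a[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0xaa, 0xbb };
            uint8_t b[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0xcc, 0xdd };

            printf("%d\n", ether_addr_equal_64bits(a, b)); /* 1: padding ignored */
            return 0;
    }
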
3194 +diff --git a/include/linux/kfence.h b/include/linux/kfence.h
3195 +index 4b5e3679a72c7..3c75209a545e1 100644
3196 +--- a/include/linux/kfence.h
3197 ++++ b/include/linux/kfence.h
3198 +@@ -202,6 +202,22 @@ static __always_inline __must_check bool kfence_free(void *addr)
3199 + */
3200 + bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
3201 +
3202 ++#ifdef CONFIG_PRINTK
3203 ++struct kmem_obj_info;
3204 ++/**
3205 ++ * __kfence_obj_info() - fill kmem_obj_info struct
3206 ++ * @kpp: kmem_obj_info to be filled
3207 ++ * @object: the object
3208 ++ *
3209 ++ * Return:
3210 ++ * * false - not a KFENCE object
3211 ++ * * true - a KFENCE object; @kpp has been filled
3212 ++ *
3213 ++ * Copies information to @kpp for KFENCE objects.
3214 ++ */
3215 ++bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
3216 ++#endif
3217 ++
3218 + #else /* CONFIG_KFENCE */
3219 +
3220 + static inline bool is_kfence_address(const void *addr) { return false; }
3221 +@@ -219,6 +235,14 @@ static inline bool __must_check kfence_handle_page_fault(unsigned long addr, boo
3222 + return false;
3223 + }
3224 +
3225 ++#ifdef CONFIG_PRINTK
3226 ++struct kmem_obj_info;
3227 ++static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
3228 ++{
3229 ++ return false;
3230 ++}
3231 ++#endif
3232 ++
3233 + #endif
3234 +
3235 + #endif /* _LINUX_KFENCE_H */
3236 +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
3237 +index d9b8df5ef212f..d35439db047cb 100644
3238 +--- a/include/linux/memcontrol.h
3239 ++++ b/include/linux/memcontrol.h
3240 +@@ -1002,6 +1002,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
3241 + }
3242 +
3243 + void mem_cgroup_flush_stats(void);
3244 ++void mem_cgroup_flush_stats_delayed(void);
3245 +
3246 + void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
3247 + int val);
3248 +@@ -1422,6 +1423,10 @@ static inline void mem_cgroup_flush_stats(void)
3249 + {
3250 + }
3251 +
3252 ++static inline void mem_cgroup_flush_stats_delayed(void)
3253 ++{
3254 ++}
3255 ++
3256 + static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
3257 + enum node_stat_item idx, int val)
3258 + {
3259 +diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
3260 +index 700ea077ce2d6..2770db2fa080d 100644
3261 +--- a/include/linux/netfilter/nf_conntrack_common.h
3262 ++++ b/include/linux/netfilter/nf_conntrack_common.h
3263 +@@ -2,7 +2,7 @@
3264 + #ifndef _NF_CONNTRACK_COMMON_H
3265 + #define _NF_CONNTRACK_COMMON_H
3266 +
3267 +-#include <linux/atomic.h>
3268 ++#include <linux/refcount.h>
3269 + #include <uapi/linux/netfilter/nf_conntrack_common.h>
3270 +
3271 + struct ip_conntrack_stat {
3272 +@@ -25,19 +25,21 @@ struct ip_conntrack_stat {
3273 + #define NFCT_PTRMASK ~(NFCT_INFOMASK)
3274 +
3275 + struct nf_conntrack {
3276 +- atomic_t use;
3277 ++ refcount_t use;
3278 + };
3279 +
3280 + void nf_conntrack_destroy(struct nf_conntrack *nfct);
3281 ++
3282 ++/* like nf_ct_put, but without module dependency on nf_conntrack */
3283 + static inline void nf_conntrack_put(struct nf_conntrack *nfct)
3284 + {
3285 +- if (nfct && atomic_dec_and_test(&nfct->use))
3286 ++ if (nfct && refcount_dec_and_test(&nfct->use))
3287 + nf_conntrack_destroy(nfct);
3288 + }
3289 + static inline void nf_conntrack_get(struct nf_conntrack *nfct)
3290 + {
3291 + if (nfct)
3292 +- atomic_inc(&nfct->use);
3293 ++ refcount_inc(&nfct->use);
3294 + }
3295 +
3296 + #endif /* _NF_CONNTRACK_COMMON_H */
3297 +diff --git a/include/linux/sched.h b/include/linux/sched.h
3298 +index 9016bbacedf3b..ad7ff332a0ac8 100644
3299 +--- a/include/linux/sched.h
3300 ++++ b/include/linux/sched.h
3301 +@@ -1436,6 +1436,7 @@ struct task_struct {
3302 + int pagefault_disabled;
3303 + #ifdef CONFIG_MMU
3304 + struct task_struct *oom_reaper_list;
3305 ++ struct timer_list oom_reaper_timer;
3306 + #endif
3307 + #ifdef CONFIG_VMAP_STACK
3308 + struct vm_struct *stack_vm_area;
3309 +diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
3310 +index 5561486fddef7..95fb7aaaec8de 100644
3311 +--- a/include/linux/sched/mm.h
3312 ++++ b/include/linux/sched/mm.h
3313 +@@ -106,6 +106,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
3314 + #endif /* CONFIG_MEMCG */
3315 +
3316 + #ifdef CONFIG_MMU
3317 ++#ifndef arch_get_mmap_end
3318 ++#define arch_get_mmap_end(addr) (TASK_SIZE)
3319 ++#endif
3320 ++
3321 ++#ifndef arch_get_mmap_base
3322 ++#define arch_get_mmap_base(addr, base) (base)
3323 ++#endif
3324 ++
3325 + extern void arch_pick_mmap_layout(struct mm_struct *mm,
3326 + struct rlimit *rlim_stack);
3327 + extern unsigned long
3328 +diff --git a/include/net/esp.h b/include/net/esp.h
3329 +index 90cd02ff77ef6..9c5637d41d951 100644
3330 +--- a/include/net/esp.h
3331 ++++ b/include/net/esp.h
3332 +@@ -4,8 +4,6 @@
3333 +
3334 + #include <linux/skbuff.h>
3335 +
3336 +-#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
3337 +-
3338 + struct ip_esp_hdr;
3339 +
3340 + static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
3341 +diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
3342 +index d24b0a34c8f0c..34c266502a50e 100644
3343 +--- a/include/net/netfilter/nf_conntrack.h
3344 ++++ b/include/net/netfilter/nf_conntrack.h
3345 +@@ -76,6 +76,8 @@ struct nf_conn {
3346 + * Hint, SKB address this struct and refcnt via skb->_nfct and
3347 + * helpers nf_conntrack_get() and nf_conntrack_put().
3348 + * Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt,
3349 ++ * except that the latter uses internal indirection and does not
3350 ++ * result in a conntrack module dependency.
3351 + * beware nf_ct_get() is different and don't inc refcnt.
3352 + */
3353 + struct nf_conntrack ct_general;
3354 +@@ -169,11 +171,13 @@ nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
3355 + return (struct nf_conn *)(nfct & NFCT_PTRMASK);
3356 + }
3357 +
3358 ++void nf_ct_destroy(struct nf_conntrack *nfct);
3359 ++
3360 + /* decrement reference count on a conntrack */
3361 + static inline void nf_ct_put(struct nf_conn *ct)
3362 + {
3363 +- WARN_ON(!ct);
3364 +- nf_conntrack_put(&ct->ct_general);
3365 ++ if (ct && refcount_dec_and_test(&ct->ct_general.use))
3366 ++ nf_ct_destroy(&ct->ct_general);
3367 + }
3368 +
3369 + /* Protocol module loading */
3370 +diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
3371 +index 6bd7e5a85ce76..ff82983b7ab41 100644
3372 +--- a/include/net/netns/ipv6.h
3373 ++++ b/include/net/netns/ipv6.h
3374 +@@ -75,8 +75,8 @@ struct netns_ipv6 {
3375 + struct list_head fib6_walkers;
3376 + rwlock_t fib6_walker_lock;
3377 + spinlock_t fib6_gc_lock;
3378 +- unsigned int ip6_rt_gc_expire;
3379 +- unsigned long ip6_rt_last_gc;
3380 ++ atomic_t ip6_rt_gc_expire;
3381 ++ unsigned long ip6_rt_last_gc;
3382 + unsigned char flowlabel_has_excl;
3383 + #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3384 + bool fib6_has_custom_rules;
3385 +diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
3386 +index 4ee233e5a6ffa..d1e282f0d6f18 100644
3387 +--- a/include/scsi/libiscsi.h
3388 ++++ b/include/scsi/libiscsi.h
3389 +@@ -52,8 +52,10 @@ enum {
3390 +
3391 + #define ISID_SIZE 6
3392 +
3393 +-/* Connection suspend "bit" */
3394 +-#define ISCSI_SUSPEND_BIT 1
3395 ++/* Connection flags */
3396 ++#define ISCSI_CONN_FLAG_SUSPEND_TX BIT(0)
3397 ++#define ISCSI_CONN_FLAG_SUSPEND_RX BIT(1)
3398 ++#define ISCSI_CONN_FLAG_BOUND BIT(2)
3399 +
3400 + #define ISCSI_ITT_MASK 0x1fff
3401 + #define ISCSI_TOTAL_CMDS_MAX 4096
3402 +@@ -199,8 +201,7 @@ struct iscsi_conn {
3403 + struct list_head cmdqueue; /* data-path cmd queue */
3404 + struct list_head requeue; /* tasks needing another run */
3405 + struct work_struct xmitwork; /* per-conn. xmit workqueue */
3406 +- unsigned long suspend_tx; /* suspend Tx */
3407 +- unsigned long suspend_rx; /* suspend Rx */
3408 ++ unsigned long flags; /* ISCSI_CONN_FLAGs */
3409 +
3410 + /* negotiated params */
3411 + unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
3412 +diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
3413 +index 037c77fb5dc55..3ecf9702287be 100644
3414 +--- a/include/scsi/scsi_transport_iscsi.h
3415 ++++ b/include/scsi/scsi_transport_iscsi.h
3416 +@@ -296,7 +296,7 @@ extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
3417 + struct iscsi_endpoint {
3418 + void *dd_data; /* LLD private data */
3419 + struct device dev;
3420 +- uint64_t id;
3421 ++ int id;
3422 + struct iscsi_cls_conn *conn;
3423 + };
3424 +
3425 +diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
3426 +index 8349a9f2c3453..9478eccd1c8e6 100644
3427 +--- a/kernel/dma/mapping.c
3428 ++++ b/kernel/dma/mapping.c
3429 +@@ -296,10 +296,6 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
3430 + if (WARN_ON_ONCE(!dev->dma_mask))
3431 + return DMA_MAPPING_ERROR;
3432 +
3433 +- /* Don't allow RAM to be mapped */
3434 +- if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
3435 +- return DMA_MAPPING_ERROR;
3436 +-
3437 + if (dma_map_direct(dev, ops))
3438 + addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
3439 + else if (ops->map_resource)
3440 +diff --git a/kernel/events/core.c b/kernel/events/core.c
3441 +index 699446d60b6bf..7c891a8eb3234 100644
3442 +--- a/kernel/events/core.c
3443 ++++ b/kernel/events/core.c
3444 +@@ -6348,7 +6348,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3445 + again:
3446 + mutex_lock(&event->mmap_mutex);
3447 + if (event->rb) {
3448 +- if (event->rb->nr_pages != nr_pages) {
3449 ++ if (data_page_nr(event->rb) != nr_pages) {
3450 + ret = -EINVAL;
3451 + goto unlock;
3452 + }
3453 +diff --git a/kernel/events/internal.h b/kernel/events/internal.h
3454 +index 228801e207886..aa23ffdaf819f 100644
3455 +--- a/kernel/events/internal.h
3456 ++++ b/kernel/events/internal.h
3457 +@@ -116,6 +116,11 @@ static inline int page_order(struct perf_buffer *rb)
3458 + }
3459 + #endif
3460 +
3461 ++static inline int data_page_nr(struct perf_buffer *rb)
3462 ++{
3463 ++ return rb->nr_pages << page_order(rb);
3464 ++}
3465 ++
3466 + static inline unsigned long perf_data_size(struct perf_buffer *rb)
3467 + {
3468 + return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
3469 +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
3470 +index 52868716ec358..fb35b926024ca 100644
3471 +--- a/kernel/events/ring_buffer.c
3472 ++++ b/kernel/events/ring_buffer.c
3473 +@@ -859,11 +859,6 @@ void rb_free(struct perf_buffer *rb)
3474 + }
3475 +
3476 + #else
3477 +-static int data_page_nr(struct perf_buffer *rb)
3478 +-{
3479 +- return rb->nr_pages << page_order(rb);
3480 +-}
3481 +-
3482 + static struct page *
3483 + __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
3484 + {
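
The perf hunks move data_page_nr() into internal.h because perf_mmap() must compare the request against the user-visible page count, not rb->nr_pages: with CONFIG_PERF_USE_VMALLOC the buffer is allocated in chunks of 2^page_order pages. A one-line illustration with made-up values:

    #include <stdio.h>

    int main(void)
    {
            int nr_pages = 4, page_order = 2;  /* illustrative, not a real config */

            /* user-visible data pages, what perf_mmap() must check against */
            printf("data pages = %d\n", nr_pages << page_order); /* 16 */
            return 0;
    }
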
3485 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
3486 +index 02766f3fe206b..9a4fa22a69edf 100644
3487 +--- a/kernel/sched/fair.c
3488 ++++ b/kernel/sched/fair.c
3489 +@@ -3794,11 +3794,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
3490 +
3491 + se->avg.runnable_sum = se->avg.runnable_avg * divider;
3492 +
3493 +- se->avg.load_sum = divider;
3494 +- if (se_weight(se)) {
3495 +- se->avg.load_sum =
3496 +- div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
3497 +- }
3498 ++ se->avg.load_sum = se->avg.load_avg * divider;
3499 ++ if (se_weight(se) < se->avg.load_sum)
3500 ++ se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
3501 ++ else
3502 ++ se->avg.load_sum = 1;
3503 +
3504 + enqueue_load_avg(cfs_rq, se);
3505 + cfs_rq->avg.util_avg += se->avg.util_avg;
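
The attach_entity_load_avg() fix above computes load_sum as load_avg * divider / weight and clamps it to at least 1, so a tiny load_avg can no longer round the sum down to 0. A hedged arithmetic sketch with illustrative numbers:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t attach_load_sum(uint64_t load_avg, uint64_t divider,
                                    uint64_t weight)
    {
            uint64_t load_sum = load_avg * divider;

            if (weight < load_sum)
                    return load_sum / weight;
            return 1;   /* clamp: never let the sum round down to zero */
    }

    int main(void)
    {
            /* tiny avg vs. huge weight: the old code could end up with 0 */
            printf("%llu\n", (unsigned long long)attach_load_sum(1, 2, 1048576));
            /* a more typical case */
            printf("%llu\n", (unsigned long long)attach_load_sum(100, 47742, 1024));
            return 0;
    }
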
3506 +diff --git a/mm/kfence/core.c b/mm/kfence/core.c
3507 +index 51ea9193cecb3..86260e8f28302 100644
3508 +--- a/mm/kfence/core.c
3509 ++++ b/mm/kfence/core.c
3510 +@@ -221,27 +221,6 @@ static bool kfence_unprotect(unsigned long addr)
3511 + return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
3512 + }
3513 +
3514 +-static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
3515 +-{
3516 +- long index;
3517 +-
3518 +- /* The checks do not affect performance; only called from slow-paths. */
3519 +-
3520 +- if (!is_kfence_address((void *)addr))
3521 +- return NULL;
3522 +-
3523 +- /*
3524 +- * May be an invalid index if called with an address at the edge of
3525 +- * __kfence_pool, in which case we would report an "invalid access"
3526 +- * error.
3527 +- */
3528 +- index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
3529 +- if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
3530 +- return NULL;
3531 +-
3532 +- return &kfence_metadata[index];
3533 +-}
3534 +-
3535 + static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
3536 + {
3537 + unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
3538 +diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
3539 +index 2a2d5de9d3791..92bf6eff6060d 100644
3540 +--- a/mm/kfence/kfence.h
3541 ++++ b/mm/kfence/kfence.h
3542 +@@ -93,6 +93,27 @@ struct kfence_metadata {
3543 +
3544 + extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
3545 +
3546 ++static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
3547 ++{
3548 ++ long index;
3549 ++
3550 ++ /* The checks do not affect performance; only called from slow-paths. */
3551 ++
3552 ++ if (!is_kfence_address((void *)addr))
3553 ++ return NULL;
3554 ++
3555 ++ /*
3556 ++ * May be an invalid index if called with an address at the edge of
3557 ++ * __kfence_pool, in which case we would report an "invalid access"
3558 ++ * error.
3559 ++ */
3560 ++ index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
3561 ++ if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
3562 ++ return NULL;
3563 ++
3564 ++ return &kfence_metadata[index];
3565 ++}
3566 ++
3567 + /* KFENCE error types for report generation. */
3568 + enum kfence_error_type {
3569 + KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */
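
addr_to_metadata() is moved into kfence.h so report.c can map an object pointer back to its slot. The layout math it relies on: the pool alternates guard and object pages, so each object owns a 2-page slot and the index is offset / (2 * PAGE_SIZE) - 1, with out-of-range results treated as invalid. A hedged userspace model (constants illustrative, not the kernel's configuration):

    #include <stdio.h>

    #define PAGE_SIZE   4096ULL
    #define NUM_OBJECTS 255LL

    static long long addr_to_index(unsigned long long pool, unsigned long long addr)
    {
            long long index = (long long)((addr - pool) / (PAGE_SIZE * 2)) - 1;

            if (index < 0 || index >= NUM_OBJECTS)
                    return -1;                 /* edge of pool: report invalid */
            return index;
    }

    int main(void)
    {
            unsigned long long pool = 1ULL << 32;   /* illustrative base address */

            printf("%lld\n", addr_to_index(pool, pool + 2 * PAGE_SIZE + 64)); /* 0 */
            printf("%lld\n", addr_to_index(pool, pool + 16));                 /* -1 */
            return 0;
    }
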
3570 +diff --git a/mm/kfence/report.c b/mm/kfence/report.c
3571 +index f93a7b2a338be..37e140e7f201e 100644
3572 +--- a/mm/kfence/report.c
3573 ++++ b/mm/kfence/report.c
3574 +@@ -273,3 +273,50 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r
3575 + /* We encountered a memory safety error, taint the kernel! */
3576 + add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
3577 + }
3578 ++
3579 ++#ifdef CONFIG_PRINTK
3580 ++static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
3581 ++{
3582 ++ int i, j;
3583 ++
3584 ++ i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
3585 ++ for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
3586 ++ kp_stack[j] = (void *)track->stack_entries[i];
3587 ++ if (j < KS_ADDRS_COUNT)
3588 ++ kp_stack[j] = NULL;
3589 ++}
3590 ++
3591 ++bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
3592 ++{
3593 ++ struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
3594 ++ unsigned long flags;
3595 ++
3596 ++ if (!meta)
3597 ++ return false;
3598 ++
3599 ++ /*
3600 ++ * If state is UNUSED at least show the pointer requested; the rest
3601 ++ * would be garbage data.
3602 ++ */
3603 ++ kpp->kp_ptr = object;
3604 ++
3605 ++ /* Requesting info on a never-used object is almost certainly a bug. */
3606 ++ if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
3607 ++ return true;
3608 ++
3609 ++ raw_spin_lock_irqsave(&meta->lock, flags);
3610 ++
3611 ++ kpp->kp_page = page;
3612 ++ kpp->kp_slab_cache = meta->cache;
3613 ++ kpp->kp_objp = (void *)meta->addr;
3614 ++ kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
3615 ++ if (meta->state == KFENCE_OBJECT_FREED)
3616 ++ kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
3617 ++ /* get_stack_skipnr() ensures the first entry is outside the allocator. */
3618 ++ kpp->kp_ret = kpp->kp_stack[0];
3619 ++
3620 ++ raw_spin_unlock_irqrestore(&meta->lock, flags);
3621 ++
3622 ++ return true;
3623 ++}
3624 ++#endif
3625 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3626 +index 8cdeb33d2cf9f..971546bb99e04 100644
3627 +--- a/mm/memcontrol.c
3628 ++++ b/mm/memcontrol.c
3629 +@@ -650,6 +650,9 @@ static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
3630 + static DEFINE_SPINLOCK(stats_flush_lock);
3631 + static DEFINE_PER_CPU(unsigned int, stats_updates);
3632 + static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
3633 ++static u64 flush_next_time;
3634 ++
3635 ++#define FLUSH_TIME (2UL*HZ)
3636 +
3637 + static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
3638 + {
3639 +@@ -671,6 +674,7 @@ static void __mem_cgroup_flush_stats(void)
3640 + if (!spin_trylock_irqsave(&stats_flush_lock, flag))
3641 + return;
3642 +
3643 ++ flush_next_time = jiffies_64 + 2*FLUSH_TIME;
3644 + cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
3645 + atomic_set(&stats_flush_threshold, 0);
3646 + spin_unlock_irqrestore(&stats_flush_lock, flag);
3647 +@@ -682,10 +686,16 @@ void mem_cgroup_flush_stats(void)
3648 + __mem_cgroup_flush_stats();
3649 + }
3650 +
3651 ++void mem_cgroup_flush_stats_delayed(void)
3652 ++{
3653 ++ if (time_after64(jiffies_64, flush_next_time))
3654 ++ mem_cgroup_flush_stats();
3655 ++}
3656 ++
3657 + static void flush_memcg_stats_dwork(struct work_struct *w)
3658 + {
3659 + __mem_cgroup_flush_stats();
3660 +- queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
3661 ++ queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
3662 + }
3663 +
3664 + /**
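
mem_cgroup_flush_stats_delayed() above is a rate limiter: hot paths (the workingset refault path, further down) flush only if the periodic worker has fallen more than a period behind, tracked via the flush_next_time deadline. A hedged userspace analogue using wall-clock seconds in place of jiffies:

    #include <stdio.h>
    #include <time.h>

    #define FLUSH_PERIOD 2  /* seconds; the kernel uses 2*HZ jiffies */

    static time_t flush_next_time;

    static void flush_stats(void)
    {
            flush_next_time = time(NULL) + 2 * FLUSH_PERIOD;
            puts("expensive flush");
    }

    static void flush_stats_delayed(void)
    {
            if (time(NULL) > flush_next_time)   /* only if the worker is late */
                    flush_stats();
    }

    int main(void)
    {
            flush_stats_delayed();  /* flushes: deadline starts at 0 */
            flush_stats_delayed();  /* skipped: deadline now in the future */
            return 0;
    }
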
3665 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
3666 +index f66977a171966..e659a7ef5acff 100644
3667 +--- a/mm/memory-failure.c
3668 ++++ b/mm/memory-failure.c
3669 +@@ -1689,6 +1689,19 @@ try_again:
3670 + }
3671 +
3672 + if (PageTransHuge(hpage)) {
3673 ++ /*
3674 ++ * Bail out before SetPageHasHWPoisoned() if hpage is
3675 ++ * huge_zero_page, although PG_has_hwpoisoned is not
3676 ++ * checked in set_huge_zero_page().
3677 ++ *
3678 ++ * TODO: Handle memory failure of huge_zero_page thoroughly.
3679 ++ */
3680 ++ if (is_huge_zero_page(hpage)) {
3681 ++ action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
3682 ++ res = -EBUSY;
3683 ++ goto unlock_mutex;
3684 ++ }
3685 ++
3686 + /*
3687 + * The flag must be set after the refcount is bumped
3688 + * otherwise it may race with THP split.
3689 +diff --git a/mm/mmap.c b/mm/mmap.c
3690 +index 049b8e5c18f02..6bb553ed5c557 100644
3691 +--- a/mm/mmap.c
3692 ++++ b/mm/mmap.c
3693 +@@ -2113,14 +2113,6 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
3694 + return addr;
3695 + }
3696 +
3697 +-#ifndef arch_get_mmap_end
3698 +-#define arch_get_mmap_end(addr) (TASK_SIZE)
3699 +-#endif
3700 +-
3701 +-#ifndef arch_get_mmap_base
3702 +-#define arch_get_mmap_base(addr, base) (base)
3703 +-#endif
3704 +-
3705 + /* Get an address range which is currently unmapped.
3706 + * For shmat() with addr=0.
3707 + *
3708 +diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
3709 +index 459d195d2ff64..f45ff1b7626a6 100644
3710 +--- a/mm/mmu_notifier.c
3711 ++++ b/mm/mmu_notifier.c
3712 +@@ -1036,6 +1036,18 @@ int mmu_interval_notifier_insert_locked(
3713 + }
3714 + EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
3715 +
3716 ++static bool
3717 ++mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
3718 ++ unsigned long seq)
3719 ++{
3720 ++ bool ret;
3721 ++
3722 ++ spin_lock(&subscriptions->lock);
3723 ++ ret = subscriptions->invalidate_seq != seq;
3724 ++ spin_unlock(&subscriptions->lock);
3725 ++ return ret;
3726 ++}
3727 ++
3728 + /**
3729 + * mmu_interval_notifier_remove - Remove a interval notifier
3730 + * @interval_sub: Interval subscription to unregister
3731 +@@ -1083,7 +1095,7 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
3732 + lock_map_release(&__mmu_notifier_invalidate_range_start_map);
3733 + if (seq)
3734 + wait_event(subscriptions->wq,
3735 +- READ_ONCE(subscriptions->invalidate_seq) != seq);
3736 ++ mmu_interval_seq_released(subscriptions, seq));
3737 +
3738 + /* pairs with mmgrab in mmu_interval_notifier_insert() */
3739 + mmdrop(mm);
3740 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
3741 +index bfa9e348c3a3a..262f752d3d516 100644
3742 +--- a/mm/oom_kill.c
3743 ++++ b/mm/oom_kill.c
3744 +@@ -635,7 +635,7 @@ done:
3745 + */
3746 + set_bit(MMF_OOM_SKIP, &mm->flags);
3747 +
3748 +- /* Drop a reference taken by wake_oom_reaper */
3749 ++ /* Drop a reference taken by queue_oom_reaper */
3750 + put_task_struct(tsk);
3751 + }
3752 +
3753 +@@ -645,12 +645,12 @@ static int oom_reaper(void *unused)
3754 + struct task_struct *tsk = NULL;
3755 +
3756 + wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
3757 +- spin_lock(&oom_reaper_lock);
3758 ++ spin_lock_irq(&oom_reaper_lock);
3759 + if (oom_reaper_list != NULL) {
3760 + tsk = oom_reaper_list;
3761 + oom_reaper_list = tsk->oom_reaper_list;
3762 + }
3763 +- spin_unlock(&oom_reaper_lock);
3764 ++ spin_unlock_irq(&oom_reaper_lock);
3765 +
3766 + if (tsk)
3767 + oom_reap_task(tsk);
3768 +@@ -659,22 +659,48 @@ static int oom_reaper(void *unused)
3769 + return 0;
3770 + }
3771 +
3772 +-static void wake_oom_reaper(struct task_struct *tsk)
3773 ++static void wake_oom_reaper(struct timer_list *timer)
3774 + {
3775 +- /* mm is already queued? */
3776 +- if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
3777 +- return;
3778 ++ struct task_struct *tsk = container_of(timer, struct task_struct,
3779 ++ oom_reaper_timer);
3780 ++ struct mm_struct *mm = tsk->signal->oom_mm;
3781 ++ unsigned long flags;
3782 +
3783 +- get_task_struct(tsk);
3784 ++ /* The victim managed to terminate on its own - see exit_mmap */
3785 ++ if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
3786 ++ put_task_struct(tsk);
3787 ++ return;
3788 ++ }
3789 +
3790 +- spin_lock(&oom_reaper_lock);
3791 ++ spin_lock_irqsave(&oom_reaper_lock, flags);
3792 + tsk->oom_reaper_list = oom_reaper_list;
3793 + oom_reaper_list = tsk;
3794 +- spin_unlock(&oom_reaper_lock);
3795 ++ spin_unlock_irqrestore(&oom_reaper_lock, flags);
3796 + trace_wake_reaper(tsk->pid);
3797 + wake_up(&oom_reaper_wait);
3798 + }
3799 +
3800 ++/*
3801 ++ * Give the OOM victim time to exit naturally before invoking the oom_reaping.
3802 ++ * The timers timeout is arbitrary... the longer it is, the longer the worst
3803 ++ * case scenario for the OOM can take. If it is too small, the oom_reaper can
3804 ++ * get in the way and release resources needed by the process exit path.
3805 ++ * e.g. The futex robust list can sit in Anon|Private memory that gets reaped
3806 ++ * before the exit path is able to wake the futex waiters.
3807 ++ */
3808 ++#define OOM_REAPER_DELAY (2*HZ)
3809 ++static void queue_oom_reaper(struct task_struct *tsk)
3810 ++{
3811 ++ /* mm is already queued? */
3812 ++ if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
3813 ++ return;
3814 ++
3815 ++ get_task_struct(tsk);
3816 ++ timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
3817 ++ tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
3818 ++ add_timer(&tsk->oom_reaper_timer);
3819 ++}
3820 ++
3821 + static int __init oom_init(void)
3822 + {
3823 + oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
3824 +@@ -682,7 +708,7 @@ static int __init oom_init(void)
3825 + }
3826 + subsys_initcall(oom_init)
3827 + #else
3828 +-static inline void wake_oom_reaper(struct task_struct *tsk)
3829 ++static inline void queue_oom_reaper(struct task_struct *tsk)
3830 + {
3831 + }
3832 + #endif /* CONFIG_MMU */
3833 +@@ -933,7 +959,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
3834 + rcu_read_unlock();
3835 +
3836 + if (can_oom_reap)
3837 +- wake_oom_reaper(victim);
3838 ++ queue_oom_reaper(victim);
3839 +
3840 + mmdrop(mm);
3841 + put_task_struct(victim);
3842 +@@ -969,7 +995,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
3843 + task_lock(victim);
3844 + if (task_will_free_mem(victim)) {
3845 + mark_oom_victim(victim);
3846 +- wake_oom_reaper(victim);
3847 ++ queue_oom_reaper(victim);
3848 + task_unlock(victim);
3849 + put_task_struct(victim);
3850 + return;
3851 +@@ -1067,7 +1093,7 @@ bool out_of_memory(struct oom_control *oc)
3852 + */
3853 + if (task_will_free_mem(current)) {
3854 + mark_oom_victim(current);
3855 +- wake_oom_reaper(current);
3856 ++ queue_oom_reaper(current);
3857 + return true;
3858 + }
3859 +
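
queue_oom_reaper() above replaces the immediate wakeup with a one-shot timer, giving the victim OOM_REAPER_DELAY to exit on its own before its memory (e.g. the futex robust list) is reaped. A hedged event model of the flow; the fields are illustrative stand-ins for the mm flags and the kernel timer:

    #include <stdio.h>
    #include <stdbool.h>
    #include <time.h>

    #define OOM_REAPER_DELAY 2 /* seconds; the kernel uses 2*HZ jiffies */

    struct victim {
            bool queued;        /* models MMF_OOM_REAP_QUEUED */
            bool exited;        /* models MMF_OOM_SKIP set by exit_mmap() */
            time_t reap_at;     /* models tsk->oom_reaper_timer.expires */
    };

    static void queue_oom_reaper(struct victim *v)
    {
            if (v->queued)      /* already armed: test_and_set_bit analogue */
                    return;
            v->queued = true;
            v->reap_at = time(NULL) + OOM_REAPER_DELAY;
    }

    static void timer_fires(struct victim *v)
    {
            if (v->exited) {    /* victim terminated on its own: drop it */
                    puts("victim exited, no reap");
                    return;
            }
            puts("handing victim to oom_reaper");
    }

    int main(void)
    {
            struct victim v = { 0 };

            queue_oom_reaper(&v);
            v.exited = true;    /* the exit path wins the race in this run */
            timer_fires(&v);
            return 0;
    }
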
3860 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
3861 +index a373cd6326b04..47c22810c3c5f 100644
3862 +--- a/mm/page_alloc.c
3863 ++++ b/mm/page_alloc.c
3864 +@@ -8169,7 +8169,7 @@ void __init mem_init_print_info(void)
3865 + */
3866 + #define adj_init_size(start, end, size, pos, adj) \
3867 + do { \
3868 +- if (start <= pos && pos < end && size > adj) \
3869 ++ if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
3870 + size -= adj; \
3871 + } while (0)
3872 +
3873 +diff --git a/mm/slab.c b/mm/slab.c
3874 +index 03d3074d0bb07..1bd283e98c58c 100644
3875 +--- a/mm/slab.c
3876 ++++ b/mm/slab.c
3877 +@@ -3658,7 +3658,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
3878 + #endif /* CONFIG_NUMA */
3879 +
3880 + #ifdef CONFIG_PRINTK
3881 +-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
3882 ++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
3883 + {
3884 + struct kmem_cache *cachep;
3885 + unsigned int objnr;
3886 +diff --git a/mm/slab.h b/mm/slab.h
3887 +index 56ad7eea3ddfb..1ae1bdd485c17 100644
3888 +--- a/mm/slab.h
3889 ++++ b/mm/slab.h
3890 +@@ -643,7 +643,7 @@ struct kmem_obj_info {
3891 + void *kp_stack[KS_ADDRS_COUNT];
3892 + void *kp_free_stack[KS_ADDRS_COUNT];
3893 + };
3894 +-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
3895 ++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
3896 + #endif
3897 +
3898 + #endif /* MM_SLAB_H */
3899 +diff --git a/mm/slab_common.c b/mm/slab_common.c
3900 +index ec2bb0beed757..022319e7deaf7 100644
3901 +--- a/mm/slab_common.c
3902 ++++ b/mm/slab_common.c
3903 +@@ -568,6 +568,13 @@ bool kmem_valid_obj(void *object)
3904 + }
3905 + EXPORT_SYMBOL_GPL(kmem_valid_obj);
3906 +
3907 ++static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
3908 ++{
3909 ++ if (__kfence_obj_info(kpp, object, page))
3910 ++ return;
3911 ++ __kmem_obj_info(kpp, object, page);
3912 ++}
3913 ++
3914 + /**
3915 + * kmem_dump_obj - Print available slab provenance information
3916 + * @object: slab object for which to find provenance information.
3917 +@@ -603,6 +610,8 @@ void kmem_dump_obj(void *object)
3918 + pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
3919 + else
3920 + pr_cont(" slab%s", cp);
3921 ++ if (is_kfence_address(object))
3922 ++ pr_cont(" (kfence)");
3923 + if (kp.kp_objp)
3924 + pr_cont(" start %px", kp.kp_objp);
3925 + if (kp.kp_data_offset)
3926 +diff --git a/mm/slob.c b/mm/slob.c
3927 +index 74d3f6e60666e..f3fc15df971af 100644
3928 +--- a/mm/slob.c
3929 ++++ b/mm/slob.c
3930 +@@ -462,7 +462,7 @@ out:
3931 + }
3932 +
3933 + #ifdef CONFIG_PRINTK
3934 +-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
3935 ++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
3936 + {
3937 + kpp->kp_ptr = object;
3938 + kpp->kp_page = page;
3939 +diff --git a/mm/slub.c b/mm/slub.c
3940 +index ca6ba6bdf27b1..b75eebc0350e7 100644
3941 +--- a/mm/slub.c
3942 ++++ b/mm/slub.c
3943 +@@ -4299,7 +4299,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
3944 + }
3945 +
3946 + #ifdef CONFIG_PRINTK
3947 +-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
3948 ++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
3949 + {
3950 + void *base;
3951 + int __maybe_unused i;
3952 +diff --git a/mm/workingset.c b/mm/workingset.c
3953 +index d5b81e4f4cbe8..880d882f3325f 100644
3954 +--- a/mm/workingset.c
3955 ++++ b/mm/workingset.c
3956 +@@ -352,7 +352,7 @@ void workingset_refault(struct page *page, void *shadow)
3957 +
3958 + inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
3959 +
3960 +- mem_cgroup_flush_stats();
3961 ++ mem_cgroup_flush_stats_delayed();
3962 + /*
3963 + * Compare the distance to the existing workingset size. We
3964 + * don't activate pages that couldn't stay resident even if
3965 +diff --git a/net/can/isotp.c b/net/can/isotp.c
3966 +index 5bce7c66c1219..8c753dcefe7fc 100644
3967 +--- a/net/can/isotp.c
3968 ++++ b/net/can/isotp.c
3969 +@@ -866,6 +866,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
3970 + struct canfd_frame *cf;
3971 + int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
3972 + int wait_tx_done = (so->opt.flags & CAN_ISOTP_WAIT_TX_DONE) ? 1 : 0;
3973 ++ s64 hrtimer_sec = 0;
3974 + int off;
3975 + int err;
3976 +
3977 +@@ -964,7 +965,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
3978 + isotp_create_fframe(cf, so, ae);
3979 +
3980 + /* start timeout for FC */
3981 +- hrtimer_start(&so->txtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
3982 ++ hrtimer_sec = 1;
3983 ++ hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0),
3984 ++ HRTIMER_MODE_REL_SOFT);
3985 + }
3986 +
3987 + /* send the first or only CAN frame */
3988 +@@ -977,6 +980,11 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
3989 + if (err) {
3990 + pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
3991 + __func__, ERR_PTR(err));
3992 ++
3993 ++ /* no transmission -> no timeout monitoring */
3994 ++ if (hrtimer_sec)
3995 ++ hrtimer_cancel(&so->txtimer);
3996 ++
3997 + goto err_out_drop;
3998 + }
3999 +
4000 +diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
4001 +index f64b805303cd7..eb204ad36eeec 100644
4002 +--- a/net/dsa/tag_hellcreek.c
4003 ++++ b/net/dsa/tag_hellcreek.c
4004 +@@ -21,6 +21,14 @@ static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
4005 + struct dsa_port *dp = dsa_slave_to_port(dev);
4006 + u8 *tag;
4007 +
4008 ++ /* Calculate checksums (if required) before adding the trailer tag to
4009 ++ * avoid including it in calculations. That would lead to wrong
4010 ++ * checksums after the switch strips the tag.
4011 ++ */
4012 ++ if (skb->ip_summed == CHECKSUM_PARTIAL &&
4013 ++ skb_checksum_help(skb))
4014 ++ return NULL;
4015 ++
4016 + /* Tag encoding */
4017 + tag = skb_put(skb, HELLCREEK_TAG_LEN);
4018 + *tag = BIT(dp->index);
4019 +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
4020 +index 70e6c87fbe3df..d747166bb291c 100644
4021 +--- a/net/ipv4/esp4.c
4022 ++++ b/net/ipv4/esp4.c
4023 +@@ -446,7 +446,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
4024 + struct page *page;
4025 + struct sk_buff *trailer;
4026 + int tailen = esp->tailen;
4027 +- unsigned int allocsz;
4028 +
4029 + /* this is non-NULL only with TCP/UDP Encapsulation */
4030 + if (x->encap) {
4031 +@@ -456,8 +455,8 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
4032 + return err;
4033 + }
4034 +
4035 +- allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
4036 +- if (allocsz > ESP_SKB_FRAG_MAXSIZE)
4037 ++ if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
4038 ++ ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
4039 + goto cow;
4040 +
4041 + if (!skb_cloned(skb)) {
4042 +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
4043 +index 5023f59a5b968..6219d97cac7a3 100644
4044 +--- a/net/ipv6/esp6.c
4045 ++++ b/net/ipv6/esp6.c
4046 +@@ -483,7 +483,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
4047 + struct page *page;
4048 + struct sk_buff *trailer;
4049 + int tailen = esp->tailen;
4050 +- unsigned int allocsz;
4051 +
4052 + if (x->encap) {
4053 + int err = esp6_output_encap(x, skb, esp);
4054 +@@ -492,8 +491,8 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
4055 + return err;
4056 + }
4057 +
4058 +- allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
4059 +- if (allocsz > ESP_SKB_FRAG_MAXSIZE)
4060 ++ if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
4061 ++ ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
4062 + goto cow;
4063 +
4064 + if (!skb_cloned(skb)) {
4065 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
4066 +index 466a5610e3ca9..869c3337e319d 100644
4067 +--- a/net/ipv6/ip6_gre.c
4068 ++++ b/net/ipv6/ip6_gre.c
4069 +@@ -733,9 +733,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
4070 + else
4071 + fl6->daddr = tunnel->parms.raddr;
4072 +
4073 +- if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
4074 +- return -ENOMEM;
4075 +-
4076 + /* Push GRE header. */
4077 + protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
4078 +
4079 +@@ -743,6 +740,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
4080 + struct ip_tunnel_info *tun_info;
4081 + const struct ip_tunnel_key *key;
4082 + __be16 flags;
4083 ++ int tun_hlen;
4084 +
4085 + tun_info = skb_tunnel_info_txcheck(skb);
4086 + if (IS_ERR(tun_info) ||
4087 +@@ -760,9 +758,12 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
4088 + dsfield = key->tos;
4089 + flags = key->tun_flags &
4090 + (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
4091 +- tunnel->tun_hlen = gre_calc_hlen(flags);
4092 ++ tun_hlen = gre_calc_hlen(flags);
4093 +
4094 +- gre_build_header(skb, tunnel->tun_hlen,
4095 ++ if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
4096 ++ return -ENOMEM;
4097 ++
4098 ++ gre_build_header(skb, tun_hlen,
4099 + flags, protocol,
4100 + tunnel_id_to_key32(tun_info->key.tun_id),
4101 + (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
4102 +@@ -772,6 +773,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
4103 + if (tunnel->parms.o_flags & TUNNEL_SEQ)
4104 + tunnel->o_seqno++;
4105 +
4106 ++ if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
4107 ++ return -ENOMEM;
4108 ++
4109 + gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
4110 + protocol, tunnel->parms.o_key,
4111 + htonl(tunnel->o_seqno));
4112 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4113 +index 6b269595efaa2..0ca7c780d97a2 100644
4114 +--- a/net/ipv6/route.c
4115 ++++ b/net/ipv6/route.c
4116 +@@ -3303,6 +3303,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
4117 + int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
4118 + int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
4119 + unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
4120 ++ unsigned int val;
4121 + int entries;
4122 +
4123 + entries = dst_entries_get_fast(ops);
4124 +@@ -3313,13 +3314,13 @@ static int ip6_dst_gc(struct dst_ops *ops)
4125 + entries <= rt_max_size)
4126 + goto out;
4127 +
4128 +- net->ipv6.ip6_rt_gc_expire++;
4129 +- fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
4130 ++ fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
4131 + entries = dst_entries_get_slow(ops);
4132 + if (entries < ops->gc_thresh)
4133 +- net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
4134 ++ atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
4135 + out:
4136 +- net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
4137 ++ val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
4138 ++ atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
4139 + return entries > rt_max_size;
4140 + }
4141 +
4142 +@@ -6528,7 +6529,7 @@ static int __net_init ip6_route_net_init(struct net *net)
4143 + net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
4144 + net->ipv6.sysctl.skip_notify_on_dev_down = 0;
4145 +
4146 +- net->ipv6.ip6_rt_gc_expire = 30*HZ;
4147 ++ atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
4148 +
4149 + ret = 0;
4150 + out:
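
The route.c hunks make ip6_rt_gc_expire an atomic_t because ip6_dst_gc() can run concurrently; the increment, the reset to rt_gc_timeout/2, and the exponential decay all become atomic ops (the read-then-store decay is still a benign race, matching the kernel code). A C11 stdatomic sketch with illustrative values:

    #include <stdio.h>
    #include <stdatomic.h>

    static atomic_uint gc_expire;

    static void run_gc(unsigned int elasticity, unsigned int gc_timeout,
                       int entries, int gc_thresh)
    {
            unsigned int val;

            /* bump, and garbage-collect with the bumped value */
            printf("fib6_run_gc(%u)\n", atomic_fetch_add(&gc_expire, 1) + 1);
            if (entries < gc_thresh)
                    atomic_store(&gc_expire, gc_timeout >> 1);

            /* exponential decay toward zero between invocations */
            val = atomic_load(&gc_expire);
            atomic_store(&gc_expire, val - (val >> elasticity));
    }

    int main(void)
    {
            atomic_store(&gc_expire, 30);
            run_gc(9, 60, 100, 1024);
            printf("gc_expire=%u\n", atomic_load(&gc_expire));
            return 0;
    }
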
4151 +diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
4152 +index 17927966abb33..8b14a24f10404 100644
4153 +--- a/net/l3mdev/l3mdev.c
4154 ++++ b/net/l3mdev/l3mdev.c
4155 +@@ -147,7 +147,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
4156 +
4157 + dev = dev_get_by_index_rcu(net, ifindex);
4158 + while (dev && !netif_is_l3_master(dev))
4159 +- dev = netdev_master_upper_dev_get(dev);
4160 ++ dev = netdev_master_upper_dev_get_rcu(dev);
4161 +
4162 + return dev ? dev->ifindex : 0;
4163 + }
4164 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
4165 +index 3a98a1316307c..31399c53dfb13 100644
4166 +--- a/net/netfilter/nf_conntrack_core.c
4167 ++++ b/net/netfilter/nf_conntrack_core.c
4168 +@@ -571,7 +571,7 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
4169 +
4170 + #define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
4171 +
4172 +-/* Released via destroy_conntrack() */
4173 ++/* Released via nf_ct_destroy() */
4174 + struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
4175 + const struct nf_conntrack_zone *zone,
4176 + gfp_t flags)
4177 +@@ -598,7 +598,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
4178 + tmpl->status = IPS_TEMPLATE;
4179 + write_pnet(&tmpl->ct_net, net);
4180 + nf_ct_zone_add(tmpl, zone);
4181 +- atomic_set(&tmpl->ct_general.use, 0);
4182 ++ refcount_set(&tmpl->ct_general.use, 1);
4183 +
4184 + return tmpl;
4185 + }
4186 +@@ -625,13 +625,12 @@ static void destroy_gre_conntrack(struct nf_conn *ct)
4187 + #endif
4188 + }
4189 +
4190 +-static void
4191 +-destroy_conntrack(struct nf_conntrack *nfct)
4192 ++void nf_ct_destroy(struct nf_conntrack *nfct)
4193 + {
4194 + struct nf_conn *ct = (struct nf_conn *)nfct;
4195 +
4196 +- pr_debug("destroy_conntrack(%p)\n", ct);
4197 +- WARN_ON(atomic_read(&nfct->use) != 0);
4198 ++ pr_debug("%s(%p)\n", __func__, ct);
4199 ++ WARN_ON(refcount_read(&nfct->use) != 0);
4200 +
4201 + if (unlikely(nf_ct_is_template(ct))) {
4202 + nf_ct_tmpl_free(ct);
4203 +@@ -656,9 +655,10 @@ destroy_conntrack(struct nf_conntrack *nfct)
4204 + if (ct->master)
4205 + nf_ct_put(ct->master);
4206 +
4207 +- pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
4208 ++ pr_debug("%s: returning ct=%p to slab\n", __func__, ct);
4209 + nf_conntrack_free(ct);
4210 + }
4211 ++EXPORT_SYMBOL(nf_ct_destroy);
4212 +
4213 + static void nf_ct_delete_from_lists(struct nf_conn *ct)
4214 + {
4215 +@@ -755,7 +755,7 @@ nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
4216 + /* caller must hold rcu readlock and none of the nf_conntrack_locks */
4217 + static void nf_ct_gc_expired(struct nf_conn *ct)
4218 + {
4219 +- if (!atomic_inc_not_zero(&ct->ct_general.use))
4220 ++ if (!refcount_inc_not_zero(&ct->ct_general.use))
4221 + return;
4222 +
4223 + if (nf_ct_should_gc(ct))
4224 +@@ -823,7 +823,7 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
4225 + * in, try to obtain a reference and re-check tuple
4226 + */
4227 + ct = nf_ct_tuplehash_to_ctrack(h);
4228 +- if (likely(atomic_inc_not_zero(&ct->ct_general.use))) {
4229 ++ if (likely(refcount_inc_not_zero(&ct->ct_general.use))) {
4230 + if (likely(nf_ct_key_equal(h, tuple, zone, net)))
4231 + goto found;
4232 +
4233 +@@ -920,7 +920,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
4234 +
4235 + smp_wmb();
4236 + /* The caller holds a reference to this object */
4237 +- atomic_set(&ct->ct_general.use, 2);
4238 ++ refcount_set(&ct->ct_general.use, 2);
4239 + __nf_conntrack_hash_insert(ct, hash, reply_hash);
4240 + nf_conntrack_double_unlock(hash, reply_hash);
4241 + NF_CT_STAT_INC(net, insert);
4242 +@@ -971,7 +971,7 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
4243 + {
4244 + struct nf_conn_tstamp *tstamp;
4245 +
4246 +- atomic_inc(&ct->ct_general.use);
4247 ++ refcount_inc(&ct->ct_general.use);
4248 + ct->status |= IPS_CONFIRMED;
4249 +
4250 + /* set conntrack timestamp, if enabled. */
4251 +@@ -1364,7 +1364,7 @@ static unsigned int early_drop_list(struct net *net,
4252 + nf_ct_is_dying(tmp))
4253 + continue;
4254 +
4255 +- if (!atomic_inc_not_zero(&tmp->ct_general.use))
4256 ++ if (!refcount_inc_not_zero(&tmp->ct_general.use))
4257 + continue;
4258 +
4259 + /* kill only if still in same netns -- might have moved due to
4260 +@@ -1513,7 +1513,7 @@ static void gc_worker(struct work_struct *work)
4261 + continue;
4262 +
4263 + /* need to take reference to avoid possible races */
4264 +- if (!atomic_inc_not_zero(&tmp->ct_general.use))
4265 ++ if (!refcount_inc_not_zero(&tmp->ct_general.use))
4266 + continue;
4267 +
4268 + if (gc_worker_skip_ct(tmp)) {
4269 +@@ -1622,7 +1622,7 @@ __nf_conntrack_alloc(struct net *net,
4270 + /* Because we use RCU lookups, we set ct_general.use to zero before
4271 + * this is inserted in any list.
4272 + */
4273 +- atomic_set(&ct->ct_general.use, 0);
4274 ++ refcount_set(&ct->ct_general.use, 0);
4275 + return ct;
4276 + out:
4277 + atomic_dec(&cnet->count);
4278 +@@ -1647,7 +1647,7 @@ void nf_conntrack_free(struct nf_conn *ct)
4279 + /* A freed object has refcnt == 0, that's
4280 + * the golden rule for SLAB_TYPESAFE_BY_RCU
4281 + */
4282 +- WARN_ON(atomic_read(&ct->ct_general.use) != 0);
4283 ++ WARN_ON(refcount_read(&ct->ct_general.use) != 0);
4284 +
4285 + nf_ct_ext_destroy(ct);
4286 + kmem_cache_free(nf_conntrack_cachep, ct);
4287 +@@ -1739,8 +1739,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
4288 + if (!exp)
4289 + __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
4290 +
4291 +- /* Now it is inserted into the unconfirmed list, bump refcount */
4292 +- nf_conntrack_get(&ct->ct_general);
4293 ++ /* Now it is inserted into the unconfirmed list, set refcount to 1. */
4294 ++ refcount_set(&ct->ct_general.use, 1);
4295 + nf_ct_add_to_unconfirmed_list(ct);
4296 +
4297 + local_bh_enable();
4298 +@@ -2352,7 +2352,7 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
4299 +
4300 + return NULL;
4301 + found:
4302 +- atomic_inc(&ct->ct_general.use);
4303 ++ refcount_inc(&ct->ct_general.use);
4304 + spin_unlock(lockp);
4305 + local_bh_enable();
4306 + return ct;
4307 +@@ -2825,7 +2825,7 @@ err_cachep:
4308 +
4309 + static struct nf_ct_hook nf_conntrack_hook = {
4310 + .update = nf_conntrack_update,
4311 +- .destroy = destroy_conntrack,
4312 ++ .destroy = nf_ct_destroy,
4313 + .get_tuple_skb = nf_conntrack_get_tuple_skb,
4314 + };
4315 +
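
The conversion from atomic_t to refcount_t above keeps the SLAB_TYPESAFE_BY_RCU discipline: freed conntracks sit at refcount 0, and lookups may only take a reference via an inc-not-zero. A userspace CAS sketch of refcount_inc_not_zero() semantics (simplified: none of the saturation or warning behavior of the kernel's refcount_t):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdatomic.h>

    static bool refcount_inc_not_zero(atomic_uint *r)
    {
            unsigned int old = atomic_load(r);

            do {
                    if (old == 0)
                            return false;   /* being destroyed: back off */
            } while (!atomic_compare_exchange_weak(r, &old, old + 1));
            return true;
    }

    int main(void)
    {
            atomic_uint use;

            atomic_store(&use, 0);                       /* freed entry */
            printf("%d\n", refcount_inc_not_zero(&use)); /* 0: lookup backs off */
            atomic_store(&use, 1);                       /* live entry */
            printf("%d\n", refcount_inc_not_zero(&use)); /* 1: reference taken */
            return 0;
    }
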
4316 +diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
4317 +index f562eeef42349..6d056ebba57c6 100644
4318 +--- a/net/netfilter/nf_conntrack_expect.c
4319 ++++ b/net/netfilter/nf_conntrack_expect.c
4320 +@@ -203,12 +203,12 @@ nf_ct_find_expectation(struct net *net,
4321 + * about to invoke ->destroy(), or nf_ct_delete() via timeout
4322 + * or early_drop().
4323 + *
4324 +- * The atomic_inc_not_zero() check tells: If that fails, we
4325 ++ * The refcount_inc_not_zero() check tells: If that fails, we
4326 + * know that the ct is being destroyed. If it succeeds, we
4327 + * can be sure the ct cannot disappear underneath.
4328 + */
4329 + if (unlikely(nf_ct_is_dying(exp->master) ||
4330 +- !atomic_inc_not_zero(&exp->master->ct_general.use)))
4331 ++ !refcount_inc_not_zero(&exp->master->ct_general.use)))
4332 + return NULL;
4333 +
4334 + if (exp->flags & NF_CT_EXPECT_PERMANENT) {
4335 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
4336 +index 1c02be04aaf5c..ef0a78aa9ba9e 100644
4337 +--- a/net/netfilter/nf_conntrack_netlink.c
4338 ++++ b/net/netfilter/nf_conntrack_netlink.c
4339 +@@ -508,7 +508,7 @@ nla_put_failure:
4340 +
4341 + static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
4342 + {
4343 +- if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
4344 ++ if (nla_put_be32(skb, CTA_USE, htonl(refcount_read(&ct->ct_general.use))))
4345 + goto nla_put_failure;
4346 + return 0;
4347 +
4348 +@@ -1200,7 +1200,7 @@ restart:
4349 + ct = nf_ct_tuplehash_to_ctrack(h);
4350 + if (nf_ct_is_expired(ct)) {
4351 + if (i < ARRAY_SIZE(nf_ct_evict) &&
4352 +- atomic_inc_not_zero(&ct->ct_general.use))
4353 ++ refcount_inc_not_zero(&ct->ct_general.use))
4354 + nf_ct_evict[i++] = ct;
4355 + continue;
4356 + }
4357 +@@ -1748,7 +1748,7 @@ restart:
4358 + NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
4359 + ct, dying ? true : false, 0);
4360 + if (res < 0) {
4361 +- if (!atomic_inc_not_zero(&ct->ct_general.use))
4362 ++ if (!refcount_inc_not_zero(&ct->ct_general.use))
4363 + continue;
4364 + cb->args[0] = cpu;
4365 + cb->args[1] = (unsigned long)ct;
4366 +diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
4367 +index 80f675d884b26..3e1afd10a9b60 100644
4368 +--- a/net/netfilter/nf_conntrack_standalone.c
4369 ++++ b/net/netfilter/nf_conntrack_standalone.c
4370 +@@ -303,7 +303,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
4371 + int ret = 0;
4372 +
4373 + WARN_ON(!ct);
4374 +- if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
4375 ++ if (unlikely(!refcount_inc_not_zero(&ct->ct_general.use)))
4376 + return 0;
4377 +
4378 + if (nf_ct_should_gc(ct)) {
4379 +@@ -370,7 +370,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
4380 + ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR);
4381 + ct_show_delta_time(s, ct);
4382 +
4383 +- seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
4384 ++ seq_printf(s, "use=%u\n", refcount_read(&ct->ct_general.use));
4385 +
4386 + if (seq_has_overflowed(s))
4387 + goto release;
4388 +diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
4389 +index ed37bb9b4e588..b90eca7a2f22b 100644
4390 +--- a/net/netfilter/nf_flow_table_core.c
4391 ++++ b/net/netfilter/nf_flow_table_core.c
4392 +@@ -48,7 +48,7 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
4393 + struct flow_offload *flow;
4394 +
4395 + if (unlikely(nf_ct_is_dying(ct) ||
4396 +- !atomic_inc_not_zero(&ct->ct_general.use)))
4397 ++ !refcount_inc_not_zero(&ct->ct_general.use)))
4398 + return NULL;
4399 +
4400 + flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
4401 +diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
4402 +index 3d6d49420db8b..2dfc5dae06563 100644
4403 +--- a/net/netfilter/nf_synproxy_core.c
4404 ++++ b/net/netfilter/nf_synproxy_core.c
4405 +@@ -349,7 +349,6 @@ static int __net_init synproxy_net_init(struct net *net)
4406 + goto err2;
4407 +
4408 + __set_bit(IPS_CONFIRMED_BIT, &ct->status);
4409 +- nf_conntrack_get(&ct->ct_general);
4410 + snet->tmpl = ct;
4411 +
4412 + snet->stats = alloc_percpu(struct synproxy_stats);
4413 +diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
4414 +index 54ecb9fbf2de6..9c7472af9e4a1 100644
4415 +--- a/net/netfilter/nft_ct.c
4416 ++++ b/net/netfilter/nft_ct.c
4417 +@@ -259,10 +259,13 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
4418 +
4419 + ct = this_cpu_read(nft_ct_pcpu_template);
4420 +
4421 +- if (likely(atomic_read(&ct->ct_general.use) == 1)) {
4422 ++ if (likely(refcount_read(&ct->ct_general.use) == 1)) {
4423 ++ refcount_inc(&ct->ct_general.use);
4424 + nf_ct_zone_add(ct, &zone);
4425 + } else {
4426 +- /* previous skb got queued to userspace */
4427 ++ /* previous skb got queued to userspace, allocate temporary
4428 ++ * one until percpu template can be reused.
4429 ++ */
4430 + ct = nf_ct_tmpl_alloc(nft_net(pkt), &zone, GFP_ATOMIC);
4431 + if (!ct) {
4432 + regs->verdict.code = NF_DROP;
4433 +@@ -270,7 +273,6 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
4434 + }
4435 + }
4436 +
4437 +- atomic_inc(&ct->ct_general.use);
4438 + nf_ct_set(skb, ct, IP_CT_NEW);
4439 + }
4440 + #endif
4441 +@@ -375,7 +377,6 @@ static bool nft_ct_tmpl_alloc_pcpu(void)
4442 + return false;
4443 + }
4444 +
4445 +- atomic_set(&tmp->ct_general.use, 1);
4446 + per_cpu(nft_ct_pcpu_template, cpu) = tmp;
4447 + }
4448 +
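
The nft_ct hunks rework how the per-CPU conntrack template is handed out: it is reused only when its refcount reads 1 (this CPU is the sole owner), in which case the extra reference is taken inside that branch; otherwise a previously queued packet still holds it and a temporary template is allocated instead. The unconditional atomic_inc() and the manual atomic_set() at allocation time become unnecessary because nf_ct_tmpl_alloc() now hands back a template with its refcount already initialized. A rough single-"CPU" sketch of the reuse-or-allocate pattern (struct tmpl and get_template() are illustrative, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct tmpl {
        atomic_uint use;
    };

    static struct tmpl pcpu_tmpl = { 1 };      /* the per-CPU template */

    static struct tmpl *get_template(void)
    {
        struct tmpl *t = &pcpu_tmpl;

        if (atomic_load(&t->use) == 1) {
            /* Sole owner: hand the caller an extra reference. */
            atomic_fetch_add(&t->use, 1);
            return t;
        }
        /* A previously queued packet still owns the template, so fall
         * back to a fresh allocation with a refcount of 1. */
        t = malloc(sizeof(*t));
        if (t)
            atomic_init(&t->use, 1);
        return t;
    }

    int main(void)
    {
        struct tmpl *a = get_template();       /* reuses: use goes 1 -> 2 */
        struct tmpl *b = get_template();       /* use != 1: allocates */

        printf("a reused: %d, b fresh: %d\n",
               a == &pcpu_tmpl, b && b != &pcpu_tmpl);
        if (b && b != &pcpu_tmpl)
            free(b);
        return 0;
    }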
4449 +diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
4450 +index 0a913ce07425a..267757b0392a6 100644
4451 +--- a/net/netfilter/xt_CT.c
4452 ++++ b/net/netfilter/xt_CT.c
4453 +@@ -24,7 +24,7 @@ static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct)
4454 + return XT_CONTINUE;
4455 +
4456 + if (ct) {
4457 +- atomic_inc(&ct->ct_general.use);
4458 ++ refcount_inc(&ct->ct_general.use);
4459 + nf_ct_set(skb, ct, IP_CT_NEW);
4460 + } else {
4461 + nf_ct_set(skb, ct, IP_CT_UNTRACKED);
4462 +@@ -201,7 +201,6 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
4463 + goto err4;
4464 + }
4465 + __set_bit(IPS_CONFIRMED_BIT, &ct->status);
4466 +- nf_conntrack_get(&ct->ct_general);
4467 + out:
4468 + info->ct = ct;
4469 + return 0;
4470 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
4471 +index 83ca93b32f5f7..fb7f7b17c78c0 100644
4472 +--- a/net/netlink/af_netlink.c
4473 ++++ b/net/netlink/af_netlink.c
4474 +@@ -2284,6 +2284,13 @@ static int netlink_dump(struct sock *sk)
4475 + * single netdev. The outcome is MSG_TRUNC error.
4476 + */
4477 + skb_reserve(skb, skb_tailroom(skb) - alloc_size);
4478 ++
4479 ++ /* Make sure malicious BPF programs cannot read uninitialized memory
4480 ++ * from skb->head -> skb->data
4481 ++ */
4482 ++ skb_reset_network_header(skb);
4483 ++ skb_reset_mac_header(skb);
4484 ++
4485 + netlink_skb_set_owner_r(skb, sk);
4486 +
4487 + if (nlk->dump_done_errno > 0) {
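
The af_netlink change closes a hole opened by the skb_reserve() call just above it: reserving headroom moves skb->data forward, but the MAC and network header offsets still pointed at the start of the never-initialized gap, which a BPF socket filter attached to the netlink socket could then read. Resetting both headers pins them to the current data offset. A toy model of the offsets involved (struct toy_skb and its helpers are inventions for illustration, not the kernel's sk_buff API):

    #include <stdio.h>

    struct toy_skb {
        unsigned char buf[256];     /* backing storage, deliberately not zeroed */
        size_t data;                /* offset of the payload ("skb->data") */
        size_t mac_header;          /* header offsets, as in struct sk_buff */
        size_t network_header;
    };

    static void toy_reserve(struct toy_skb *skb, size_t headroom)
    {
        skb->data += headroom;      /* skb_reserve(): push data forward */
    }

    static void toy_reset_headers(struct toy_skb *skb)
    {
        /* The skb_reset_network_header()/skb_reset_mac_header() step:
         * make the header offsets coincide with the data offset so no
         * reader can address the uninitialized gap before it. */
        skb->mac_header = skb->data;
        skb->network_header = skb->data;
    }

    int main(void)
    {
        struct toy_skb skb = { .data = 0, .mac_header = 0, .network_header = 0 };

        toy_reserve(&skb, 64);
        /* Without the reset, mac_header (0) < data (64): a reader using
         * the header offset would see 64 never-written bytes. */
        toy_reset_headers(&skb);
        printf("data=%zu mac=%zu net=%zu\n",
               skb.data, skb.mac_header, skb.network_header);
        return 0;
    }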
4488 +diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
4489 +index f2b64cab9af70..815916056e0de 100644
4490 +--- a/net/openvswitch/conntrack.c
4491 ++++ b/net/openvswitch/conntrack.c
4492 +@@ -1722,7 +1722,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
4493 + goto err_free_ct;
4494 +
4495 + __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
4496 +- nf_conntrack_get(&ct_info.ct->ct_general);
4497 + return 0;
4498 + err_free_ct:
4499 + __ovs_ct_free_action(&ct_info);
4500 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
4501 +index c591b923016a6..d77c21ff066c9 100644
4502 +--- a/net/openvswitch/flow_netlink.c
4503 ++++ b/net/openvswitch/flow_netlink.c
4504 +@@ -2436,7 +2436,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
4505 + new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
4506 +
4507 + if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
4508 +- if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
4509 ++ if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
4510 + OVS_NLERR(log, "Flow action size exceeds max %u",
4511 + MAX_ACTIONS_BUFSIZE);
4512 + return ERR_PTR(-EMSGSIZE);
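
The openvswitch change replaces a subtraction-based bounds check with an addition-based one. Both operands are unsigned, so once next_offset already exceeds MAX_ACTIONS_BUFSIZE the old expression MAX_ACTIONS_BUFSIZE - next_offset wraps around to a huge value and the oversize request slips through. A small standalone demonstration (MAX is a stand-in for the kernel's MAX_ACTIONS_BUFSIZE, which is 32 KiB):

    #include <stdio.h>

    #define MAX 32768u

    int main(void)
    {
        unsigned int next_offset = 40000;   /* already past the cap */
        unsigned int req_size = 100;

        /* Old check: MAX - next_offset underflows to ~4 billion, so the
         * oversize request is not rejected. */
        if ((MAX - next_offset) < req_size)
            puts("old check: rejected");
        else
            puts("old check: wrongly accepted");

        /* New check: no subtraction, hence no wraparound at these
         * magnitudes (both values are far below UINT_MAX). */
        if ((next_offset + req_size) > MAX)
            puts("new check: rejected");
        return 0;
    }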
4513 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
4514 +index c0d4a65931de5..88c3b5cf8d94c 100644
4515 +--- a/net/packet/af_packet.c
4516 ++++ b/net/packet/af_packet.c
4517 +@@ -2820,8 +2820,9 @@ tpacket_error:
4518 +
4519 + status = TP_STATUS_SEND_REQUEST;
4520 + err = po->xmit(skb);
4521 +- if (unlikely(err > 0)) {
4522 +- err = net_xmit_errno(err);
4523 ++ if (unlikely(err != 0)) {
4524 ++ if (err > 0)
4525 ++ err = net_xmit_errno(err);
4526 + if (err && __packet_get_status(po, ph) ==
4527 + TP_STATUS_AVAILABLE) {
4528 + /* skb was destructed already */
4529 +@@ -3022,8 +3023,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
4530 + skb->no_fcs = 1;
4531 +
4532 + err = po->xmit(skb);
4533 +- if (err > 0 && (err = net_xmit_errno(err)) != 0)
4534 +- goto out_unlock;
4535 ++ if (unlikely(err != 0)) {
4536 ++ if (err > 0)
4537 ++ err = net_xmit_errno(err);
4538 ++ if (err)
4539 ++ goto out_unlock;
4540 ++ }
4541 +
4542 + dev_put(dev);
4543 +
4544 +diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
4545 +index f15d6942da453..cc7e30733feb0 100644
4546 +--- a/net/rxrpc/net_ns.c
4547 ++++ b/net/rxrpc/net_ns.c
4548 +@@ -113,7 +113,9 @@ static __net_exit void rxrpc_exit_net(struct net *net)
4549 + struct rxrpc_net *rxnet = rxrpc_net(net);
4550 +
4551 + rxnet->live = false;
4552 ++ del_timer_sync(&rxnet->peer_keepalive_timer);
4553 + cancel_work_sync(&rxnet->peer_keepalive_work);
4554 ++ /* Remove the timer again as the worker may have restarted it. */
4555 + del_timer_sync(&rxnet->peer_keepalive_timer);
4556 + rxrpc_destroy_all_calls(rxnet);
4557 + rxrpc_destroy_all_connections(rxnet);
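
The rxrpc hunk is about shutdown ordering when a timer queues work and the worker re-arms the timer: deleting the timer once is not enough, because an already-queued worker can restart it, so the sequence is delete the timer, flush the work, then delete the timer again. A single-threaded sketch of why the second deletion is needed (timer_armed and work_pending are stand-ins for the real timer and workqueue machinery):

    #include <stdbool.h>
    #include <stdio.h>

    static bool timer_armed;
    static bool work_pending;

    /* The keepalive worker re-arms the timer when it runs. */
    static void keepalive_worker(void)
    {
        timer_armed = true;
    }

    /* cancel_work_sync(): a queued instance still runs to completion. */
    static void cancel_work(void)
    {
        if (work_pending) {
            work_pending = false;
            keepalive_worker();
        }
    }

    int main(void)
    {
        timer_armed = true;
        work_pending = true;        /* timer fired and queued the worker */

        timer_armed = false;        /* first del_timer_sync() */
        cancel_work();              /* worker runs and re-arms the timer */
        printf("after cancel_work_sync: timer %s\n",
               timer_armed ? "armed again" : "off");
        timer_armed = false;        /* second del_timer_sync() */
        return 0;
    }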
4558 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
4559 +index 553bf41671a65..f4fd584fba08a 100644
4560 +--- a/net/sched/act_ct.c
4561 ++++ b/net/sched/act_ct.c
4562 +@@ -1232,7 +1232,6 @@ static int tcf_ct_fill_params(struct net *net,
4563 + return -ENOMEM;
4564 + }
4565 + __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
4566 +- nf_conntrack_get(&tmpl->ct_general);
4567 + p->tmpl = tmpl;
4568 +
4569 + return 0;
4570 +diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
4571 +index 4272814487f09..5d30db0d71574 100644
4572 +--- a/net/sched/cls_u32.c
4573 ++++ b/net/sched/cls_u32.c
4574 +@@ -386,14 +386,19 @@ static int u32_init(struct tcf_proto *tp)
4575 + return 0;
4576 + }
4577 +
4578 +-static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
4579 ++static void __u32_destroy_key(struct tc_u_knode *n)
4580 + {
4581 + struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
4582 +
4583 + tcf_exts_destroy(&n->exts);
4584 +- tcf_exts_put_net(&n->exts);
4585 + if (ht && --ht->refcnt == 0)
4586 + kfree(ht);
4587 ++ kfree(n);
4588 ++}
4589 ++
4590 ++static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
4591 ++{
4592 ++ tcf_exts_put_net(&n->exts);
4593 + #ifdef CONFIG_CLS_U32_PERF
4594 + if (free_pf)
4595 + free_percpu(n->pf);
4596 +@@ -402,8 +407,7 @@ static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
4597 + if (free_pf)
4598 + free_percpu(n->pcpu_success);
4599 + #endif
4600 +- kfree(n);
4601 +- return 0;
4602 ++ __u32_destroy_key(n);
4603 + }
4604 +
4605 + /* u32_delete_key_rcu should be called when free'ing a copied
4606 +@@ -810,10 +814,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
4607 + new->flags = n->flags;
4608 + RCU_INIT_POINTER(new->ht_down, ht);
4609 +
4610 +- /* bump reference count as long as we hold pointer to structure */
4611 +- if (ht)
4612 +- ht->refcnt++;
4613 +-
4614 + #ifdef CONFIG_CLS_U32_PERF
4615 + /* Statistics may be incremented by readers during update
4616 + * so we must keep them intact. When the node is later destroyed
4617 +@@ -835,6 +835,10 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
4618 + return NULL;
4619 + }
4620 +
4621 ++ /* bump reference count as long as we hold pointer to structure */
4622 ++ if (ht)
4623 ++ ht->refcnt++;
4624 ++
4625 + return new;
4626 + }
4627 +
4628 +@@ -898,13 +902,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
4629 + tca[TCA_RATE], flags, extack);
4630 +
4631 + if (err) {
4632 +- u32_destroy_key(new, false);
4633 ++ __u32_destroy_key(new);
4634 + return err;
4635 + }
4636 +
4637 + err = u32_replace_hw_knode(tp, new, flags, extack);
4638 + if (err) {
4639 +- u32_destroy_key(new, false);
4640 ++ __u32_destroy_key(new);
4641 + return err;
4642 + }
4643 +
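
The cls_u32 refactor splits teardown in two: __u32_destroy_key() frees only what every node owns (its exts, the ht_down reference, the node itself), while u32_destroy_key() additionally releases the network reference and per-CPU counters. Error paths in u32_change() operate on a half-built copy that never took those extra resources, so they now call the bare helper and cannot over-release. A simplified sketch of the split (struct knode, net_ref and the helper names here are illustrative; the kernel versions also deal with RCU and per-CPU statistics):

    #include <stdlib.h>

    struct htable {
        int refcnt;
    };

    struct knode {
        struct htable *ht_down;     /* optional shared hash table */
        void *net_ref;              /* stands in for the tcf_exts net ref */
    };

    /* Free only what every knode owns, half-built copies included. */
    static void bare_destroy_key(struct knode *n)
    {
        if (n->ht_down && --n->ht_down->refcnt == 0)
            free(n->ht_down);
        free(n);
    }

    /* Full teardown for completely initialized nodes. */
    static void destroy_key(struct knode *n)
    {
        free(n->net_ref);           /* release the extra resources first */
        bare_destroy_key(n);
    }

    int main(void)
    {
        struct knode *half = calloc(1, sizeof(*half));
        struct knode *full = calloc(1, sizeof(*full));

        if (!half || !full)
            return 1;
        bare_destroy_key(half);     /* error path on a half-built copy */
        full->net_ref = malloc(16);
        destroy_key(full);          /* normal path */
        return 0;
    }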
4644 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
4645 +index fa8897497dcc9..499058248bdb8 100644
4646 +--- a/net/smc/af_smc.c
4647 ++++ b/net/smc/af_smc.c
4648 +@@ -2332,8 +2332,10 @@ static int smc_shutdown(struct socket *sock, int how)
4649 + if (smc->use_fallback) {
4650 + rc = kernel_sock_shutdown(smc->clcsock, how);
4651 + sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
4652 +- if (sk->sk_shutdown == SHUTDOWN_MASK)
4653 ++ if (sk->sk_shutdown == SHUTDOWN_MASK) {
4654 + sk->sk_state = SMC_CLOSED;
4655 ++ sock_put(sk);
4656 ++ }
4657 + goto out;
4658 + }
4659 + switch (how) {
4660 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4661 +index 472d81679a27d..24da843f39a11 100644
4662 +--- a/sound/pci/hda/patch_hdmi.c
4663 ++++ b/sound/pci/hda/patch_hdmi.c
4664 +@@ -1387,7 +1387,7 @@ static int hdmi_find_pcm_slot(struct hdmi_spec *spec,
4665 +
4666 + last_try:
4667 + /* the last try; check the empty slots in pins */
4668 +- for (i = 0; i < spec->num_nids; i++) {
4669 ++ for (i = 0; i < spec->pcm_used; i++) {
4670 + if (!test_bit(i, &spec->pcm_bitmap))
4671 + return i;
4672 + }
4673 +@@ -2263,7 +2263,9 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
4674 + * dev_num is the device entry number in a pin
4675 + */
4676 +
4677 +- if (codec->mst_no_extra_pcms)
4678 ++ if (spec->dyn_pcm_no_legacy && codec->mst_no_extra_pcms)
4679 ++ pcm_num = spec->num_cvts;
4680 ++ else if (codec->mst_no_extra_pcms)
4681 + pcm_num = spec->num_nids;
4682 + else
4683 + pcm_num = spec->num_nids + spec->dev_num - 1;
4684 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4685 +index 5ae20cbbd5c06..9771300683c47 100644
4686 +--- a/sound/pci/hda/patch_realtek.c
4687 ++++ b/sound/pci/hda/patch_realtek.c
4688 +@@ -8962,6 +8962,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4689 + SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
4690 + SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4691 + SND_PCI_QUIRK(0x1558, 0x866d, "Clevo NP5[05]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4692 ++ SND_PCI_QUIRK(0x1558, 0x867c, "Clevo NP7[01]PNP", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4693 + SND_PCI_QUIRK(0x1558, 0x867d, "Clevo NP7[01]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4694 + SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
4695 + SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME),
4696 +diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
4697 +index 8a55d59a6c2aa..d243de5f23dc1 100644
4698 +--- a/sound/soc/atmel/sam9g20_wm8731.c
4699 ++++ b/sound/soc/atmel/sam9g20_wm8731.c
4700 +@@ -46,35 +46,6 @@
4701 + */
4702 + #undef ENABLE_MIC_INPUT
4703 +
4704 +-static struct clk *mclk;
4705 +-
4706 +-static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
4707 +- struct snd_soc_dapm_context *dapm,
4708 +- enum snd_soc_bias_level level)
4709 +-{
4710 +- static int mclk_on;
4711 +- int ret = 0;
4712 +-
4713 +- switch (level) {
4714 +- case SND_SOC_BIAS_ON:
4715 +- case SND_SOC_BIAS_PREPARE:
4716 +- if (!mclk_on)
4717 +- ret = clk_enable(mclk);
4718 +- if (ret == 0)
4719 +- mclk_on = 1;
4720 +- break;
4721 +-
4722 +- case SND_SOC_BIAS_OFF:
4723 +- case SND_SOC_BIAS_STANDBY:
4724 +- if (mclk_on)
4725 +- clk_disable(mclk);
4726 +- mclk_on = 0;
4727 +- break;
4728 +- }
4729 +-
4730 +- return ret;
4731 +-}
4732 +-
4733 + static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
4734 + SND_SOC_DAPM_MIC("Int Mic", NULL),
4735 + SND_SOC_DAPM_SPK("Ext Spk", NULL),
4736 +@@ -135,7 +106,6 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
4737 + .owner = THIS_MODULE,
4738 + .dai_link = &at91sam9g20ek_dai,
4739 + .num_links = 1,
4740 +- .set_bias_level = at91sam9g20ek_set_bias_level,
4741 +
4742 + .dapm_widgets = at91sam9g20ek_dapm_widgets,
4743 + .num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
4744 +@@ -148,7 +118,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
4745 + {
4746 + struct device_node *np = pdev->dev.of_node;
4747 + struct device_node *codec_np, *cpu_np;
4748 +- struct clk *pllb;
4749 + struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
4750 + int ret;
4751 +
4752 +@@ -162,31 +131,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
4753 + return -EINVAL;
4754 + }
4755 +
4756 +- /*
4757 +- * Codec MCLK is supplied by PCK0 - set it up.
4758 +- */
4759 +- mclk = clk_get(NULL, "pck0");
4760 +- if (IS_ERR(mclk)) {
4761 +- dev_err(&pdev->dev, "Failed to get MCLK\n");
4762 +- ret = PTR_ERR(mclk);
4763 +- goto err;
4764 +- }
4765 +-
4766 +- pllb = clk_get(NULL, "pllb");
4767 +- if (IS_ERR(pllb)) {
4768 +- dev_err(&pdev->dev, "Failed to get PLLB\n");
4769 +- ret = PTR_ERR(pllb);
4770 +- goto err_mclk;
4771 +- }
4772 +- ret = clk_set_parent(mclk, pllb);
4773 +- clk_put(pllb);
4774 +- if (ret != 0) {
4775 +- dev_err(&pdev->dev, "Failed to set MCLK parent\n");
4776 +- goto err_mclk;
4777 +- }
4778 +-
4779 +- clk_set_rate(mclk, MCLK_RATE);
4780 +-
4781 + card->dev = &pdev->dev;
4782 +
4783 + /* Parse device node info */
4784 +@@ -230,9 +174,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
4785 +
4786 + return ret;
4787 +
4788 +-err_mclk:
4789 +- clk_put(mclk);
4790 +- mclk = NULL;
4791 + err:
4792 + atmel_ssc_put_audio(0);
4793 + return ret;
4794 +@@ -242,8 +183,6 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
4795 + {
4796 + struct snd_soc_card *card = platform_get_drvdata(pdev);
4797 +
4798 +- clk_disable(mclk);
4799 +- mclk = NULL;
4800 + snd_soc_unregister_card(card);
4801 + atmel_ssc_put_audio(0);
4802 +
4803 +diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
4804 +index 9ad7fc0baf072..20a07c92b2fc2 100644
4805 +--- a/sound/soc/codecs/msm8916-wcd-digital.c
4806 ++++ b/sound/soc/codecs/msm8916-wcd-digital.c
4807 +@@ -1206,9 +1206,16 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
4808 +
4809 + dev_set_drvdata(dev, priv);
4810 +
4811 +- return devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
4812 ++ ret = devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
4813 + msm8916_wcd_digital_dai,
4814 + ARRAY_SIZE(msm8916_wcd_digital_dai));
4815 ++ if (ret)
4816 ++ goto err_mclk;
4817 ++
4818 ++ return 0;
4819 ++
4820 ++err_mclk:
4821 ++ clk_disable_unprepare(priv->mclk);
4822 + err_clk:
4823 + clk_disable_unprepare(priv->ahbclk);
4824 + return ret;
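
The msm8916-wcd-digital fix completes the probe function's unwind chain: component registration can fail after both clocks are prepared, and the original code returned without disabling the MCLK. The standard pattern releases resources in reverse acquisition order via cascading labels. A skeletal sketch of that pattern (prepare_a/prepare_b/register_x and friends are illustrative stand-ins for the clock and component calls):

    #include <stdio.h>

    static int prepare_a(void)    { return 0; }
    static int prepare_b(void)    { return 0; }
    static int register_x(void)   { return -1; }   /* force the error path */
    static void unprepare_a(void) { puts("unprepare a"); }
    static void unprepare_b(void) { puts("unprepare b"); }

    static int probe(void)
    {
        int ret = prepare_a();

        if (ret)
            return ret;
        ret = prepare_b();
        if (ret)
            goto err_a;
        ret = register_x();
        if (ret)
            goto err_b;             /* the unwind step the patch adds */
        return 0;

    err_b:
        unprepare_b();              /* reverse order: b, then a */
    err_a:
        unprepare_a();
        return ret;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }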
4825 +diff --git a/sound/soc/codecs/rk817_codec.c b/sound/soc/codecs/rk817_codec.c
4826 +index 8fffe378618d0..cce6f4e7992f5 100644
4827 +--- a/sound/soc/codecs/rk817_codec.c
4828 ++++ b/sound/soc/codecs/rk817_codec.c
4829 +@@ -489,7 +489,7 @@ static int rk817_platform_probe(struct platform_device *pdev)
4830 +
4831 + rk817_codec_parse_dt_property(&pdev->dev, rk817_codec_data);
4832 +
4833 +- rk817_codec_data->mclk = clk_get(pdev->dev.parent, "mclk");
4834 ++ rk817_codec_data->mclk = devm_clk_get(pdev->dev.parent, "mclk");
4835 + if (IS_ERR(rk817_codec_data->mclk)) {
4836 + dev_dbg(&pdev->dev, "Unable to get mclk\n");
4837 + ret = -ENXIO;
4838 +diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
4839 +index 7b99318070cfa..144046864d157 100644
4840 +--- a/sound/soc/codecs/wcd934x.c
4841 ++++ b/sound/soc/codecs/wcd934x.c
4842 +@@ -1274,29 +1274,7 @@ static int wcd934x_set_sido_input_src(struct wcd934x_codec *wcd, int sido_src)
4843 + if (sido_src == wcd->sido_input_src)
4844 + return 0;
4845 +
4846 +- if (sido_src == SIDO_SOURCE_INTERNAL) {
4847 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
4848 +- WCD934X_ANA_BUCK_HI_ACCU_EN_MASK, 0);
4849 +- usleep_range(100, 110);
4850 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
4851 +- WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK, 0x0);
4852 +- usleep_range(100, 110);
4853 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
4854 +- WCD934X_ANA_RCO_BG_EN_MASK, 0);
4855 +- usleep_range(100, 110);
4856 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
4857 +- WCD934X_ANA_BUCK_PRE_EN1_MASK,
4858 +- WCD934X_ANA_BUCK_PRE_EN1_ENABLE);
4859 +- usleep_range(100, 110);
4860 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
4861 +- WCD934X_ANA_BUCK_PRE_EN2_MASK,
4862 +- WCD934X_ANA_BUCK_PRE_EN2_ENABLE);
4863 +- usleep_range(100, 110);
4864 +- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
4865 +- WCD934X_ANA_BUCK_HI_ACCU_EN_MASK,
4866 +- WCD934X_ANA_BUCK_HI_ACCU_ENABLE);
4867 +- usleep_range(100, 110);
4868 +- } else if (sido_src == SIDO_SOURCE_RCO_BG) {
4869 ++ if (sido_src == SIDO_SOURCE_RCO_BG) {
4870 + regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
4871 + WCD934X_ANA_RCO_BG_EN_MASK,
4872 + WCD934X_ANA_RCO_BG_ENABLE);
4873 +@@ -1382,8 +1360,6 @@ static int wcd934x_disable_ana_bias_and_syclk(struct wcd934x_codec *wcd)
4874 + regmap_update_bits(wcd->regmap, WCD934X_CLK_SYS_MCLK_PRG,
4875 + WCD934X_EXT_CLK_BUF_EN_MASK |
4876 + WCD934X_MCLK_EN_MASK, 0x0);
4877 +- wcd934x_set_sido_input_src(wcd, SIDO_SOURCE_INTERNAL);
4878 +-
4879 + regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
4880 + WCD934X_ANA_BIAS_EN_MASK, 0);
4881 + regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
4882 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
4883 +index 0479bb0005abd..0b166e074457f 100644
4884 +--- a/sound/soc/soc-dapm.c
4885 ++++ b/sound/soc/soc-dapm.c
4886 +@@ -1685,8 +1685,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
4887 + switch (w->id) {
4888 + case snd_soc_dapm_pre:
4889 + if (!w->event)
4890 +- list_for_each_entry_safe_continue(w, n, list,
4891 +- power_list);
4892 ++ continue;
4893 +
4894 + if (event == SND_SOC_DAPM_STREAM_START)
4895 + ret = w->event(w,
4896 +@@ -1698,8 +1697,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
4897 +
4898 + case snd_soc_dapm_post:
4899 + if (!w->event)
4900 +- list_for_each_entry_safe_continue(w, n, list,
4901 +- power_list);
4902 ++ continue;
4903 +
4904 + if (event == SND_SOC_DAPM_STREAM_START)
4905 + ret = w->event(w,
4906 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
4907 +index 561eddfc8c22c..eff8d4f715611 100644
4908 +--- a/sound/soc/soc-topology.c
4909 ++++ b/sound/soc/soc-topology.c
4910 +@@ -1481,12 +1481,12 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
4911 + template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
4912 + kc = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(*kc), GFP_KERNEL);
4913 + if (!kc)
4914 +- goto err;
4915 ++ goto hdr_err;
4916 +
4917 + kcontrol_type = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(unsigned int),
4918 + GFP_KERNEL);
4919 + if (!kcontrol_type)
4920 +- goto err;
4921 ++ goto hdr_err;
4922 +
4923 + for (i = 0; i < w->num_kcontrols; i++) {
4924 + control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
4925 +diff --git a/sound/usb/midi.c b/sound/usb/midi.c
4926 +index 2c01649c70f61..7c6ca2b433a53 100644
4927 +--- a/sound/usb/midi.c
4928 ++++ b/sound/usb/midi.c
4929 +@@ -1194,6 +1194,7 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
4930 + } while (drain_urbs && timeout);
4931 + finish_wait(&ep->drain_wait, &wait);
4932 + }
4933 ++ port->active = 0;
4934 + spin_unlock_irq(&ep->buffer_lock);
4935 + }
4936 +
4937 +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
4938 +index 167834133b9bc..b8359a0aa008a 100644
4939 +--- a/sound/usb/usbaudio.h
4940 ++++ b/sound/usb/usbaudio.h
4941 +@@ -8,7 +8,7 @@
4942 + */
4943 +
4944 + /* handling of USB vendor/product ID pairs as 32-bit numbers */
4945 +-#define USB_ID(vendor, product) (((vendor) << 16) | (product))
4946 ++#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))
4947 + #define USB_ID_VENDOR(id) ((id) >> 16)
4948 + #define USB_ID_PRODUCT(id) ((u16)(id))
4949 +
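
The usbaudio.h change matters for vendor IDs with the top bit set (0x8000 and above): shifting such a value left by 16 as a plain int puts a 1 in the sign bit, which is formally undefined behaviour in C and, on typical two's-complement compilers, sign-extends when the result is widened. Casting to unsigned int keeps the whole macro in unsigned arithmetic. A standalone demonstration (the "old" output is what common compilers happen to produce; the unfixed macro has no defined result):

    #include <stdint.h>
    #include <stdio.h>

    #define USB_ID_OLD(vendor, product) (((vendor) << 16) | (product))
    #define USB_ID_NEW(vendor, product) (((unsigned int)(vendor) << 16) | (product))

    int main(void)
    {
        /* 0x8087 has bit 15 set, so the shifted value lands in bit 31. */
        uint64_t old_id = (uint64_t)USB_ID_OLD(0x8087, 0x0001);
        uint64_t new_id = (uint64_t)USB_ID_NEW(0x8087, 0x0001);

        printf("old: 0x%016llx\n", (unsigned long long)old_id);
        /* typically 0xffffffff80870001: sign-extended garbage */
        printf("new: 0x%016llx\n", (unsigned long long)new_id);
        /* 0x0000000080870001: the intended 32-bit ID */
        return 0;
    }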
4950 +diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
4951 +index e37dfad31383f..5146ff0fa078c 100644
4952 +--- a/tools/lib/perf/evlist.c
4953 ++++ b/tools/lib/perf/evlist.c
4954 +@@ -577,7 +577,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
4955 + {
4956 + struct perf_evsel *evsel;
4957 + const struct perf_cpu_map *cpus = evlist->cpus;
4958 +- const struct perf_thread_map *threads = evlist->threads;
4959 +
4960 + if (!ops || !ops->get || !ops->mmap)
4961 + return -EINVAL;
4962 +@@ -589,7 +588,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
4963 + perf_evlist__for_each_entry(evlist, evsel) {
4964 + if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
4965 + evsel->sample_id == NULL &&
4966 +- perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
4967 ++ perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
4968 + return -ENOMEM;
4969 + }
4970 +
4971 +diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
4972 +index 997e0a4b0902a..6583ad9cc7deb 100644
4973 +--- a/tools/perf/builtin-report.c
4974 ++++ b/tools/perf/builtin-report.c
4975 +@@ -349,6 +349,7 @@ static int report__setup_sample_type(struct report *rep)
4976 + struct perf_session *session = rep->session;
4977 + u64 sample_type = evlist__combined_sample_type(session->evlist);
4978 + bool is_pipe = perf_data__is_pipe(session->data);
4979 ++ struct evsel *evsel;
4980 +
4981 + if (session->itrace_synth_opts->callchain ||
4982 + session->itrace_synth_opts->add_callchain ||
4983 +@@ -403,6 +404,19 @@ static int report__setup_sample_type(struct report *rep)
4984 + }
4985 +
4986 + if (sort__mode == SORT_MODE__MEMORY) {
4987 ++ /*
4988 ++ * FIXUP: prior to kernel 5.18, Arm SPE failed to set the
4989 ++ * PERF_SAMPLE_DATA_SRC bit in the sample type. For backward
4990 ++ * compatibility, set the bit if it's an old perf data file.
4991 ++ */
4992 ++ evlist__for_each_entry(session->evlist, evsel) {
4993 ++ if (strstr(evsel->name, "arm_spe") &&
4994 ++ !(sample_type & PERF_SAMPLE_DATA_SRC)) {
4995 ++ evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
4996 ++ sample_type |= PERF_SAMPLE_DATA_SRC;
4997 ++ }
4998 ++ }
4999 ++
5000 + if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
5001 + ui__error("Selected --mem-mode but no mem data. "
5002 + "Did you call perf record without -d?\n");
5003 +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
5004 +index 18b56256bb6ff..cb3d81adf5ca8 100644
5005 +--- a/tools/perf/builtin-script.c
5006 ++++ b/tools/perf/builtin-script.c
5007 +@@ -455,7 +455,7 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
5008 + return -EINVAL;
5009 +
5010 + if (PRINT_FIELD(DATA_SRC) &&
5011 +- evsel__check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC))
5012 ++ evsel__do_check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC, allow_user_set))
5013 + return -EINVAL;
5014 +
5015 + if (PRINT_FIELD(WEIGHT) &&
5016 +diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
5017 +index fedcb7b35af9f..af5ea50ed5c0e 100755
5018 +--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
5019 ++++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
5020 +@@ -172,6 +172,17 @@ flooding_filters_add()
5021 + local lsb
5022 + local i
5023 +
5024 ++ # Prevent unwanted packets from entering the bridge and interfering
5025 ++ # with the test.
5026 ++ tc qdisc add dev br0 clsact
5027 ++ tc filter add dev br0 egress protocol all pref 1 handle 1 \
5028 ++ matchall skip_hw action drop
5029 ++ tc qdisc add dev $h1 clsact
5030 ++ tc filter add dev $h1 egress protocol all pref 1 handle 1 \
5031 ++ flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
5032 ++ tc filter add dev $h1 egress protocol all pref 2 handle 2 \
5033 ++ matchall skip_hw action drop
5034 ++
5035 + tc qdisc add dev $rp2 clsact
5036 +
5037 + for i in $(eval echo {1..$num_remotes}); do
5038 +@@ -194,6 +205,12 @@ flooding_filters_del()
5039 + done
5040 +
5041 + tc qdisc del dev $rp2 clsact
5042 ++
5043 ++ tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
5044 ++ tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
5045 ++ tc qdisc del dev $h1 clsact
5046 ++ tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
5047 ++ tc qdisc del dev br0 clsact
5048 + }
5049 +
5050 + flooding_check_packets()