From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 27 Apr 2022 12:21:15 +0000
Message-Id: 1651062056.0cd34c2b0e4fd1ba802468a45d1f0216374709fb.mpagano@gentoo
1 commit: 0cd34c2b0e4fd1ba802468a45d1f0216374709fb
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Apr 27 12:20:56 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Apr 27 12:20:56 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0cd34c2b
7
8 Linux patch 5.4.191
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1190_linux-5.4.191.patch | 2098 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2102 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index c97a7636..bad996a3 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -803,6 +803,10 @@ Patch: 1189_linux-5.4.190.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.190
23
24 +Patch: 1190_linux-5.4.191.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.191
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1190_linux-5.4.191.patch b/1190_linux-5.4.191.patch
33 new file mode 100644
34 index 00000000..4b838542
35 --- /dev/null
36 +++ b/1190_linux-5.4.191.patch
37 @@ -0,0 +1,2098 @@
38 +diff --git a/Documentation/filesystems/ext4/attributes.rst b/Documentation/filesystems/ext4/attributes.rst
39 +index 54386a010a8d7..871d2da7a0a91 100644
40 +--- a/Documentation/filesystems/ext4/attributes.rst
41 ++++ b/Documentation/filesystems/ext4/attributes.rst
42 +@@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
43 + - Checksum of the extended attribute block.
44 + * - 0x14
45 + - \_\_u32
46 +- - h\_reserved[2]
47 ++ - h\_reserved[3]
48 + - Zero.
49 +
50 + The checksum is calculated against the FS UUID, the 64-bit block number
51 +diff --git a/Makefile b/Makefile
52 +index fd239ec16278b..365b487e50d7f 100644
53 +--- a/Makefile
54 ++++ b/Makefile
55 +@@ -1,7 +1,7 @@
56 + # SPDX-License-Identifier: GPL-2.0
57 + VERSION = 5
58 + PATCHLEVEL = 4
59 +-SUBLEVEL = 190
60 ++SUBLEVEL = 191
61 + EXTRAVERSION =
62 + NAME = Kleptomaniac Octopus
63 +
64 +diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
65 +index cef1d3f2656f6..beb39930eedbe 100644
66 +--- a/arch/arc/kernel/entry.S
67 ++++ b/arch/arc/kernel/entry.S
68 +@@ -199,6 +199,7 @@ tracesys_exit:
69 + st r0, [sp, PT_r0] ; sys call return value in pt_regs
70 +
71 + ;POST Sys Call Ptrace Hook
72 ++ mov r0, sp ; pt_regs needed
73 + bl @syscall_trace_exit
74 + b ret_from_exception ; NOT ret_from_system_call at is saves r0 which
75 + ; we'd done before calling post hook above
76 +diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
77 +index 1da11bdb1dfbd..1c6500c4e6a17 100644
78 +--- a/arch/arm/mach-vexpress/spc.c
79 ++++ b/arch/arm/mach-vexpress/spc.c
80 +@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
81 + }
82 +
83 + cluster = topology_physical_package_id(cpu_dev->id);
84 +- if (init_opp_table[cluster])
85 ++ if (cluster < 0 || init_opp_table[cluster])
86 + continue;
87 +
88 + if (ve_init_opp_table(cpu_dev))
89 +diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
90 +index 03b947429e4de..4518a0f2d6c69 100644
91 +--- a/arch/powerpc/kvm/book3s_64_vio.c
92 ++++ b/arch/powerpc/kvm/book3s_64_vio.c
93 +@@ -420,13 +420,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
94 + tbl[idx % TCES_PER_PAGE] = tce;
95 + }
96 +
97 +-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
98 +- unsigned long entry)
99 ++static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
100 ++ struct iommu_table *tbl, unsigned long entry)
101 + {
102 +- unsigned long hpa = 0;
103 +- enum dma_data_direction dir = DMA_NONE;
104 ++ unsigned long i;
105 ++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
106 ++ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
107 ++
108 ++ for (i = 0; i < subpages; ++i) {
109 ++ unsigned long hpa = 0;
110 ++ enum dma_data_direction dir = DMA_NONE;
111 +
112 +- iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
113 ++ iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
114 ++ }
115 + }
116 +
117 + static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
118 +@@ -485,6 +491,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
119 + break;
120 + }
121 +
122 ++ iommu_tce_kill(tbl, io_entry, subpages);
123 ++
124 + return ret;
125 + }
126 +
127 +@@ -544,6 +552,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
128 + break;
129 + }
130 +
131 ++ iommu_tce_kill(tbl, io_entry, subpages);
132 ++
133 + return ret;
134 + }
135 +
136 +@@ -590,10 +600,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
137 + ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
138 + entry, ua, dir);
139 +
140 +- iommu_tce_kill(stit->tbl, entry, 1);
141 +
142 + if (ret != H_SUCCESS) {
143 +- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
144 ++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
145 + goto unlock_exit;
146 + }
147 + }
148 +@@ -669,13 +678,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
149 + */
150 + if (get_user(tce, tces + i)) {
151 + ret = H_TOO_HARD;
152 +- goto invalidate_exit;
153 ++ goto unlock_exit;
154 + }
155 + tce = be64_to_cpu(tce);
156 +
157 + if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
158 + ret = H_PARAMETER;
159 +- goto invalidate_exit;
160 ++ goto unlock_exit;
161 + }
162 +
163 + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
164 +@@ -684,19 +693,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
165 + iommu_tce_direction(tce));
166 +
167 + if (ret != H_SUCCESS) {
168 +- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
169 +- entry);
170 +- goto invalidate_exit;
171 ++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
172 ++ entry + i);
173 ++ goto unlock_exit;
174 + }
175 + }
176 +
177 + kvmppc_tce_put(stt, entry + i, tce);
178 + }
179 +
180 +-invalidate_exit:
181 +- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
182 +- iommu_tce_kill(stit->tbl, entry, npages);
183 +-
184 + unlock_exit:
185 + srcu_read_unlock(&vcpu->kvm->srcu, idx);
186 +
187 +@@ -735,20 +740,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
188 + continue;
189 +
190 + if (ret == H_TOO_HARD)
191 +- goto invalidate_exit;
192 ++ return ret;
193 +
194 + WARN_ON_ONCE(1);
195 +- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
196 ++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
197 + }
198 + }
199 +
200 + for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
201 + kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
202 +
203 +-invalidate_exit:
204 +- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
205 +- iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
206 +-
207 + return ret;
208 + }
209 + EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
210 +diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
211 +index 35fd67b4ceb41..abb49d8633298 100644
212 +--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
213 ++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
214 +@@ -251,13 +251,19 @@ extern void iommu_tce_kill_rm(struct iommu_table *tbl,
215 + tbl->it_ops->tce_kill(tbl, entry, pages, true);
216 + }
217 +
218 +-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
219 +- unsigned long entry)
220 ++static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
221 ++ struct iommu_table *tbl, unsigned long entry)
222 + {
223 +- unsigned long hpa = 0;
224 +- enum dma_data_direction dir = DMA_NONE;
225 ++ unsigned long i;
226 ++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
227 ++ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
228 ++
229 ++ for (i = 0; i < subpages; ++i) {
230 ++ unsigned long hpa = 0;
231 ++ enum dma_data_direction dir = DMA_NONE;
232 +
233 +- iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
234 ++ iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
235 ++ }
236 + }
237 +
238 + static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
239 +@@ -320,6 +326,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
240 + break;
241 + }
242 +
243 ++ iommu_tce_kill_rm(tbl, io_entry, subpages);
244 ++
245 + return ret;
246 + }
247 +
248 +@@ -383,6 +391,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
249 + break;
250 + }
251 +
252 ++ iommu_tce_kill_rm(tbl, io_entry, subpages);
253 ++
254 + return ret;
255 + }
256 +
257 +@@ -428,10 +438,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
258 + ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
259 + stit->tbl, entry, ua, dir);
260 +
261 +- iommu_tce_kill_rm(stit->tbl, entry, 1);
262 +-
263 + if (ret != H_SUCCESS) {
264 +- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
265 ++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
266 + return ret;
267 + }
268 + }
269 +@@ -571,7 +579,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
270 + ua = 0;
271 + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
272 + ret = H_PARAMETER;
273 +- goto invalidate_exit;
274 ++ goto unlock_exit;
275 + }
276 +
277 + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
278 +@@ -580,19 +588,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
279 + iommu_tce_direction(tce));
280 +
281 + if (ret != H_SUCCESS) {
282 +- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
283 +- entry);
284 +- goto invalidate_exit;
285 ++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
286 ++ entry + i);
287 ++ goto unlock_exit;
288 + }
289 + }
290 +
291 + kvmppc_rm_tce_put(stt, entry + i, tce);
292 + }
293 +
294 +-invalidate_exit:
295 +- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
296 +- iommu_tce_kill_rm(stit->tbl, entry, npages);
297 +-
298 + unlock_exit:
299 + if (rmap)
300 + unlock_rmap(rmap);
301 +@@ -635,20 +639,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
302 + continue;
303 +
304 + if (ret == H_TOO_HARD)
305 +- goto invalidate_exit;
306 ++ return ret;
307 +
308 + WARN_ON_ONCE_RM(1);
309 +- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
310 ++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
311 + }
312 + }
313 +
314 + for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
315 + kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
316 +
317 +-invalidate_exit:
318 +- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
319 +- iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
320 +-
321 + return ret;
322 + }
323 +
324 +diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
325 +index 08c3ef7961982..1225f53609a44 100644
326 +--- a/arch/powerpc/perf/power9-pmu.c
327 ++++ b/arch/powerpc/perf/power9-pmu.c
328 +@@ -131,11 +131,11 @@ int p9_dd22_bl_ev[] = {
329 +
330 + /* Table of alternatives, sorted by column 0 */
331 + static const unsigned int power9_event_alternatives[][MAX_ALT] = {
332 +- { PM_INST_DISP, PM_INST_DISP_ALT },
333 +- { PM_RUN_CYC_ALT, PM_RUN_CYC },
334 +- { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
335 +- { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
336 + { PM_BR_2PATH, PM_BR_2PATH_ALT },
337 ++ { PM_INST_DISP, PM_INST_DISP_ALT },
338 ++ { PM_RUN_CYC_ALT, PM_RUN_CYC },
339 ++ { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
340 ++ { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
341 + };
342 +
343 + static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
344 +diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
345 +index 22c4dfe659923..b4dd6ab0fdfce 100644
346 +--- a/arch/x86/include/asm/compat.h
347 ++++ b/arch/x86/include/asm/compat.h
348 +@@ -31,15 +31,13 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
349 + typedef u64 __attribute__((aligned(4))) compat_u64;
350 +
351 + struct compat_stat {
352 +- compat_dev_t st_dev;
353 +- u16 __pad1;
354 ++ u32 st_dev;
355 + compat_ino_t st_ino;
356 + compat_mode_t st_mode;
357 + compat_nlink_t st_nlink;
358 + __compat_uid_t st_uid;
359 + __compat_gid_t st_gid;
360 +- compat_dev_t st_rdev;
361 +- u16 __pad2;
362 ++ u32 st_rdev;
363 + u32 st_size;
364 + u32 st_blksize;
365 + u32 st_blocks;
366 +diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
367 +index d956f87fcb095..6a6cc8dae5653 100644
368 +--- a/arch/xtensa/kernel/coprocessor.S
369 ++++ b/arch/xtensa/kernel/coprocessor.S
370 +@@ -37,7 +37,7 @@
371 + .if XTENSA_HAVE_COPROCESSOR(x); \
372 + .align 4; \
373 + .Lsave_cp_regs_cp##x: \
374 +- xchal_cp##x##_store a2 a4 a5 a6 a7; \
375 ++ xchal_cp##x##_store a2 a3 a4 a5 a6; \
376 + jx a0; \
377 + .endif
378 +
379 +@@ -54,7 +54,7 @@
380 + .if XTENSA_HAVE_COPROCESSOR(x); \
381 + .align 4; \
382 + .Lload_cp_regs_cp##x: \
383 +- xchal_cp##x##_load a2 a4 a5 a6 a7; \
384 ++ xchal_cp##x##_load a2 a3 a4 a5 a6; \
385 + jx a0; \
386 + .endif
387 +
388 +diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
389 +index 0dde21e0d3de4..ad1841cecdfb7 100644
390 +--- a/arch/xtensa/kernel/jump_label.c
391 ++++ b/arch/xtensa/kernel/jump_label.c
392 +@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
393 + {
394 + struct patch *patch = data;
395 +
396 +- if (atomic_inc_return(&patch->cpu_count) == 1) {
397 ++ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
398 + local_patch_text(patch->addr, patch->data, patch->sz);
399 + atomic_inc(&patch->cpu_count);
400 + } else {
401 +diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
402 +index 7f053468b50d7..d490ac220ba86 100644
403 +--- a/block/compat_ioctl.c
404 ++++ b/block/compat_ioctl.c
405 +@@ -393,7 +393,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
406 + return 0;
407 + case BLKGETSIZE:
408 + size = i_size_read(bdev->bd_inode);
409 +- if ((size >> 9) > ~0UL)
410 ++ if ((size >> 9) > ~(compat_ulong_t)0)
411 + return -EFBIG;
412 + return compat_put_ulong(arg, size >> 9);
413 +
414 +diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
415 +index b066809ba9a11..c56f4043b0cc0 100644
416 +--- a/drivers/ata/pata_marvell.c
417 ++++ b/drivers/ata/pata_marvell.c
418 +@@ -83,6 +83,8 @@ static int marvell_cable_detect(struct ata_port *ap)
419 + switch(ap->port_no)
420 + {
421 + case 0:
422 ++ if (!ap->ioaddr.bmdma_addr)
423 ++ return ATA_CBL_PATA_UNK;
424 + if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
425 + return ATA_CBL_PATA40;
426 + return ATA_CBL_PATA80;
427 +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
428 +index 9aae6b3da356b..6473a4a81d58b 100644
429 +--- a/drivers/dma/at_xdmac.c
430 ++++ b/drivers/dma/at_xdmac.c
431 +@@ -1390,7 +1390,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
432 + {
433 + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
434 + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
435 +- struct at_xdmac_desc *desc, *_desc;
436 ++ struct at_xdmac_desc *desc, *_desc, *iter;
437 + struct list_head *descs_list;
438 + enum dma_status ret;
439 + int residue, retry;
440 +@@ -1505,11 +1505,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
441 + * microblock.
442 + */
443 + descs_list = &desc->descs_list;
444 +- list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
445 +- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
446 +- residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
447 +- if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
448 ++ list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
449 ++ dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
450 ++ residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
451 ++ if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
452 ++ desc = iter;
453 + break;
454 ++ }
455 + }
456 + residue += cur_ubc << dwidth;
457 +
458 +diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
459 +index cc70da05db4b5..801bef83df2a5 100644
460 +--- a/drivers/dma/imx-sdma.c
461 ++++ b/drivers/dma/imx-sdma.c
462 +@@ -1784,7 +1784,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
463 + u32 reg, val, shift, num_map, i;
464 + int ret = 0;
465 +
466 +- if (IS_ERR(np) || IS_ERR(gpr_np))
467 ++ if (IS_ERR(np) || !gpr_np)
468 + goto out;
469 +
470 + event_remap = of_find_property(np, propname, NULL);
471 +@@ -1832,7 +1832,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
472 + }
473 +
474 + out:
475 +- if (!IS_ERR(gpr_np))
476 ++ if (gpr_np)
477 + of_node_put(gpr_np);
478 +
479 + return ret;
480 +diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
481 +index 9c0ea13ca7883..7718d09e3d29f 100644
482 +--- a/drivers/dma/mediatek/mtk-uart-apdma.c
483 ++++ b/drivers/dma/mediatek/mtk-uart-apdma.c
484 +@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
485 + unsigned int status;
486 + int ret;
487 +
488 +- ret = pm_runtime_get_sync(mtkd->ddev.dev);
489 ++ ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
490 + if (ret < 0) {
491 + pm_runtime_put_noidle(chan->device->dev);
492 + return ret;
493 +@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
494 + ret = readx_poll_timeout(readl, c->base + VFF_EN,
495 + status, !status, 10, 100);
496 + if (ret)
497 +- return ret;
498 ++ goto err_pm;
499 +
500 + ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
501 + IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
502 + if (ret < 0) {
503 + dev_err(chan->device->dev, "Can't request dma IRQ\n");
504 +- return -EINVAL;
505 ++ ret = -EINVAL;
506 ++ goto err_pm;
507 + }
508 +
509 + if (mtkd->support_33bits)
510 + mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
511 +
512 ++err_pm:
513 ++ pm_runtime_put_noidle(mtkd->ddev.dev);
514 + return ret;
515 + }
516 +
517 +diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
518 +index d23a0782fb49c..4d2387f8e511b 100644
519 +--- a/drivers/edac/synopsys_edac.c
520 ++++ b/drivers/edac/synopsys_edac.c
521 +@@ -163,6 +163,11 @@
522 + #define ECC_STAT_CECNT_SHIFT 8
523 + #define ECC_STAT_BITNUM_MASK 0x7F
524 +
525 ++/* ECC error count register definitions */
526 ++#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
527 ++#define ECC_ERRCNT_UECNT_SHIFT 16
528 ++#define ECC_ERRCNT_CECNT_MASK 0xFFFF
529 ++
530 + /* DDR QOS Interrupt register definitions */
531 + #define DDR_QOS_IRQ_STAT_OFST 0x20200
532 + #define DDR_QOSUE_MASK 0x4
533 +@@ -418,15 +423,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
534 + base = priv->baseaddr;
535 + p = &priv->stat;
536 +
537 ++ regval = readl(base + ECC_ERRCNT_OFST);
538 ++ p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
539 ++ p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
540 ++ if (!p->ce_cnt)
541 ++ goto ue_err;
542 ++
543 + regval = readl(base + ECC_STAT_OFST);
544 + if (!regval)
545 + return 1;
546 +
547 +- p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
548 +- p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
549 +- if (!p->ce_cnt)
550 +- goto ue_err;
551 +-
552 + p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
553 +
554 + regval = readl(base + ECC_CEADDR0_OFST);
555 +diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
556 +index 83423092de2ff..da07993339702 100644
557 +--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
558 ++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
559 +@@ -179,7 +179,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
560 + drm_framebuffer_put(plane->state->fb);
561 +
562 + kfree(to_mdp5_plane_state(plane->state));
563 ++ plane->state = NULL;
564 + mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
565 ++ if (!mdp5_state)
566 ++ return;
567 +
568 + /* assign default blend parameters */
569 + mdp5_state->alpha = 255;
570 +diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
571 +index bdb4d59c81277..a621dd28ff70d 100644
572 +--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
573 ++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
574 +@@ -232,7 +232,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
575 +
576 + ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
577 + if (ret)
578 +- dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
579 ++ dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
580 + }
581 +
582 + static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
583 +@@ -268,7 +268,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
584 + return 0;
585 + }
586 +
587 +-static int rpi_touchscreen_enable(struct drm_panel *panel)
588 ++static int rpi_touchscreen_prepare(struct drm_panel *panel)
589 + {
590 + struct rpi_touchscreen *ts = panel_to_ts(panel);
591 + int i;
592 +@@ -298,6 +298,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
593 + rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
594 + msleep(100);
595 +
596 ++ return 0;
597 ++}
598 ++
599 ++static int rpi_touchscreen_enable(struct drm_panel *panel)
600 ++{
601 ++ struct rpi_touchscreen *ts = panel_to_ts(panel);
602 ++
603 + /* Turn on the backlight. */
604 + rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
605 +
606 +@@ -352,7 +359,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel)
607 + static const struct drm_panel_funcs rpi_touchscreen_funcs = {
608 + .disable = rpi_touchscreen_disable,
609 + .unprepare = rpi_touchscreen_noop,
610 +- .prepare = rpi_touchscreen_noop,
611 ++ .prepare = rpi_touchscreen_prepare,
612 + .enable = rpi_touchscreen_enable,
613 + .get_modes = rpi_touchscreen_get_modes,
614 + };
615 +diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
616 +index c78fa8144776e..0983949cc8c98 100644
617 +--- a/drivers/gpu/drm/vc4/vc4_dsi.c
618 ++++ b/drivers/gpu/drm/vc4/vc4_dsi.c
619 +@@ -831,7 +831,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
620 + unsigned long phy_clock;
621 + int ret;
622 +
623 +- ret = pm_runtime_get_sync(dev);
624 ++ ret = pm_runtime_resume_and_get(dev);
625 + if (ret) {
626 + DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
627 + return;
628 +diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
629 +index c43e98bb6e2d7..b514b2eaa3180 100644
630 +--- a/drivers/net/can/usb/usb_8dev.c
631 ++++ b/drivers/net/can/usb/usb_8dev.c
632 +@@ -670,9 +670,20 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
633 + atomic_inc(&priv->active_tx_urbs);
634 +
635 + err = usb_submit_urb(urb, GFP_ATOMIC);
636 +- if (unlikely(err))
637 +- goto failed;
638 +- else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
639 ++ if (unlikely(err)) {
640 ++ can_free_echo_skb(netdev, context->echo_index);
641 ++
642 ++ usb_unanchor_urb(urb);
643 ++ usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
644 ++
645 ++ atomic_dec(&priv->active_tx_urbs);
646 ++
647 ++ if (err == -ENODEV)
648 ++ netif_device_detach(netdev);
649 ++ else
650 ++ netdev_warn(netdev, "failed tx_urb %d\n", err);
651 ++ stats->tx_dropped++;
652 ++ } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
653 + /* Slow down tx path */
654 + netif_stop_queue(netdev);
655 +
656 +@@ -691,19 +702,6 @@ nofreecontext:
657 +
658 + return NETDEV_TX_BUSY;
659 +
660 +-failed:
661 +- can_free_echo_skb(netdev, context->echo_index);
662 +-
663 +- usb_unanchor_urb(urb);
664 +- usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
665 +-
666 +- atomic_dec(&priv->active_tx_urbs);
667 +-
668 +- if (err == -ENODEV)
669 +- netif_device_detach(netdev);
670 +- else
671 +- netdev_warn(netdev, "failed tx_urb %d\n", err);
672 +-
673 + nomembuf:
674 + usb_free_urb(urb);
675 +
676 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
677 +index 480d2ca369e6b..002a374f197bd 100644
678 +--- a/drivers/net/ethernet/cadence/macb_main.c
679 ++++ b/drivers/net/ethernet/cadence/macb_main.c
680 +@@ -1378,6 +1378,7 @@ static void macb_tx_restart(struct macb_queue *queue)
681 + unsigned int head = queue->tx_head;
682 + unsigned int tail = queue->tx_tail;
683 + struct macb *bp = queue->bp;
684 ++ unsigned int head_idx, tbqp;
685 +
686 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
687 + queue_writel(queue, ISR, MACB_BIT(TXUBR));
688 +@@ -1385,6 +1386,13 @@ static void macb_tx_restart(struct macb_queue *queue)
689 + if (head == tail)
690 + return;
691 +
692 ++ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
693 ++ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
694 ++ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
695 ++
696 ++ if (tbqp == head_idx)
697 ++ return;
698 ++
699 + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
700 + }
701 +
702 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
703 +index 7ce2e99b594d6..0a186d16e73f7 100644
704 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
705 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
706 +@@ -506,11 +506,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
707 + info->phc_index = -1;
708 +
709 + fman_node = of_get_parent(mac_node);
710 +- if (fman_node)
711 ++ if (fman_node) {
712 + ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
713 ++ of_node_put(fman_node);
714 ++ }
715 +
716 +- if (ptp_node)
717 ++ if (ptp_node) {
718 + ptp_dev = of_find_device_by_node(ptp_node);
719 ++ of_node_put(ptp_node);
720 ++ }
721 +
722 + if (ptp_dev)
723 + ptp = platform_get_drvdata(ptp_dev);
724 +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
725 +index 58ff747a42ae6..1241b4734896f 100644
726 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
727 ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
728 +@@ -995,8 +995,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
729 + {
730 + u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
731 + link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
732 +- u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
733 +- u16 lat_enc_d = 0; /* latency decoded */
734 ++ u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
735 ++ u32 lat_enc_d = 0; /* latency decoded */
736 + u16 lat_enc = 0; /* latency encoded */
737 +
738 + if (link) {
739 +diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
740 +index ed5d09c11c389..79252ca9e2133 100644
741 +--- a/drivers/net/ethernet/intel/igc/igc_i225.c
742 ++++ b/drivers/net/ethernet/intel/igc/igc_i225.c
743 +@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
744 + {
745 + u32 swfw_sync;
746 +
747 +- while (igc_get_hw_semaphore_i225(hw))
748 +- ; /* Empty */
749 ++ /* Releasing the resource requires first getting the HW semaphore.
750 ++ * If we fail to get the semaphore, there is nothing we can do,
751 ++ * except log an error and quit. We are not allowed to hang here
752 ++ * indefinitely, as it may cause denial of service or system crash.
753 ++ */
754 ++ if (igc_get_hw_semaphore_i225(hw)) {
755 ++ hw_dbg("Failed to release SW_FW_SYNC.\n");
756 ++ return;
757 ++ }
758 +
759 + swfw_sync = rd32(IGC_SW_FW_SYNC);
760 + swfw_sync &= ~mask;
761 +diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
762 +index 1a4947e6933c3..6156c76d765ff 100644
763 +--- a/drivers/net/ethernet/intel/igc/igc_phy.c
764 ++++ b/drivers/net/ethernet/intel/igc/igc_phy.c
765 +@@ -569,7 +569,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
766 + * the lower time out
767 + */
768 + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
769 +- usleep_range(500, 1000);
770 ++ udelay(50);
771 + mdic = rd32(IGC_MDIC);
772 + if (mdic & IGC_MDIC_READY)
773 + break;
774 +@@ -626,7 +626,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
775 + * the lower time out
776 + */
777 + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
778 +- usleep_range(500, 1000);
779 ++ udelay(50);
780 + mdic = rd32(IGC_MDIC);
781 + if (mdic & IGC_MDIC_READY)
782 + break;
783 +diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
784 +index 120ed4633a096..b9c4d48e28e42 100644
785 +--- a/drivers/net/ethernet/micrel/Kconfig
786 ++++ b/drivers/net/ethernet/micrel/Kconfig
787 +@@ -37,7 +37,6 @@ config KS8851
788 + config KS8851_MLL
789 + tristate "Micrel KS8851 MLL"
790 + depends on HAS_IOMEM
791 +- depends on PTP_1588_CLOCK_OPTIONAL
792 + select MII
793 + ---help---
794 + This platform driver is for Micrel KS8851 Address/data bus
795 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
796 +index c5991e31c557e..f4869b1836f30 100644
797 +--- a/drivers/net/vxlan.c
798 ++++ b/drivers/net/vxlan.c
799 +@@ -679,11 +679,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
800 +
801 + rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
802 + if (rd == NULL)
803 +- return -ENOBUFS;
804 ++ return -ENOMEM;
805 +
806 + if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
807 + kfree(rd);
808 +- return -ENOBUFS;
809 ++ return -ENOMEM;
810 + }
811 +
812 + rd->remote_ip = *ip;
813 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
814 +index ef5521b9b3577..ddc999670484f 100644
815 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
816 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
817 +@@ -550,7 +550,7 @@ enum brcmf_sdio_frmtype {
818 + BRCMF_SDIO_FT_SUB,
819 + };
820 +
821 +-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
822 ++#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
823 +
824 + /* SDIO Pad drive strength to select value mappings */
825 + struct sdiod_drive_str {
826 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
827 +index cf611d1b817c0..e6d7646a0d9ca 100644
828 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
829 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
830 +@@ -76,7 +76,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
831 + mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
832 +
833 + /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
834 +- mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
835 ++ mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
836 +
837 + /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
838 + mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
839 +diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
840 +index df352b334ea77..b377872a8f9d6 100644
841 +--- a/drivers/perf/arm_pmu.c
842 ++++ b/drivers/perf/arm_pmu.c
843 +@@ -322,6 +322,9 @@ validate_group(struct perf_event *event)
844 + if (!validate_event(event->pmu, &fake_pmu, leader))
845 + return -EINVAL;
846 +
847 ++ if (event == leader)
848 ++ return 0;
849 ++
850 + for_each_sibling_event(sibling, leader) {
851 + if (!validate_event(event->pmu, &fake_pmu, sibling))
852 + return -EINVAL;
853 +@@ -411,12 +414,7 @@ __hw_perf_event_init(struct perf_event *event)
854 + local64_set(&hwc->period_left, hwc->sample_period);
855 + }
856 +
857 +- if (event->group_leader != event) {
858 +- if (validate_group(event) != 0)
859 +- return -EINVAL;
860 +- }
861 +-
862 +- return 0;
863 ++ return validate_group(event);
864 + }
865 +
866 + static int armpmu_event_init(struct perf_event *event)
867 +diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
868 +index 9b6a93ff41ffb..91e468fcaf7cc 100644
869 +--- a/drivers/platform/x86/samsung-laptop.c
870 ++++ b/drivers/platform/x86/samsung-laptop.c
871 +@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
872 +
873 + if (value > samsung->kbd_led.max_brightness)
874 + value = samsung->kbd_led.max_brightness;
875 +- else if (value < 0)
876 +- value = 0;
877 +
878 + samsung->kbd_led_wk = value;
879 + queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
880 +diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
881 +index 24d3395964cc4..4c5bba52b1059 100644
882 +--- a/drivers/reset/tegra/reset-bpmp.c
883 ++++ b/drivers/reset/tegra/reset-bpmp.c
884 +@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
885 + struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
886 + struct mrq_reset_request request;
887 + struct tegra_bpmp_message msg;
888 ++ int err;
889 +
890 + memset(&request, 0, sizeof(request));
891 + request.cmd = command;
892 +@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
893 + msg.tx.data = &request;
894 + msg.tx.size = sizeof(request);
895 +
896 +- return tegra_bpmp_transfer(bpmp, &msg);
897 ++ err = tegra_bpmp_transfer(bpmp, &msg);
898 ++ if (err)
899 ++ return err;
900 ++ if (msg.rx.ret)
901 ++ return -EINVAL;
902 ++
903 ++ return 0;
904 + }
905 +
906 + static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
907 +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
908 +index 755f66b1ff9c7..f05fb4ddeaff0 100644
909 +--- a/drivers/scsi/qedi/qedi_iscsi.c
910 ++++ b/drivers/scsi/qedi/qedi_iscsi.c
911 +@@ -797,6 +797,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
912 + return qedi_iscsi_send_ioreq(task);
913 + }
914 +
915 ++static void qedi_offload_work(struct work_struct *work)
916 ++{
917 ++ struct qedi_endpoint *qedi_ep =
918 ++ container_of(work, struct qedi_endpoint, offload_work);
919 ++ struct qedi_ctx *qedi;
920 ++ int wait_delay = 5 * HZ;
921 ++ int ret;
922 ++
923 ++ qedi = qedi_ep->qedi;
924 ++
925 ++ ret = qedi_iscsi_offload_conn(qedi_ep);
926 ++ if (ret) {
927 ++ QEDI_ERR(&qedi->dbg_ctx,
928 ++ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
929 ++ qedi_ep->iscsi_cid, qedi_ep, ret);
930 ++ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
931 ++ return;
932 ++ }
933 ++
934 ++ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
935 ++ (qedi_ep->state ==
936 ++ EP_STATE_OFLDCONN_COMPL),
937 ++ wait_delay);
938 ++ if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
939 ++ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
940 ++ QEDI_ERR(&qedi->dbg_ctx,
941 ++ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
942 ++ qedi_ep->iscsi_cid, qedi_ep);
943 ++ }
944 ++}
945 ++
946 + static struct iscsi_endpoint *
947 + qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
948 + int non_blocking)
949 +@@ -840,6 +871,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
950 + }
951 + qedi_ep = ep->dd_data;
952 + memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
953 ++ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
954 + qedi_ep->state = EP_STATE_IDLE;
955 + qedi_ep->iscsi_cid = (u32)-1;
956 + qedi_ep->qedi = qedi;
957 +@@ -996,12 +1028,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
958 + qedi_ep = ep->dd_data;
959 + qedi = qedi_ep->qedi;
960 +
961 ++ flush_work(&qedi_ep->offload_work);
962 ++
963 + if (qedi_ep->state == EP_STATE_OFLDCONN_START)
964 + goto ep_exit_recover;
965 +
966 +- if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
967 +- flush_work(&qedi_ep->offload_work);
968 +-
969 + if (qedi_ep->conn) {
970 + qedi_conn = qedi_ep->conn;
971 + conn = qedi_conn->cls_conn->dd_data;
972 +@@ -1161,37 +1192,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
973 + return rc;
974 + }
975 +
976 +-static void qedi_offload_work(struct work_struct *work)
977 +-{
978 +- struct qedi_endpoint *qedi_ep =
979 +- container_of(work, struct qedi_endpoint, offload_work);
980 +- struct qedi_ctx *qedi;
981 +- int wait_delay = 5 * HZ;
982 +- int ret;
983 +-
984 +- qedi = qedi_ep->qedi;
985 +-
986 +- ret = qedi_iscsi_offload_conn(qedi_ep);
987 +- if (ret) {
988 +- QEDI_ERR(&qedi->dbg_ctx,
989 +- "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
990 +- qedi_ep->iscsi_cid, qedi_ep, ret);
991 +- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
992 +- return;
993 +- }
994 +-
995 +- ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
996 +- (qedi_ep->state ==
997 +- EP_STATE_OFLDCONN_COMPL),
998 +- wait_delay);
999 +- if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
1000 +- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
1001 +- QEDI_ERR(&qedi->dbg_ctx,
1002 +- "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
1003 +- qedi_ep->iscsi_cid, qedi_ep);
1004 +- }
1005 +-}
1006 +-
1007 + static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
1008 + {
1009 + struct qedi_ctx *qedi;
1010 +@@ -1307,7 +1307,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
1011 + qedi_ep->dst_addr, qedi_ep->dst_port);
1012 + }
1013 +
1014 +- INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
1015 + queue_work(qedi->offload_thread, &qedi_ep->offload_work);
1016 +
1017 + ret = 0;
1018 +diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
1019 +index 5fd929e023e18..b4d85fd62ce91 100644
1020 +--- a/drivers/spi/atmel-quadspi.c
1021 ++++ b/drivers/spi/atmel-quadspi.c
1022 +@@ -202,6 +202,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
1023 + static bool atmel_qspi_supports_op(struct spi_mem *mem,
1024 + const struct spi_mem_op *op)
1025 + {
1026 ++ if (!spi_mem_default_supports_op(mem, op))
1027 ++ return false;
1028 ++
1029 + if (atmel_qspi_find_mode(op) < 0)
1030 + return false;
1031 +
1032 +diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
1033 +index e6b1ca141b930..88888cc5d1932 100644
1034 +--- a/drivers/staging/android/ion/ion.c
1035 ++++ b/drivers/staging/android/ion/ion.c
1036 +@@ -114,6 +114,9 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
1037 + void *vaddr;
1038 +
1039 + if (buffer->kmap_cnt) {
1040 ++ if (buffer->kmap_cnt == INT_MAX)
1041 ++ return ERR_PTR(-EOVERFLOW);
1042 ++
1043 + buffer->kmap_cnt++;
1044 + return buffer->vaddr;
1045 + }
1046 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
1047 +index f44b6f9d07776..79a18692b84c5 100644
1048 +--- a/fs/cifs/cifsfs.c
1049 ++++ b/fs/cifs/cifsfs.c
1050 +@@ -889,7 +889,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1051 + ssize_t rc;
1052 + struct inode *inode = file_inode(iocb->ki_filp);
1053 +
1054 +- if (iocb->ki_filp->f_flags & O_DIRECT)
1055 ++ if (iocb->ki_flags & IOCB_DIRECT)
1056 + return cifs_user_readv(iocb, iter);
1057 +
1058 + rc = cifs_revalidate_mapping(inode);
1059 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1060 +index ae2cb15d95407..e932e84823714 100644
1061 +--- a/fs/ext4/ext4.h
1062 ++++ b/fs/ext4/ext4.h
1063 +@@ -1966,6 +1966,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
1064 + * Structure of a directory entry
1065 + */
1066 + #define EXT4_NAME_LEN 255
1067 ++/*
1068 ++ * Base length of the ext4 directory entry excluding the name length
1069 ++ */
1070 ++#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
1071 +
1072 + struct ext4_dir_entry {
1073 + __le32 inode; /* Inode number */
1074 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1075 +index 0d62f05f89256..00686fbe3c27d 100644
1076 +--- a/fs/ext4/inode.c
1077 ++++ b/fs/ext4/inode.c
1078 +@@ -4311,7 +4311,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
1079 + struct super_block *sb = inode->i_sb;
1080 + ext4_lblk_t first_block, stop_block;
1081 + struct address_space *mapping = inode->i_mapping;
1082 +- loff_t first_block_offset, last_block_offset;
1083 ++ loff_t first_block_offset, last_block_offset, max_length;
1084 ++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1085 + handle_t *handle;
1086 + unsigned int credits;
1087 + int ret = 0;
1088 +@@ -4357,6 +4358,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
1089 + offset;
1090 + }
1091 +
1092 ++ /*
1093 ++ * For punch hole the length + offset needs to be within one block
1094 ++ * before last range. Adjust the length if it goes beyond that limit.
1095 ++ */
1096 ++ max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
1097 ++ if (offset + length > max_length)
1098 ++ length = max_length - offset;
1099 ++
1100 + if (offset & (sb->s_blocksize - 1) ||
1101 + (offset + length) & (sb->s_blocksize - 1)) {
1102 + /*
1103 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1104 +index 9905720df9248..f10307215d583 100644
1105 +--- a/fs/ext4/namei.c
1106 ++++ b/fs/ext4/namei.c
1107 +@@ -1385,10 +1385,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
1108 +
1109 + de = (struct ext4_dir_entry_2 *)search_buf;
1110 + dlimit = search_buf + buf_size;
1111 +- while ((char *) de < dlimit) {
1112 ++ while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) {
1113 + /* this code is executed quadratically often */
1114 + /* do minimal checking `by hand' */
1115 +- if ((char *) de + de->name_len <= dlimit &&
1116 ++ if (de->name + de->name_len <= dlimit &&
1117 + ext4_match(dir, fname, de)) {
1118 + /* found a match - just to be sure, do
1119 + * a full check */
1120 +diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
1121 +index 2cc9f2168b9e4..b66b335a0ca6f 100644
1122 +--- a/fs/ext4/page-io.c
1123 ++++ b/fs/ext4/page-io.c
1124 +@@ -100,8 +100,10 @@ static void ext4_finish_bio(struct bio *bio)
1125 + continue;
1126 + }
1127 + clear_buffer_async_write(bh);
1128 +- if (bio->bi_status)
1129 ++ if (bio->bi_status) {
1130 ++ set_buffer_write_io_error(bh);
1131 + buffer_io_error(bh);
1132 ++ }
1133 + } while ((bh = bh->b_this_page) != head);
1134 + bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
1135 + local_irq_restore(flags);
1136 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1137 +index 5bc7fd0240a19..c13879bd21683 100644
1138 +--- a/fs/ext4/super.c
1139 ++++ b/fs/ext4/super.c
1140 +@@ -3485,9 +3485,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
1141 + ext4_fsblk_t first_block, last_block, b;
1142 + ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1143 + int s, j, count = 0;
1144 ++ int has_super = ext4_bg_has_super(sb, grp);
1145 +
1146 + if (!ext4_has_feature_bigalloc(sb))
1147 +- return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
1148 ++ return (has_super + ext4_bg_num_gdb(sb, grp) +
1149 ++ (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
1150 + sbi->s_itb_per_group + 2);
1151 +
1152 + first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
1153 +@@ -4512,9 +4514,18 @@ no_journal:
1154 + * Get the # of file system overhead blocks from the
1155 + * superblock if present.
1156 + */
1157 +- if (es->s_overhead_clusters)
1158 +- sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
1159 +- else {
1160 ++ sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
1161 ++ /* ignore the precalculated value if it is ridiculous */
1162 ++ if (sbi->s_overhead > ext4_blocks_count(es))
1163 ++ sbi->s_overhead = 0;
1164 ++ /*
1165 ++ * If the bigalloc feature is not enabled recalculating the
1166 ++ * overhead doesn't take long, so we might as well just redo
1167 ++ * it to make sure we are using the correct value.
1168 ++ */
1169 ++ if (!ext4_has_feature_bigalloc(sb))
1170 ++ sbi->s_overhead = 0;
1171 ++ if (sbi->s_overhead == 0) {
1172 + err = ext4_calculate_overhead(sb);
1173 + if (err)
1174 + goto failed_mount_wq;
1175 +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
1176 +index d7ec0ac87fc03..8153a3eac540a 100644
1177 +--- a/fs/gfs2/rgrp.c
1178 ++++ b/fs/gfs2/rgrp.c
1179 +@@ -925,15 +925,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
1180 + rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
1181 + spin_lock_init(&rgd->rd_rsspin);
1182 +
1183 +- error = compute_bitstructs(rgd);
1184 +- if (error)
1185 +- goto fail;
1186 +-
1187 + error = gfs2_glock_get(sdp, rgd->rd_addr,
1188 + &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
1189 + if (error)
1190 + goto fail;
1191 +
1192 ++ error = compute_bitstructs(rgd);
1193 ++ if (error)
1194 ++ goto fail_glock;
1195 ++
1196 + rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
1197 + rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
1198 + if (rgd->rd_data > sdp->sd_max_rg_data)
1199 +@@ -950,6 +950,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
1200 + }
1201 +
1202 + error = 0; /* someone else read in the rgrp; free it and ignore it */
1203 ++fail_glock:
1204 + gfs2_glock_put(rgd->rd_gl);
1205 +
1206 + fail:
1207 +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
1208 +index 88146008b3e36..d45ceb2e21492 100644
1209 +--- a/fs/jbd2/commit.c
1210 ++++ b/fs/jbd2/commit.c
1211 +@@ -451,7 +451,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
1212 + }
1213 + spin_unlock(&commit_transaction->t_handle_lock);
1214 + commit_transaction->t_state = T_SWITCH;
1215 +- write_unlock(&journal->j_state_lock);
1216 +
1217 + J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
1218 + journal->j_max_transaction_buffers);
1219 +@@ -471,6 +470,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
1220 + * has reserved. This is consistent with the existing behaviour
1221 + * that multiple jbd2_journal_get_write_access() calls to the same
1222 + * buffer are perfectly permissible.
1223 ++ * We use journal->j_state_lock here to serialize processing of
1224 ++ * t_reserved_list with eviction of buffers from journal_unmap_buffer().
1225 + */
1226 + while (commit_transaction->t_reserved_list) {
1227 + jh = commit_transaction->t_reserved_list;
1228 +@@ -490,6 +491,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
1229 + jbd2_journal_refile_buffer(journal, jh);
1230 + }
1231 +
1232 ++ write_unlock(&journal->j_state_lock);
1233 + /*
1234 + * Now try to drop any written-back buffers from the journal's
1235 + * checkpoint lists. We do this *before* commit because it potentially
1236 +diff --git a/fs/stat.c b/fs/stat.c
1237 +index c38e4c2e1221c..268c9eb896564 100644
1238 +--- a/fs/stat.c
1239 ++++ b/fs/stat.c
1240 +@@ -290,9 +290,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
1241 + # define choose_32_64(a,b) b
1242 + #endif
1243 +
1244 +-#define valid_dev(x) choose_32_64(old_valid_dev(x),true)
1245 +-#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
1246 +-
1247 + #ifndef INIT_STRUCT_STAT_PADDING
1248 + # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
1249 + #endif
1250 +@@ -301,7 +298,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
1251 + {
1252 + struct stat tmp;
1253 +
1254 +- if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
1255 ++ if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
1256 ++ return -EOVERFLOW;
1257 ++ if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
1258 + return -EOVERFLOW;
1259 + #if BITS_PER_LONG == 32
1260 + if (stat->size > MAX_NON_LFS)
1261 +@@ -309,7 +308,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
1262 + #endif
1263 +
1264 + INIT_STRUCT_STAT_PADDING(tmp);
1265 +- tmp.st_dev = encode_dev(stat->dev);
1266 ++ tmp.st_dev = new_encode_dev(stat->dev);
1267 + tmp.st_ino = stat->ino;
1268 + if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
1269 + return -EOVERFLOW;
1270 +@@ -319,7 +318,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
1271 + return -EOVERFLOW;
1272 + SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
1273 + SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
1274 +- tmp.st_rdev = encode_dev(stat->rdev);
1275 ++ tmp.st_rdev = new_encode_dev(stat->rdev);
1276 + tmp.st_size = stat->size;
1277 + tmp.st_atime = stat->atime.tv_sec;
1278 + tmp.st_mtime = stat->mtime.tv_sec;
1279 +@@ -593,11 +592,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
1280 + {
1281 + struct compat_stat tmp;
1282 +
1283 +- if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
1284 ++ if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
1285 ++ return -EOVERFLOW;
1286 ++ if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
1287 + return -EOVERFLOW;
1288 +
1289 + memset(&tmp, 0, sizeof(tmp));
1290 +- tmp.st_dev = old_encode_dev(stat->dev);
1291 ++ tmp.st_dev = new_encode_dev(stat->dev);
1292 + tmp.st_ino = stat->ino;
1293 + if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
1294 + return -EOVERFLOW;
1295 +@@ -607,7 +608,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
1296 + return -EOVERFLOW;
1297 + SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
1298 + SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
1299 +- tmp.st_rdev = old_encode_dev(stat->rdev);
1300 ++ tmp.st_rdev = new_encode_dev(stat->rdev);
1301 + if ((u64) stat->size > MAX_NON_LFS)
1302 + return -EOVERFLOW;
1303 + tmp.st_size = stat->size;
1304 +diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
1305 +index f6564b572d779..0f1e95240c0c0 100644
1306 +--- a/include/linux/etherdevice.h
1307 ++++ b/include/linux/etherdevice.h
1308 +@@ -127,7 +127,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
1309 + #endif
1310 + }
1311 +
1312 +-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
1313 ++static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
1314 + {
1315 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
1316 + #ifdef __BIG_ENDIAN
1317 +@@ -341,8 +341,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
1318 + * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
1319 + */
1320 +
1321 +-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
1322 +- const u8 addr2[6+2])
1323 ++static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
1324 + {
1325 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
1326 + u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
1327 +diff --git a/include/linux/sched.h b/include/linux/sched.h
1328 +index b341471de9d60..171cb7475b450 100644
1329 +--- a/include/linux/sched.h
1330 ++++ b/include/linux/sched.h
1331 +@@ -1247,6 +1247,7 @@ struct task_struct {
1332 + int pagefault_disabled;
1333 + #ifdef CONFIG_MMU
1334 + struct task_struct *oom_reaper_list;
1335 ++ struct timer_list oom_reaper_timer;
1336 + #endif
1337 + #ifdef CONFIG_VMAP_STACK
1338 + struct vm_struct *stack_vm_area;
1339 +diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
1340 +index 59802eb8d2cc1..a1869a6789448 100644
1341 +--- a/include/net/inet_hashtables.h
1342 ++++ b/include/net/inet_hashtables.h
1343 +@@ -247,8 +247,9 @@ void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
1344 + unsigned long high_limit);
1345 + int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
1346 +
1347 +-bool inet_ehash_insert(struct sock *sk, struct sock *osk);
1348 +-bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
1349 ++bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
1350 ++bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
1351 ++ bool *found_dup_sk);
1352 + int __inet_hash(struct sock *sk, struct sock *osk);
1353 + int inet_hash(struct sock *sk);
1354 + void inet_unhash(struct sock *sk);
1355 +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
1356 +index 3105dbf6c0e96..82580f7ffad95 100644
1357 +--- a/kernel/trace/trace_events_trigger.c
1358 ++++ b/kernel/trace/trace_events_trigger.c
1359 +@@ -1219,7 +1219,14 @@ static void
1360 + stacktrace_trigger(struct event_trigger_data *data, void *rec,
1361 + struct ring_buffer_event *event)
1362 + {
1363 +- trace_dump_stack(STACK_SKIP);
1364 ++ struct trace_event_file *file = data->private_data;
1365 ++ unsigned long flags;
1366 ++
1367 ++ if (file) {
1368 ++ local_save_flags(flags);
1369 ++ __trace_stack(file->tr, flags, STACK_SKIP, preempt_count());
1370 ++ } else
1371 ++ trace_dump_stack(STACK_SKIP);
1372 + }
1373 +
1374 + static void
1375 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
1376 +index dcbb9a28706fc..ee927ffeb718d 100644
1377 +--- a/mm/oom_kill.c
1378 ++++ b/mm/oom_kill.c
1379 +@@ -631,7 +631,7 @@ done:
1380 + */
1381 + set_bit(MMF_OOM_SKIP, &mm->flags);
1382 +
1383 +- /* Drop a reference taken by wake_oom_reaper */
1384 ++ /* Drop a reference taken by queue_oom_reaper */
1385 + put_task_struct(tsk);
1386 + }
1387 +
1388 +@@ -641,12 +641,12 @@ static int oom_reaper(void *unused)
1389 + struct task_struct *tsk = NULL;
1390 +
1391 + wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
1392 +- spin_lock(&oom_reaper_lock);
1393 ++ spin_lock_irq(&oom_reaper_lock);
1394 + if (oom_reaper_list != NULL) {
1395 + tsk = oom_reaper_list;
1396 + oom_reaper_list = tsk->oom_reaper_list;
1397 + }
1398 +- spin_unlock(&oom_reaper_lock);
1399 ++ spin_unlock_irq(&oom_reaper_lock);
1400 +
1401 + if (tsk)
1402 + oom_reap_task(tsk);
1403 +@@ -655,22 +655,48 @@ static int oom_reaper(void *unused)
1404 + return 0;
1405 + }
1406 +
1407 +-static void wake_oom_reaper(struct task_struct *tsk)
1408 ++static void wake_oom_reaper(struct timer_list *timer)
1409 + {
1410 +- /* mm is already queued? */
1411 +- if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
1412 +- return;
1413 ++ struct task_struct *tsk = container_of(timer, struct task_struct,
1414 ++ oom_reaper_timer);
1415 ++ struct mm_struct *mm = tsk->signal->oom_mm;
1416 ++ unsigned long flags;
1417 +
1418 +- get_task_struct(tsk);
1419 ++ /* The victim managed to terminate on its own - see exit_mmap */
1420 ++ if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
1421 ++ put_task_struct(tsk);
1422 ++ return;
1423 ++ }
1424 +
1425 +- spin_lock(&oom_reaper_lock);
1426 ++ spin_lock_irqsave(&oom_reaper_lock, flags);
1427 + tsk->oom_reaper_list = oom_reaper_list;
1428 + oom_reaper_list = tsk;
1429 +- spin_unlock(&oom_reaper_lock);
1430 ++ spin_unlock_irqrestore(&oom_reaper_lock, flags);
1431 + trace_wake_reaper(tsk->pid);
1432 + wake_up(&oom_reaper_wait);
1433 + }
1434 +
1435 ++/*
1436 ++ * Give the OOM victim time to exit naturally before invoking the oom_reaping.
1437 ++ * The timers timeout is arbitrary... the longer it is, the longer the worst
1438 ++ * case scenario for the OOM can take. If it is too small, the oom_reaper can
1439 ++ * get in the way and release resources needed by the process exit path.
1440 ++ * e.g. The futex robust list can sit in Anon|Private memory that gets reaped
1441 ++ * before the exit path is able to wake the futex waiters.
1442 ++ */
1443 ++#define OOM_REAPER_DELAY (2*HZ)
1444 ++static void queue_oom_reaper(struct task_struct *tsk)
1445 ++{
1446 ++ /* mm is already queued? */
1447 ++ if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
1448 ++ return;
1449 ++
1450 ++ get_task_struct(tsk);
1451 ++ timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
1452 ++ tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
1453 ++ add_timer(&tsk->oom_reaper_timer);
1454 ++}
1455 ++
1456 + static int __init oom_init(void)
1457 + {
1458 + oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
1459 +@@ -678,7 +704,7 @@ static int __init oom_init(void)
1460 + }
1461 + subsys_initcall(oom_init)
1462 + #else
1463 +-static inline void wake_oom_reaper(struct task_struct *tsk)
1464 ++static inline void queue_oom_reaper(struct task_struct *tsk)
1465 + {
1466 + }
1467 + #endif /* CONFIG_MMU */
1468 +@@ -927,7 +953,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
1469 + rcu_read_unlock();
1470 +
1471 + if (can_oom_reap)
1472 +- wake_oom_reaper(victim);
1473 ++ queue_oom_reaper(victim);
1474 +
1475 + mmdrop(mm);
1476 + put_task_struct(victim);
1477 +@@ -963,7 +989,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
1478 + task_lock(victim);
1479 + if (task_will_free_mem(victim)) {
1480 + mark_oom_victim(victim);
1481 +- wake_oom_reaper(victim);
1482 ++ queue_oom_reaper(victim);
1483 + task_unlock(victim);
1484 + put_task_struct(victim);
1485 + return;
1486 +@@ -1061,7 +1087,7 @@ bool out_of_memory(struct oom_control *oc)
1487 + */
1488 + if (task_will_free_mem(current)) {
1489 + mark_oom_victim(current);
1490 +- wake_oom_reaper(current);
1491 ++ queue_oom_reaper(current);
1492 + return true;
1493 + }
1494 +
1495 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1496 +index 7048ea59d58bd..f08ce248af2a9 100644
1497 +--- a/mm/page_alloc.c
1498 ++++ b/mm/page_alloc.c
1499 +@@ -7588,7 +7588,7 @@ void __init mem_init_print_info(const char *str)
1500 + */
1501 + #define adj_init_size(start, end, size, pos, adj) \
1502 + do { \
1503 +- if (start <= pos && pos < end && size > adj) \
1504 ++ if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
1505 + size -= adj; \
1506 + } while (0)
1507 +
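
The page_alloc change looks odd until you recall that adj_init_size() is invoked with linker-script symbols declared as arrays (e.g. _sinittext[]); the likely motivation is gcc 12's -Warray-compare warning, which fires when two arrays are compared directly, and taking &start[0] spells out the array-to-pointer decay without changing the comparison. A small standalone illustration, with a and b as stand-in arrays:

    /* Sketch: gcc 12 warns on comparing two arrays directly; writing
     * &a[0] makes the array-to-pointer decay explicit and compares
     * the same addresses. (Ordering pointers into distinct objects is
     * formally unspecified in ISO C; like the kernel, this relies on
     * a flat address space.)
     */
    #include <stdio.h>

    char a[4], b[4];

    int main(void)
    {
        /* gcc 12: "comparison between two arrays" (-Warray-compare):
         *
         *     int bad = a < b;
         */
        int ok = &a[0] < &b[0];     /* explicit decay: no warning */

        printf("%d\n", ok);
        return 0;
    }
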
1508 +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
1509 +index d19557c6d04b5..7cf903f9e29a9 100644
1510 +--- a/net/dccp/ipv4.c
1511 ++++ b/net/dccp/ipv4.c
1512 +@@ -427,7 +427,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
1513 +
1514 + if (__inet_inherit_port(sk, newsk) < 0)
1515 + goto put_and_exit;
1516 +- *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1517 ++ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
1518 + if (*own_req)
1519 + ireq->ireq_opt = NULL;
1520 + else
1521 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
1522 +index 9f73ccf46c9b1..7c24927e9c2c2 100644
1523 +--- a/net/dccp/ipv6.c
1524 ++++ b/net/dccp/ipv6.c
1525 +@@ -538,7 +538,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
1526 + dccp_done(newsk);
1527 + goto out;
1528 + }
1529 +- *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1530 ++ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
1531 + /* Clone pktoptions received with SYN, if we own the req */
1532 + if (*own_req && ireq->pktopts) {
1533 + newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
1534 +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
1535 +index 85a88425edc48..6cbf0db57ad06 100644
1536 +--- a/net/ipv4/inet_connection_sock.c
1537 ++++ b/net/ipv4/inet_connection_sock.c
1538 +@@ -791,7 +791,7 @@ static void reqsk_queue_hash_req(struct request_sock *req,
1539 + timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1540 + mod_timer(&req->rsk_timer, jiffies + timeout);
1541 +
1542 +- inet_ehash_insert(req_to_sk(req), NULL);
1543 ++ inet_ehash_insert(req_to_sk(req), NULL, NULL);
1544 + /* before letting lookups find us, make sure all req fields
1545 + * are committed to memory and refcnt initialized.
1546 + */
1547 +diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
1548 +index 72fdf1fcbcaa9..cbbeb0eea0c35 100644
1549 +--- a/net/ipv4/inet_hashtables.c
1550 ++++ b/net/ipv4/inet_hashtables.c
1551 +@@ -20,6 +20,9 @@
1552 + #include <net/addrconf.h>
1553 + #include <net/inet_connection_sock.h>
1554 + #include <net/inet_hashtables.h>
1555 ++#if IS_ENABLED(CONFIG_IPV6)
1556 ++#include <net/inet6_hashtables.h>
1557 ++#endif
1558 + #include <net/secure_seq.h>
1559 + #include <net/ip.h>
1560 + #include <net/tcp.h>
1561 +@@ -470,10 +473,52 @@ static u32 inet_sk_port_offset(const struct sock *sk)
1562 + inet->inet_dport);
1563 + }
1564 +
1565 +-/* insert a socket into ehash, and eventually remove another one
1566 +- * (The another one can be a SYN_RECV or TIMEWAIT
1567 ++/* Searches for an existing socket in the ehash bucket list.
1568 ++ * Returns true if found, false otherwise.
1569 + */
1570 +-bool inet_ehash_insert(struct sock *sk, struct sock *osk)
1571 ++static bool inet_ehash_lookup_by_sk(struct sock *sk,
1572 ++ struct hlist_nulls_head *list)
1573 ++{
1574 ++ const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
1575 ++ const int sdif = sk->sk_bound_dev_if;
1576 ++ const int dif = sk->sk_bound_dev_if;
1577 ++ const struct hlist_nulls_node *node;
1578 ++ struct net *net = sock_net(sk);
1579 ++ struct sock *esk;
1580 ++
1581 ++ INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);
1582 ++
1583 ++ sk_nulls_for_each_rcu(esk, node, list) {
1584 ++ if (esk->sk_hash != sk->sk_hash)
1585 ++ continue;
1586 ++ if (sk->sk_family == AF_INET) {
1587 ++ if (unlikely(INET_MATCH(esk, net, acookie,
1588 ++ sk->sk_daddr,
1589 ++ sk->sk_rcv_saddr,
1590 ++ ports, dif, sdif))) {
1591 ++ return true;
1592 ++ }
1593 ++ }
1594 ++#if IS_ENABLED(CONFIG_IPV6)
1595 ++ else if (sk->sk_family == AF_INET6) {
1596 ++ if (unlikely(INET6_MATCH(esk, net,
1597 ++ &sk->sk_v6_daddr,
1598 ++ &sk->sk_v6_rcv_saddr,
1599 ++ ports, dif, sdif))) {
1600 ++ return true;
1601 ++ }
1602 ++ }
1603 ++#endif
1604 ++ }
1605 ++ return false;
1606 ++}
1607 ++
1608 ++/* Insert a socket into ehash, and eventually remove another one
1609 ++ * (the other one can be a SYN_RECV or TIMEWAIT).
1610 ++ * If a matching socket already exists, sk is not inserted
1611 ++ * and the found_dup_sk parameter is set to true.
1612 ++ */
1613 ++bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
1614 + {
1615 + struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
1616 + struct hlist_nulls_head *list;
1617 +@@ -492,16 +537,23 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk)
1618 + if (osk) {
1619 + WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
1620 + ret = sk_nulls_del_node_init_rcu(osk);
1621 ++ } else if (found_dup_sk) {
1622 ++ *found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
1623 ++ if (*found_dup_sk)
1624 ++ ret = false;
1625 + }
1626 ++
1627 + if (ret)
1628 + __sk_nulls_add_node_rcu(sk, list);
1629 ++
1630 + spin_unlock(lock);
1631 ++
1632 + return ret;
1633 + }
1634 +
1635 +-bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
1636 ++bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
1637 + {
1638 +- bool ok = inet_ehash_insert(sk, osk);
1639 ++ bool ok = inet_ehash_insert(sk, osk, found_dup_sk);
1640 +
1641 + if (ok) {
1642 + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
1643 +@@ -545,7 +597,7 @@ int __inet_hash(struct sock *sk, struct sock *osk)
1644 + int err = 0;
1645 +
1646 + if (sk->sk_state != TCP_LISTEN) {
1647 +- inet_ehash_nolisten(sk, osk);
1648 ++ inet_ehash_nolisten(sk, osk, NULL);
1649 + return 0;
1650 + }
1651 + WARN_ON(!sk_unhashed(sk));
1652 +@@ -641,7 +693,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
1653 + tb = inet_csk(sk)->icsk_bind_hash;
1654 + spin_lock_bh(&head->lock);
1655 + if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
1656 +- inet_ehash_nolisten(sk, NULL);
1657 ++ inet_ehash_nolisten(sk, NULL, NULL);
1658 + spin_unlock_bh(&head->lock);
1659 + return 0;
1660 + }
1661 +@@ -720,7 +772,7 @@ ok:
1662 + inet_bind_hash(sk, tb, port);
1663 + if (sk_unhashed(sk)) {
1664 + inet_sk(sk)->inet_sport = htons(port);
1665 +- inet_ehash_nolisten(sk, (struct sock *)tw);
1666 ++ inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
1667 + }
1668 + if (tw)
1669 + inet_twsk_bind_unhash(tw, hinfo);
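
The new inet_ehash_lookup_by_sk() gives inet_ehash_insert() a way to detect, under the bucket lock, that another socket with the same connection tuple is already hashed, and to report that through the new found_dup_sk out-parameter so the TCP syn_recv paths below can drop the loser of a syncookie race instead of inserting a duplicate. A toy sketch of the insert-unless-duplicate shape on a single bucket; plain list, no locking or RCU, and struct sock here is a two-field stand-in:

    /* Sketch: insert-unless-duplicate on one hash bucket. Stand-in for
     * the found_dup_sk logic; the kernel walks an RCU nulls list under
     * the bucket spinlock and matches the full 4-tuple, not one field.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct sock {
        unsigned int hash;          /* stands in for the 4-tuple match */
        struct sock *next;
    };

    static struct sock *bucket;     /* one ehash chain */

    static bool bucket_insert(struct sock *sk, bool *found_dup_sk)
    {
        struct sock *esk;

        for (esk = bucket; esk; esk = esk->next) {
            if (esk->hash == sk->hash) {
                if (found_dup_sk)
                    *found_dup_sk = true;
                return false;       /* duplicate: do not insert */
            }
        }
        sk->next = bucket;
        bucket = sk;
        return true;
    }

    int main(void)
    {
        struct sock a = { .hash = 42 }, b = { .hash = 42 };
        bool dup = false;

        printf("a inserted: %d\n", bucket_insert(&a, &dup));
        printf("b inserted: %d, duplicate seen: %d\n",
               bucket_insert(&b, &dup), dup);
        return 0;
    }
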
1670 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1671 +index 2ce85e52aea7c..426d70d45eda4 100644
1672 +--- a/net/ipv4/tcp_ipv4.c
1673 ++++ b/net/ipv4/tcp_ipv4.c
1674 +@@ -1426,6 +1426,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1675 + bool *own_req)
1676 + {
1677 + struct inet_request_sock *ireq;
1678 ++ bool found_dup_sk = false;
1679 + struct inet_sock *newinet;
1680 + struct tcp_sock *newtp;
1681 + struct sock *newsk;
1682 +@@ -1496,12 +1497,22 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1683 +
1684 + if (__inet_inherit_port(sk, newsk) < 0)
1685 + goto put_and_exit;
1686 +- *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1687 ++ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1688 ++ &found_dup_sk);
1689 + if (likely(*own_req)) {
1690 + tcp_move_syn(newtp, req);
1691 + ireq->ireq_opt = NULL;
1692 + } else {
1693 + newinet->inet_opt = NULL;
1694 ++
1695 ++ if (!req_unhash && found_dup_sk) {
1696 ++ /* This code path should only be executed in the
1697 ++ * syncookie case
1698 ++ */
1699 ++ bh_unlock_sock(newsk);
1700 ++ sock_put(newsk);
1701 ++ newsk = NULL;
1702 ++ }
1703 + }
1704 + return newsk;
1705 +
1706 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1707 +index 3903cc0ab1883..51c900e9bfe20 100644
1708 +--- a/net/ipv6/tcp_ipv6.c
1709 ++++ b/net/ipv6/tcp_ipv6.c
1710 +@@ -1142,6 +1142,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1711 + const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1712 + struct ipv6_txoptions *opt;
1713 + struct inet_sock *newinet;
1714 ++ bool found_dup_sk = false;
1715 + struct tcp_sock *newtp;
1716 + struct sock *newsk;
1717 + #ifdef CONFIG_TCP_MD5SIG
1718 +@@ -1308,7 +1309,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1719 + tcp_done(newsk);
1720 + goto out;
1721 + }
1722 +- *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1723 ++ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1724 ++ &found_dup_sk);
1725 + if (*own_req) {
1726 + tcp_move_syn(newtp, req);
1727 +
1728 +@@ -1323,6 +1325,15 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1729 + skb_set_owner_r(newnp->pktoptions, newsk);
1730 + }
1731 + }
1732 ++ } else {
1733 ++ if (!req_unhash && found_dup_sk) {
1734 ++ /* This code path should only be executed in the
1735 ++ * syncookie case
1736 ++ */
1737 ++ bh_unlock_sock(newsk);
1738 ++ sock_put(newsk);
1739 ++ newsk = NULL;
1740 ++ }
1741 + }
1742 +
1743 + return newsk;
1744 +diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
1745 +index f35899d45a9af..ff4352f6d168a 100644
1746 +--- a/net/l3mdev/l3mdev.c
1747 ++++ b/net/l3mdev/l3mdev.c
1748 +@@ -54,7 +54,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
1749 +
1750 + dev = dev_get_by_index_rcu(net, ifindex);
1751 + while (dev && !netif_is_l3_master(dev))
1752 +- dev = netdev_master_upper_dev_get(dev);
1753 ++ dev = netdev_master_upper_dev_get_rcu(dev);
1754 +
1755 + return dev ? dev->ifindex : 0;
1756 + }
1757 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
1758 +index fb28969899af0..8aefc52542a00 100644
1759 +--- a/net/netlink/af_netlink.c
1760 ++++ b/net/netlink/af_netlink.c
1761 +@@ -2253,6 +2253,13 @@ static int netlink_dump(struct sock *sk)
1762 + * single netdev. The outcome is MSG_TRUNC error.
1763 + */
1764 + skb_reserve(skb, skb_tailroom(skb) - alloc_size);
1765 ++
1766 ++ /* Make sure malicious BPF programs cannot read uninitialized memory
1767 ++ * from skb->head -> skb->data
1768 ++ */
1769 ++ skb_reset_network_header(skb);
1770 ++ skb_reset_mac_header(skb);
1771 ++
1772 + netlink_skb_set_owner_r(skb, sk);
1773 +
1774 + if (nlk->dump_done_errno > 0) {
1775 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
1776 +index d3f068ad154cb..8461de79f67b4 100644
1777 +--- a/net/openvswitch/flow_netlink.c
1778 ++++ b/net/openvswitch/flow_netlink.c
1779 +@@ -2329,7 +2329,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
1780 + new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
1781 +
1782 + if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
1783 +- if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
1784 ++ if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
1785 + OVS_NLERR(log, "Flow action size exceeds max %u",
1786 + MAX_ACTIONS_BUFSIZE);
1787 + return ERR_PTR(-EMSGSIZE);
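
The openvswitch check is rewritten because both operands are unsigned: once next_offset has grown past MAX_ACTIONS_BUFSIZE, the old subtraction MAX_ACTIONS_BUFSIZE - next_offset wraps to a huge value and the oversized request sails through instead of returning -EMSGSIZE. A standalone demonstration with illustrative values:

    /* Sketch: the unsigned-wrap hole the one-line change closes.
     * Values are illustrative, not taken from a real flow.
     */
    #include <stdio.h>

    #define MAX_ACTIONS_BUFSIZE 32768u

    int main(void)
    {
        unsigned int next_offset = 40000;   /* already past the limit */
        unsigned int req_size = 16;

        /* Old form: 32768 - 40000 wraps to ~4.29e9, "< req_size" is
         * false, and the oversized request is NOT rejected. */
        printf("old check rejects: %d\n",
               (MAX_ACTIONS_BUFSIZE - next_offset) < req_size);

        /* New form: no subtraction, no wrap, request rejected. */
        printf("new check rejects: %d\n",
               (next_offset + req_size) > MAX_ACTIONS_BUFSIZE);
        return 0;
    }
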
1788 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1789 +index 70c102359bfef..a2696acbcd9d2 100644
1790 +--- a/net/packet/af_packet.c
1791 ++++ b/net/packet/af_packet.c
1792 +@@ -2791,8 +2791,9 @@ tpacket_error:
1793 +
1794 + status = TP_STATUS_SEND_REQUEST;
1795 + err = po->xmit(skb);
1796 +- if (unlikely(err > 0)) {
1797 +- err = net_xmit_errno(err);
1798 ++ if (unlikely(err != 0)) {
1799 ++ if (err > 0)
1800 ++ err = net_xmit_errno(err);
1801 + if (err && __packet_get_status(po, ph) ==
1802 + TP_STATUS_AVAILABLE) {
1803 + /* skb was destructed already */
1804 +@@ -2993,8 +2994,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
1805 + skb->no_fcs = 1;
1806 +
1807 + err = po->xmit(skb);
1808 +- if (err > 0 && (err = net_xmit_errno(err)) != 0)
1809 +- goto out_unlock;
1810 ++ if (unlikely(err != 0)) {
1811 ++ if (err > 0)
1812 ++ err = net_xmit_errno(err);
1813 ++ if (err)
1814 ++ goto out_unlock;
1815 ++ }
1816 +
1817 + dev_put(dev);
1818 +
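
Both af_packet hunks fix the same blind spot: po->xmit() can return a negative errno as well as a positive NET_XMIT_* congestion code, and the old "err > 0" tests let negative returns fall through as if the send had succeeded. A sketch of the corrected shape; net_xmit_errno() below mirrors the kernel macro, which maps everything except NET_XMIT_CN to -ENOBUFS:

    /* Sketch of the corrected return handling. NET_XMIT_CN means
     * "congested but accepted" (success); other positive codes
     * become -ENOBUFS, and negative errnos are kept as-is.
     */
    #include <errno.h>
    #include <stdio.h>

    #define NET_XMIT_SUCCESS 0
    #define NET_XMIT_DROP    1
    #define NET_XMIT_CN      2

    static int net_xmit_errno(int rc)
    {
        return rc == NET_XMIT_CN ? 0 : -ENOBUFS;
    }

    static int handle_xmit(int err)
    {
        if (err != 0) {             /* new: look at every non-zero return */
            if (err > 0)            /* positive NET_XMIT_* code: translate */
                err = net_xmit_errno(err);
            /* err < 0 is already an errno and is kept; the old code
             * fell through here as if the send had succeeded. */
        }
        return err;
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               handle_xmit(NET_XMIT_SUCCESS), handle_xmit(NET_XMIT_CN),
               handle_xmit(NET_XMIT_DROP), handle_xmit(-ENETDOWN));
        return 0;
    }
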
1819 +diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
1820 +index 9a76b74af37bc..91a503871116b 100644
1821 +--- a/net/rxrpc/net_ns.c
1822 ++++ b/net/rxrpc/net_ns.c
1823 +@@ -116,7 +116,9 @@ static __net_exit void rxrpc_exit_net(struct net *net)
1824 + struct rxrpc_net *rxnet = rxrpc_net(net);
1825 +
1826 + rxnet->live = false;
1827 ++ del_timer_sync(&rxnet->peer_keepalive_timer);
1828 + cancel_work_sync(&rxnet->peer_keepalive_work);
1829 ++ /* Remove the timer again as the worker may have restarted it. */
1830 + del_timer_sync(&rxnet->peer_keepalive_timer);
1831 + rxrpc_destroy_all_calls(rxnet);
1832 + rxrpc_destroy_all_connections(rxnet);
1833 +diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
1834 +index e15ff335953de..ed8d26e6468ca 100644
1835 +--- a/net/sched/cls_u32.c
1836 ++++ b/net/sched/cls_u32.c
1837 +@@ -386,14 +386,19 @@ static int u32_init(struct tcf_proto *tp)
1838 + return 0;
1839 + }
1840 +
1841 +-static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
1842 ++static void __u32_destroy_key(struct tc_u_knode *n)
1843 + {
1844 + struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
1845 +
1846 + tcf_exts_destroy(&n->exts);
1847 +- tcf_exts_put_net(&n->exts);
1848 + if (ht && --ht->refcnt == 0)
1849 + kfree(ht);
1850 ++ kfree(n);
1851 ++}
1852 ++
1853 ++static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
1854 ++{
1855 ++ tcf_exts_put_net(&n->exts);
1856 + #ifdef CONFIG_CLS_U32_PERF
1857 + if (free_pf)
1858 + free_percpu(n->pf);
1859 +@@ -402,8 +407,7 @@ static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
1860 + if (free_pf)
1861 + free_percpu(n->pcpu_success);
1862 + #endif
1863 +- kfree(n);
1864 +- return 0;
1865 ++ __u32_destroy_key(n);
1866 + }
1867 +
1868 + /* u32_delete_key_rcu should be called when free'ing a copied
1869 +@@ -812,10 +816,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
1870 + new->flags = n->flags;
1871 + RCU_INIT_POINTER(new->ht_down, ht);
1872 +
1873 +- /* bump reference count as long as we hold pointer to structure */
1874 +- if (ht)
1875 +- ht->refcnt++;
1876 +-
1877 + #ifdef CONFIG_CLS_U32_PERF
1878 + /* Statistics may be incremented by readers during update
1879 + * so we must keep them intact. When the node is later destroyed
1880 +@@ -837,6 +837,10 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
1881 + return NULL;
1882 + }
1883 +
1884 ++ /* bump reference count as long as we hold pointer to structure */
1885 ++ if (ht)
1886 ++ ht->refcnt++;
1887 ++
1888 + return new;
1889 + }
1890 +
1891 +@@ -903,13 +907,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
1892 + tca[TCA_RATE], ovr, extack);
1893 +
1894 + if (err) {
1895 +- u32_destroy_key(new, false);
1896 ++ __u32_destroy_key(new);
1897 + return err;
1898 + }
1899 +
1900 + err = u32_replace_hw_knode(tp, new, flags, extack);
1901 + if (err) {
1902 +- u32_destroy_key(new, false);
1903 ++ __u32_destroy_key(new);
1904 + return err;
1905 + }
1906 +
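
The cls_u32 hunks fix a matched leak/double-free pair: a half-built copy from u32_init_knode() is now torn down with __u32_destroy_key(), which frees only what the copy actually owns, and the ht->refcnt++ bump moves after the last point that can fail, so no error path has to undo it. A sketch of that acquire-after-the-last-failure-point shape; names are stand-ins, not the cls_u32 API:

    /* Sketch: acquire shared references only after the last failure
     * point, so the error path is a plain free.
     */
    #include <stdlib.h>

    struct ht { int refcnt; };

    struct knode {
        struct ht *ht_down;
        char *pf;                   /* stands in for the percpu stats */
    };

    static struct knode *clone_knode(const struct knode *n, struct ht *ht)
    {
        struct knode *new = malloc(sizeof(*new));

        if (!new)
            return NULL;
        *new = *n;                  /* shallow copy of the template */
        new->ht_down = ht;

        new->pf = malloc(64);       /* a step that can still fail */
        if (!new->pf) {
            free(new);              /* nothing acquired yet: no undo */
            return NULL;
        }

        if (ht)                     /* past the last failure point */
            ht->refcnt++;
        return new;
    }

    int main(void)
    {
        struct ht table = { .refcnt = 1 };
        struct knode proto = { 0 };
        struct knode *copy = clone_knode(&proto, &table);

        if (copy) {
            free(copy->pf);
            free(copy);
        }
        return 0;
    }
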
1907 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
1908 +index 06684ac346abd..5221092cc66d4 100644
1909 +--- a/net/smc/af_smc.c
1910 ++++ b/net/smc/af_smc.c
1911 +@@ -1698,8 +1698,10 @@ static int smc_shutdown(struct socket *sock, int how)
1912 + if (smc->use_fallback) {
1913 + rc = kernel_sock_shutdown(smc->clcsock, how);
1914 + sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
1915 +- if (sk->sk_shutdown == SHUTDOWN_MASK)
1916 ++ if (sk->sk_shutdown == SHUTDOWN_MASK) {
1917 + sk->sk_state = SMC_CLOSED;
1918 ++ sock_put(sk);
1919 ++ }
1920 + goto out;
1921 + }
1922 + switch (how) {
1923 +diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
1924 +index 05277a88e20d8..d1579896f3a11 100644
1925 +--- a/sound/soc/atmel/sam9g20_wm8731.c
1926 ++++ b/sound/soc/atmel/sam9g20_wm8731.c
1927 +@@ -46,35 +46,6 @@
1928 + */
1929 + #undef ENABLE_MIC_INPUT
1930 +
1931 +-static struct clk *mclk;
1932 +-
1933 +-static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
1934 +- struct snd_soc_dapm_context *dapm,
1935 +- enum snd_soc_bias_level level)
1936 +-{
1937 +- static int mclk_on;
1938 +- int ret = 0;
1939 +-
1940 +- switch (level) {
1941 +- case SND_SOC_BIAS_ON:
1942 +- case SND_SOC_BIAS_PREPARE:
1943 +- if (!mclk_on)
1944 +- ret = clk_enable(mclk);
1945 +- if (ret == 0)
1946 +- mclk_on = 1;
1947 +- break;
1948 +-
1949 +- case SND_SOC_BIAS_OFF:
1950 +- case SND_SOC_BIAS_STANDBY:
1951 +- if (mclk_on)
1952 +- clk_disable(mclk);
1953 +- mclk_on = 0;
1954 +- break;
1955 +- }
1956 +-
1957 +- return ret;
1958 +-}
1959 +-
1960 + static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
1961 + SND_SOC_DAPM_MIC("Int Mic", NULL),
1962 + SND_SOC_DAPM_SPK("Ext Spk", NULL),
1963 +@@ -135,7 +106,6 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
1964 + .owner = THIS_MODULE,
1965 + .dai_link = &at91sam9g20ek_dai,
1966 + .num_links = 1,
1967 +- .set_bias_level = at91sam9g20ek_set_bias_level,
1968 +
1969 + .dapm_widgets = at91sam9g20ek_dapm_widgets,
1970 + .num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
1971 +@@ -148,7 +118,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
1972 + {
1973 + struct device_node *np = pdev->dev.of_node;
1974 + struct device_node *codec_np, *cpu_np;
1975 +- struct clk *pllb;
1976 + struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
1977 + int ret;
1978 +
1979 +@@ -162,31 +131,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
1980 + return -EINVAL;
1981 + }
1982 +
1983 +- /*
1984 +- * Codec MCLK is supplied by PCK0 - set it up.
1985 +- */
1986 +- mclk = clk_get(NULL, "pck0");
1987 +- if (IS_ERR(mclk)) {
1988 +- dev_err(&pdev->dev, "Failed to get MCLK\n");
1989 +- ret = PTR_ERR(mclk);
1990 +- goto err;
1991 +- }
1992 +-
1993 +- pllb = clk_get(NULL, "pllb");
1994 +- if (IS_ERR(pllb)) {
1995 +- dev_err(&pdev->dev, "Failed to get PLLB\n");
1996 +- ret = PTR_ERR(pllb);
1997 +- goto err_mclk;
1998 +- }
1999 +- ret = clk_set_parent(mclk, pllb);
2000 +- clk_put(pllb);
2001 +- if (ret != 0) {
2002 +- dev_err(&pdev->dev, "Failed to set MCLK parent\n");
2003 +- goto err_mclk;
2004 +- }
2005 +-
2006 +- clk_set_rate(mclk, MCLK_RATE);
2007 +-
2008 + card->dev = &pdev->dev;
2009 +
2010 + /* Parse device node info */
2011 +@@ -230,9 +174,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
2012 +
2013 + return ret;
2014 +
2015 +-err_mclk:
2016 +- clk_put(mclk);
2017 +- mclk = NULL;
2018 + err:
2019 + atmel_ssc_put_audio(0);
2020 + return ret;
2021 +@@ -242,8 +183,6 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
2022 + {
2023 + struct snd_soc_card *card = platform_get_drvdata(pdev);
2024 +
2025 +- clk_disable(mclk);
2026 +- mclk = NULL;
2027 + snd_soc_unregister_card(card);
2028 + atmel_ssc_put_audio(0);
2029 +
2030 +diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
2031 +index d5269ab5f91c5..e4cde214b7b2d 100644
2032 +--- a/sound/soc/codecs/msm8916-wcd-digital.c
2033 ++++ b/sound/soc/codecs/msm8916-wcd-digital.c
2034 +@@ -1206,9 +1206,16 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
2035 +
2036 + dev_set_drvdata(dev, priv);
2037 +
2038 +- return devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
2039 ++ ret = devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
2040 + msm8916_wcd_digital_dai,
2041 + ARRAY_SIZE(msm8916_wcd_digital_dai));
2042 ++ if (ret)
2043 ++ goto err_mclk;
2044 ++
2045 ++ return 0;
2046 ++
2047 ++err_mclk:
2048 ++ clk_disable_unprepare(priv->mclk);
2049 + err_clk:
2050 + clk_disable_unprepare(priv->ahbclk);
2051 + return ret;
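
The msm8916 fix completes the driver's goto ladder: a failure in component registration previously leaked the prepared mclk, because the only unwind label was err_clk. A sketch of the pattern with stand-in names; enable/disable/register_component are illustrative, not the driver's API:

    /* Sketch of the completed goto ladder: every failure point jumps
     * to the label that releases exactly what has been acquired so
     * far, in reverse order of acquisition.
     */
    #include <stdio.h>

    static int enable(const char *clk)   { printf("enable %s\n", clk); return 0; }
    static void disable(const char *clk) { printf("disable %s\n", clk); }
    static int register_component(void)  { return -1; /* simulate failure */ }

    static int probe(void)
    {
        int ret;

        ret = enable("ahbclk");
        if (ret)
            return ret;
        ret = enable("mclk");
        if (ret)
            goto err_clk;

        ret = register_component();
        if (ret)
            goto err_mclk;          /* the unwind step this patch adds */
        return 0;

    err_mclk:
        disable("mclk");
    err_clk:
        disable("ahbclk");
        return ret;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }
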
2052 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2053 +index 5876be5dd9bae..1c09dfb0c0f09 100644
2054 +--- a/sound/soc/soc-dapm.c
2055 ++++ b/sound/soc/soc-dapm.c
2056 +@@ -1676,8 +1676,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
2057 + switch (w->id) {
2058 + case snd_soc_dapm_pre:
2059 + if (!w->event)
2060 +- list_for_each_entry_safe_continue(w, n, list,
2061 +- power_list);
2062 ++ continue;
2063 +
2064 + if (event == SND_SOC_DAPM_STREAM_START)
2065 + ret = w->event(w,
2066 +@@ -1689,8 +1688,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
2067 +
2068 + case snd_soc_dapm_post:
2069 + if (!w->event)
2070 +- list_for_each_entry_safe_continue(w, n, list,
2071 +- power_list);
2072 ++ continue;
2073 +
2074 + if (event == SND_SOC_DAPM_STREAM_START)
2075 + ret = w->event(w,
2076 +diff --git a/sound/usb/midi.c b/sound/usb/midi.c
2077 +index 33e9a7f6246f7..ce501200e592f 100644
2078 +--- a/sound/usb/midi.c
2079 ++++ b/sound/usb/midi.c
2080 +@@ -1210,6 +1210,7 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
2081 + } while (drain_urbs && timeout);
2082 + finish_wait(&ep->drain_wait, &wait);
2083 + }
2084 ++ port->active = 0;
2085 + spin_unlock_irq(&ep->buffer_lock);
2086 + }
2087 +
2088 +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
2089 +index ff97fdcf63bd5..b1959e04cbb14 100644
2090 +--- a/sound/usb/usbaudio.h
2091 ++++ b/sound/usb/usbaudio.h
2092 +@@ -8,7 +8,7 @@
2093 + */
2094 +
2095 + /* handling of USB vendor/product ID pairs as 32-bit numbers */
2096 +-#define USB_ID(vendor, product) (((vendor) << 16) | (product))
2097 ++#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))
2098 + #define USB_ID_VENDOR(id) ((id) >> 16)
2099 + #define USB_ID_PRODUCT(id) ((u16)(id))
2100 +
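
The usbaudio.h cast matters because vendor is otherwise promoted to signed int, and any vendor ID of 0x8000 or above shifts a bit into the sign bit, which is undefined behaviour (UBSAN reports it as a shift overflow). A minimal before/after; USB_ID_OLD and USB_ID_NEW are local names for the two macro versions:

    /* Sketch: why USB_ID() casts before shifting. */
    #include <stdio.h>

    #define USB_ID_OLD(vendor, product) (((vendor) << 16) | (product))
    #define USB_ID_NEW(vendor, product) (((unsigned int)(vendor) << 16) | (product))

    int main(void)
    {
        int vendor = 0x8086, product = 0x1234;

        /* USB_ID_OLD(vendor, product) shifts a bit into the sign bit
         * of a signed int once vendor >= 0x8000 -- undefined behaviour
         * that -fsanitize=undefined flags as a shift overflow. */

        /* The cast makes the shift happen in unsigned arithmetic: */
        printf("%#x\n", USB_ID_NEW(vendor, product));
        return 0;
    }
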
2101 +diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
2102 +index fedcb7b35af9f..af5ea50ed5c0e 100755
2103 +--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
2104 ++++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
2105 +@@ -172,6 +172,17 @@ flooding_filters_add()
2106 + local lsb
2107 + local i
2108 +
2109 ++ # Prevent unwanted packets from entering the bridge and interfering
2110 ++ # with the test.
2111 ++ tc qdisc add dev br0 clsact
2112 ++ tc filter add dev br0 egress protocol all pref 1 handle 1 \
2113 ++ matchall skip_hw action drop
2114 ++ tc qdisc add dev $h1 clsact
2115 ++ tc filter add dev $h1 egress protocol all pref 1 handle 1 \
2116 ++ flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
2117 ++ tc filter add dev $h1 egress protocol all pref 2 handle 2 \
2118 ++ matchall skip_hw action drop
2119 ++
2120 + tc qdisc add dev $rp2 clsact
2121 +
2122 + for i in $(eval echo {1..$num_remotes}); do
2123 +@@ -194,6 +205,12 @@ flooding_filters_del()
2124 + done
2125 +
2126 + tc qdisc del dev $rp2 clsact
2127 ++
2128 ++ tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
2129 ++ tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
2130 ++ tc qdisc del dev $h1 clsact
2131 ++ tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
2132 ++ tc qdisc del dev br0 clsact
2133 + }
2134 +
2135 + flooding_check_packets()