From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.0 commit in: /
Date: Thu, 02 May 2019 10:12:54
Message-Id: 1556791950.665ebba14c8b3d369b4d6e59828e8e33697c4879.mpagano@gentoo
1 commit: 665ebba14c8b3d369b4d6e59828e8e33697c4879
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu May 2 10:12:30 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu May 2 10:12:30 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=665ebba1
7
8 Linux patch 5.0.11
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1010_linux-5.0.11.patch | 3504 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 3508 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 49a76eb..4dfa486 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -83,6 +83,10 @@ Patch: 1009_linux-5.0.10.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.0.10
23
24 +Patch: 1010_linux-5.0.11.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.0.11
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1010_linux-5.0.11.patch b/1010_linux-5.0.11.patch
33 new file mode 100644
34 index 0000000..a5f9df8
35 --- /dev/null
36 +++ b/1010_linux-5.0.11.patch
37 @@ -0,0 +1,3504 @@
38 +diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
39 +index acdfb5d2bcaa..e2142fe40cda 100644
40 +--- a/Documentation/networking/ip-sysctl.txt
41 ++++ b/Documentation/networking/ip-sysctl.txt
42 +@@ -422,6 +422,7 @@ tcp_min_rtt_wlen - INTEGER
43 + minimum RTT when it is moved to a longer path (e.g., due to traffic
44 + engineering). A longer window makes the filter more resistant to RTT
45 + inflations such as transient congestion. The unit is seconds.
46 ++ Possible values: 0 - 86400 (1 day)
47 + Default: 300
48 +
49 + tcp_moderate_rcvbuf - BOOLEAN
50 +diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
51 +index 187ce4f599a2..e4dfaf0d6e87 100644
52 +--- a/Documentation/sysctl/vm.txt
53 ++++ b/Documentation/sysctl/vm.txt
54 +@@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
55 + increase the success rate of future high-order allocations such as SLUB
56 + allocations, THP and hugetlbfs pages.
57 +
58 +-To make it sensible with respect to the watermark_scale_factor parameter,
59 +-the unit is in fractions of 10,000. The default value of 15,000 means
60 +-that up to 150% of the high watermark will be reclaimed in the event of
61 +-a pageblock being mixed due to fragmentation. The level of reclaim is
62 +-determined by the number of fragmentation events that occurred in the
63 +-recent past. If this value is smaller than a pageblock then a pageblocks
64 +-worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor
65 +-of 0 will disable the feature.
66 ++To make it sensible with respect to the watermark_scale_factor
67 ++parameter, the unit is in fractions of 10,000. The default value of
68 ++15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
69 ++watermark will be reclaimed in the event of a pageblock being mixed due
70 ++to fragmentation. The level of reclaim is determined by the number of
71 ++fragmentation events that occurred in the recent past. If this value is
72 ++smaller than a pageblock then a pageblocks worth of pages will be reclaimed
73 ++(e.g. 2MB on 64-bit x86). A boost factor of 0 will disable the feature.
74 +
75 + =============================================================
76 +
77 +diff --git a/Makefile b/Makefile
78 +index b282c4143b21..c3daaefa979c 100644
79 +--- a/Makefile
80 ++++ b/Makefile
81 +@@ -1,7 +1,7 @@
82 + # SPDX-License-Identifier: GPL-2.0
83 + VERSION = 5
84 + PATCHLEVEL = 0
85 +-SUBLEVEL = 10
86 ++SUBLEVEL = 11
87 + EXTRAVERSION =
88 + NAME = Shy Crocodile
89 +
90 +diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
91 +index 6c7ccb428c07..7135820f76d4 100644
92 +--- a/arch/arm/boot/compressed/head.S
93 ++++ b/arch/arm/boot/compressed/head.S
94 +@@ -1438,7 +1438,21 @@ ENTRY(efi_stub_entry)
95 +
96 + @ Preserve return value of efi_entry() in r4
97 + mov r4, r0
98 +- bl cache_clean_flush
99 ++
100 ++ @ our cache maintenance code relies on CP15 barrier instructions
101 ++ @ but since we arrived here with the MMU and caches configured
102 ++ @ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
103 ++ @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
104 ++ @ the enable path will be executed on v7+ only.
105 ++ mrc p15, 0, r1, c1, c0, 0 @ read SCTLR
106 ++ tst r1, #(1 << 5) @ CP15BEN bit set?
107 ++ bne 0f
108 ++ orr r1, r1, #(1 << 5) @ CP15 barrier instructions
109 ++ mcr p15, 0, r1, c1, c0, 0 @ write SCTLR
110 ++ ARM( .inst 0xf57ff06f @ v7+ isb )
111 ++ THUMB( isb )
112 ++
113 ++0: bl cache_clean_flush
114 + bl cache_off
115 +
116 + @ Set parameters for booting zImage according to boot protocol
117 +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
118 +index 7205a9085b4d..c9411774555d 100644
119 +--- a/arch/arm64/mm/init.c
120 ++++ b/arch/arm64/mm/init.c
121 +@@ -406,7 +406,7 @@ void __init arm64_memblock_init(void)
122 + * Otherwise, this is a no-op
123 + */
124 + u64 base = phys_initrd_start & PAGE_MASK;
125 +- u64 size = PAGE_ALIGN(phys_initrd_size);
126 ++ u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
127 +
128 + /*
129 + * We can only add back the initrd memory if we don't end up
130 +diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
131 +index f158c5894a9a..feb2653490df 100644
132 +--- a/arch/mips/kernel/scall64-o32.S
133 ++++ b/arch/mips/kernel/scall64-o32.S
134 +@@ -125,7 +125,7 @@ trace_a_syscall:
135 + subu t1, v0, __NR_O32_Linux
136 + move a1, v0
137 + bnez t1, 1f /* __NR_syscall at offset 0 */
138 +- lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
139 ++ ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
140 + .set pop
141 +
142 + 1: jal syscall_trace_enter
143 +diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
144 +index cfdd08897a06..e2b0c5f15c7b 100644
145 +--- a/arch/powerpc/configs/skiroot_defconfig
146 ++++ b/arch/powerpc/configs/skiroot_defconfig
147 +@@ -260,6 +260,7 @@ CONFIG_UDF_FS=m
148 + CONFIG_MSDOS_FS=m
149 + CONFIG_VFAT_FS=m
150 + CONFIG_PROC_KCORE=y
151 ++CONFIG_HUGETLBFS=y
152 + # CONFIG_MISC_FILESYSTEMS is not set
153 + # CONFIG_NETWORK_FILESYSTEMS is not set
154 + CONFIG_NLS=y
155 +diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
156 +index 1e0bc5955a40..afd516b572f8 100644
157 +--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
158 ++++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
159 +@@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
160 + * can be used, r7 contains NSEC_PER_SEC.
161 + */
162 +
163 +- lwz r5,WTOM_CLOCK_SEC(r9)
164 ++ lwz r5,(WTOM_CLOCK_SEC+LOPART)(r9)
165 + lwz r6,WTOM_CLOCK_NSEC(r9)
166 +
167 + /* We now have our offset in r5,r6. We create a fake dependency
168 +diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
169 +index 8c7464c3f27f..2782188a5ba1 100644
170 +--- a/arch/powerpc/platforms/Kconfig.cputype
171 ++++ b/arch/powerpc/platforms/Kconfig.cputype
172 +@@ -318,7 +318,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
173 +
174 + config PPC_RADIX_MMU
175 + bool "Radix MMU Support"
176 +- depends on PPC_BOOK3S_64
177 ++ depends on PPC_BOOK3S_64 && HUGETLB_PAGE
178 + select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
179 + default y
180 + help
181 +diff --git a/arch/x86/Makefile b/arch/x86/Makefile
182 +index 9c5a67d1b9c1..c0c7291d4ccf 100644
183 +--- a/arch/x86/Makefile
184 ++++ b/arch/x86/Makefile
185 +@@ -217,6 +217,15 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
186 + # Avoid indirect branches in kernel to deal with Spectre
187 + ifdef CONFIG_RETPOLINE
188 + KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
189 ++ # Additionally, avoid generating expensive indirect jumps which
190 ++ # are subject to retpolines for small number of switch cases.
191 ++ # clang turns off jump table generation by default when under
192 ++ # retpoline builds, however, gcc does not for x86. This has
193 ++ # only been fixed starting from gcc stable version 8.4.0 and
194 ++ # onwards, but not for older ones. See gcc bug #86952.
195 ++ ifndef CONFIG_CC_IS_CLANG
196 ++ KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
197 ++ endif
198 + endif
199 +
200 + archscripts: scripts_basic
201 +diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
202 +index d2e780705c5a..56194c571299 100644
203 +--- a/arch/x86/events/intel/cstate.c
204 ++++ b/arch/x86/events/intel/cstate.c
205 +@@ -76,15 +76,15 @@
206 + * Scope: Package (physical package)
207 + * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
208 + * perf code: 0x04
209 +- * Available model: HSW ULT,CNL
210 ++ * Available model: HSW ULT,KBL,CNL
211 + * Scope: Package (physical package)
212 + * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
213 + * perf code: 0x05
214 +- * Available model: HSW ULT,CNL
215 ++ * Available model: HSW ULT,KBL,CNL
216 + * Scope: Package (physical package)
217 + * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
218 + * perf code: 0x06
219 +- * Available model: HSW ULT,GLM,CNL
220 ++ * Available model: HSW ULT,KBL,GLM,CNL
221 + * Scope: Package (physical package)
222 + *
223 + */
224 +@@ -572,8 +572,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
225 + X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
226 + X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
227 +
228 +- X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates),
229 +- X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
230 ++ X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, hswult_cstates),
231 ++ X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
232 +
233 + X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
234 +
235 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
236 +index e5ed28629271..72510c470001 100644
237 +--- a/block/bfq-iosched.c
238 ++++ b/block/bfq-iosched.c
239 +@@ -2804,7 +2804,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
240 + bfq_remove_request(q, rq);
241 + }
242 +
243 +-static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
244 ++static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
245 + {
246 + /*
247 + * If this bfqq is shared between multiple processes, check
248 +@@ -2837,9 +2837,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
249 + /*
250 + * All in-service entities must have been properly deactivated
251 + * or requeued before executing the next function, which
252 +- * resets all in-service entites as no more in service.
253 ++ * resets all in-service entities as no more in service. This
254 ++ * may cause bfqq to be freed. If this happens, the next
255 ++ * function returns true.
256 + */
257 +- __bfq_bfqd_reset_in_service(bfqd);
258 ++ return __bfq_bfqd_reset_in_service(bfqd);
259 + }
260 +
261 + /**
262 +@@ -3244,7 +3246,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
263 + bool slow;
264 + unsigned long delta = 0;
265 + struct bfq_entity *entity = &bfqq->entity;
266 +- int ref;
267 +
268 + /*
269 + * Check whether the process is slow (see bfq_bfqq_is_slow).
270 +@@ -3313,10 +3314,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
271 + * reason.
272 + */
273 + __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
274 +- ref = bfqq->ref;
275 +- __bfq_bfqq_expire(bfqd, bfqq);
276 +-
277 +- if (ref == 1) /* bfqq is gone, no more actions on it */
278 ++ if (__bfq_bfqq_expire(bfqd, bfqq))
279 ++ /* bfqq is gone, no more actions on it */
280 + return;
281 +
282 + bfqq->injected_service = 0;
283 +diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
284 +index 746bd570b85a..ca98c98a8179 100644
285 +--- a/block/bfq-iosched.h
286 ++++ b/block/bfq-iosched.h
287 +@@ -993,7 +993,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity,
288 + bool ins_into_idle_tree);
289 + bool next_queue_may_preempt(struct bfq_data *bfqd);
290 + struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
291 +-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
292 ++bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
293 + void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
294 + bool ins_into_idle_tree, bool expiration);
295 + void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
296 +diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
297 +index 4aab1a8191f0..8077bf71d2ac 100644
298 +--- a/block/bfq-wf2q.c
299 ++++ b/block/bfq-wf2q.c
300 +@@ -1599,7 +1599,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
301 + return bfqq;
302 + }
303 +
304 +-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
305 ++/* returns true if the in-service queue gets freed */
306 ++bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
307 + {
308 + struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
309 + struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
310 +@@ -1623,8 +1624,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
311 + * service tree either, then release the service reference to
312 + * the queue it represents (taken with bfq_get_entity).
313 + */
314 +- if (!in_serv_entity->on_st)
315 ++ if (!in_serv_entity->on_st) {
316 ++ /*
317 ++ * If no process is referencing in_serv_bfqq any
318 ++ * longer, then the service reference may be the only
319 ++ * reference to the queue. If this is the case, then
320 ++ * bfqq gets freed here.
321 ++ */
322 ++ int ref = in_serv_bfqq->ref;
323 + bfq_put_queue(in_serv_bfqq);
324 ++ if (ref == 1)
325 ++ return true;
326 ++ }
327 ++
328 ++ return false;
329 + }
330 +
331 + void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
332 +diff --git a/crypto/lrw.c b/crypto/lrw.c
333 +index 0430ccd08728..08a0e458bc3e 100644
334 +--- a/crypto/lrw.c
335 ++++ b/crypto/lrw.c
336 +@@ -212,8 +212,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
337 + {
338 + struct skcipher_request *req = areq->data;
339 +
340 +- if (!err)
341 ++ if (!err) {
342 ++ struct rctx *rctx = skcipher_request_ctx(req);
343 ++
344 ++ rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
345 + err = xor_tweak_post(req);
346 ++ }
347 +
348 + skcipher_request_complete(req, err);
349 + }
350 +diff --git a/crypto/xts.c b/crypto/xts.c
351 +index 847f54f76789..2f948328cabb 100644
352 +--- a/crypto/xts.c
353 ++++ b/crypto/xts.c
354 +@@ -137,8 +137,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
355 + {
356 + struct skcipher_request *req = areq->data;
357 +
358 +- if (!err)
359 ++ if (!err) {
360 ++ struct rctx *rctx = skcipher_request_ctx(req);
361 ++
362 ++ rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
363 + err = xor_tweak_post(req);
364 ++ }
365 +
366 + skcipher_request_complete(req, err);
367 + }
368 +diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
369 +index 022cd80e80cc..a6e556bf62df 100644
370 +--- a/drivers/android/binder_alloc.c
371 ++++ b/drivers/android/binder_alloc.c
372 +@@ -959,14 +959,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
373 +
374 + index = page - alloc->pages;
375 + page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
376 ++
377 ++ mm = alloc->vma_vm_mm;
378 ++ if (!mmget_not_zero(mm))
379 ++ goto err_mmget;
380 ++ if (!down_write_trylock(&mm->mmap_sem))
381 ++ goto err_down_write_mmap_sem_failed;
382 + vma = binder_alloc_get_vma(alloc);
383 +- if (vma) {
384 +- if (!mmget_not_zero(alloc->vma_vm_mm))
385 +- goto err_mmget;
386 +- mm = alloc->vma_vm_mm;
387 +- if (!down_write_trylock(&mm->mmap_sem))
388 +- goto err_down_write_mmap_sem_failed;
389 +- }
390 +
391 + list_lru_isolate(lru, item);
392 + spin_unlock(lock);
393 +@@ -979,10 +978,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
394 + PAGE_SIZE);
395 +
396 + trace_binder_unmap_user_end(alloc, index);
397 +-
398 +- up_write(&mm->mmap_sem);
399 +- mmput(mm);
400 + }
401 ++ up_write(&mm->mmap_sem);
402 ++ mmput(mm);
403 +
404 + trace_binder_unmap_kernel_start(alloc, index);
405 +
406 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
407 +index 9a8d83bc1e75..fc7aefd42ae0 100644
408 +--- a/drivers/block/loop.c
409 ++++ b/drivers/block/loop.c
410 +@@ -1111,8 +1111,9 @@ out_unlock:
411 + err = __blkdev_reread_part(bdev);
412 + else
413 + err = blkdev_reread_part(bdev);
414 +- pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
415 +- __func__, lo_number, err);
416 ++ if (err)
417 ++ pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
418 ++ __func__, lo_number, err);
419 + /* Device is gone, no point in returning error */
420 + err = 0;
421 + }
422 +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
423 +index 684854d3b0ad..7e57f8f012c3 100644
424 +--- a/drivers/block/zram/zram_drv.c
425 ++++ b/drivers/block/zram/zram_drv.c
426 +@@ -774,18 +774,18 @@ struct zram_work {
427 + struct zram *zram;
428 + unsigned long entry;
429 + struct bio *bio;
430 ++ struct bio_vec bvec;
431 + };
432 +
433 + #if PAGE_SIZE != 4096
434 + static void zram_sync_read(struct work_struct *work)
435 + {
436 +- struct bio_vec bvec;
437 + struct zram_work *zw = container_of(work, struct zram_work, work);
438 + struct zram *zram = zw->zram;
439 + unsigned long entry = zw->entry;
440 + struct bio *bio = zw->bio;
441 +
442 +- read_from_bdev_async(zram, &bvec, entry, bio);
443 ++ read_from_bdev_async(zram, &zw->bvec, entry, bio);
444 + }
445 +
446 + /*
447 +@@ -798,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
448 + {
449 + struct zram_work work;
450 +
451 ++ work.bvec = *bvec;
452 + work.zram = zram;
453 + work.entry = entry;
454 + work.bio = bio;
455 +diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
456 +index 131f3974740d..814853842e29 100644
457 +--- a/drivers/dma/mediatek/mtk-cqdma.c
458 ++++ b/drivers/dma/mediatek/mtk-cqdma.c
459 +@@ -253,7 +253,7 @@ static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
460 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
461 + mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
462 + #else
463 +- mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
464 ++ mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
465 + #endif
466 +
467 + /* setup the length */
468 +diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
469 +index 2b4f25698169..e2a5398f89b5 100644
470 +--- a/drivers/dma/sh/rcar-dmac.c
471 ++++ b/drivers/dma/sh/rcar-dmac.c
472 +@@ -1282,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
473 + enum dma_status status;
474 + unsigned int residue = 0;
475 + unsigned int dptr = 0;
476 ++ unsigned int chcrb;
477 ++ unsigned int tcrb;
478 ++ unsigned int i;
479 +
480 + if (!desc)
481 + return 0;
482 +@@ -1329,6 +1332,24 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
483 + return 0;
484 + }
485 +
486 ++ /*
487 ++ * We need to read two registers.
488 ++ * Make sure the control register does not skip to next chunk
489 ++ * while reading the counter.
490 ++ * Trying it 3 times should be enough: Initial read, retry, retry
491 ++ * for the paranoid.
492 ++ */
493 ++ for (i = 0; i < 3; i++) {
494 ++ chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
495 ++ RCAR_DMACHCRB_DPTR_MASK;
496 ++ tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
497 ++ /* Still the same? */
498 ++ if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
499 ++ RCAR_DMACHCRB_DPTR_MASK))
500 ++ break;
501 ++ }
502 ++ WARN_ONCE(i >= 3, "residue might be not continuous!");
503 ++
504 + /*
505 + * In descriptor mode the descriptor running pointer is not maintained
506 + * by the interrupt handler, find the running descriptor from the
507 +@@ -1336,8 +1357,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
508 + * mode just use the running descriptor pointer.
509 + */
510 + if (desc->hwdescs.use) {
511 +- dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
512 +- RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
513 ++ dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
514 + if (dptr == 0)
515 + dptr = desc->nchunks;
516 + dptr--;
517 +@@ -1355,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
518 + }
519 +
520 + /* Add the residue for the current chunk. */
521 +- residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
522 ++ residue += tcrb << desc->xfer_shift;
523 +
524 + return residue;
525 + }
526 +@@ -1368,6 +1388,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
527 + enum dma_status status;
528 + unsigned long flags;
529 + unsigned int residue;
530 ++ bool cyclic;
531 +
532 + status = dma_cookie_status(chan, cookie, txstate);
533 + if (status == DMA_COMPLETE || !txstate)
534 +@@ -1375,10 +1396,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
535 +
536 + spin_lock_irqsave(&rchan->lock, flags);
537 + residue = rcar_dmac_chan_get_residue(rchan, cookie);
538 ++ cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
539 + spin_unlock_irqrestore(&rchan->lock, flags);
540 +
541 + /* if there's no residue, the cookie is complete */
542 +- if (!residue)
543 ++ if (!residue && !cyclic)
544 + return DMA_COMPLETE;
545 +
546 + dma_set_residue(txstate, residue);
547 +diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
548 +index e41223c05f6e..6cf2e2ce4093 100644
549 +--- a/drivers/gpio/gpio-eic-sprd.c
550 ++++ b/drivers/gpio/gpio-eic-sprd.c
551 +@@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
552 + irq_set_handler_locked(data, handle_edge_irq);
553 + break;
554 + case IRQ_TYPE_EDGE_BOTH:
555 ++ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
556 + sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
557 + irq_set_handler_locked(data, handle_edge_irq);
558 + break;
559 +diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
560 +index 4ee16b264dbe..7f365ac0b549 100644
561 +--- a/drivers/gpu/drm/i915/intel_fbdev.c
562 ++++ b/drivers/gpu/drm/i915/intel_fbdev.c
563 +@@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
564 + bool *enabled, int width, int height)
565 + {
566 + struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
567 ++ unsigned long conn_configured, conn_seq, mask;
568 + unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
569 +- unsigned long conn_configured, conn_seq;
570 + int i, j;
571 + bool *save_enabled;
572 + bool fallback = true, ret = true;
573 +@@ -355,9 +355,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
574 + drm_modeset_backoff(&ctx);
575 +
576 + memcpy(save_enabled, enabled, count);
577 +- conn_seq = GENMASK(count - 1, 0);
578 ++ mask = GENMASK(count - 1, 0);
579 + conn_configured = 0;
580 + retry:
581 ++ conn_seq = conn_configured;
582 + for (i = 0; i < count; i++) {
583 + struct drm_fb_helper_connector *fb_conn;
584 + struct drm_connector *connector;
585 +@@ -370,8 +371,7 @@ retry:
586 + if (conn_configured & BIT(i))
587 + continue;
588 +
589 +- /* First pass, only consider tiled connectors */
590 +- if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
591 ++ if (conn_seq == 0 && !connector->has_tile)
592 + continue;
593 +
594 + if (connector->status == connector_status_connected)
595 +@@ -475,10 +475,8 @@ retry:
596 + conn_configured |= BIT(i);
597 + }
598 +
599 +- if (conn_configured != conn_seq) { /* repeat until no more are found */
600 +- conn_seq = conn_configured;
601 ++ if ((conn_configured & mask) != mask && conn_configured != conn_seq)
602 + goto retry;
603 +- }
604 +
605 + /*
606 + * If the BIOS didn't enable everything it could, fall back to have the
607 +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
608 +index 0ec08394e17a..996cadd83f24 100644
609 +--- a/drivers/gpu/drm/ttm/ttm_bo.c
610 ++++ b/drivers/gpu/drm/ttm/ttm_bo.c
611 +@@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
612 + * ttm_global_mutex - protecting the global BO state
613 + */
614 + DEFINE_MUTEX(ttm_global_mutex);
615 +-struct ttm_bo_global ttm_bo_glob = {
616 +- .use_count = 0
617 +-};
618 ++unsigned ttm_bo_glob_use_count;
619 ++struct ttm_bo_global ttm_bo_glob;
620 +
621 + static struct attribute ttm_bo_count = {
622 + .name = "bo_count",
623 +@@ -1535,12 +1534,13 @@ static void ttm_bo_global_release(void)
624 + struct ttm_bo_global *glob = &ttm_bo_glob;
625 +
626 + mutex_lock(&ttm_global_mutex);
627 +- if (--glob->use_count > 0)
628 ++ if (--ttm_bo_glob_use_count > 0)
629 + goto out;
630 +
631 + kobject_del(&glob->kobj);
632 + kobject_put(&glob->kobj);
633 + ttm_mem_global_release(&ttm_mem_glob);
634 ++ memset(glob, 0, sizeof(*glob));
635 + out:
636 + mutex_unlock(&ttm_global_mutex);
637 + }
638 +@@ -1552,7 +1552,7 @@ static int ttm_bo_global_init(void)
639 + unsigned i;
640 +
641 + mutex_lock(&ttm_global_mutex);
642 +- if (++glob->use_count > 1)
643 ++ if (++ttm_bo_glob_use_count > 1)
644 + goto out;
645 +
646 + ret = ttm_mem_global_init(&ttm_mem_glob);
647 +diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
648 +index f1567c353b54..9a0909decb36 100644
649 +--- a/drivers/gpu/drm/ttm/ttm_memory.c
650 ++++ b/drivers/gpu/drm/ttm/ttm_memory.c
651 +@@ -461,8 +461,8 @@ out_no_zone:
652 +
653 + void ttm_mem_global_release(struct ttm_mem_global *glob)
654 + {
655 +- unsigned int i;
656 + struct ttm_mem_zone *zone;
657 ++ unsigned int i;
658 +
659 + /* let the page allocator first stop the shrink work. */
660 + ttm_page_alloc_fini();
661 +@@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
662 + zone = glob->zones[i];
663 + kobject_del(&zone->kobj);
664 + kobject_put(&zone->kobj);
665 +- }
666 ++ }
667 + kobject_del(&glob->kobj);
668 + kobject_put(&glob->kobj);
669 ++ memset(glob, 0, sizeof(*glob));
670 + }
671 +
672 + static void ttm_check_swapping(struct ttm_mem_global *glob)
673 +diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
674 +index 3ce136ba8791..2ae4ece0dcea 100644
675 +--- a/drivers/gpu/drm/vc4/vc4_crtc.c
676 ++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
677 +@@ -999,7 +999,7 @@ static void
678 + vc4_crtc_reset(struct drm_crtc *crtc)
679 + {
680 + if (crtc->state)
681 +- __drm_atomic_helper_crtc_destroy_state(crtc->state);
682 ++ vc4_crtc_destroy_state(crtc, crtc->state);
683 +
684 + crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
685 + if (crtc->state)
686 +diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
687 +index cc287cf6eb29..edc52d75e6bd 100644
688 +--- a/drivers/hwtracing/intel_th/gth.c
689 ++++ b/drivers/hwtracing/intel_th/gth.c
690 +@@ -616,7 +616,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
691 + othdev->output.port = -1;
692 + othdev->output.active = false;
693 + gth->output[port].output = NULL;
694 +- for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
695 ++ for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
696 + if (gth->master[master] == port)
697 + gth->master[master] = -1;
698 + spin_unlock(&gth->gth_lock);
699 +diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
700 +index ea0bc6885517..32cc8fe7902f 100644
701 +--- a/drivers/infiniband/core/uverbs.h
702 ++++ b/drivers/infiniband/core/uverbs.h
703 +@@ -160,6 +160,7 @@ struct ib_uverbs_file {
704 +
705 + struct mutex umap_lock;
706 + struct list_head umaps;
707 ++ struct page *disassociate_page;
708 +
709 + struct idr idr;
710 + /* spinlock protects write access to idr */
711 +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
712 +index e2a4570a47e8..27ca4022ca70 100644
713 +--- a/drivers/infiniband/core/uverbs_main.c
714 ++++ b/drivers/infiniband/core/uverbs_main.c
715 +@@ -208,6 +208,9 @@ void ib_uverbs_release_file(struct kref *ref)
716 + kref_put(&file->async_file->ref,
717 + ib_uverbs_release_async_event_file);
718 + put_device(&file->device->dev);
719 ++
720 ++ if (file->disassociate_page)
721 ++ __free_pages(file->disassociate_page, 0);
722 + kfree(file);
723 + }
724 +
725 +@@ -876,9 +879,50 @@ static void rdma_umap_close(struct vm_area_struct *vma)
726 + kfree(priv);
727 + }
728 +
729 ++/*
730 ++ * Once the zap_vma_ptes has been called touches to the VMA will come here and
731 ++ * we return a dummy writable zero page for all the pfns.
732 ++ */
733 ++static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
734 ++{
735 ++ struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
736 ++ struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
737 ++ vm_fault_t ret = 0;
738 ++
739 ++ if (!priv)
740 ++ return VM_FAULT_SIGBUS;
741 ++
742 ++ /* Read only pages can just use the system zero page. */
743 ++ if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
744 ++ vmf->page = ZERO_PAGE(vmf->address);
745 ++ get_page(vmf->page);
746 ++ return 0;
747 ++ }
748 ++
749 ++ mutex_lock(&ufile->umap_lock);
750 ++ if (!ufile->disassociate_page)
751 ++ ufile->disassociate_page =
752 ++ alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
753 ++
754 ++ if (ufile->disassociate_page) {
755 ++ /*
756 ++ * This VMA is forced to always be shared so this doesn't have
757 ++ * to worry about COW.
758 ++ */
759 ++ vmf->page = ufile->disassociate_page;
760 ++ get_page(vmf->page);
761 ++ } else {
762 ++ ret = VM_FAULT_SIGBUS;
763 ++ }
764 ++ mutex_unlock(&ufile->umap_lock);
765 ++
766 ++ return ret;
767 ++}
768 ++
769 + static const struct vm_operations_struct rdma_umap_ops = {
770 + .open = rdma_umap_open,
771 + .close = rdma_umap_close,
772 ++ .fault = rdma_umap_fault,
773 + };
774 +
775 + static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
776 +@@ -888,6 +932,9 @@ static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
777 + struct ib_uverbs_file *ufile = ucontext->ufile;
778 + struct rdma_umap_priv *priv;
779 +
780 ++ if (!(vma->vm_flags & VM_SHARED))
781 ++ return ERR_PTR(-EINVAL);
782 ++
783 + if (vma->vm_end - vma->vm_start != size)
784 + return ERR_PTR(-EINVAL);
785 +
786 +@@ -991,7 +1038,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
787 + * at a time to get the lock ordering right. Typically there
788 + * will only be one mm, so no big deal.
789 + */
790 +- down_write(&mm->mmap_sem);
791 ++ down_read(&mm->mmap_sem);
792 + if (!mmget_still_valid(mm))
793 + goto skip_mm;
794 + mutex_lock(&ufile->umap_lock);
795 +@@ -1005,11 +1052,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
796 +
797 + zap_vma_ptes(vma, vma->vm_start,
798 + vma->vm_end - vma->vm_start);
799 +- vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
800 + }
801 + mutex_unlock(&ufile->umap_lock);
802 + skip_mm:
803 +- up_write(&mm->mmap_sem);
804 ++ up_read(&mm->mmap_sem);
805 + mmput(mm);
806 + }
807 + }
808 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
809 +index 94fe253d4956..497181f5ba09 100644
810 +--- a/drivers/infiniband/hw/mlx5/main.c
811 ++++ b/drivers/infiniband/hw/mlx5/main.c
812 +@@ -1982,6 +1982,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
813 +
814 + if (vma->vm_flags & VM_WRITE)
815 + return -EPERM;
816 ++ vma->vm_flags &= ~VM_MAYWRITE;
817 +
818 + if (!dev->mdev->clock_info_page)
819 + return -EOPNOTSUPP;
820 +@@ -2147,19 +2148,18 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
821 +
822 + if (vma->vm_flags & VM_WRITE)
823 + return -EPERM;
824 ++ vma->vm_flags &= ~VM_MAYWRITE;
825 +
826 + /* Don't expose to user-space information it shouldn't have */
827 + if (PAGE_SIZE > 4096)
828 + return -EOPNOTSUPP;
829 +
830 +- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
831 + pfn = (dev->mdev->iseg_base +
832 + offsetof(struct mlx5_init_seg, internal_timer_h)) >>
833 + PAGE_SHIFT;
834 +- if (io_remap_pfn_range(vma, vma->vm_start, pfn,
835 +- PAGE_SIZE, vma->vm_page_prot))
836 +- return -EAGAIN;
837 +- break;
838 ++ return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
839 ++ PAGE_SIZE,
840 ++ pgprot_noncached(vma->vm_page_prot));
841 + case MLX5_IB_MMAP_CLOCK_INFO:
842 + return mlx5_ib_mmap_clock_info_page(dev, vma, context);
843 +
844 +diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
845 +index 49c9541050d4..5819c9d6ffdc 100644
846 +--- a/drivers/infiniband/sw/rdmavt/mr.c
847 ++++ b/drivers/infiniband/sw/rdmavt/mr.c
848 +@@ -611,11 +611,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
849 + if (unlikely(mapped_segs == mr->mr.max_segs))
850 + return -ENOMEM;
851 +
852 +- if (mr->mr.length == 0) {
853 +- mr->mr.user_base = addr;
854 +- mr->mr.iova = addr;
855 +- }
856 +-
857 + m = mapped_segs / RVT_SEGSZ;
858 + n = mapped_segs % RVT_SEGSZ;
859 + mr->mr.map[m]->segs[n].vaddr = (void *)addr;
860 +@@ -633,17 +628,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
861 + * @sg_nents: number of entries in sg
862 + * @sg_offset: offset in bytes into sg
863 + *
864 ++ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
865 ++ *
866 + * Return: number of sg elements mapped to the memory region
867 + */
868 + int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
869 + int sg_nents, unsigned int *sg_offset)
870 + {
871 + struct rvt_mr *mr = to_imr(ibmr);
872 ++ int ret;
873 +
874 + mr->mr.length = 0;
875 + mr->mr.page_shift = PAGE_SHIFT;
876 +- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
877 +- rvt_set_page);
878 ++ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
879 ++ mr->mr.user_base = ibmr->iova;
880 ++ mr->mr.iova = ibmr->iova;
881 ++ mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
882 ++ mr->mr.length = (size_t)ibmr->length;
883 ++ return ret;
884 + }
885 +
886 + /**
887 +@@ -674,6 +676,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
888 + ibmr->rkey = key;
889 + mr->mr.lkey = key;
890 + mr->mr.access_flags = access;
891 ++ mr->mr.iova = ibmr->iova;
892 + atomic_set(&mr->mr.lkey_invalid, 0);
893 +
894 + return 0;
895 +diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
896 +index df64d6aed4f7..93901ebd122a 100644
897 +--- a/drivers/input/rmi4/rmi_f11.c
898 ++++ b/drivers/input/rmi4/rmi_f11.c
899 +@@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
900 + }
901 +
902 + rc = f11_write_control_regs(fn, &f11->sens_query,
903 +- &f11->dev_controls, fn->fd.query_base_addr);
904 ++ &f11->dev_controls, fn->fd.control_base_addr);
905 + if (rc)
906 + dev_warn(&fn->dev, "Failed to write control registers\n");
907 +
908 +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
909 +index 6fd15a734324..58f02c85f2fe 100644
910 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
911 ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
912 +@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
913 + /* create driver workqueue */
914 + fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
915 + fm10k_driver_name);
916 ++ if (!fm10k_workqueue)
917 ++ return -ENOMEM;
918 +
919 + fm10k_dbg_init();
920 +
921 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
922 +index 03b2a9f9c589..cad34d6f5f45 100644
923 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
924 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
925 +@@ -33,6 +33,26 @@
926 + #include <linux/bpf_trace.h>
927 + #include "en/xdp.h"
928 +
929 ++int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
930 ++{
931 ++ int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
932 ++
933 ++ /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
934 ++ * The condition checked in mlx5e_rx_is_linear_skb is:
935 ++ * SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE (1)
936 ++ * (Note that hw_mtu == sw_mtu + hard_mtu.)
937 ++ * What is returned from this function is:
938 ++ * max_mtu = PAGE_SIZE - S - hr - hard_mtu (2)
939 ++ * After assigning sw_mtu := max_mtu, the left side of (1) turns to
940 ++ * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
941 ++ * because both PAGE_SIZE and S are already aligned. Any number greater
942 ++ * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
943 ++ * so max_mtu is the maximum MTU allowed.
944 ++ */
945 ++
946 ++ return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
947 ++}
948 ++
949 + static inline bool
950 + mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
951 + struct xdp_buff *xdp)
952 +@@ -304,9 +324,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
953 + mlx5e_xdpi_fifo_pop(xdpi_fifo);
954 +
955 + if (is_redirect) {
956 +- xdp_return_frame(xdpi.xdpf);
957 + dma_unmap_single(sq->pdev, xdpi.dma_addr,
958 + xdpi.xdpf->len, DMA_TO_DEVICE);
959 ++ xdp_return_frame(xdpi.xdpf);
960 + } else {
961 + /* Recycle RX page */
962 + mlx5e_page_release(rq, &xdpi.di, true);
963 +@@ -345,9 +365,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
964 + mlx5e_xdpi_fifo_pop(xdpi_fifo);
965 +
966 + if (is_redirect) {
967 +- xdp_return_frame(xdpi.xdpf);
968 + dma_unmap_single(sq->pdev, xdpi.dma_addr,
969 + xdpi.xdpf->len, DMA_TO_DEVICE);
970 ++ xdp_return_frame(xdpi.xdpf);
971 + } else {
972 + /* Recycle RX page */
973 + mlx5e_page_release(rq, &xdpi.di, false);
974 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
975 +index ee27a7c8cd87..553956cadc8a 100644
976 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
977 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
978 +@@ -34,13 +34,12 @@
979 +
980 + #include "en.h"
981 +
982 +-#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
983 +- MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
984 + #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
985 + #define MLX5E_XDP_TX_EMPTY_DS_COUNT \
986 + (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
987 + #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
988 +
989 ++int mlx5e_xdp_max_mtu(struct mlx5e_params *params);
990 + bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
991 + void *va, u16 *rx_headroom, u32 *len);
992 + bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq);
993 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
994 +index 3b9e5f0d0212..253496c4a3db 100644
995 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
996 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
997 +@@ -1470,7 +1470,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
998 + break;
999 + case MLX5_MODULE_ID_SFP:
1000 + modinfo->type = ETH_MODULE_SFF_8472;
1001 +- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1002 ++ modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
1003 + break;
1004 + default:
1005 + netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
1006 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1007 +index 0cb19e4dd439..2d269acdbc8e 100644
1008 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1009 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1010 +@@ -3816,7 +3816,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
1011 + if (params->xdp_prog &&
1012 + !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
1013 + netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
1014 +- new_mtu, MLX5E_XDP_MAX_MTU);
1015 ++ new_mtu, mlx5e_xdp_max_mtu(params));
1016 + err = -EINVAL;
1017 + goto out;
1018 + }
1019 +@@ -4280,7 +4280,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
1020 +
1021 + if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
1022 + netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
1023 +- new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
1024 ++ new_channels.params.sw_mtu,
1025 ++ mlx5e_xdp_max_mtu(&new_channels.params));
1026 + return -EINVAL;
1027 + }
1028 +
1029 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
1030 +index 2b82f35f4c35..efce1fa37f6f 100644
1031 +--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
1032 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
1033 +@@ -404,10 +404,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
1034 + size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
1035 +
1036 + i2c_addr = MLX5_I2C_ADDR_LOW;
1037 +- if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
1038 +- i2c_addr = MLX5_I2C_ADDR_HIGH;
1039 +- offset -= MLX5_EEPROM_PAGE_LENGTH;
1040 +- }
1041 +
1042 + MLX5_SET(mcia_reg, in, l, 0);
1043 + MLX5_SET(mcia_reg, in, module, module_num);
1044 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1045 +index ffee38e36ce8..8648ca171254 100644
1046 +--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1047 ++++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1048 +@@ -27,7 +27,7 @@
1049 +
1050 + #define MLXSW_PCI_SW_RESET 0xF0010
1051 + #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
1052 +-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
1053 ++#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 20000
1054 + #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
1055 + #define MLXSW_PCI_FW_READY 0xA1844
1056 + #define MLXSW_PCI_FW_READY_MASK 0xFFFF
1057 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1058 +index cbdee5164be7..ce49504e1f9c 100644
1059 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1060 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1061 +@@ -2667,11 +2667,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
1062 + if (err)
1063 + return err;
1064 +
1065 ++ mlxsw_sp_port->link.autoneg = autoneg;
1066 ++
1067 + if (!netif_running(dev))
1068 + return 0;
1069 +
1070 +- mlxsw_sp_port->link.autoneg = autoneg;
1071 +-
1072 + mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1073 + mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1074 +
1075 +@@ -2961,7 +2961,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
1076 + err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1077 + MLXSW_REG_QEEC_HIERARCY_TC,
1078 + i + 8, i,
1079 +- false, 0);
1080 ++ true, 100);
1081 + if (err)
1082 + return err;
1083 + }
1084 +diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
1085 +index a18149720aa2..cba5881b2746 100644
1086 +--- a/drivers/net/ethernet/socionext/netsec.c
1087 ++++ b/drivers/net/ethernet/socionext/netsec.c
1088 +@@ -673,7 +673,8 @@ static void netsec_process_tx(struct netsec_priv *priv)
1089 + }
1090 +
1091 + static void *netsec_alloc_rx_data(struct netsec_priv *priv,
1092 +- dma_addr_t *dma_handle, u16 *desc_len)
1093 ++ dma_addr_t *dma_handle, u16 *desc_len,
1094 ++ bool napi)
1095 + {
1096 + size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1097 + size_t payload_len = NETSEC_RX_BUF_SZ;
1098 +@@ -682,7 +683,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
1099 +
1100 + total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
1101 +
1102 +- buf = napi_alloc_frag(total_len);
1103 ++ buf = napi ? napi_alloc_frag(total_len) : netdev_alloc_frag(total_len);
1104 + if (!buf)
1105 + return NULL;
1106 +
1107 +@@ -765,7 +766,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
1108 + /* allocate a fresh buffer and map it to the hardware.
1109 + * This will eventually replace the old buffer in the hardware
1110 + */
1111 +- buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
1112 ++ buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len,
1113 ++ true);
1114 + if (unlikely(!buf_addr))
1115 + break;
1116 +
1117 +@@ -1069,7 +1071,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
1118 + void *buf;
1119 + u16 len;
1120 +
1121 +- buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
1122 ++ buf = netsec_alloc_rx_data(priv, &dma_handle, &len,
1123 ++ false);
1124 + if (!buf) {
1125 + netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1126 + goto err_out;
1127 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1128 +index 019ab99e65bb..1d8d6f2ddfd6 100644
1129 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1130 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1131 +@@ -2590,8 +2590,6 @@ static int stmmac_open(struct net_device *dev)
1132 + u32 chan;
1133 + int ret;
1134 +
1135 +- stmmac_check_ether_addr(priv);
1136 +-
1137 + if (priv->hw->pcs != STMMAC_PCS_RGMII &&
1138 + priv->hw->pcs != STMMAC_PCS_TBI &&
1139 + priv->hw->pcs != STMMAC_PCS_RTBI) {
1140 +@@ -4265,6 +4263,8 @@ int stmmac_dvr_probe(struct device *device,
1141 + if (ret)
1142 + goto error_hw_init;
1143 +
1144 ++ stmmac_check_ether_addr(priv);
1145 ++
1146 + /* Configure real RX and TX queues */
1147 + netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
1148 + netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
1149 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1150 +index d819e8eaba12..cc1e887e47b5 100644
1151 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1152 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1153 +@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
1154 + },
1155 + .driver_data = (void *)&galileo_stmmac_dmi_data,
1156 + },
1157 ++ /*
1158 ++ * There are 2 types of SIMATIC IOT2000: IOT20202 and IOT2040.
1159 ++ * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
1160 ++ * has only one pci network device while other asset tags are
1161 ++ * for IOT2040 which has two.
1162 ++ */
1163 + {
1164 + .matches = {
1165 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
1166 +@@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
1167 + {
1168 + .matches = {
1169 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
1170 +- DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
1171 +- "6ES7647-0AA00-1YA2"),
1172 + },
1173 + .driver_data = (void *)&iot2040_stmmac_dmi_data,
1174 + },
1175 +diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
1176 +index f4e93f5fc204..ea90db3c7705 100644
1177 +--- a/drivers/net/slip/slhc.c
1178 ++++ b/drivers/net/slip/slhc.c
1179 +@@ -153,7 +153,7 @@ out_fail:
1180 + void
1181 + slhc_free(struct slcompress *comp)
1182 + {
1183 +- if ( comp == NULLSLCOMPR )
1184 ++ if ( IS_ERR_OR_NULL(comp) )
1185 + return;
1186 +
1187 + if ( comp->tstate != NULLSLSTATE )
1188 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
1189 +index 1283632091d5..7dcda9364009 100644
1190 +--- a/drivers/net/team/team.c
1191 ++++ b/drivers/net/team/team.c
1192 +@@ -1157,6 +1157,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1193 + return -EINVAL;
1194 + }
1195 +
1196 ++ if (netdev_has_upper_dev(dev, port_dev)) {
1197 ++ NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
1198 ++ netdev_err(dev, "Device %s is already an upper device of the team interface\n",
1199 ++ portname);
1200 ++ return -EBUSY;
1201 ++ }
1202 ++
1203 + if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
1204 + vlan_uses_dev(dev)) {
1205 + NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
1206 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
1207 +index 6359053bd0c7..862fd2b92d12 100644
1208 +--- a/drivers/net/wireless/mac80211_hwsim.c
1209 ++++ b/drivers/net/wireless/mac80211_hwsim.c
1210 +@@ -2642,7 +2642,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
1211 + enum nl80211_band band;
1212 + const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
1213 + struct net *net;
1214 +- int idx;
1215 ++ int idx, i;
1216 + int n_limits = 0;
1217 +
1218 + if (WARN_ON(param->channels > 1 && !param->use_chanctx))
1219 +@@ -2766,12 +2766,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
1220 + goto failed_hw;
1221 + }
1222 +
1223 ++ data->if_combination.max_interfaces = 0;
1224 ++ for (i = 0; i < n_limits; i++)
1225 ++ data->if_combination.max_interfaces +=
1226 ++ data->if_limits[i].max;
1227 ++
1228 + data->if_combination.n_limits = n_limits;
1229 +- data->if_combination.max_interfaces = 2048;
1230 + data->if_combination.limits = data->if_limits;
1231 +
1232 +- hw->wiphy->iface_combinations = &data->if_combination;
1233 +- hw->wiphy->n_iface_combinations = 1;
1234 ++ /*
1235 ++ * If we actually were asked to support combinations,
1236 ++ * advertise them - if there's only a single thing like
1237 ++ * only IBSS then don't advertise it as combinations.
1238 ++ */
1239 ++ if (data->if_combination.max_interfaces > 1) {
1240 ++ hw->wiphy->iface_combinations = &data->if_combination;
1241 ++ hw->wiphy->n_iface_combinations = 1;
1242 ++ }
1243 +
1244 + if (param->ciphers) {
1245 + memcpy(data->ciphers, param->ciphers,
1246 +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
1247 +index 53564386ed57..8987cec9549d 100644
1248 +--- a/drivers/usb/core/driver.c
1249 ++++ b/drivers/usb/core/driver.c
1250 +@@ -1896,14 +1896,11 @@ int usb_runtime_idle(struct device *dev)
1251 + return -EBUSY;
1252 + }
1253 +
1254 +-int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
1255 ++static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
1256 + {
1257 + struct usb_hcd *hcd = bus_to_hcd(udev->bus);
1258 + int ret = -EPERM;
1259 +
1260 +- if (enable && !udev->usb2_hw_lpm_allowed)
1261 +- return 0;
1262 +-
1263 + if (hcd->driver->set_usb2_hw_lpm) {
1264 + ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
1265 + if (!ret)
1266 +@@ -1913,6 +1910,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
1267 + return ret;
1268 + }
1269 +
1270 ++int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
1271 ++{
1272 ++ if (!udev->usb2_hw_lpm_capable ||
1273 ++ !udev->usb2_hw_lpm_allowed ||
1274 ++ udev->usb2_hw_lpm_enabled)
1275 ++ return 0;
1276 ++
1277 ++ return usb_set_usb2_hardware_lpm(udev, 1);
1278 ++}
1279 ++
1280 ++int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
1281 ++{
1282 ++ if (!udev->usb2_hw_lpm_enabled)
1283 ++ return 0;
1284 ++
1285 ++ return usb_set_usb2_hardware_lpm(udev, 0);
1286 ++}
1287 ++
1288 + #endif /* CONFIG_PM */
1289 +
1290 + struct bus_type usb_bus_type = {
1291 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1292 +index 1d1e61e980f3..55c87be5764c 100644
1293 +--- a/drivers/usb/core/hub.c
1294 ++++ b/drivers/usb/core/hub.c
1295 +@@ -3220,8 +3220,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
1296 + }
1297 +
1298 + /* disable USB2 hardware LPM */
1299 +- if (udev->usb2_hw_lpm_enabled == 1)
1300 +- usb_set_usb2_hardware_lpm(udev, 0);
1301 ++ usb_disable_usb2_hardware_lpm(udev);
1302 +
1303 + if (usb_disable_ltm(udev)) {
1304 + dev_err(&udev->dev, "Failed to disable LTM before suspend\n");
1305 +@@ -3259,8 +3258,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
1306 + usb_enable_ltm(udev);
1307 + err_ltm:
1308 + /* Try to enable USB2 hardware LPM again */
1309 +- if (udev->usb2_hw_lpm_capable == 1)
1310 +- usb_set_usb2_hardware_lpm(udev, 1);
1311 ++ usb_enable_usb2_hardware_lpm(udev);
1312 +
1313 + if (udev->do_remote_wakeup)
1314 + (void) usb_disable_remote_wakeup(udev);
1315 +@@ -3543,8 +3541,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
1316 + hub_port_logical_disconnect(hub, port1);
1317 + } else {
1318 + /* Try to enable USB2 hardware LPM */
1319 +- if (udev->usb2_hw_lpm_capable == 1)
1320 +- usb_set_usb2_hardware_lpm(udev, 1);
1321 ++ usb_enable_usb2_hardware_lpm(udev);
1322 +
1323 + /* Try to enable USB3 LTM */
1324 + usb_enable_ltm(udev);
1325 +@@ -4435,7 +4432,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
1326 + if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
1327 + connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
1328 + udev->usb2_hw_lpm_allowed = 1;
1329 +- usb_set_usb2_hardware_lpm(udev, 1);
1330 ++ usb_enable_usb2_hardware_lpm(udev);
1331 + }
1332 + }
1333 +
1334 +@@ -5649,8 +5646,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
1335 + /* Disable USB2 hardware LPM.
1336 + * It will be re-enabled by the enumeration process.
1337 + */
1338 +- if (udev->usb2_hw_lpm_enabled == 1)
1339 +- usb_set_usb2_hardware_lpm(udev, 0);
1340 ++ usb_disable_usb2_hardware_lpm(udev);
1341 +
1342 + /* Disable LPM while we reset the device and reinstall the alt settings.
1343 + * Device-initiated LPM, and system exit latency settings are cleared
1344 +@@ -5753,7 +5749,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
1345 +
1346 + done:
1347 + /* Now that the alt settings are re-installed, enable LTM and LPM. */
1348 +- usb_set_usb2_hardware_lpm(udev, 1);
1349 ++ usb_enable_usb2_hardware_lpm(udev);
1350 + usb_unlocked_enable_lpm(udev);
1351 + usb_enable_ltm(udev);
1352 + usb_release_bos_descriptor(udev);
1353 +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
1354 +index bfa5eda0cc26..4f33eb632a88 100644
1355 +--- a/drivers/usb/core/message.c
1356 ++++ b/drivers/usb/core/message.c
1357 +@@ -1243,8 +1243,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1358 + dev->actconfig->interface[i] = NULL;
1359 + }
1360 +
1361 +- if (dev->usb2_hw_lpm_enabled == 1)
1362 +- usb_set_usb2_hardware_lpm(dev, 0);
1363 ++ usb_disable_usb2_hardware_lpm(dev);
1364 + usb_unlocked_disable_lpm(dev);
1365 + usb_disable_ltm(dev);
1366 +
1367 +diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
1368 +index ea18284dfa9a..7e88fdfe3cf5 100644
1369 +--- a/drivers/usb/core/sysfs.c
1370 ++++ b/drivers/usb/core/sysfs.c
1371 +@@ -528,7 +528,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
1372 +
1373 + if (!ret) {
1374 + udev->usb2_hw_lpm_allowed = value;
1375 +- ret = usb_set_usb2_hardware_lpm(udev, value);
1376 ++ if (value)
1377 ++ ret = usb_enable_usb2_hardware_lpm(udev);
1378 ++ else
1379 ++ ret = usb_disable_usb2_hardware_lpm(udev);
1380 + }
1381 +
1382 + usb_unlock_device(udev);
1383 +diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
1384 +index 546a2219454b..d95a5358f73d 100644
1385 +--- a/drivers/usb/core/usb.h
1386 ++++ b/drivers/usb/core/usb.h
1387 +@@ -92,7 +92,8 @@ extern int usb_remote_wakeup(struct usb_device *dev);
1388 + extern int usb_runtime_suspend(struct device *dev);
1389 + extern int usb_runtime_resume(struct device *dev);
1390 + extern int usb_runtime_idle(struct device *dev);
1391 +-extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
1392 ++extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
1393 ++extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
1394 +
1395 + #else
1396 +
1397 +@@ -112,7 +113,12 @@ static inline int usb_autoresume_device(struct usb_device *udev)
1398 + return 0;
1399 + }
1400 +
1401 +-static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
1402 ++static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
1403 ++{
1404 ++ return 0;
1405 ++}
1406 ++
1407 ++static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
1408 + {
1409 + return 0;
1410 + }
1411 +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
1412 +index 73652e21efec..d0f731c9920a 100644
1413 +--- a/drivers/vfio/vfio_iommu_type1.c
1414 ++++ b/drivers/vfio/vfio_iommu_type1.c
1415 +@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
1416 + MODULE_PARM_DESC(disable_hugepages,
1417 + "Disable VFIO IOMMU support for IOMMU hugepages.");
1418 +
1419 ++static unsigned int dma_entry_limit __read_mostly = U16_MAX;
1420 ++module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
1421 ++MODULE_PARM_DESC(dma_entry_limit,
1422 ++ "Maximum number of user DMA mappings per container (65535).");
1423 ++
1424 + struct vfio_iommu {
1425 + struct list_head domain_list;
1426 + struct vfio_domain *external_domain; /* domain for external user */
1427 + struct mutex lock;
1428 + struct rb_root dma_list;
1429 + struct blocking_notifier_head notifier;
1430 ++ unsigned int dma_avail;
1431 + bool v2;
1432 + bool nesting;
1433 + };
1434 +@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
1435 + vfio_unlink_dma(iommu, dma);
1436 + put_task_struct(dma->task);
1437 + kfree(dma);
1438 ++ iommu->dma_avail++;
1439 + }
1440 +
1441 + static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
1442 +@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
1443 + goto out_unlock;
1444 + }
1445 +
1446 ++ if (!iommu->dma_avail) {
1447 ++ ret = -ENOSPC;
1448 ++ goto out_unlock;
1449 ++ }
1450 ++
1451 + dma = kzalloc(sizeof(*dma), GFP_KERNEL);
1452 + if (!dma) {
1453 + ret = -ENOMEM;
1454 + goto out_unlock;
1455 + }
1456 +
1457 ++ iommu->dma_avail--;
1458 + dma->iova = iova;
1459 + dma->vaddr = vaddr;
1460 + dma->prot = prot;
1461 +@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
1462 +
1463 + INIT_LIST_HEAD(&iommu->domain_list);
1464 + iommu->dma_list = RB_ROOT;
1465 ++ iommu->dma_avail = dma_entry_limit;
1466 + mutex_init(&iommu->lock);
1467 + BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
1468 +
1469 +diff --git a/fs/aio.c b/fs/aio.c
1470 +index 3d9669d011b9..efa13410e04e 100644
1471 +--- a/fs/aio.c
1472 ++++ b/fs/aio.c
1473 +@@ -181,7 +181,7 @@ struct poll_iocb {
1474 + struct file *file;
1475 + struct wait_queue_head *head;
1476 + __poll_t events;
1477 +- bool woken;
1478 ++ bool done;
1479 + bool cancelled;
1480 + struct wait_queue_entry wait;
1481 + struct work_struct work;
1482 +@@ -204,8 +204,7 @@ struct aio_kiocb {
1483 + struct kioctx *ki_ctx;
1484 + kiocb_cancel_fn *ki_cancel;
1485 +
1486 +- struct iocb __user *ki_user_iocb; /* user's aiocb */
1487 +- __u64 ki_user_data; /* user's data for completion */
1488 ++ struct io_event ki_res;
1489 +
1490 + struct list_head ki_list; /* the aio core uses this
1491 + * for cancellation */
1492 +@@ -1022,6 +1021,9 @@ static bool get_reqs_available(struct kioctx *ctx)
1493 + /* aio_get_req
1494 + * Allocate a slot for an aio request.
1495 + * Returns NULL if no requests are free.
1496 ++ *
1497 ++ * The refcount is initialized to 2 - one for the async op completion,
1498 ++ * one for the synchronous code that does this.
1499 + */
1500 + static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1501 + {
1502 +@@ -1034,7 +1036,7 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1503 + percpu_ref_get(&ctx->reqs);
1504 + req->ki_ctx = ctx;
1505 + INIT_LIST_HEAD(&req->ki_list);
1506 +- refcount_set(&req->ki_refcnt, 0);
1507 ++ refcount_set(&req->ki_refcnt, 2);
1508 + req->ki_eventfd = NULL;
1509 + return req;
1510 + }
1511 +@@ -1067,30 +1069,18 @@ out:
1512 + return ret;
1513 + }
1514 +
1515 +-static inline void iocb_put(struct aio_kiocb *iocb)
1516 ++static inline void iocb_destroy(struct aio_kiocb *iocb)
1517 + {
1518 +- if (refcount_read(&iocb->ki_refcnt) == 0 ||
1519 +- refcount_dec_and_test(&iocb->ki_refcnt)) {
1520 +- if (iocb->ki_filp)
1521 +- fput(iocb->ki_filp);
1522 +- percpu_ref_put(&iocb->ki_ctx->reqs);
1523 +- kmem_cache_free(kiocb_cachep, iocb);
1524 +- }
1525 +-}
1526 +-
1527 +-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
1528 +- long res, long res2)
1529 +-{
1530 +- ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
1531 +- ev->data = iocb->ki_user_data;
1532 +- ev->res = res;
1533 +- ev->res2 = res2;
1534 ++ if (iocb->ki_filp)
1535 ++ fput(iocb->ki_filp);
1536 ++ percpu_ref_put(&iocb->ki_ctx->reqs);
1537 ++ kmem_cache_free(kiocb_cachep, iocb);
1538 + }
1539 +
1540 + /* aio_complete
1541 + * Called when the io request on the given iocb is complete.
1542 + */
1543 +-static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
1544 ++static void aio_complete(struct aio_kiocb *iocb)
1545 + {
1546 + struct kioctx *ctx = iocb->ki_ctx;
1547 + struct aio_ring *ring;
1548 +@@ -1114,14 +1104,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
1549 + ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1550 + event = ev_page + pos % AIO_EVENTS_PER_PAGE;
1551 +
1552 +- aio_fill_event(event, iocb, res, res2);
1553 ++ *event = iocb->ki_res;
1554 +
1555 + kunmap_atomic(ev_page);
1556 + flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1557 +
1558 +- pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
1559 +- ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
1560 +- res, res2);
1561 ++ pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
1562 ++ (void __user *)(unsigned long)iocb->ki_res.obj,
1563 ++ iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
1564 +
1565 + /* after flagging the request as done, we
1566 + * must never even look at it again
1567 +@@ -1163,7 +1153,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
1568 +
1569 + if (waitqueue_active(&ctx->wait))
1570 + wake_up(&ctx->wait);
1571 +- iocb_put(iocb);
1572 ++}
1573 ++
1574 ++static inline void iocb_put(struct aio_kiocb *iocb)
1575 ++{
1576 ++ if (refcount_dec_and_test(&iocb->ki_refcnt)) {
1577 ++ aio_complete(iocb);
1578 ++ iocb_destroy(iocb);
1579 ++ }
1580 + }
1581 +
1582 + /* aio_read_events_ring
1583 +@@ -1437,7 +1434,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
1584 + file_end_write(kiocb->ki_filp);
1585 + }
1586 +
1587 +- aio_complete(iocb, res, res2);
1588 ++ iocb->ki_res.res = res;
1589 ++ iocb->ki_res.res2 = res2;
1590 ++ iocb_put(iocb);
1591 + }
1592 +
1593 + static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
1594 +@@ -1585,11 +1584,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
1595 +
1596 + static void aio_fsync_work(struct work_struct *work)
1597 + {
1598 +- struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
1599 +- int ret;
1600 ++ struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1601 +
1602 +- ret = vfs_fsync(req->file, req->datasync);
1603 +- aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
1604 ++ iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1605 ++ iocb_put(iocb);
1606 + }
1607 +
1608 + static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1609 +@@ -1608,11 +1606,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1610 + return 0;
1611 + }
1612 +
1613 +-static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
1614 +-{
1615 +- aio_complete(iocb, mangle_poll(mask), 0);
1616 +-}
1617 +-
1618 + static void aio_poll_complete_work(struct work_struct *work)
1619 + {
1620 + struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1621 +@@ -1638,9 +1631,11 @@ static void aio_poll_complete_work(struct work_struct *work)
1622 + return;
1623 + }
1624 + list_del_init(&iocb->ki_list);
1625 ++ iocb->ki_res.res = mangle_poll(mask);
1626 ++ req->done = true;
1627 + spin_unlock_irq(&ctx->ctx_lock);
1628 +
1629 +- aio_poll_complete(iocb, mask);
1630 ++ iocb_put(iocb);
1631 + }
1632 +
1633 + /* assumes we are called with irqs disabled */
1634 +@@ -1668,31 +1663,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1635 + __poll_t mask = key_to_poll(key);
1636 + unsigned long flags;
1637 +
1638 +- req->woken = true;
1639 +-
1640 + /* for instances that support it check for an event match first: */
1641 +- if (mask) {
1642 +- if (!(mask & req->events))
1643 +- return 0;
1644 ++ if (mask && !(mask & req->events))
1645 ++ return 0;
1646 ++
1647 ++ list_del_init(&req->wait.entry);
1648 +
1649 ++ if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1650 + /*
1651 + * Try to complete the iocb inline if we can. Use
1652 + * irqsave/irqrestore because not all filesystems (e.g. fuse)
1653 + * call this function with IRQs disabled and because IRQs
1654 + * have to be disabled before ctx_lock is obtained.
1655 + */
1656 +- if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1657 +- list_del(&iocb->ki_list);
1658 +- spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
1659 +-
1660 +- list_del_init(&req->wait.entry);
1661 +- aio_poll_complete(iocb, mask);
1662 +- return 1;
1663 +- }
1664 ++ list_del(&iocb->ki_list);
1665 ++ iocb->ki_res.res = mangle_poll(mask);
1666 ++ req->done = true;
1667 ++ spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
1668 ++ iocb_put(iocb);
1669 ++ } else {
1670 ++ schedule_work(&req->work);
1671 + }
1672 +-
1673 +- list_del_init(&req->wait.entry);
1674 +- schedule_work(&req->work);
1675 + return 1;
1676 + }
1677 +
1678 +@@ -1724,6 +1715,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1679 + struct kioctx *ctx = aiocb->ki_ctx;
1680 + struct poll_iocb *req = &aiocb->poll;
1681 + struct aio_poll_table apt;
1682 ++ bool cancel = false;
1683 + __poll_t mask;
1684 +
1685 + /* reject any unknown events outside the normal event mask. */
1686 +@@ -1737,7 +1729,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1687 + req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1688 +
1689 + req->head = NULL;
1690 +- req->woken = false;
1691 ++ req->done = false;
1692 + req->cancelled = false;
1693 +
1694 + apt.pt._qproc = aio_poll_queue_proc;
1695 +@@ -1749,41 +1741,34 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1696 + INIT_LIST_HEAD(&req->wait.entry);
1697 + init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1698 +
1699 +- /* one for removal from waitqueue, one for this function */
1700 +- refcount_set(&aiocb->ki_refcnt, 2);
1701 +-
1702 + mask = vfs_poll(req->file, &apt.pt) & req->events;
1703 +- if (unlikely(!req->head)) {
1704 +- /* we did not manage to set up a waitqueue, done */
1705 +- goto out;
1706 +- }
1707 +-
1708 + spin_lock_irq(&ctx->ctx_lock);
1709 +- spin_lock(&req->head->lock);
1710 +- if (req->woken) {
1711 +- /* wake_up context handles the rest */
1712 +- mask = 0;
1713 ++ if (likely(req->head)) {
1714 ++ spin_lock(&req->head->lock);
1715 ++ if (unlikely(list_empty(&req->wait.entry))) {
1716 ++ if (apt.error)
1717 ++ cancel = true;
1718 ++ apt.error = 0;
1719 ++ mask = 0;
1720 ++ }
1721 ++ if (mask || apt.error) {
1722 ++ list_del_init(&req->wait.entry);
1723 ++ } else if (cancel) {
1724 ++ WRITE_ONCE(req->cancelled, true);
1725 ++ } else if (!req->done) { /* actually waiting for an event */
1726 ++ list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1727 ++ aiocb->ki_cancel = aio_poll_cancel;
1728 ++ }
1729 ++ spin_unlock(&req->head->lock);
1730 ++ }
1731 ++ if (mask) { /* no async, we'd stolen it */
1732 ++ aiocb->ki_res.res = mangle_poll(mask);
1733 + apt.error = 0;
1734 +- } else if (mask || apt.error) {
1735 +- /* if we get an error or a mask we are done */
1736 +- WARN_ON_ONCE(list_empty(&req->wait.entry));
1737 +- list_del_init(&req->wait.entry);
1738 +- } else {
1739 +- /* actually waiting for an event */
1740 +- list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1741 +- aiocb->ki_cancel = aio_poll_cancel;
1742 + }
1743 +- spin_unlock(&req->head->lock);
1744 + spin_unlock_irq(&ctx->ctx_lock);
1745 +-
1746 +-out:
1747 +- if (unlikely(apt.error))
1748 +- return apt.error;
1749 +-
1750 + if (mask)
1751 +- aio_poll_complete(aiocb, mask);
1752 +- iocb_put(aiocb);
1753 +- return 0;
1754 ++ iocb_put(aiocb);
1755 ++ return apt.error;
1756 + }
1757 +
1758 + static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1759 +@@ -1842,8 +1827,10 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1760 + goto out_put_req;
1761 + }
1762 +
1763 +- req->ki_user_iocb = user_iocb;
1764 +- req->ki_user_data = iocb->aio_data;
1765 ++ req->ki_res.obj = (u64)(unsigned long)user_iocb;
1766 ++ req->ki_res.data = iocb->aio_data;
1767 ++ req->ki_res.res = 0;
1768 ++ req->ki_res.res2 = 0;
1769 +
1770 + switch (iocb->aio_lio_opcode) {
1771 + case IOCB_CMD_PREAD:
1772 +@@ -1873,18 +1860,21 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1773 + break;
1774 + }
1775 +
1776 ++ /* Done with the synchronous reference */
1777 ++ iocb_put(req);
1778 ++
1779 + /*
1780 + * If ret is 0, we'd either done aio_complete() ourselves or have
1781 + * arranged for that to be done asynchronously. Anything non-zero
1782 + * means that we need to destroy req ourselves.
1783 + */
1784 +- if (ret)
1785 +- goto out_put_req;
1786 +- return 0;
1787 ++ if (!ret)
1788 ++ return 0;
1789 ++
1790 + out_put_req:
1791 + if (req->ki_eventfd)
1792 + eventfd_ctx_put(req->ki_eventfd);
1793 +- iocb_put(req);
1794 ++ iocb_destroy(req);
1795 + out_put_reqs_available:
1796 + put_reqs_available(ctx, 1);
1797 + return ret;
1798 +@@ -1997,24 +1987,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
1799 + }
1800 + #endif
1801 +
1802 +-/* lookup_kiocb
1803 +- * Finds a given iocb for cancellation.
1804 +- */
1805 +-static struct aio_kiocb *
1806 +-lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
1807 +-{
1808 +- struct aio_kiocb *kiocb;
1809 +-
1810 +- assert_spin_locked(&ctx->ctx_lock);
1811 +-
1812 +- /* TODO: use a hash or array, this sucks. */
1813 +- list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
1814 +- if (kiocb->ki_user_iocb == iocb)
1815 +- return kiocb;
1816 +- }
1817 +- return NULL;
1818 +-}
1819 +-
1820 + /* sys_io_cancel:
1821 + * Attempts to cancel an iocb previously passed to io_submit. If
1822 + * the operation is successfully cancelled, the resulting event is
1823 +@@ -2032,6 +2004,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
1824 + struct aio_kiocb *kiocb;
1825 + int ret = -EINVAL;
1826 + u32 key;
1827 ++ u64 obj = (u64)(unsigned long)iocb;
1828 +
1829 + if (unlikely(get_user(key, &iocb->aio_key)))
1830 + return -EFAULT;
1831 +@@ -2043,10 +2016,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
1832 + return -EINVAL;
1833 +
1834 + spin_lock_irq(&ctx->ctx_lock);
1835 +- kiocb = lookup_kiocb(ctx, iocb);
1836 +- if (kiocb) {
1837 +- ret = kiocb->ki_cancel(&kiocb->rw);
1838 +- list_del_init(&kiocb->ki_list);
1839 ++ /* TODO: use a hash or array, this sucks. */
1840 ++ list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
1841 ++ if (kiocb->ki_res.obj == obj) {
1842 ++ ret = kiocb->ki_cancel(&kiocb->rw);
1843 ++ list_del_init(&kiocb->ki_list);
1844 ++ break;
1845 ++ }
1846 + }
1847 + spin_unlock_irq(&ctx->ctx_lock);
1848 +
1849 +diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
1850 +index 82928cea0209..7f3f64ba464f 100644
1851 +--- a/fs/ceph/dir.c
1852 ++++ b/fs/ceph/dir.c
1853 +@@ -1470,6 +1470,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
1854 + unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
1855 + {
1856 + struct ceph_inode_info *dci = ceph_inode(dir);
1857 ++ unsigned hash;
1858 +
1859 + switch (dci->i_dir_layout.dl_dir_hash) {
1860 + case 0: /* for backward compat */
1861 +@@ -1477,8 +1478,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
1862 + return dn->d_name.hash;
1863 +
1864 + default:
1865 +- return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
1866 ++ spin_lock(&dn->d_lock);
1867 ++ hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
1868 + dn->d_name.name, dn->d_name.len);
1869 ++ spin_unlock(&dn->d_lock);
1870 ++ return hash;
1871 + }
1872 + }
1873 +
1874 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
1875 +index 163fc74bf221..5cec784e30f6 100644
1876 +--- a/fs/ceph/mds_client.c
1877 ++++ b/fs/ceph/mds_client.c
1878 +@@ -1286,6 +1286,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1879 + list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1880 + ci->i_prealloc_cap_flush = NULL;
1881 + }
1882 ++
1883 ++ if (drop &&
1884 ++ ci->i_wrbuffer_ref_head == 0 &&
1885 ++ ci->i_wr_ref == 0 &&
1886 ++ ci->i_dirty_caps == 0 &&
1887 ++ ci->i_flushing_caps == 0) {
1888 ++ ceph_put_snap_context(ci->i_head_snapc);
1889 ++ ci->i_head_snapc = NULL;
1890 ++ }
1891 + }
1892 + spin_unlock(&ci->i_ceph_lock);
1893 + while (!list_empty(&to_remove)) {
1894 +@@ -1958,10 +1967,39 @@ retry:
1895 + return path;
1896 + }
1897 +
1898 ++/* Duplicate the dentry->d_name.name safely */
1899 ++static int clone_dentry_name(struct dentry *dentry, const char **ppath,
1900 ++ int *ppathlen)
1901 ++{
1902 ++ u32 len;
1903 ++ char *name;
1904 ++
1905 ++retry:
1906 ++ len = READ_ONCE(dentry->d_name.len);
1907 ++ name = kmalloc(len + 1, GFP_NOFS);
1908 ++ if (!name)
1909 ++ return -ENOMEM;
1910 ++
1911 ++ spin_lock(&dentry->d_lock);
1912 ++ if (dentry->d_name.len != len) {
1913 ++ spin_unlock(&dentry->d_lock);
1914 ++ kfree(name);
1915 ++ goto retry;
1916 ++ }
1917 ++ memcpy(name, dentry->d_name.name, len);
1918 ++ spin_unlock(&dentry->d_lock);
1919 ++
1920 ++ name[len] = '\0';
1921 ++ *ppath = name;
1922 ++ *ppathlen = len;
1923 ++ return 0;
1924 ++}
1925 ++
1926 + static int build_dentry_path(struct dentry *dentry, struct inode *dir,
1927 + const char **ppath, int *ppathlen, u64 *pino,
1928 +- int *pfreepath)
1929 ++ bool *pfreepath, bool parent_locked)
1930 + {
1931 ++ int ret;
1932 + char *path;
1933 +
1934 + rcu_read_lock();
1935 +@@ -1970,8 +2008,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
1936 + if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
1937 + *pino = ceph_ino(dir);
1938 + rcu_read_unlock();
1939 +- *ppath = dentry->d_name.name;
1940 +- *ppathlen = dentry->d_name.len;
1941 ++ if (parent_locked) {
1942 ++ *ppath = dentry->d_name.name;
1943 ++ *ppathlen = dentry->d_name.len;
1944 ++ } else {
1945 ++ ret = clone_dentry_name(dentry, ppath, ppathlen);
1946 ++ if (ret)
1947 ++ return ret;
1948 ++ *pfreepath = true;
1949 ++ }
1950 + return 0;
1951 + }
1952 + rcu_read_unlock();
1953 +@@ -1979,13 +2024,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
1954 + if (IS_ERR(path))
1955 + return PTR_ERR(path);
1956 + *ppath = path;
1957 +- *pfreepath = 1;
1958 ++ *pfreepath = true;
1959 + return 0;
1960 + }
1961 +
1962 + static int build_inode_path(struct inode *inode,
1963 + const char **ppath, int *ppathlen, u64 *pino,
1964 +- int *pfreepath)
1965 ++ bool *pfreepath)
1966 + {
1967 + struct dentry *dentry;
1968 + char *path;
1969 +@@ -2001,7 +2046,7 @@ static int build_inode_path(struct inode *inode,
1970 + if (IS_ERR(path))
1971 + return PTR_ERR(path);
1972 + *ppath = path;
1973 +- *pfreepath = 1;
1974 ++ *pfreepath = true;
1975 + return 0;
1976 + }
1977 +
1978 +@@ -2012,7 +2057,7 @@ static int build_inode_path(struct inode *inode,
1979 + static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1980 + struct inode *rdiri, const char *rpath,
1981 + u64 rino, const char **ppath, int *pathlen,
1982 +- u64 *ino, int *freepath)
1983 ++ u64 *ino, bool *freepath, bool parent_locked)
1984 + {
1985 + int r = 0;
1986 +
1987 +@@ -2022,7 +2067,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1988 + ceph_snap(rinode));
1989 + } else if (rdentry) {
1990 + r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
1991 +- freepath);
1992 ++ freepath, parent_locked);
1993 + dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1994 + *ppath);
1995 + } else if (rpath || rino) {
1996 +@@ -2048,7 +2093,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1997 + const char *path2 = NULL;
1998 + u64 ino1 = 0, ino2 = 0;
1999 + int pathlen1 = 0, pathlen2 = 0;
2000 +- int freepath1 = 0, freepath2 = 0;
2001 ++ bool freepath1 = false, freepath2 = false;
2002 + int len;
2003 + u16 releases;
2004 + void *p, *end;
2005 +@@ -2056,16 +2101,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
2006 +
2007 + ret = set_request_path_attr(req->r_inode, req->r_dentry,
2008 + req->r_parent, req->r_path1, req->r_ino1.ino,
2009 +- &path1, &pathlen1, &ino1, &freepath1);
2010 ++ &path1, &pathlen1, &ino1, &freepath1,
2011 ++ test_bit(CEPH_MDS_R_PARENT_LOCKED,
2012 ++ &req->r_req_flags));
2013 + if (ret < 0) {
2014 + msg = ERR_PTR(ret);
2015 + goto out;
2016 + }
2017 +
2018 ++ /* If r_old_dentry is set, then assume that its parent is locked */
2019 + ret = set_request_path_attr(NULL, req->r_old_dentry,
2020 + req->r_old_dentry_dir,
2021 + req->r_path2, req->r_ino2.ino,
2022 +- &path2, &pathlen2, &ino2, &freepath2);
2023 ++ &path2, &pathlen2, &ino2, &freepath2, true);
2024 + if (ret < 0) {
2025 + msg = ERR_PTR(ret);
2026 + goto out_free1;
2027 +diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
2028 +index f74193da0e09..1f46b02f7314 100644
2029 +--- a/fs/ceph/snap.c
2030 ++++ b/fs/ceph/snap.c
2031 +@@ -568,7 +568,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
2032 + old_snapc = NULL;
2033 +
2034 + update_snapc:
2035 +- if (ci->i_head_snapc) {
2036 ++ if (ci->i_wrbuffer_ref_head == 0 &&
2037 ++ ci->i_wr_ref == 0 &&
2038 ++ ci->i_dirty_caps == 0 &&
2039 ++ ci->i_flushing_caps == 0) {
2040 ++ ci->i_head_snapc = NULL;
2041 ++ } else {
2042 + ci->i_head_snapc = ceph_get_snap_context(new_snapc);
2043 + dout(" new snapc is %p\n", new_snapc);
2044 + }
2045 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2046 +index 7c05353b766c..7c3f9d00586e 100644
2047 +--- a/fs/cifs/file.c
2048 ++++ b/fs/cifs/file.c
2049 +@@ -2796,7 +2796,6 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
2050 + struct cifs_tcon *tcon;
2051 + struct cifs_sb_info *cifs_sb;
2052 + struct dentry *dentry = ctx->cfile->dentry;
2053 +- unsigned int i;
2054 + int rc;
2055 +
2056 + tcon = tlink_tcon(ctx->cfile->tlink);
2057 +@@ -2860,10 +2859,6 @@ restart_loop:
2058 + kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2059 + }
2060 +
2061 +- if (!ctx->direct_io)
2062 +- for (i = 0; i < ctx->npages; i++)
2063 +- put_page(ctx->bv[i].bv_page);
2064 +-
2065 + cifs_stats_bytes_written(tcon, ctx->total_len);
2066 + set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
2067 +
2068 +@@ -3472,7 +3467,6 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx)
2069 + struct iov_iter *to = &ctx->iter;
2070 + struct cifs_sb_info *cifs_sb;
2071 + struct cifs_tcon *tcon;
2072 +- unsigned int i;
2073 + int rc;
2074 +
2075 + tcon = tlink_tcon(ctx->cfile->tlink);
2076 +@@ -3556,15 +3550,8 @@ again:
2077 + kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2078 + }
2079 +
2080 +- if (!ctx->direct_io) {
2081 +- for (i = 0; i < ctx->npages; i++) {
2082 +- if (ctx->should_dirty)
2083 +- set_page_dirty(ctx->bv[i].bv_page);
2084 +- put_page(ctx->bv[i].bv_page);
2085 +- }
2086 +-
2087 ++ if (!ctx->direct_io)
2088 + ctx->total_len = ctx->len - iov_iter_count(to);
2089 +- }
2090 +
2091 + cifs_stats_bytes_read(tcon, ctx->total_len);
2092 +
2093 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
2094 +index 53fdb5df0d2e..538fd7d807e4 100644
2095 +--- a/fs/cifs/inode.c
2096 ++++ b/fs/cifs/inode.c
2097 +@@ -1735,6 +1735,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
2098 + if (rc == 0 || rc != -EBUSY)
2099 + goto do_rename_exit;
2100 +
2101 ++ /* Don't fall back to using SMB on SMB 2+ mount */
2102 ++ if (server->vals->protocol_id != 0)
2103 ++ goto do_rename_exit;
2104 ++
2105 + /* open-file renames don't work across directories */
2106 + if (to_dentry->d_parent != from_dentry->d_parent)
2107 + goto do_rename_exit;
2108 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
2109 +index 1e1626a2cfc3..0dc6f08020ac 100644
2110 +--- a/fs/cifs/misc.c
2111 ++++ b/fs/cifs/misc.c
2112 +@@ -789,6 +789,11 @@ cifs_aio_ctx_alloc(void)
2113 + {
2114 + struct cifs_aio_ctx *ctx;
2115 +
2116 ++ /*
2117 ++ * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
2118 ++ * to false so that we know when we have to unreference pages within
2119 ++ * cifs_aio_ctx_release()
2120 ++ */
2121 + ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
2122 + if (!ctx)
2123 + return NULL;
2124 +@@ -807,7 +812,23 @@ cifs_aio_ctx_release(struct kref *refcount)
2125 + struct cifs_aio_ctx, refcount);
2126 +
2127 + cifsFileInfo_put(ctx->cfile);
2128 +- kvfree(ctx->bv);
2129 ++
2130 ++ /*
2131 ++ * ctx->bv is only set if setup_aio_ctx_iter() was call successfuly
2132 ++ * which means that iov_iter_get_pages() was a success and thus that
2133 ++ * we have taken reference on pages.
2134 ++ */
2135 ++ if (ctx->bv) {
2136 ++ unsigned i;
2137 ++
2138 ++ for (i = 0; i < ctx->npages; i++) {
2139 ++ if (ctx->should_dirty)
2140 ++ set_page_dirty(ctx->bv[i].bv_page);
2141 ++ put_page(ctx->bv[i].bv_page);
2142 ++ }
2143 ++ kvfree(ctx->bv);
2144 ++ }
2145 ++
2146 + kfree(ctx);
2147 + }
2148 +
2149 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2150 +index 938e75cc3b66..85a3c051e622 100644
2151 +--- a/fs/cifs/smb2pdu.c
2152 ++++ b/fs/cifs/smb2pdu.c
2153 +@@ -3402,6 +3402,7 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
2154 + rc);
2155 + }
2156 + free_rsp_buf(resp_buftype, rsp_iov.iov_base);
2157 ++ cifs_small_buf_release(req);
2158 + return rc == -ENODATA ? 0 : rc;
2159 + } else
2160 + trace_smb3_read_done(xid, req->PersistentFileId,
2161 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
2162 +index 86ed9c686249..dc82e7757f67 100644
2163 +--- a/fs/ext4/xattr.c
2164 ++++ b/fs/ext4/xattr.c
2165 +@@ -829,6 +829,7 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
2166 + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
2167 + if (IS_ERR(bh)) {
2168 + ret = PTR_ERR(bh);
2169 ++ bh = NULL;
2170 + goto out;
2171 + }
2172 +
2173 +@@ -2903,6 +2904,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
2174 + if (error == -EIO)
2175 + EXT4_ERROR_INODE(inode, "block %llu read error",
2176 + EXT4_I(inode)->i_file_acl);
2177 ++ bh = NULL;
2178 + goto cleanup;
2179 + }
2180 + error = ext4_xattr_check_block(inode, bh);
2181 +@@ -3059,6 +3061,7 @@ ext4_xattr_block_cache_find(struct inode *inode,
2182 + if (IS_ERR(bh)) {
2183 + if (PTR_ERR(bh) == -ENOMEM)
2184 + return NULL;
2185 ++ bh = NULL;
2186 + EXT4_ERROR_INODE(inode, "block %lu read error",
2187 + (unsigned long)ce->e_value);
2188 + } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
2189 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
2190 +index 0570391eaa16..15c025c1a305 100644
2191 +--- a/fs/nfs/super.c
2192 ++++ b/fs/nfs/super.c
2193 +@@ -2041,7 +2041,8 @@ static int nfs23_validate_mount_data(void *options,
2194 + memcpy(sap, &data->addr, sizeof(data->addr));
2195 + args->nfs_server.addrlen = sizeof(data->addr);
2196 + args->nfs_server.port = ntohs(data->addr.sin_port);
2197 +- if (!nfs_verify_server_address(sap))
2198 ++ if (sap->sa_family != AF_INET ||
2199 ++ !nfs_verify_server_address(sap))
2200 + goto out_no_address;
2201 +
2202 + if (!(data->flags & NFS_MOUNT_TCP))
2203 +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
2204 +index c74e4538d0eb..258f741d6a21 100644
2205 +--- a/fs/nfsd/nfs4callback.c
2206 ++++ b/fs/nfsd/nfs4callback.c
2207 +@@ -1023,8 +1023,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
2208 + cb->cb_seq_status = 1;
2209 + cb->cb_status = 0;
2210 + if (minorversion) {
2211 +- if (!nfsd41_cb_get_slot(clp, task))
2212 ++ if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
2213 + return;
2214 ++ cb->cb_holds_slot = true;
2215 + }
2216 + rpc_call_start(task);
2217 + }
2218 +@@ -1051,6 +1052,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
2219 + return true;
2220 + }
2221 +
2222 ++ if (!cb->cb_holds_slot)
2223 ++ goto need_restart;
2224 ++
2225 + switch (cb->cb_seq_status) {
2226 + case 0:
2227 + /*
2228 +@@ -1089,6 +1093,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
2229 + cb->cb_seq_status);
2230 + }
2231 +
2232 ++ cb->cb_holds_slot = false;
2233 + clear_bit(0, &clp->cl_cb_slot_busy);
2234 + rpc_wake_up_next(&clp->cl_cb_waitq);
2235 + dprintk("%s: freed slot, new seqid=%d\n", __func__,
2236 +@@ -1296,6 +1301,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
2237 + cb->cb_seq_status = 1;
2238 + cb->cb_status = 0;
2239 + cb->cb_need_restart = false;
2240 ++ cb->cb_holds_slot = false;
2241 + }
2242 +
2243 + void nfsd4_run_cb(struct nfsd4_callback *cb)
2244 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
2245 +index 6a45fb00c5fc..f056b1d3fecd 100644
2246 +--- a/fs/nfsd/nfs4state.c
2247 ++++ b/fs/nfsd/nfs4state.c
2248 +@@ -265,6 +265,7 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
2249 + static void
2250 + free_blocked_lock(struct nfsd4_blocked_lock *nbl)
2251 + {
2252 ++ locks_delete_block(&nbl->nbl_lock);
2253 + locks_release_private(&nbl->nbl_lock);
2254 + kfree(nbl);
2255 + }
2256 +@@ -293,11 +294,18 @@ remove_blocked_locks(struct nfs4_lockowner *lo)
2257 + nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
2258 + nbl_lru);
2259 + list_del_init(&nbl->nbl_lru);
2260 +- locks_delete_block(&nbl->nbl_lock);
2261 + free_blocked_lock(nbl);
2262 + }
2263 + }
2264 +
2265 ++static void
2266 ++nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
2267 ++{
2268 ++ struct nfsd4_blocked_lock *nbl = container_of(cb,
2269 ++ struct nfsd4_blocked_lock, nbl_cb);
2270 ++ locks_delete_block(&nbl->nbl_lock);
2271 ++}
2272 ++
2273 + static int
2274 + nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
2275 + {
2276 +@@ -325,6 +333,7 @@ nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
2277 + }
2278 +
2279 + static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
2280 ++ .prepare = nfsd4_cb_notify_lock_prepare,
2281 + .done = nfsd4_cb_notify_lock_done,
2282 + .release = nfsd4_cb_notify_lock_release,
2283 + };
2284 +@@ -4863,7 +4872,6 @@ nfs4_laundromat(struct nfsd_net *nn)
2285 + nbl = list_first_entry(&reaplist,
2286 + struct nfsd4_blocked_lock, nbl_lru);
2287 + list_del_init(&nbl->nbl_lru);
2288 +- locks_delete_block(&nbl->nbl_lock);
2289 + free_blocked_lock(nbl);
2290 + }
2291 + out:
2292 +diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
2293 +index 396c76755b03..9d6cb246c6c5 100644
2294 +--- a/fs/nfsd/state.h
2295 ++++ b/fs/nfsd/state.h
2296 +@@ -70,6 +70,7 @@ struct nfsd4_callback {
2297 + int cb_seq_status;
2298 + int cb_status;
2299 + bool cb_need_restart;
2300 ++ bool cb_holds_slot;
2301 + };
2302 +
2303 + struct nfsd4_callback_ops {
2304 +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
2305 +index d65390727541..7325baa8f9d4 100644
2306 +--- a/fs/proc/proc_sysctl.c
2307 ++++ b/fs/proc/proc_sysctl.c
2308 +@@ -1626,9 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
2309 + if (--header->nreg)
2310 + return;
2311 +
2312 +- if (parent)
2313 ++ if (parent) {
2314 + put_links(header);
2315 +- start_unregistering(header);
2316 ++ start_unregistering(header);
2317 ++ }
2318 ++
2319 + if (!--header->count)
2320 + kfree_rcu(header, rcu);
2321 +
2322 +diff --git a/fs/splice.c b/fs/splice.c
2323 +index 90c29675d573..7da7d5437472 100644
2324 +--- a/fs/splice.c
2325 ++++ b/fs/splice.c
2326 +@@ -333,8 +333,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = {
2327 + .get = generic_pipe_buf_get,
2328 + };
2329 +
2330 +-static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
2331 +- struct pipe_buffer *buf)
2332 ++int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
2333 ++ struct pipe_buffer *buf)
2334 + {
2335 + return 1;
2336 + }
2337 +diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
2338 +index 1021106438b2..c80e5833b1d6 100644
2339 +--- a/include/drm/ttm/ttm_bo_driver.h
2340 ++++ b/include/drm/ttm/ttm_bo_driver.h
2341 +@@ -411,7 +411,6 @@ extern struct ttm_bo_global {
2342 + /**
2343 + * Protected by ttm_global_mutex.
2344 + */
2345 +- unsigned int use_count;
2346 + struct list_head device_list;
2347 +
2348 + /**
2349 +diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
2350 +index 2c0af7b00715..c94ab8b53a23 100644
2351 +--- a/include/linux/etherdevice.h
2352 ++++ b/include/linux/etherdevice.h
2353 +@@ -447,6 +447,18 @@ static inline void eth_addr_dec(u8 *addr)
2354 + u64_to_ether_addr(u, addr);
2355 + }
2356 +
2357 ++/**
2358 ++ * eth_addr_inc() - Increment the given MAC address.
2359 ++ * @addr: Pointer to a six-byte array containing Ethernet address to increment.
2360 ++ */
2361 ++static inline void eth_addr_inc(u8 *addr)
2362 ++{
2363 ++ u64 u = ether_addr_to_u64(addr);
2364 ++
2365 ++ u++;
2366 ++ u64_to_ether_addr(u, addr);
2367 ++}
2368 ++
2369 + /**
2370 + * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
2371 + * @dev: Pointer to a device structure
2372 +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
2373 +index 3ecd7ea212ae..66ee63cd5968 100644
2374 +--- a/include/linux/pipe_fs_i.h
2375 ++++ b/include/linux/pipe_fs_i.h
2376 +@@ -181,6 +181,7 @@ void free_pipe_info(struct pipe_inode_info *);
2377 + void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
2378 + int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
2379 + int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
2380 ++int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
2381 + void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
2382 + void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
2383 +
2384 +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
2385 +index 0612439909dc..9e0b9ecb43db 100644
2386 +--- a/include/net/netfilter/nf_tables.h
2387 ++++ b/include/net/netfilter/nf_tables.h
2388 +@@ -382,6 +382,7 @@ void nft_unregister_set(struct nft_set_type *type);
2389 + * @dtype: data type (verdict or numeric type defined by userspace)
2390 + * @objtype: object type (see NFT_OBJECT_* definitions)
2391 + * @size: maximum set size
2392 ++ * @use: number of rules references to this set
2393 + * @nelems: number of elements
2394 + * @ndeact: number of deactivated elements queued for removal
2395 + * @timeout: default timeout value in jiffies
2396 +@@ -407,6 +408,7 @@ struct nft_set {
2397 + u32 dtype;
2398 + u32 objtype;
2399 + u32 size;
2400 ++ u32 use;
2401 + atomic_t nelems;
2402 + u32 ndeact;
2403 + u64 timeout;
2404 +@@ -467,6 +469,10 @@ struct nft_set_binding {
2405 + u32 flags;
2406 + };
2407 +
2408 ++enum nft_trans_phase;
2409 ++void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
2410 ++ struct nft_set_binding *binding,
2411 ++ enum nft_trans_phase phase);
2412 + int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
2413 + struct nft_set_binding *binding);
2414 + void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
2415 +diff --git a/include/net/netrom.h b/include/net/netrom.h
2416 +index 5a0714ff500f..80f15b1c1a48 100644
2417 +--- a/include/net/netrom.h
2418 ++++ b/include/net/netrom.h
2419 +@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
2420 + int nr_t1timer_running(struct sock *);
2421 +
2422 + /* sysctl_net_netrom.c */
2423 +-void nr_register_sysctl(void);
2424 ++int nr_register_sysctl(void);
2425 + void nr_unregister_sysctl(void);
2426 +
2427 + #endif
2428 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
2429 +index fb8b7b5d745d..451b1f9e80a6 100644
2430 +--- a/kernel/sched/deadline.c
2431 ++++ b/kernel/sched/deadline.c
2432 +@@ -252,7 +252,6 @@ static void task_non_contending(struct task_struct *p)
2433 + if (dl_entity_is_special(dl_se))
2434 + return;
2435 +
2436 +- WARN_ON(hrtimer_active(&dl_se->inactive_timer));
2437 + WARN_ON(dl_se->dl_non_contending);
2438 +
2439 + zerolag_time = dl_se->deadline -
2440 +@@ -269,7 +268,7 @@ static void task_non_contending(struct task_struct *p)
2441 + * If the "0-lag time" already passed, decrease the active
2442 + * utilization now, instead of starting a timer
2443 + */
2444 +- if (zerolag_time < 0) {
2445 ++ if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
2446 + if (dl_task(p))
2447 + sub_running_bw(dl_se, dl_rq);
2448 + if (!dl_task(p) || p->state == TASK_DEAD) {
2449 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2450 +index eeb605656d59..be55a64748ba 100644
2451 +--- a/kernel/sched/fair.c
2452 ++++ b/kernel/sched/fair.c
2453 +@@ -1994,6 +1994,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
2454 + if (p->last_task_numa_placement) {
2455 + delta = runtime - p->last_sum_exec_runtime;
2456 + *period = now - p->last_task_numa_placement;
2457 ++
2458 ++ /* Avoid time going backwards, prevent potential divide error: */
2459 ++ if (unlikely((s64)*period < 0))
2460 ++ *period = 0;
2461 + } else {
2462 + delta = p->se.avg.load_sum;
2463 + *period = LOAD_AVG_MAX;
2464 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
2465 +index b49affb4666b..4463ae28bf1a 100644
2466 +--- a/kernel/trace/ring_buffer.c
2467 ++++ b/kernel/trace/ring_buffer.c
2468 +@@ -776,7 +776,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
2469 +
2470 + preempt_disable_notrace();
2471 + time = rb_time_stamp(buffer);
2472 +- preempt_enable_no_resched_notrace();
2473 ++ preempt_enable_notrace();
2474 +
2475 + return time;
2476 + }
2477 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2478 +index 89158aa93fa6..d07fc2836786 100644
2479 +--- a/kernel/trace/trace.c
2480 ++++ b/kernel/trace/trace.c
2481 +@@ -496,8 +496,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
2482 + * not modified.
2483 + */
2484 + pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
2485 +- if (!pid_list)
2486 ++ if (!pid_list) {
2487 ++ trace_parser_put(&parser);
2488 + return -ENOMEM;
2489 ++ }
2490 +
2491 + pid_list->pid_max = READ_ONCE(pid_max);
2492 +
2493 +@@ -507,6 +509,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
2494 +
2495 + pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
2496 + if (!pid_list->pids) {
2497 ++ trace_parser_put(&parser);
2498 + kfree(pid_list);
2499 + return -ENOMEM;
2500 + }
2501 +@@ -6820,19 +6823,23 @@ struct buffer_ref {
2502 + struct ring_buffer *buffer;
2503 + void *page;
2504 + int cpu;
2505 +- int ref;
2506 ++ refcount_t refcount;
2507 + };
2508 +
2509 ++static void buffer_ref_release(struct buffer_ref *ref)
2510 ++{
2511 ++ if (!refcount_dec_and_test(&ref->refcount))
2512 ++ return;
2513 ++ ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2514 ++ kfree(ref);
2515 ++}
2516 ++
2517 + static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
2518 + struct pipe_buffer *buf)
2519 + {
2520 + struct buffer_ref *ref = (struct buffer_ref *)buf->private;
2521 +
2522 +- if (--ref->ref)
2523 +- return;
2524 +-
2525 +- ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2526 +- kfree(ref);
2527 ++ buffer_ref_release(ref);
2528 + buf->private = 0;
2529 + }
2530 +
2531 +@@ -6841,7 +6848,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
2532 + {
2533 + struct buffer_ref *ref = (struct buffer_ref *)buf->private;
2534 +
2535 +- ref->ref++;
2536 ++ refcount_inc(&ref->refcount);
2537 + }
2538 +
2539 + /* Pipe buffer operations for a buffer. */
2540 +@@ -6849,7 +6856,7 @@ static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2541 + .can_merge = 0,
2542 + .confirm = generic_pipe_buf_confirm,
2543 + .release = buffer_pipe_buf_release,
2544 +- .steal = generic_pipe_buf_steal,
2545 ++ .steal = generic_pipe_buf_nosteal,
2546 + .get = buffer_pipe_buf_get,
2547 + };
2548 +
2549 +@@ -6862,11 +6869,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2550 + struct buffer_ref *ref =
2551 + (struct buffer_ref *)spd->partial[i].private;
2552 +
2553 +- if (--ref->ref)
2554 +- return;
2555 +-
2556 +- ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2557 +- kfree(ref);
2558 ++ buffer_ref_release(ref);
2559 + spd->partial[i].private = 0;
2560 + }
2561 +
2562 +@@ -6921,7 +6924,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2563 + break;
2564 + }
2565 +
2566 +- ref->ref = 1;
2567 ++ refcount_set(&ref->refcount, 1);
2568 + ref->buffer = iter->trace_buffer->buffer;
2569 + ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2570 + if (IS_ERR(ref->page)) {
2571 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2572 +index fc5d23d752a5..e94d2b6bee7f 100644
2573 +--- a/kernel/workqueue.c
2574 ++++ b/kernel/workqueue.c
2575 +@@ -2931,6 +2931,9 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
2576 + if (WARN_ON(!wq_online))
2577 + return false;
2578 +
2579 ++ if (WARN_ON(!work->func))
2580 ++ return false;
2581 ++
2582 + if (!from_cancel) {
2583 + lock_map_acquire(&work->lockdep_map);
2584 + lock_map_release(&work->lockdep_map);
2585 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
2586 +index d4df5b24d75e..350d5328014f 100644
2587 +--- a/lib/Kconfig.debug
2588 ++++ b/lib/Kconfig.debug
2589 +@@ -1952,6 +1952,7 @@ config TEST_KMOD
2590 + depends on m
2591 + depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
2592 + depends on NETDEVICES && NET_CORE && INET # for TUN
2593 ++ depends on BLOCK
2594 + select TEST_LKM
2595 + select XFS_FS
2596 + select TUN
2597 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2598 +index 20dd3283bb1b..318ef6ccdb3b 100644
2599 +--- a/mm/page_alloc.c
2600 ++++ b/mm/page_alloc.c
2601 +@@ -266,7 +266,20 @@ compound_page_dtor * const compound_page_dtors[] = {
2602 +
2603 + int min_free_kbytes = 1024;
2604 + int user_min_free_kbytes = -1;
2605 ++#ifdef CONFIG_DISCONTIGMEM
2606 ++/*
2607 ++ * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
2608 ++ * are not on separate NUMA nodes. Functionally this works but with
2609 ++ * watermark_boost_factor, it can reclaim prematurely as the ranges can be
2610 ++ * quite small. By default, do not boost watermarks on discontigmem as in
2611 ++ * many cases very high-order allocations like THP are likely to be
2612 ++ * unsupported and the premature reclaim offsets the advantage of long-term
2613 ++ * fragmentation avoidance.
2614 ++ */
2615 ++int watermark_boost_factor __read_mostly;
2616 ++#else
2617 + int watermark_boost_factor __read_mostly = 15000;
2618 ++#endif
2619 + int watermark_scale_factor = 10;
2620 +
2621 + static unsigned long nr_kernel_pages __initdata;
2622 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2623 +index f77888ec93f1..0bb4d712b80c 100644
2624 +--- a/net/bridge/netfilter/ebtables.c
2625 ++++ b/net/bridge/netfilter/ebtables.c
2626 +@@ -2032,7 +2032,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
2627 + if (match_kern)
2628 + match_kern->match_size = ret;
2629 +
2630 +- if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
2631 ++ /* rule should have no remaining data after target */
2632 ++ if (type == EBT_COMPAT_TARGET && size_left)
2633 + return -EINVAL;
2634 +
2635 + match32 = (struct compat_ebt_entry_mwt *) buf;
2636 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2637 +index 25d9bef27d03..3c89ca325947 100644
2638 +--- a/net/ipv4/route.c
2639 ++++ b/net/ipv4/route.c
2640 +@@ -1183,25 +1183,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
2641 + return dst;
2642 + }
2643 +
2644 +-static void ipv4_link_failure(struct sk_buff *skb)
2645 ++static void ipv4_send_dest_unreach(struct sk_buff *skb)
2646 + {
2647 + struct ip_options opt;
2648 +- struct rtable *rt;
2649 + int res;
2650 +
2651 + /* Recompile ip options since IPCB may not be valid anymore.
2652 ++ * Also check we have a reasonable ipv4 header.
2653 + */
2654 +- memset(&opt, 0, sizeof(opt));
2655 +- opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
2656 ++ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
2657 ++ ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
2658 ++ return;
2659 +
2660 +- rcu_read_lock();
2661 +- res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
2662 +- rcu_read_unlock();
2663 ++ memset(&opt, 0, sizeof(opt));
2664 ++ if (ip_hdr(skb)->ihl > 5) {
2665 ++ if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
2666 ++ return;
2667 ++ opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
2668 +
2669 +- if (res)
2670 +- return;
2671 ++ rcu_read_lock();
2672 ++ res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
2673 ++ rcu_read_unlock();
2674 +
2675 ++ if (res)
2676 ++ return;
2677 ++ }
2678 + __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
2679 ++}
2680 ++
2681 ++static void ipv4_link_failure(struct sk_buff *skb)
2682 ++{
2683 ++ struct rtable *rt;
2684 ++
2685 ++ ipv4_send_dest_unreach(skb);
2686 +
2687 + rt = skb_rtable(skb);
2688 + if (rt)
2689 +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
2690 +index ba0fc4b18465..eeb4041fa5f9 100644
2691 +--- a/net/ipv4/sysctl_net_ipv4.c
2692 ++++ b/net/ipv4/sysctl_net_ipv4.c
2693 +@@ -49,6 +49,7 @@ static int ip_ping_group_range_min[] = { 0, 0 };
2694 + static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
2695 + static int comp_sack_nr_max = 255;
2696 + static u32 u32_max_div_HZ = UINT_MAX / HZ;
2697 ++static int one_day_secs = 24 * 3600;
2698 +
2699 + /* obsolete */
2700 + static int sysctl_tcp_low_latency __read_mostly;
2701 +@@ -1151,7 +1152,9 @@ static struct ctl_table ipv4_net_table[] = {
2702 + .data = &init_net.ipv4.sysctl_tcp_min_rtt_wlen,
2703 + .maxlen = sizeof(int),
2704 + .mode = 0644,
2705 +- .proc_handler = proc_dointvec
2706 ++ .proc_handler = proc_dointvec_minmax,
2707 ++ .extra1 = &zero,
2708 ++ .extra2 = &one_day_secs
2709 + },
2710 + {
2711 + .procname = "tcp_autocorking",
2712 +diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
2713 +index dc07fcc7938e..802db01e3075 100644
2714 +--- a/net/ncsi/ncsi-rsp.c
2715 ++++ b/net/ncsi/ncsi-rsp.c
2716 +@@ -11,6 +11,7 @@
2717 + #include <linux/kernel.h>
2718 + #include <linux/init.h>
2719 + #include <linux/netdevice.h>
2720 ++#include <linux/etherdevice.h>
2721 + #include <linux/skbuff.h>
2722 +
2723 + #include <net/ncsi.h>
2724 +@@ -667,7 +668,10 @@ static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
2725 + ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2726 + memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN);
2727 + /* Increase mac address by 1 for BMC's address */
2728 +- saddr.sa_data[ETH_ALEN - 1]++;
2729 ++ eth_addr_inc((u8 *)saddr.sa_data);
2730 ++ if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
2731 ++ return -ENXIO;
2732 ++
2733 + ret = ops->ndo_set_mac_address(ndev, &saddr);
2734 + if (ret < 0)
2735 + netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
2736 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
2737 +index acb124ce92ec..e2aac80f9b7b 100644
2738 +--- a/net/netfilter/nf_tables_api.c
2739 ++++ b/net/netfilter/nf_tables_api.c
2740 +@@ -3624,6 +3624,9 @@ err1:
2741 +
2742 + static void nft_set_destroy(struct nft_set *set)
2743 + {
2744 ++ if (WARN_ON(set->use > 0))
2745 ++ return;
2746 ++
2747 + set->ops->destroy(set);
2748 + module_put(to_set_type(set->ops)->owner);
2749 + kfree(set->name);
2750 +@@ -3664,7 +3667,7 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
2751 + NL_SET_BAD_ATTR(extack, attr);
2752 + return PTR_ERR(set);
2753 + }
2754 +- if (!list_empty(&set->bindings) ||
2755 ++ if (set->use ||
2756 + (nlh->nlmsg_flags & NLM_F_NONREC && atomic_read(&set->nelems) > 0)) {
2757 + NL_SET_BAD_ATTR(extack, attr);
2758 + return -EBUSY;
2759 +@@ -3694,6 +3697,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
2760 + struct nft_set_binding *i;
2761 + struct nft_set_iter iter;
2762 +
2763 ++ if (set->use == UINT_MAX)
2764 ++ return -EOVERFLOW;
2765 ++
2766 + if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
2767 + return -EBUSY;
2768 +
2769 +@@ -3721,6 +3727,7 @@ bind:
2770 + binding->chain = ctx->chain;
2771 + list_add_tail_rcu(&binding->list, &set->bindings);
2772 + nft_set_trans_bind(ctx, set);
2773 ++ set->use++;
2774 +
2775 + return 0;
2776 + }
2777 +@@ -3740,6 +3747,25 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
2778 + }
2779 + EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
2780 +
2781 ++void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
2782 ++ struct nft_set_binding *binding,
2783 ++ enum nft_trans_phase phase)
2784 ++{
2785 ++ switch (phase) {
2786 ++ case NFT_TRANS_PREPARE:
2787 ++ set->use--;
2788 ++ return;
2789 ++ case NFT_TRANS_ABORT:
2790 ++ case NFT_TRANS_RELEASE:
2791 ++ set->use--;
2792 ++ /* fall through */
2793 ++ default:
2794 ++ nf_tables_unbind_set(ctx, set, binding,
2795 ++ phase == NFT_TRANS_COMMIT);
2796 ++ }
2797 ++}
2798 ++EXPORT_SYMBOL_GPL(nf_tables_deactivate_set);
2799 ++
2800 + void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
2801 + {
2802 + if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
2803 +diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
2804 +index f1172f99752b..eb7f9a5f2aeb 100644
2805 +--- a/net/netfilter/nft_dynset.c
2806 ++++ b/net/netfilter/nft_dynset.c
2807 +@@ -241,11 +241,15 @@ static void nft_dynset_deactivate(const struct nft_ctx *ctx,
2808 + {
2809 + struct nft_dynset *priv = nft_expr_priv(expr);
2810 +
2811 +- if (phase == NFT_TRANS_PREPARE)
2812 +- return;
2813 ++ nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
2814 ++}
2815 ++
2816 ++static void nft_dynset_activate(const struct nft_ctx *ctx,
2817 ++ const struct nft_expr *expr)
2818 ++{
2819 ++ struct nft_dynset *priv = nft_expr_priv(expr);
2820 +
2821 +- nf_tables_unbind_set(ctx, priv->set, &priv->binding,
2822 +- phase == NFT_TRANS_COMMIT);
2823 ++ priv->set->use++;
2824 + }
2825 +
2826 + static void nft_dynset_destroy(const struct nft_ctx *ctx,
2827 +@@ -293,6 +297,7 @@ static const struct nft_expr_ops nft_dynset_ops = {
2828 + .eval = nft_dynset_eval,
2829 + .init = nft_dynset_init,
2830 + .destroy = nft_dynset_destroy,
2831 ++ .activate = nft_dynset_activate,
2832 + .deactivate = nft_dynset_deactivate,
2833 + .dump = nft_dynset_dump,
2834 + };
2835 +diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
2836 +index 14496da5141d..161c3451a747 100644
2837 +--- a/net/netfilter/nft_lookup.c
2838 ++++ b/net/netfilter/nft_lookup.c
2839 +@@ -127,11 +127,15 @@ static void nft_lookup_deactivate(const struct nft_ctx *ctx,
2840 + {
2841 + struct nft_lookup *priv = nft_expr_priv(expr);
2842 +
2843 +- if (phase == NFT_TRANS_PREPARE)
2844 +- return;
2845 ++ nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
2846 ++}
2847 ++
2848 ++static void nft_lookup_activate(const struct nft_ctx *ctx,
2849 ++ const struct nft_expr *expr)
2850 ++{
2851 ++ struct nft_lookup *priv = nft_expr_priv(expr);
2852 +
2853 +- nf_tables_unbind_set(ctx, priv->set, &priv->binding,
2854 +- phase == NFT_TRANS_COMMIT);
2855 ++ priv->set->use++;
2856 + }
2857 +
2858 + static void nft_lookup_destroy(const struct nft_ctx *ctx,
2859 +@@ -222,6 +226,7 @@ static const struct nft_expr_ops nft_lookup_ops = {
2860 + .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
2861 + .eval = nft_lookup_eval,
2862 + .init = nft_lookup_init,
2863 ++ .activate = nft_lookup_activate,
2864 + .deactivate = nft_lookup_deactivate,
2865 + .destroy = nft_lookup_destroy,
2866 + .dump = nft_lookup_dump,
2867 +diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
2868 +index ae178e914486..bf92a40dd1b2 100644
2869 +--- a/net/netfilter/nft_objref.c
2870 ++++ b/net/netfilter/nft_objref.c
2871 +@@ -64,21 +64,34 @@ nla_put_failure:
2872 + return -1;
2873 + }
2874 +
2875 +-static void nft_objref_destroy(const struct nft_ctx *ctx,
2876 +- const struct nft_expr *expr)
2877 ++static void nft_objref_deactivate(const struct nft_ctx *ctx,
2878 ++ const struct nft_expr *expr,
2879 ++ enum nft_trans_phase phase)
2880 + {
2881 + struct nft_object *obj = nft_objref_priv(expr);
2882 +
2883 ++ if (phase == NFT_TRANS_COMMIT)
2884 ++ return;
2885 ++
2886 + obj->use--;
2887 + }
2888 +
2889 ++static void nft_objref_activate(const struct nft_ctx *ctx,
2890 ++ const struct nft_expr *expr)
2891 ++{
2892 ++ struct nft_object *obj = nft_objref_priv(expr);
2893 ++
2894 ++ obj->use++;
2895 ++}
2896 ++
2897 + static struct nft_expr_type nft_objref_type;
2898 + static const struct nft_expr_ops nft_objref_ops = {
2899 + .type = &nft_objref_type,
2900 + .size = NFT_EXPR_SIZE(sizeof(struct nft_object *)),
2901 + .eval = nft_objref_eval,
2902 + .init = nft_objref_init,
2903 +- .destroy = nft_objref_destroy,
2904 ++ .activate = nft_objref_activate,
2905 ++ .deactivate = nft_objref_deactivate,
2906 + .dump = nft_objref_dump,
2907 + };
2908 +
2909 +@@ -161,11 +174,15 @@ static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
2910 + {
2911 + struct nft_objref_map *priv = nft_expr_priv(expr);
2912 +
2913 +- if (phase == NFT_TRANS_PREPARE)
2914 +- return;
2915 ++ nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
2916 ++}
2917 ++
2918 ++static void nft_objref_map_activate(const struct nft_ctx *ctx,
2919 ++ const struct nft_expr *expr)
2920 ++{
2921 ++ struct nft_objref_map *priv = nft_expr_priv(expr);
2922 +
2923 +- nf_tables_unbind_set(ctx, priv->set, &priv->binding,
2924 +- phase == NFT_TRANS_COMMIT);
2925 ++ priv->set->use++;
2926 + }
2927 +
2928 + static void nft_objref_map_destroy(const struct nft_ctx *ctx,
2929 +@@ -182,6 +199,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
2930 + .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
2931 + .eval = nft_objref_map_eval,
2932 + .init = nft_objref_map_init,
2933 ++ .activate = nft_objref_map_activate,
2934 + .deactivate = nft_objref_map_deactivate,
2935 + .destroy = nft_objref_map_destroy,
2936 + .dump = nft_objref_map_dump,
2937 +diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
2938 +index 1d3144d19903..71ffd1a6dc7c 100644
2939 +--- a/net/netrom/af_netrom.c
2940 ++++ b/net/netrom/af_netrom.c
2941 +@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
2942 + int i;
2943 + int rc = proto_register(&nr_proto, 0);
2944 +
2945 +- if (rc != 0)
2946 +- goto out;
2947 ++ if (rc)
2948 ++ return rc;
2949 +
2950 + if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
2951 +- printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
2952 +- return -1;
2953 ++ pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
2954 ++ __func__);
2955 ++ rc = -EINVAL;
2956 ++ goto unregister_proto;
2957 + }
2958 +
2959 + dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
2960 +- if (dev_nr == NULL) {
2961 +- printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
2962 +- return -1;
2963 ++ if (!dev_nr) {
2964 ++ pr_err("NET/ROM: %s - unable to allocate device array\n",
2965 ++ __func__);
2966 ++ rc = -ENOMEM;
2967 ++ goto unregister_proto;
2968 + }
2969 +
2970 + for (i = 0; i < nr_ndevs; i++) {
2971 +@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
2972 + sprintf(name, "nr%d", i);
2973 + dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
2974 + if (!dev) {
2975 +- printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
2976 ++ rc = -ENOMEM;
2977 + goto fail;
2978 + }
2979 +
2980 + dev->base_addr = i;
2981 +- if (register_netdev(dev)) {
2982 +- printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
2983 ++ rc = register_netdev(dev);
2984 ++ if (rc) {
2985 + free_netdev(dev);
2986 + goto fail;
2987 + }
2988 +@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
2989 + dev_nr[i] = dev;
2990 + }
2991 +
2992 +- if (sock_register(&nr_family_ops)) {
2993 +- printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
2994 ++ rc = sock_register(&nr_family_ops);
2995 ++ if (rc)
2996 + goto fail;
2997 +- }
2998 +
2999 +- register_netdevice_notifier(&nr_dev_notifier);
3000 ++ rc = register_netdevice_notifier(&nr_dev_notifier);
3001 ++ if (rc)
3002 ++ goto out_sock;
3003 +
3004 + ax25_register_pid(&nr_pid);
3005 + ax25_linkfail_register(&nr_linkfail_notifier);
3006 +
3007 + #ifdef CONFIG_SYSCTL
3008 +- nr_register_sysctl();
3009 ++ rc = nr_register_sysctl();
3010 ++ if (rc)
3011 ++ goto out_sysctl;
3012 + #endif
3013 +
3014 + nr_loopback_init();
3015 +
3016 +- proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
3017 +- proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
3018 +- proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
3019 +-out:
3020 +- return rc;
3021 ++ rc = -ENOMEM;
3022 ++ if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
3023 ++ goto proc_remove1;
3024 ++ if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
3025 ++ &nr_neigh_seqops))
3026 ++ goto proc_remove2;
3027 ++ if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
3028 ++ &nr_node_seqops))
3029 ++ goto proc_remove3;
3030 ++
3031 ++ return 0;
3032 ++
3033 ++proc_remove3:
3034 ++ remove_proc_entry("nr_neigh", init_net.proc_net);
3035 ++proc_remove2:
3036 ++ remove_proc_entry("nr", init_net.proc_net);
3037 ++proc_remove1:
3038 ++
3039 ++ nr_loopback_clear();
3040 ++ nr_rt_free();
3041 ++
3042 ++#ifdef CONFIG_SYSCTL
3043 ++ nr_unregister_sysctl();
3044 ++out_sysctl:
3045 ++#endif
3046 ++ ax25_linkfail_release(&nr_linkfail_notifier);
3047 ++ ax25_protocol_release(AX25_P_NETROM);
3048 ++ unregister_netdevice_notifier(&nr_dev_notifier);
3049 ++out_sock:
3050 ++ sock_unregister(PF_NETROM);
3051 + fail:
3052 + while (--i >= 0) {
3053 + unregister_netdev(dev_nr[i]);
3054 + free_netdev(dev_nr[i]);
3055 + }
3056 + kfree(dev_nr);
3057 ++unregister_proto:
3058 + proto_unregister(&nr_proto);
3059 +- rc = -1;
3060 +- goto out;
3061 ++ return rc;
3062 + }
3063 +
3064 + module_init(nr_proto_init);
3065 +diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c
3066 +index 215ad22a9647..93d13f019981 100644
3067 +--- a/net/netrom/nr_loopback.c
3068 ++++ b/net/netrom/nr_loopback.c
3069 +@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
3070 + }
3071 + }
3072 +
3073 +-void __exit nr_loopback_clear(void)
3074 ++void nr_loopback_clear(void)
3075 + {
3076 + del_timer_sync(&loopback_timer);
3077 + skb_queue_purge(&loopback_queue);
3078 +diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
3079 +index 6485f593e2f0..b76aa668a94b 100644
3080 +--- a/net/netrom/nr_route.c
3081 ++++ b/net/netrom/nr_route.c
3082 +@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
3083 + /*
3084 + * Free all memory associated with the nodes and routes lists.
3085 + */
3086 +-void __exit nr_rt_free(void)
3087 ++void nr_rt_free(void)
3088 + {
3089 + struct nr_neigh *s = NULL;
3090 + struct nr_node *t = NULL;
3091 +diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
3092 +index ba1c368b3f18..771011b84270 100644
3093 +--- a/net/netrom/sysctl_net_netrom.c
3094 ++++ b/net/netrom/sysctl_net_netrom.c
3095 +@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
3096 + { }
3097 + };
3098 +
3099 +-void __init nr_register_sysctl(void)
3100 ++int __init nr_register_sysctl(void)
3101 + {
3102 + nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
3103 ++ if (!nr_table_header)
3104 ++ return -ENOMEM;
3105 ++ return 0;
3106 + }
3107 +
3108 + void nr_unregister_sysctl(void)
3109 +diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
3110 +index 65387e1e6964..cd7e01ea8144 100644
3111 +--- a/net/rds/af_rds.c
3112 ++++ b/net/rds/af_rds.c
3113 +@@ -506,6 +506,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
3114 + struct rds_sock *rs = rds_sk_to_rs(sk);
3115 + int ret = 0;
3116 +
3117 ++ if (addr_len < offsetofend(struct sockaddr, sa_family))
3118 ++ return -EINVAL;
3119 ++
3120 + lock_sock(sk);
3121 +
3122 + switch (uaddr->sa_family) {
3123 +diff --git a/net/rds/bind.c b/net/rds/bind.c
3124 +index 17c9d9f0c848..0f4398e7f2a7 100644
3125 +--- a/net/rds/bind.c
3126 ++++ b/net/rds/bind.c
3127 +@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3128 + /* We allow an RDS socket to be bound to either IPv4 or IPv6
3129 + * address.
3130 + */
3131 ++ if (addr_len < offsetofend(struct sockaddr, sa_family))
3132 ++ return -EINVAL;
3133 + if (uaddr->sa_family == AF_INET) {
3134 + struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
3135 +
3136 +diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
3137 +index e0f70c4051b6..01e764f8f224 100644
3138 +--- a/net/rds/ib_fmr.c
3139 ++++ b/net/rds/ib_fmr.c
3140 +@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
3141 + else
3142 + pool = rds_ibdev->mr_1m_pool;
3143 +
3144 ++ if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
3145 ++ queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
3146 ++
3147 ++ /* Switch pools if one of the pool is reaching upper limit */
3148 ++ if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
3149 ++ if (pool->pool_type == RDS_IB_MR_8K_POOL)
3150 ++ pool = rds_ibdev->mr_1m_pool;
3151 ++ else
3152 ++ pool = rds_ibdev->mr_8k_pool;
3153 ++ }
3154 ++
3155 + ibmr = rds_ib_try_reuse_ibmr(pool);
3156 + if (ibmr)
3157 + return ibmr;
3158 +diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
3159 +index 63c8d107adcf..d664e9ade74d 100644
3160 +--- a/net/rds/ib_rdma.c
3161 ++++ b/net/rds/ib_rdma.c
3162 +@@ -454,9 +454,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
3163 + struct rds_ib_mr *ibmr = NULL;
3164 + int iter = 0;
3165 +
3166 +- if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
3167 +- queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
3168 +-
3169 + while (1) {
3170 + ibmr = rds_ib_reuse_mr(pool);
3171 + if (ibmr)
3172 +diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
3173 +index 7af4f99c4a93..094a6621f8e8 100644
3174 +--- a/net/rose/rose_loopback.c
3175 ++++ b/net/rose/rose_loopback.c
3176 +@@ -16,6 +16,7 @@
3177 + #include <linux/init.h>
3178 +
3179 + static struct sk_buff_head loopback_queue;
3180 ++#define ROSE_LOOPBACK_LIMIT 1000
3181 + static struct timer_list loopback_timer;
3182 +
3183 + static void rose_set_loopback_timer(void);
3184 +@@ -35,29 +36,27 @@ static int rose_loopback_running(void)
3185 +
3186 + int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
3187 + {
3188 +- struct sk_buff *skbn;
3189 ++ struct sk_buff *skbn = NULL;
3190 +
3191 +- skbn = skb_clone(skb, GFP_ATOMIC);
3192 ++ if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
3193 ++ skbn = skb_clone(skb, GFP_ATOMIC);
3194 +
3195 +- kfree_skb(skb);
3196 +-
3197 +- if (skbn != NULL) {
3198 ++ if (skbn) {
3199 ++ consume_skb(skb);
3200 + skb_queue_tail(&loopback_queue, skbn);
3201 +
3202 + if (!rose_loopback_running())
3203 + rose_set_loopback_timer();
3204 ++ } else {
3205 ++ kfree_skb(skb);
3206 + }
3207 +
3208 + return 1;
3209 + }
3210 +
3211 +-
3212 + static void rose_set_loopback_timer(void)
3213 + {
3214 +- del_timer(&loopback_timer);
3215 +-
3216 +- loopback_timer.expires = jiffies + 10;
3217 +- add_timer(&loopback_timer);
3218 ++ mod_timer(&loopback_timer, jiffies + 10);
3219 + }
3220 +
3221 + static void rose_loopback_timer(struct timer_list *unused)
3222 +@@ -68,8 +67,12 @@ static void rose_loopback_timer(struct timer_list *unused)
3223 + struct sock *sk;
3224 + unsigned short frametype;
3225 + unsigned int lci_i, lci_o;
3226 ++ int count;
3227 +
3228 +- while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
3229 ++ for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
3230 ++ skb = skb_dequeue(&loopback_queue);
3231 ++ if (!skb)
3232 ++ return;
3233 + if (skb->len < ROSE_MIN_LEN) {
3234 + kfree_skb(skb);
3235 + continue;
3236 +@@ -106,6 +109,8 @@ static void rose_loopback_timer(struct timer_list *unused)
3237 + kfree_skb(skb);
3238 + }
3239 + }
3240 ++ if (!skb_queue_empty(&loopback_queue))
3241 ++ mod_timer(&loopback_timer, jiffies + 1);
3242 + }
3243 +
3244 + void __exit rose_loopback_clear(void)
3245 +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
3246 +index 9128aa0e40aa..b4ffb81223ad 100644
3247 +--- a/net/rxrpc/input.c
3248 ++++ b/net/rxrpc/input.c
3249 +@@ -1155,19 +1155,19 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
3250 + * handle data received on the local endpoint
3251 + * - may be called in interrupt context
3252 + *
3253 +- * The socket is locked by the caller and this prevents the socket from being
3254 +- * shut down and the local endpoint from going away, thus sk_user_data will not
3255 +- * be cleared until this function returns.
3256 ++ * [!] Note that as this is called from the encap_rcv hook, the socket is not
3257 ++ * held locked by the caller and nothing prevents sk_user_data on the UDP from
3258 ++ * being cleared in the middle of processing this function.
3259 + *
3260 + * Called with the RCU read lock held from the IP layer via UDP.
3261 + */
3262 + int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
3263 + {
3264 ++ struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
3265 + struct rxrpc_connection *conn;
3266 + struct rxrpc_channel *chan;
3267 + struct rxrpc_call *call = NULL;
3268 + struct rxrpc_skb_priv *sp;
3269 +- struct rxrpc_local *local = udp_sk->sk_user_data;
3270 + struct rxrpc_peer *peer = NULL;
3271 + struct rxrpc_sock *rx = NULL;
3272 + unsigned int channel;
3273 +@@ -1175,6 +1175,10 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
3274 +
3275 + _enter("%p", udp_sk);
3276 +
3277 ++ if (unlikely(!local)) {
3278 ++ kfree_skb(skb);
3279 ++ return 0;
3280 ++ }
3281 + if (skb->tstamp == 0)
3282 + skb->tstamp = ktime_get_real();
3283 +
3284 +diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
3285 +index 0906e51d3cfb..10317dbdab5f 100644
3286 +--- a/net/rxrpc/local_object.c
3287 ++++ b/net/rxrpc/local_object.c
3288 +@@ -304,7 +304,8 @@ nomem:
3289 + ret = -ENOMEM;
3290 + sock_error:
3291 + mutex_unlock(&rxnet->local_mutex);
3292 +- kfree(local);
3293 ++ if (local)
3294 ++ call_rcu(&local->rcu, rxrpc_local_rcu);
3295 + _leave(" = %d", ret);
3296 + return ERR_PTR(ret);
3297 +
3298 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
3299 +index 12bb23b8e0c5..261131dfa1f1 100644
3300 +--- a/net/sunrpc/cache.c
3301 ++++ b/net/sunrpc/cache.c
3302 +@@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
3303 + h->last_refresh = now;
3304 + }
3305 +
3306 ++static inline int cache_is_valid(struct cache_head *h);
3307 + static void cache_fresh_locked(struct cache_head *head, time_t expiry,
3308 + struct cache_detail *detail);
3309 + static void cache_fresh_unlocked(struct cache_head *head,
3310 +@@ -105,6 +106,8 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
3311 + if (cache_is_expired(detail, tmp)) {
3312 + hlist_del_init_rcu(&tmp->cache_list);
3313 + detail->entries --;
3314 ++ if (cache_is_valid(tmp) == -EAGAIN)
3315 ++ set_bit(CACHE_NEGATIVE, &tmp->flags);
3316 + cache_fresh_locked(tmp, 0, detail);
3317 + freeme = tmp;
3318 + break;
3319 +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
3320 +index 4ad3586da8f0..340a6e7c43a7 100644
3321 +--- a/net/tipc/netlink_compat.c
3322 ++++ b/net/tipc/netlink_compat.c
3323 +@@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
3324 + if (msg->rep_type)
3325 + tipc_tlv_init(msg->rep, msg->rep_type);
3326 +
3327 +- if (cmd->header)
3328 +- (*cmd->header)(msg);
3329 ++ if (cmd->header) {
3330 ++ err = (*cmd->header)(msg);
3331 ++ if (err) {
3332 ++ kfree_skb(msg->rep);
3333 ++ msg->rep = NULL;
3334 ++ return err;
3335 ++ }
3336 ++ }
3337 +
3338 + arg = nlmsg_new(0, GFP_KERNEL);
3339 + if (!arg) {
3340 +@@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
3341 + if (!bearer)
3342 + return -EMSGSIZE;
3343 +
3344 +- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
3345 ++ len = TLV_GET_DATA_LEN(msg->req);
3346 ++ len -= offsetof(struct tipc_bearer_config, name);
3347 ++ if (len <= 0)
3348 ++ return -EINVAL;
3349 ++
3350 ++ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
3351 + if (!string_is_valid(b->name, len))
3352 + return -EINVAL;
3353 +
3354 +@@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
3355 +
3356 + lc = (struct tipc_link_config *)TLV_DATA(msg->req);
3357 +
3358 +- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
3359 ++ len = TLV_GET_DATA_LEN(msg->req);
3360 ++ len -= offsetof(struct tipc_link_config, name);
3361 ++ if (len <= 0)
3362 ++ return -EINVAL;
3363 ++
3364 ++ len = min_t(int, len, TIPC_MAX_LINK_NAME);
3365 + if (!string_is_valid(lc->name, len))
3366 + return -EINVAL;
3367 +
3368 +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
3369 +index 4b5ff3d44912..5f1d937c4be9 100644
3370 +--- a/net/tls/tls_device.c
3371 ++++ b/net/tls/tls_device.c
3372 +@@ -884,7 +884,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
3373 + goto release_netdev;
3374 +
3375 + free_sw_resources:
3376 ++ up_read(&device_offload_lock);
3377 + tls_sw_free_resources_rx(sk);
3378 ++ down_read(&device_offload_lock);
3379 + release_ctx:
3380 + ctx->priv_ctx_rx = NULL;
3381 + release_netdev:
3382 +@@ -919,8 +921,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
3383 + }
3384 + out:
3385 + up_read(&device_offload_lock);
3386 +- kfree(tls_ctx->rx.rec_seq);
3387 +- kfree(tls_ctx->rx.iv);
3388 + tls_sw_release_resources_rx(sk);
3389 + }
3390 +
3391 +diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
3392 +index 450a6dbc5a88..ef8934fd8698 100644
3393 +--- a/net/tls/tls_device_fallback.c
3394 ++++ b/net/tls/tls_device_fallback.c
3395 +@@ -193,6 +193,9 @@ static void update_chksum(struct sk_buff *skb, int headln)
3396 +
3397 + static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
3398 + {
3399 ++ struct sock *sk = skb->sk;
3400 ++ int delta;
3401 ++
3402 + skb_copy_header(nskb, skb);
3403 +
3404 + skb_put(nskb, skb->len);
3405 +@@ -200,11 +203,15 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
3406 + update_chksum(nskb, headln);
3407 +
3408 + nskb->destructor = skb->destructor;
3409 +- nskb->sk = skb->sk;
3410 ++ nskb->sk = sk;
3411 + skb->destructor = NULL;
3412 + skb->sk = NULL;
3413 +- refcount_add(nskb->truesize - skb->truesize,
3414 +- &nskb->sk->sk_wmem_alloc);
3415 ++
3416 ++ delta = nskb->truesize - skb->truesize;
3417 ++ if (likely(delta < 0))
3418 ++ WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
3419 ++ else if (delta)
3420 ++ refcount_add(delta, &sk->sk_wmem_alloc);
3421 + }
3422 +
3423 + /* This function may be called after the user socket is already
3424 +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
3425 +index 96dbac91ac6e..ce5dd79365a7 100644
3426 +--- a/net/tls/tls_main.c
3427 ++++ b/net/tls/tls_main.c
3428 +@@ -304,11 +304,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
3429 + #endif
3430 + }
3431 +
3432 +- if (ctx->rx_conf == TLS_SW) {
3433 +- kfree(ctx->rx.rec_seq);
3434 +- kfree(ctx->rx.iv);
3435 ++ if (ctx->rx_conf == TLS_SW)
3436 + tls_sw_free_resources_rx(sk);
3437 +- }
3438 +
3439 + #ifdef CONFIG_TLS_DEVICE
3440 + if (ctx->rx_conf == TLS_HW)
3441 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
3442 +index d2d4f7c0d4be..839a0a0b5dfa 100644
3443 +--- a/net/tls/tls_sw.c
3444 ++++ b/net/tls/tls_sw.c
3445 +@@ -1830,6 +1830,9 @@ void tls_sw_release_resources_rx(struct sock *sk)
3446 + struct tls_context *tls_ctx = tls_get_ctx(sk);
3447 + struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
3448 +
3449 ++ kfree(tls_ctx->rx.rec_seq);
3450 ++ kfree(tls_ctx->rx.iv);
3451 ++
3452 + if (ctx->aead_recv) {
3453 + kfree_skb(ctx->recv_pkt);
3454 + ctx->recv_pkt = NULL;
3455 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3456 +index f061167062bc..a9f69c3a3e0b 100644
3457 +--- a/sound/pci/hda/patch_realtek.c
3458 ++++ b/sound/pci/hda/patch_realtek.c
3459 +@@ -5490,7 +5490,7 @@ static void alc_headset_btn_callback(struct hda_codec *codec,
3460 + jack->jack->button_state = report;
3461 + }
3462 +
3463 +-static void alc295_fixup_chromebook(struct hda_codec *codec,
3464 ++static void alc_fixup_headset_jack(struct hda_codec *codec,
3465 + const struct hda_fixup *fix, int action)
3466 + {
3467 +
3468 +@@ -5500,16 +5500,6 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
3469 + alc_headset_btn_callback);
3470 + snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
3471 + SND_JACK_HEADSET, alc_headset_btn_keymap);
3472 +- switch (codec->core.vendor_id) {
3473 +- case 0x10ec0295:
3474 +- alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
3475 +- alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
3476 +- break;
3477 +- case 0x10ec0236:
3478 +- alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
3479 +- alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
3480 +- break;
3481 +- }
3482 + break;
3483 + case HDA_FIXUP_ACT_INIT:
3484 + switch (codec->core.vendor_id) {
3485 +@@ -5530,6 +5520,25 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
3486 + }
3487 + }
3488 +
3489 ++static void alc295_fixup_chromebook(struct hda_codec *codec,
3490 ++ const struct hda_fixup *fix, int action)
3491 ++{
3492 ++ switch (action) {
3493 ++ case HDA_FIXUP_ACT_INIT:
3494 ++ switch (codec->core.vendor_id) {
3495 ++ case 0x10ec0295:
3496 ++ alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
3497 ++ alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
3498 ++ break;
3499 ++ case 0x10ec0236:
3500 ++ alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
3501 ++ alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
3502 ++ break;
3503 ++ }
3504 ++ break;
3505 ++ }
3506 ++}
3507 ++
3508 + static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
3509 + const struct hda_fixup *fix, int action)
3510 + {
3511 +@@ -5684,6 +5693,7 @@ enum {
3512 + ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
3513 + ALC255_FIXUP_ACER_HEADSET_MIC,
3514 + ALC295_FIXUP_CHROME_BOOK,
3515 ++ ALC225_FIXUP_HEADSET_JACK,
3516 + ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
3517 + ALC225_FIXUP_WYSE_AUTO_MUTE,
3518 + ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
3519 +@@ -6645,6 +6655,12 @@ static const struct hda_fixup alc269_fixups[] = {
3520 + [ALC295_FIXUP_CHROME_BOOK] = {
3521 + .type = HDA_FIXUP_FUNC,
3522 + .v.func = alc295_fixup_chromebook,
3523 ++ .chained = true,
3524 ++ .chain_id = ALC225_FIXUP_HEADSET_JACK
3525 ++ },
3526 ++ [ALC225_FIXUP_HEADSET_JACK] = {
3527 ++ .type = HDA_FIXUP_FUNC,
3528 ++ .v.func = alc_fixup_headset_jack,
3529 + },
3530 + [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
3531 + .type = HDA_FIXUP_PINS,
3532 +@@ -7143,7 +7159,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
3533 + {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
3534 + {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
3535 + {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
3536 +- {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
3537 ++ {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-headset-jack"},
3538 ++ {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"},
3539 + {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
3540 + {}
3541 + };