Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Thu, 02 May 2019 10:14:56
Message-Id: 1556792071.eeb900881e025ab32608757f38b6e88714b6b229.mpagano@gentoo
1 commit: eeb900881e025ab32608757f38b6e88714b6b229
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu May 2 10:14:31 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu May 2 10:14:31 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=eeb90088
7
8 Linux patch 4.14.115
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1114_linux-4.14.115.patch | 2061 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2065 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 10dfc5f..937317e 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -499,6 +499,10 @@ Patch: 1113_4.14.114.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.14.114
23
24 +Patch: 1114_4.14.115.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.14.115
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1114_linux-4.14.115.patch b/1114_linux-4.14.115.patch
33 new file mode 100644
34 index 0000000..5ba140e
35 --- /dev/null
36 +++ b/1114_linux-4.14.115.patch
37 @@ -0,0 +1,2061 @@
38 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
39 +index 7d8b17ce8804..94fa46d2d805 100644
40 +--- a/Documentation/admin-guide/kernel-parameters.txt
41 ++++ b/Documentation/admin-guide/kernel-parameters.txt
42 +@@ -2680,7 +2680,11 @@
43 + nosmt=force: Force disable SMT, cannot be undone
44 + via the sysfs control file.
45 +
46 +- nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
47 ++ nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds
48 ++ check bypass). With this option data leaks are possible
49 ++ in the system.
50 ++
51 ++ nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
52 + (indirect branch prediction) vulnerability. System may
53 + allow data leaks with this option, which is equivalent
54 + to spectre_v2=off.
55 +diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
56 +index a054b5ad410a..828fcd6711b3 100644
57 +--- a/Documentation/networking/ip-sysctl.txt
58 ++++ b/Documentation/networking/ip-sysctl.txt
59 +@@ -402,6 +402,7 @@ tcp_min_rtt_wlen - INTEGER
60 + minimum RTT when it is moved to a longer path (e.g., due to traffic
61 + engineering). A longer window makes the filter more resistant to RTT
62 + inflations such as transient congestion. The unit is seconds.
63 ++ Possible values: 0 - 86400 (1 day)
64 + Default: 300
65 +
66 + tcp_moderate_rcvbuf - BOOLEAN
67 +diff --git a/Makefile b/Makefile
68 +index 47a9f9883bdd..b27ffc1814e8 100644
69 +--- a/Makefile
70 ++++ b/Makefile
71 +@@ -1,7 +1,7 @@
72 + # SPDX-License-Identifier: GPL-2.0
73 + VERSION = 4
74 + PATCHLEVEL = 14
75 +-SUBLEVEL = 114
76 ++SUBLEVEL = 115
77 + EXTRAVERSION =
78 + NAME = Petit Gorille
79 +
80 +diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
81 +index 5f687ba1eaa7..8ca539bdac35 100644
82 +--- a/arch/arm/boot/compressed/head.S
83 ++++ b/arch/arm/boot/compressed/head.S
84 +@@ -1393,7 +1393,21 @@ ENTRY(efi_stub_entry)
85 +
86 + @ Preserve return value of efi_entry() in r4
87 + mov r4, r0
88 +- bl cache_clean_flush
89 ++
90 ++ @ our cache maintenance code relies on CP15 barrier instructions
91 ++ @ but since we arrived here with the MMU and caches configured
92 ++ @ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
93 ++ @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
94 ++ @ the enable path will be executed on v7+ only.
95 ++ mrc p15, 0, r1, c1, c0, 0 @ read SCTLR
96 ++ tst r1, #(1 << 5) @ CP15BEN bit set?
97 ++ bne 0f
98 ++ orr r1, r1, #(1 << 5) @ CP15 barrier instructions
99 ++ mcr p15, 0, r1, c1, c0, 0 @ write SCTLR
100 ++ ARM( .inst 0xf57ff06f @ v7+ isb )
101 ++ THUMB( isb )
102 ++
103 ++0: bl cache_clean_flush
104 + bl cache_off
105 +
106 + @ Set parameters for booting zImage according to boot protocol
107 +diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
108 +index 9ebe3e2403b1..c6b2e484d6c1 100644
109 +--- a/arch/mips/kernel/scall64-o32.S
110 ++++ b/arch/mips/kernel/scall64-o32.S
111 +@@ -125,7 +125,7 @@ trace_a_syscall:
112 + subu t1, v0, __NR_O32_Linux
113 + move a1, v0
114 + bnez t1, 1f /* __NR_syscall at offset 0 */
115 +- lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
116 ++ ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
117 + .set pop
118 +
119 + 1: jal syscall_trace_enter
120 +diff --git a/arch/x86/Makefile b/arch/x86/Makefile
121 +index c5290aecdf06..eb1f8f249dc3 100644
122 +--- a/arch/x86/Makefile
123 ++++ b/arch/x86/Makefile
124 +@@ -242,6 +242,15 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
125 + # Avoid indirect branches in kernel to deal with Spectre
126 + ifdef CONFIG_RETPOLINE
127 + KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
128 ++ # Additionally, avoid generating expensive indirect jumps which
129 ++ # are subject to retpolines for small number of switch cases.
130 ++ # clang turns off jump table generation by default when under
131 ++ # retpoline builds, however, gcc does not for x86. This has
132 ++ # only been fixed starting from gcc stable version 8.4.0 and
133 ++ # onwards, but not for older ones. See gcc bug #86952.
134 ++ ifndef CONFIG_CC_IS_CLANG
135 ++ KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
136 ++ endif
137 + endif
138 +
139 + archscripts: scripts_basic
140 +diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
141 +index b9281f2725a6..e0b0399ff7ec 100644
142 +--- a/drivers/android/binder_alloc.c
143 ++++ b/drivers/android/binder_alloc.c
144 +@@ -945,14 +945,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
145 +
146 + index = page - alloc->pages;
147 + page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
148 ++
149 ++ mm = alloc->vma_vm_mm;
150 ++ if (!mmget_not_zero(mm))
151 ++ goto err_mmget;
152 ++ if (!down_write_trylock(&mm->mmap_sem))
153 ++ goto err_down_write_mmap_sem_failed;
154 + vma = binder_alloc_get_vma(alloc);
155 +- if (vma) {
156 +- if (!mmget_not_zero(alloc->vma_vm_mm))
157 +- goto err_mmget;
158 +- mm = alloc->vma_vm_mm;
159 +- if (!down_write_trylock(&mm->mmap_sem))
160 +- goto err_down_write_mmap_sem_failed;
161 +- }
162 +
163 + list_lru_isolate(lru, item);
164 + spin_unlock(lock);
165 +@@ -965,10 +964,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
166 + PAGE_SIZE);
167 +
168 + trace_binder_unmap_user_end(alloc, index);
169 +-
170 +- up_write(&mm->mmap_sem);
171 +- mmput(mm);
172 + }
173 ++ up_write(&mm->mmap_sem);
174 ++ mmput(mm);
175 +
176 + trace_binder_unmap_kernel_start(alloc, index);
177 +
178 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
179 +index 24a3fb35614f..bd447de4a5b8 100644
180 +--- a/drivers/block/loop.c
181 ++++ b/drivers/block/loop.c
182 +@@ -82,7 +82,6 @@
183 +
184 + static DEFINE_IDR(loop_index_idr);
185 + static DEFINE_MUTEX(loop_index_mutex);
186 +-static DEFINE_MUTEX(loop_ctl_mutex);
187 +
188 + static int max_part;
189 + static int part_shift;
190 +@@ -1019,7 +1018,7 @@ static int loop_clr_fd(struct loop_device *lo)
191 + */
192 + if (atomic_read(&lo->lo_refcnt) > 1) {
193 + lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
194 +- mutex_unlock(&loop_ctl_mutex);
195 ++ mutex_unlock(&lo->lo_ctl_mutex);
196 + return 0;
197 + }
198 +
199 +@@ -1071,12 +1070,12 @@ static int loop_clr_fd(struct loop_device *lo)
200 + if (!part_shift)
201 + lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
202 + loop_unprepare_queue(lo);
203 +- mutex_unlock(&loop_ctl_mutex);
204 ++ mutex_unlock(&lo->lo_ctl_mutex);
205 + /*
206 +- * Need not hold loop_ctl_mutex to fput backing file.
207 +- * Calling fput holding loop_ctl_mutex triggers a circular
208 ++ * Need not hold lo_ctl_mutex to fput backing file.
209 ++ * Calling fput holding lo_ctl_mutex triggers a circular
210 + * lock dependency possibility warning as fput can take
211 +- * bd_mutex which is usually taken before loop_ctl_mutex.
212 ++ * bd_mutex which is usually taken before lo_ctl_mutex.
213 + */
214 + fput(filp);
215 + return 0;
216 +@@ -1195,7 +1194,7 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
217 + int ret;
218 +
219 + if (lo->lo_state != Lo_bound) {
220 +- mutex_unlock(&loop_ctl_mutex);
221 ++ mutex_unlock(&lo->lo_ctl_mutex);
222 + return -ENXIO;
223 + }
224 +
225 +@@ -1214,10 +1213,10 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
226 + lo->lo_encrypt_key_size);
227 + }
228 +
229 +- /* Drop loop_ctl_mutex while we call into the filesystem. */
230 ++ /* Drop lo_ctl_mutex while we call into the filesystem. */
231 + path = lo->lo_backing_file->f_path;
232 + path_get(&path);
233 +- mutex_unlock(&loop_ctl_mutex);
234 ++ mutex_unlock(&lo->lo_ctl_mutex);
235 + ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
236 + if (!ret) {
237 + info->lo_device = huge_encode_dev(stat.dev);
238 +@@ -1309,7 +1308,7 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
239 + int err;
240 +
241 + if (!arg) {
242 +- mutex_unlock(&loop_ctl_mutex);
243 ++ mutex_unlock(&lo->lo_ctl_mutex);
244 + return -EINVAL;
245 + }
246 + err = loop_get_status(lo, &info64);
247 +@@ -1327,7 +1326,7 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
248 + int err;
249 +
250 + if (!arg) {
251 +- mutex_unlock(&loop_ctl_mutex);
252 ++ mutex_unlock(&lo->lo_ctl_mutex);
253 + return -EINVAL;
254 + }
255 + err = loop_get_status(lo, &info64);
256 +@@ -1402,7 +1401,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
257 + struct loop_device *lo = bdev->bd_disk->private_data;
258 + int err;
259 +
260 +- mutex_lock_nested(&loop_ctl_mutex, 1);
261 ++ mutex_lock_nested(&lo->lo_ctl_mutex, 1);
262 + switch (cmd) {
263 + case LOOP_SET_FD:
264 + err = loop_set_fd(lo, mode, bdev, arg);
265 +@@ -1411,7 +1410,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
266 + err = loop_change_fd(lo, bdev, arg);
267 + break;
268 + case LOOP_CLR_FD:
269 +- /* loop_clr_fd would have unlocked loop_ctl_mutex on success */
270 ++ /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
271 + err = loop_clr_fd(lo);
272 + if (!err)
273 + goto out_unlocked;
274 +@@ -1424,7 +1423,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
275 + break;
276 + case LOOP_GET_STATUS:
277 + err = loop_get_status_old(lo, (struct loop_info __user *) arg);
278 +- /* loop_get_status() unlocks loop_ctl_mutex */
279 ++ /* loop_get_status() unlocks lo_ctl_mutex */
280 + goto out_unlocked;
281 + case LOOP_SET_STATUS64:
282 + err = -EPERM;
283 +@@ -1434,7 +1433,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
284 + break;
285 + case LOOP_GET_STATUS64:
286 + err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
287 +- /* loop_get_status() unlocks loop_ctl_mutex */
288 ++ /* loop_get_status() unlocks lo_ctl_mutex */
289 + goto out_unlocked;
290 + case LOOP_SET_CAPACITY:
291 + err = -EPERM;
292 +@@ -1454,7 +1453,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
293 + default:
294 + err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
295 + }
296 +- mutex_unlock(&loop_ctl_mutex);
297 ++ mutex_unlock(&lo->lo_ctl_mutex);
298 +
299 + out_unlocked:
300 + return err;
301 +@@ -1571,7 +1570,7 @@ loop_get_status_compat(struct loop_device *lo,
302 + int err;
303 +
304 + if (!arg) {
305 +- mutex_unlock(&loop_ctl_mutex);
306 ++ mutex_unlock(&lo->lo_ctl_mutex);
307 + return -EINVAL;
308 + }
309 + err = loop_get_status(lo, &info64);
310 +@@ -1588,16 +1587,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
311 +
312 + switch(cmd) {
313 + case LOOP_SET_STATUS:
314 +- mutex_lock(&loop_ctl_mutex);
315 ++ mutex_lock(&lo->lo_ctl_mutex);
316 + err = loop_set_status_compat(
317 + lo, (const struct compat_loop_info __user *) arg);
318 +- mutex_unlock(&loop_ctl_mutex);
319 ++ mutex_unlock(&lo->lo_ctl_mutex);
320 + break;
321 + case LOOP_GET_STATUS:
322 +- mutex_lock(&loop_ctl_mutex);
323 ++ mutex_lock(&lo->lo_ctl_mutex);
324 + err = loop_get_status_compat(
325 + lo, (struct compat_loop_info __user *) arg);
326 +- /* loop_get_status() unlocks loop_ctl_mutex */
327 ++ /* loop_get_status() unlocks lo_ctl_mutex */
328 + break;
329 + case LOOP_SET_CAPACITY:
330 + case LOOP_CLR_FD:
331 +@@ -1641,7 +1640,7 @@ static void __lo_release(struct loop_device *lo)
332 + if (atomic_dec_return(&lo->lo_refcnt))
333 + return;
334 +
335 +- mutex_lock(&loop_ctl_mutex);
336 ++ mutex_lock(&lo->lo_ctl_mutex);
337 + if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
338 + /*
339 + * In autoclear mode, stop the loop thread
340 +@@ -1659,7 +1658,7 @@ static void __lo_release(struct loop_device *lo)
341 + blk_mq_unfreeze_queue(lo->lo_queue);
342 + }
343 +
344 +- mutex_unlock(&loop_ctl_mutex);
345 ++ mutex_unlock(&lo->lo_ctl_mutex);
346 + }
347 +
348 + static void lo_release(struct gendisk *disk, fmode_t mode)
349 +@@ -1705,10 +1704,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
350 + struct loop_device *lo = ptr;
351 + struct loop_func_table *xfer = data;
352 +
353 +- mutex_lock(&loop_ctl_mutex);
354 ++ mutex_lock(&lo->lo_ctl_mutex);
355 + if (lo->lo_encryption == xfer)
356 + loop_release_xfer(lo);
357 +- mutex_unlock(&loop_ctl_mutex);
358 ++ mutex_unlock(&lo->lo_ctl_mutex);
359 + return 0;
360 + }
361 +
362 +@@ -1881,6 +1880,7 @@ static int loop_add(struct loop_device **l, int i)
363 + if (!part_shift)
364 + disk->flags |= GENHD_FL_NO_PART_SCAN;
365 + disk->flags |= GENHD_FL_EXT_DEVT;
366 ++ mutex_init(&lo->lo_ctl_mutex);
367 + atomic_set(&lo->lo_refcnt, 0);
368 + lo->lo_number = i;
369 + spin_lock_init(&lo->lo_lock);
370 +@@ -1993,19 +1993,19 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
371 + ret = loop_lookup(&lo, parm);
372 + if (ret < 0)
373 + break;
374 +- mutex_lock(&loop_ctl_mutex);
375 ++ mutex_lock(&lo->lo_ctl_mutex);
376 + if (lo->lo_state != Lo_unbound) {
377 + ret = -EBUSY;
378 +- mutex_unlock(&loop_ctl_mutex);
379 ++ mutex_unlock(&lo->lo_ctl_mutex);
380 + break;
381 + }
382 + if (atomic_read(&lo->lo_refcnt) > 0) {
383 + ret = -EBUSY;
384 +- mutex_unlock(&loop_ctl_mutex);
385 ++ mutex_unlock(&lo->lo_ctl_mutex);
386 + break;
387 + }
388 + lo->lo_disk->private_data = NULL;
389 +- mutex_unlock(&loop_ctl_mutex);
390 ++ mutex_unlock(&lo->lo_ctl_mutex);
391 + idr_remove(&loop_index_idr, lo->lo_number);
392 + loop_remove(lo);
393 + break;
394 +diff --git a/drivers/block/loop.h b/drivers/block/loop.h
395 +index b2251752452b..dfc54ceba410 100644
396 +--- a/drivers/block/loop.h
397 ++++ b/drivers/block/loop.h
398 +@@ -54,6 +54,7 @@ struct loop_device {
399 +
400 + spinlock_t lo_lock;
401 + int lo_state;
402 ++ struct mutex lo_ctl_mutex;
403 + struct kthread_worker worker;
404 + struct task_struct *worker_task;
405 + bool use_dio;
406 +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
407 +index a46776a84480..133178c9b2cf 100644
408 +--- a/drivers/block/zram/zram_drv.c
409 ++++ b/drivers/block/zram/zram_drv.c
410 +@@ -488,18 +488,18 @@ struct zram_work {
411 + struct zram *zram;
412 + unsigned long entry;
413 + struct bio *bio;
414 ++ struct bio_vec bvec;
415 + };
416 +
417 + #if PAGE_SIZE != 4096
418 + static void zram_sync_read(struct work_struct *work)
419 + {
420 +- struct bio_vec bvec;
421 + struct zram_work *zw = container_of(work, struct zram_work, work);
422 + struct zram *zram = zw->zram;
423 + unsigned long entry = zw->entry;
424 + struct bio *bio = zw->bio;
425 +
426 +- read_from_bdev_async(zram, &bvec, entry, bio);
427 ++ read_from_bdev_async(zram, &zw->bvec, entry, bio);
428 + }
429 +
430 + /*
431 +@@ -512,6 +512,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
432 + {
433 + struct zram_work work;
434 +
435 ++ work.bvec = *bvec;
436 + work.zram = zram;
437 + work.entry = entry;
438 + work.bio = bio;
439 +diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
440 +index 9d6ce5051d8f..77b126525dac 100644
441 +--- a/drivers/dma/sh/rcar-dmac.c
442 ++++ b/drivers/dma/sh/rcar-dmac.c
443 +@@ -1332,6 +1332,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
444 + enum dma_status status;
445 + unsigned long flags;
446 + unsigned int residue;
447 ++ bool cyclic;
448 +
449 + status = dma_cookie_status(chan, cookie, txstate);
450 + if (status == DMA_COMPLETE || !txstate)
451 +@@ -1339,10 +1340,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
452 +
453 + spin_lock_irqsave(&rchan->lock, flags);
454 + residue = rcar_dmac_chan_get_residue(rchan, cookie);
455 ++ cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
456 + spin_unlock_irqrestore(&rchan->lock, flags);
457 +
458 + /* if there's no residue, the cookie is complete */
459 +- if (!residue)
460 ++ if (!residue && !cyclic)
461 + return DMA_COMPLETE;
462 +
463 + dma_set_residue(txstate, residue);
464 +diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
465 +index 14eb8a064562..da2d309574ba 100644
466 +--- a/drivers/gpu/drm/i915/intel_fbdev.c
467 ++++ b/drivers/gpu/drm/i915/intel_fbdev.c
468 +@@ -326,8 +326,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
469 + bool *enabled, int width, int height)
470 + {
471 + struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
472 ++ unsigned long conn_configured, conn_seq, mask;
473 + unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
474 +- unsigned long conn_configured, conn_seq;
475 + int i, j;
476 + bool *save_enabled;
477 + bool fallback = true, ret = true;
478 +@@ -345,9 +345,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
479 + drm_modeset_backoff(&ctx);
480 +
481 + memcpy(save_enabled, enabled, count);
482 +- conn_seq = GENMASK(count - 1, 0);
483 ++ mask = GENMASK(count - 1, 0);
484 + conn_configured = 0;
485 + retry:
486 ++ conn_seq = conn_configured;
487 + for (i = 0; i < count; i++) {
488 + struct drm_fb_helper_connector *fb_conn;
489 + struct drm_connector *connector;
490 +@@ -360,8 +361,7 @@ retry:
491 + if (conn_configured & BIT(i))
492 + continue;
493 +
494 +- /* First pass, only consider tiled connectors */
495 +- if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
496 ++ if (conn_seq == 0 && !connector->has_tile)
497 + continue;
498 +
499 + if (connector->status == connector_status_connected)
500 +@@ -465,10 +465,8 @@ retry:
501 + conn_configured |= BIT(i);
502 + }
503 +
504 +- if (conn_configured != conn_seq) { /* repeat until no more are found */
505 +- conn_seq = conn_configured;
506 ++ if ((conn_configured & mask) != mask && conn_configured != conn_seq)
507 + goto retry;
508 +- }
509 +
510 + /*
511 + * If the BIOS didn't enable everything it could, fall back to have the
512 +diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
513 +index ce1e3b9e14c9..7747f160c740 100644
514 +--- a/drivers/gpu/drm/vc4/vc4_crtc.c
515 ++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
516 +@@ -867,7 +867,7 @@ static void
517 + vc4_crtc_reset(struct drm_crtc *crtc)
518 + {
519 + if (crtc->state)
520 +- __drm_atomic_helper_crtc_destroy_state(crtc->state);
521 ++ vc4_crtc_destroy_state(crtc, crtc->state);
522 +
523 + crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
524 + if (crtc->state)
525 +diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
526 +index bb27a3150563..2a3ae9006c58 100644
527 +--- a/drivers/hwtracing/intel_th/gth.c
528 ++++ b/drivers/hwtracing/intel_th/gth.c
529 +@@ -624,7 +624,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
530 + othdev->output.port = -1;
531 + othdev->output.active = false;
532 + gth->output[port].output = NULL;
533 +- for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
534 ++ for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
535 + if (gth->master[master] == port)
536 + gth->master[master] = -1;
537 + spin_unlock(&gth->gth_lock);
538 +diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
539 +index 524e6134642e..e7013d2d4f0e 100644
540 +--- a/drivers/infiniband/sw/rdmavt/mr.c
541 ++++ b/drivers/infiniband/sw/rdmavt/mr.c
542 +@@ -611,11 +611,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
543 + if (unlikely(mapped_segs == mr->mr.max_segs))
544 + return -ENOMEM;
545 +
546 +- if (mr->mr.length == 0) {
547 +- mr->mr.user_base = addr;
548 +- mr->mr.iova = addr;
549 +- }
550 +-
551 + m = mapped_segs / RVT_SEGSZ;
552 + n = mapped_segs % RVT_SEGSZ;
553 + mr->mr.map[m]->segs[n].vaddr = (void *)addr;
554 +@@ -633,17 +628,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
555 + * @sg_nents: number of entries in sg
556 + * @sg_offset: offset in bytes into sg
557 + *
558 ++ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
559 ++ *
560 + * Return: number of sg elements mapped to the memory region
561 + */
562 + int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
563 + int sg_nents, unsigned int *sg_offset)
564 + {
565 + struct rvt_mr *mr = to_imr(ibmr);
566 ++ int ret;
567 +
568 + mr->mr.length = 0;
569 + mr->mr.page_shift = PAGE_SHIFT;
570 +- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
571 +- rvt_set_page);
572 ++ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
573 ++ mr->mr.user_base = ibmr->iova;
574 ++ mr->mr.iova = ibmr->iova;
575 ++ mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
576 ++ mr->mr.length = (size_t)ibmr->length;
577 ++ return ret;
578 + }
579 +
580 + /**
581 +@@ -674,6 +676,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
582 + ibmr->rkey = key;
583 + mr->mr.lkey = key;
584 + mr->mr.access_flags = access;
585 ++ mr->mr.iova = ibmr->iova;
586 + atomic_set(&mr->mr.lkey_invalid, 0);
587 +
588 + return 0;
589 +diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
590 +index bc5e37f30ac1..bb63b8823d62 100644
591 +--- a/drivers/input/rmi4/rmi_f11.c
592 ++++ b/drivers/input/rmi4/rmi_f11.c
593 +@@ -1239,7 +1239,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
594 + }
595 +
596 + rc = f11_write_control_regs(fn, &f11->sens_query,
597 +- &f11->dev_controls, fn->fd.query_base_addr);
598 ++ &f11->dev_controls, fn->fd.control_base_addr);
599 + if (rc)
600 + dev_warn(&fn->dev, "Failed to write control registers\n");
601 +
602 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
603 +index 036379a23499..23f0f4eaaa2e 100644
604 +--- a/drivers/md/dm-integrity.c
605 ++++ b/drivers/md/dm-integrity.c
606 +@@ -2917,17 +2917,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
607 + goto bad;
608 + }
609 + ic->sectors_per_block = val >> SECTOR_SHIFT;
610 +- } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
611 ++ } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
612 + r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
613 + "Invalid internal_hash argument");
614 + if (r)
615 + goto bad;
616 +- } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
617 ++ } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
618 + r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
619 + "Invalid journal_crypt argument");
620 + if (r)
621 + goto bad;
622 +- } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
623 ++ } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
624 + r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
625 + "Invalid journal_mac argument");
626 + if (r)
627 +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
628 +index 103c0a742d03..fef0bff4a54b 100644
629 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
630 ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
631 +@@ -58,6 +58,8 @@ static int __init fm10k_init_module(void)
632 + /* create driver workqueue */
633 + fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
634 + fm10k_driver_name);
635 ++ if (!fm10k_workqueue)
636 ++ return -ENOMEM;
637 +
638 + fm10k_dbg_init();
639 +
640 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
641 +index d9db3ad3d765..26ad27b3f687 100644
642 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
643 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
644 +@@ -1622,7 +1622,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
645 + break;
646 + case MLX5_MODULE_ID_SFP:
647 + modinfo->type = ETH_MODULE_SFF_8472;
648 +- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
649 ++ modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
650 + break;
651 + default:
652 + netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
653 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
654 +index ccb6287aeeb7..1d2bb7fa68b1 100644
655 +--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
656 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
657 +@@ -392,10 +392,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
658 + size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
659 +
660 + i2c_addr = MLX5_I2C_ADDR_LOW;
661 +- if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
662 +- i2c_addr = MLX5_I2C_ADDR_HIGH;
663 +- offset -= MLX5_EEPROM_PAGE_LENGTH;
664 +- }
665 +
666 + MLX5_SET(mcia_reg, in, l, 0);
667 + MLX5_SET(mcia_reg, in, module, module_num);
668 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
669 +index 29d37355d8c6..ab09f9e43c79 100644
670 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
671 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
672 +@@ -2521,11 +2521,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
673 + if (err)
674 + return err;
675 +
676 ++ mlxsw_sp_port->link.autoneg = autoneg;
677 ++
678 + if (!netif_running(dev))
679 + return 0;
680 +
681 +- mlxsw_sp_port->link.autoneg = autoneg;
682 +-
683 + mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
684 + mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
685 +
686 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
687 +index f2429ec07b57..ecf3f8c1bc0e 100644
688 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
689 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
690 +@@ -2582,8 +2582,6 @@ static int stmmac_open(struct net_device *dev)
691 + struct stmmac_priv *priv = netdev_priv(dev);
692 + int ret;
693 +
694 +- stmmac_check_ether_addr(priv);
695 +-
696 + if (priv->hw->pcs != STMMAC_PCS_RGMII &&
697 + priv->hw->pcs != STMMAC_PCS_TBI &&
698 + priv->hw->pcs != STMMAC_PCS_RTBI) {
699 +@@ -4213,6 +4211,8 @@ int stmmac_dvr_probe(struct device *device,
700 + if (ret)
701 + goto error_hw_init;
702 +
703 ++ stmmac_check_ether_addr(priv);
704 ++
705 + /* Configure real RX and TX queues */
706 + netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
707 + netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
708 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
709 +index d819e8eaba12..cc1e887e47b5 100644
710 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
711 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
712 +@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
713 + },
714 + .driver_data = (void *)&galileo_stmmac_dmi_data,
715 + },
716 ++ /*
717 ++ * There are 2 types of SIMATIC IOT2000: IOT20202 and IOT2040.
718 ++ * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
719 ++ * has only one pci network device while other asset tags are
720 ++ * for IOT2040 which has two.
721 ++ */
722 + {
723 + .matches = {
724 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
725 +@@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
726 + {
727 + .matches = {
728 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
729 +- DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
730 +- "6ES7647-0AA00-1YA2"),
731 + },
732 + .driver_data = (void *)&iot2040_stmmac_dmi_data,
733 + },
734 +diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
735 +index f4e93f5fc204..ea90db3c7705 100644
736 +--- a/drivers/net/slip/slhc.c
737 ++++ b/drivers/net/slip/slhc.c
738 +@@ -153,7 +153,7 @@ out_fail:
739 + void
740 + slhc_free(struct slcompress *comp)
741 + {
742 +- if ( comp == NULLSLCOMPR )
743 ++ if ( IS_ERR_OR_NULL(comp) )
744 + return;
745 +
746 + if ( comp->tstate != NULLSLSTATE )
747 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
748 +index fea141e71705..e9a92ed5a308 100644
749 +--- a/drivers/net/team/team.c
750 ++++ b/drivers/net/team/team.c
751 +@@ -1157,6 +1157,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
752 + return -EINVAL;
753 + }
754 +
755 ++ if (netdev_has_upper_dev(dev, port_dev)) {
756 ++ netdev_err(dev, "Device %s is already an upper device of the team interface\n",
757 ++ portname);
758 ++ return -EBUSY;
759 ++ }
760 ++
761 + if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
762 + vlan_uses_dev(dev)) {
763 + netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
764 +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
765 +index 2f3dbf1c3c2d..79d2c0bf7870 100644
766 +--- a/drivers/usb/core/driver.c
767 ++++ b/drivers/usb/core/driver.c
768 +@@ -1891,14 +1891,11 @@ int usb_runtime_idle(struct device *dev)
769 + return -EBUSY;
770 + }
771 +
772 +-int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
773 ++static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
774 + {
775 + struct usb_hcd *hcd = bus_to_hcd(udev->bus);
776 + int ret = -EPERM;
777 +
778 +- if (enable && !udev->usb2_hw_lpm_allowed)
779 +- return 0;
780 +-
781 + if (hcd->driver->set_usb2_hw_lpm) {
782 + ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
783 + if (!ret)
784 +@@ -1908,6 +1905,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
785 + return ret;
786 + }
787 +
788 ++int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
789 ++{
790 ++ if (!udev->usb2_hw_lpm_capable ||
791 ++ !udev->usb2_hw_lpm_allowed ||
792 ++ udev->usb2_hw_lpm_enabled)
793 ++ return 0;
794 ++
795 ++ return usb_set_usb2_hardware_lpm(udev, 1);
796 ++}
797 ++
798 ++int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
799 ++{
800 ++ if (!udev->usb2_hw_lpm_enabled)
801 ++ return 0;
802 ++
803 ++ return usb_set_usb2_hardware_lpm(udev, 0);
804 ++}
805 ++
806 + #endif /* CONFIG_PM */
807 +
808 + struct bus_type usb_bus_type = {
809 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
810 +index 4a4e666a8e09..a9541525ea4f 100644
811 +--- a/drivers/usb/core/hub.c
812 ++++ b/drivers/usb/core/hub.c
813 +@@ -3174,8 +3174,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
814 + }
815 +
816 + /* disable USB2 hardware LPM */
817 +- if (udev->usb2_hw_lpm_enabled == 1)
818 +- usb_set_usb2_hardware_lpm(udev, 0);
819 ++ usb_disable_usb2_hardware_lpm(udev);
820 +
821 + if (usb_disable_ltm(udev)) {
822 + dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
823 +@@ -3213,8 +3212,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
824 + usb_enable_ltm(udev);
825 + err_ltm:
826 + /* Try to enable USB2 hardware LPM again */
827 +- if (udev->usb2_hw_lpm_capable == 1)
828 +- usb_set_usb2_hardware_lpm(udev, 1);
829 ++ usb_enable_usb2_hardware_lpm(udev);
830 +
831 + if (udev->do_remote_wakeup)
832 + (void) usb_disable_remote_wakeup(udev);
833 +@@ -3497,8 +3495,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
834 + hub_port_logical_disconnect(hub, port1);
835 + } else {
836 + /* Try to enable USB2 hardware LPM */
837 +- if (udev->usb2_hw_lpm_capable == 1)
838 +- usb_set_usb2_hardware_lpm(udev, 1);
839 ++ usb_enable_usb2_hardware_lpm(udev);
840 +
841 + /* Try to enable USB3 LTM */
842 + usb_enable_ltm(udev);
843 +@@ -4334,7 +4331,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
844 + if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
845 + connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
846 + udev->usb2_hw_lpm_allowed = 1;
847 +- usb_set_usb2_hardware_lpm(udev, 1);
848 ++ usb_enable_usb2_hardware_lpm(udev);
849 + }
850 + }
851 +
852 +@@ -5491,8 +5488,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
853 + /* Disable USB2 hardware LPM.
854 + * It will be re-enabled by the enumeration process.
855 + */
856 +- if (udev->usb2_hw_lpm_enabled == 1)
857 +- usb_set_usb2_hardware_lpm(udev, 0);
858 ++ usb_disable_usb2_hardware_lpm(udev);
859 +
860 + /* Disable LPM and LTM while we reset the device and reinstall the alt
861 + * settings. Device-initiated LPM settings, and system exit latency
862 +@@ -5602,7 +5598,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
863 +
864 + done:
865 + /* Now that the alt settings are re-installed, enable LTM and LPM. */
866 +- usb_set_usb2_hardware_lpm(udev, 1);
867 ++ usb_enable_usb2_hardware_lpm(udev);
868 + usb_unlocked_enable_lpm(udev);
869 + usb_enable_ltm(udev);
870 + usb_release_bos_descriptor(udev);
871 +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
872 +index 833ddd228e3a..1fe3c5d3be5f 100644
873 +--- a/drivers/usb/core/message.c
874 ++++ b/drivers/usb/core/message.c
875 +@@ -1182,8 +1182,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
876 + dev->actconfig->interface[i] = NULL;
877 + }
878 +
879 +- if (dev->usb2_hw_lpm_enabled == 1)
880 +- usb_set_usb2_hardware_lpm(dev, 0);
881 ++ usb_disable_usb2_hardware_lpm(dev);
882 + usb_unlocked_disable_lpm(dev);
883 + usb_disable_ltm(dev);
884 +
885 +diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
886 +index d930bfda4010..15c19863f7b3 100644
887 +--- a/drivers/usb/core/sysfs.c
888 ++++ b/drivers/usb/core/sysfs.c
889 +@@ -508,7 +508,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
890 +
891 + if (!ret) {
892 + udev->usb2_hw_lpm_allowed = value;
893 +- ret = usb_set_usb2_hardware_lpm(udev, value);
894 ++ if (value)
895 ++ ret = usb_enable_usb2_hardware_lpm(udev);
896 ++ else
897 ++ ret = usb_disable_usb2_hardware_lpm(udev);
898 + }
899 +
900 + usb_unlock_device(udev);
901 +diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
902 +index dc6949248823..1b5f346d93eb 100644
903 +--- a/drivers/usb/core/usb.h
904 ++++ b/drivers/usb/core/usb.h
905 +@@ -89,7 +89,8 @@ extern int usb_remote_wakeup(struct usb_device *dev);
906 + extern int usb_runtime_suspend(struct device *dev);
907 + extern int usb_runtime_resume(struct device *dev);
908 + extern int usb_runtime_idle(struct device *dev);
909 +-extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
910 ++extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
911 ++extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
912 +
913 + #else
914 +
915 +@@ -109,7 +110,12 @@ static inline int usb_autoresume_device(struct usb_device *udev)
916 + return 0;
917 + }
918 +
919 +-static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
920 ++static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
921 ++{
922 ++ return 0;
923 ++}
924 ++
925 ++static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
926 + {
927 + return 0;
928 + }
929 +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
930 +index 50eeb74ddc0a..f77a9b3370b5 100644
931 +--- a/drivers/vfio/vfio_iommu_type1.c
932 ++++ b/drivers/vfio/vfio_iommu_type1.c
933 +@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
934 + MODULE_PARM_DESC(disable_hugepages,
935 + "Disable VFIO IOMMU support for IOMMU hugepages.");
936 +
937 ++static unsigned int dma_entry_limit __read_mostly = U16_MAX;
938 ++module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
939 ++MODULE_PARM_DESC(dma_entry_limit,
940 ++ "Maximum number of user DMA mappings per container (65535).");
941 ++
942 + struct vfio_iommu {
943 + struct list_head domain_list;
944 + struct vfio_domain *external_domain; /* domain for external user */
945 + struct mutex lock;
946 + struct rb_root dma_list;
947 + struct blocking_notifier_head notifier;
948 ++ unsigned int dma_avail;
949 + bool v2;
950 + bool nesting;
951 + };
952 +@@ -732,6 +738,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
953 + vfio_unlink_dma(iommu, dma);
954 + put_task_struct(dma->task);
955 + kfree(dma);
956 ++ iommu->dma_avail++;
957 + }
958 +
959 + static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
960 +@@ -1003,12 +1010,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
961 + goto out_unlock;
962 + }
963 +
964 ++ if (!iommu->dma_avail) {
965 ++ ret = -ENOSPC;
966 ++ goto out_unlock;
967 ++ }
968 ++
969 + dma = kzalloc(sizeof(*dma), GFP_KERNEL);
970 + if (!dma) {
971 + ret = -ENOMEM;
972 + goto out_unlock;
973 + }
974 +
975 ++ iommu->dma_avail--;
976 + dma->iova = iova;
977 + dma->vaddr = vaddr;
978 + dma->prot = prot;
979 +@@ -1504,6 +1517,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
980 +
981 + INIT_LIST_HEAD(&iommu->domain_list);
982 + iommu->dma_list = RB_ROOT;
983 ++ iommu->dma_avail = dma_entry_limit;
984 + mutex_init(&iommu->lock);
985 + BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
986 +
987 +diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
988 +index 8a5266699b67..56e8fc896f6b 100644
989 +--- a/fs/ceph/dir.c
990 ++++ b/fs/ceph/dir.c
991 +@@ -1454,6 +1454,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
992 + unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
993 + {
994 + struct ceph_inode_info *dci = ceph_inode(dir);
995 ++ unsigned hash;
996 +
997 + switch (dci->i_dir_layout.dl_dir_hash) {
998 + case 0: /* for backward compat */
999 +@@ -1461,8 +1462,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
1000 + return dn->d_name.hash;
1001 +
1002 + default:
1003 +- return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
1004 ++ spin_lock(&dn->d_lock);
1005 ++ hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
1006 + dn->d_name.name, dn->d_name.len);
1007 ++ spin_unlock(&dn->d_lock);
1008 ++ return hash;
1009 + }
1010 + }
1011 +
1012 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
1013 +index a48984dd6426..e1ded4bd6115 100644
1014 +--- a/fs/ceph/mds_client.c
1015 ++++ b/fs/ceph/mds_client.c
1016 +@@ -1219,6 +1219,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1017 + list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1018 + ci->i_prealloc_cap_flush = NULL;
1019 + }
1020 ++
1021 ++ if (drop &&
1022 ++ ci->i_wrbuffer_ref_head == 0 &&
1023 ++ ci->i_wr_ref == 0 &&
1024 ++ ci->i_dirty_caps == 0 &&
1025 ++ ci->i_flushing_caps == 0) {
1026 ++ ceph_put_snap_context(ci->i_head_snapc);
1027 ++ ci->i_head_snapc = NULL;
1028 ++ }
1029 + }
1030 + spin_unlock(&ci->i_ceph_lock);
1031 + while (!list_empty(&to_remove)) {
1032 +@@ -1863,10 +1872,39 @@ retry:
1033 + return path;
1034 + }
1035 +
1036 ++/* Duplicate the dentry->d_name.name safely */
1037 ++static int clone_dentry_name(struct dentry *dentry, const char **ppath,
1038 ++ int *ppathlen)
1039 ++{
1040 ++ u32 len;
1041 ++ char *name;
1042 ++
1043 ++retry:
1044 ++ len = READ_ONCE(dentry->d_name.len);
1045 ++ name = kmalloc(len + 1, GFP_NOFS);
1046 ++ if (!name)
1047 ++ return -ENOMEM;
1048 ++
1049 ++ spin_lock(&dentry->d_lock);
1050 ++ if (dentry->d_name.len != len) {
1051 ++ spin_unlock(&dentry->d_lock);
1052 ++ kfree(name);
1053 ++ goto retry;
1054 ++ }
1055 ++ memcpy(name, dentry->d_name.name, len);
1056 ++ spin_unlock(&dentry->d_lock);
1057 ++
1058 ++ name[len] = '\0';
1059 ++ *ppath = name;
1060 ++ *ppathlen = len;
1061 ++ return 0;
1062 ++}
1063 ++
1064 + static int build_dentry_path(struct dentry *dentry, struct inode *dir,
1065 + const char **ppath, int *ppathlen, u64 *pino,
1066 +- int *pfreepath)
1067 ++ bool *pfreepath, bool parent_locked)
1068 + {
1069 ++ int ret;
1070 + char *path;
1071 +
1072 + rcu_read_lock();
1073 +@@ -1875,8 +1913,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
1074 + if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
1075 + *pino = ceph_ino(dir);
1076 + rcu_read_unlock();
1077 +- *ppath = dentry->d_name.name;
1078 +- *ppathlen = dentry->d_name.len;
1079 ++ if (parent_locked) {
1080 ++ *ppath = dentry->d_name.name;
1081 ++ *ppathlen = dentry->d_name.len;
1082 ++ } else {
1083 ++ ret = clone_dentry_name(dentry, ppath, ppathlen);
1084 ++ if (ret)
1085 ++ return ret;
1086 ++ *pfreepath = true;
1087 ++ }
1088 + return 0;
1089 + }
1090 + rcu_read_unlock();
1091 +@@ -1884,13 +1929,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
1092 + if (IS_ERR(path))
1093 + return PTR_ERR(path);
1094 + *ppath = path;
1095 +- *pfreepath = 1;
1096 ++ *pfreepath = true;
1097 + return 0;
1098 + }
1099 +
1100 + static int build_inode_path(struct inode *inode,
1101 + const char **ppath, int *ppathlen, u64 *pino,
1102 +- int *pfreepath)
1103 ++ bool *pfreepath)
1104 + {
1105 + struct dentry *dentry;
1106 + char *path;
1107 +@@ -1906,7 +1951,7 @@ static int build_inode_path(struct inode *inode,
1108 + if (IS_ERR(path))
1109 + return PTR_ERR(path);
1110 + *ppath = path;
1111 +- *pfreepath = 1;
1112 ++ *pfreepath = true;
1113 + return 0;
1114 + }
1115 +
1116 +@@ -1917,7 +1962,7 @@ static int build_inode_path(struct inode *inode,
1117 + static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1118 + struct inode *rdiri, const char *rpath,
1119 + u64 rino, const char **ppath, int *pathlen,
1120 +- u64 *ino, int *freepath)
1121 ++ u64 *ino, bool *freepath, bool parent_locked)
1122 + {
1123 + int r = 0;
1124 +
1125 +@@ -1927,7 +1972,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1126 + ceph_snap(rinode));
1127 + } else if (rdentry) {
1128 + r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
1129 +- freepath);
1130 ++ freepath, parent_locked);
1131 + dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1132 + *ppath);
1133 + } else if (rpath || rino) {
1134 +@@ -1953,7 +1998,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1135 + const char *path2 = NULL;
1136 + u64 ino1 = 0, ino2 = 0;
1137 + int pathlen1 = 0, pathlen2 = 0;
1138 +- int freepath1 = 0, freepath2 = 0;
1139 ++ bool freepath1 = false, freepath2 = false;
1140 + int len;
1141 + u16 releases;
1142 + void *p, *end;
1143 +@@ -1961,16 +2006,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1144 +
1145 + ret = set_request_path_attr(req->r_inode, req->r_dentry,
1146 + req->r_parent, req->r_path1, req->r_ino1.ino,
1147 +- &path1, &pathlen1, &ino1, &freepath1);
1148 ++ &path1, &pathlen1, &ino1, &freepath1,
1149 ++ test_bit(CEPH_MDS_R_PARENT_LOCKED,
1150 ++ &req->r_req_flags));
1151 + if (ret < 0) {
1152 + msg = ERR_PTR(ret);
1153 + goto out;
1154 + }
1155 +
1156 ++ /* If r_old_dentry is set, then assume that its parent is locked */
1157 + ret = set_request_path_attr(NULL, req->r_old_dentry,
1158 + req->r_old_dentry_dir,
1159 + req->r_path2, req->r_ino2.ino,
1160 +- &path2, &pathlen2, &ino2, &freepath2);
1161 ++ &path2, &pathlen2, &ino2, &freepath2, true);
1162 + if (ret < 0) {
1163 + msg = ERR_PTR(ret);
1164 + goto out_free1;
1165 +diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
1166 +index 9b6207c84b68..a7e763dac038 100644
1167 +--- a/fs/ceph/snap.c
1168 ++++ b/fs/ceph/snap.c
1169 +@@ -568,7 +568,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
1170 + old_snapc = NULL;
1171 +
1172 + update_snapc:
1173 +- if (ci->i_head_snapc) {
1174 ++ if (ci->i_wrbuffer_ref_head == 0 &&
1175 ++ ci->i_wr_ref == 0 &&
1176 ++ ci->i_dirty_caps == 0 &&
1177 ++ ci->i_flushing_caps == 0) {
1178 ++ ci->i_head_snapc = NULL;
1179 ++ } else {
1180 + ci->i_head_snapc = ceph_get_snap_context(new_snapc);
1181 + dout(" new snapc is %p\n", new_snapc);
1182 + }
1183 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
1184 +index 6fd4a6a75234..e7192ee7a89c 100644
1185 +--- a/fs/cifs/inode.c
1186 ++++ b/fs/cifs/inode.c
1187 +@@ -1730,6 +1730,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
1188 + if (rc == 0 || rc != -EBUSY)
1189 + goto do_rename_exit;
1190 +
1191 ++ /* Don't fall back to using SMB on SMB 2+ mount */
1192 ++ if (server->vals->protocol_id != 0)
1193 ++ goto do_rename_exit;
1194 ++
1195 + /* open-file renames don't work across directories */
1196 + if (to_dentry->d_parent != from_dentry->d_parent)
1197 + goto do_rename_exit;
1198 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
1199 +index 311761a6ef6d..6761e905cab0 100644
1200 +--- a/fs/ext4/xattr.c
1201 ++++ b/fs/ext4/xattr.c
1202 +@@ -828,6 +828,7 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
1203 + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
1204 + if (IS_ERR(bh)) {
1205 + ret = PTR_ERR(bh);
1206 ++ bh = NULL;
1207 + goto out;
1208 + }
1209 +
1210 +@@ -2905,6 +2906,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
1211 + if (error == -EIO)
1212 + EXT4_ERROR_INODE(inode, "block %llu read error",
1213 + EXT4_I(inode)->i_file_acl);
1214 ++ bh = NULL;
1215 + goto cleanup;
1216 + }
1217 + error = ext4_xattr_check_block(inode, bh);
1218 +@@ -3061,6 +3063,7 @@ ext4_xattr_block_cache_find(struct inode *inode,
1219 + if (IS_ERR(bh)) {
1220 + if (PTR_ERR(bh) == -ENOMEM)
1221 + return NULL;
1222 ++ bh = NULL;
1223 + EXT4_ERROR_INODE(inode, "block %lu read error",
1224 + (unsigned long)ce->e_value);
1225 + } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
1226 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
1227 +index 77d8d03344c8..f464f8d9060c 100644
1228 +--- a/fs/nfs/super.c
1229 ++++ b/fs/nfs/super.c
1230 +@@ -2044,7 +2044,8 @@ static int nfs23_validate_mount_data(void *options,
1231 + memcpy(sap, &data->addr, sizeof(data->addr));
1232 + args->nfs_server.addrlen = sizeof(data->addr);
1233 + args->nfs_server.port = ntohs(data->addr.sin_port);
1234 +- if (!nfs_verify_server_address(sap))
1235 ++ if (sap->sa_family != AF_INET ||
1236 ++ !nfs_verify_server_address(sap))
1237 + goto out_no_address;
1238 +
1239 + if (!(data->flags & NFS_MOUNT_TCP))
1240 +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
1241 +index 49b0a9e7ff18..80aeb19b176b 100644
1242 +--- a/fs/nfsd/nfs4callback.c
1243 ++++ b/fs/nfsd/nfs4callback.c
1244 +@@ -939,8 +939,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
1245 + cb->cb_seq_status = 1;
1246 + cb->cb_status = 0;
1247 + if (minorversion) {
1248 +- if (!nfsd41_cb_get_slot(clp, task))
1249 ++ if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
1250 + return;
1251 ++ cb->cb_holds_slot = true;
1252 + }
1253 + rpc_call_start(task);
1254 + }
1255 +@@ -967,6 +968,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
1256 + return true;
1257 + }
1258 +
1259 ++ if (!cb->cb_holds_slot)
1260 ++ goto need_restart;
1261 ++
1262 + switch (cb->cb_seq_status) {
1263 + case 0:
1264 + /*
1265 +@@ -1004,6 +1008,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
1266 + cb->cb_seq_status);
1267 + }
1268 +
1269 ++ cb->cb_holds_slot = false;
1270 + clear_bit(0, &clp->cl_cb_slot_busy);
1271 + rpc_wake_up_next(&clp->cl_cb_waitq);
1272 + dprintk("%s: freed slot, new seqid=%d\n", __func__,
1273 +@@ -1211,6 +1216,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
1274 + cb->cb_seq_status = 1;
1275 + cb->cb_status = 0;
1276 + cb->cb_need_restart = false;
1277 ++ cb->cb_holds_slot = false;
1278 + }
1279 +
1280 + void nfsd4_run_cb(struct nfsd4_callback *cb)
1281 +diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
1282 +index 86aa92d200e1..133d8bf62a5c 100644
1283 +--- a/fs/nfsd/state.h
1284 ++++ b/fs/nfsd/state.h
1285 +@@ -69,6 +69,7 @@ struct nfsd4_callback {
1286 + int cb_seq_status;
1287 + int cb_status;
1288 + bool cb_need_restart;
1289 ++ bool cb_holds_slot;
1290 + };
1291 +
1292 + struct nfsd4_callback_ops {
1293 +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
1294 +index 8d5422bb9c1a..555698ddb943 100644
1295 +--- a/fs/proc/proc_sysctl.c
1296 ++++ b/fs/proc/proc_sysctl.c
1297 +@@ -1620,9 +1620,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
1298 + if (--header->nreg)
1299 + return;
1300 +
1301 +- if (parent)
1302 ++ if (parent) {
1303 + put_links(header);
1304 +- start_unregistering(header);
1305 ++ start_unregistering(header);
1306 ++ }
1307 ++
1308 + if (!--header->count)
1309 + kfree_rcu(header, rcu);
1310 +
1311 +diff --git a/fs/splice.c b/fs/splice.c
1312 +index 00d2f142dcf9..a598d444abe1 100644
1313 +--- a/fs/splice.c
1314 ++++ b/fs/splice.c
1315 +@@ -332,8 +332,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = {
1316 + .get = generic_pipe_buf_get,
1317 + };
1318 +
1319 +-static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
1320 +- struct pipe_buffer *buf)
1321 ++int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
1322 ++ struct pipe_buffer *buf)
1323 + {
1324 + return 1;
1325 + }
1326 +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
1327 +index befdcd304b3d..2dcf6e81b2e2 100644
1328 +--- a/include/linux/pipe_fs_i.h
1329 ++++ b/include/linux/pipe_fs_i.h
1330 +@@ -182,6 +182,7 @@ void free_pipe_info(struct pipe_inode_info *);
1331 + void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
1332 + int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
1333 + int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
1334 ++int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
1335 + void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
1336 + void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
1337 +
1338 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
1339 +index b2589c7e9439..22770168bff8 100644
1340 +--- a/kernel/sched/deadline.c
1341 ++++ b/kernel/sched/deadline.c
1342 +@@ -217,7 +217,6 @@ static void task_non_contending(struct task_struct *p)
1343 + if (dl_se->dl_runtime == 0)
1344 + return;
1345 +
1346 +- WARN_ON(hrtimer_active(&dl_se->inactive_timer));
1347 + WARN_ON(dl_se->dl_non_contending);
1348 +
1349 + zerolag_time = dl_se->deadline -
1350 +@@ -234,7 +233,7 @@ static void task_non_contending(struct task_struct *p)
1351 + * If the "0-lag time" already passed, decrease the active
1352 + * utilization now, instead of starting a timer
1353 + */
1354 +- if (zerolag_time < 0) {
1355 ++ if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
1356 + if (dl_task(p))
1357 + sub_running_bw(dl_se->dl_bw, dl_rq);
1358 + if (!dl_task(p) || p->state == TASK_DEAD) {
1359 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1360 +index a5d163903835..af7de1f9906c 100644
1361 +--- a/kernel/sched/fair.c
1362 ++++ b/kernel/sched/fair.c
1363 +@@ -2026,6 +2026,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1364 + if (p->last_task_numa_placement) {
1365 + delta = runtime - p->last_sum_exec_runtime;
1366 + *period = now - p->last_task_numa_placement;
1367 ++
1368 ++ /* Avoid time going backwards, prevent potential divide error: */
1369 ++ if (unlikely((s64)*period < 0))
1370 ++ *period = 0;
1371 + } else {
1372 + delta = p->se.avg.load_sum / p->se.load.weight;
1373 + *period = LOAD_AVG_MAX;
1374 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
1375 +index 5f7f4f07499f..8123a8b53c54 100644
1376 +--- a/kernel/trace/ring_buffer.c
1377 ++++ b/kernel/trace/ring_buffer.c
1378 +@@ -700,7 +700,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
1379 +
1380 + preempt_disable_notrace();
1381 + time = rb_time_stamp(buffer);
1382 +- preempt_enable_no_resched_notrace();
1383 ++ preempt_enable_notrace();
1384 +
1385 + return time;
1386 + }
1387 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1388 +index ffddb5ac255c..591be15404a1 100644
1389 +--- a/kernel/trace/trace.c
1390 ++++ b/kernel/trace/trace.c
1391 +@@ -494,8 +494,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
1392 + * not modified.
1393 + */
1394 + pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
1395 +- if (!pid_list)
1396 ++ if (!pid_list) {
1397 ++ trace_parser_put(&parser);
1398 + return -ENOMEM;
1399 ++ }
1400 +
1401 + pid_list->pid_max = READ_ONCE(pid_max);
1402 +
1403 +@@ -505,6 +507,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
1404 +
1405 + pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
1406 + if (!pid_list->pids) {
1407 ++ trace_parser_put(&parser);
1408 + kfree(pid_list);
1409 + return -ENOMEM;
1410 + }
1411 +@@ -6716,19 +6719,23 @@ struct buffer_ref {
1412 + struct ring_buffer *buffer;
1413 + void *page;
1414 + int cpu;
1415 +- int ref;
1416 ++ refcount_t refcount;
1417 + };
1418 +
1419 ++static void buffer_ref_release(struct buffer_ref *ref)
1420 ++{
1421 ++ if (!refcount_dec_and_test(&ref->refcount))
1422 ++ return;
1423 ++ ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
1424 ++ kfree(ref);
1425 ++}
1426 ++
1427 + static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
1428 + struct pipe_buffer *buf)
1429 + {
1430 + struct buffer_ref *ref = (struct buffer_ref *)buf->private;
1431 +
1432 +- if (--ref->ref)
1433 +- return;
1434 +-
1435 +- ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
1436 +- kfree(ref);
1437 ++ buffer_ref_release(ref);
1438 + buf->private = 0;
1439 + }
1440 +
1441 +@@ -6737,7 +6744,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
1442 + {
1443 + struct buffer_ref *ref = (struct buffer_ref *)buf->private;
1444 +
1445 +- ref->ref++;
1446 ++ refcount_inc(&ref->refcount);
1447 + }
1448 +
1449 + /* Pipe buffer operations for a buffer. */
1450 +@@ -6745,7 +6752,7 @@ static const struct pipe_buf_operations buffer_pipe_buf_ops = {
1451 + .can_merge = 0,
1452 + .confirm = generic_pipe_buf_confirm,
1453 + .release = buffer_pipe_buf_release,
1454 +- .steal = generic_pipe_buf_steal,
1455 ++ .steal = generic_pipe_buf_nosteal,
1456 + .get = buffer_pipe_buf_get,
1457 + };
1458 +
1459 +@@ -6758,11 +6765,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1460 + struct buffer_ref *ref =
1461 + (struct buffer_ref *)spd->partial[i].private;
1462 +
1463 +- if (--ref->ref)
1464 +- return;
1465 +-
1466 +- ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
1467 +- kfree(ref);
1468 ++ buffer_ref_release(ref);
1469 + spd->partial[i].private = 0;
1470 + }
1471 +
1472 +@@ -6817,7 +6820,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
1473 + break;
1474 + }
1475 +
1476 +- ref->ref = 1;
1477 ++ refcount_set(&ref->refcount, 1);
1478 + ref->buffer = iter->trace_buffer->buffer;
1479 + ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
1480 + if (IS_ERR(ref->page)) {
1481 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
1482 +index 62d0e25c054c..131d5871f8c9 100644
1483 +--- a/lib/Kconfig.debug
1484 ++++ b/lib/Kconfig.debug
1485 +@@ -1884,6 +1884,7 @@ config TEST_KMOD
1486 + depends on m
1487 + depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
1488 + depends on NETDEVICES && NET_CORE && INET # for TUN
1489 ++ depends on BLOCK
1490 + select TEST_LKM
1491 + select XFS_FS
1492 + select TUN
1493 +diff --git a/mm/memory.c b/mm/memory.c
1494 +index fb9f7737c1ff..f99b64ca1303 100644
1495 +--- a/mm/memory.c
1496 ++++ b/mm/memory.c
1497 +@@ -1804,10 +1804,15 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1498 + * in may not match the PFN we have mapped if the
1499 + * mapped PFN is a writeable COW page. In the mkwrite
1500 + * case we are creating a writable PTE for a shared
1501 +- * mapping and we expect the PFNs to match.
1502 ++ * mapping and we expect the PFNs to match. If they
1503 ++ * don't match, we are likely racing with block
1504 ++ * allocation and mapping invalidation so just skip the
1505 ++ * update.
1506 + */
1507 +- if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn)))
1508 ++ if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
1509 ++ WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
1510 + goto out_unlock;
1511 ++ }
1512 + entry = *pte;
1513 + goto out_mkwrite;
1514 + } else
1515 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
1516 +index 38b3309edba8..b967bd51bf1f 100644
1517 +--- a/net/bridge/netfilter/ebtables.c
1518 ++++ b/net/bridge/netfilter/ebtables.c
1519 +@@ -2030,7 +2030,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1520 + if (match_kern)
1521 + match_kern->match_size = ret;
1522 +
1523 +- if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
1524 ++ /* rule should have no remaining data after target */
1525 ++ if (type == EBT_COMPAT_TARGET && size_left)
1526 + return -EINVAL;
1527 +
1528 + match32 = (struct compat_ebt_entry_mwt *) buf;
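The ebtables hunk above downgrades a WARN_ON() to a plain error return because the condition, leftover bytes after a compat target, is reachable from userspace with a malformed ruleset; WARN is reserved for genuine kernel invariants, since a user-triggerable WARN means log flooding or a crash under panic_on_warn. A hedged sketch of the convention, with a hypothetical parse_rule():

        #include <linux/errno.h>
        #include <linux/types.h>

        /* Hypothetical validator for a user-supplied rule: trailing garbage
         * is expected hostile input, not a broken kernel invariant. */
        static int parse_rule(size_t size_left, bool is_target)
        {
                /* Was the moral equivalent of:
                 *     if (WARN_ON(is_target && size_left)) return -EINVAL;
                 * which lets userspace spam the log or, with panic_on_warn,
                 * crash the box. Reject quietly instead. */
                if (is_target && size_left)
                        return -EINVAL;
                return 0;
        }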
1529 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1530 +index c64f062d6323..6a7e187dd0a9 100644
1531 +--- a/net/ipv4/route.c
1532 ++++ b/net/ipv4/route.c
1533 +@@ -1192,25 +1192,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1534 + return dst;
1535 + }
1536 +
1537 +-static void ipv4_link_failure(struct sk_buff *skb)
1538 ++static void ipv4_send_dest_unreach(struct sk_buff *skb)
1539 + {
1540 + struct ip_options opt;
1541 +- struct rtable *rt;
1542 + int res;
1543 +
1544 + /* Recompile ip options since IPCB may not be valid anymore.
1545 ++ * Also check we have a reasonable ipv4 header.
1546 + */
1547 +- memset(&opt, 0, sizeof(opt));
1548 +- opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
1549 ++ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
1550 ++ ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
1551 ++ return;
1552 +
1553 +- rcu_read_lock();
1554 +- res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
1555 +- rcu_read_unlock();
1556 ++ memset(&opt, 0, sizeof(opt));
1557 ++ if (ip_hdr(skb)->ihl > 5) {
1558 ++ if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
1559 ++ return;
1560 ++ opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
1561 +
1562 +- if (res)
1563 +- return;
1564 ++ rcu_read_lock();
1565 ++ res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
1566 ++ rcu_read_unlock();
1567 +
1568 ++ if (res)
1569 ++ return;
1570 ++ }
1571 + __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
1572 ++}
1573 ++
1574 ++static void ipv4_link_failure(struct sk_buff *skb)
1575 ++{
1576 ++ struct rtable *rt;
1577 ++
1578 ++ ipv4_send_dest_unreach(skb);
1579 +
1580 + rt = skb_rtable(skb);
1581 + if (rt)
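The route.c change above splits destination-unreachable generation into ipv4_send_dest_unreach() and, crucially, validates the IPv4 header before recompiling options: ipv4_link_failure() can run on packets whose IP header is no longer trustworthy, and the old code read ihl unconditionally. A hedged standalone sketch of the pull-then-validate pattern it uses:

        #include <linux/ip.h>
        #include <linux/skbuff.h>

        /* Returns true if skb carries a plausible IPv4 header whose full
         * option area is linear; callers must re-fetch ip_hdr(skb) after
         * this, since pulling may reallocate the header. */
        static bool ipv4_header_ok(struct sk_buff *skb)
        {
                const struct iphdr *iph;

                /* Fixed 20-byte header present and linear? */
                if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
                        return false;

                iph = ip_hdr(skb);
                if (iph->version != 4 || iph->ihl < 5)  /* ihl: 32-bit words */
                        return false;

                /* Options present: pull the whole header before parsing. */
                if (iph->ihl > 5 && !pskb_network_may_pull(skb, iph->ihl * 4))
                        return false;

                return true;
        }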
1582 +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
1583 +index d82e8344fc54..e8caab8e2f5c 100644
1584 +--- a/net/ipv4/sysctl_net_ipv4.c
1585 ++++ b/net/ipv4/sysctl_net_ipv4.c
1586 +@@ -45,6 +45,7 @@ static int tcp_syn_retries_min = 1;
1587 + static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
1588 + static int ip_ping_group_range_min[] = { 0, 0 };
1589 + static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
1590 ++static int one_day_secs = 24 * 3600;
1591 +
1592 + /* obsolete */
1593 + static int sysctl_tcp_low_latency __read_mostly;
1594 +@@ -552,7 +553,9 @@ static struct ctl_table ipv4_table[] = {
1595 + .data = &sysctl_tcp_min_rtt_wlen,
1596 + .maxlen = sizeof(int),
1597 + .mode = 0644,
1598 +- .proc_handler = proc_dointvec
1599 ++ .proc_handler = proc_dointvec_minmax,
1600 ++ .extra1 = &zero,
1601 ++ .extra2 = &one_day_secs
1602 + },
1603 + {
1604 + .procname = "tcp_low_latency",
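The sysctl hunk above moves tcp_min_rtt_wlen from proc_dointvec to proc_dointvec_minmax, clamping writes to 0..86400 seconds (matching the ip-sysctl.txt note earlier in this patch); an unbounded value could overflow the filter's internal time conversions. A generic hedged sketch of a bounded integer sysctl, with a hypothetical my_knob:

        #include <linux/sysctl.h>

        static int zero;
        static int one_day_secs = 24 * 3600;
        static int my_knob = 300;

        /* proc_dointvec_minmax rejects writes outside [*extra1, *extra2]
         * with -EINVAL instead of storing them; register the table via
         * register_net_sysctl() or similar. */
        static struct ctl_table my_table[] = {
                {
                        .procname     = "my_knob_secs",
                        .data         = &my_knob,
                        .maxlen       = sizeof(int),
                        .mode         = 0644,
                        .proc_handler = proc_dointvec_minmax,
                        .extra1       = &zero,
                        .extra2       = &one_day_secs,
                },
                { }
        };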
1605 +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
1606 +index 56dd5ce6274f..6d7608b88f66 100644
1607 +--- a/net/netfilter/ipvs/ip_vs_ctl.c
1608 ++++ b/net/netfilter/ipvs/ip_vs_ctl.c
1609 +@@ -889,12 +889,13 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
1610 + {
1611 + struct ip_vs_dest *dest;
1612 + unsigned int atype, i;
1613 +- int ret = 0;
1614 +
1615 + EnterFunction(2);
1616 +
1617 + #ifdef CONFIG_IP_VS_IPV6
1618 + if (udest->af == AF_INET6) {
1619 ++ int ret;
1620 ++
1621 + atype = ipv6_addr_type(&udest->addr.in6);
1622 + if ((!(atype & IPV6_ADDR_UNICAST) ||
1623 + atype & IPV6_ADDR_LINKLOCAL) &&
1624 +diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
1625 +index 86ef907067bb..353b59d3bd44 100644
1626 +--- a/net/rds/ib_fmr.c
1627 ++++ b/net/rds/ib_fmr.c
1628 +@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
1629 + else
1630 + pool = rds_ibdev->mr_1m_pool;
1631 +
1632 ++ if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
1633 ++ queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
1634 ++
1635 ++ /* Switch pools if one of the pools is reaching the upper limit */
1636 ++ if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
1637 ++ if (pool->pool_type == RDS_IB_MR_8K_POOL)
1638 ++ pool = rds_ibdev->mr_1m_pool;
1639 ++ else
1640 ++ pool = rds_ibdev->mr_8k_pool;
1641 ++ }
1642 ++
1643 + ibmr = rds_ib_try_reuse_ibmr(pool);
1644 + if (ibmr)
1645 + return ibmr;
1646 +diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
1647 +index 9a3c54e659e9..fe5d2e8a95d9 100644
1648 +--- a/net/rds/ib_rdma.c
1649 ++++ b/net/rds/ib_rdma.c
1650 +@@ -442,9 +442,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
1651 + struct rds_ib_mr *ibmr = NULL;
1652 + int iter = 0;
1653 +
1654 +- if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
1655 +- queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
1656 +-
1657 + while (1) {
1658 + ibmr = rds_ib_reuse_mr(pool);
1659 + if (ibmr)
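The two RDS hunks above move the dirty-MR flush trigger out of rds_ib_try_reuse_ibmr() into rds_ib_alloc_fmr() and let an allocation fail over between the 8K and 1M MR pools when its first choice is roughly 90% dirty, so one saturated pool no longer stalls allocations while the other sits idle. The threshold logic, sketched with a hypothetical mr_pool:

        #include <linux/atomic.h>
        #include <linux/workqueue.h>

        /* Hypothetical pool with a dirty counter; the 10%/90% thresholds
         * mirror the flush and failover points used above. */
        struct mr_pool {
                atomic_t dirty_count;
                unsigned long max_items;
                struct delayed_work flush_worker;
        };

        static struct mr_pool *pick_pool(struct mr_pool *primary,
                                         struct mr_pool *fallback,
                                         struct workqueue_struct *wq)
        {
                /* Kick reclaim early, before allocation has to wait. */
                if (atomic_read(&primary->dirty_count) >= primary->max_items / 10)
                        queue_delayed_work(wq, &primary->flush_worker, 10);

                /* Near-full primary: borrow from the other pool instead
                 * of spinning on an exhausted free list. */
                if (atomic_read(&primary->dirty_count) >= primary->max_items * 9 / 10)
                        return fallback;
                return primary;
        }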
1660 +diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
1661 +index 4a9729257023..6a5c4992cf61 100644
1662 +--- a/net/rose/af_rose.c
1663 ++++ b/net/rose/af_rose.c
1664 +@@ -318,9 +318,11 @@ void rose_destroy_socket(struct sock *);
1665 + /*
1666 + * Handler for deferred kills.
1667 + */
1668 +-static void rose_destroy_timer(unsigned long data)
1669 ++static void rose_destroy_timer(struct timer_list *t)
1670 + {
1671 +- rose_destroy_socket((struct sock *)data);
1672 ++ struct sock *sk = from_timer(sk, t, sk_timer);
1673 ++
1674 ++ rose_destroy_socket(sk);
1675 + }
1676 +
1677 + /*
1678 +@@ -353,8 +355,7 @@ void rose_destroy_socket(struct sock *sk)
1679 +
1680 + if (sk_has_allocations(sk)) {
1681 + /* Defer: outstanding buffers */
1682 +- setup_timer(&sk->sk_timer, rose_destroy_timer,
1683 +- (unsigned long)sk);
1684 ++ timer_setup(&sk->sk_timer, rose_destroy_timer, 0);
1685 + sk->sk_timer.expires = jiffies + 10 * HZ;
1686 + add_timer(&sk->sk_timer);
1687 + } else
1688 +@@ -538,8 +539,8 @@ static int rose_create(struct net *net, struct socket *sock, int protocol,
1689 + sock->ops = &rose_proto_ops;
1690 + sk->sk_protocol = protocol;
1691 +
1692 +- init_timer(&rose->timer);
1693 +- init_timer(&rose->idletimer);
1694 ++ timer_setup(&rose->timer, NULL, 0);
1695 ++ timer_setup(&rose->idletimer, NULL, 0);
1696 +
1697 + rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
1698 + rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
1699 +@@ -582,8 +583,8 @@ static struct sock *rose_make_new(struct sock *osk)
1700 + sk->sk_state = TCP_ESTABLISHED;
1701 + sock_copy_flags(sk, osk);
1702 +
1703 +- init_timer(&rose->timer);
1704 +- init_timer(&rose->idletimer);
1705 ++ timer_setup(&rose->timer, NULL, 0);
1706 ++ timer_setup(&rose->idletimer, NULL, 0);
1707 +
1708 + orose = rose_sk(osk);
1709 + rose->t1 = orose->t1;
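The af_rose.c hunks above (and the rose_link.c/rose_timer.c ones that follow) are the 4.14-era timer API conversion: timer_setup() replaces init_timer()/setup_timer(), callbacks take a struct timer_list * instead of an unsigned long cookie, and from_timer() recovers the enclosing object via container_of(); the TIMER_FUNC_TYPE casts below are the transitional form of the same change. A hedged standalone sketch with a hypothetical my_conn:

        #include <linux/timer.h>

        struct my_conn {
                int state;
                struct timer_list retry_timer;
        };

        /* New-style callback: gets the timer itself; from_timer() is
         * container_of() keyed on the member name, replacing the old
         * (unsigned long)data cookie and its cast. */
        static void my_conn_retry(struct timer_list *t)
        {
                struct my_conn *conn = from_timer(conn, t, retry_timer);

                conn->state++;          /* ... retransmit work ... */
        }

        static void my_conn_init(struct my_conn *conn)
        {
                /* Replaces init_timer() plus manual .data/.function setup. */
                timer_setup(&conn->retry_timer, my_conn_retry, 0);
                mod_timer(&conn->retry_timer, jiffies + 10 * HZ);
        }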
1710 +diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
1711 +index c76638cc2cd5..cda4c6678ef1 100644
1712 +--- a/net/rose/rose_link.c
1713 ++++ b/net/rose/rose_link.c
1714 +@@ -27,8 +27,8 @@
1715 + #include <linux/interrupt.h>
1716 + #include <net/rose.h>
1717 +
1718 +-static void rose_ftimer_expiry(unsigned long);
1719 +-static void rose_t0timer_expiry(unsigned long);
1720 ++static void rose_ftimer_expiry(struct timer_list *);
1721 ++static void rose_t0timer_expiry(struct timer_list *);
1722 +
1723 + static void rose_transmit_restart_confirmation(struct rose_neigh *neigh);
1724 + static void rose_transmit_restart_request(struct rose_neigh *neigh);
1725 +@@ -37,8 +37,7 @@ void rose_start_ftimer(struct rose_neigh *neigh)
1726 + {
1727 + del_timer(&neigh->ftimer);
1728 +
1729 +- neigh->ftimer.data = (unsigned long)neigh;
1730 +- neigh->ftimer.function = &rose_ftimer_expiry;
1731 ++ neigh->ftimer.function = (TIMER_FUNC_TYPE)rose_ftimer_expiry;
1732 + neigh->ftimer.expires =
1733 + jiffies + msecs_to_jiffies(sysctl_rose_link_fail_timeout);
1734 +
1735 +@@ -49,8 +48,7 @@ static void rose_start_t0timer(struct rose_neigh *neigh)
1736 + {
1737 + del_timer(&neigh->t0timer);
1738 +
1739 +- neigh->t0timer.data = (unsigned long)neigh;
1740 +- neigh->t0timer.function = &rose_t0timer_expiry;
1741 ++ neigh->t0timer.function = (TIMER_FUNC_TYPE)rose_t0timer_expiry;
1742 + neigh->t0timer.expires =
1743 + jiffies + msecs_to_jiffies(sysctl_rose_restart_request_timeout);
1744 +
1745 +@@ -77,13 +75,13 @@ static int rose_t0timer_running(struct rose_neigh *neigh)
1746 + return timer_pending(&neigh->t0timer);
1747 + }
1748 +
1749 +-static void rose_ftimer_expiry(unsigned long param)
1750 ++static void rose_ftimer_expiry(struct timer_list *t)
1751 + {
1752 + }
1753 +
1754 +-static void rose_t0timer_expiry(unsigned long param)
1755 ++static void rose_t0timer_expiry(struct timer_list *t)
1756 + {
1757 +- struct rose_neigh *neigh = (struct rose_neigh *)param;
1758 ++ struct rose_neigh *neigh = from_timer(neigh, t, t0timer);
1759 +
1760 + rose_transmit_restart_request(neigh);
1761 +
1762 +diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
1763 +index 344456206b70..094a6621f8e8 100644
1764 +--- a/net/rose/rose_loopback.c
1765 ++++ b/net/rose/rose_loopback.c
1766 +@@ -16,15 +16,17 @@
1767 + #include <linux/init.h>
1768 +
1769 + static struct sk_buff_head loopback_queue;
1770 ++#define ROSE_LOOPBACK_LIMIT 1000
1771 + static struct timer_list loopback_timer;
1772 +
1773 + static void rose_set_loopback_timer(void);
1774 ++static void rose_loopback_timer(struct timer_list *unused);
1775 +
1776 + void rose_loopback_init(void)
1777 + {
1778 + skb_queue_head_init(&loopback_queue);
1779 +
1780 +- init_timer(&loopback_timer);
1781 ++ timer_setup(&loopback_timer, rose_loopback_timer, 0);
1782 + }
1783 +
1784 + static int rose_loopback_running(void)
1785 +@@ -34,36 +36,30 @@ static int rose_loopback_running(void)
1786 +
1787 + int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
1788 + {
1789 +- struct sk_buff *skbn;
1790 ++ struct sk_buff *skbn = NULL;
1791 +
1792 +- skbn = skb_clone(skb, GFP_ATOMIC);
1793 ++ if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
1794 ++ skbn = skb_clone(skb, GFP_ATOMIC);
1795 +
1796 +- kfree_skb(skb);
1797 +-
1798 +- if (skbn != NULL) {
1799 ++ if (skbn) {
1800 ++ consume_skb(skb);
1801 + skb_queue_tail(&loopback_queue, skbn);
1802 +
1803 + if (!rose_loopback_running())
1804 + rose_set_loopback_timer();
1805 ++ } else {
1806 ++ kfree_skb(skb);
1807 + }
1808 +
1809 + return 1;
1810 + }
1811 +
1812 +-static void rose_loopback_timer(unsigned long);
1813 +-
1814 + static void rose_set_loopback_timer(void)
1815 + {
1816 +- del_timer(&loopback_timer);
1817 +-
1818 +- loopback_timer.data = 0;
1819 +- loopback_timer.function = &rose_loopback_timer;
1820 +- loopback_timer.expires = jiffies + 10;
1821 +-
1822 +- add_timer(&loopback_timer);
1823 ++ mod_timer(&loopback_timer, jiffies + 10);
1824 + }
1825 +
1826 +-static void rose_loopback_timer(unsigned long param)
1827 ++static void rose_loopback_timer(struct timer_list *unused)
1828 + {
1829 + struct sk_buff *skb;
1830 + struct net_device *dev;
1831 +@@ -71,8 +67,12 @@ static void rose_loopback_timer(unsigned long param)
1832 + struct sock *sk;
1833 + unsigned short frametype;
1834 + unsigned int lci_i, lci_o;
1835 ++ int count;
1836 +
1837 +- while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
1838 ++ for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
1839 ++ skb = skb_dequeue(&loopback_queue);
1840 ++ if (!skb)
1841 ++ return;
1842 + if (skb->len < ROSE_MIN_LEN) {
1843 + kfree_skb(skb);
1844 + continue;
1845 +@@ -109,6 +109,8 @@ static void rose_loopback_timer(unsigned long param)
1846 + kfree_skb(skb);
1847 + }
1848 + }
1849 ++ if (!skb_queue_empty(&loopback_queue))
1850 ++ mod_timer(&loopback_timer, jiffies + 1);
1851 + }
1852 +
1853 + void __exit rose_loopback_clear(void)
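The rose_loopback.c change above bounds the subsystem in two places: rose_loopback_queue() drops packets once ROSE_LOOPBACK_LIMIT clones are pending, and the timer handler processes at most that many frames per run, re-arming itself with mod_timer() if work remains, so a flood can no longer loop unbounded in timer context. The general shape, as a hedged sketch with hypothetical names:

        #include <linux/skbuff.h>
        #include <linux/timer.h>

        #define MY_BATCH_LIMIT 1000     /* cap work per timer tick */

        static struct sk_buff_head my_queue;
        static struct timer_list my_timer;

        static void my_process_one(struct sk_buff *skb)
        {
                kfree_skb(skb);         /* stand-in for real per-packet work */
        }

        /* Timer context: handle at most MY_BATCH_LIMIT packets, then
         * re-arm, so one burst cannot monopolise the CPU. */
        static void my_drain(struct timer_list *unused)
        {
                struct sk_buff *skb;
                int count;

                for (count = 0; count < MY_BATCH_LIMIT; count++) {
                        skb = skb_dequeue(&my_queue);
                        if (!skb)
                                return;
                        my_process_one(skb);
                }
                if (!skb_queue_empty(&my_queue))
                        mod_timer(&my_timer, jiffies + 1);
        }

        static void my_init(void)
        {
                skb_queue_head_init(&my_queue);
                timer_setup(&my_timer, my_drain, 0);
        }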
1854 +diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
1855 +index 2741abec7ee7..d94d6110bb1c 100644
1856 +--- a/net/rose/rose_route.c
1857 ++++ b/net/rose/rose_route.c
1858 +@@ -104,8 +104,8 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
1859 +
1860 + skb_queue_head_init(&rose_neigh->queue);
1861 +
1862 +- init_timer(&rose_neigh->ftimer);
1863 +- init_timer(&rose_neigh->t0timer);
1864 ++ timer_setup(&rose_neigh->ftimer, NULL, 0);
1865 ++ timer_setup(&rose_neigh->t0timer, NULL, 0);
1866 +
1867 + if (rose_route->ndigis != 0) {
1868 + rose_neigh->digipeat =
1869 +@@ -390,8 +390,8 @@ void rose_add_loopback_neigh(void)
1870 +
1871 + skb_queue_head_init(&sn->queue);
1872 +
1873 +- init_timer(&sn->ftimer);
1874 +- init_timer(&sn->t0timer);
1875 ++ timer_setup(&sn->ftimer, NULL, 0);
1876 ++ timer_setup(&sn->t0timer, NULL, 0);
1877 +
1878 + spin_lock_bh(&rose_neigh_list_lock);
1879 + sn->next = rose_neigh_list;
1880 +diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
1881 +index bc5469d6d9cb..3b89d66f15bb 100644
1882 +--- a/net/rose/rose_timer.c
1883 ++++ b/net/rose/rose_timer.c
1884 +@@ -29,8 +29,8 @@
1885 + #include <net/rose.h>
1886 +
1887 + static void rose_heartbeat_expiry(unsigned long);
1888 +-static void rose_timer_expiry(unsigned long);
1889 +-static void rose_idletimer_expiry(unsigned long);
1890 ++static void rose_timer_expiry(struct timer_list *);
1891 ++static void rose_idletimer_expiry(struct timer_list *);
1892 +
1893 + void rose_start_heartbeat(struct sock *sk)
1894 + {
1895 +@@ -49,8 +49,7 @@ void rose_start_t1timer(struct sock *sk)
1896 +
1897 + del_timer(&rose->timer);
1898 +
1899 +- rose->timer.data = (unsigned long)sk;
1900 +- rose->timer.function = &rose_timer_expiry;
1901 ++ rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
1902 + rose->timer.expires = jiffies + rose->t1;
1903 +
1904 + add_timer(&rose->timer);
1905 +@@ -62,8 +61,7 @@ void rose_start_t2timer(struct sock *sk)
1906 +
1907 + del_timer(&rose->timer);
1908 +
1909 +- rose->timer.data = (unsigned long)sk;
1910 +- rose->timer.function = &rose_timer_expiry;
1911 ++ rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
1912 + rose->timer.expires = jiffies + rose->t2;
1913 +
1914 + add_timer(&rose->timer);
1915 +@@ -75,8 +73,7 @@ void rose_start_t3timer(struct sock *sk)
1916 +
1917 + del_timer(&rose->timer);
1918 +
1919 +- rose->timer.data = (unsigned long)sk;
1920 +- rose->timer.function = &rose_timer_expiry;
1921 ++ rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
1922 + rose->timer.expires = jiffies + rose->t3;
1923 +
1924 + add_timer(&rose->timer);
1925 +@@ -88,8 +85,7 @@ void rose_start_hbtimer(struct sock *sk)
1926 +
1927 + del_timer(&rose->timer);
1928 +
1929 +- rose->timer.data = (unsigned long)sk;
1930 +- rose->timer.function = &rose_timer_expiry;
1931 ++ rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
1932 + rose->timer.expires = jiffies + rose->hb;
1933 +
1934 + add_timer(&rose->timer);
1935 +@@ -102,8 +98,7 @@ void rose_start_idletimer(struct sock *sk)
1936 + del_timer(&rose->idletimer);
1937 +
1938 + if (rose->idle > 0) {
1939 +- rose->idletimer.data = (unsigned long)sk;
1940 +- rose->idletimer.function = &rose_idletimer_expiry;
1941 ++ rose->idletimer.function = (TIMER_FUNC_TYPE)rose_idletimer_expiry;
1942 + rose->idletimer.expires = jiffies + rose->idle;
1943 +
1944 + add_timer(&rose->idletimer);
1945 +@@ -163,10 +158,10 @@ static void rose_heartbeat_expiry(unsigned long param)
1946 + bh_unlock_sock(sk);
1947 + }
1948 +
1949 +-static void rose_timer_expiry(unsigned long param)
1950 ++static void rose_timer_expiry(struct timer_list *t)
1951 + {
1952 +- struct sock *sk = (struct sock *)param;
1953 +- struct rose_sock *rose = rose_sk(sk);
1954 ++ struct rose_sock *rose = from_timer(rose, t, timer);
1955 ++ struct sock *sk = &rose->sock;
1956 +
1957 + bh_lock_sock(sk);
1958 + switch (rose->state) {
1959 +@@ -192,9 +187,10 @@ static void rose_timer_expiry(unsigned long param)
1960 + bh_unlock_sock(sk);
1961 + }
1962 +
1963 +-static void rose_idletimer_expiry(unsigned long param)
1964 ++static void rose_idletimer_expiry(struct timer_list *t)
1965 + {
1966 +- struct sock *sk = (struct sock *)param;
1967 ++ struct rose_sock *rose = from_timer(rose, t, idletimer);
1968 ++ struct sock *sk = &rose->sock;
1969 +
1970 + bh_lock_sock(sk);
1971 + rose_clear_queues(sk);
1972 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
1973 +index f2cf4edf219b..475b453dc7ae 100644
1974 +--- a/net/sunrpc/cache.c
1975 ++++ b/net/sunrpc/cache.c
1976 +@@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
1977 + h->last_refresh = now;
1978 + }
1979 +
1980 ++static inline int cache_is_valid(struct cache_head *h);
1981 + static void cache_fresh_locked(struct cache_head *head, time_t expiry,
1982 + struct cache_detail *detail);
1983 + static void cache_fresh_unlocked(struct cache_head *head,
1984 +@@ -100,6 +101,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
1985 + if (cache_is_expired(detail, tmp)) {
1986 + hlist_del_init(&tmp->cache_list);
1987 + detail->entries --;
1988 ++ if (cache_is_valid(tmp) == -EAGAIN)
1989 ++ set_bit(CACHE_NEGATIVE, &tmp->flags);
1990 + cache_fresh_locked(tmp, 0, detail);
1991 + freeme = tmp;
1992 + break;
1993 +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
1994 +index 73895daf8943..aa75bc8b158f 100644
1995 +--- a/net/tipc/netlink_compat.c
1996 ++++ b/net/tipc/netlink_compat.c
1997 +@@ -262,8 +262,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
1998 + if (msg->rep_type)
1999 + tipc_tlv_init(msg->rep, msg->rep_type);
2000 +
2001 +- if (cmd->header)
2002 +- (*cmd->header)(msg);
2003 ++ if (cmd->header) {
2004 ++ err = (*cmd->header)(msg);
2005 ++ if (err) {
2006 ++ kfree_skb(msg->rep);
2007 ++ msg->rep = NULL;
2008 ++ return err;
2009 ++ }
2010 ++ }
2011 +
2012 + arg = nlmsg_new(0, GFP_KERNEL);
2013 + if (!arg) {
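The first netlink_compat.c hunk above makes the per-command header callback's return value fatal: on error the half-built reply skb is freed and msg->rep cleared before propagating the code, so no caller ever sees a dangling partial reply. The same error-path discipline, sketched with a hypothetical compat_msg:

        #include <linux/skbuff.h>

        struct compat_msg {     /* hypothetical, stands in for tipc_nl_compat_msg */
                struct sk_buff *rep;
        };

        /* build_header may fail on malformed requests; on error, release
         * the half-built reply and NULL the pointer so no caller sees
         * stale state. */
        static int build_reply(struct compat_msg *msg,
                               int (*build_header)(struct compat_msg *))
        {
                int err;

                if (build_header) {
                        err = build_header(msg);
                        if (err) {
                                kfree_skb(msg->rep);
                                msg->rep = NULL;
                                return err;
                        }
                }
                return 0;
        }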
2014 +@@ -388,7 +394,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
2015 + if (!bearer)
2016 + return -EMSGSIZE;
2017 +
2018 +- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
2019 ++ len = TLV_GET_DATA_LEN(msg->req);
2020 ++ len -= offsetof(struct tipc_bearer_config, name);
2021 ++ if (len <= 0)
2022 ++ return -EINVAL;
2023 ++
2024 ++ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
2025 + if (!string_is_valid(b->name, len))
2026 + return -EINVAL;
2027 +
2028 +@@ -757,7 +768,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
2029 +
2030 + lc = (struct tipc_link_config *)TLV_DATA(msg->req);
2031 +
2032 +- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
2033 ++ len = TLV_GET_DATA_LEN(msg->req);
2034 ++ len -= offsetof(struct tipc_link_config, name);
2035 ++ if (len <= 0)
2036 ++ return -EINVAL;
2037 ++
2038 ++ len = min_t(int, len, TIPC_MAX_LINK_NAME);
2039 + if (!string_is_valid(lc->name, len))
2040 + return -EINVAL;
2041 +
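Both the bearer_enable and link_set hunks above stop feeding the raw TLV payload length into the name-validity check: they first subtract the offset of the name field and reject payloads too short to hold any name bytes at all, where the old min_t() alone let an undersized TLV drive string_is_valid() past the buffer. The check, sketched against a hypothetical my_config:

        #include <linux/kernel.h>
        #include <linux/stddef.h>
        #include <linux/types.h>

        #define MY_MAX_NAME 32

        struct my_config {      /* hypothetical, shaped like tipc_link_config */
                u32 value;
                char name[MY_MAX_NAME];
        };

        static bool name_fits(int tlv_data_len)
        {
                /* Only bytes past the fixed fields can hold the name. */
                int len = tlv_data_len - offsetof(struct my_config, name);

                if (len <= 0)   /* TLV too short to contain any name */
                        return false;
                len = min_t(int, len, MY_MAX_NAME);
                /* ... then require a NUL terminator within those len bytes ... */
                return true;
        }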
2042 +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
2043 +index edba7ab97563..40a8731c663b 100644
2044 +--- a/net/vmw_vsock/virtio_transport_common.c
2045 ++++ b/net/vmw_vsock/virtio_transport_common.c
2046 +@@ -662,6 +662,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
2047 + */
2048 + static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
2049 + {
2050 ++ const struct virtio_transport *t;
2051 ++ struct virtio_vsock_pkt *reply;
2052 + struct virtio_vsock_pkt_info info = {
2053 + .op = VIRTIO_VSOCK_OP_RST,
2054 + .type = le16_to_cpu(pkt->hdr.type),
2055 +@@ -672,15 +674,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
2056 + if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
2057 + return 0;
2058 +
2059 +- pkt = virtio_transport_alloc_pkt(&info, 0,
2060 +- le64_to_cpu(pkt->hdr.dst_cid),
2061 +- le32_to_cpu(pkt->hdr.dst_port),
2062 +- le64_to_cpu(pkt->hdr.src_cid),
2063 +- le32_to_cpu(pkt->hdr.src_port));
2064 +- if (!pkt)
2065 ++ reply = virtio_transport_alloc_pkt(&info, 0,
2066 ++ le64_to_cpu(pkt->hdr.dst_cid),
2067 ++ le32_to_cpu(pkt->hdr.dst_port),
2068 ++ le64_to_cpu(pkt->hdr.src_cid),
2069 ++ le32_to_cpu(pkt->hdr.src_port));
2070 ++ if (!reply)
2071 + return -ENOMEM;
2072 +
2073 +- return virtio_transport_get_ops()->send_pkt(pkt);
2074 ++ t = virtio_transport_get_ops();
2075 ++ if (!t) {
2076 ++ virtio_transport_free_pkt(reply);
2077 ++ return -ENOTCONN;
2078 ++ }
2079 ++
2080 ++ return t->send_pkt(reply);
2081 + }
2082 +
2083 + static void virtio_transport_wait_close(struct sock *sk, long timeout)
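The virtio_transport change above fixes two things at once: the reply now lives in its own variable rather than reusing the caller-owned pkt pointer, and the transport ops are checked before use, freeing the reply and returning -ENOTCONN when no transport is registered instead of dereferencing a NULL pointer. The ownership discipline, sketched with hypothetical types:

        #include <linux/errno.h>

        struct reply { int unused; };                   /* hypothetical */
        struct transport { int (*send)(struct reply *); };

        extern struct reply *alloc_reply(void);
        extern void free_reply(struct reply *r);
        extern struct transport *get_transport(void);   /* may return NULL */

        /* Each early exit frees exactly what this function owns; the
         * caller keeps ownership of the original request throughout. */
        static int send_reset_reply(void)
        {
                struct transport *t;
                struct reply *r = alloc_reply();

                if (!r)
                        return -ENOMEM;

                t = get_transport();
                if (!t) {
                        free_reply(r);  /* don't leak the reply on failure */
                        return -ENOTCONN;
                }
                return t->send(r);
        }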
2084 +diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
2085 +index a0ad87e869f9..a33fa1a91873 100644
2086 +--- a/scripts/Kbuild.include
2087 ++++ b/scripts/Kbuild.include
2088 +@@ -165,9 +165,7 @@ cc-ldoption = $(call try-run,\
2089 +
2090 + # ld-option
2091 + # Usage: LDFLAGS += $(call ld-option, -X)
2092 +-ld-option = $(call try-run,\
2093 +- $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -x c /dev/null -c -o "$$TMPO"; \
2094 +- $(LD) $(LDFLAGS) $(1) "$$TMPO" -o "$$TMP",$(1),$(2))
2095 ++ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2))
2096 +
2097 + # ar-option
2098 + # Usage: KBUILD_ARFLAGS := $(call ar-option,D)