From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 29 Oct 2019 14:00:03
Message-Id: 1572357542.0dd711b119384f33b8fd38ccb24275ecea1b33d3.mpagano@gentoo
commit: 0dd711b119384f33b8fd38ccb24275ecea1b33d3
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Jul 3 13:02:31 2019 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Tue Oct 29 13:59:02 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0dd711b1

Linux patch 4.14.132

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

0000_README | 4 +
1131_linux-4.14.132.patch | 1558 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1562 insertions(+)

diff --git a/0000_README b/0000_README
index 562acb4..f51c89c 100644
--- a/0000_README
+++ b/0000_README
@@ -567,6 +567,10 @@ Patch: 1130_linux-4.14.131.patch
From: https://www.kernel.org
Desc: Linux 4.14.131

+Patch: 1131_linux-4.14.132.patch
+From: https://www.kernel.org
+Desc: Linux 4.14.132
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1131_linux-4.14.132.patch b/1131_linux-4.14.132.patch
new file mode 100644
index 0000000..4119e06
--- /dev/null
+++ b/1131_linux-4.14.132.patch
@@ -0,0 +1,1558 @@
+diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt
+index 6c42c75103eb..6361fb01c9c1 100644
+--- a/Documentation/robust-futexes.txt
++++ b/Documentation/robust-futexes.txt
+@@ -218,5 +218,4 @@ All other architectures should build just fine too - but they won't have
+ the new syscalls yet.
+
+ Architectures need to implement the new futex_atomic_cmpxchg_inatomic()
+-inline function before writing up the syscalls (that function returns
+--ENOSYS right now).
++inline function before writing up the syscalls.
+diff --git a/Makefile b/Makefile
+index 275343cf27f7..23b2916ef0ff 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 131
++SUBLEVEL = 132
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index c7e30a6ed56e..232917e9c1d9 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -134,7 +134,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
+ : "memory");
+ uaccess_disable();
+
+- *uval = val;
++ if (!ret)
++ *uval = val;
++
+ return ret;
+ }
+
+diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
+index 4214c38d016b..e3193fd39d8d 100644
+--- a/arch/arm64/include/asm/insn.h
++++ b/arch/arm64/include/asm/insn.h
+@@ -271,6 +271,7 @@ __AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
+ __AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
+ __AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
+ __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
++__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
+ __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
+ __AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
+ __AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
+@@ -383,6 +384,13 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
+ enum aarch64_insn_register state,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type);
++u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
++ enum aarch64_insn_register address,
++ enum aarch64_insn_register value,
++ enum aarch64_insn_size_type size);
++u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
++ enum aarch64_insn_register value,
++ enum aarch64_insn_size_type size);
+ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ int imm, enum aarch64_insn_variant variant,
+diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
+index 2718a77da165..4381aa7b071d 100644
+--- a/arch/arm64/kernel/insn.c
++++ b/arch/arm64/kernel/insn.c
+@@ -793,6 +793,46 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
+ state);
+ }
+
++u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
++ enum aarch64_insn_register address,
++ enum aarch64_insn_register value,
++ enum aarch64_insn_size_type size)
++{
++ u32 insn = aarch64_insn_get_ldadd_value();
++
++ switch (size) {
++ case AARCH64_INSN_SIZE_32:
++ case AARCH64_INSN_SIZE_64:
++ break;
++ default:
++ pr_err("%s: unimplemented size encoding %d\n", __func__, size);
++ return AARCH64_BREAK_FAULT;
++ }
++
++ insn = aarch64_insn_encode_ldst_size(size, insn);
++
++ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
++ result);
++
++ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
++ address);
++
++ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
++ value);
++}
++
++u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
++ enum aarch64_insn_register value,
++ enum aarch64_insn_size_type size)
++{
++ /*
++ * STADD is simply encoded as an alias for LDADD with XZR as
++ * the destination register.
++ */
++ return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
++ value, size);
++}
++
+ static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
+ enum aarch64_insn_prfm_target target,
+ enum aarch64_insn_prfm_policy policy,
+diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
+index 6c881659ee8a..76606e87233f 100644
+--- a/arch/arm64/net/bpf_jit.h
++++ b/arch/arm64/net/bpf_jit.h
+@@ -100,6 +100,10 @@
+ #define A64_STXR(sf, Rt, Rn, Rs) \
+ A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
+
++/* LSE atomics */
++#define A64_STADD(sf, Rn, Rs) \
++ aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
++
+ /* Add/subtract (immediate) */
+ #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
+ aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 6110fe344368..b742171bfef7 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -330,7 +330,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+ const int i = insn - ctx->prog->insnsi;
+ const bool is64 = BPF_CLASS(code) == BPF_ALU64;
+ const bool isdw = BPF_SIZE(code) == BPF_DW;
+- u8 jmp_cond;
++ u8 jmp_cond, reg;
+ s32 jmp_offset;
+
+ #define check_imm(bits, imm) do { \
+@@ -706,18 +706,28 @@ emit_cond_jmp:
+ break;
+ }
+ break;
++
+ /* STX XADD: lock *(u32 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_W:
+ /* STX XADD: lock *(u64 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_DW:
+- emit_a64_mov_i(1, tmp, off, ctx);
+- emit(A64_ADD(1, tmp, tmp, dst), ctx);
+- emit(A64_LDXR(isdw, tmp2, tmp), ctx);
+- emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
+- emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
+- jmp_offset = -3;
+- check_imm19(jmp_offset);
+- emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
++ if (!off) {
++ reg = dst;
++ } else {
++ emit_a64_mov_i(1, tmp, off, ctx);
++ emit(A64_ADD(1, tmp, tmp, dst), ctx);
++ reg = tmp;
++ }
++ if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
++ emit(A64_STADD(isdw, reg, src), ctx);
++ } else {
++ emit(A64_LDXR(isdw, tmp2, reg), ctx);
++ emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
++ emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
++ jmp_offset = -3;
++ check_imm19(jmp_offset);
++ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
++ }
+ break;
+
+ /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 2769e0f5c686..3b44d39aca1d 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -820,6 +820,16 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
+ break;
+ }
+
++ /*
++ * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
++ * bit in the mask to allow guests to use the mitigation even in the
++ * case where the host does not enable it.
++ */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
++ static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
++ }
++
+ /*
+ * We have three CPU feature flags that are in play here:
+ * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+@@ -837,7 +847,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
+ x86_amd_ssb_disable();
+ } else {
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+- x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ }
+ }
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 4a2100ac3423..93c22e7ee424 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -790,13 +790,16 @@ static struct syscore_ops mc_syscore_ops = {
+ .resume = mc_bp_resume,
+ };
+
+-static int mc_cpu_online(unsigned int cpu)
++static int mc_cpu_starting(unsigned int cpu)
+ {
+- struct device *dev;
+-
+- dev = get_cpu_device(cpu);
+ microcode_update_cpu(cpu);
+ pr_debug("CPU%d added\n", cpu);
++ return 0;
++}
++
++static int mc_cpu_online(unsigned int cpu)
++{
++ struct device *dev = get_cpu_device(cpu);
+
+ if (sysfs_create_group(&dev->kobj, &mc_attr_group))
+ pr_err("Failed to create group for CPU%d\n", cpu);
+@@ -873,7 +876,9 @@ int __init microcode_init(void)
+ goto out_ucode_group;
+
+ register_syscore_ops(&mc_syscore_ops);
+- cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
++ cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
++ mc_cpu_starting, NULL);
++ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+ mc_cpu_online, mc_cpu_down_prep);
+
+ pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
+diff --git a/block/bio.c b/block/bio.c
+index d01ab919b313..1384f9790882 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -773,7 +773,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
+ return 0;
+ }
+
+- if (bio->bi_vcnt >= bio->bi_max_vecs)
++ if (bio_full(bio))
+ return 0;
+
+ /*
+@@ -821,65 +821,97 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
+ EXPORT_SYMBOL(bio_add_pc_page);
+
+ /**
+- * bio_add_page - attempt to add page to bio
+- * @bio: destination bio
+- * @page: page to add
+- * @len: vec entry length
+- * @offset: vec entry offset
++ * __bio_try_merge_page - try appending data to an existing bvec.
++ * @bio: destination bio
++ * @page: page to add
++ * @len: length of the data to add
++ * @off: offset of the data in @page
+ *
+- * Attempt to add a page to the bio_vec maplist. This will only fail
+- * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
++ * Try to add the data at @page + @off to the last bvec of @bio. This is a
++ * a useful optimisation for file systems with a block size smaller than the
++ * page size.
++ *
++ * Return %true on success or %false on failure.
+ */
+-int bio_add_page(struct bio *bio, struct page *page,
+- unsigned int len, unsigned int offset)
++bool __bio_try_merge_page(struct bio *bio, struct page *page,
++ unsigned int len, unsigned int off)
+ {
+- struct bio_vec *bv;
+-
+- /*
+- * cloned bio must not modify vec list
+- */
+ if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
+- return 0;
++ return false;
+
+- /*
+- * For filesystems with a blocksize smaller than the pagesize
+- * we will often be called with the same page as last time and
+- * a consecutive offset. Optimize this special case.
+- */
+ if (bio->bi_vcnt > 0) {
+- bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
++ struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+- if (page == bv->bv_page &&
+- offset == bv->bv_offset + bv->bv_len) {
++ if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
+ bv->bv_len += len;
+- goto done;
++ bio->bi_iter.bi_size += len;
++ return true;
+ }
+ }
++ return false;
++}
++EXPORT_SYMBOL_GPL(__bio_try_merge_page);
+
+- if (bio->bi_vcnt >= bio->bi_max_vecs)
+- return 0;
++/**
++ * __bio_add_page - add page to a bio in a new segment
++ * @bio: destination bio
++ * @page: page to add
++ * @len: length of the data to add
++ * @off: offset of the data in @page
++ *
++ * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
++ * that @bio has space for another bvec.
++ */
++void __bio_add_page(struct bio *bio, struct page *page,
++ unsigned int len, unsigned int off)
++{
++ struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
+
+- bv = &bio->bi_io_vec[bio->bi_vcnt];
+- bv->bv_page = page;
+- bv->bv_len = len;
+- bv->bv_offset = offset;
++ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
++ WARN_ON_ONCE(bio_full(bio));
++
++ bv->bv_page = page;
++ bv->bv_offset = off;
++ bv->bv_len = len;
+
+- bio->bi_vcnt++;
+-done:
+ bio->bi_iter.bi_size += len;
++ bio->bi_vcnt++;
++}
++EXPORT_SYMBOL_GPL(__bio_add_page);
++
++/**
++ * bio_add_page - attempt to add page to bio
++ * @bio: destination bio
++ * @page: page to add
++ * @len: vec entry length
++ * @offset: vec entry offset
++ *
++ * Attempt to add a page to the bio_vec maplist. This will only fail
++ * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
++ */
++int bio_add_page(struct bio *bio, struct page *page,
++ unsigned int len, unsigned int offset)
++{
++ if (!__bio_try_merge_page(bio, page, len, offset)) {
++ if (bio_full(bio))
++ return 0;
++ __bio_add_page(bio, page, len, offset);
++ }
+ return len;
+ }
+ EXPORT_SYMBOL(bio_add_page);
+
+ /**
+- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
++ * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * @bio: bio to add pages to
+ * @iter: iov iterator describing the region to be mapped
+ *
+- * Pins as many pages from *iter and appends them to @bio's bvec array. The
++ * Pins pages from *iter and appends them to @bio's bvec array. The
+ * pages will have to be released using put_page() when done.
++ * For multi-segment *iter, this function only adds pages from the
++ * the next non-empty segment of the iov iterator.
+ */
+-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
++static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+ {
+ unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
+ struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
+@@ -916,6 +948,33 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+ iov_iter_advance(iter, size);
+ return 0;
+ }
++
++/**
++ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
++ * @bio: bio to add pages to
++ * @iter: iov iterator describing the region to be mapped
++ *
++ * Pins pages from *iter and appends them to @bio's bvec array. The
++ * pages will have to be released using put_page() when done.
++ * The function tries, but does not guarantee, to pin as many pages as
++ * fit into the bio, or are requested in *iter, whatever is smaller.
++ * If MM encounters an error pinning the requested pages, it stops.
++ * Error is returned only if 0 pages could be pinned.
++ */
++int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
++{
++ unsigned short orig_vcnt = bio->bi_vcnt;
++
++ do {
++ int ret = __bio_iov_iter_get_pages(bio, iter);
++
++ if (unlikely(ret))
++ return bio->bi_vcnt > orig_vcnt ? 0 : ret;
++
++ } while (iov_iter_count(iter) && !bio_full(bio));
++
++ return 0;
++}
+ EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
+
+ struct submit_bio_ret {
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index cbe5ab26d95b..75275f9e363d 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -132,25 +132,22 @@ static int defer_packet_queue(
+ struct hfi1_user_sdma_pkt_q *pq =
+ container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+ struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
+- struct user_sdma_txreq *tx =
+- container_of(txreq, struct user_sdma_txreq, txreq);
+
+- if (sdma_progress(sde, seq, txreq)) {
+- if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
+- goto eagain;
+- }
++ write_seqlock(&dev->iowait_lock);
++ if (sdma_progress(sde, seq, txreq))
++ goto eagain;
+ /*
+ * We are assuming that if the list is enqueued somewhere, it
+ * is to the dmawait list since that is the only place where
+ * it is supposed to be enqueued.
+ */
+ xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
+- write_seqlock(&dev->iowait_lock);
+ if (list_empty(&pq->busy.list))
+ iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
+ write_sequnlock(&dev->iowait_lock);
+ return -EBUSY;
+ eagain:
++ write_sequnlock(&dev->iowait_lock);
+ return -EAGAIN;
+ }
+
+@@ -803,7 +800,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
+
+ tx->flags = 0;
+ tx->req = req;
+- tx->busycount = 0;
+ INIT_LIST_HEAD(&tx->list);
+
+ /*
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
+index 2b5326d6db53..87b0c567f442 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.h
++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
+@@ -236,7 +236,6 @@ struct user_sdma_txreq {
+ struct list_head list;
+ struct user_sdma_request *req;
+ u16 flags;
+- unsigned int busycount;
+ u64 seqnum;
+ };
+
+diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
+index 8b80a9ce9ea9..dafedbc28bcc 100644
+--- a/drivers/md/dm-log-writes.c
++++ b/drivers/md/dm-log-writes.c
+@@ -57,6 +57,7 @@
+
+ #define WRITE_LOG_VERSION 1ULL
+ #define WRITE_LOG_MAGIC 0x6a736677736872ULL
++#define WRITE_LOG_SUPER_SECTOR 0
+
+ /*
+ * The disk format for this is braindead simple.
+@@ -112,6 +113,7 @@ struct log_writes_c {
+ struct list_head logging_blocks;
+ wait_queue_head_t wait;
+ struct task_struct *log_kthread;
++ struct completion super_done;
+ };
+
+ struct pending_block {
+@@ -177,6 +179,14 @@ static void log_end_io(struct bio *bio)
+ bio_put(bio);
+ }
+
++static void log_end_super(struct bio *bio)
++{
++ struct log_writes_c *lc = bio->bi_private;
++
++ complete(&lc->super_done);
++ log_end_io(bio);
++}
++
+ /*
+ * Meant to be called if there is an error, it will free all the pages
+ * associated with the block.
+@@ -212,7 +222,8 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
+ bio->bi_iter.bi_size = 0;
+ bio->bi_iter.bi_sector = sector;
+ bio_set_dev(bio, lc->logdev->bdev);
+- bio->bi_end_io = log_end_io;
++ bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
++ log_end_super : log_end_io;
+ bio->bi_private = lc;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+@@ -334,11 +345,18 @@ static int log_super(struct log_writes_c *lc)
+ super.nr_entries = cpu_to_le64(lc->logged_entries);
+ super.sectorsize = cpu_to_le32(lc->sectorsize);
+
+- if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
++ if (write_metadata(lc, &super, sizeof(super), NULL, 0,
++ WRITE_LOG_SUPER_SECTOR)) {
+ DMERR("Couldn't write super");
+ return -1;
+ }
+
++ /*
++ * Super sector should be writen in-order, otherwise the
++ * nr_entries could be rewritten incorrectly by an old bio.
++ */
++ wait_for_completion_io(&lc->super_done);
++
+ return 0;
+ }
+
+@@ -447,6 +465,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ INIT_LIST_HEAD(&lc->unflushed_blocks);
+ INIT_LIST_HEAD(&lc->logging_blocks);
+ init_waitqueue_head(&lc->wait);
++ init_completion(&lc->super_done);
+ atomic_set(&lc->io_blocks, 0);
+ atomic_set(&lc->pending_blocks, 0);
+
+diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
+index 59dcd97ee3de..6b58ee2e2a25 100644
+--- a/drivers/misc/eeprom/at24.c
++++ b/drivers/misc/eeprom/at24.c
+@@ -113,22 +113,6 @@ MODULE_PARM_DESC(write_timeout, "Time (in ms) to try writes (default 25)");
+ ((1 << AT24_SIZE_FLAGS | (_flags)) \
+ << AT24_SIZE_BYTELEN | ilog2(_len))
+
+-/*
+- * Both reads and writes fail if the previous write didn't complete yet. This
+- * macro loops a few times waiting at least long enough for one entire page
+- * write to work while making sure that at least one iteration is run before
+- * checking the break condition.
+- *
+- * It takes two parameters: a variable in which the future timeout in jiffies
+- * will be stored and a temporary variable holding the time of the last
+- * iteration of processing the request. Both should be unsigned integers
+- * holding at least 32 bits.
+- */
+-#define loop_until_timeout(tout, op_time) \
+- for (tout = jiffies + msecs_to_jiffies(write_timeout), op_time = 0; \
+- op_time ? time_before(op_time, tout) : true; \
+- usleep_range(1000, 1500), op_time = jiffies)
+-
+ static const struct i2c_device_id at24_ids[] = {
+ /* needs 8 addresses as A0-A2 are ignored */
+ { "24c00", AT24_DEVICE_MAGIC(128 / 8, AT24_FLAG_TAKE8ADDR) },
+@@ -234,7 +218,14 @@ static ssize_t at24_eeprom_read_smbus(struct at24_data *at24, char *buf,
+ if (count > I2C_SMBUS_BLOCK_MAX)
+ count = I2C_SMBUS_BLOCK_MAX;
+
+- loop_until_timeout(timeout, read_time) {
++ timeout = jiffies + msecs_to_jiffies(write_timeout);
++ do {
++ /*
++ * The timestamp shall be taken before the actual operation
++ * to avoid a premature timeout in case of high CPU load.
++ */
++ read_time = jiffies;
++
+ status = i2c_smbus_read_i2c_block_data_or_emulated(client,
+ offset,
+ count, buf);
+@@ -244,7 +235,9 @@ static ssize_t at24_eeprom_read_smbus(struct at24_data *at24, char *buf,
+
+ if (status == count)
+ return count;
+- }
++
++ usleep_range(1000, 1500);
++ } while (time_before(read_time, timeout));
+
+ return -ETIMEDOUT;
+ }
+@@ -284,7 +277,14 @@ static ssize_t at24_eeprom_read_i2c(struct at24_data *at24, char *buf,
+ msg[1].buf = buf;
+ msg[1].len = count;
+
+- loop_until_timeout(timeout, read_time) {
++ timeout = jiffies + msecs_to_jiffies(write_timeout);
++ do {
++ /*
++ * The timestamp shall be taken before the actual operation
++ * to avoid a premature timeout in case of high CPU load.
++ */
++ read_time = jiffies;
++
+ status = i2c_transfer(client->adapter, msg, 2);
+ if (status == 2)
+ status = count;
+@@ -294,7 +294,9 @@ static ssize_t at24_eeprom_read_i2c(struct at24_data *at24, char *buf,
+
+ if (status == count)
+ return count;
+- }
++
++ usleep_range(1000, 1500);
++ } while (time_before(read_time, timeout));
+
+ return -ETIMEDOUT;
+ }
+@@ -343,11 +345,20 @@ static ssize_t at24_eeprom_read_serial(struct at24_data *at24, char *buf,
+ msg[1].buf = buf;
+ msg[1].len = count;
+
+- loop_until_timeout(timeout, read_time) {
++ timeout = jiffies + msecs_to_jiffies(write_timeout);
++ do {
++ /*
++ * The timestamp shall be taken before the actual operation
++ * to avoid a premature timeout in case of high CPU load.
++ */
++ read_time = jiffies;
++
+ status = i2c_transfer(client->adapter, msg, 2);
+ if (status == 2)
+ return count;
+- }
++
++ usleep_range(1000, 1500);
++ } while (time_before(read_time, timeout));
+
+ return -ETIMEDOUT;
+ }
+@@ -374,11 +385,20 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
+ msg[1].buf = buf;
+ msg[1].len = count;
+
+- loop_until_timeout(timeout, read_time) {
++ timeout = jiffies + msecs_to_jiffies(write_timeout);
++ do {
++ /*
++ * The timestamp shall be taken before the actual operation
++ * to avoid a premature timeout in case of high CPU load.
++ */
++ read_time = jiffies;
++
+ status = i2c_transfer(client->adapter, msg, 2);
+ if (status == 2)
+ return count;
+- }
++
++ usleep_range(1000, 1500);
++ } while (time_before(read_time, timeout));
+
+ return -ETIMEDOUT;
+ }
+@@ -420,7 +440,14 @@ static ssize_t at24_eeprom_write_smbus_block(struct at24_data *at24,
+ client = at24_translate_offset(at24, &offset);
+ count = at24_adjust_write_count(at24, offset, count);
+
+- loop_until_timeout(timeout, write_time) {
++ timeout = jiffies + msecs_to_jiffies(write_timeout);
++ do {
++ /*
++ * The timestamp shall be taken before the actual operation
++ * to avoid a premature timeout in case of high CPU load.
++ */
++ write_time = jiffies;
++
+ status = i2c_smbus_write_i2c_block_data(client,
+ offset, count, buf);
+ if (status == 0)
+@@ -431,7 +458,9 @@ static ssize_t at24_eeprom_write_smbus_block(struct at24_data *at24,
+
+ if (status == count)
+ return count;
+- }
++
++ usleep_range(1000, 1500);
++ } while (time_before(write_time, timeout));
+
+ return -ETIMEDOUT;
+ }
+@@ -446,7 +475,14 @@ static ssize_t at24_eeprom_write_smbus_byte(struct at24_data *at24,
+
+ client = at24_translate_offset(at24, &offset);
+
+- loop_until_timeout(timeout, write_time) {
++ timeout = jiffies + msecs_to_jiffies(write_timeout);
++ do {
++ /*
++ * The timestamp shall be taken before the actual operation
++ * to avoid a premature timeout in case of high CPU load.
++ */
++ write_time = jiffies;
++
+ status = i2c_smbus_write_byte_data(client, offset, buf[0]);
+ if (status == 0)
+ status = count;
+@@ -456,7 +492,9 @@ static ssize_t at24_eeprom_write_smbus_byte(struct at24_data *at24,
+
+ if (status == count)
+ return count;
+- }
++
++ usleep_range(1000, 1500);
++ } while (time_before(write_time, timeout));
+
+ return -ETIMEDOUT;
+ }
+@@ -485,7 +523,14 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
+ memcpy(&msg.buf[i], buf, count);
+ msg.len = i + count;
+
+- loop_until_timeout(timeout, write_time) {
++ timeout = jiffies + msecs_to_jiffies(write_timeout);
++ do {
++ /*
++ * The timestamp shall be taken before the actual operation
++ * to avoid a premature timeout in case of high CPU load.
++ */
++ write_time = jiffies;
++
+ status = i2c_transfer(client->adapter, &msg, 1);
+ if (status == 1)
+ status = count;
+@@ -495,7 +540,9 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
+
+ if (status == count)
+ return count;
+- }
++
++ usleep_range(1000, 1500);
++ } while (time_before(write_time, timeout));
+
+ return -ETIMEDOUT;
+ }
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1edd4ff5382c..8536a75f32e3 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4263,12 +4263,12 @@ void bond_setup(struct net_device *bond_dev)
+ bond_dev->features |= NETIF_F_NETNS_LOCAL;
+
+ bond_dev->hw_features = BOND_VLAN_FEATURES |
+- NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
+ bond_dev->features |= bond_dev->hw_features;
++ bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ }
+
+ /* Destroy a bonding device.
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+index 08c19ebd5306..41d528fbebb4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+@@ -121,7 +121,7 @@ static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
+ * programmed with (2^32 – <new_sec_value>)
+ */
+ if (gmac4)
+- sec = (100000000ULL - sec);
++ sec = -sec;
+
+ value = readl(ioaddr + PTP_TCR);
+ if (value & PTP_TCR_TSCTRLSSR)
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index e9a92ed5a308..f3e3e568311a 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2131,12 +2131,12 @@ static void team_setup(struct net_device *dev)
+ dev->features |= NETIF_F_NETNS_LOCAL;
+
+ dev->hw_features = TEAM_VLAN_FEATURES |
+- NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
+ dev->features |= dev->hw_features;
++ dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ }
+
+ static int team_newlink(struct net *src_net, struct net_device *dev,
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 3b13d9e4030a..84a33c81b9b7 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -831,18 +831,8 @@ static void tun_net_uninit(struct net_device *dev)
+ /* Net device open. */
+ static int tun_net_open(struct net_device *dev)
+ {
+- struct tun_struct *tun = netdev_priv(dev);
+- int i;
+-
+ netif_tx_start_all_queues(dev);
+
+- for (i = 0; i < tun->numqueues; i++) {
+- struct tun_file *tfile;
+-
+- tfile = rtnl_dereference(tun->tfiles[i]);
+- tfile->socket.sk->sk_write_space(tfile->socket.sk);
+- }
+-
+ return 0;
+ }
+
+@@ -2826,6 +2816,7 @@ static int tun_device_event(struct notifier_block *unused,
+ {
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct tun_struct *tun = netdev_priv(dev);
++ int i;
+
+ if (dev->rtnl_link_ops != &tun_link_ops)
+ return NOTIFY_DONE;
+@@ -2835,6 +2826,14 @@ static int tun_device_event(struct notifier_block *unused,
+ if (tun_queue_resize(tun))
+ return NOTIFY_BAD;
+ break;
++ case NETDEV_UP:
++ for (i = 0; i < tun->numqueues; i++) {
++ struct tun_file *tfile;
++
++ tfile = rtnl_dereference(tun->tfiles[i]);
++ tfile->socket.sk->sk_write_space(tfile->socket.sk);
++ }
++ break;
+ default:
+ break;
+ }
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index c2d6c501dd85..063daa3435e4 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1395,14 +1395,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+ return -ENODEV;
+ }
+
+- info = (void *)&id->driver_info;
+-
+ /* Several Quectel modems supports dynamic interface configuration, so
+ * we need to match on class/subclass/protocol. These values are
+ * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
+ * different. Ignore the current interface if the number of endpoints
+ * equals the number for the diag interface (two).
+ */
++ info = (void *)id->driver_info;
++
+ if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
+ if (desc->bNumEndpoints == 2)
+ return -ENODEV;
+diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
+index 890b8aaf95e1..64eb8ffb2ddf 100644
+--- a/drivers/scsi/vmw_pvscsi.c
++++ b/drivers/scsi/vmw_pvscsi.c
+@@ -763,6 +763,7 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
+ struct pvscsi_adapter *adapter = shost_priv(host);
+ struct pvscsi_ctx *ctx;
+ unsigned long flags;
++ unsigned char op;
+
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+@@ -775,13 +776,14 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
+ }
+
+ cmd->scsi_done = done;
++ op = cmd->cmnd[0];
+
+ dev_dbg(&cmd->device->sdev_gendev,
+- "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
++ "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);
+
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+- pvscsi_kick_io(adapter, cmd->cmnd[0]);
++ pvscsi_kick_io(adapter, op);
+
+ return 0;
+ }
+diff --git a/fs/9p/acl.c b/fs/9p/acl.c
+index 082d227fa56b..6261719f6f2a 100644
+--- a/fs/9p/acl.c
++++ b/fs/9p/acl.c
+@@ -276,7 +276,7 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
+ switch (handler->flags) {
+ case ACL_TYPE_ACCESS:
+ if (acl) {
+- struct iattr iattr;
++ struct iattr iattr = { 0 };
+ struct posix_acl *old_acl = acl;
+
+ retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
+diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
+index 5d6b94475f27..78b5bac82559 100644
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -856,9 +856,14 @@ err:
+
+ static int load_flat_shared_library(int id, struct lib_info *libs)
+ {
++ /*
++ * This is a fake bprm struct; only the members "buf", "file" and
++ * "filename" are actually used.
++ */
+ struct linux_binprm bprm;
+ int res;
+ char buf[16];
++ loff_t pos = 0;
+
+ memset(&bprm, 0, sizeof(bprm));
+
+@@ -872,25 +877,11 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
+ if (IS_ERR(bprm.file))
+ return res;
+
+- bprm.cred = prepare_exec_creds();
+- res = -ENOMEM;
+- if (!bprm.cred)
+- goto out;
+-
+- /* We don't really care about recalculating credentials at this point
+- * as we're past the point of no return and are dealing with shared
+- * libraries.
+- */
+- bprm.called_set_creds = 1;
++ res = kernel_read(bprm.file, bprm.buf, BINPRM_BUF_SIZE, &pos);
+
+- res = prepare_binprm(&bprm);
+-
+- if (!res)
++ if (res >= 0)
+ res = load_flat_file(&bprm, libs, id, NULL);
+
+- abort_creds(bprm.cred);
+-
+-out:
+ allow_write_access(bprm.file);
+ fput(bprm.file);
+
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index 9f69e83810ca..2de6e87e5ee5 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -18,7 +18,7 @@
+
+ #define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+-static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS;
++static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
+ static unsigned int dataserver_retrans;
+
+ static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 4ac811e1a26c..37c7ed0dc820 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -448,7 +448,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ * a program is not able to use ptrace(2) in that case. It is
+ * safe because the task has stopped executing permanently.
+ */
+- if (permitted && (task->flags & PF_DUMPCORE)) {
++ if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) {
+ if (try_get_task_stack(task)) {
+ eip = KSTK_EIP(task);
+ esp = KSTK_ESP(task);
+diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
+index fcb61b4659b3..8666fe7f35d7 100644
+--- a/include/asm-generic/futex.h
++++ b/include/asm-generic/futex.h
+@@ -23,7 +23,9 @@
+ *
+ * Return:
+ * 0 - On success
+- * <0 - On error
++ * -EFAULT - User access resulted in a page fault
++ * -EAGAIN - Atomic operation was unable to complete due to contention
++ * -ENOSYS - Operation not supported
+ */
+ static inline int
+ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+@@ -85,7 +87,9 @@ out_pagefault_enable:
+ *
+ * Return:
+ * 0 - On success
+- * <0 - On error
++ * -EFAULT - User access resulted in a page fault
++ * -EAGAIN - Atomic operation was unable to complete due to contention
++ * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
+ */
+ static inline int
+ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index d4b39caf081d..e260f000b9ac 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -123,6 +123,11 @@ static inline void *bio_data(struct bio *bio)
+ return NULL;
+ }
+
++static inline bool bio_full(struct bio *bio)
++{
++ return bio->bi_vcnt >= bio->bi_max_vecs;
++}
++
+ /*
+ * will die
+ */
+@@ -459,6 +464,10 @@ void bio_chain(struct bio *, struct bio *);
+ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
+ extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
+ unsigned int, unsigned int);
++bool __bio_try_merge_page(struct bio *bio, struct page *page,
++ unsigned int len, unsigned int off);
++void __bio_add_page(struct bio *bio, struct page *page,
++ unsigned int len, unsigned int off);
+ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
+ struct rq_map_data;
+ extern struct bio *bio_map_user_iov(struct request_queue *,
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 67c3934fb9ed..a704d032713b 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -119,10 +119,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ # define ASM_UNREACHABLE
+ #endif
+ #ifndef unreachable
+-# define unreachable() do { \
+- annotate_unreachable(); \
+- __builtin_unreachable(); \
+-} while (0)
++# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
+ #endif
+
+ /*
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 127a69b8b192..f370a0f43005 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2308,6 +2308,9 @@ static int __init mitigations_parse_cmdline(char *arg)
+ cpu_mitigations = CPU_MITIGATIONS_AUTO;
+ else if (!strcmp(arg, "auto,nosmt"))
+ cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
++ else
++ pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
++ arg);
+
+ return 0;
+ }
+diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
+index 3ea65cdff30d..4ad967453b6f 100644
+--- a/kernel/trace/trace_branch.c
++++ b/kernel/trace/trace_branch.c
+@@ -205,8 +205,6 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ int expect, int is_constant)
+ {
+- unsigned long flags = user_access_save();
+-
+ /* A constant is always correct */
+ if (is_constant) {
+ f->constant++;
+@@ -225,8 +223,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ f->data.correct++;
+ else
+ f->data.incorrect++;
+-
+- user_access_restore(flags);
+ }
+ EXPORT_SYMBOL(ftrace_likely_update);
+
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 6ca0225335eb..a37cfa88669e 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -305,7 +305,7 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
+ else {
+ nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
+ *nodes);
+- pol->w.cpuset_mems_allowed = tmp;
++ pol->w.cpuset_mems_allowed = *nodes;
+ }
+
+ if (nodes_empty(tmp))
+diff --git a/mm/page_idle.c b/mm/page_idle.c
+index e412a63b2b74..504684181827 100644
+--- a/mm/page_idle.c
++++ b/mm/page_idle.c
+@@ -136,7 +136,7 @@ static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
+
+ end_pfn = pfn + count * BITS_PER_BYTE;
+ if (end_pfn > max_pfn)
+- end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
++ end_pfn = max_pfn;
+
+ for (; pfn < end_pfn; pfn++) {
+ bit = pfn % BITMAP_CHUNK_BITS;
+@@ -181,7 +181,7 @@ static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
+
+ end_pfn = pfn + count * BITS_PER_BYTE;
+ if (end_pfn > max_pfn)
+- end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
++ end_pfn = max_pfn;
+
+ for (; pfn < end_pfn; pfn++) {
+ bit = pfn % BITMAP_CHUNK_BITS;
+diff --git a/net/9p/protocol.c b/net/9p/protocol.c
+index 766d1ef4640a..1885403c9a3e 100644
+--- a/net/9p/protocol.c
++++ b/net/9p/protocol.c
+@@ -622,13 +622,19 @@ int p9dirent_read(struct p9_client *clnt, char *buf, int len,
+ if (ret) {
+ p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
+ trace_9p_protocol_dump(clnt, &fake_pdu);
+- goto out;
++ return ret;
+ }
+
+- strcpy(dirent->d_name, nameptr);
++ ret = strscpy(dirent->d_name, nameptr, sizeof(dirent->d_name));
++ if (ret < 0) {
++ p9_debug(P9_DEBUG_ERROR,
++ "On the wire dirent name too long: %s\n",
++ nameptr);
++ kfree(nameptr);
++ return ret;
++ }
+ kfree(nameptr);
+
+-out:
+ return fake_pdu.offset;
+ }
+ EXPORT_SYMBOL(p9dirent_read);
+diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
+index 38aa6345bdfa..9c0c894b56f8 100644
+--- a/net/9p/trans_common.c
++++ b/net/9p/trans_common.c
+@@ -14,6 +14,7 @@
+
+ #include <linux/mm.h>
+ #include <linux/module.h>
++#include "trans_common.h"
+
+ /**
+ * p9_release_req_pages - Release pages after the transaction.
+diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
+index f58467a49090..16a4a31f16e0 100644
+--- a/net/9p/trans_rdma.c
++++ b/net/9p/trans_rdma.c
+@@ -276,8 +276,7 @@ p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
+ case RDMA_CM_EVENT_DISCONNECTED:
+ if (rdma)
+ rdma->state = P9_RDMA_CLOSED;
+- if (c)
+- c->status = Disconnected;
++ c->status = Disconnected;
+ break;
+
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+@@ -476,7 +475,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
+
+ err = post_recv(client, rpl_context);
+ if (err) {
+- p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
++ p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err);
+ goto recv_error;
+ }
+ /* remove posted receive buffer from request structure */
+@@ -545,7 +544,7 @@ dont_need_post_recv:
+ recv_error:
+ kfree(rpl_context);
+ spin_lock_irqsave(&rdma->req_lock, flags);
+- if (rdma->state < P9_RDMA_CLOSING) {
++ if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
+ rdma->state = P9_RDMA_CLOSING;
+ spin_unlock_irqrestore(&rdma->req_lock, flags);
+ rdma_disconnect(rdma->cm_id);
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index c10bdf63eae7..389eb635ec2c 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -392,8 +392,8 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
+ unsigned int max_rings, max_ring_order, len = 0;
+
+ versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
+- if (!len)
+- return -EINVAL;
++ if (IS_ERR(versions))
++ return PTR_ERR(versions);
+ if (strcmp(versions, "1")) {
+ kfree(versions);
+ return -EINVAL;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index a88579589946..c8d39092e8bf 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1358,9 +1358,6 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+ {
+ u32 meminfo[SK_MEMINFO_VARS];
+
+- if (get_user(len, optlen))
+- return -EFAULT;
+-
+ sk_get_meminfo(sk, meminfo);
+
+ len = min_t(unsigned int, len, sizeof(meminfo));
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 115d9fd413e2..53a11894f9e4 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -202,7 +202,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
+ }
+ sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
+ iph->saddr, iph->daddr,
+- skb->dev->ifindex, sdif);
++ dif, sdif);
+ }
+ out:
+ read_unlock(&raw_v4_hashinfo.lock);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index b89920c0f226..54343dc29cb4 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -563,7 +563,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
+ struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
+ __be16 sport, __be16 dport)
+ {
+- return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
++ const struct iphdr *iph = ip_hdr(skb);
++
++ return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
++ iph->daddr, dport, inet_iif(skb),
++ inet_sdif(skb), &udp_table, NULL);
+ }
+ EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
+
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 8d185a0fc5af..9f7bfeb90fb0 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -308,7 +308,7 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+
+ return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
+ &iph->daddr, dport, inet6_iif(skb),
+- inet6_sdif(skb), &udp_table, skb);
++ inet6_sdif(skb), &udp_table, NULL);
+ }
+ EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
+
+@@ -506,7 +506,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ struct net *net = dev_net(skb->dev);
+
+ sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
+- inet6_iif(skb), 0, udptable, skb);
++ inet6_iif(skb), 0, udptable, NULL);
+ if (!sk) {
+ __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+ ICMP6_MIB_INERRORS);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a968e81d4c81..047ee7ff7038 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2438,6 +2438,9 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
+
+ ts = __packet_set_timestamp(po, ph, skb);
+ __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
++
++ if (!packet_read_pending(&po->tx_ring))
++ complete(&po->skb_completion);
+ }
+
+ sock_wfree(skb);
+@@ -2632,7 +2635,7 @@ static int tpacket_parse_header(struct packet_sock *po, void *frame,
+
+ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ {
+- struct sk_buff *skb;
++ struct sk_buff *skb = NULL;
+ struct net_device *dev;
+ struct virtio_net_hdr *vnet_hdr = NULL;
+ struct sockcm_cookie sockc;
+@@ -2647,6 +2650,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ int len_sum = 0;
+ int status = TP_STATUS_AVAILABLE;
+ int hlen, tlen, copylen = 0;
++ long timeo = 0;
+
+ mutex_lock(&po->pg_vec_lock);
+
+@@ -2693,12 +2697,21 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
+ size_max = dev->mtu + reserve + VLAN_HLEN;
+
++ reinit_completion(&po->skb_completion);
++
+ do {
+ ph = packet_current_frame(po, &po->tx_ring,
+ TP_STATUS_SEND_REQUEST);
+ if (unlikely(ph == NULL)) {
+- if (need_wait && need_resched())
+- schedule();
++ if (need_wait && skb) {
++ timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
++ timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
++ if (timeo <= 0) {
++ err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
++ goto out_put;
++ }
++ }
++ /* check for additional frames */
+ continue;
+ }
+
+@@ -3252,6 +3265,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
+ sock_init_data(sock, sk);
+
+ po = pkt_sk(sk);
++ init_completion(&po->skb_completion);
+ sk->sk_family = PF_PACKET;
+ po->num = proto;
+ po->xmit = dev_queue_xmit;
+@@ -4340,7 +4354,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ req3->tp_sizeof_priv ||
+ req3->tp_feature_req_word) {
+ err = -EINVAL;
+- goto out;
++ goto out_free_pg_vec;
+ }
+ }
+ break;
+@@ -4404,6 +4418,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ prb_shutdown_retire_blk_timer(po, rb_queue);
+ }
+
++out_free_pg_vec:
+ if (pg_vec)
+ free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index 3bb7c5fb3bff..c70a2794456f 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -128,6 +128,7 @@ struct packet_sock {
+ unsigned int tp_hdrlen;
+ unsigned int tp_reserve;
+ unsigned int tp_tstamp;
++ struct completion skb_completion;
+ struct net_device __rcu *cached_dev;
+ int (*xmit)(struct sk_buff *skb);
+ struct packet_type prot_hook ____cacheline_aligned_in_smp;
+diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
+index ee1e601a0b11..5d4079ef3de6 100644
+--- a/net/sctp/endpointola.c
++++ b/net/sctp/endpointola.c
+@@ -126,10 +126,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
+ /* Initialize the bind addr area */
+ sctp_bind_addr_init(&ep->base.bind_addr, 0);
+
+- /* Remember who we are attached to. */
+- ep->base.sk = sk;
+- sock_hold(ep->base.sk);
+-
+ /* Create the lists of associations. */
+ INIT_LIST_HEAD(&ep->asocs);
+
+@@ -167,6 +163,10 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
+ ep->prsctp_enable = net->sctp.prsctp_enable;
+ ep->reconf_enable = net->sctp.reconf_enable;
+
++ /* Remember who we are attached to. */
++ ep->base.sk = sk;
++ sock_hold(ep->base.sk);
++
+ return ep;
+
+ nomem_hmacs:
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index 7561e64c220e..67ac10434ba2 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -128,7 +128,7 @@ static int __init tipc_init(void)
+ if (err)
+ goto out_sysctl;
+
+- err = register_pernet_subsys(&tipc_net_ops);
++ err = register_pernet_device(&tipc_net_ops);
+ if (err)
+ goto out_pernet;
+
+@@ -136,7 +136,7 @@ static int __init tipc_init(void)
+ if (err)
+ goto out_socket;
+
+- err = register_pernet_subsys(&tipc_topsrv_net_ops);
++ err = register_pernet_device(&tipc_topsrv_net_ops);
+ if (err)
+ goto out_pernet_topsrv;
+
+@@ -147,11 +147,11 @@ static int __init tipc_init(void)
+ pr_info("Started in single node mode\n");
+ return 0;
+ out_bearer:
+- unregister_pernet_subsys(&tipc_topsrv_net_ops);
++ unregister_pernet_device(&tipc_topsrv_net_ops);
+ out_pernet_topsrv:
+ tipc_socket_stop();
+ out_socket:
+- unregister_pernet_subsys(&tipc_net_ops);
++ unregister_pernet_device(&tipc_net_ops);
+ out_pernet:
+ tipc_unregister_sysctl();
+ out_sysctl:
+@@ -166,9 +166,9 @@ out_netlink:
+ static void __exit tipc_exit(void)
+ {
+ tipc_bearer_cleanup();
+- unregister_pernet_subsys(&tipc_topsrv_net_ops);
++ unregister_pernet_device(&tipc_topsrv_net_ops);
+ tipc_socket_stop();
+- unregister_pernet_subsys(&tipc_net_ops);
++ unregister_pernet_device(&tipc_net_ops);
+ tipc_netlink_stop();
+ tipc_netlink_compat_stop();
+ tipc_unregister_sysctl();
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index aa75bc8b158f..41954ed7ff51 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -436,7 +436,11 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
+ if (!bearer)
+ return -EMSGSIZE;
+
+- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
++ len = TLV_GET_DATA_LEN(msg->req);
++ if (len <= 0)
++ return -EINVAL;
++
++ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
+@@ -528,7 +532,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
+
+ name = (char *)TLV_DATA(msg->req);
+
+- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++ len = TLV_GET_DATA_LEN(msg->req);
++ if (len <= 0)
++ return -EINVAL;
++
++ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
+@@ -806,7 +814,11 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
+ if (!link)
+ return -EMSGSIZE;
+
+- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++ len = TLV_GET_DATA_LEN(msg->req);
++ if (len <= 0)
++ return -EINVAL;
++
++ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index e3cff9d6c092..de011fdd7964 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -174,7 +174,6 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
+ goto tx_error;
+ }
+
+- skb->dev = rt->dst.dev;
+ ttl = ip4_dst_hoplimit(&rt->dst);
+ udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
+ dst->ipv4.s_addr, 0, ttl, 0, src->port,
+@@ -193,10 +192,9 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
+ if (err)
+ goto tx_error;
+ ttl = ip6_dst_hoplimit(ndst);
+- err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
+- ndst->dev, &src->ipv6,
+- &dst->ipv6, 0, ttl, 0, src->port,
+- dst->port, false);
++ err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
++ &src->ipv6, &dst->ipv6, 0, ttl, 0,
++ src->port, dst->port, false);
+ #endif
+ }
+ return err;
+diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
+index a0f7ed2b869b..3c364798093b 100644
+--- a/tools/perf/builtin-help.c
++++ b/tools/perf/builtin-help.c
+@@ -189,7 +189,7 @@ static void add_man_viewer(const char *name)
+ while (*p)
+ p = &((*p)->next);
+ *p = zalloc(sizeof(**p) + len + 1);
+- strncpy((*p)->name, name, len);
++ strcpy((*p)->name, name);
+ }
+
+ static int supported_man_viewer(const char *name, size_t len)
+diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c
+index 4ca799aadb4e..93d6b7240285 100644
+--- a/tools/perf/ui/tui/helpline.c
++++ b/tools/perf/ui/tui/helpline.c
+@@ -24,7 +24,7 @@ static void tui_helpline__push(const char *msg)
+ SLsmg_set_color(0);
+ SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
+ SLsmg_refresh();
+- strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
++ strlcpy(ui_helpline__current, msg, sz);
+ }
+
+ static int tui_helpline__show(const char *format, va_list ap)
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 696f2654826b..f11cead6a151 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -3171,7 +3171,7 @@ perf_event__synthesize_event_update_name(struct perf_tool *tool,
+ if (ev == NULL)
+ return -ENOMEM;
+
+- strncpy(ev->data, evsel->name, len);
++ strlcpy(ev->data, evsel->name, len + 1);
+ err = process(tool, (union perf_event*) ev, NULL, NULL);
+ free(ev);
+ return err;