Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sat, 27 Apr 2019 17:36:54
Message-Id: 1556386585.9a14f331fb8aa8fdc24e235cd198f4d86616db6e.mpagano@gentoo
1 commit: 9a14f331fb8aa8fdc24e235cd198f4d86616db6e
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Apr 27 17:36:25 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat Apr 27 17:36:25 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9a14f331
7
8 Linux patch 4.19.37
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1036_linux-4.19.37.patch | 5470 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 5474 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 043542a..2acba8b 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -187,6 +187,10 @@ Patch: 1035_linux-4.19.36.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.19.36
23
24 +Patch: 1036_linux-4.19.37.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.19.37
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1036_linux-4.19.37.patch b/1036_linux-4.19.37.patch
33 new file mode 100644
34 index 0000000..0a0d611
35 --- /dev/null
36 +++ b/1036_linux-4.19.37.patch
37 @@ -0,0 +1,5470 @@
38 +diff --git a/Makefile b/Makefile
39 +index 3fac08f6a11e..7b495cad8c2e 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 36
47 ++SUBLEVEL = 37
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +@@ -661,8 +661,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
52 + KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
53 +
54 + ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
55 +-KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
56 +-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
57 ++KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
58 + else
59 + ifdef CONFIG_PROFILE_ALL_BRANCHES
60 + KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
61 +diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
62 +index b447b4db423a..fd1e722f3821 100644
63 +--- a/arch/arm64/include/asm/futex.h
64 ++++ b/arch/arm64/include/asm/futex.h
65 +@@ -50,7 +50,7 @@ do { \
66 + static inline int
67 + arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
68 + {
69 +- int oldval, ret, tmp;
70 ++ int oldval = 0, ret, tmp;
71 + u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
72 +
73 + pagefault_disable();
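
For context: a minimal user-space sketch of the hazard the "oldval = 0" initializer above guards against. If the underlying operation can fault before writing its output, a later read of that output consumes indeterminate stack data; pre-initializing gives every path a defined value. The helper and values here are hypothetical, not the kernel code.

#include <stdio.h>

/* Stand-in for an operation that may fault before writing *out. */
static int op_may_fault(int *out, int fault)
{
	if (fault)
		return -14;	/* -EFAULT: *out left unwritten */
	*out = 42;
	return 0;
}

static int fetch_op(int *oval, int fault)
{
	int oldval = 0;	/* the fix: defined even on the fault path */
	int ret = op_may_fault(&oldval, fault);

	*oval = oldval;	/* consumed unconditionally; without "= 0" this
			 * would copy indeterminate data on a fault */
	return ret;
}

int main(void)
{
	int v;
	int ret = fetch_op(&v, 0);

	printf("ok:    ret=%d v=%d\n", ret, v);	/* ret=0, v=42 */
	ret = fetch_op(&v, 1);
	printf("fault: ret=%d v=%d\n", ret, v);	/* ret=-14, v=0 (defined) */
	return 0;
}
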
74 +diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
75 +index 3b6e70d085da..8457cdd47f75 100644
76 +--- a/arch/x86/crypto/poly1305-avx2-x86_64.S
77 ++++ b/arch/x86/crypto/poly1305-avx2-x86_64.S
78 +@@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2)
79 + vpaddq t2,t1,t1
80 + vmovq t1x,d4
81 +
82 ++ # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
83 ++ # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
84 ++ # amount. Careful: we must not assume the carry bits 'd0 >> 26',
85 ++ # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
86 ++ # integers. It's true in a single-block implementation, but not here.
87 ++
88 + # d1 += d0 >> 26
89 + mov d0,%rax
90 + shr $26,%rax
91 +@@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2)
92 + # h0 += (d4 >> 26) * 5
93 + mov d4,%rax
94 + shr $26,%rax
95 +- lea (%eax,%eax,4),%eax
96 +- add %eax,%ebx
97 ++ lea (%rax,%rax,4),%rax
98 ++ add %rax,%rbx
99 + # h4 = d4 & 0x3ffffff
100 + mov d4,%rax
101 + and $0x3ffffff,%eax
102 + mov %eax,h4
103 +
104 + # h1 += h0 >> 26
105 +- mov %ebx,%eax
106 +- shr $26,%eax
107 ++ mov %rbx,%rax
108 ++ shr $26,%rax
109 + add %eax,h1
110 + # h0 = h0 & 0x3ffffff
111 + andl $0x3ffffff,%ebx
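
To make the lea/add hunks above concrete: in the multi-block path the 64-bit accumulators grow far beyond what a single block produces, so the carry "(d4 >> 26) * 5" no longer fits in 32 bits and the old 32-bit lea/add silently truncated it. A small user-space illustration with a hypothetical accumulator value (the same reasoning applies to the SSE2 hunks below):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical accumulator: in the 2/4-block paths d4 is a
	 * 64-bit sum of products, so (d4 >> 26) * 5 can need ~39 bits. */
	uint64_t d4 = (uint64_t)1 << 62;

	uint64_t carry64 = (d4 >> 26) * 5;	/* fixed: 64-bit lea/add */
	uint32_t carry32 = (uint32_t)carry64;	/* old: 32-bit registers */

	printf("64-bit carry: %llu\n", (unsigned long long)carry64);
	printf("32-bit carry: %u\n", carry32);	/* 0 here: carry lost */
	return 0;
}
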
112 +diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
113 +index c88c670cb5fc..5851c7418fb7 100644
114 +--- a/arch/x86/crypto/poly1305-sse2-x86_64.S
115 ++++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
116 +@@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2)
117 + # h0 += (d4 >> 26) * 5
118 + mov d4,%rax
119 + shr $26,%rax
120 +- lea (%eax,%eax,4),%eax
121 +- add %eax,%ebx
122 ++ lea (%rax,%rax,4),%rax
123 ++ add %rax,%rbx
124 + # h4 = d4 & 0x3ffffff
125 + mov d4,%rax
126 + and $0x3ffffff,%eax
127 + mov %eax,h4
128 +
129 + # h1 += h0 >> 26
130 +- mov %ebx,%eax
131 +- shr $26,%eax
132 ++ mov %rbx,%rax
133 ++ shr $26,%rax
134 + add %eax,h1
135 + # h0 = h0 & 0x3ffffff
136 + andl $0x3ffffff,%ebx
137 +@@ -520,6 +520,12 @@ ENTRY(poly1305_2block_sse2)
138 + paddq t2,t1
139 + movq t1,d4
140 +
141 ++ # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
142 ++ # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
143 ++ # amount. Careful: we must not assume the carry bits 'd0 >> 26',
144 ++ # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
145 ++ # integers. It's true in a single-block implementation, but not here.
146 ++
147 + # d1 += d0 >> 26
148 + mov d0,%rax
149 + shr $26,%rax
150 +@@ -558,16 +564,16 @@ ENTRY(poly1305_2block_sse2)
151 + # h0 += (d4 >> 26) * 5
152 + mov d4,%rax
153 + shr $26,%rax
154 +- lea (%eax,%eax,4),%eax
155 +- add %eax,%ebx
156 ++ lea (%rax,%rax,4),%rax
157 ++ add %rax,%rbx
158 + # h4 = d4 & 0x3ffffff
159 + mov d4,%rax
160 + and $0x3ffffff,%eax
161 + mov %eax,h4
162 +
163 + # h1 += h0 >> 26
164 +- mov %ebx,%eax
165 +- shr $26,%eax
166 ++ mov %rbx,%rax
167 ++ shr $26,%rax
168 + add %eax,h1
169 + # h0 = h0 & 0x3ffffff
170 + andl $0x3ffffff,%ebx
171 +diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
172 +index 3e5dd85b019a..263af6312329 100644
173 +--- a/arch/x86/events/amd/core.c
174 ++++ b/arch/x86/events/amd/core.c
175 +@@ -117,22 +117,39 @@ static __initconst const u64 amd_hw_cache_event_ids
176 + };
177 +
178 + /*
179 +- * AMD Performance Monitor K7 and later.
180 ++ * AMD Performance Monitor K7 and later, up to and including Family 16h:
181 + */
182 + static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
183 + {
184 +- [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
185 +- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
186 +- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
187 +- [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
188 +- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
189 +- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
190 +- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
191 +- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
192 ++ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
193 ++ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
194 ++ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
195 ++ [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
196 ++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
197 ++ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
198 ++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
199 ++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
200 ++};
201 ++
202 ++/*
203 ++ * AMD Performance Monitor Family 17h and later:
204 ++ */
205 ++static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
206 ++{
207 ++ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
208 ++ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
209 ++ [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
210 ++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
211 ++ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
212 ++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
213 ++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
214 + };
215 +
216 + static u64 amd_pmu_event_map(int hw_event)
217 + {
218 ++ if (boot_cpu_data.x86 >= 0x17)
219 ++ return amd_f17h_perfmon_event_map[hw_event];
220 ++
221 + return amd_perfmon_event_map[hw_event];
222 + }
223 +
224 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
225 +index 12453cf7c11b..3dd204d1dd19 100644
226 +--- a/arch/x86/events/intel/core.c
227 ++++ b/arch/x86/events/intel/core.c
228 +@@ -3014,7 +3014,7 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
229 + flags &= ~PERF_SAMPLE_TIME;
230 + if (!event->attr.exclude_kernel)
231 + flags &= ~PERF_SAMPLE_REGS_USER;
232 +- if (event->attr.sample_regs_user & ~PEBS_REGS)
233 ++ if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
234 + flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
235 + return flags;
236 + }
237 +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
238 +index 42a36280d168..05659c7b43d4 100644
239 +--- a/arch/x86/events/perf_event.h
240 ++++ b/arch/x86/events/perf_event.h
241 +@@ -96,25 +96,25 @@ struct amd_nb {
242 + PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
243 + PERF_SAMPLE_PERIOD)
244 +
245 +-#define PEBS_REGS \
246 +- (PERF_REG_X86_AX | \
247 +- PERF_REG_X86_BX | \
248 +- PERF_REG_X86_CX | \
249 +- PERF_REG_X86_DX | \
250 +- PERF_REG_X86_DI | \
251 +- PERF_REG_X86_SI | \
252 +- PERF_REG_X86_SP | \
253 +- PERF_REG_X86_BP | \
254 +- PERF_REG_X86_IP | \
255 +- PERF_REG_X86_FLAGS | \
256 +- PERF_REG_X86_R8 | \
257 +- PERF_REG_X86_R9 | \
258 +- PERF_REG_X86_R10 | \
259 +- PERF_REG_X86_R11 | \
260 +- PERF_REG_X86_R12 | \
261 +- PERF_REG_X86_R13 | \
262 +- PERF_REG_X86_R14 | \
263 +- PERF_REG_X86_R15)
264 ++#define PEBS_GP_REGS \
265 ++ ((1ULL << PERF_REG_X86_AX) | \
266 ++ (1ULL << PERF_REG_X86_BX) | \
267 ++ (1ULL << PERF_REG_X86_CX) | \
268 ++ (1ULL << PERF_REG_X86_DX) | \
269 ++ (1ULL << PERF_REG_X86_DI) | \
270 ++ (1ULL << PERF_REG_X86_SI) | \
271 ++ (1ULL << PERF_REG_X86_SP) | \
272 ++ (1ULL << PERF_REG_X86_BP) | \
273 ++ (1ULL << PERF_REG_X86_IP) | \
274 ++ (1ULL << PERF_REG_X86_FLAGS) | \
275 ++ (1ULL << PERF_REG_X86_R8) | \
276 ++ (1ULL << PERF_REG_X86_R9) | \
277 ++ (1ULL << PERF_REG_X86_R10) | \
278 ++ (1ULL << PERF_REG_X86_R11) | \
279 ++ (1ULL << PERF_REG_X86_R12) | \
280 ++ (1ULL << PERF_REG_X86_R13) | \
281 ++ (1ULL << PERF_REG_X86_R14) | \
282 ++ (1ULL << PERF_REG_X86_R15))
283 +
284 + /*
285 + * Per register state.
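
The macro rewrite above fixes a units error: the PERF_REG_X86_* constants are register indices, not single-bit masks, so OR-ing them together produced a meaningless small number rather than a register bitmap. A reduced sketch with a hypothetical four-entry enum:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical subset of the perf register enum: sequential indices. */
enum { REG_AX, REG_BX, REG_CX, REG_DX };

int main(void)
{
	/* Old macro OR'ed the index values themselves: 0|1|2|3 == 3. */
	uint64_t wrong = REG_AX | REG_BX | REG_CX | REG_DX;

	/* Fixed macro builds an actual bitmap: bits 0..3 set. */
	uint64_t right = (1ULL << REG_AX) | (1ULL << REG_BX) |
			 (1ULL << REG_CX) | (1ULL << REG_DX);

	printf("wrong: %#llx\n", (unsigned long long)wrong);	/* 0x3 */
	printf("right: %#llx\n", (unsigned long long)right);	/* 0xf */
	return 0;
}
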
286 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
287 +index 1e0c4c74195c..e5258bd64200 100644
288 +--- a/arch/x86/kernel/cpu/bugs.c
289 ++++ b/arch/x86/kernel/cpu/bugs.c
290 +@@ -272,7 +272,7 @@ static const struct {
291 + const char *option;
292 + enum spectre_v2_user_cmd cmd;
293 + bool secure;
294 +-} v2_user_options[] __initdata = {
295 ++} v2_user_options[] __initconst = {
296 + { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
297 + { "off", SPECTRE_V2_USER_CMD_NONE, false },
298 + { "on", SPECTRE_V2_USER_CMD_FORCE, true },
299 +@@ -407,7 +407,7 @@ static const struct {
300 + const char *option;
301 + enum spectre_v2_mitigation_cmd cmd;
302 + bool secure;
303 +-} mitigation_options[] __initdata = {
304 ++} mitigation_options[] __initconst = {
305 + { "off", SPECTRE_V2_CMD_NONE, false },
306 + { "on", SPECTRE_V2_CMD_FORCE, true },
307 + { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
308 +@@ -643,7 +643,7 @@ static const char * const ssb_strings[] = {
309 + static const struct {
310 + const char *option;
311 + enum ssb_mitigation_cmd cmd;
312 +-} ssb_mitigation_options[] __initdata = {
313 ++} ssb_mitigation_options[] __initconst = {
314 + { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
315 + { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
316 + { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
317 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
318 +index b0d1e81c96bb..acb901b43ce4 100644
319 +--- a/arch/x86/kernel/kprobes/core.c
320 ++++ b/arch/x86/kernel/kprobes/core.c
321 +@@ -569,6 +569,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
322 + unsigned long *sara = stack_addr(regs);
323 +
324 + ri->ret_addr = (kprobe_opcode_t *) *sara;
325 ++ ri->fp = sara;
326 +
327 + /* Replace the return addr with trampoline addr */
328 + *sara = (unsigned long) &kretprobe_trampoline;
329 +@@ -759,15 +760,21 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
330 + unsigned long flags, orig_ret_address = 0;
331 + unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
332 + kprobe_opcode_t *correct_ret_addr = NULL;
333 ++ void *frame_pointer;
334 ++ bool skipped = false;
335 +
336 + INIT_HLIST_HEAD(&empty_rp);
337 + kretprobe_hash_lock(current, &head, &flags);
338 + /* fixup registers */
339 + #ifdef CONFIG_X86_64
340 + regs->cs = __KERNEL_CS;
341 ++ /* On x86-64, we use pt_regs->sp as the return address holder. */
342 ++ frame_pointer = &regs->sp;
343 + #else
344 + regs->cs = __KERNEL_CS | get_kernel_rpl();
345 + regs->gs = 0;
346 ++ /* On x86-32, we use pt_regs->flags as the return address holder. */
347 ++ frame_pointer = &regs->flags;
348 + #endif
349 + regs->ip = trampoline_address;
350 + regs->orig_ax = ~0UL;
351 +@@ -789,8 +796,25 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
352 + if (ri->task != current)
353 + /* another task is sharing our hash bucket */
354 + continue;
355 ++ /*
356 ++ * Return probes must be pushed onto this hash list in the
357 ++ * correct order (same as return order) so that they can be
358 ++ * popped correctly. However, if we find one pushed in the
359 ++ * wrong order, we have found a function which should not be
360 ++ * probed, because the wrong-order entry was pushed while
361 ++ * another kretprobe was itself being processed.
362 ++ */
363 ++ if (ri->fp != frame_pointer) {
364 ++ if (!skipped)
365 ++ pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
366 ++ skipped = true;
367 ++ continue;
368 ++ }
369 +
370 + orig_ret_address = (unsigned long)ri->ret_addr;
371 ++ if (skipped)
372 ++ pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
373 ++ ri->rp->kp.addr);
374 +
375 + if (orig_ret_address != trampoline_address)
376 + /*
377 +@@ -808,6 +832,8 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
378 + if (ri->task != current)
379 + /* another task is sharing our hash bucket */
380 + continue;
381 ++ if (ri->fp != frame_pointer)
382 ++ continue;
383 +
384 + orig_ret_address = (unsigned long)ri->ret_addr;
385 + if (ri->rp && ri->rp->handler) {
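
A toy rendering of the ordering check introduced above: kretprobe instances are expected to come back in LIFO order, so an instance whose saved frame pointer does not match the frame currently returning is skipped rather than popped. The list contents and addresses below are hypothetical; the kernel uses a per-task hash list, not an array:

#include <stdio.h>

struct instance {
	const void *fp;
	const char *func;
};

int main(void)
{
	/* Hypothetical hash-list contents, most recently pushed first. */
	struct instance list[] = {
		{ (const void *)0x2000, "mis-stacked" },
		{ (const void *)0x1000, "outer" },
	};
	const void *frame_pointer = (const void *)0x1000; /* returning now */

	for (unsigned i = 0; i < sizeof(list) / sizeof(list[0]); i++) {
		if (list[i].fp != frame_pointer) {
			printf("skip %s (stacked incorrectly)\n",
			       list[i].func);
			continue;
		}
		printf("handle return of %s\n", list[i].func);
		break;
	}
	return 0;
}
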
386 +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
387 +index 7d31192296a8..b8b08e61ac73 100644
388 +--- a/arch/x86/kernel/process.c
389 ++++ b/arch/x86/kernel/process.c
390 +@@ -411,6 +411,8 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
391 + u64 msr = x86_spec_ctrl_base;
392 + bool updmsr = false;
393 +
394 ++ lockdep_assert_irqs_disabled();
395 ++
396 + /*
397 + * If TIF_SSBD is different, select the proper mitigation
398 + * method. Note that if SSBD mitigation is disabled or permanently
399 +@@ -462,10 +464,12 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
400 +
401 + void speculation_ctrl_update(unsigned long tif)
402 + {
403 ++ unsigned long flags;
404 ++
405 + /* Forced update. Make sure all relevant TIF flags are different */
406 +- preempt_disable();
407 ++ local_irq_save(flags);
408 + __speculation_ctrl_update(~tif, tif);
409 +- preempt_enable();
410 ++ local_irq_restore(flags);
411 + }
412 +
413 + /* Called from seccomp/prctl update */
414 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
415 +index 106482da6388..860bd271619d 100644
416 +--- a/arch/x86/kvm/emulate.c
417 ++++ b/arch/x86/kvm/emulate.c
418 +@@ -2575,15 +2575,13 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
419 + * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
420 + * supports long mode.
421 + */
422 +- cr4 = ctxt->ops->get_cr(ctxt, 4);
423 + if (emulator_has_longmode(ctxt)) {
424 + struct desc_struct cs_desc;
425 +
426 + /* Zero CR4.PCIDE before CR0.PG. */
427 +- if (cr4 & X86_CR4_PCIDE) {
428 ++ cr4 = ctxt->ops->get_cr(ctxt, 4);
429 ++ if (cr4 & X86_CR4_PCIDE)
430 + ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
431 +- cr4 &= ~X86_CR4_PCIDE;
432 +- }
433 +
434 + /* A 32-bit code segment is required to clear EFER.LMA. */
435 + memset(&cs_desc, 0, sizeof(cs_desc));
436 +@@ -2597,13 +2595,16 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
437 + if (cr0 & X86_CR0_PE)
438 + ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
439 +
440 +- /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
441 +- if (cr4 & X86_CR4_PAE)
442 +- ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
443 ++ if (emulator_has_longmode(ctxt)) {
444 ++ /* Clear CR4.PAE before clearing EFER.LME. */
445 ++ cr4 = ctxt->ops->get_cr(ctxt, 4);
446 ++ if (cr4 & X86_CR4_PAE)
447 ++ ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
448 +
449 +- /* And finally go back to 32-bit mode. */
450 +- efer = 0;
451 +- ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
452 ++ /* And finally go back to 32-bit mode. */
453 ++ efer = 0;
454 ++ ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
455 ++ }
456 +
457 + smbase = ctxt->ops->get_smbase(ctxt);
458 +
459 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
460 +index 6dc72804fe6e..813cb60eb401 100644
461 +--- a/arch/x86/kvm/svm.c
462 ++++ b/arch/x86/kvm/svm.c
463 +@@ -2679,6 +2679,7 @@ static int npf_interception(struct vcpu_svm *svm)
464 + static int db_interception(struct vcpu_svm *svm)
465 + {
466 + struct kvm_run *kvm_run = svm->vcpu.run;
467 ++ struct kvm_vcpu *vcpu = &svm->vcpu;
468 +
469 + if (!(svm->vcpu.guest_debug &
470 + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
471 +@@ -2689,6 +2690,8 @@ static int db_interception(struct vcpu_svm *svm)
472 +
473 + if (svm->nmi_singlestep) {
474 + disable_nmi_singlestep(svm);
475 ++ /* Make sure we check for pending NMIs upon entry */
476 ++ kvm_make_request(KVM_REQ_EVENT, vcpu);
477 + }
478 +
479 + if (svm->vcpu.guest_debug &
480 +@@ -4493,14 +4496,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
481 + kvm_lapic_reg_write(apic, APIC_ICR, icrl);
482 + break;
483 + case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
484 ++ int i;
485 ++ struct kvm_vcpu *vcpu;
486 ++ struct kvm *kvm = svm->vcpu.kvm;
487 + struct kvm_lapic *apic = svm->vcpu.arch.apic;
488 +
489 + /*
490 +- * Update ICR high and low, then emulate sending IPI,
491 +- * which is handled when writing APIC_ICR.
492 ++ * At this point, we expect that the AVIC HW has already
493 ++ * set the appropriate IRR bits on the valid target
494 ++ * vcpus. So, we just need to kick the appropriate vcpu.
495 + */
496 +- kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
497 +- kvm_lapic_reg_write(apic, APIC_ICR, icrl);
498 ++ kvm_for_each_vcpu(i, vcpu, kvm) {
499 ++ bool m = kvm_apic_match_dest(vcpu, apic,
500 ++ icrl & KVM_APIC_SHORT_MASK,
501 ++ GET_APIC_DEST_FIELD(icrh),
502 ++ icrl & KVM_APIC_DEST_MASK);
503 ++
504 ++ if (m && !avic_vcpu_is_running(vcpu))
505 ++ kvm_vcpu_wake_up(vcpu);
506 ++ }
507 + break;
508 + }
509 + case AVIC_IPI_FAILURE_INVALID_TARGET:
510 +diff --git a/crypto/testmgr.h b/crypto/testmgr.h
511 +index 862ee1d04263..74e1454cae1e 100644
512 +--- a/crypto/testmgr.h
513 ++++ b/crypto/testmgr.h
514 +@@ -5592,7 +5592,49 @@ static const struct hash_testvec poly1305_tv_template[] = {
515 + .psize = 80,
516 + .digest = "\x13\x00\x00\x00\x00\x00\x00\x00"
517 + "\x00\x00\x00\x00\x00\x00\x00\x00",
518 +- },
519 ++ }, { /* Regression test for overflow in AVX2 implementation */
520 ++ .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff"
521 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
522 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
523 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
524 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
525 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
526 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
527 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
528 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
529 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
530 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
531 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
532 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
533 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
534 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
535 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
536 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
537 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
538 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
539 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
540 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
541 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
542 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
543 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
544 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
545 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
546 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
547 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
548 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
549 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
550 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
551 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
552 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
553 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
554 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
555 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
556 ++ "\xff\xff\xff\xff\xff\xff\xff\xff"
557 ++ "\xff\xff\xff\xff",
558 ++ .psize = 300,
559 ++ .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
560 ++ "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
561 ++ }
562 + };
563 +
564 + /*
565 +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
566 +index df2175b1169a..925dbc751322 100644
567 +--- a/drivers/acpi/nfit/core.c
568 ++++ b/drivers/acpi/nfit/core.c
569 +@@ -1298,19 +1298,30 @@ static ssize_t scrub_show(struct device *dev,
570 + struct device_attribute *attr, char *buf)
571 + {
572 + struct nvdimm_bus_descriptor *nd_desc;
573 ++ struct acpi_nfit_desc *acpi_desc;
574 + ssize_t rc = -ENXIO;
575 ++ bool busy;
576 +
577 + device_lock(dev);
578 + nd_desc = dev_get_drvdata(dev);
579 +- if (nd_desc) {
580 +- struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
581 ++ if (!nd_desc) {
582 ++ device_unlock(dev);
583 ++ return rc;
584 ++ }
585 ++ acpi_desc = to_acpi_desc(nd_desc);
586 +
587 +- mutex_lock(&acpi_desc->init_mutex);
588 +- rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
589 +- acpi_desc->scrub_busy
590 +- && !acpi_desc->cancel ? "+\n" : "\n");
591 +- mutex_unlock(&acpi_desc->init_mutex);
592 ++ mutex_lock(&acpi_desc->init_mutex);
593 ++ busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
594 ++ && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
595 ++ rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
596 ++ /* Allow an admin to poll the busy state at a higher rate */
597 ++ if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
598 ++ &acpi_desc->scrub_flags)) {
599 ++ acpi_desc->scrub_tmo = 1;
600 ++ mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
601 + }
602 ++
603 ++ mutex_unlock(&acpi_desc->init_mutex);
604 + device_unlock(dev);
605 + return rc;
606 + }
607 +@@ -2529,7 +2540,10 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc,
608 +
609 + if (rc < 0)
610 + return rc;
611 +- return cmd_rc;
612 ++ if (cmd_rc < 0)
613 ++ return cmd_rc;
614 ++ set_bit(ARS_VALID, &acpi_desc->scrub_flags);
615 ++ return 0;
616 + }
617 +
618 + static int ars_continue(struct acpi_nfit_desc *acpi_desc)
619 +@@ -2539,11 +2553,11 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc)
620 + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
621 + struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
622 +
623 +- memset(&ars_start, 0, sizeof(ars_start));
624 +- ars_start.address = ars_status->restart_address;
625 +- ars_start.length = ars_status->restart_length;
626 +- ars_start.type = ars_status->type;
627 +- ars_start.flags = acpi_desc->ars_start_flags;
628 ++ ars_start = (struct nd_cmd_ars_start) {
629 ++ .address = ars_status->restart_address,
630 ++ .length = ars_status->restart_length,
631 ++ .type = ars_status->type,
632 ++ };
633 + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
634 + sizeof(ars_start), &cmd_rc);
635 + if (rc < 0)
636 +@@ -2622,6 +2636,17 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
637 + */
638 + if (ars_status->out_length < 44)
639 + return 0;
640 ++
641 ++ /*
642 ++ * Ignore potentially stale results that are only refreshed
643 ++ * after a start-ARS event.
644 ++ */
645 ++ if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
646 ++ dev_dbg(acpi_desc->dev, "skip %d stale records\n",
647 ++ ars_status->num_records);
648 ++ return 0;
649 ++ }
650 ++
651 + for (i = 0; i < ars_status->num_records; i++) {
652 + /* only process full records */
653 + if (ars_status->out_length
654 +@@ -2960,7 +2985,7 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
655 +
656 + lockdep_assert_held(&acpi_desc->init_mutex);
657 +
658 +- if (acpi_desc->cancel)
659 ++ if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
660 + return 0;
661 +
662 + if (query_rc == -EBUSY) {
663 +@@ -3034,7 +3059,7 @@ static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
664 + {
665 + lockdep_assert_held(&acpi_desc->init_mutex);
666 +
667 +- acpi_desc->scrub_busy = 1;
668 ++ set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
669 + /* note this should only be set from within the workqueue */
670 + if (tmo)
671 + acpi_desc->scrub_tmo = tmo;
672 +@@ -3050,7 +3075,7 @@ static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
673 + {
674 + lockdep_assert_held(&acpi_desc->init_mutex);
675 +
676 +- acpi_desc->scrub_busy = 0;
677 ++ clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
678 + acpi_desc->scrub_count++;
679 + if (acpi_desc->scrub_count_state)
680 + sysfs_notify_dirent(acpi_desc->scrub_count_state);
681 +@@ -3071,6 +3096,7 @@ static void acpi_nfit_scrub(struct work_struct *work)
682 + else
683 + notify_ars_done(acpi_desc);
684 + memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
685 ++ clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
686 + mutex_unlock(&acpi_desc->init_mutex);
687 + }
688 +
689 +@@ -3105,6 +3131,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
690 + struct nfit_spa *nfit_spa;
691 + int rc;
692 +
693 ++ set_bit(ARS_VALID, &acpi_desc->scrub_flags);
694 + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
695 + switch (nfit_spa_type(nfit_spa->spa)) {
696 + case NFIT_SPA_VOLATILE:
697 +@@ -3322,7 +3349,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
698 + struct nfit_spa *nfit_spa;
699 +
700 + mutex_lock(&acpi_desc->init_mutex);
701 +- if (acpi_desc->cancel) {
702 ++ if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
703 + mutex_unlock(&acpi_desc->init_mutex);
704 + return 0;
705 + }
706 +@@ -3401,7 +3428,7 @@ void acpi_nfit_shutdown(void *data)
707 + mutex_unlock(&acpi_desc_lock);
708 +
709 + mutex_lock(&acpi_desc->init_mutex);
710 +- acpi_desc->cancel = 1;
711 ++ set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
712 + cancel_delayed_work_sync(&acpi_desc->dwork);
713 + mutex_unlock(&acpi_desc->init_mutex);
714 +
715 +diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
716 +index 02c10de50386..68848fc4b7c9 100644
717 +--- a/drivers/acpi/nfit/nfit.h
718 ++++ b/drivers/acpi/nfit/nfit.h
719 +@@ -181,6 +181,13 @@ struct nfit_mem {
720 + bool has_lsw;
721 + };
722 +
723 ++enum scrub_flags {
724 ++ ARS_BUSY,
725 ++ ARS_CANCEL,
726 ++ ARS_VALID,
727 ++ ARS_POLL,
728 ++};
729 ++
730 + struct acpi_nfit_desc {
731 + struct nvdimm_bus_descriptor nd_desc;
732 + struct acpi_table_header acpi_header;
733 +@@ -194,7 +201,6 @@ struct acpi_nfit_desc {
734 + struct list_head idts;
735 + struct nvdimm_bus *nvdimm_bus;
736 + struct device *dev;
737 +- u8 ars_start_flags;
738 + struct nd_cmd_ars_status *ars_status;
739 + struct nfit_spa *scrub_spa;
740 + struct delayed_work dwork;
741 +@@ -203,8 +209,7 @@ struct acpi_nfit_desc {
742 + unsigned int max_ars;
743 + unsigned int scrub_count;
744 + unsigned int scrub_mode;
745 +- unsigned int scrub_busy:1;
746 +- unsigned int cancel:1;
747 ++ unsigned long scrub_flags;
748 + unsigned long dimm_cmd_force_en;
749 + unsigned long bus_cmd_force_en;
750 + unsigned long bus_nfit_cmd_force_en;
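
On the nfit.h change above: replacing the scrub_busy:1/cancel:1 bitfields with bit numbers in a single "unsigned long" lets the driver use the kernel's atomic set_bit()/test_bit() helpers, since writes to adjacent C bitfields are non-atomic read-modify-writes of the same word. A user-space sketch of the same pattern, using C11 atomics as a stand-in for the kernel bitops:

#include <stdatomic.h>
#include <stdio.h>

enum scrub_flags { ARS_BUSY, ARS_CANCEL, ARS_VALID, ARS_POLL };

static atomic_ulong scrub_flags;

static void set_flag(int bit)
{
	atomic_fetch_or(&scrub_flags, 1UL << bit);
}

static void clear_flag(int bit)
{
	atomic_fetch_and(&scrub_flags, ~(1UL << bit));
}

static int test_flag(int bit)
{
	return !!(atomic_load(&scrub_flags) & (1UL << bit));
}

int main(void)
{
	set_flag(ARS_BUSY);
	set_flag(ARS_VALID);
	clear_flag(ARS_BUSY);	/* safe even if another thread flips bits */
	printf("busy=%d valid=%d\n",
	       test_flag(ARS_BUSY), test_flag(ARS_VALID));
	return 0;
}
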
751 +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
752 +index d5f7a12e350e..3fb297b5fb17 100644
753 +--- a/drivers/char/ipmi/ipmi_msghandler.c
754 ++++ b/drivers/char/ipmi/ipmi_msghandler.c
755 +@@ -213,6 +213,9 @@ struct ipmi_user {
756 +
757 + /* Does this interface receive IPMI events? */
758 + bool gets_events;
759 ++
760 ++ /* Free must run in process context for RCU cleanup. */
761 ++ struct work_struct remove_work;
762 + };
763 +
764 + static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
765 +@@ -1078,6 +1081,15 @@ static int intf_err_seq(struct ipmi_smi *intf,
766 + }
767 +
768 +
769 ++static void free_user_work(struct work_struct *work)
770 ++{
771 ++ struct ipmi_user *user = container_of(work, struct ipmi_user,
772 ++ remove_work);
773 ++
774 ++ cleanup_srcu_struct(&user->release_barrier);
775 ++ kfree(user);
776 ++}
777 ++
778 + int ipmi_create_user(unsigned int if_num,
779 + const struct ipmi_user_hndl *handler,
780 + void *handler_data,
781 +@@ -1121,6 +1133,8 @@ int ipmi_create_user(unsigned int if_num,
782 + goto out_kfree;
783 +
784 + found:
785 ++ INIT_WORK(&new_user->remove_work, free_user_work);
786 ++
787 + rv = init_srcu_struct(&new_user->release_barrier);
788 + if (rv)
789 + goto out_kfree;
790 +@@ -1183,8 +1197,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
791 + static void free_user(struct kref *ref)
792 + {
793 + struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
794 +- cleanup_srcu_struct(&user->release_barrier);
795 +- kfree(user);
796 ++
797 ++ /* SRCU cleanup must happen in task context. */
798 ++ schedule_work(&user->remove_work);
799 + }
800 +
801 + static void _ipmi_destroy_user(struct ipmi_user *user)
802 +diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
803 +index 1b8fa9de2cac..41b9f6c92da7 100644
804 +--- a/drivers/char/tpm/eventlog/tpm2.c
805 ++++ b/drivers/char/tpm/eventlog/tpm2.c
806 +@@ -37,8 +37,8 @@
807 + *
808 + * Returns size of the event. If it is an invalid event, returns 0.
809 + */
810 +-static int calc_tpm2_event_size(struct tcg_pcr_event2 *event,
811 +- struct tcg_pcr_event *event_header)
812 ++static size_t calc_tpm2_event_size(struct tcg_pcr_event2 *event,
813 ++ struct tcg_pcr_event *event_header)
814 + {
815 + struct tcg_efi_specid_event *efispecid;
816 + struct tcg_event_field *event_field;
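
On the int -> size_t change above: an event size computed from untrusted log data can exceed INT_MAX, and converting it to a signed int truncates (typically to a negative value), defeating later bounds checks. A minimal illustration with a hypothetical 3 GiB size:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t event_size = (size_t)3 << 30;	/* hypothetical 3 GiB */
	int as_int = (int)event_size;		/* implementation-defined,
						 * typically negative */

	printf("size_t: %zu\n", event_size);
	printf("int:    %d\n", as_int);
	return 0;
}
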
817 +diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
818 +index 32a8e27c5382..cc4e642d3180 100644
819 +--- a/drivers/char/tpm/tpm_i2c_atmel.c
820 ++++ b/drivers/char/tpm/tpm_i2c_atmel.c
821 +@@ -69,6 +69,10 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
822 + if (status < 0)
823 + return status;
824 +
825 ++ /* The upper layer does not support incomplete sends. */
826 ++ if (status != len)
827 ++ return -E2BIG;
828 ++
829 + return 0;
830 + }
831 +
832 +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
833 +index e70a0d4d6db4..c963eec58c70 100644
834 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
835 ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
836 +@@ -164,6 +164,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
837 + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
838 + L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
839 + }
840 ++ WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
841 +
842 + tmp = mmVM_L2_CNTL4_DEFAULT;
843 + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
844 +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
845 +index f841accc2c00..f77c81db161b 100644
846 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
847 ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
848 +@@ -730,7 +730,8 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
849 + }
850 +
851 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
852 +- if (!(flags & TTM_PAGE_FLAG_DMA32)) {
853 ++ if (!(flags & TTM_PAGE_FLAG_DMA32) &&
854 ++ (npages - i) >= HPAGE_PMD_NR) {
855 + for (j = 0; j < HPAGE_PMD_NR; ++j)
856 + if (p++ != pages[i + j])
857 + break;
858 +@@ -759,7 +760,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
859 + unsigned max_size, n2free;
860 +
861 + spin_lock_irqsave(&huge->lock, irq_flags);
862 +- while (i < npages) {
863 ++ while ((npages - i) >= HPAGE_PMD_NR) {
864 + struct page *p = pages[i];
865 + unsigned j;
866 +
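
The two "(npages - i) >= HPAGE_PMD_NR" guards above are bounds checks: the inner loop compares HPAGE_PMD_NR consecutive entries starting at pages[i], so without the guard a short tail would be read past the end of the array. A trivial sketch with hypothetical sizes:

#include <stdio.h>

#define HPAGE_PMD_NR 512	/* 2 MiB / 4 KiB; typical x86-64 value */

int main(void)
{
	unsigned npages = 600, i = 200;	/* hypothetical batch position */

	if ((npages - i) >= HPAGE_PMD_NR)
		printf("scan %d entries for a contiguous huge page\n",
		       HPAGE_PMD_NR);
	else
		printf("only %u pages left: take the 4K path\n", npages - i);
	return 0;
}
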
867 +diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
868 +index 1d645c9ab417..cac262a912c1 100644
869 +--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
870 ++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
871 +@@ -337,7 +337,8 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
872 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
873 + },
874 + .driver_data = (void *)&sipodev_desc
875 +- }
876 ++ },
877 ++ { } /* Terminate list */
878 + };
879 +
880 +
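
The one-line "{ }" terminator added above matters because DMI match tables are walked until an empty sentinel entry; without it the walk runs off the end of the array. A reduced user-space sketch of the same convention (struct and entries hypothetical):

#include <stdio.h>

struct quirk {
	const char *product;
	int data;
};

static const struct quirk table[] = {
	{ "FlexBook edge11", 1 },
	{ "Some other device", 2 },
	{ }	/* terminator: NULL product stops the walk below */
};

int main(void)
{
	for (const struct quirk *q = table; q->product; q++)
		printf("%s -> %d\n", q->product, q->data);
	/* Without the { } entry, the loop would dereference whatever
	 * memory follows the array: an out-of-bounds read. */
	return 0;
}
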
881 +diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
882 +index 471caa5323e4..e5fdca74a630 100644
883 +--- a/drivers/iio/accel/kxcjk-1013.c
884 ++++ b/drivers/iio/accel/kxcjk-1013.c
885 +@@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev)
886 +
887 + mutex_lock(&data->mutex);
888 + ret = kxcjk1013_set_mode(data, OPERATION);
889 ++ if (ret == 0)
890 ++ ret = kxcjk1013_set_range(data, data->range);
891 + mutex_unlock(&data->mutex);
892 +
893 + return ret;
894 +diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
895 +index fc9510716ac7..ae2a5097f449 100644
896 +--- a/drivers/iio/adc/ad_sigma_delta.c
897 ++++ b/drivers/iio/adc/ad_sigma_delta.c
898 +@@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
899 + if (sigma_delta->info->has_registers) {
900 + data[0] = reg << sigma_delta->info->addr_shift;
901 + data[0] |= sigma_delta->info->read_mask;
902 ++ data[0] |= sigma_delta->comm;
903 + spi_message_add_tail(&t[0], &m);
904 + }
905 + spi_message_add_tail(&t[1], &m);
906 +diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
907 +index 75d2f73582a3..596841a3c4db 100644
908 +--- a/drivers/iio/adc/at91_adc.c
909 ++++ b/drivers/iio/adc/at91_adc.c
910 +@@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev,
911 + ret = wait_event_interruptible_timeout(st->wq_data_avail,
912 + st->done,
913 + msecs_to_jiffies(1000));
914 +- if (ret == 0)
915 +- ret = -ETIMEDOUT;
916 +- if (ret < 0) {
917 +- mutex_unlock(&st->lock);
918 +- return ret;
919 +- }
920 +-
921 +- *val = st->last_value;
922 +
923 ++ /* Disable interrupts, regardless of whether the ADC
924 ++ * conversion was successful or not
925 ++ */
926 + at91_adc_writel(st, AT91_ADC_CHDR,
927 + AT91_ADC_CH(chan->channel));
928 + at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
929 +
930 +- st->last_value = 0;
931 +- st->done = false;
932 ++ if (ret > 0) {
933 ++ /* a valid conversion took place */
934 ++ *val = st->last_value;
935 ++ st->last_value = 0;
936 ++ st->done = false;
937 ++ ret = IIO_VAL_INT;
938 ++ } else if (ret == 0) {
939 ++ /* conversion timeout */
940 ++ dev_err(&idev->dev, "ADC Channel %d timeout.\n",
941 ++ chan->channel);
942 ++ ret = -ETIMEDOUT;
943 ++ }
944 ++
945 + mutex_unlock(&st->lock);
946 +- return IIO_VAL_INT;
947 ++ return ret;
948 +
949 + case IIO_CHAN_INFO_SCALE:
950 + *val = st->vref_mv;
951 +diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
952 +index e049323f209a..71dd635fce2d 100644
953 +--- a/drivers/iio/chemical/bme680.h
954 ++++ b/drivers/iio/chemical/bme680.h
955 +@@ -2,11 +2,9 @@
956 + #ifndef BME680_H_
957 + #define BME680_H_
958 +
959 +-#define BME680_REG_CHIP_I2C_ID 0xD0
960 +-#define BME680_REG_CHIP_SPI_ID 0x50
961 ++#define BME680_REG_CHIP_ID 0xD0
962 + #define BME680_CHIP_ID_VAL 0x61
963 +-#define BME680_REG_SOFT_RESET_I2C 0xE0
964 +-#define BME680_REG_SOFT_RESET_SPI 0x60
965 ++#define BME680_REG_SOFT_RESET 0xE0
966 + #define BME680_CMD_SOFTRESET 0xB6
967 + #define BME680_REG_STATUS 0x73
968 + #define BME680_SPI_MEM_PAGE_BIT BIT(4)
969 +diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
970 +index 7d9bb62baa3f..b2db59812755 100644
971 +--- a/drivers/iio/chemical/bme680_core.c
972 ++++ b/drivers/iio/chemical/bme680_core.c
973 +@@ -63,9 +63,23 @@ struct bme680_data {
974 + s32 t_fine;
975 + };
976 +
977 ++static const struct regmap_range bme680_volatile_ranges[] = {
978 ++ regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB),
979 ++ regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS),
980 ++ regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG),
981 ++};
982 ++
983 ++static const struct regmap_access_table bme680_volatile_table = {
984 ++ .yes_ranges = bme680_volatile_ranges,
985 ++ .n_yes_ranges = ARRAY_SIZE(bme680_volatile_ranges),
986 ++};
987 ++
988 + const struct regmap_config bme680_regmap_config = {
989 + .reg_bits = 8,
990 + .val_bits = 8,
991 ++ .max_register = 0xef,
992 ++ .volatile_table = &bme680_volatile_table,
993 ++ .cache_type = REGCACHE_RBTREE,
994 + };
995 + EXPORT_SYMBOL(bme680_regmap_config);
996 +
997 +@@ -330,6 +344,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
998 + s64 var1, var2, var3;
999 + s16 calc_temp;
1000 +
1001 ++ /* If the calibration is invalid, attempt to reload it */
1002 ++ if (!calib->par_t2)
1003 ++ bme680_read_calib(data, calib);
1004 ++
1005 + var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
1006 + var2 = (var1 * calib->par_t2) >> 11;
1007 + var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
1008 +@@ -591,8 +609,7 @@ static int bme680_gas_config(struct bme680_data *data)
1009 + return ret;
1010 + }
1011 +
1012 +-static int bme680_read_temp(struct bme680_data *data,
1013 +- int *val, int *val2)
1014 ++static int bme680_read_temp(struct bme680_data *data, int *val)
1015 + {
1016 + struct device *dev = regmap_get_device(data->regmap);
1017 + int ret;
1018 +@@ -625,10 +642,9 @@ static int bme680_read_temp(struct bme680_data *data,
1019 + * compensate_press/compensate_humid to get compensated
1020 + * pressure/humidity readings.
1021 + */
1022 +- if (val && val2) {
1023 +- *val = comp_temp;
1024 +- *val2 = 100;
1025 +- return IIO_VAL_FRACTIONAL;
1026 ++ if (val) {
1027 ++ *val = comp_temp * 10; /* Centidegrees to millidegrees */
1028 ++ return IIO_VAL_INT;
1029 + }
1030 +
1031 + return ret;
1032 +@@ -643,7 +659,7 @@ static int bme680_read_press(struct bme680_data *data,
1033 + s32 adc_press;
1034 +
1035 + /* Read and compensate temperature to get a reading of t_fine */
1036 +- ret = bme680_read_temp(data, NULL, NULL);
1037 ++ ret = bme680_read_temp(data, NULL);
1038 + if (ret < 0)
1039 + return ret;
1040 +
1041 +@@ -676,7 +692,7 @@ static int bme680_read_humid(struct bme680_data *data,
1042 + u32 comp_humidity;
1043 +
1044 + /* Read and compensate temperature to get a reading of t_fine */
1045 +- ret = bme680_read_temp(data, NULL, NULL);
1046 ++ ret = bme680_read_temp(data, NULL);
1047 + if (ret < 0)
1048 + return ret;
1049 +
1050 +@@ -769,7 +785,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
1051 + case IIO_CHAN_INFO_PROCESSED:
1052 + switch (chan->type) {
1053 + case IIO_TEMP:
1054 +- return bme680_read_temp(data, val, val2);
1055 ++ return bme680_read_temp(data, val);
1056 + case IIO_PRESSURE:
1057 + return bme680_read_press(data, val, val2);
1058 + case IIO_HUMIDITYRELATIVE:
1059 +@@ -905,8 +921,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
1060 + {
1061 + struct iio_dev *indio_dev;
1062 + struct bme680_data *data;
1063 ++ unsigned int val;
1064 + int ret;
1065 +
1066 ++ ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
1067 ++ BME680_CMD_SOFTRESET);
1068 ++ if (ret < 0) {
1069 ++ dev_err(dev, "Failed to reset chip\n");
1070 ++ return ret;
1071 ++ }
1072 ++
1073 ++ ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val);
1074 ++ if (ret < 0) {
1075 ++ dev_err(dev, "Error reading chip ID\n");
1076 ++ return ret;
1077 ++ }
1078 ++
1079 ++ if (val != BME680_CHIP_ID_VAL) {
1080 ++ dev_err(dev, "Wrong chip ID, got %x expected %x\n",
1081 ++ val, BME680_CHIP_ID_VAL);
1082 ++ return -ENODEV;
1083 ++ }
1084 ++
1085 + indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
1086 + if (!indio_dev)
1087 + return -ENOMEM;
1088 +diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
1089 +index 06d4be539d2e..cfc4449edf1b 100644
1090 +--- a/drivers/iio/chemical/bme680_i2c.c
1091 ++++ b/drivers/iio/chemical/bme680_i2c.c
1092 +@@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
1093 + {
1094 + struct regmap *regmap;
1095 + const char *name = NULL;
1096 +- unsigned int val;
1097 +- int ret;
1098 +
1099 + regmap = devm_regmap_init_i2c(client, &bme680_regmap_config);
1100 + if (IS_ERR(regmap)) {
1101 +@@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
1102 + return PTR_ERR(regmap);
1103 + }
1104 +
1105 +- ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C,
1106 +- BME680_CMD_SOFTRESET);
1107 +- if (ret < 0) {
1108 +- dev_err(&client->dev, "Failed to reset chip\n");
1109 +- return ret;
1110 +- }
1111 +-
1112 +- ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val);
1113 +- if (ret < 0) {
1114 +- dev_err(&client->dev, "Error reading I2C chip ID\n");
1115 +- return ret;
1116 +- }
1117 +-
1118 +- if (val != BME680_CHIP_ID_VAL) {
1119 +- dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n",
1120 +- val, BME680_CHIP_ID_VAL);
1121 +- return -ENODEV;
1122 +- }
1123 +-
1124 + if (id)
1125 + name = id->name;
1126 +
1127 +diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
1128 +index c9fb05e8d0b9..881778e55d38 100644
1129 +--- a/drivers/iio/chemical/bme680_spi.c
1130 ++++ b/drivers/iio/chemical/bme680_spi.c
1131 +@@ -11,28 +11,93 @@
1132 +
1133 + #include "bme680.h"
1134 +
1135 ++struct bme680_spi_bus_context {
1136 ++ struct spi_device *spi;
1137 ++ u8 current_page;
1138 ++};
1139 ++
1140 ++/*
1141 ++ * In SPI mode there are only 7 address bits; a "page" register determines
1142 ++ * which part of the 8-bit range is active. This function looks at the address
1143 ++ * and writes the page selection bit if needed.
1144 ++ */
1145 ++static int bme680_regmap_spi_select_page(
1146 ++ struct bme680_spi_bus_context *ctx, u8 reg)
1147 ++{
1148 ++ struct spi_device *spi = ctx->spi;
1149 ++ int ret;
1150 ++ u8 buf[2];
1151 ++ u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */
1152 ++
1153 ++ if (page == ctx->current_page)
1154 ++ return 0;
1155 ++
1156 ++ /*
1157 ++ * Data sheet claims we're only allowed to change bit 4, so we must do
1158 ++ * a read-modify-write on each and every page select
1159 ++ */
1160 ++ buf[0] = BME680_REG_STATUS;
1161 ++ ret = spi_write_then_read(spi, buf, 1, buf + 1, 1);
1162 ++ if (ret < 0) {
1163 ++ dev_err(&spi->dev, "failed to set page %u\n", page);
1164 ++ return ret;
1165 ++ }
1166 ++
1167 ++ buf[0] = BME680_REG_STATUS;
1168 ++ if (page)
1169 ++ buf[1] |= BME680_SPI_MEM_PAGE_BIT;
1170 ++ else
1171 ++ buf[1] &= ~BME680_SPI_MEM_PAGE_BIT;
1172 ++
1173 ++ ret = spi_write(spi, buf, 2);
1174 ++ if (ret < 0) {
1175 ++ dev_err(&spi->dev, "failed to set page %u\n", page);
1176 ++ return ret;
1177 ++ }
1178 ++
1179 ++ ctx->current_page = page;
1180 ++
1181 ++ return 0;
1182 ++}
1183 ++
1184 + static int bme680_regmap_spi_write(void *context, const void *data,
1185 + size_t count)
1186 + {
1187 +- struct spi_device *spi = context;
1188 ++ struct bme680_spi_bus_context *ctx = context;
1189 ++ struct spi_device *spi = ctx->spi;
1190 ++ int ret;
1191 + u8 buf[2];
1192 +
1193 + memcpy(buf, data, 2);
1194 ++
1195 ++ ret = bme680_regmap_spi_select_page(ctx, buf[0]);
1196 ++ if (ret)
1197 ++ return ret;
1198 ++
1199 + /*
1200 + * The SPI register address (= full register address without bit 7)
1201 + * and the write command (bit7 = RW = '0')
1202 + */
1203 + buf[0] &= ~0x80;
1204 +
1205 +- return spi_write_then_read(spi, buf, 2, NULL, 0);
1206 ++ return spi_write(spi, buf, 2);
1207 + }
1208 +
1209 + static int bme680_regmap_spi_read(void *context, const void *reg,
1210 + size_t reg_size, void *val, size_t val_size)
1211 + {
1212 +- struct spi_device *spi = context;
1213 ++ struct bme680_spi_bus_context *ctx = context;
1214 ++ struct spi_device *spi = ctx->spi;
1215 ++ int ret;
1216 ++ u8 addr = *(const u8 *)reg;
1217 ++
1218 ++ ret = bme680_regmap_spi_select_page(ctx, addr);
1219 ++ if (ret)
1220 ++ return ret;
1221 +
1222 +- return spi_write_then_read(spi, reg, reg_size, val, val_size);
1223 ++ addr |= 0x80; /* bit7 = RW = '1' */
1224 ++
1225 ++ return spi_write_then_read(spi, &addr, 1, val, val_size);
1226 + }
1227 +
1228 + static struct regmap_bus bme680_regmap_bus = {
1229 +@@ -45,8 +110,8 @@ static struct regmap_bus bme680_regmap_bus = {
1230 + static int bme680_spi_probe(struct spi_device *spi)
1231 + {
1232 + const struct spi_device_id *id = spi_get_device_id(spi);
1233 ++ struct bme680_spi_bus_context *bus_context;
1234 + struct regmap *regmap;
1235 +- unsigned int val;
1236 + int ret;
1237 +
1238 + spi->bits_per_word = 8;
1239 +@@ -56,45 +121,21 @@ static int bme680_spi_probe(struct spi_device *spi)
1240 + return ret;
1241 + }
1242 +
1243 ++ bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL);
1244 ++ if (!bus_context)
1245 ++ return -ENOMEM;
1246 ++
1247 ++ bus_context->spi = spi;
1248 ++ bus_context->current_page = 0xff; /* Undefined on warm boot */
1249 ++
1250 + regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus,
1251 +- &spi->dev, &bme680_regmap_config);
1252 ++ bus_context, &bme680_regmap_config);
1253 + if (IS_ERR(regmap)) {
1254 + dev_err(&spi->dev, "Failed to register spi regmap %d\n",
1255 + (int)PTR_ERR(regmap));
1256 + return PTR_ERR(regmap);
1257 + }
1258 +
1259 +- ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI,
1260 +- BME680_CMD_SOFTRESET);
1261 +- if (ret < 0) {
1262 +- dev_err(&spi->dev, "Failed to reset chip\n");
1263 +- return ret;
1264 +- }
1265 +-
1266 +- /* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */
1267 +- ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val);
1268 +- if (ret < 0) {
1269 +- dev_err(&spi->dev, "Error reading SPI chip ID\n");
1270 +- return ret;
1271 +- }
1272 +-
1273 +- if (val != BME680_CHIP_ID_VAL) {
1274 +- dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n",
1275 +- val, BME680_CHIP_ID_VAL);
1276 +- return -ENODEV;
1277 +- }
1278 +- /*
1279 +- * select Page 1 of spi_mem_page to enable access to
1280 +- * to registers from address 0x00 to 0x7F.
1281 +- */
1282 +- ret = regmap_write_bits(regmap, BME680_REG_STATUS,
1283 +- BME680_SPI_MEM_PAGE_BIT,
1284 +- BME680_SPI_MEM_PAGE_1_VAL);
1285 +- if (ret < 0) {
1286 +- dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n");
1287 +- return ret;
1288 +- }
1289 +-
1290 + return bme680_core_probe(&spi->dev, regmap, id->name);
1291 + }
1292 +
1293 +diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
1294 +index 89cb0066a6e0..8d76afb87d87 100644
1295 +--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
1296 ++++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
1297 +@@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
1298 + * Do not use IIO_DEGREE_TO_RAD to avoid precision
1299 + * loss. Round to the nearest integer.
1300 + */
1301 +- *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
1302 +- *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
1303 +- ret = IIO_VAL_FRACTIONAL;
1304 ++ *val = 0;
1305 ++ *val2 = div_s64(val64 * 3141592653ULL,
1306 ++ 180 << (CROS_EC_SENSOR_BITS - 1));
1307 ++ ret = IIO_VAL_INT_PLUS_NANO;
1308 + break;
1309 + case MOTIONSENSE_TYPE_MAG:
1310 + /*
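
A worked version of the fixed-point conversion above, with a hypothetical raw value and SENSOR_BITS assumed to match the driver's CROS_EC_SENSOR_BITS of 16: expressing the scale as integer nano-radians via a single 64-bit multiply keeps precision that the previous IIO_VAL_FRACTIONAL form lost.

#include <stdint.h>
#include <stdio.h>

#define SENSOR_BITS 16	/* assumed value of CROS_EC_SENSOR_BITS */

int main(void)
{
	int64_t val64 = 1000;	/* hypothetical raw angular-velocity scale */

	/* *val2 in nano-rad/s: val64 * pi*1e9 / (180 * 2^(bits-1)) */
	int64_t nano = val64 * 3141592653LL / (180LL << (SENSOR_BITS - 1));

	printf("scale = 0.%09lld rad/s per count\n", (long long)nano);
	return 0;
}
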
1311 +diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
1312 +index 8b5aad4c32d9..30dc2775cbfb 100644
1313 +--- a/drivers/iio/dac/mcp4725.c
1314 ++++ b/drivers/iio/dac/mcp4725.c
1315 +@@ -98,6 +98,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
1316 +
1317 + inoutbuf[0] = 0x60; /* write EEPROM */
1318 + inoutbuf[0] |= data->ref_mode << 3;
1319 ++ inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0;
1320 + inoutbuf[1] = data->dac_value >> 4;
1321 + inoutbuf[2] = (data->dac_value & 0xf) << 4;
1322 +
1323 +diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
1324 +index 63ca31628a93..92c07ab826eb 100644
1325 +--- a/drivers/iio/gyro/bmg160_core.c
1326 ++++ b/drivers/iio/gyro/bmg160_core.c
1327 +@@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
1328 + case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
1329 + return bmg160_get_filter(data, val);
1330 + case IIO_CHAN_INFO_SCALE:
1331 +- *val = 0;
1332 + switch (chan->type) {
1333 + case IIO_TEMP:
1334 +- *val2 = 500000;
1335 +- return IIO_VAL_INT_PLUS_MICRO;
1336 ++ *val = 500;
1337 ++ return IIO_VAL_INT;
1338 + case IIO_ANGL_VEL:
1339 + {
1340 + int i;
1341 +@@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
1342 + for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
1343 + if (bmg160_scale_table[i].dps_range ==
1344 + data->dps_range) {
1345 ++ *val = 0;
1346 + *val2 = bmg160_scale_table[i].scale;
1347 + return IIO_VAL_INT_PLUS_MICRO;
1348 + }
1349 +diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
1350 +index 77fac81a3adc..5ddebede31a6 100644
1351 +--- a/drivers/iio/gyro/mpu3050-core.c
1352 ++++ b/drivers/iio/gyro/mpu3050-core.c
1353 +@@ -29,7 +29,8 @@
1354 +
1355 + #include "mpu3050.h"
1356 +
1357 +-#define MPU3050_CHIP_ID 0x69
1358 ++#define MPU3050_CHIP_ID 0x68
1359 ++#define MPU3050_CHIP_ID_MASK 0x7E
1360 +
1361 + /*
1362 + * Register map: anything suffixed *_H is a big-endian high byte and always
1363 +@@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev,
1364 + goto err_power_down;
1365 + }
1366 +
1367 +- if (val != MPU3050_CHIP_ID) {
1368 +- dev_err(dev, "unsupported chip id %02x\n", (u8)val);
1369 ++ if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) {
1370 ++ dev_err(dev, "unsupported chip id %02x\n",
1371 ++ (u8)(val & MPU3050_CHIP_ID_MASK));
1372 + ret = -ENODEV;
1373 + goto err_power_down;
1374 + }
1375 +diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
1376 +index cd5bfe39591b..dadd921a4a30 100644
1377 +--- a/drivers/iio/industrialio-buffer.c
1378 ++++ b/drivers/iio/industrialio-buffer.c
1379 +@@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
1380 + const unsigned long *mask;
1381 + unsigned long *trialmask;
1382 +
1383 +- trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
1384 +- sizeof(*trialmask),
1385 +- GFP_KERNEL);
1386 ++ trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
1387 ++ sizeof(*trialmask), GFP_KERNEL);
1388 + if (trialmask == NULL)
1389 + return -ENOMEM;
1390 + if (!indio_dev->masklength) {
1391 +diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
1392 +index a062cfddc5af..49d4b4f1a457 100644
1393 +--- a/drivers/iio/industrialio-core.c
1394 ++++ b/drivers/iio/industrialio-core.c
1395 +@@ -1735,10 +1735,10 @@ EXPORT_SYMBOL(__iio_device_register);
1396 + **/
1397 + void iio_device_unregister(struct iio_dev *indio_dev)
1398 + {
1399 +- mutex_lock(&indio_dev->info_exist_lock);
1400 +-
1401 + cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
1402 +
1403 ++ mutex_lock(&indio_dev->info_exist_lock);
1404 ++
1405 + iio_device_unregister_debugfs(indio_dev);
1406 +
1407 + iio_disable_all_buffers(indio_dev);
1408 +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
1409 +index 628ef617bb2f..f9525d6f0bfe 100644
1410 +--- a/drivers/input/mouse/elan_i2c_core.c
1411 ++++ b/drivers/input/mouse/elan_i2c_core.c
1412 +@@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = {
1413 + { "ELAN0600", 0 },
1414 + { "ELAN0601", 0 },
1415 + { "ELAN0602", 0 },
1416 ++ { "ELAN0603", 0 },
1417 ++ { "ELAN0604", 0 },
1418 + { "ELAN0605", 0 },
1419 ++ { "ELAN0606", 0 },
1420 ++ { "ELAN0607", 0 },
1421 + { "ELAN0608", 0 },
1422 + { "ELAN0609", 0 },
1423 + { "ELAN060B", 0 },
1424 + { "ELAN060C", 0 },
1425 ++ { "ELAN060F", 0 },
1426 ++ { "ELAN0610", 0 },
1427 + { "ELAN0611", 0 },
1428 + { "ELAN0612", 0 },
1429 ++ { "ELAN0615", 0 },
1430 ++ { "ELAN0616", 0 },
1431 + { "ELAN0617", 0 },
1432 + { "ELAN0618", 0 },
1433 ++ { "ELAN0619", 0 },
1434 ++ { "ELAN061A", 0 },
1435 ++ { "ELAN061B", 0 },
1436 + { "ELAN061C", 0 },
1437 + { "ELAN061D", 0 },
1438 + { "ELAN061E", 0 },
1439 ++ { "ELAN061F", 0 },
1440 + { "ELAN0620", 0 },
1441 + { "ELAN0621", 0 },
1442 + { "ELAN0622", 0 },
1443 ++ { "ELAN0623", 0 },
1444 ++ { "ELAN0624", 0 },
1445 ++ { "ELAN0625", 0 },
1446 ++ { "ELAN0626", 0 },
1447 ++ { "ELAN0627", 0 },
1448 ++ { "ELAN0628", 0 },
1449 ++ { "ELAN0629", 0 },
1450 ++ { "ELAN062A", 0 },
1451 ++ { "ELAN062B", 0 },
1452 ++ { "ELAN062C", 0 },
1453 ++ { "ELAN062D", 0 },
1454 ++ { "ELAN0631", 0 },
1455 ++ { "ELAN0632", 0 },
1456 + { "ELAN1000", 0 },
1457 + { }
1458 + };
1459 +diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
1460 +index 8dae12b841b3..629860f7327c 100644
1461 +--- a/drivers/mmc/host/sdhci-esdhc-imx.c
1462 ++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
1463 +@@ -429,7 +429,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
1464 + val = readl(host->ioaddr + ESDHC_MIX_CTRL);
1465 + else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
1466 + /* the std tuning bits is in ACMD12_ERR for imx6sl */
1467 +- val = readl(host->ioaddr + SDHCI_ACMD12_ERR);
1468 ++ val = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
1469 + }
1470 +
1471 + if (val & ESDHC_MIX_CTRL_EXE_TUNE)
1472 +@@ -494,7 +494,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
1473 + }
1474 + writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
1475 + } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
1476 +- u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
1477 ++ u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
1478 + u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
1479 + if (val & SDHCI_CTRL_TUNED_CLK) {
1480 + v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
1481 +@@ -512,7 +512,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
1482 + v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
1483 + }
1484 +
1485 +- writel(v, host->ioaddr + SDHCI_ACMD12_ERR);
1486 ++ writel(v, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
1487 + writel(m, host->ioaddr + ESDHC_MIX_CTRL);
1488 + }
1489 + return;
1490 +@@ -957,9 +957,9 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
1491 + writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL);
1492 + writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
1493 + } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
1494 +- ctrl = readl(host->ioaddr + SDHCI_ACMD12_ERR);
1495 ++ ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
1496 + ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
1497 +- writel(ctrl, host->ioaddr + SDHCI_ACMD12_ERR);
1498 ++ writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
1499 + }
1500 + }
1501 + }
1502 +@@ -1319,7 +1319,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
1503 +
1504 + /* clear tuning bits in case ROM has set it already */
1505 + writel(0x0, host->ioaddr + ESDHC_MIX_CTRL);
1506 +- writel(0x0, host->ioaddr + SDHCI_ACMD12_ERR);
1507 ++ writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
1508 + writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
1509 + }
1510 +
1511 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1512 +index 654051e00117..c749d3dc1d36 100644
1513 +--- a/drivers/mmc/host/sdhci.c
1514 ++++ b/drivers/mmc/host/sdhci.c
1515 +@@ -82,8 +82,8 @@ void sdhci_dumpregs(struct sdhci_host *host)
1516 + SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
1517 + sdhci_readl(host, SDHCI_INT_ENABLE),
1518 + sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
1519 +- SDHCI_DUMP("AC12 err: 0x%08x | Slot int: 0x%08x\n",
1520 +- sdhci_readw(host, SDHCI_ACMD12_ERR),
1521 ++ SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
1522 ++ sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
1523 + sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
1524 + SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
1525 + sdhci_readl(host, SDHCI_CAPABILITIES),
1526 +@@ -841,6 +841,11 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
1527 + else
1528 + host->ier = (host->ier & ~dma_irqs) | pio_irqs;
1529 +
1530 ++ if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
1531 ++ host->ier |= SDHCI_INT_AUTO_CMD_ERR;
1532 ++ else
1533 ++ host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
1534 ++
1535 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1536 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1537 + }
1538 +@@ -1078,8 +1083,7 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1539 + return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1540 + ((mrq->cmd && mrq->cmd->error) ||
1541 + (mrq->sbc && mrq->sbc->error) ||
1542 +- (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
1543 +- (mrq->data->stop && mrq->data->stop->error))) ||
1544 ++ (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1545 + (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1546 + }
1547 +
1548 +@@ -1131,6 +1135,16 @@ static void sdhci_finish_data(struct sdhci_host *host)
1549 + host->data = NULL;
1550 + host->data_cmd = NULL;
1551 +
1552 ++ /*
1553 ++ * The controller needs a reset of internal state machines upon error
1554 ++ * conditions.
1555 ++ */
1556 ++ if (data->error) {
1557 ++ if (!host->cmd || host->cmd == data_cmd)
1558 ++ sdhci_do_reset(host, SDHCI_RESET_CMD);
1559 ++ sdhci_do_reset(host, SDHCI_RESET_DATA);
1560 ++ }
1561 ++
1562 + if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1563 + (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1564 + sdhci_adma_table_post(host, data);
1565 +@@ -1155,17 +1169,6 @@ static void sdhci_finish_data(struct sdhci_host *host)
1566 + if (data->stop &&
1567 + (data->error ||
1568 + !data->mrq->sbc)) {
1569 +-
1570 +- /*
1571 +- * The controller needs a reset of internal state machines
1572 +- * upon error conditions.
1573 +- */
1574 +- if (data->error) {
1575 +- if (!host->cmd || host->cmd == data_cmd)
1576 +- sdhci_do_reset(host, SDHCI_RESET_CMD);
1577 +- sdhci_do_reset(host, SDHCI_RESET_DATA);
1578 +- }
1579 +-
1580 + /*
1581 + * 'cap_cmd_during_tfr' request must not use the command line
1582 + * after mmc_command_done() has been called. It is upper layer's
1583 +@@ -2642,8 +2645,23 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
1584 + * *
1585 + \*****************************************************************************/
1586 +
1587 +-static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1588 ++static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
1589 + {
1590 ++ /* Handle auto-CMD12 error */
1591 ++ if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
1592 ++ struct mmc_request *mrq = host->data_cmd->mrq;
1593 ++ u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
1594 ++ int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
1595 ++ SDHCI_INT_DATA_TIMEOUT :
1596 ++ SDHCI_INT_DATA_CRC;
1597 ++
1598 ++ /* Treat auto-CMD12 error the same as data error */
1599 ++ if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
1600 ++ *intmask_p |= data_err_bit;
1601 ++ return;
1602 ++ }
1603 ++ }
1604 ++
1605 + if (!host->cmd) {
1606 + /*
1607 + * SDHCI recovers from errors by resetting the cmd and data
1608 +@@ -2665,20 +2683,12 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1609 + else
1610 + host->cmd->error = -EILSEQ;
1611 +
1612 +- /*
1613 +- * If this command initiates a data phase and a response
1614 +- * CRC error is signalled, the card can start transferring
1615 +- * data - the card may have received the command without
1616 +- * error. We must not terminate the mmc_request early.
1617 +- *
1618 +- * If the card did not receive the command or returned an
1619 +- * error which prevented it sending data, the data phase
1620 +- * will time out.
1621 +- */
1622 ++ /* Treat data command CRC error the same as data CRC error */
1623 + if (host->cmd->data &&
1624 + (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
1625 + SDHCI_INT_CRC) {
1626 + host->cmd = NULL;
1627 ++ *intmask_p |= SDHCI_INT_DATA_CRC;
1628 + return;
1629 + }
1630 +
1631 +@@ -2686,6 +2696,21 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1632 + return;
1633 + }
1634 +
1635 ++ /* Handle auto-CMD23 error */
1636 ++ if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
1637 ++ struct mmc_request *mrq = host->cmd->mrq;
1638 ++ u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
1639 ++ int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
1640 ++ -ETIMEDOUT :
1641 ++ -EILSEQ;
1642 ++
1643 ++ if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
1644 ++ mrq->sbc->error = err;
1645 ++ sdhci_finish_mrq(host, mrq);
1646 ++ return;
1647 ++ }
1648 ++ }
1649 ++
1650 + if (intmask & SDHCI_INT_RESPONSE)
1651 + sdhci_finish_command(host);
1652 + }
1653 +@@ -2906,7 +2931,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
1654 + }
1655 +
1656 + if (intmask & SDHCI_INT_CMD_MASK)
1657 +- sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
1658 ++ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
1659 +
1660 + if (intmask & SDHCI_INT_DATA_MASK)
1661 + sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
1662 +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
1663 +index f0bd36ce3817..0f8c4f3ccafc 100644
1664 +--- a/drivers/mmc/host/sdhci.h
1665 ++++ b/drivers/mmc/host/sdhci.h
1666 +@@ -144,14 +144,15 @@
1667 + #define SDHCI_INT_DATA_CRC 0x00200000
1668 + #define SDHCI_INT_DATA_END_BIT 0x00400000
1669 + #define SDHCI_INT_BUS_POWER 0x00800000
1670 +-#define SDHCI_INT_ACMD12ERR 0x01000000
1671 ++#define SDHCI_INT_AUTO_CMD_ERR 0x01000000
1672 + #define SDHCI_INT_ADMA_ERROR 0x02000000
1673 +
1674 + #define SDHCI_INT_NORMAL_MASK 0x00007FFF
1675 + #define SDHCI_INT_ERROR_MASK 0xFFFF8000
1676 +
1677 + #define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
1678 +- SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
1679 ++ SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \
1680 ++ SDHCI_INT_AUTO_CMD_ERR)
1681 + #define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
1682 + SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
1683 + SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
1684 +@@ -166,7 +167,11 @@
1685 +
1686 + #define SDHCI_CQE_INT_MASK (SDHCI_CQE_INT_ERR_MASK | SDHCI_INT_CQE)
1687 +
1688 +-#define SDHCI_ACMD12_ERR 0x3C
1689 ++#define SDHCI_AUTO_CMD_STATUS 0x3C
1690 ++#define SDHCI_AUTO_CMD_TIMEOUT 0x00000002
1691 ++#define SDHCI_AUTO_CMD_CRC 0x00000004
1692 ++#define SDHCI_AUTO_CMD_END_BIT 0x00000008
1693 ++#define SDHCI_AUTO_CMD_INDEX 0x00000010
1694 +
1695 + #define SDHCI_HOST_CONTROL2 0x3E
1696 + #define SDHCI_CTRL_UHS_MASK 0x0007
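[The three SDHCI hunks above rename the register at offset 0x3C from SDHCI_ACMD12_ERR to SDHCI_AUTO_CMD_STATUS and route auto-CMD12/CMD23 failures through the normal error machinery. As a minimal sketch of the decode the new bits support — using only the definitions added in the hunk above; the kernel's real mapping lives in sdhci_cmd_irq():

    #include <errno.h>
    #include <stdint.h>

    #define SDHCI_AUTO_CMD_TIMEOUT 0x00000002
    #define SDHCI_AUTO_CMD_CRC     0x00000004
    #define SDHCI_AUTO_CMD_END_BIT 0x00000008
    #define SDHCI_AUTO_CMD_INDEX   0x00000010

    /* Map the Auto CMD Status bits to an errno the same way the patch
     * does for auto-CMD23: timeout wins, anything else is a line error. */
    static int auto_cmd_status_to_errno(uint16_t status)
    {
            if (status & SDHCI_AUTO_CMD_TIMEOUT)
                    return -ETIMEDOUT;
            if (status & (SDHCI_AUTO_CMD_CRC | SDHCI_AUTO_CMD_END_BIT |
                          SDHCI_AUTO_CMD_INDEX))
                    return -EILSEQ;
            return 0;
    }

The auto-CMD12 branch makes the same distinction but promotes it to SDHCI_INT_DATA_TIMEOUT / SDHCI_INT_DATA_CRC via *intmask_p, so the failure is handled by the data-error path instead of the command path.]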
1697 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1698 +index b2c42cae3081..091b454e83fc 100644
1699 +--- a/drivers/net/bonding/bond_main.c
1700 ++++ b/drivers/net/bonding/bond_main.c
1701 +@@ -3198,8 +3198,12 @@ static int bond_netdev_event(struct notifier_block *this,
1702 + return NOTIFY_DONE;
1703 +
1704 + if (event_dev->flags & IFF_MASTER) {
1705 ++ int ret;
1706 ++
1707 + netdev_dbg(event_dev, "IFF_MASTER\n");
1708 +- return bond_master_netdev_event(event, event_dev);
1709 ++ ret = bond_master_netdev_event(event, event_dev);
1710 ++ if (ret != NOTIFY_DONE)
1711 ++ return ret;
1712 + }
1713 +
1714 + if (event_dev->flags & IFF_SLAVE) {
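[The bonding hunk above stops returning unconditionally from the IFF_MASTER branch, so a device that is both a master and a slave (a bond stacked under another bond) also reaches the IFF_SLAVE handling. A condensed sketch of the control flow, with the bond_*_netdev_event() helpers stubbed out for illustration:

    #define NOTIFY_DONE 0x0000

    static int master_event(unsigned long event) { return NOTIFY_DONE; }
    static int slave_event(unsigned long event)  { return NOTIFY_DONE; }

    static int netdev_event(unsigned long event, int is_master, int is_slave)
    {
            if (is_master) {
                    int ret = master_event(event);

                    if (ret != NOTIFY_DONE)
                            return ret;     /* consumed: stop here */
            }
            if (is_slave)                   /* may also run for the same dev */
                    return slave_event(event);
            return NOTIFY_DONE;
    }
]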
1715 +diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
1716 +index 9800738448ec..dca02b35c231 100644
1717 +--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
1718 ++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
1719 +@@ -32,6 +32,13 @@
1720 + #define DRV_NAME "nicvf"
1721 + #define DRV_VERSION "1.0"
1722 +
1723 ++/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
1724 ++ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
1725 ++ * this value, keeping headroom for the 14 byte Ethernet header and two
1726 ++ * VLAN tags (for QinQ)
1727 ++ */
1728 ++#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)
1729 ++
1730 + /* Supported devices */
1731 + static const struct pci_device_id nicvf_id_table[] = {
1732 + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
1733 +@@ -1547,6 +1554,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
1734 + struct nicvf *nic = netdev_priv(netdev);
1735 + int orig_mtu = netdev->mtu;
1736 +
1737 ++ /* For now just support only the usual MTU sized frames,
1738 ++ * plus some headroom for VLAN, QinQ.
1739 ++ */
1740 ++ if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
1741 ++ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
1742 ++ netdev->mtu);
1743 ++ return -EINVAL;
1744 ++ }
1745 ++
1746 + netdev->mtu = new_mtu;
1747 +
1748 + if (!netif_running(netdev))
1749 +@@ -1795,8 +1811,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
1750 + bool bpf_attached = false;
1751 + int ret = 0;
1752 +
1753 +- /* For now just support only the usual MTU sized frames */
1754 +- if (prog && (dev->mtu > 1500)) {
1755 ++ /* For now just support only the usual MTU sized frames,
1756 ++ * plus some headroom for VLAN, QinQ.
1757 ++ */
1758 ++ if (prog && dev->mtu > MAX_XDP_MTU) {
1759 + netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
1760 + dev->mtu);
1761 + return -EOPNOTSUPP;
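[The NOTE in the nicvf hunk encodes a concrete bound: the largest contiguous receive buffer is 1530 bytes, of which the Ethernet header and two VLAN tags (QinQ) are overhead. With ETH_HLEN = 14 and VLAN_HLEN = 4 from the uapi headers, MAX_XDP_MTU works out to 1508. A self-contained check of that arithmetic:

    #include <stdio.h>

    #define ETH_HLEN  14   /* values per <linux/if_ether.h>, <linux/if_vlan.h> */
    #define VLAN_HLEN 4
    #define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)

    int main(void)
    {
            printf("MAX_XDP_MTU = %d\n", MAX_XDP_MTU);  /* prints 1508 */
            return 0;
    }
]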
1762 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
1763 +index 8de64e88c670..22a2ef111514 100644
1764 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
1765 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
1766 +@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
1767 + return ret;
1768 + }
1769 +
1770 +-static void mlx5_fpga_tls_release_swid(struct idr *idr,
1771 +- spinlock_t *idr_spinlock, u32 swid)
1772 ++static void *mlx5_fpga_tls_release_swid(struct idr *idr,
1773 ++ spinlock_t *idr_spinlock, u32 swid)
1774 + {
1775 + unsigned long flags;
1776 ++ void *ptr;
1777 +
1778 + spin_lock_irqsave(idr_spinlock, flags);
1779 +- idr_remove(idr, swid);
1780 ++ ptr = idr_remove(idr, swid);
1781 + spin_unlock_irqrestore(idr_spinlock, flags);
1782 ++ return ptr;
1783 + }
1784 +
1785 + static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
1786 +@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
1787 + kfree(buf);
1788 + }
1789 +
1790 +-struct mlx5_teardown_stream_context {
1791 +- struct mlx5_fpga_tls_command_context cmd;
1792 +- u32 swid;
1793 +-};
1794 +-
1795 + static void
1796 + mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
1797 + struct mlx5_fpga_device *fdev,
1798 + struct mlx5_fpga_tls_command_context *cmd,
1799 + struct mlx5_fpga_dma_buf *resp)
1800 + {
1801 +- struct mlx5_teardown_stream_context *ctx =
1802 +- container_of(cmd, struct mlx5_teardown_stream_context, cmd);
1803 +-
1804 + if (resp) {
1805 + u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
1806 +
1807 +@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
1808 + mlx5_fpga_err(fdev,
1809 + "Teardown stream failed with syndrome = %d",
1810 + syndrome);
1811 +- else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
1812 +- mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
1813 +- &fdev->tls->tx_idr_spinlock,
1814 +- ctx->swid);
1815 +- else
1816 +- mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
1817 +- &fdev->tls->rx_idr_spinlock,
1818 +- ctx->swid);
1819 + }
1820 + mlx5_fpga_tls_put_command_ctx(cmd);
1821 + }
1822 +@@ -217,22 +203,22 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
1823 + void *cmd;
1824 + int ret;
1825 +
1826 +- rcu_read_lock();
1827 +- flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
1828 +- rcu_read_unlock();
1829 +-
1830 +- if (!flow) {
1831 +- WARN_ONCE(1, "Received NULL pointer for handle\n");
1832 +- return -EINVAL;
1833 +- }
1834 +-
1835 + buf = kzalloc(size, GFP_ATOMIC);
1836 + if (!buf)
1837 + return -ENOMEM;
1838 +
1839 + cmd = (buf + 1);
1840 +
1841 ++ rcu_read_lock();
1842 ++ flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
1843 ++ if (unlikely(!flow)) {
1844 ++ rcu_read_unlock();
1845 ++ WARN_ONCE(1, "Received NULL pointer for handle\n");
1846 ++ kfree(buf);
1847 ++ return -EINVAL;
1848 ++ }
1849 + mlx5_fpga_tls_flow_to_cmd(flow, cmd);
1850 ++ rcu_read_unlock();
1851 +
1852 + MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
1853 + MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
1854 +@@ -253,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
1855 + static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
1856 + void *flow, u32 swid, gfp_t flags)
1857 + {
1858 +- struct mlx5_teardown_stream_context *ctx;
1859 ++ struct mlx5_fpga_tls_command_context *ctx;
1860 + struct mlx5_fpga_dma_buf *buf;
1861 + void *cmd;
1862 +
1863 +@@ -261,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
1864 + if (!ctx)
1865 + return;
1866 +
1867 +- buf = &ctx->cmd.buf;
1868 ++ buf = &ctx->buf;
1869 + cmd = (ctx + 1);
1870 + MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
1871 + MLX5_SET(tls_cmd, cmd, swid, swid);
1872 +@@ -272,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
1873 + buf->sg[0].data = cmd;
1874 + buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
1875 +
1876 +- ctx->swid = swid;
1877 +- mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
1878 ++ mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
1879 + mlx5_fpga_tls_teardown_completion);
1880 + }
1881 +
1882 +@@ -283,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
1883 + struct mlx5_fpga_tls *tls = mdev->fpga->tls;
1884 + void *flow;
1885 +
1886 +- rcu_read_lock();
1887 + if (direction_sx)
1888 +- flow = idr_find(&tls->tx_idr, swid);
1889 ++ flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
1890 ++ &tls->tx_idr_spinlock,
1891 ++ swid);
1892 + else
1893 +- flow = idr_find(&tls->rx_idr, swid);
1894 +-
1895 +- rcu_read_unlock();
1896 ++ flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
1897 ++ &tls->rx_idr_spinlock,
1898 ++ swid);
1899 +
1900 + if (!flow) {
1901 + mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
1902 +@@ -297,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
1903 + return;
1904 + }
1905 +
1906 ++ synchronize_rcu(); /* before kfree(flow) */
1907 + mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
1908 + }
1909 +
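[The mlx5 rework above closes a lookup/free race by making removal, not lookup, the point where the flow pointer changes hands: idr_remove() under the spinlock unpublishes the entry and returns it, and synchronize_rcu() then waits out any concurrent idr_find() reader before the teardown path reuses the memory. The pattern, condensed (kernel primitives assumed; this will not build outside the kernel, and send_teardown() is a stand-in for the teardown command):

    static void *release_swid(struct idr *idr, spinlock_t *lock, u32 swid)
    {
            unsigned long flags;
            void *ptr;

            spin_lock_irqsave(lock, flags);
            ptr = idr_remove(idr, swid);    /* new readers now miss */
            spin_unlock_irqrestore(lock, flags);
            return ptr;
    }

    static void del_flow(struct mlx5_fpga_tls *tls, u32 swid)
    {
            void *flow = release_swid(&tls->rx_idr,
                                      &tls->rx_idr_spinlock, swid);

            if (!flow)
                    return;
            synchronize_rcu();              /* drain idr_find() readers */
            send_teardown(flow);            /* sole owner from here on */
    }
]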
1910 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
1911 +index 7a1e9cd9cc62..777b99416062 100644
1912 +--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
1913 ++++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
1914 +@@ -80,8 +80,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
1915 +
1916 + tmp_push_vlan_tci =
1917 + FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
1918 +- FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
1919 +- NFP_FL_PUSH_VLAN_CFI;
1920 ++ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action));
1921 + push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
1922 + }
1923 +
1924 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
1925 +index 325954b829c8..9b018321e24e 100644
1926 +--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
1927 ++++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
1928 +@@ -55,7 +55,7 @@
1929 + #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
1930 +
1931 + #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
1932 +-#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
1933 ++#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
1934 + #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
1935 +
1936 + #define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
1937 +@@ -109,7 +109,6 @@
1938 + #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
1939 +
1940 + #define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
1941 +-#define NFP_FL_PUSH_VLAN_CFI BIT(12)
1942 + #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
1943 +
1944 + /* LAG ports */
1945 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
1946 +index 17acb8cc6044..b99d55cf81f1 100644
1947 +--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
1948 ++++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
1949 +@@ -56,14 +56,12 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
1950 + FLOW_DISSECTOR_KEY_VLAN,
1951 + target);
1952 + /* Populate the tci field. */
1953 +- if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
1954 +- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
1955 +- flow_vlan->vlan_priority) |
1956 +- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
1957 +- flow_vlan->vlan_id) |
1958 +- NFP_FLOWER_MASK_VLAN_CFI;
1959 +- frame->tci = cpu_to_be16(tmp_tci);
1960 +- }
1961 ++ tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
1962 ++ tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
1963 ++ flow_vlan->vlan_priority) |
1964 ++ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
1965 ++ flow_vlan->vlan_id);
1966 ++ frame->tci = cpu_to_be16(tmp_tci);
1967 + }
1968 + }
1969 +
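[The flower hunks above repurpose bit 12 of the TCI from "CFI" to an always-set "VLAN tag present" flag and drop the old vlan_id/vlan_priority guard, so a tag with VID 0 and priority 0 still produces a non-zero match key. A small illustration of why the guard mattered, assuming the GENMASK layout from cmsg.h:

    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_PRESENT (1u << 12)

    static uint16_t build_tci(uint16_t vid, uint8_t prio)
    {
            return VLAN_PRESENT | (uint16_t)((prio & 0x7) << 13) | (vid & 0xfff);
    }

    int main(void)
    {
            /* The old code emitted 0 here, indistinguishable from "untagged". */
            printf("vid=0 prio=0 -> tci=0x%04x\n", build_tci(0, 0)); /* 0x1000 */
            return 0;
    }
]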
1970 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
1971 +index 95ee9d815d76..e23eaf3f6d03 100644
1972 +--- a/drivers/net/team/team.c
1973 ++++ b/drivers/net/team/team.c
1974 +@@ -1250,6 +1250,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1975 + goto err_option_port_add;
1976 + }
1977 +
1978 ++ /* set promiscuity level to new slave */
1979 ++ if (dev->flags & IFF_PROMISC) {
1980 ++ err = dev_set_promiscuity(port_dev, 1);
1981 ++ if (err)
1982 ++ goto err_set_slave_promisc;
1983 ++ }
1984 ++
1985 ++ /* set allmulti level to new slave */
1986 ++ if (dev->flags & IFF_ALLMULTI) {
1987 ++ err = dev_set_allmulti(port_dev, 1);
1988 ++ if (err) {
1989 ++ if (dev->flags & IFF_PROMISC)
1990 ++ dev_set_promiscuity(port_dev, -1);
1991 ++ goto err_set_slave_promisc;
1992 ++ }
1993 ++ }
1994 ++
1995 + netif_addr_lock_bh(dev);
1996 + dev_uc_sync_multiple(port_dev, dev);
1997 + dev_mc_sync_multiple(port_dev, dev);
1998 +@@ -1266,6 +1283,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1999 +
2000 + return 0;
2001 +
2002 ++err_set_slave_promisc:
2003 ++ __team_option_inst_del_port(team, port);
2004 ++
2005 + err_option_port_add:
2006 + team_upper_dev_unlink(team, port);
2007 +
2008 +@@ -1311,6 +1331,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
2009 +
2010 + team_port_disable(team, port);
2011 + list_del_rcu(&port->list);
2012 ++
2013 ++ if (dev->flags & IFF_PROMISC)
2014 ++ dev_set_promiscuity(port_dev, -1);
2015 ++ if (dev->flags & IFF_ALLMULTI)
2016 ++ dev_set_allmulti(port_dev, -1);
2017 ++
2018 + team_upper_dev_unlink(team, port);
2019 + netdev_rx_handler_unregister(port_dev);
2020 + team_port_disable_netpoll(port);
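[The team hunks above propagate the master's promisc/allmulti state to a newly enslaved port and undo it on removal. Note the unwind order on failure: a successful dev_set_promiscuity() must be reverted if dev_set_allmulti() then fails. The error-handling shape, reduced to its skeleton (kernel helpers assumed):

    static int port_sync_flags(struct net_device *port_dev, unsigned int flags)
    {
            int err;

            if (flags & IFF_PROMISC) {
                    err = dev_set_promiscuity(port_dev, 1);
                    if (err)
                            return err;
            }
            if (flags & IFF_ALLMULTI) {
                    err = dev_set_allmulti(port_dev, 1);
                    if (err) {
                            /* roll back the earlier success */
                            if (flags & IFF_PROMISC)
                                    dev_set_promiscuity(port_dev, -1);
                            return err;
                    }
            }
            return 0;
    }
]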
2021 +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
2022 +index a279a4363bc1..1d21424eae8a 100644
2023 +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
2024 ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
2025 +@@ -672,7 +672,6 @@ enum rt2x00_state_flags {
2026 + CONFIG_CHANNEL_HT40,
2027 + CONFIG_POWERSAVING,
2028 + CONFIG_HT_DISABLED,
2029 +- CONFIG_QOS_DISABLED,
2030 + CONFIG_MONITORING,
2031 +
2032 + /*
2033 +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
2034 +index fa2fd64084ac..da526684596f 100644
2035 +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
2036 ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
2037 +@@ -642,18 +642,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
2038 + rt2x00dev->intf_associated--;
2039 +
2040 + rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
2041 +-
2042 +- clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
2043 + }
2044 +
2045 +- /*
2046 +- * Check for access point which do not support 802.11e . We have to
2047 +- * generate data frames sequence number in S/W for such AP, because
2048 +- * of H/W bug.
2049 +- */
2050 +- if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
2051 +- set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
2052 +-
2053 + /*
2054 + * When the erp information has changed, we should perform
2055 + * additional configuration steps. For all other changes we are done.
2056 +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
2057 +index 710e9641552e..85e320178a0e 100644
2058 +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
2059 ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
2060 +@@ -200,15 +200,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
2061 + if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
2062 + /*
2063 + * rt2800 has a H/W (or F/W) bug, device incorrectly increase
2064 +- * seqno on retransmited data (non-QOS) frames. To workaround
2065 +- * the problem let's generate seqno in software if QOS is
2066 +- * disabled.
2067 ++ * seqno on retransmitted data (non-QOS) and management frames.
2068 ++ * To workaround the problem let's generate seqno in software.
2069 ++ * Except for beacons which are transmitted periodically by H/W
2070 ++ * hence hardware has to assign seqno for them.
2071 + */
2072 +- if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
2073 +- __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
2074 +- else
2075 ++ if (ieee80211_is_beacon(hdr->frame_control)) {
2076 ++ __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
2077 + /* H/W will generate sequence number */
2078 + return;
2079 ++ }
2080 ++
2081 ++ __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
2082 + }
2083 +
2084 + /*
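[The rt2x00 change above drops the per-BSS CONFIG_QOS_DISABLED tracking: since the rt2800 hardware mis-increments sequence numbers on retransmitted non-QoS and management frames regardless of the QoS setting, the driver now always assigns seqnos in software, leaving only beacons (which the hardware transmits autonomously) to the device. The resulting policy is a one-liner; a hedged restatement:

    /* true: let hardware number the frame; false: driver assigns the
     * sequence number itself (everything except beacons). */
    static _Bool hw_assigns_seqno(_Bool is_beacon)
    {
            return is_beacon;
    }
]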
2085 +diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
2086 +index 1797e47fab38..3d51a936f6d5 100644
2087 +--- a/drivers/scsi/libfc/fc_rport.c
2088 ++++ b/drivers/scsi/libfc/fc_rport.c
2089 +@@ -2153,7 +2153,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
2090 + FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
2091 + fc_rport_state(rdata));
2092 +
2093 +- rdata->flags &= ~FC_RP_STARTED;
2094 + fc_rport_enter_delete(rdata, RPORT_EV_STOP);
2095 + mutex_unlock(&rdata->rp_mutex);
2096 + kref_put(&rdata->kref, fc_rport_destroy);
2097 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2098 +index 655790f30434..1fc832751a4f 100644
2099 +--- a/drivers/scsi/scsi_lib.c
2100 ++++ b/drivers/scsi/scsi_lib.c
2101 +@@ -2149,8 +2149,12 @@ out_put_budget:
2102 + ret = BLK_STS_DEV_RESOURCE;
2103 + break;
2104 + default:
2105 ++ if (unlikely(!scsi_device_online(sdev)))
2106 ++ scsi_req(req)->result = DID_NO_CONNECT << 16;
2107 ++ else
2108 ++ scsi_req(req)->result = DID_ERROR << 16;
2109 + /*
2110 +- * Make sure to release all allocated ressources when
2111 ++ * Make sure to release all allocated resources when
2112 + * we hit an error, as we will never see this command
2113 + * again.
2114 + */
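[The scsi_lib hunk above fills in a host byte before the early-release path runs, distinguishing an offlined device from a generic failure; the '<< 16' shifts place the code in the host-byte field of the 32-bit result word. A quick check of the encoding, assuming the DID_* values from <scsi/scsi.h>:

    #include <stdio.h>

    #define DID_NO_CONNECT 0x01     /* per <scsi/scsi.h> */
    #define DID_ERROR      0x07

    int main(void)
    {
            printf("offline -> result 0x%08x\n", DID_NO_CONNECT << 16);
            printf("generic -> result 0x%08x\n", DID_ERROR << 16);
            return 0;
    }
]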
2115 +diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
2116 +index 808ed92ed66f..1bb1cb651349 100644
2117 +--- a/drivers/staging/comedi/drivers/ni_usb6501.c
2118 ++++ b/drivers/staging/comedi/drivers/ni_usb6501.c
2119 +@@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
2120 +
2121 + size = usb_endpoint_maxp(devpriv->ep_tx);
2122 + devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
2123 +- if (!devpriv->usb_tx_buf) {
2124 +- kfree(devpriv->usb_rx_buf);
2125 ++ if (!devpriv->usb_tx_buf)
2126 + return -ENOMEM;
2127 +- }
2128 +
2129 + return 0;
2130 + }
2131 +@@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev,
2132 + if (!devpriv)
2133 + return -ENOMEM;
2134 +
2135 ++ mutex_init(&devpriv->mut);
2136 ++ usb_set_intfdata(intf, devpriv);
2137 ++
2138 + ret = ni6501_find_endpoints(dev);
2139 + if (ret)
2140 + return ret;
2141 +@@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev,
2142 + if (ret)
2143 + return ret;
2144 +
2145 +- mutex_init(&devpriv->mut);
2146 +- usb_set_intfdata(intf, devpriv);
2147 +-
2148 + ret = comedi_alloc_subdevices(dev, 2);
2149 + if (ret)
2150 + return ret;
2151 +diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
2152 +index 6234b649d887..65dc6c51037e 100644
2153 +--- a/drivers/staging/comedi/drivers/vmk80xx.c
2154 ++++ b/drivers/staging/comedi/drivers/vmk80xx.c
2155 +@@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
2156 +
2157 + size = usb_endpoint_maxp(devpriv->ep_tx);
2158 + devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
2159 +- if (!devpriv->usb_tx_buf) {
2160 +- kfree(devpriv->usb_rx_buf);
2161 ++ if (!devpriv->usb_tx_buf)
2162 + return -ENOMEM;
2163 +- }
2164 +
2165 + return 0;
2166 + }
2167 +@@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
2168 +
2169 + devpriv->model = board->model;
2170 +
2171 ++ sema_init(&devpriv->limit_sem, 8);
2172 ++
2173 + ret = vmk80xx_find_usb_endpoints(dev);
2174 + if (ret)
2175 + return ret;
2176 +@@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
2177 + if (ret)
2178 + return ret;
2179 +
2180 +- sema_init(&devpriv->limit_sem, 8);
2181 +-
2182 + usb_set_intfdata(intf, devpriv);
2183 +
2184 + if (devpriv->model == VMK8055_MODEL)
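[Both comedi hunks above fix the same pair of problems: the buffer-allocation helper no longer frees its sibling buffer on failure (the drivers' detach path frees both unconditionally, so freeing here caused a double free), and the mutex/semaphore is initialized before any early-return path can leave it untouched. The allocation half, as a pattern — struct and sizes hypothetical, kernel allocator assumed:

    struct priv { void *rx_buf; void *tx_buf; };

    static int alloc_usb_buffers(struct priv *p, size_t rx_sz, size_t tx_sz)
    {
            p->rx_buf = kzalloc(rx_sz, GFP_KERNEL);
            if (!p->rx_buf)
                    return -ENOMEM;

            p->tx_buf = kzalloc(tx_sz, GFP_KERNEL);
            if (!p->tx_buf)
                    return -ENOMEM;  /* rx_buf is freed later by detach() */

            return 0;
    }
]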
2185 +diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
2186 +index df0499fc4802..6857a4bf7297 100644
2187 +--- a/drivers/staging/iio/adc/ad7192.c
2188 ++++ b/drivers/staging/iio/adc/ad7192.c
2189 +@@ -109,10 +109,10 @@
2190 + #define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */
2191 + #define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */
2192 +
2193 +-#define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */
2194 +-#define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */
2195 +-#define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */
2196 +-#define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */
2197 ++#define AD7193_CH_AIN1P_AIN2M 0x001 /* AIN1(+) - AIN2(-) */
2198 ++#define AD7193_CH_AIN3P_AIN4M 0x002 /* AIN3(+) - AIN4(-) */
2199 ++#define AD7193_CH_AIN5P_AIN6M 0x004 /* AIN5(+) - AIN6(-) */
2200 ++#define AD7193_CH_AIN7P_AIN8M 0x008 /* AIN7(+) - AIN8(-) */
2201 + #define AD7193_CH_TEMP 0x100 /* Temp senseor */
2202 + #define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */
2203 + #define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */
2204 +diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
2205 +index 029c3bf42d4d..07774c000c5a 100644
2206 +--- a/drivers/staging/iio/meter/ade7854.c
2207 ++++ b/drivers/staging/iio/meter/ade7854.c
2208 +@@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644,
2209 + static IIO_DEV_ATTR_IPEAK(0644,
2210 + ade7854_read_32bit,
2211 + ade7854_write_32bit,
2212 +- ADE7854_VPEAK);
2213 ++ ADE7854_IPEAK);
2214 + static IIO_DEV_ATTR_APHCAL(0644,
2215 + ade7854_read_16bit,
2216 + ade7854_write_16bit,
2217 +diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
2218 +index 52ad62722996..25a077f4ea94 100644
2219 +--- a/drivers/staging/most/core.c
2220 ++++ b/drivers/staging/most/core.c
2221 +@@ -1412,7 +1412,7 @@ int most_register_interface(struct most_interface *iface)
2222 +
2223 + INIT_LIST_HEAD(&iface->p->channel_list);
2224 + iface->p->dev_id = id;
2225 +- snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
2226 ++ strcpy(iface->p->name, iface->description);
2227 + iface->dev.init_name = iface->p->name;
2228 + iface->dev.bus = &mc.bus;
2229 + iface->dev.parent = &mc.dev;
2230 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
2231 +index cbbf239aea0f..03fe3fb4bff6 100644
2232 +--- a/drivers/tty/serial/sh-sci.c
2233 ++++ b/drivers/tty/serial/sh-sci.c
2234 +@@ -2497,14 +2497,16 @@ done:
2235 + * center of the last stop bit in sampling clocks.
2236 + */
2237 + int last_stop = bits * 2 - 1;
2238 +- int deviation = min_err * srr * last_stop / 2 / baud;
2239 ++ int deviation = DIV_ROUND_CLOSEST(min_err * last_stop *
2240 ++ (int)(srr + 1),
2241 ++ 2 * (int)baud);
2242 +
2243 + if (abs(deviation) >= 2) {
2244 + /* At least two sampling clocks off at the
2245 + * last stop bit; we can increase the error
2246 + * margin by shifting the sampling point.
2247 + */
2248 +- int shift = min(-8, max(7, deviation / 2));
2249 ++ int shift = clamp(deviation / 2, -8, 7);
2250 +
2251 + hssrr |= (shift << HSCIF_SRHP_SHIFT) &
2252 + HSCIF_SRHP_MASK;
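[The sh-sci hunk above fixes two things: the deviation is now computed with a rounded division against the actual sampling rate (srr + 1), and the sampling-point shift uses clamp() instead of min(-8, max(7, ...)) — an expression that always evaluates to -8, since max(7, x) can never be below 7. A runnable demonstration of the broken expression versus a correct clamp:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    static int clamp_int(int v, int lo, int hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            for (int d = -20; d <= 20; d += 10)
                    printf("deviation=%3d  old=%d  new=%d\n", d,
                           MIN(-8, MAX(7, d / 2)),   /* always -8 */
                           clamp_int(d / 2, -8, 7)); /* -8..7 as intended */
            return 0;
    }
]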
2253 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
2254 +index b9a9a07f1ee9..3e5ec1cee059 100644
2255 +--- a/drivers/tty/vt/vt.c
2256 ++++ b/drivers/tty/vt/vt.c
2257 +@@ -1521,7 +1521,8 @@ static void csi_J(struct vc_data *vc, int vpar)
2258 + return;
2259 + }
2260 + scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
2261 +- update_region(vc, (unsigned long) start, count);
2262 ++ if (con_should_update(vc))
2263 ++ do_update_region(vc, (unsigned long) start, count);
2264 + vc->vc_need_wrap = 0;
2265 + }
2266 +
2267 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
2268 +index b214a72d5caa..c163bc15976a 100644
2269 +--- a/drivers/vhost/vhost.c
2270 ++++ b/drivers/vhost/vhost.c
2271 +@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
2272 + u64 start, u64 size, u64 end,
2273 + u64 userspace_addr, int perm)
2274 + {
2275 +- struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
2276 ++ struct vhost_umem_node *tmp, *node;
2277 +
2278 ++ if (!size)
2279 ++ return -EFAULT;
2280 ++
2281 ++ node = kmalloc(sizeof(*node), GFP_ATOMIC);
2282 + if (!node)
2283 + return -ENOMEM;
2284 +
2285 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
2286 +index 80f33582059e..6f227cc781e5 100644
2287 +--- a/fs/cifs/cifsglob.h
2288 ++++ b/fs/cifs/cifsglob.h
2289 +@@ -1263,6 +1263,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
2290 + }
2291 +
2292 + struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
2293 ++void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr);
2294 + void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
2295 +
2296 + #define CIFS_CACHE_READ_FLG 1
2297 +@@ -1763,6 +1764,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
2298 + #endif /* CONFIG_CIFS_ACL */
2299 +
2300 + void cifs_oplock_break(struct work_struct *work);
2301 ++void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
2302 +
2303 + extern const struct slow_work_ops cifs_oplock_break_ops;
2304 + extern struct workqueue_struct *cifsiod_wq;
2305 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2306 +index d847132ab027..d6b45682833b 100644
2307 +--- a/fs/cifs/file.c
2308 ++++ b/fs/cifs/file.c
2309 +@@ -358,12 +358,30 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
2310 + return cifs_file;
2311 + }
2312 +
2313 +-/*
2314 +- * Release a reference on the file private data. This may involve closing
2315 +- * the filehandle out on the server. Must be called without holding
2316 +- * tcon->open_file_lock and cifs_file->file_info_lock.
2317 ++/**
2318 ++ * cifsFileInfo_put - release a reference of file priv data
2319 ++ *
2320 ++ * Always potentially wait for oplock handler. See _cifsFileInfo_put().
2321 + */
2322 + void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
2323 ++{
2324 ++ _cifsFileInfo_put(cifs_file, true);
2325 ++}
2326 ++
2327 ++/**
2328 ++ * _cifsFileInfo_put - release a reference of file priv data
2329 ++ *
2330 ++ * This may involve closing the filehandle @cifs_file out on the
2331 ++ * server. Must be called without holding tcon->open_file_lock and
2332 ++ * cifs_file->file_info_lock.
2333 ++ *
2334 ++ * If @wait_for_oplock_handler is true and we are releasing the last
2335 ++ * reference, wait for any running oplock break handler of the file
2336 ++ * and cancel any pending one. If calling this function from the
2337 ++ * oplock break handler, you need to pass false.
2338 ++ *
2339 ++ */
2340 ++void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
2341 + {
2342 + struct inode *inode = d_inode(cifs_file->dentry);
2343 + struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
2344 +@@ -411,7 +429,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
2345 +
2346 + spin_unlock(&tcon->open_file_lock);
2347 +
2348 +- oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
2349 ++ oplock_break_cancelled = wait_oplock_handler ?
2350 ++ cancel_work_sync(&cifs_file->oplock_break) : false;
2351 +
2352 + if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
2353 + struct TCP_Server_Info *server = tcon->ses->server;
2354 +@@ -4170,6 +4189,7 @@ void cifs_oplock_break(struct work_struct *work)
2355 + cinode);
2356 + cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
2357 + }
2358 ++ _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
2359 + cifs_done_oplock_break(cinode);
2360 + }
2361 +
2362 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
2363 +index 6926685e513c..facc94e159a1 100644
2364 +--- a/fs/cifs/misc.c
2365 ++++ b/fs/cifs/misc.c
2366 +@@ -490,8 +490,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
2367 + CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
2368 + &pCifsInode->flags);
2369 +
2370 +- queue_work(cifsoplockd_wq,
2371 +- &netfile->oplock_break);
2372 ++ cifs_queue_oplock_break(netfile);
2373 + netfile->oplock_break_cancelled = false;
2374 +
2375 + spin_unlock(&tcon->open_file_lock);
2376 +@@ -588,6 +587,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
2377 + spin_unlock(&cinode->writers_lock);
2378 + }
2379 +
2380 ++/**
2381 ++ * cifs_queue_oplock_break - queue the oplock break handler for cfile
2382 ++ *
2383 ++ * This function is called from the demultiplex thread when it
2384 ++ * receives an oplock break for @cfile.
2385 ++ *
2386 ++ * Assumes the tcon->open_file_lock is held.
2387 ++ * Assumes cfile->file_info_lock is NOT held.
2388 ++ */
2389 ++void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
2390 ++{
2391 ++ /*
2392 ++ * Bump the handle refcount now while we hold the
2393 ++ * open_file_lock to enforce the validity of it for the oplock
2394 ++ * break handler. The matching put is done at the end of the
2395 ++ * handler.
2396 ++ */
2397 ++ cifsFileInfo_get(cfile);
2398 ++
2399 ++ queue_work(cifsoplockd_wq, &cfile->oplock_break);
2400 ++}
2401 ++
2402 + void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
2403 + {
2404 + clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
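[The cifs changes in this group plug a use-after-free: the thread that queues the oplock-break work now takes a reference on the cifsFileInfo, and the handler drops it when done, passing wait_oplock_handler = false so it never cancel-and-waits on itself. The lifetime pattern, stripped to its essentials (kernel workqueue API and cifs types assumed):

    /* Queuing side (runs under tcon->open_file_lock): the reference
     * taken here keeps cfile alive for the whole handler. */
    static void queue_oplock_break(struct cifsFileInfo *cfile)
    {
            cifsFileInfo_get(cfile);
            queue_work(cifsoplockd_wq, &cfile->oplock_break);
    }

    /* Handler side: drop the reference last, and never wait for our
     * own work item (hence the 'false'). */
    static void oplock_break_handler(struct work_struct *work)
    {
            struct cifsFileInfo *cfile =
                    container_of(work, struct cifsFileInfo, oplock_break);

            /* ... perform the oplock break ... */
            _cifsFileInfo_put(cfile, false);
    }
]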
2405 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
2406 +index 58700d2ba8cd..0a7ed2e3ad4f 100644
2407 +--- a/fs/cifs/smb2misc.c
2408 ++++ b/fs/cifs/smb2misc.c
2409 +@@ -555,7 +555,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
2410 + clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
2411 + &cinode->flags);
2412 +
2413 +- queue_work(cifsoplockd_wq, &cfile->oplock_break);
2414 ++ cifs_queue_oplock_break(cfile);
2415 + kfree(lw);
2416 + return true;
2417 + }
2418 +@@ -719,8 +719,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
2419 + CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
2420 + &cinode->flags);
2421 + spin_unlock(&cfile->file_info_lock);
2422 +- queue_work(cifsoplockd_wq,
2423 +- &cfile->oplock_break);
2424 ++
2425 ++ cifs_queue_oplock_break(cfile);
2426 +
2427 + spin_unlock(&tcon->open_file_lock);
2428 + spin_unlock(&cifs_tcp_ses_lock);
2429 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2430 +index d4d7d61a6fe2..2001184afe70 100644
2431 +--- a/fs/cifs/smb2ops.c
2432 ++++ b/fs/cifs/smb2ops.c
2433 +@@ -1906,6 +1906,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2434 +
2435 + rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
2436 + &resp_buftype);
2437 ++ if (!rc)
2438 ++ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2439 + if (!rc || !err_iov.iov_base) {
2440 + rc = -ENOENT;
2441 + goto free_path;
2442 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2443 +index 71f32d983384..c6fd3acc5560 100644
2444 +--- a/fs/cifs/smb2pdu.c
2445 ++++ b/fs/cifs/smb2pdu.c
2446 +@@ -3273,8 +3273,6 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
2447 + rqst.rq_nvec = 1;
2448 +
2449 + rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
2450 +- cifs_small_buf_release(req);
2451 +-
2452 + rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
2453 +
2454 + if (rc) {
2455 +@@ -3293,6 +3291,8 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
2456 + io_parms->tcon->tid, ses->Suid,
2457 + io_parms->offset, io_parms->length);
2458 +
2459 ++ cifs_small_buf_release(req);
2460 ++
2461 + *nbytes = le32_to_cpu(rsp->DataLength);
2462 + if ((*nbytes > CIFS_MAX_MSGSIZE) ||
2463 + (*nbytes > io_parms->length)) {
2464 +@@ -3591,7 +3591,6 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
2465 +
2466 + rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
2467 + &resp_buftype, flags, &rsp_iov);
2468 +- cifs_small_buf_release(req);
2469 + rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
2470 +
2471 + if (rc) {
2472 +@@ -3609,6 +3608,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
2473 + io_parms->offset, *nbytes);
2474 + }
2475 +
2476 ++ cifs_small_buf_release(req);
2477 + free_rsp_buf(resp_buftype, rsp);
2478 + return rc;
2479 + }
2480 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
2481 +index d76fe166f6ce..c5819baee35c 100644
2482 +--- a/fs/proc/task_mmu.c
2483 ++++ b/fs/proc/task_mmu.c
2484 +@@ -1138,6 +1138,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
2485 + count = -EINTR;
2486 + goto out_mm;
2487 + }
2488 ++ /*
2489 ++ * Avoid to modify vma->vm_flags
2490 ++ * without locked ops while the
2491 ++ * coredump reads the vm_flags.
2492 ++ */
2493 ++ if (!mmget_still_valid(mm)) {
2494 ++ /*
2495 ++ * Silently return "count"
2496 ++ * like if get_task_mm()
2497 ++ * failed. FIXME: should this
2498 ++ * function have returned
2499 ++ * -ESRCH if get_task_mm()
2500 ++ * failed like if
2501 ++ * get_proc_task() fails?
2502 ++ */
2503 ++ up_write(&mm->mmap_sem);
2504 ++ goto out_mm;
2505 ++ }
2506 + for (vma = mm->mmap; vma; vma = vma->vm_next) {
2507 + vma->vm_flags &= ~VM_SOFTDIRTY;
2508 + vma_set_page_prot(vma);
2509 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
2510 +index d8b8323e80f4..aaca81b5e119 100644
2511 +--- a/fs/userfaultfd.c
2512 ++++ b/fs/userfaultfd.c
2513 +@@ -630,6 +630,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
2514 +
2515 + /* the various vma->vm_userfaultfd_ctx still points to it */
2516 + down_write(&mm->mmap_sem);
2517 ++ /* no task can run (and in turn coredump) yet */
2518 ++ VM_WARN_ON(!mmget_still_valid(mm));
2519 + for (vma = mm->mmap; vma; vma = vma->vm_next)
2520 + if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
2521 + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
2522 +@@ -884,6 +886,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2523 + * taking the mmap_sem for writing.
2524 + */
2525 + down_write(&mm->mmap_sem);
2526 ++ if (!mmget_still_valid(mm))
2527 ++ goto skip_mm;
2528 + prev = NULL;
2529 + for (vma = mm->mmap; vma; vma = vma->vm_next) {
2530 + cond_resched();
2531 +@@ -906,6 +910,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2532 + vma->vm_flags = new_flags;
2533 + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
2534 + }
2535 ++skip_mm:
2536 + up_write(&mm->mmap_sem);
2537 + mmput(mm);
2538 + wakeup:
2539 +@@ -1334,6 +1339,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
2540 + goto out;
2541 +
2542 + down_write(&mm->mmap_sem);
2543 ++ if (!mmget_still_valid(mm))
2544 ++ goto out_unlock;
2545 + vma = find_vma_prev(mm, start, &prev);
2546 + if (!vma)
2547 + goto out_unlock;
2548 +@@ -1521,6 +1528,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
2549 + goto out;
2550 +
2551 + down_write(&mm->mmap_sem);
2552 ++ if (!mmget_still_valid(mm))
2553 ++ goto out_unlock;
2554 + vma = find_vma_prev(mm, start, &prev);
2555 + if (!vma)
2556 + goto out_unlock;
2557 +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
2558 +index e909413e4e38..32cae0f35b9d 100644
2559 +--- a/include/linux/kprobes.h
2560 ++++ b/include/linux/kprobes.h
2561 +@@ -173,6 +173,7 @@ struct kretprobe_instance {
2562 + struct kretprobe *rp;
2563 + kprobe_opcode_t *ret_addr;
2564 + struct task_struct *task;
2565 ++ void *fp;
2566 + char data[0];
2567 + };
2568 +
2569 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
2570 +index 21fef8c5eca7..8c2fec0bcb26 100644
2571 +--- a/include/linux/netdevice.h
2572 ++++ b/include/linux/netdevice.h
2573 +@@ -1456,6 +1456,7 @@ struct net_device_ops {
2574 + * @IFF_FAILOVER: device is a failover master device
2575 + * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
2576 + * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
2577 ++ * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
2578 + */
2579 + enum netdev_priv_flags {
2580 + IFF_802_1Q_VLAN = 1<<0,
2581 +@@ -1488,6 +1489,7 @@ enum netdev_priv_flags {
2582 + IFF_FAILOVER = 1<<27,
2583 + IFF_FAILOVER_SLAVE = 1<<28,
2584 + IFF_L3MDEV_RX_HANDLER = 1<<29,
2585 ++ IFF_LIVE_RENAME_OK = 1<<30,
2586 + };
2587 +
2588 + #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
2589 +@@ -1519,6 +1521,7 @@ enum netdev_priv_flags {
2590 + #define IFF_FAILOVER IFF_FAILOVER
2591 + #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
2592 + #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
2593 ++#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
2594 +
2595 + /**
2596 + * struct net_device - The DEVICE structure.
2597 +diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
2598 +index aebb370a0006..cebb79fe2c72 100644
2599 +--- a/include/linux/sched/mm.h
2600 ++++ b/include/linux/sched/mm.h
2601 +@@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm)
2602 + __mmdrop(mm);
2603 + }
2604 +
2605 ++/*
2606 ++ * This has to be called after a get_task_mm()/mmget_not_zero()
2607 ++ * followed by taking the mmap_sem for writing before modifying the
2608 ++ * vmas or anything the coredump pretends not to change from under it.
2609 ++ *
2610 ++ * NOTE: find_extend_vma() called from GUP context is the only place
2611 ++ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
2612 ++ * for reading and outside the context of the process, so it is also
2613 ++ * the only case that holds the mmap_sem for reading that must call
2614 ++ * this function. Generally if the mmap_sem is hold for reading
2615 ++ * there's no need of this check after get_task_mm()/mmget_not_zero().
2616 ++ *
2617 ++ * This function can be obsoleted and the check can be removed, after
2618 ++ * the coredump code will hold the mmap_sem for writing before
2619 ++ * invoking the ->core_dump methods.
2620 ++ */
2621 ++static inline bool mmget_still_valid(struct mm_struct *mm)
2622 ++{
2623 ++ return likely(!mm->core_state);
2624 ++}
2625 ++
2626 + /**
2627 + * mmget() - Pin the address space associated with a &struct mm_struct.
2628 + * @mm: The address space to pin.
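[mmget_still_valid() above underpins the task_mmu, userfaultfd, and mmap hunks in this patch: any path that pinned the mm via get_task_mm()/mmget_not_zero() and then takes mmap_sem for writing must re-check that no coredump has started before touching vmas. The call shape is the same at every site; a condensed sketch (kernel locking primitives assumed):

    static int modify_vmas(struct mm_struct *mm)
    {
            down_write(&mm->mmap_sem);
            if (!mmget_still_valid(mm)) {
                    /* coredump in flight: it assumes the vmas are frozen */
                    up_write(&mm->mmap_sem);
                    return 0;
            }
            /* ... walk mm->mmap and adjust vm_flags / vm_start / vm_end ... */
            up_write(&mm->mmap_sem);
            return 0;
    }
]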
2629 +diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
2630 +index 1662cbc0b46b..b02bf737d019 100644
2631 +--- a/include/net/inet_frag.h
2632 ++++ b/include/net/inet_frag.h
2633 +@@ -77,8 +77,8 @@ struct inet_frag_queue {
2634 + struct timer_list timer;
2635 + spinlock_t lock;
2636 + refcount_t refcnt;
2637 +- struct sk_buff *fragments; /* Used in IPv6. */
2638 +- struct rb_root rb_fragments; /* Used in IPv4. */
2639 ++ struct sk_buff *fragments; /* used in 6lopwpan IPv6. */
2640 ++ struct rb_root rb_fragments; /* Used in IPv4/IPv6. */
2641 + struct sk_buff *fragments_tail;
2642 + struct sk_buff *last_run_head;
2643 + ktime_t stamp;
2644 +@@ -153,4 +153,16 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
2645 +
2646 + extern const u8 ip_frag_ecn_table[16];
2647 +
2648 ++/* Return values of inet_frag_queue_insert() */
2649 ++#define IPFRAG_OK 0
2650 ++#define IPFRAG_DUP 1
2651 ++#define IPFRAG_OVERLAP 2
2652 ++int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
2653 ++ int offset, int end);
2654 ++void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
2655 ++ struct sk_buff *parent);
2656 ++void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
2657 ++ void *reasm_data);
2658 ++struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
2659 ++
2660 + #endif
2661 +diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
2662 +index 6ced1e6899b6..28aa9b30aece 100644
2663 +--- a/include/net/ipv6_frag.h
2664 ++++ b/include/net/ipv6_frag.h
2665 +@@ -82,8 +82,15 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
2666 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
2667 +
2668 + /* Don't send error if the first segment did not arrive. */
2669 +- head = fq->q.fragments;
2670 +- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
2671 ++ if (!(fq->q.flags & INET_FRAG_FIRST_IN))
2672 ++ goto out;
2673 ++
2674 ++ /* sk_buff::dev and sk_buff::rbnode are unionized. So we
2675 ++ * pull the head out of the tree in order to be able to
2676 ++ * deal with head->dev.
2677 ++ */
2678 ++ head = inet_frag_pull_head(&fq->q);
2679 ++ if (!head)
2680 + goto out;
2681 +
2682 + head->dev = dev;
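[The ipv6_frag.h comment above turns on a subtle point: per the patch, sk_buff::dev and sk_buff::rbnode share storage, so while a fragment sits in the rbtree its dev field is overlaid by tree-link pointers, and the head must be pulled out of the tree before head->dev is meaningful. A runnable toy showing the aliasing:

    #include <stdio.h>

    union slot {                    /* stand-in for the sk_buff union */
            void *dev;
            struct { void *left, *right; } rbnode;
    };

    int main(void)
    {
            union slot s;

            s.rbnode.left = (void *)0xdead;   /* "linked into the tree" */
            printf("dev while linked: %p (garbage)\n", s.dev);
            s.dev = NULL;                     /* only valid after unlink */
            return 0;
    }
]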
2683 +diff --git a/include/net/tls.h b/include/net/tls.h
2684 +index 0a769cf2f5f3..c423b7d0b6ab 100644
2685 +--- a/include/net/tls.h
2686 ++++ b/include/net/tls.h
2687 +@@ -317,7 +317,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
2688 + static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
2689 + {
2690 + #ifdef CONFIG_SOCK_VALIDATE_XMIT
2691 +- return sk_fullsock(sk) &
2692 ++ return sk_fullsock(sk) &&
2693 + (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
2694 + &tls_validate_xmit_skb);
2695 + #else
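[The tls.h one-character fix above matters because '&' does not short-circuit: with the bitwise form, the sk_validate_xmit_skb load runs even when sk_fullsock() fails, i.e. on request/timewait minisockets that do not carry that field. A runnable illustration of the evaluation-order difference:

    #include <stdio.h>

    static int guard(void) { puts("  guard checked");    return 0; }
    static int deref(void) { puts("  field loaded (!)"); return 1; }

    int main(void)
    {
            puts("bitwise &:");
            if (guard() & deref())   /* both sides always evaluated */
                    puts("  offloaded");
            puts("logical &&:");
            if (guard() && deref())  /* deref skipped when guard fails */
                    puts("  offloaded");
            return 0;
    }
]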
2696 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
2697 +index 4344381664cc..29ff6635d259 100644
2698 +--- a/kernel/kprobes.c
2699 ++++ b/kernel/kprobes.c
2700 +@@ -703,7 +703,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
2701 + static int reuse_unused_kprobe(struct kprobe *ap)
2702 + {
2703 + struct optimized_kprobe *op;
2704 +- int ret;
2705 +
2706 + BUG_ON(!kprobe_unused(ap));
2707 + /*
2708 +@@ -715,9 +714,8 @@ static int reuse_unused_kprobe(struct kprobe *ap)
2709 + /* Enable the probe again */
2710 + ap->flags &= ~KPROBE_FLAG_DISABLED;
2711 + /* Optimize it again (remove from op->list) */
2712 +- ret = kprobe_optready(ap);
2713 +- if (ret)
2714 +- return ret;
2715 ++ if (!kprobe_optready(ap))
2716 ++ return -EINVAL;
2717 +
2718 + optimize_kprobe(ap);
2719 + return 0;
2720 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
2721 +index 0cbdbbb0729f..26b57e24476f 100644
2722 +--- a/kernel/locking/lockdep.c
2723 ++++ b/kernel/locking/lockdep.c
2724 +@@ -3567,9 +3567,6 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
2725 + unsigned int depth;
2726 + int i;
2727 +
2728 +- if (unlikely(!debug_locks))
2729 +- return 0;
2730 +-
2731 + depth = curr->lockdep_depth;
2732 + /*
2733 + * This function is about (re)setting the class of a held lock,
2734 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2735 +index 640094391169..4aa8e7d90c25 100644
2736 +--- a/kernel/sched/fair.c
2737 ++++ b/kernel/sched/fair.c
2738 +@@ -4847,12 +4847,15 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2739 + return HRTIMER_NORESTART;
2740 + }
2741 +
2742 ++extern const u64 max_cfs_quota_period;
2743 ++
2744 + static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2745 + {
2746 + struct cfs_bandwidth *cfs_b =
2747 + container_of(timer, struct cfs_bandwidth, period_timer);
2748 + int overrun;
2749 + int idle = 0;
2750 ++ int count = 0;
2751 +
2752 + raw_spin_lock(&cfs_b->lock);
2753 + for (;;) {
2754 +@@ -4860,6 +4863,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2755 + if (!overrun)
2756 + break;
2757 +
2758 ++ if (++count > 3) {
2759 ++ u64 new, old = ktime_to_ns(cfs_b->period);
2760 ++
2761 ++ new = (old * 147) / 128; /* ~115% */
2762 ++ new = min(new, max_cfs_quota_period);
2763 ++
2764 ++ cfs_b->period = ns_to_ktime(new);
2765 ++
2766 ++ /* since max is 1s, this is limited to 1e9^2, which fits in u64 */
2767 ++ cfs_b->quota *= new;
2768 ++ cfs_b->quota = div64_u64(cfs_b->quota, old);
2769 ++
2770 ++ pr_warn_ratelimited(
2771 ++ "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
2772 ++ smp_processor_id(),
2773 ++ div_u64(new, NSEC_PER_USEC),
2774 ++ div_u64(cfs_b->quota, NSEC_PER_USEC));
2775 ++
2776 ++ /* reset count so we don't come right back in here */
2777 ++ count = 0;
2778 ++ }
2779 ++
2780 + idle = do_sched_cfs_period_timer(cfs_b, overrun);
2781 + }
2782 + if (idle)
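[The fair.c hunk above defends against a CFS period so short that the timer callback keeps catching its own tail: after three consecutive overruns the period is grown by 147/128 (about 15%), capped at max_cfs_quota_period, and the quota is rescaled by the same ratio so the configured bandwidth fraction is preserved. The arithmetic, runnable:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t period  = 1000000;               /* 1 ms in ns */
            uint64_t quota   = 500000;                /* 50% of the period */
            uint64_t nperiod = period * 147 / 128;    /* 1148437 ns, ~+14.8% */
            uint64_t nquota  = quota * nperiod / period;

            printf("period %llu -> %llu ns\n",
                   (unsigned long long)period, (unsigned long long)nperiod);
            printf("quota  %llu -> %llu ns (still ~50%%)\n",
                   (unsigned long long)quota, (unsigned long long)nquota);
            return 0;
    }
]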
2783 +diff --git a/kernel/sysctl.c b/kernel/sysctl.c
2784 +index 9e22660153ff..9a85c7ae7362 100644
2785 +--- a/kernel/sysctl.c
2786 ++++ b/kernel/sysctl.c
2787 +@@ -125,6 +125,7 @@ static int zero;
2788 + static int __maybe_unused one = 1;
2789 + static int __maybe_unused two = 2;
2790 + static int __maybe_unused four = 4;
2791 ++static unsigned long zero_ul;
2792 + static unsigned long one_ul = 1;
2793 + static unsigned long long_max = LONG_MAX;
2794 + static int one_hundred = 100;
2795 +@@ -1696,7 +1697,7 @@ static struct ctl_table fs_table[] = {
2796 + .maxlen = sizeof(files_stat.max_files),
2797 + .mode = 0644,
2798 + .proc_handler = proc_doulongvec_minmax,
2799 +- .extra1 = &zero,
2800 ++ .extra1 = &zero_ul,
2801 + .extra2 = &long_max,
2802 + },
2803 + {
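[The sysctl hunk above exists because proc_doulongvec_minmax() dereferences extra1 as an unsigned long: pointing it at the int 'zero' makes the handler read 8 bytes from a 4-byte object on 64-bit, picking up adjacent memory as the lower bound — hence the dedicated unsigned long zero_ul. The size mismatch in one line:

    #include <stdio.h>

    static int zero;               /* 4 bytes on LP64 */
    static unsigned long zero_ul;  /* 8 bytes on LP64 */

    int main(void)
    {
            printf("sizeof(int)=%zu, sizeof(unsigned long)=%zu\n",
                   sizeof zero, sizeof zero_ul);
            /* A u_long handler doing *(unsigned long *)&zero would read
             * 4 bytes past the end of the int. */
            return 0;
    }
]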
2804 +diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
2805 +index cbc72c2c1fca..78eb05aa8003 100644
2806 +--- a/kernel/time/sched_clock.c
2807 ++++ b/kernel/time/sched_clock.c
2808 +@@ -275,7 +275,7 @@ static u64 notrace suspended_sched_clock_read(void)
2809 + return cd.read_data[seq & 1].epoch_cyc;
2810 + }
2811 +
2812 +-static int sched_clock_suspend(void)
2813 ++int sched_clock_suspend(void)
2814 + {
2815 + struct clock_read_data *rd = &cd.read_data[0];
2816 +
2817 +@@ -286,7 +286,7 @@ static int sched_clock_suspend(void)
2818 + return 0;
2819 + }
2820 +
2821 +-static void sched_clock_resume(void)
2822 ++void sched_clock_resume(void)
2823 + {
2824 + struct clock_read_data *rd = &cd.read_data[0];
2825 +
2826 +diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
2827 +index 14de3727b18e..a02e0f6b287c 100644
2828 +--- a/kernel/time/tick-common.c
2829 ++++ b/kernel/time/tick-common.c
2830 +@@ -491,6 +491,7 @@ void tick_freeze(void)
2831 + trace_suspend_resume(TPS("timekeeping_freeze"),
2832 + smp_processor_id(), true);
2833 + system_state = SYSTEM_SUSPEND;
2834 ++ sched_clock_suspend();
2835 + timekeeping_suspend();
2836 + } else {
2837 + tick_suspend_local();
2838 +@@ -514,6 +515,7 @@ void tick_unfreeze(void)
2839 +
2840 + if (tick_freeze_depth == num_online_cpus()) {
2841 + timekeeping_resume();
2842 ++ sched_clock_resume();
2843 + system_state = SYSTEM_RUNNING;
2844 + trace_suspend_resume(TPS("timekeeping_freeze"),
2845 + smp_processor_id(), false);
2846 +diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
2847 +index 7a9b4eb7a1d5..141ab3ab0354 100644
2848 +--- a/kernel/time/timekeeping.h
2849 ++++ b/kernel/time/timekeeping.h
2850 +@@ -14,6 +14,13 @@ extern u64 timekeeping_max_deferment(void);
2851 + extern void timekeeping_warp_clock(void);
2852 + extern int timekeeping_suspend(void);
2853 + extern void timekeeping_resume(void);
2854 ++#ifdef CONFIG_GENERIC_SCHED_CLOCK
2855 ++extern int sched_clock_suspend(void);
2856 ++extern void sched_clock_resume(void);
2857 ++#else
2858 ++static inline int sched_clock_suspend(void) { return 0; }
2859 ++static inline void sched_clock_resume(void) { }
2860 ++#endif
2861 +
2862 + extern void do_timer(unsigned long ticks);
2863 + extern void update_wall_time(void);
2864 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
2865 +index e23eb9fc77aa..1688782f3dfb 100644
2866 +--- a/kernel/trace/ftrace.c
2867 ++++ b/kernel/trace/ftrace.c
2868 +@@ -34,6 +34,7 @@
2869 + #include <linux/list.h>
2870 + #include <linux/hash.h>
2871 + #include <linux/rcupdate.h>
2872 ++#include <linux/kprobes.h>
2873 +
2874 + #include <trace/events/sched.h>
2875 +
2876 +@@ -6250,7 +6251,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
2877 + tr->ops->func = ftrace_stub;
2878 + }
2879 +
2880 +-static inline void
2881 ++static nokprobe_inline void
2882 + __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
2883 + struct ftrace_ops *ignored, struct pt_regs *regs)
2884 + {
2885 +@@ -6310,11 +6311,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
2886 + {
2887 + __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
2888 + }
2889 ++NOKPROBE_SYMBOL(ftrace_ops_list_func);
2890 + #else
2891 + static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
2892 + {
2893 + __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
2894 + }
2895 ++NOKPROBE_SYMBOL(ftrace_ops_no_ops);
2896 + #endif
2897 +
2898 + /*
2899 +@@ -6341,6 +6344,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
2900 + preempt_enable_notrace();
2901 + trace_clear_recursion(bit);
2902 + }
2903 ++NOKPROBE_SYMBOL(ftrace_ops_assist_func);
2904 +
2905 + /**
2906 + * ftrace_ops_get_func - get the function a trampoline should call
2907 +diff --git a/mm/mmap.c b/mm/mmap.c
2908 +index 43507f7e66b4..1480880ff814 100644
2909 +--- a/mm/mmap.c
2910 ++++ b/mm/mmap.c
2911 +@@ -45,6 +45,7 @@
2912 + #include <linux/moduleparam.h>
2913 + #include <linux/pkeys.h>
2914 + #include <linux/oom.h>
2915 ++#include <linux/sched/mm.h>
2916 +
2917 + #include <linux/uaccess.h>
2918 + #include <asm/cacheflush.h>
2919 +@@ -2491,7 +2492,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
2920 + vma = find_vma_prev(mm, addr, &prev);
2921 + if (vma && (vma->vm_start <= addr))
2922 + return vma;
2923 +- if (!prev || expand_stack(prev, addr))
2924 ++ /* don't alter vm_end if the coredump is running */
2925 ++ if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
2926 + return NULL;
2927 + if (prev->vm_flags & VM_LOCKED)
2928 + populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2929 +@@ -2517,6 +2519,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
2930 + return vma;
2931 + if (!(vma->vm_flags & VM_GROWSDOWN))
2932 + return NULL;
2933 ++ /* don't alter vm_start if the coredump is running */
2934 ++ if (!mmget_still_valid(mm))
2935 ++ return NULL;
2936 + start = vma->vm_start;
2937 + if (expand_stack(vma, addr))
2938 + return NULL;
2939 +diff --git a/mm/percpu.c b/mm/percpu.c
2940 +index 4b90682623e9..41e58f3d8fbf 100644
2941 +--- a/mm/percpu.c
2942 ++++ b/mm/percpu.c
2943 +@@ -2529,8 +2529,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2944 + ai->groups[group].base_offset = areas[group] - base;
2945 + }
2946 +
2947 +- pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
2948 +- PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
2949 ++ pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
2950 ++ PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
2951 + ai->dyn_size, ai->unit_size);
2952 +
2953 + rc = pcpu_setup_first_chunk(ai, base);
2954 +@@ -2651,8 +2651,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
2955 + }
2956 +
2957 + /* we're ready, commit */
2958 +- pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
2959 +- unit_pages, psize_str, vm.addr, ai->static_size,
2960 ++ pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
2961 ++ unit_pages, psize_str, ai->static_size,
2962 + ai->reserved_size, ai->dyn_size);
2963 +
2964 + rc = pcpu_setup_first_chunk(ai, vm.addr);
2965 +diff --git a/mm/vmstat.c b/mm/vmstat.c
2966 +index 2878dc4e9af6..4a387937f9f5 100644
2967 +--- a/mm/vmstat.c
2968 ++++ b/mm/vmstat.c
2969 +@@ -1272,13 +1272,8 @@ const char * const vmstat_text[] = {
2970 + #endif
2971 + #endif /* CONFIG_MEMORY_BALLOON */
2972 + #ifdef CONFIG_DEBUG_TLBFLUSH
2973 +-#ifdef CONFIG_SMP
2974 + "nr_tlb_remote_flush",
2975 + "nr_tlb_remote_flush_received",
2976 +-#else
2977 +- "", /* nr_tlb_remote_flush */
2978 +- "", /* nr_tlb_remote_flush_received */
2979 +-#endif /* CONFIG_SMP */
2980 + "nr_tlb_local_flush_all",
2981 + "nr_tlb_local_flush_one",
2982 + #endif /* CONFIG_DEBUG_TLBFLUSH */
2983 +diff --git a/net/atm/lec.c b/net/atm/lec.c
2984 +index d7f5cf5b7594..ad4f829193f0 100644
2985 +--- a/net/atm/lec.c
2986 ++++ b/net/atm/lec.c
2987 +@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
2988 +
2989 + static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
2990 + {
2991 +- if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
2992 ++ if (arg < 0 || arg >= MAX_LEC_ITF)
2993 ++ return -EINVAL;
2994 ++ arg = array_index_nospec(arg, MAX_LEC_ITF);
2995 ++ if (!dev_lec[arg])
2996 + return -EINVAL;
2997 + vcc->proto_data = dev_lec[arg];
2998 + return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
2999 +@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
3000 + i = arg;
3001 + if (arg >= MAX_LEC_ITF)
3002 + return -EINVAL;
3003 ++ i = array_index_nospec(arg, MAX_LEC_ITF);
3004 + if (!dev_lec[i]) {
3005 + int size;
3006 +
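
Both lec.c hunks apply the standard Spectre-v1 mitigation: a bounds check alone does not stop the CPU from speculatively indexing dev_lec[] with an out-of-range value, so the index is additionally clamped with array_index_nospec() after the check. A minimal sketch of the pattern, using the identifiers from this file:

	#include <linux/nospec.h>

	/* idx comes from userspace; clamp it after the range check so
	 * speculative execution cannot use an out-of-bounds value. */
	if (idx < 0 || idx >= MAX_LEC_ITF)
		return -EINVAL;
	idx = array_index_nospec(idx, MAX_LEC_ITF);
	vcc->proto_data = dev_lec[idx];
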
3007 +diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
3008 +index 72074276c088..fed0ff446abb 100644
3009 +--- a/net/bridge/br_input.c
3010 ++++ b/net/bridge/br_input.c
3011 +@@ -195,13 +195,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
3012 + /* note: already called with rcu_read_lock */
3013 + static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
3014 + {
3015 +- struct net_bridge_port *p = br_port_get_rcu(skb->dev);
3016 +-
3017 + __br_handle_local_finish(skb);
3018 +
3019 +- BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
3020 +- br_pass_frame_up(skb);
3021 +- return 0;
3022 ++ /* return 1 to signal the okfn() was called so it's ok to use the skb */
3023 ++ return 1;
3024 + }
3025 +
3026 + /*
3027 +@@ -278,10 +275,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
3028 + goto forward;
3029 + }
3030 +
3031 +- /* Deliver packet to local host only */
3032 +- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
3033 +- NULL, skb, skb->dev, NULL, br_handle_local_finish);
3034 +- return RX_HANDLER_CONSUMED;
3035 ++ /* The else clause should be hit when nf_hook():
3036 ++ * - returns < 0 (drop/error)
3037 ++ * - returns = 0 (stolen/nf_queue)
3038 ++ * Thus return 1 from the okfn() to signal the skb is ok to pass
3039 ++ */
3040 ++ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
3041 ++ dev_net(skb->dev), NULL, skb, skb->dev, NULL,
3042 ++ br_handle_local_finish) == 1) {
3043 ++ return RX_HANDLER_PASS;
3044 ++ } else {
3045 ++ return RX_HANDLER_CONSUMED;
3046 ++ }
3047 + }
3048 +
3049 + forward:
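
The "== 1" test works because NF_HOOK() only invokes the okfn when the netfilter verdict is ACCEPT, and then propagates the okfn's return value; any drop, steal, or queue verdict surfaces as <= 0. Roughly, from include/linux/netfilter.h (a sketch, slightly simplified):

	static inline int
	NF_HOOK(u8 pf, unsigned int hook, struct net *net, struct sock *sk,
		struct sk_buff *skb, struct net_device *in,
		struct net_device *out,
		int (*okfn)(struct net *, struct sock *, struct sk_buff *))
	{
		int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);

		if (ret == 1)
			ret = okfn(net, sk, skb);
		return ret;
	}

So with br_handle_local_finish() now returning 1, a return value of 1 tells br_handle_frame() the skb is still live and may be handed back as RX_HANDLER_PASS.
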
3050 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
3051 +index 20ed7adcf1cc..75901c4641b1 100644
3052 +--- a/net/bridge/br_multicast.c
3053 ++++ b/net/bridge/br_multicast.c
3054 +@@ -2152,7 +2152,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
3055 +
3056 + __br_multicast_open(br, query);
3057 +
3058 +- list_for_each_entry(port, &br->port_list, list) {
3059 ++ rcu_read_lock();
3060 ++ list_for_each_entry_rcu(port, &br->port_list, list) {
3061 + if (port->state == BR_STATE_DISABLED ||
3062 + port->state == BR_STATE_BLOCKING)
3063 + continue;
3064 +@@ -2164,6 +2165,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
3065 + br_multicast_enable(&port->ip6_own_query);
3066 + #endif
3067 + }
3068 ++ rcu_read_unlock();
3069 + }
3070 +
3071 + int br_multicast_toggle(struct net_bridge *br, unsigned long val)
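
br_multicast_start_querier() can be reached without holding the lock that protects br->port_list, so the plain list walk is converted to an RCU one. The pattern being applied, as a sketch (handle_port() stands in for the per-port body above):

	rcu_read_lock();
	list_for_each_entry_rcu(port, &br->port_list, list) {
		/* port may be unlinked concurrently, but its memory
		 * stays valid until the RCU grace period ends */
		handle_port(port);
	}
	rcu_read_unlock();
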
3072 +diff --git a/net/core/dev.c b/net/core/dev.c
3073 +index d47554307a6d..3bcec116a5f2 100644
3074 +--- a/net/core/dev.c
3075 ++++ b/net/core/dev.c
3076 +@@ -1180,7 +1180,21 @@ int dev_change_name(struct net_device *dev, const char *newname)
3077 + BUG_ON(!dev_net(dev));
3078 +
3079 + net = dev_net(dev);
3080 +- if (dev->flags & IFF_UP)
3081 ++
3082 ++ /* Some auto-enslaved devices e.g. failover slaves are
3083 ++ * special, as userspace might rename the device after
3084 ++ * the interface had been brought up and running since
3085 ++ * the point kernel initiated auto-enslavement. Allow
3086 ++ * live name change even when these slave devices are
3087 ++ * up and running.
3088 ++ *
3089 ++ * Typically, users of these auto-enslaving devices
3090 ++ * don't actually care about slave name change, as
3091 ++ * they are supposed to operate on master interface
3092 ++ * directly.
3093 ++ */
3094 ++ if (dev->flags & IFF_UP &&
3095 ++ likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
3096 + return -EBUSY;
3097 +
3098 + write_seqcount_begin(&devnet_rename_seq);
3099 +diff --git a/net/core/failover.c b/net/core/failover.c
3100 +index 4a92a98ccce9..b5cd3c727285 100644
3101 +--- a/net/core/failover.c
3102 ++++ b/net/core/failover.c
3103 +@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
3104 + goto err_upper_link;
3105 + }
3106 +
3107 +- slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
3108 ++ slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
3109 +
3110 + if (fops && fops->slave_register &&
3111 + !fops->slave_register(slave_dev, failover_dev))
3112 + return NOTIFY_OK;
3113 +
3114 + netdev_upper_dev_unlink(slave_dev, failover_dev);
3115 +- slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
3116 ++ slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
3117 + err_upper_link:
3118 + netdev_rx_handler_unregister(slave_dev);
3119 + done:
3120 +@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)
3121 +
3122 + netdev_rx_handler_unregister(slave_dev);
3123 + netdev_upper_dev_unlink(slave_dev, failover_dev);
3124 +- slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
3125 ++ slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
3126 +
3127 + if (fops && fops->slave_unregister &&
3128 + !fops->slave_unregister(slave_dev, failover_dev))
3129 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3130 +index ceee28e184af..8b5768113acd 100644
3131 +--- a/net/core/skbuff.c
3132 ++++ b/net/core/skbuff.c
3133 +@@ -5071,7 +5071,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
3134 +
3135 + static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
3136 + {
3137 +- int mac_len;
3138 ++ int mac_len, meta_len;
3139 ++ void *meta;
3140 +
3141 + if (skb_cow(skb, skb_headroom(skb)) < 0) {
3142 + kfree_skb(skb);
3143 +@@ -5083,6 +5084,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
3144 + memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
3145 + mac_len - VLAN_HLEN - ETH_TLEN);
3146 + }
3147 ++
3148 ++ meta_len = skb_metadata_len(skb);
3149 ++ if (meta_len) {
3150 ++ meta = skb_metadata_end(skb) - meta_len;
3151 ++ memmove(meta + VLAN_HLEN, meta, meta_len);
3152 ++ }
3153 ++
3154 + skb->mac_header += VLAN_HLEN;
3155 + return skb;
3156 + }
3157 +diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
3158 +index 500a59906b87..854ff1e4c41f 100644
3159 +--- a/net/ipv4/fou.c
3160 ++++ b/net/ipv4/fou.c
3161 +@@ -120,6 +120,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
3162 + struct guehdr *guehdr;
3163 + void *data;
3164 + u16 doffset = 0;
3165 ++ u8 proto_ctype;
3166 +
3167 + if (!fou)
3168 + return 1;
3169 +@@ -211,13 +212,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
3170 + if (unlikely(guehdr->control))
3171 + return gue_control_message(skb, guehdr);
3172 +
3173 ++ proto_ctype = guehdr->proto_ctype;
3174 + __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
3175 + skb_reset_transport_header(skb);
3176 +
3177 + if (iptunnel_pull_offloads(skb))
3178 + goto drop;
3179 +
3180 +- return -guehdr->proto_ctype;
3181 ++ return -proto_ctype;
3182 +
3183 + drop:
3184 + kfree_skb(skb);
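
The fou hunk fixes a use-after-free flavour of bug: guehdr points into skb->head, and iptunnel_pull_offloads() can call skb_unclone(), which may reallocate skb->head and leave the pointer dangling. The rule the fix follows, sketched with the names from this function:

	u8 proto_ctype = guehdr->proto_ctype;	/* copy while the pointer is valid */

	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
	if (iptunnel_pull_offloads(skb))	/* may reallocate skb->head */
		goto drop;

	return -proto_ctype;			/* use the saved copy, not guehdr */
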
3185 +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
3186 +index 760a9e52e02b..9f69411251d0 100644
3187 +--- a/net/ipv4/inet_fragment.c
3188 ++++ b/net/ipv4/inet_fragment.c
3189 +@@ -25,6 +25,62 @@
3190 + #include <net/sock.h>
3191 + #include <net/inet_frag.h>
3192 + #include <net/inet_ecn.h>
3193 ++#include <net/ip.h>
3194 ++#include <net/ipv6.h>
3195 ++
3196 ++/* Use skb->cb to track consecutive/adjacent fragments coming at
3197 ++ * the end of the queue. Nodes in the rb-tree queue will
3198 ++ * contain "runs" of one or more adjacent fragments.
3199 ++ *
3200 ++ * Invariants:
3201 ++ * - next_frag is NULL at the tail of a "run";
3202 ++ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
3203 ++ */
3204 ++struct ipfrag_skb_cb {
3205 ++ union {
3206 ++ struct inet_skb_parm h4;
3207 ++ struct inet6_skb_parm h6;
3208 ++ };
3209 ++ struct sk_buff *next_frag;
3210 ++ int frag_run_len;
3211 ++};
3212 ++
3213 ++#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
3214 ++
3215 ++static void fragcb_clear(struct sk_buff *skb)
3216 ++{
3217 ++ RB_CLEAR_NODE(&skb->rbnode);
3218 ++ FRAG_CB(skb)->next_frag = NULL;
3219 ++ FRAG_CB(skb)->frag_run_len = skb->len;
3220 ++}
3221 ++
3222 ++/* Append skb to the last "run". */
3223 ++static void fragrun_append_to_last(struct inet_frag_queue *q,
3224 ++ struct sk_buff *skb)
3225 ++{
3226 ++ fragcb_clear(skb);
3227 ++
3228 ++ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
3229 ++ FRAG_CB(q->fragments_tail)->next_frag = skb;
3230 ++ q->fragments_tail = skb;
3231 ++}
3232 ++
3233 ++/* Create a new "run" with the skb. */
3234 ++static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
3235 ++{
3236 ++ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
3237 ++ fragcb_clear(skb);
3238 ++
3239 ++ if (q->last_run_head)
3240 ++ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
3241 ++ &q->last_run_head->rbnode.rb_right);
3242 ++ else
3243 ++ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
3244 ++ rb_insert_color(&skb->rbnode, &q->rb_fragments);
3245 ++
3246 ++ q->fragments_tail = skb;
3247 ++ q->last_run_head = skb;
3248 ++}
3249 +
3250 + /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
3251 + * Value : 0xff if frame should be dropped.
3252 +@@ -123,6 +179,28 @@ static void inet_frag_destroy_rcu(struct rcu_head *head)
3253 + kmem_cache_free(f->frags_cachep, q);
3254 + }
3255 +
3256 ++unsigned int inet_frag_rbtree_purge(struct rb_root *root)
3257 ++{
3258 ++ struct rb_node *p = rb_first(root);
3259 ++ unsigned int sum = 0;
3260 ++
3261 ++ while (p) {
3262 ++ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3263 ++
3264 ++ p = rb_next(p);
3265 ++ rb_erase(&skb->rbnode, root);
3266 ++ while (skb) {
3267 ++ struct sk_buff *next = FRAG_CB(skb)->next_frag;
3268 ++
3269 ++ sum += skb->truesize;
3270 ++ kfree_skb(skb);
3271 ++ skb = next;
3272 ++ }
3273 ++ }
3274 ++ return sum;
3275 ++}
3276 ++EXPORT_SYMBOL(inet_frag_rbtree_purge);
3277 ++
3278 + void inet_frag_destroy(struct inet_frag_queue *q)
3279 + {
3280 + struct sk_buff *fp;
3281 +@@ -224,3 +302,218 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
3282 + return fq;
3283 + }
3284 + EXPORT_SYMBOL(inet_frag_find);
3285 ++
3286 ++int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
3287 ++ int offset, int end)
3288 ++{
3289 ++ struct sk_buff *last = q->fragments_tail;
3290 ++
3291 ++ /* RFC5722, Section 4, amended by Errata ID : 3089
3292 ++ * When reassembling an IPv6 datagram, if
3293 ++ * one or more its constituent fragments is determined to be an
3294 ++ * overlapping fragment, the entire datagram (and any constituent
3295 ++ * fragments) MUST be silently discarded.
3296 ++ *
3297 ++ * Duplicates, however, should be ignored (i.e. skb dropped, but the
3298 ++ * queue/fragments kept for later reassembly).
3299 ++ */
3300 ++ if (!last)
3301 ++ fragrun_create(q, skb); /* First fragment. */
3302 ++ else if (last->ip_defrag_offset + last->len < end) {
3303 ++ /* This is the common case: skb goes to the end. */
3304 ++ /* Detect and discard overlaps. */
3305 ++ if (offset < last->ip_defrag_offset + last->len)
3306 ++ return IPFRAG_OVERLAP;
3307 ++ if (offset == last->ip_defrag_offset + last->len)
3308 ++ fragrun_append_to_last(q, skb);
3309 ++ else
3310 ++ fragrun_create(q, skb);
3311 ++ } else {
3312 ++ /* Binary search. Note that skb can become the first fragment,
3313 ++ * but not the last (covered above).
3314 ++ */
3315 ++ struct rb_node **rbn, *parent;
3316 ++
3317 ++ rbn = &q->rb_fragments.rb_node;
3318 ++ do {
3319 ++ struct sk_buff *curr;
3320 ++ int curr_run_end;
3321 ++
3322 ++ parent = *rbn;
3323 ++ curr = rb_to_skb(parent);
3324 ++ curr_run_end = curr->ip_defrag_offset +
3325 ++ FRAG_CB(curr)->frag_run_len;
3326 ++ if (end <= curr->ip_defrag_offset)
3327 ++ rbn = &parent->rb_left;
3328 ++ else if (offset >= curr_run_end)
3329 ++ rbn = &parent->rb_right;
3330 ++ else if (offset >= curr->ip_defrag_offset &&
3331 ++ end <= curr_run_end)
3332 ++ return IPFRAG_DUP;
3333 ++ else
3334 ++ return IPFRAG_OVERLAP;
3335 ++ } while (*rbn);
3336 ++ /* Here we have parent properly set, and rbn pointing to
3337 ++ * one of its NULL left/right children. Insert skb.
3338 ++ */
3339 ++ fragcb_clear(skb);
3340 ++ rb_link_node(&skb->rbnode, parent, rbn);
3341 ++ rb_insert_color(&skb->rbnode, &q->rb_fragments);
3342 ++ }
3343 ++
3344 ++ skb->ip_defrag_offset = offset;
3345 ++
3346 ++ return IPFRAG_OK;
3347 ++}
3348 ++EXPORT_SYMBOL(inet_frag_queue_insert);
3349 ++
3350 ++void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
3351 ++ struct sk_buff *parent)
3352 ++{
3353 ++ struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
3354 ++ struct sk_buff **nextp;
3355 ++ int delta;
3356 ++
3357 ++ if (head != skb) {
3358 ++ fp = skb_clone(skb, GFP_ATOMIC);
3359 ++ if (!fp)
3360 ++ return NULL;
3361 ++ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
3362 ++ if (RB_EMPTY_NODE(&skb->rbnode))
3363 ++ FRAG_CB(parent)->next_frag = fp;
3364 ++ else
3365 ++ rb_replace_node(&skb->rbnode, &fp->rbnode,
3366 ++ &q->rb_fragments);
3367 ++ if (q->fragments_tail == skb)
3368 ++ q->fragments_tail = fp;
3369 ++ skb_morph(skb, head);
3370 ++ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
3371 ++ rb_replace_node(&head->rbnode, &skb->rbnode,
3372 ++ &q->rb_fragments);
3373 ++ consume_skb(head);
3374 ++ head = skb;
3375 ++ }
3376 ++ WARN_ON(head->ip_defrag_offset != 0);
3377 ++
3378 ++ delta = -head->truesize;
3379 ++
3380 ++ /* Head of list must not be cloned. */
3381 ++ if (skb_unclone(head, GFP_ATOMIC))
3382 ++ return NULL;
3383 ++
3384 ++ delta += head->truesize;
3385 ++ if (delta)
3386 ++ add_frag_mem_limit(q->net, delta);
3387 ++
3388 ++ /* If the first fragment is fragmented itself, we split
3389 ++ * it to two chunks: the first with data and paged part
3390 ++ * and the second, holding only fragments.
3391 ++ */
3392 ++ if (skb_has_frag_list(head)) {
3393 ++ struct sk_buff *clone;
3394 ++ int i, plen = 0;
3395 ++
3396 ++ clone = alloc_skb(0, GFP_ATOMIC);
3397 ++ if (!clone)
3398 ++ return NULL;
3399 ++ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
3400 ++ skb_frag_list_init(head);
3401 ++ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
3402 ++ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
3403 ++ clone->data_len = head->data_len - plen;
3404 ++ clone->len = clone->data_len;
3405 ++ head->truesize += clone->truesize;
3406 ++ clone->csum = 0;
3407 ++ clone->ip_summed = head->ip_summed;
3408 ++ add_frag_mem_limit(q->net, clone->truesize);
3409 ++ skb_shinfo(head)->frag_list = clone;
3410 ++ nextp = &clone->next;
3411 ++ } else {
3412 ++ nextp = &skb_shinfo(head)->frag_list;
3413 ++ }
3414 ++
3415 ++ return nextp;
3416 ++}
3417 ++EXPORT_SYMBOL(inet_frag_reasm_prepare);
3418 ++
3419 ++void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
3420 ++ void *reasm_data)
3421 ++{
3422 ++ struct sk_buff **nextp = (struct sk_buff **)reasm_data;
3423 ++ struct rb_node *rbn;
3424 ++ struct sk_buff *fp;
3425 ++
3426 ++ skb_push(head, head->data - skb_network_header(head));
3427 ++
3428 ++ /* Traverse the tree in order, to build frag_list. */
3429 ++ fp = FRAG_CB(head)->next_frag;
3430 ++ rbn = rb_next(&head->rbnode);
3431 ++ rb_erase(&head->rbnode, &q->rb_fragments);
3432 ++ while (rbn || fp) {
3433 ++ /* fp points to the next sk_buff in the current run;
3434 ++ * rbn points to the next run.
3435 ++ */
3436 ++ /* Go through the current run. */
3437 ++ while (fp) {
3438 ++ *nextp = fp;
3439 ++ nextp = &fp->next;
3440 ++ fp->prev = NULL;
3441 ++ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
3442 ++ fp->sk = NULL;
3443 ++ head->data_len += fp->len;
3444 ++ head->len += fp->len;
3445 ++ if (head->ip_summed != fp->ip_summed)
3446 ++ head->ip_summed = CHECKSUM_NONE;
3447 ++ else if (head->ip_summed == CHECKSUM_COMPLETE)
3448 ++ head->csum = csum_add(head->csum, fp->csum);
3449 ++ head->truesize += fp->truesize;
3450 ++ fp = FRAG_CB(fp)->next_frag;
3451 ++ }
3452 ++ /* Move to the next run. */
3453 ++ if (rbn) {
3454 ++ struct rb_node *rbnext = rb_next(rbn);
3455 ++
3456 ++ fp = rb_to_skb(rbn);
3457 ++ rb_erase(rbn, &q->rb_fragments);
3458 ++ rbn = rbnext;
3459 ++ }
3460 ++ }
3461 ++ sub_frag_mem_limit(q->net, head->truesize);
3462 ++
3463 ++ *nextp = NULL;
3464 ++ skb_mark_not_on_list(head);
3465 ++ head->prev = NULL;
3466 ++ head->tstamp = q->stamp;
3467 ++}
3468 ++EXPORT_SYMBOL(inet_frag_reasm_finish);
3469 ++
3470 ++struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
3471 ++{
3472 ++ struct sk_buff *head;
3473 ++
3474 ++ if (q->fragments) {
3475 ++ head = q->fragments;
3476 ++ q->fragments = head->next;
3477 ++ } else {
3478 ++ struct sk_buff *skb;
3479 ++
3480 ++ head = skb_rb_first(&q->rb_fragments);
3481 ++ if (!head)
3482 ++ return NULL;
3483 ++ skb = FRAG_CB(head)->next_frag;
3484 ++ if (skb)
3485 ++ rb_replace_node(&head->rbnode, &skb->rbnode,
3486 ++ &q->rb_fragments);
3487 ++ else
3488 ++ rb_erase(&head->rbnode, &q->rb_fragments);
3489 ++ memset(&head->rbnode, 0, sizeof(head->rbnode));
3490 ++ barrier();
3491 ++ }
3492 ++ if (head == q->fragments_tail)
3493 ++ q->fragments_tail = NULL;
3494 ++
3495 ++ sub_frag_mem_limit(q->net, head->truesize);
3496 ++
3497 ++ return head;
3498 ++}
3499 ++EXPORT_SYMBOL(inet_frag_pull_head);
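
The code moved into inet_fragment.c stores fragments in an rbtree of "runs": consecutive fragments are chained through FRAG_CB(skb)->next_frag off a single tree node, and the run head caches the chain's total length in frag_run_len, so the common in-order arrival costs one O(1) tail check instead of a tree walk. A simplified user-space model of the invariants (a sketch, not kernel code):

	struct frag {
		int offset, len;
		struct frag *next_frag;	/* NULL at the tail of a run */
		int frag_run_len;	/* sum of chain lengths; run head only */
	};

	/* Caller has verified f is exactly adjacent to the run's end,
	 * i.e. f->offset == tail->offset + tail->len. */
	static void run_append(struct frag *head, struct frag *tail,
			       struct frag *f)
	{
		f->next_frag = NULL;
		f->frag_run_len = f->len;
		tail->next_frag = f;
		head->frag_run_len += f->len;
	}
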
3500 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
3501 +index d95b32af4a0e..5a1d39e32196 100644
3502 +--- a/net/ipv4/ip_fragment.c
3503 ++++ b/net/ipv4/ip_fragment.c
3504 +@@ -57,57 +57,6 @@
3505 + */
3506 + static const char ip_frag_cache_name[] = "ip4-frags";
3507 +
3508 +-/* Use skb->cb to track consecutive/adjacent fragments coming at
3509 +- * the end of the queue. Nodes in the rb-tree queue will
3510 +- * contain "runs" of one or more adjacent fragments.
3511 +- *
3512 +- * Invariants:
3513 +- * - next_frag is NULL at the tail of a "run";
3514 +- * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
3515 +- */
3516 +-struct ipfrag_skb_cb {
3517 +- struct inet_skb_parm h;
3518 +- struct sk_buff *next_frag;
3519 +- int frag_run_len;
3520 +-};
3521 +-
3522 +-#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
3523 +-
3524 +-static void ip4_frag_init_run(struct sk_buff *skb)
3525 +-{
3526 +- BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
3527 +-
3528 +- FRAG_CB(skb)->next_frag = NULL;
3529 +- FRAG_CB(skb)->frag_run_len = skb->len;
3530 +-}
3531 +-
3532 +-/* Append skb to the last "run". */
3533 +-static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
3534 +- struct sk_buff *skb)
3535 +-{
3536 +- RB_CLEAR_NODE(&skb->rbnode);
3537 +- FRAG_CB(skb)->next_frag = NULL;
3538 +-
3539 +- FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
3540 +- FRAG_CB(q->fragments_tail)->next_frag = skb;
3541 +- q->fragments_tail = skb;
3542 +-}
3543 +-
3544 +-/* Create a new "run" with the skb. */
3545 +-static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
3546 +-{
3547 +- if (q->last_run_head)
3548 +- rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
3549 +- &q->last_run_head->rbnode.rb_right);
3550 +- else
3551 +- rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
3552 +- rb_insert_color(&skb->rbnode, &q->rb_fragments);
3553 +-
3554 +- ip4_frag_init_run(skb);
3555 +- q->fragments_tail = skb;
3556 +- q->last_run_head = skb;
3557 +-}
3558 +-
3559 + /* Describe an entry in the "incomplete datagrams" queue. */
3560 + struct ipq {
3561 + struct inet_frag_queue q;
3562 +@@ -212,27 +161,9 @@ static void ip_expire(struct timer_list *t)
3563 + * pull the head out of the tree in order to be able to
3564 + * deal with head->dev.
3565 + */
3566 +- if (qp->q.fragments) {
3567 +- head = qp->q.fragments;
3568 +- qp->q.fragments = head->next;
3569 +- } else {
3570 +- head = skb_rb_first(&qp->q.rb_fragments);
3571 +- if (!head)
3572 +- goto out;
3573 +- if (FRAG_CB(head)->next_frag)
3574 +- rb_replace_node(&head->rbnode,
3575 +- &FRAG_CB(head)->next_frag->rbnode,
3576 +- &qp->q.rb_fragments);
3577 +- else
3578 +- rb_erase(&head->rbnode, &qp->q.rb_fragments);
3579 +- memset(&head->rbnode, 0, sizeof(head->rbnode));
3580 +- barrier();
3581 +- }
3582 +- if (head == qp->q.fragments_tail)
3583 +- qp->q.fragments_tail = NULL;
3584 +-
3585 +- sub_frag_mem_limit(qp->q.net, head->truesize);
3586 +-
3587 ++ head = inet_frag_pull_head(&qp->q);
3588 ++ if (!head)
3589 ++ goto out;
3590 + head->dev = dev_get_by_index_rcu(net, qp->iif);
3591 + if (!head->dev)
3592 + goto out;
3593 +@@ -345,12 +276,10 @@ static int ip_frag_reinit(struct ipq *qp)
3594 + static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
3595 + {
3596 + struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
3597 +- struct rb_node **rbn, *parent;
3598 +- struct sk_buff *skb1, *prev_tail;
3599 +- int ihl, end, skb1_run_end;
3600 ++ int ihl, end, flags, offset;
3601 ++ struct sk_buff *prev_tail;
3602 + struct net_device *dev;
3603 + unsigned int fragsize;
3604 +- int flags, offset;
3605 + int err = -ENOENT;
3606 + u8 ecn;
3607 +
3608 +@@ -382,7 +311,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
3609 + */
3610 + if (end < qp->q.len ||
3611 + ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
3612 +- goto err;
3613 ++ goto discard_qp;
3614 + qp->q.flags |= INET_FRAG_LAST_IN;
3615 + qp->q.len = end;
3616 + } else {
3617 +@@ -394,82 +323,33 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
3618 + if (end > qp->q.len) {
3619 + /* Some bits beyond end -> corruption. */
3620 + if (qp->q.flags & INET_FRAG_LAST_IN)
3621 +- goto err;
3622 ++ goto discard_qp;
3623 + qp->q.len = end;
3624 + }
3625 + }
3626 + if (end == offset)
3627 +- goto err;
3628 ++ goto discard_qp;
3629 +
3630 + err = -ENOMEM;
3631 + if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
3632 +- goto err;
3633 ++ goto discard_qp;
3634 +
3635 + err = pskb_trim_rcsum(skb, end - offset);
3636 + if (err)
3637 +- goto err;
3638 ++ goto discard_qp;
3639 +
3640 + /* Note : skb->rbnode and skb->dev share the same location. */
3641 + dev = skb->dev;
3642 + /* Makes sure compiler wont do silly aliasing games */
3643 + barrier();
3644 +
3645 +- /* RFC5722, Section 4, amended by Errata ID : 3089
3646 +- * When reassembling an IPv6 datagram, if
3647 +- * one or more its constituent fragments is determined to be an
3648 +- * overlapping fragment, the entire datagram (and any constituent
3649 +- * fragments) MUST be silently discarded.
3650 +- *
3651 +- * We do the same here for IPv4 (and increment an snmp counter) but
3652 +- * we do not want to drop the whole queue in response to a duplicate
3653 +- * fragment.
3654 +- */
3655 +-
3656 +- err = -EINVAL;
3657 +- /* Find out where to put this fragment. */
3658 + prev_tail = qp->q.fragments_tail;
3659 +- if (!prev_tail)
3660 +- ip4_frag_create_run(&qp->q, skb); /* First fragment. */
3661 +- else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
3662 +- /* This is the common case: skb goes to the end. */
3663 +- /* Detect and discard overlaps. */
3664 +- if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
3665 +- goto discard_qp;
3666 +- if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
3667 +- ip4_frag_append_to_last_run(&qp->q, skb);
3668 +- else
3669 +- ip4_frag_create_run(&qp->q, skb);
3670 +- } else {
3671 +- /* Binary search. Note that skb can become the first fragment,
3672 +- * but not the last (covered above).
3673 +- */
3674 +- rbn = &qp->q.rb_fragments.rb_node;
3675 +- do {
3676 +- parent = *rbn;
3677 +- skb1 = rb_to_skb(parent);
3678 +- skb1_run_end = skb1->ip_defrag_offset +
3679 +- FRAG_CB(skb1)->frag_run_len;
3680 +- if (end <= skb1->ip_defrag_offset)
3681 +- rbn = &parent->rb_left;
3682 +- else if (offset >= skb1_run_end)
3683 +- rbn = &parent->rb_right;
3684 +- else if (offset >= skb1->ip_defrag_offset &&
3685 +- end <= skb1_run_end)
3686 +- goto err; /* No new data, potential duplicate */
3687 +- else
3688 +- goto discard_qp; /* Found an overlap */
3689 +- } while (*rbn);
3690 +- /* Here we have parent properly set, and rbn pointing to
3691 +- * one of its NULL left/right children. Insert skb.
3692 +- */
3693 +- ip4_frag_init_run(skb);
3694 +- rb_link_node(&skb->rbnode, parent, rbn);
3695 +- rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
3696 +- }
3697 ++ err = inet_frag_queue_insert(&qp->q, skb, offset, end);
3698 ++ if (err)
3699 ++ goto insert_error;
3700 +
3701 + if (dev)
3702 + qp->iif = dev->ifindex;
3703 +- skb->ip_defrag_offset = offset;
3704 +
3705 + qp->q.stamp = skb->tstamp;
3706 + qp->q.meat += skb->len;
3707 +@@ -494,15 +374,24 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
3708 + skb->_skb_refdst = 0UL;
3709 + err = ip_frag_reasm(qp, skb, prev_tail, dev);
3710 + skb->_skb_refdst = orefdst;
3711 ++ if (err)
3712 ++ inet_frag_kill(&qp->q);
3713 + return err;
3714 + }
3715 +
3716 + skb_dst_drop(skb);
3717 + return -EINPROGRESS;
3718 +
3719 ++insert_error:
3720 ++ if (err == IPFRAG_DUP) {
3721 ++ kfree_skb(skb);
3722 ++ return -EINVAL;
3723 ++ }
3724 ++ err = -EINVAL;
3725 ++ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
3726 + discard_qp:
3727 + inet_frag_kill(&qp->q);
3728 +- __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
3729 ++ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
3730 + err:
3731 + kfree_skb(skb);
3732 + return err;
3733 +@@ -514,13 +403,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
3734 + {
3735 + struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
3736 + struct iphdr *iph;
3737 +- struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
3738 +- struct sk_buff **nextp; /* To build frag_list. */
3739 +- struct rb_node *rbn;
3740 +- int len;
3741 +- int ihlen;
3742 +- int delta;
3743 +- int err;
3744 ++ void *reasm_data;
3745 ++ int len, err;
3746 + u8 ecn;
3747 +
3748 + ipq_kill(qp);
3749 +@@ -530,117 +414,23 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
3750 + err = -EINVAL;
3751 + goto out_fail;
3752 + }
3753 +- /* Make the one we just received the head. */
3754 +- if (head != skb) {
3755 +- fp = skb_clone(skb, GFP_ATOMIC);
3756 +- if (!fp)
3757 +- goto out_nomem;
3758 +- FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
3759 +- if (RB_EMPTY_NODE(&skb->rbnode))
3760 +- FRAG_CB(prev_tail)->next_frag = fp;
3761 +- else
3762 +- rb_replace_node(&skb->rbnode, &fp->rbnode,
3763 +- &qp->q.rb_fragments);
3764 +- if (qp->q.fragments_tail == skb)
3765 +- qp->q.fragments_tail = fp;
3766 +- skb_morph(skb, head);
3767 +- FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
3768 +- rb_replace_node(&head->rbnode, &skb->rbnode,
3769 +- &qp->q.rb_fragments);
3770 +- consume_skb(head);
3771 +- head = skb;
3772 +- }
3773 +
3774 +- WARN_ON(head->ip_defrag_offset != 0);
3775 +-
3776 +- /* Allocate a new buffer for the datagram. */
3777 +- ihlen = ip_hdrlen(head);
3778 +- len = ihlen + qp->q.len;
3779 ++ /* Make the one we just received the head. */
3780 ++ reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
3781 ++ if (!reasm_data)
3782 ++ goto out_nomem;
3783 +
3784 ++ len = ip_hdrlen(skb) + qp->q.len;
3785 + err = -E2BIG;
3786 + if (len > 65535)
3787 + goto out_oversize;
3788 +
3789 +- delta = - head->truesize;
3790 +-
3791 +- /* Head of list must not be cloned. */
3792 +- if (skb_unclone(head, GFP_ATOMIC))
3793 +- goto out_nomem;
3794 +-
3795 +- delta += head->truesize;
3796 +- if (delta)
3797 +- add_frag_mem_limit(qp->q.net, delta);
3798 +-
3799 +- /* If the first fragment is fragmented itself, we split
3800 +- * it to two chunks: the first with data and paged part
3801 +- * and the second, holding only fragments. */
3802 +- if (skb_has_frag_list(head)) {
3803 +- struct sk_buff *clone;
3804 +- int i, plen = 0;
3805 +-
3806 +- clone = alloc_skb(0, GFP_ATOMIC);
3807 +- if (!clone)
3808 +- goto out_nomem;
3809 +- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
3810 +- skb_frag_list_init(head);
3811 +- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
3812 +- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
3813 +- clone->len = clone->data_len = head->data_len - plen;
3814 +- head->truesize += clone->truesize;
3815 +- clone->csum = 0;
3816 +- clone->ip_summed = head->ip_summed;
3817 +- add_frag_mem_limit(qp->q.net, clone->truesize);
3818 +- skb_shinfo(head)->frag_list = clone;
3819 +- nextp = &clone->next;
3820 +- } else {
3821 +- nextp = &skb_shinfo(head)->frag_list;
3822 +- }
3823 ++ inet_frag_reasm_finish(&qp->q, skb, reasm_data);
3824 +
3825 +- skb_push(head, head->data - skb_network_header(head));
3826 ++ skb->dev = dev;
3827 ++ IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
3828 +
3829 +- /* Traverse the tree in order, to build frag_list. */
3830 +- fp = FRAG_CB(head)->next_frag;
3831 +- rbn = rb_next(&head->rbnode);
3832 +- rb_erase(&head->rbnode, &qp->q.rb_fragments);
3833 +- while (rbn || fp) {
3834 +- /* fp points to the next sk_buff in the current run;
3835 +- * rbn points to the next run.
3836 +- */
3837 +- /* Go through the current run. */
3838 +- while (fp) {
3839 +- *nextp = fp;
3840 +- nextp = &fp->next;
3841 +- fp->prev = NULL;
3842 +- memset(&fp->rbnode, 0, sizeof(fp->rbnode));
3843 +- fp->sk = NULL;
3844 +- head->data_len += fp->len;
3845 +- head->len += fp->len;
3846 +- if (head->ip_summed != fp->ip_summed)
3847 +- head->ip_summed = CHECKSUM_NONE;
3848 +- else if (head->ip_summed == CHECKSUM_COMPLETE)
3849 +- head->csum = csum_add(head->csum, fp->csum);
3850 +- head->truesize += fp->truesize;
3851 +- fp = FRAG_CB(fp)->next_frag;
3852 +- }
3853 +- /* Move to the next run. */
3854 +- if (rbn) {
3855 +- struct rb_node *rbnext = rb_next(rbn);
3856 +-
3857 +- fp = rb_to_skb(rbn);
3858 +- rb_erase(rbn, &qp->q.rb_fragments);
3859 +- rbn = rbnext;
3860 +- }
3861 +- }
3862 +- sub_frag_mem_limit(qp->q.net, head->truesize);
3863 +-
3864 +- *nextp = NULL;
3865 +- head->next = NULL;
3866 +- head->prev = NULL;
3867 +- head->dev = dev;
3868 +- head->tstamp = qp->q.stamp;
3869 +- IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
3870 +-
3871 +- iph = ip_hdr(head);
3872 ++ iph = ip_hdr(skb);
3873 + iph->tot_len = htons(len);
3874 + iph->tos |= ecn;
3875 +
3876 +@@ -653,7 +443,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
3877 + * from one very small df-fragment and one large non-df frag.
3878 + */
3879 + if (qp->max_df_size == qp->q.max_size) {
3880 +- IPCB(head)->flags |= IPSKB_FRAG_PMTU;
3881 ++ IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
3882 + iph->frag_off = htons(IP_DF);
3883 + } else {
3884 + iph->frag_off = 0;
3885 +@@ -751,28 +541,6 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
3886 + }
3887 + EXPORT_SYMBOL(ip_check_defrag);
3888 +
3889 +-unsigned int inet_frag_rbtree_purge(struct rb_root *root)
3890 +-{
3891 +- struct rb_node *p = rb_first(root);
3892 +- unsigned int sum = 0;
3893 +-
3894 +- while (p) {
3895 +- struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3896 +-
3897 +- p = rb_next(p);
3898 +- rb_erase(&skb->rbnode, root);
3899 +- while (skb) {
3900 +- struct sk_buff *next = FRAG_CB(skb)->next_frag;
3901 +-
3902 +- sum += skb->truesize;
3903 +- kfree_skb(skb);
3904 +- skb = next;
3905 +- }
3906 +- }
3907 +- return sum;
3908 +-}
3909 +-EXPORT_SYMBOL(inet_frag_rbtree_purge);
3910 +-
3911 + #ifdef CONFIG_SYSCTL
3912 + static int dist_min;
3913 +
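
With the insert logic shared, ip_frag_queue() now dispatches on inet_frag_queue_insert()'s return code: a duplicate only drops the offending skb, while an overlap kills the whole queue, as RFC 5722 requires. The codes are declared by this series in include/net/inet_frag.h, roughly:

	enum {
		IPFRAG_OK,	/* fragment inserted */
		IPFRAG_DUP,	/* duplicate: drop skb, keep the queue */
		IPFRAG_OVERLAP,	/* overlap: discard the entire queue */
	};
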
3914 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3915 +index 7a556e459375..98c81c21b753 100644
3916 +--- a/net/ipv4/route.c
3917 ++++ b/net/ipv4/route.c
3918 +@@ -1187,9 +1187,23 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
3919 +
3920 + static void ipv4_link_failure(struct sk_buff *skb)
3921 + {
3922 ++ struct ip_options opt;
3923 + struct rtable *rt;
3924 ++ int res;
3925 +
3926 +- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
3927 ++ /* Recompile ip options since IPCB may not be valid anymore.
3928 ++ */
3929 ++ memset(&opt, 0, sizeof(opt));
3930 ++ opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
3931 ++
3932 ++ rcu_read_lock();
3933 ++ res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
3934 ++ rcu_read_unlock();
3935 ++
3936 ++ if (res)
3937 ++ return;
3938 ++
3939 ++ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
3940 +
3941 + rt = skb_rtable(skb);
3942 + if (rt)
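
ipv4_link_failure() can run long after ip_rcv_options() filled in IPCB(skb), and that control-block space may have been reused by other layers, so the options are re-parsed straight from the header before the ICMP error is built. The optlen arithmetic relies on ihl counting 32-bit words; a worked example as a sketch:

	/* ihl == 7 means a 28-byte header, so 28 - 20 = 8 option bytes */
	opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
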
3943 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3944 +index 572f79abd393..cfdd70e32755 100644
3945 +--- a/net/ipv4/tcp_input.c
3946 ++++ b/net/ipv4/tcp_input.c
3947 +@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
3948 + static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
3949 + {
3950 + struct tcp_sock *tp = tcp_sk(sk);
3951 ++ int room;
3952 ++
3953 ++ room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
3954 +
3955 + /* Check #1 */
3956 +- if (tp->rcv_ssthresh < tp->window_clamp &&
3957 +- (int)tp->rcv_ssthresh < tcp_space(sk) &&
3958 +- !tcp_under_memory_pressure(sk)) {
3959 ++ if (room > 0 && !tcp_under_memory_pressure(sk)) {
3960 + int incr;
3961 +
3962 + /* Check #2. Increase window, if skb with such overhead
3963 +@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
3964 +
3965 + if (incr) {
3966 + incr = max_t(int, incr, 2 * skb->len);
3967 +- tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
3968 +- tp->window_clamp);
3969 ++ tp->rcv_ssthresh += min(room, incr);
3970 + inet_csk(sk)->icsk_ack.quick |= 1;
3971 + }
3972 + }
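
The tcp_grow_window() rewrite folds the old pair of threshold checks into one signed quantity, room: how far rcv_ssthresh may still grow before hitting either window_clamp or the currently available receive space. Clamping the increment by room guarantees no overshoot. A worked example under assumed numbers (a sketch):

	/* window_clamp = 65535, tcp_space(sk) = 40000, rcv_ssthresh = 38000:
	 * room = min(65535, 40000) - 38000 = 2000.
	 * An incr of 2 * 1460 = 2920 is clamped to 2000, landing
	 * rcv_ssthresh exactly on the receive-space limit. */
	room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
	if (room > 0 && !tcp_under_memory_pressure(sk))
		tp->rcv_ssthresh += min(room, incr);
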
3973 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
3974 +index 043ed8eb0ab9..cb1b4772dac0 100644
3975 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
3976 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
3977 +@@ -136,6 +136,9 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
3978 + }
3979 + #endif
3980 +
3981 ++static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
3982 ++ struct sk_buff *prev_tail, struct net_device *dev);
3983 ++
3984 + static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
3985 + {
3986 + return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
3987 +@@ -177,9 +180,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
3988 + static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
3989 + const struct frag_hdr *fhdr, int nhoff)
3990 + {
3991 +- struct sk_buff *prev, *next;
3992 + unsigned int payload_len;
3993 +- int offset, end;
3994 ++ struct net_device *dev;
3995 ++ struct sk_buff *prev;
3996 ++ int offset, end, err;
3997 + u8 ecn;
3998 +
3999 + if (fq->q.flags & INET_FRAG_COMPLETE) {
4000 +@@ -254,55 +258,18 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
4001 + goto err;
4002 + }
4003 +
4004 +- /* Find out which fragments are in front and at the back of us
4005 +- * in the chain of fragments so far. We must know where to put
4006 +- * this fragment, right?
4007 +- */
4008 +- prev = fq->q.fragments_tail;
4009 +- if (!prev || prev->ip_defrag_offset < offset) {
4010 +- next = NULL;
4011 +- goto found;
4012 +- }
4013 +- prev = NULL;
4014 +- for (next = fq->q.fragments; next != NULL; next = next->next) {
4015 +- if (next->ip_defrag_offset >= offset)
4016 +- break; /* bingo! */
4017 +- prev = next;
4018 +- }
4019 +-
4020 +-found:
4021 +- /* RFC5722, Section 4:
4022 +- * When reassembling an IPv6 datagram, if
4023 +- * one or more its constituent fragments is determined to be an
4024 +- * overlapping fragment, the entire datagram (and any constituent
4025 +- * fragments, including those not yet received) MUST be silently
4026 +- * discarded.
4027 +- */
4028 +-
4029 +- /* Check for overlap with preceding fragment. */
4030 +- if (prev &&
4031 +- (prev->ip_defrag_offset + prev->len) > offset)
4032 +- goto discard_fq;
4033 +-
4034 +- /* Look for overlap with succeeding segment. */
4035 +- if (next && next->ip_defrag_offset < end)
4036 +- goto discard_fq;
4037 +-
4038 +- /* Note : skb->ip_defrag_offset and skb->dev share the same location */
4039 +- if (skb->dev)
4040 +- fq->iif = skb->dev->ifindex;
4041 ++ /* Note : skb->rbnode and skb->dev share the same location. */
4042 ++ dev = skb->dev;
4043 + /* Makes sure compiler wont do silly aliasing games */
4044 + barrier();
4045 +- skb->ip_defrag_offset = offset;
4046 +
4047 +- /* Insert this fragment in the chain of fragments. */
4048 +- skb->next = next;
4049 +- if (!next)
4050 +- fq->q.fragments_tail = skb;
4051 +- if (prev)
4052 +- prev->next = skb;
4053 +- else
4054 +- fq->q.fragments = skb;
4055 ++ prev = fq->q.fragments_tail;
4056 ++ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
4057 ++ if (err)
4058 ++ goto insert_error;
4059 ++
4060 ++ if (dev)
4061 ++ fq->iif = dev->ifindex;
4062 +
4063 + fq->q.stamp = skb->tstamp;
4064 + fq->q.meat += skb->len;
4065 +@@ -319,11 +286,25 @@ found:
4066 + fq->q.flags |= INET_FRAG_FIRST_IN;
4067 + }
4068 +
4069 +- return 0;
4070 ++ if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
4071 ++ fq->q.meat == fq->q.len) {
4072 ++ unsigned long orefdst = skb->_skb_refdst;
4073 ++
4074 ++ skb->_skb_refdst = 0UL;
4075 ++ err = nf_ct_frag6_reasm(fq, skb, prev, dev);
4076 ++ skb->_skb_refdst = orefdst;
4077 ++ return err;
4078 ++ }
4079 ++
4080 ++ skb_dst_drop(skb);
4081 ++ return -EINPROGRESS;
4082 +
4083 +-discard_fq:
4084 ++insert_error:
4085 ++ if (err == IPFRAG_DUP)
4086 ++ goto err;
4087 + inet_frag_kill(&fq->q);
4088 + err:
4089 ++ skb_dst_drop(skb);
4090 + return -EINVAL;
4091 + }
4092 +
4093 +@@ -333,147 +314,67 @@ err:
4094 + * It is called with locked fq, and caller must check that
4095 + * queue is eligible for reassembly i.e. it is not COMPLETE,
4096 + * the last and the first frames arrived and all the bits are here.
4097 +- *
4098 +- * returns true if *prev skb has been transformed into the reassembled
4099 +- * skb, false otherwise.
4100 + */
4101 +-static bool
4102 +-nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
4103 ++static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
4104 ++ struct sk_buff *prev_tail, struct net_device *dev)
4105 + {
4106 +- struct sk_buff *fp, *head = fq->q.fragments;
4107 +- int payload_len, delta;
4108 ++ void *reasm_data;
4109 ++ int payload_len;
4110 + u8 ecn;
4111 +
4112 + inet_frag_kill(&fq->q);
4113 +
4114 +- WARN_ON(head == NULL);
4115 +- WARN_ON(head->ip_defrag_offset != 0);
4116 +-
4117 + ecn = ip_frag_ecn_table[fq->ecn];
4118 + if (unlikely(ecn == 0xff))
4119 +- return false;
4120 ++ goto err;
4121 ++
4122 ++ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
4123 ++ if (!reasm_data)
4124 ++ goto err;
4125 +
4126 +- /* Unfragmented part is taken from the first segment. */
4127 +- payload_len = ((head->data - skb_network_header(head)) -
4128 ++ payload_len = ((skb->data - skb_network_header(skb)) -
4129 + sizeof(struct ipv6hdr) + fq->q.len -
4130 + sizeof(struct frag_hdr));
4131 + if (payload_len > IPV6_MAXPLEN) {
4132 + net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
4133 + payload_len);
4134 +- return false;
4135 +- }
4136 +-
4137 +- delta = - head->truesize;
4138 +-
4139 +- /* Head of list must not be cloned. */
4140 +- if (skb_unclone(head, GFP_ATOMIC))
4141 +- return false;
4142 +-
4143 +- delta += head->truesize;
4144 +- if (delta)
4145 +- add_frag_mem_limit(fq->q.net, delta);
4146 +-
4147 +- /* If the first fragment is fragmented itself, we split
4148 +- * it to two chunks: the first with data and paged part
4149 +- * and the second, holding only fragments. */
4150 +- if (skb_has_frag_list(head)) {
4151 +- struct sk_buff *clone;
4152 +- int i, plen = 0;
4153 +-
4154 +- clone = alloc_skb(0, GFP_ATOMIC);
4155 +- if (clone == NULL)
4156 +- return false;
4157 +-
4158 +- clone->next = head->next;
4159 +- head->next = clone;
4160 +- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
4161 +- skb_frag_list_init(head);
4162 +- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
4163 +- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
4164 +- clone->len = clone->data_len = head->data_len - plen;
4165 +- head->data_len -= clone->len;
4166 +- head->len -= clone->len;
4167 +- clone->csum = 0;
4168 +- clone->ip_summed = head->ip_summed;
4169 +-
4170 +- add_frag_mem_limit(fq->q.net, clone->truesize);
4171 +- }
4172 +-
4173 +- /* morph head into last received skb: prev.
4174 +- *
4175 +- * This allows callers of ipv6 conntrack defrag to continue
4176 +- * to use the last skb(frag) passed into the reasm engine.
4177 +- * The last skb frag 'silently' turns into the full reassembled skb.
4178 +- *
4179 +- * Since prev is also part of q->fragments we have to clone it first.
4180 +- */
4181 +- if (head != prev) {
4182 +- struct sk_buff *iter;
4183 +-
4184 +- fp = skb_clone(prev, GFP_ATOMIC);
4185 +- if (!fp)
4186 +- return false;
4187 +-
4188 +- fp->next = prev->next;
4189 +-
4190 +- iter = head;
4191 +- while (iter) {
4192 +- if (iter->next == prev) {
4193 +- iter->next = fp;
4194 +- break;
4195 +- }
4196 +- iter = iter->next;
4197 +- }
4198 +-
4199 +- skb_morph(prev, head);
4200 +- prev->next = head->next;
4201 +- consume_skb(head);
4202 +- head = prev;
4203 ++ goto err;
4204 + }
4205 +
4206 + /* We have to remove fragment header from datagram and to relocate
4207 + * header in order to calculate ICV correctly. */
4208 +- skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
4209 +- memmove(head->head + sizeof(struct frag_hdr), head->head,
4210 +- (head->data - head->head) - sizeof(struct frag_hdr));
4211 +- head->mac_header += sizeof(struct frag_hdr);
4212 +- head->network_header += sizeof(struct frag_hdr);
4213 +-
4214 +- skb_shinfo(head)->frag_list = head->next;
4215 +- skb_reset_transport_header(head);
4216 +- skb_push(head, head->data - skb_network_header(head));
4217 +-
4218 +- for (fp = head->next; fp; fp = fp->next) {
4219 +- head->data_len += fp->len;
4220 +- head->len += fp->len;
4221 +- if (head->ip_summed != fp->ip_summed)
4222 +- head->ip_summed = CHECKSUM_NONE;
4223 +- else if (head->ip_summed == CHECKSUM_COMPLETE)
4224 +- head->csum = csum_add(head->csum, fp->csum);
4225 +- head->truesize += fp->truesize;
4226 +- fp->sk = NULL;
4227 +- }
4228 +- sub_frag_mem_limit(fq->q.net, head->truesize);
4229 ++ skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
4230 ++ memmove(skb->head + sizeof(struct frag_hdr), skb->head,
4231 ++ (skb->data - skb->head) - sizeof(struct frag_hdr));
4232 ++ skb->mac_header += sizeof(struct frag_hdr);
4233 ++ skb->network_header += sizeof(struct frag_hdr);
4234 ++
4235 ++ skb_reset_transport_header(skb);
4236 ++
4237 ++ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
4238 +
4239 +- head->ignore_df = 1;
4240 +- head->next = NULL;
4241 +- head->dev = dev;
4242 +- head->tstamp = fq->q.stamp;
4243 +- ipv6_hdr(head)->payload_len = htons(payload_len);
4244 +- ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
4245 +- IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
4246 ++ skb->ignore_df = 1;
4247 ++ skb->dev = dev;
4248 ++ ipv6_hdr(skb)->payload_len = htons(payload_len);
4249 ++ ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
4250 ++ IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
4251 +
4252 + /* Yes, and fold redundant checksum back. 8) */
4253 +- if (head->ip_summed == CHECKSUM_COMPLETE)
4254 +- head->csum = csum_partial(skb_network_header(head),
4255 +- skb_network_header_len(head),
4256 +- head->csum);
4257 ++ if (skb->ip_summed == CHECKSUM_COMPLETE)
4258 ++ skb->csum = csum_partial(skb_network_header(skb),
4259 ++ skb_network_header_len(skb),
4260 ++ skb->csum);
4261 +
4262 + fq->q.fragments = NULL;
4263 + fq->q.rb_fragments = RB_ROOT;
4264 + fq->q.fragments_tail = NULL;
4265 ++ fq->q.last_run_head = NULL;
4266 +
4267 +- return true;
4268 ++ return 0;
4269 ++
4270 ++err:
4271 ++ inet_frag_kill(&fq->q);
4272 ++ return -EINVAL;
4273 + }
4274 +
4275 + /*
4276 +@@ -542,7 +443,6 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
4277 + int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
4278 + {
4279 + u16 savethdr = skb->transport_header;
4280 +- struct net_device *dev = skb->dev;
4281 + int fhoff, nhoff, ret;
4282 + struct frag_hdr *fhdr;
4283 + struct frag_queue *fq;
4284 +@@ -565,10 +465,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
4285 + hdr = ipv6_hdr(skb);
4286 + fhdr = (struct frag_hdr *)skb_transport_header(skb);
4287 +
4288 +- if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
4289 +- fhdr->frag_off & htons(IP6_MF))
4290 +- return -EINVAL;
4291 +-
4292 + skb_orphan(skb);
4293 + fq = fq_find(net, fhdr->identification, user, hdr,
4294 + skb->dev ? skb->dev->ifindex : 0);
4295 +@@ -580,31 +476,17 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
4296 + spin_lock_bh(&fq->q.lock);
4297 +
4298 + ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
4299 +- if (ret < 0) {
4300 +- if (ret == -EPROTO) {
4301 +- skb->transport_header = savethdr;
4302 +- ret = 0;
4303 +- }
4304 +- goto out_unlock;
4305 ++ if (ret == -EPROTO) {
4306 ++ skb->transport_header = savethdr;
4307 ++ ret = 0;
4308 + }
4309 +
4310 + /* after queue has assumed skb ownership, only 0 or -EINPROGRESS
4311 + * must be returned.
4312 + */
4313 +- ret = -EINPROGRESS;
4314 +- if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
4315 +- fq->q.meat == fq->q.len) {
4316 +- unsigned long orefdst = skb->_skb_refdst;
4317 +-
4318 +- skb->_skb_refdst = 0UL;
4319 +- if (nf_ct_frag6_reasm(fq, skb, dev))
4320 +- ret = 0;
4321 +- skb->_skb_refdst = orefdst;
4322 +- } else {
4323 +- skb_dst_drop(skb);
4324 +- }
4325 ++ if (ret)
4326 ++ ret = -EINPROGRESS;
4327 +
4328 +-out_unlock:
4329 + spin_unlock_bh(&fq->q.lock);
4330 + inet_frag_put(&fq->q);
4331 + return ret;
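
After this restructuring, nf_ct_frag6_gather() follows the same ownership contract as the IPv4 path: 0 means skb was morphed into the complete datagram, -EINPROGRESS means the queue kept the skb. A hedged sketch of how a defrag hook caller is expected to react (the verdict policy shown is illustrative, not taken from this patch):

	ret = nf_ct_frag6_gather(net, skb, user);
	if (ret == -EINPROGRESS)
		return NF_STOLEN;	/* fragment queued; skb now owned by the queue */
	return ret == 0 ? NF_ACCEPT : NF_DROP;
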
4332 +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
4333 +index 7c943392c128..095825f964e2 100644
4334 +--- a/net/ipv6/reassembly.c
4335 ++++ b/net/ipv6/reassembly.c
4336 +@@ -69,8 +69,8 @@ static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
4337 +
4338 + static struct inet_frags ip6_frags;
4339 +
4340 +-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
4341 +- struct net_device *dev);
4342 ++static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
4343 ++ struct sk_buff *prev_tail, struct net_device *dev);
4344 +
4345 + static void ip6_frag_expire(struct timer_list *t)
4346 + {
4347 +@@ -111,21 +111,26 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
4348 + struct frag_hdr *fhdr, int nhoff,
4349 + u32 *prob_offset)
4350 + {
4351 +- struct sk_buff *prev, *next;
4352 +- struct net_device *dev;
4353 +- int offset, end, fragsize;
4354 + struct net *net = dev_net(skb_dst(skb)->dev);
4355 ++ int offset, end, fragsize;
4356 ++ struct sk_buff *prev_tail;
4357 ++ struct net_device *dev;
4358 ++ int err = -ENOENT;
4359 + u8 ecn;
4360 +
4361 + if (fq->q.flags & INET_FRAG_COMPLETE)
4362 + goto err;
4363 +
4364 ++ err = -EINVAL;
4365 + offset = ntohs(fhdr->frag_off) & ~0x7;
4366 + end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
4367 + ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
4368 +
4369 + if ((unsigned int)end > IPV6_MAXPLEN) {
4370 + *prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
4371 ++ /* note that if prob_offset is set, the skb is freed elsewhere,
4372 ++ * we do not free it here.
4373 ++ */
4374 + return -1;
4375 + }
4376 +
4377 +@@ -145,7 +150,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
4378 + */
4379 + if (end < fq->q.len ||
4380 + ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
4381 +- goto err;
4382 ++ goto discard_fq;
4383 + fq->q.flags |= INET_FRAG_LAST_IN;
4384 + fq->q.len = end;
4385 + } else {
4386 +@@ -162,70 +167,35 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
4387 + if (end > fq->q.len) {
4388 + /* Some bits beyond end -> corruption. */
4389 + if (fq->q.flags & INET_FRAG_LAST_IN)
4390 +- goto err;
4391 ++ goto discard_fq;
4392 + fq->q.len = end;
4393 + }
4394 + }
4395 +
4396 + if (end == offset)
4397 +- goto err;
4398 ++ goto discard_fq;
4399 +
4400 ++ err = -ENOMEM;
4401 + /* Point into the IP datagram 'data' part. */
4402 + if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
4403 +- goto err;
4404 +-
4405 +- if (pskb_trim_rcsum(skb, end - offset))
4406 +- goto err;
4407 +-
4408 +- /* Find out which fragments are in front and at the back of us
4409 +- * in the chain of fragments so far. We must know where to put
4410 +- * this fragment, right?
4411 +- */
4412 +- prev = fq->q.fragments_tail;
4413 +- if (!prev || prev->ip_defrag_offset < offset) {
4414 +- next = NULL;
4415 +- goto found;
4416 +- }
4417 +- prev = NULL;
4418 +- for (next = fq->q.fragments; next != NULL; next = next->next) {
4419 +- if (next->ip_defrag_offset >= offset)
4420 +- break; /* bingo! */
4421 +- prev = next;
4422 +- }
4423 +-
4424 +-found:
4425 +- /* RFC5722, Section 4, amended by Errata ID : 3089
4426 +- * When reassembling an IPv6 datagram, if
4427 +- * one or more its constituent fragments is determined to be an
4428 +- * overlapping fragment, the entire datagram (and any constituent
4429 +- * fragments) MUST be silently discarded.
4430 +- */
4431 +-
4432 +- /* Check for overlap with preceding fragment. */
4433 +- if (prev &&
4434 +- (prev->ip_defrag_offset + prev->len) > offset)
4435 + goto discard_fq;
4436 +
4437 +- /* Look for overlap with succeeding segment. */
4438 +- if (next && next->ip_defrag_offset < end)
4439 ++ err = pskb_trim_rcsum(skb, end - offset);
4440 ++ if (err)
4441 + goto discard_fq;
4442 +
4443 +- /* Note : skb->ip_defrag_offset and skb->dev share the same location */
4444 ++ /* Note : skb->rbnode and skb->dev share the same location. */
4445 + dev = skb->dev;
4446 +- if (dev)
4447 +- fq->iif = dev->ifindex;
4448 + /* Makes sure compiler wont do silly aliasing games */
4449 + barrier();
4450 +- skb->ip_defrag_offset = offset;
4451 +
4452 +- /* Insert this fragment in the chain of fragments. */
4453 +- skb->next = next;
4454 +- if (!next)
4455 +- fq->q.fragments_tail = skb;
4456 +- if (prev)
4457 +- prev->next = skb;
4458 +- else
4459 +- fq->q.fragments = skb;
4460 ++ prev_tail = fq->q.fragments_tail;
4461 ++ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
4462 ++ if (err)
4463 ++ goto insert_error;
4464 ++
4465 ++ if (dev)
4466 ++ fq->iif = dev->ifindex;
4467 +
4468 + fq->q.stamp = skb->tstamp;
4469 + fq->q.meat += skb->len;
4470 +@@ -246,44 +216,48 @@ found:
4471 +
4472 + if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
4473 + fq->q.meat == fq->q.len) {
4474 +- int res;
4475 + unsigned long orefdst = skb->_skb_refdst;
4476 +
4477 + skb->_skb_refdst = 0UL;
4478 +- res = ip6_frag_reasm(fq, prev, dev);
4479 ++ err = ip6_frag_reasm(fq, skb, prev_tail, dev);
4480 + skb->_skb_refdst = orefdst;
4481 +- return res;
4482 ++ return err;
4483 + }
4484 +
4485 + skb_dst_drop(skb);
4486 +- return -1;
4487 ++ return -EINPROGRESS;
4488 +
4489 ++insert_error:
4490 ++ if (err == IPFRAG_DUP) {
4491 ++ kfree_skb(skb);
4492 ++ return -EINVAL;
4493 ++ }
4494 ++ err = -EINVAL;
4495 ++ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
4496 ++ IPSTATS_MIB_REASM_OVERLAPS);
4497 + discard_fq:
4498 + inet_frag_kill(&fq->q);
4499 +-err:
4500 + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
4501 + IPSTATS_MIB_REASMFAILS);
4502 ++err:
4503 + kfree_skb(skb);
4504 +- return -1;
4505 ++ return err;
4506 + }
4507 +
4508 + /*
4509 + * Check if this packet is complete.
4510 +- * Returns NULL on failure by any reason, and pointer
4511 +- * to current nexthdr field in reassembled frame.
4512 + *
4513 + * It is called with locked fq, and caller must check that
4514 + * queue is eligible for reassembly i.e. it is not COMPLETE,
4515 + * the last and the first frames arrived and all the bits are here.
4516 + */
4517 +-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
4518 +- struct net_device *dev)
4519 ++static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
4520 ++ struct sk_buff *prev_tail, struct net_device *dev)
4521 + {
4522 + struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
4523 +- struct sk_buff *fp, *head = fq->q.fragments;
4524 +- int payload_len, delta;
4525 + unsigned int nhoff;
4526 +- int sum_truesize;
4527 ++ void *reasm_data;
4528 ++ int payload_len;
4529 + u8 ecn;
4530 +
4531 + inet_frag_kill(&fq->q);
4532 +@@ -292,121 +266,40 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
4533 + if (unlikely(ecn == 0xff))
4534 + goto out_fail;
4535 +
4536 +- /* Make the one we just received the head. */
4537 +- if (prev) {
4538 +- head = prev->next;
4539 +- fp = skb_clone(head, GFP_ATOMIC);
4540 +-
4541 +- if (!fp)
4542 +- goto out_oom;
4543 +-
4544 +- fp->next = head->next;
4545 +- if (!fp->next)
4546 +- fq->q.fragments_tail = fp;
4547 +- prev->next = fp;
4548 +-
4549 +- skb_morph(head, fq->q.fragments);
4550 +- head->next = fq->q.fragments->next;
4551 +-
4552 +- consume_skb(fq->q.fragments);
4553 +- fq->q.fragments = head;
4554 +- }
4555 +-
4556 +- WARN_ON(head == NULL);
4557 +- WARN_ON(head->ip_defrag_offset != 0);
4558 ++ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
4559 ++ if (!reasm_data)
4560 ++ goto out_oom;
4561 +
4562 +- /* Unfragmented part is taken from the first segment. */
4563 +- payload_len = ((head->data - skb_network_header(head)) -
4564 ++ payload_len = ((skb->data - skb_network_header(skb)) -
4565 + sizeof(struct ipv6hdr) + fq->q.len -
4566 + sizeof(struct frag_hdr));
4567 + if (payload_len > IPV6_MAXPLEN)
4568 + goto out_oversize;
4569 +
4570 +- delta = - head->truesize;
4571 +-
4572 +- /* Head of list must not be cloned. */
4573 +- if (skb_unclone(head, GFP_ATOMIC))
4574 +- goto out_oom;
4575 +-
4576 +- delta += head->truesize;
4577 +- if (delta)
4578 +- add_frag_mem_limit(fq->q.net, delta);
4579 +-
4580 +- /* If the first fragment is fragmented itself, we split
4581 +- * it to two chunks: the first with data and paged part
4582 +- * and the second, holding only fragments. */
4583 +- if (skb_has_frag_list(head)) {
4584 +- struct sk_buff *clone;
4585 +- int i, plen = 0;
4586 +-
4587 +- clone = alloc_skb(0, GFP_ATOMIC);
4588 +- if (!clone)
4589 +- goto out_oom;
4590 +- clone->next = head->next;
4591 +- head->next = clone;
4592 +- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
4593 +- skb_frag_list_init(head);
4594 +- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
4595 +- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
4596 +- clone->len = clone->data_len = head->data_len - plen;
4597 +- head->data_len -= clone->len;
4598 +- head->len -= clone->len;
4599 +- clone->csum = 0;
4600 +- clone->ip_summed = head->ip_summed;
4601 +- add_frag_mem_limit(fq->q.net, clone->truesize);
4602 +- }
4603 +-
4604 + /* We have to remove fragment header from datagram and to relocate
4605 + * header in order to calculate ICV correctly. */
4606 + nhoff = fq->nhoffset;
4607 +- skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
4608 +- memmove(head->head + sizeof(struct frag_hdr), head->head,
4609 +- (head->data - head->head) - sizeof(struct frag_hdr));
4610 +- if (skb_mac_header_was_set(head))
4611 +- head->mac_header += sizeof(struct frag_hdr);
4612 +- head->network_header += sizeof(struct frag_hdr);
4613 +-
4614 +- skb_reset_transport_header(head);
4615 +- skb_push(head, head->data - skb_network_header(head));
4616 +-
4617 +- sum_truesize = head->truesize;
4618 +- for (fp = head->next; fp;) {
4619 +- bool headstolen;
4620 +- int delta;
4621 +- struct sk_buff *next = fp->next;
4622 +-
4623 +- sum_truesize += fp->truesize;
4624 +- if (head->ip_summed != fp->ip_summed)
4625 +- head->ip_summed = CHECKSUM_NONE;
4626 +- else if (head->ip_summed == CHECKSUM_COMPLETE)
4627 +- head->csum = csum_add(head->csum, fp->csum);
4628 +-
4629 +- if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
4630 +- kfree_skb_partial(fp, headstolen);
4631 +- } else {
4632 +- fp->sk = NULL;
4633 +- if (!skb_shinfo(head)->frag_list)
4634 +- skb_shinfo(head)->frag_list = fp;
4635 +- head->data_len += fp->len;
4636 +- head->len += fp->len;
4637 +- head->truesize += fp->truesize;
4638 +- }
4639 +- fp = next;
4640 +- }
4641 +- sub_frag_mem_limit(fq->q.net, sum_truesize);
4642 ++ skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
4643 ++ memmove(skb->head + sizeof(struct frag_hdr), skb->head,
4644 ++ (skb->data - skb->head) - sizeof(struct frag_hdr));
4645 ++ if (skb_mac_header_was_set(skb))
4646 ++ skb->mac_header += sizeof(struct frag_hdr);
4647 ++ skb->network_header += sizeof(struct frag_hdr);
4648 ++
4649 ++ skb_reset_transport_header(skb);
4650 ++
4651 ++ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
4652 +
4653 +- head->next = NULL;
4654 +- head->dev = dev;
4655 +- head->tstamp = fq->q.stamp;
4656 +- ipv6_hdr(head)->payload_len = htons(payload_len);
4657 +- ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
4658 +- IP6CB(head)->nhoff = nhoff;
4659 +- IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
4660 +- IP6CB(head)->frag_max_size = fq->q.max_size;
4661 ++ skb->dev = dev;
4662 ++ ipv6_hdr(skb)->payload_len = htons(payload_len);
4663 ++ ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
4664 ++ IP6CB(skb)->nhoff = nhoff;
4665 ++ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
4666 ++ IP6CB(skb)->frag_max_size = fq->q.max_size;
4667 +
4668 + /* Yes, and fold redundant checksum back. 8) */
4669 +- skb_postpush_rcsum(head, skb_network_header(head),
4670 +- skb_network_header_len(head));
4671 ++ skb_postpush_rcsum(skb, skb_network_header(skb),
4672 ++ skb_network_header_len(skb));
4673 +
4674 + rcu_read_lock();
4675 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
4676 +@@ -414,6 +307,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
4677 + fq->q.fragments = NULL;
4678 + fq->q.rb_fragments = RB_ROOT;
4679 + fq->q.fragments_tail = NULL;
4680 ++ fq->q.last_run_head = NULL;
4681 + return 1;
4682 +
4683 + out_oversize:
4684 +@@ -425,6 +319,7 @@ out_fail:
4685 + rcu_read_lock();
4686 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
4687 + rcu_read_unlock();
4688 ++ inet_frag_kill(&fq->q);
4689 + return -1;
4690 + }
4691 +
4692 +@@ -463,10 +358,6 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
4693 + return 1;
4694 + }
4695 +
4696 +- if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
4697 +- fhdr->frag_off & htons(IP6_MF))
4698 +- goto fail_hdr;
4699 +-
4700 + iif = skb->dev ? skb->dev->ifindex : 0;
4701 + fq = fq_find(net, fhdr->identification, hdr, iif);
4702 + if (fq) {
4703 +@@ -484,6 +375,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
4704 + if (prob_offset) {
4705 + __IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
4706 + IPSTATS_MIB_INHDRERRORS);
4707 ++ /* icmpv6_param_prob() calls kfree_skb(skb) */
4708 + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
4709 + }
4710 + return ret;
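
The reassembly changes above drop the hand-rolled head-skb cloning and fragment-coalescing loop in favor of the shared inet_frag_reasm_finish() helper, leaving the IPv6 code responsible only for header fix-up and the payload-length check. The arithmetic of that check is easy to lose in the diff noise; here is a minimal userspace sketch of it, with the fixed sizes written out (40-byte IPv6 header, 8-byte fragment header, IPV6_MAXPLEN of 65535) and purely hypothetical lengths:

#include <stdio.h>

#define IPV6_HDR_LEN  40	/* sizeof(struct ipv6hdr) */
#define FRAG_HDR_LEN   8	/* sizeof(struct frag_hdr) */
#define IPV6_MAXPLEN  65535

int main(void)
{
	/* Hypothetical first fragment: skb->data sits 72 bytes past the
	 * network header (IPv6 header + fragment header + 24 bytes already
	 * pulled), and the fragment queue holds 3000 bytes in total. */
	int head_len  = 72;	/* skb->data - skb_network_header(skb) */
	int queue_len = 3000;	/* fq->q.len */

	int payload_len = head_len - IPV6_HDR_LEN + queue_len - FRAG_HDR_LEN;

	if (payload_len > IPV6_MAXPLEN)
		printf("oversize, drop\n");
	else
		printf("payload_len = %d\n", payload_len);	/* 3024 */
	return 0;
}
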
4711 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4712 +index 9006bb3c9e72..06fa8425d82c 100644
4713 +--- a/net/ipv6/route.c
4714 ++++ b/net/ipv6/route.c
4715 +@@ -2367,6 +2367,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
4716 +
4717 + rcu_read_lock();
4718 + from = rcu_dereference(rt6->from);
4719 ++ if (!from) {
4720 ++ rcu_read_unlock();
4721 ++ return;
4722 ++ }
4723 + nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
4724 + if (nrt6) {
4725 + rt6_do_update_pmtu(nrt6, mtu);
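
The route.c hunk guards against rt6->from having been cleared by a concurrent writer: once rcu_dereference() returns NULL there is nothing to clone from, and the function must drop the read lock before bailing out. A rough userspace analogue of that discipline, with a plain rwlock standing in for RCU (all names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>

struct route { struct route *from; int mtu; };

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static void update_pmtu(struct route *rt, int mtu)
{
	pthread_rwlock_rdlock(&lock);
	struct route *from = rt->from;	/* may be NULL if detached */
	if (!from) {
		/* unlock on every early exit, then bail */
		pthread_rwlock_unlock(&lock);
		return;
	}
	from->mtu = mtu;	/* stand-in for ip6_rt_cache_alloc() + update */
	pthread_rwlock_unlock(&lock);
}

int main(void)
{
	struct route origin = { .from = NULL, .mtu = 1500 };
	struct route rt = { .from = &origin };
	update_pmtu(&rt, 1400);
	rt.from = NULL;		/* simulate the route being detached */
	update_pmtu(&rt, 1280);	/* now ignored instead of dereferencing NULL */
	printf("origin mtu = %d\n", origin.mtu);
	return 0;
}
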
4726 +diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
4727 +index 8f6998091d26..2123f6e90fc0 100644
4728 +--- a/net/mac80211/driver-ops.h
4729 ++++ b/net/mac80211/driver-ops.h
4730 +@@ -1166,6 +1166,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
4731 + {
4732 + struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
4733 +
4734 ++ if (local->in_reconfig)
4735 ++ return;
4736 ++
4737 + if (!check_sdata_in_driver(sdata))
4738 + return;
4739 +
4740 +diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
4741 +index 793016d722ec..9fd37d91b5ed 100644
4742 +--- a/net/sched/sch_cake.c
4743 ++++ b/net/sched/sch_cake.c
4744 +@@ -1508,32 +1508,29 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
4745 + return idx + (tin << 16);
4746 + }
4747 +
4748 +-static void cake_wash_diffserv(struct sk_buff *skb)
4749 +-{
4750 +- switch (skb->protocol) {
4751 +- case htons(ETH_P_IP):
4752 +- ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
4753 +- break;
4754 +- case htons(ETH_P_IPV6):
4755 +- ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
4756 +- break;
4757 +- default:
4758 +- break;
4759 +- }
4760 +-}
4761 +-
4762 + static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
4763 + {
4764 ++ int wlen = skb_network_offset(skb);
4765 + u8 dscp;
4766 +
4767 +- switch (skb->protocol) {
4768 ++ switch (tc_skb_protocol(skb)) {
4769 + case htons(ETH_P_IP):
4770 ++ wlen += sizeof(struct iphdr);
4771 ++ if (!pskb_may_pull(skb, wlen) ||
4772 ++ skb_try_make_writable(skb, wlen))
4773 ++ return 0;
4774 ++
4775 + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
4776 + if (wash && dscp)
4777 + ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
4778 + return dscp;
4779 +
4780 + case htons(ETH_P_IPV6):
4781 ++ wlen += sizeof(struct ipv6hdr);
4782 ++ if (!pskb_may_pull(skb, wlen) ||
4783 ++ skb_try_make_writable(skb, wlen))
4784 ++ return 0;
4785 ++
4786 + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
4787 + if (wash && dscp)
4788 + ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
4789 +@@ -1553,25 +1550,27 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
4790 + {
4791 + struct cake_sched_data *q = qdisc_priv(sch);
4792 + u32 tin;
4793 ++ u8 dscp;
4794 ++
4795 ++ /* Tin selection: Default to diffserv-based selection, allow overriding
4796 ++ * using firewall marks or skb->priority.
4797 ++ */
4798 ++ dscp = cake_handle_diffserv(skb,
4799 ++ q->rate_flags & CAKE_FLAG_WASH);
4800 +
4801 +- if (TC_H_MAJ(skb->priority) == sch->handle &&
4802 +- TC_H_MIN(skb->priority) > 0 &&
4803 +- TC_H_MIN(skb->priority) <= q->tin_cnt) {
4804 ++ if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
4805 ++ tin = 0;
4806 ++
4807 ++ else if (TC_H_MAJ(skb->priority) == sch->handle &&
4808 ++ TC_H_MIN(skb->priority) > 0 &&
4809 ++ TC_H_MIN(skb->priority) <= q->tin_cnt)
4810 + tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
4811 +
4812 +- if (q->rate_flags & CAKE_FLAG_WASH)
4813 +- cake_wash_diffserv(skb);
4814 +- } else if (q->tin_mode != CAKE_DIFFSERV_BESTEFFORT) {
4815 +- /* extract the Diffserv Precedence field, if it exists */
4816 +- /* and clear DSCP bits if washing */
4817 +- tin = q->tin_index[cake_handle_diffserv(skb,
4818 +- q->rate_flags & CAKE_FLAG_WASH)];
4819 ++ else {
4820 ++ tin = q->tin_index[dscp];
4821 ++
4822 + if (unlikely(tin >= q->tin_cnt))
4823 + tin = 0;
4824 +- } else {
4825 +- tin = 0;
4826 +- if (q->rate_flags & CAKE_FLAG_WASH)
4827 +- cake_wash_diffserv(skb);
4828 + }
4829 +
4830 + return &q->tins[tin];
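
The rewritten cake_handle_diffserv() only reads the DS field after confirming the header is present (pskb_may_pull) and writable (skb_try_make_writable), and cake_select_tin() now extracts the DSCP exactly once per packet. The bit layout it relies on is simple: the DS byte carries the 6-bit DSCP in its upper bits and the 2-bit ECN field in its lower bits, and "washing" clears the DSCP while preserving ECN. A self-contained sketch of that byte manipulation (INET_ECN_MASK is 0x3 in the kernel; the sample value is arbitrary):

#include <stdio.h>
#include <stdint.h>

#define INET_ECN_MASK 0x3	/* low two bits of the DS field */

/* Return the 6-bit DSCP; optionally "wash" it out of the header byte. */
static uint8_t handle_diffserv(uint8_t *dsfield, int wash)
{
	uint8_t dscp = *dsfield >> 2;
	if (wash && dscp)
		*dsfield &= INET_ECN_MASK;	/* keep ECN, clear DSCP */
	return dscp;
}

int main(void)
{
	uint8_t ds = 0xB9;	/* DSCP 46 (EF), ECN 01 */
	uint8_t dscp = handle_diffserv(&ds, 1);
	printf("dscp=%u washed ds=0x%02x\n", dscp, ds);	/* dscp=46, ds=0x01 */
	return 0;
}
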
4831 +diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
4832 +index 66d5b2c5987a..d72985ca1d55 100644
4833 +--- a/net/tipc/name_table.c
4834 ++++ b/net/tipc/name_table.c
4835 +@@ -908,7 +908,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
4836 + for (; i < TIPC_NAMETBL_SIZE; i++) {
4837 + head = &tn->nametbl->services[i];
4838 +
4839 +- if (*last_type) {
4840 ++ if (*last_type ||
4841 ++ (!i && *last_key && (*last_lower == *last_key))) {
4842 + service = tipc_service_find(net, *last_type);
4843 + if (!service)
4844 + return -EPIPE;
4845 +diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
4846 +index 7be43697ff84..7f40b6aab689 100644
4847 +--- a/scripts/mod/file2alias.c
4848 ++++ b/scripts/mod/file2alias.c
4849 +@@ -47,49 +47,9 @@ typedef struct {
4850 + struct devtable {
4851 + const char *device_id; /* name of table, __mod_<name>__*_device_table. */
4852 + unsigned long id_size;
4853 +- void *function;
4854 ++ int (*do_entry)(const char *filename, void *symval, char *alias);
4855 + };
4856 +
4857 +-#define ___cat(a,b) a ## b
4858 +-#define __cat(a,b) ___cat(a,b)
4859 +-
4860 +-/* we need some special handling for this host tool running eventually on
4861 +- * Darwin. The Mach-O section handling is a bit different than ELF section
4862 +- * handling. The differences in detail are:
4863 +- * a) we have segments which have sections
4864 +- * b) we need an API call to get the respective section symbols */
4865 +-#if defined(__MACH__)
4866 +-#include <mach-o/getsect.h>
4867 +-
4868 +-#define INIT_SECTION(name) do { \
4869 +- unsigned long name ## _len; \
4870 +- char *__cat(pstart_,name) = getsectdata("__TEXT", \
4871 +- #name, &__cat(name,_len)); \
4872 +- char *__cat(pstop_,name) = __cat(pstart_,name) + \
4873 +- __cat(name, _len); \
4874 +- __cat(__start_,name) = (void *)__cat(pstart_,name); \
4875 +- __cat(__stop_,name) = (void *)__cat(pstop_,name); \
4876 +- } while (0)
4877 +-#define SECTION(name) __attribute__((section("__TEXT, " #name)))
4878 +-
4879 +-struct devtable **__start___devtable, **__stop___devtable;
4880 +-#else
4881 +-#define INIT_SECTION(name) /* no-op for ELF */
4882 +-#define SECTION(name) __attribute__((section(#name)))
4883 +-
4884 +-/* We construct a table of pointers in an ELF section (pointers generally
4885 +- * go unpadded by gcc). ld creates boundary syms for us. */
4886 +-extern struct devtable *__start___devtable[], *__stop___devtable[];
4887 +-#endif /* __MACH__ */
4888 +-
4889 +-#if !defined(__used)
4890 +-# if __GNUC__ == 3 && __GNUC_MINOR__ < 3
4891 +-# define __used __attribute__((__unused__))
4892 +-# else
4893 +-# define __used __attribute__((__used__))
4894 +-# endif
4895 +-#endif
4896 +-
4897 + /* Define a variable f that holds the value of field f of struct devid
4898 + * based at address m.
4899 + */
4900 +@@ -102,16 +62,6 @@ extern struct devtable *__start___devtable[], *__stop___devtable[];
4901 + #define DEF_FIELD_ADDR(m, devid, f) \
4902 + typeof(((struct devid *)0)->f) *f = ((m) + OFF_##devid##_##f)
4903 +
4904 +-/* Add a table entry. We test function type matches while we're here. */
4905 +-#define ADD_TO_DEVTABLE(device_id, type, function) \
4906 +- static struct devtable __cat(devtable,__LINE__) = { \
4907 +- device_id + 0*sizeof((function)((const char *)NULL, \
4908 +- (void *)NULL, \
4909 +- (char *)NULL)), \
4910 +- SIZE_##type, (function) }; \
4911 +- static struct devtable *SECTION(__devtable) __used \
4912 +- __cat(devtable_ptr,__LINE__) = &__cat(devtable,__LINE__)
4913 +-
4914 + #define ADD(str, sep, cond, field) \
4915 + do { \
4916 + strcat(str, sep); \
4917 +@@ -431,7 +381,6 @@ static int do_hid_entry(const char *filename,
4918 +
4919 + return 1;
4920 + }
4921 +-ADD_TO_DEVTABLE("hid", hid_device_id, do_hid_entry);
4922 +
4923 + /* Looks like: ieee1394:venNmoNspNverN */
4924 + static int do_ieee1394_entry(const char *filename,
4925 +@@ -456,7 +405,6 @@ static int do_ieee1394_entry(const char *filename,
4926 + add_wildcard(alias);
4927 + return 1;
4928 + }
4929 +-ADD_TO_DEVTABLE("ieee1394", ieee1394_device_id, do_ieee1394_entry);
4930 +
4931 + /* Looks like: pci:vNdNsvNsdNbcNscNiN. */
4932 + static int do_pci_entry(const char *filename,
4933 +@@ -500,7 +448,6 @@ static int do_pci_entry(const char *filename,
4934 + add_wildcard(alias);
4935 + return 1;
4936 + }
4937 +-ADD_TO_DEVTABLE("pci", pci_device_id, do_pci_entry);
4938 +
4939 + /* looks like: "ccw:tNmNdtNdmN" */
4940 + static int do_ccw_entry(const char *filename,
4941 +@@ -524,7 +471,6 @@ static int do_ccw_entry(const char *filename,
4942 + add_wildcard(alias);
4943 + return 1;
4944 + }
4945 +-ADD_TO_DEVTABLE("ccw", ccw_device_id, do_ccw_entry);
4946 +
4947 + /* looks like: "ap:tN" */
4948 + static int do_ap_entry(const char *filename,
4949 +@@ -535,7 +481,6 @@ static int do_ap_entry(const char *filename,
4950 + sprintf(alias, "ap:t%02X*", dev_type);
4951 + return 1;
4952 + }
4953 +-ADD_TO_DEVTABLE("ap", ap_device_id, do_ap_entry);
4954 +
4955 + /* looks like: "css:tN" */
4956 + static int do_css_entry(const char *filename,
4957 +@@ -546,7 +491,6 @@ static int do_css_entry(const char *filename,
4958 + sprintf(alias, "css:t%01X", type);
4959 + return 1;
4960 + }
4961 +-ADD_TO_DEVTABLE("css", css_device_id, do_css_entry);
4962 +
4963 + /* Looks like: "serio:tyNprNidNexN" */
4964 + static int do_serio_entry(const char *filename,
4965 +@@ -566,7 +510,6 @@ static int do_serio_entry(const char *filename,
4966 + add_wildcard(alias);
4967 + return 1;
4968 + }
4969 +-ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
4970 +
4971 + /* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or
4972 + * "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if)
4973 +@@ -604,7 +547,6 @@ static int do_acpi_entry(const char *filename,
4974 + }
4975 + return 1;
4976 + }
4977 +-ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
4978 +
4979 + /* looks like: "pnp:dD" */
4980 + static void do_pnp_device_entry(void *symval, unsigned long size,
4981 +@@ -725,7 +667,6 @@ static int do_pcmcia_entry(const char *filename,
4982 + add_wildcard(alias);
4983 + return 1;
4984 + }
4985 +-ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
4986 +
4987 + static int do_vio_entry(const char *filename, void *symval,
4988 + char *alias)
4989 +@@ -745,7 +686,6 @@ static int do_vio_entry(const char *filename, void *symval,
4990 + add_wildcard(alias);
4991 + return 1;
4992 + }
4993 +-ADD_TO_DEVTABLE("vio", vio_device_id, do_vio_entry);
4994 +
4995 + #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
4996 +
4997 +@@ -818,7 +758,6 @@ static int do_input_entry(const char *filename, void *symval,
4998 + do_input(alias, *swbit, 0, INPUT_DEVICE_ID_SW_MAX);
4999 + return 1;
5000 + }
5001 +-ADD_TO_DEVTABLE("input", input_device_id, do_input_entry);
5002 +
5003 + static int do_eisa_entry(const char *filename, void *symval,
5004 + char *alias)
5005 +@@ -830,7 +769,6 @@ static int do_eisa_entry(const char *filename, void *symval,
5006 + strcat(alias, "*");
5007 + return 1;
5008 + }
5009 +-ADD_TO_DEVTABLE("eisa", eisa_device_id, do_eisa_entry);
5010 +
5011 + /* Looks like: parisc:tNhvNrevNsvN */
5012 + static int do_parisc_entry(const char *filename, void *symval,
5013 +@@ -850,7 +788,6 @@ static int do_parisc_entry(const char *filename, void *symval,
5014 + add_wildcard(alias);
5015 + return 1;
5016 + }
5017 +-ADD_TO_DEVTABLE("parisc", parisc_device_id, do_parisc_entry);
5018 +
5019 + /* Looks like: sdio:cNvNdN. */
5020 + static int do_sdio_entry(const char *filename,
5021 +@@ -867,7 +804,6 @@ static int do_sdio_entry(const char *filename,
5022 + add_wildcard(alias);
5023 + return 1;
5024 + }
5025 +-ADD_TO_DEVTABLE("sdio", sdio_device_id, do_sdio_entry);
5026 +
5027 + /* Looks like: ssb:vNidNrevN. */
5028 + static int do_ssb_entry(const char *filename,
5029 +@@ -884,7 +820,6 @@ static int do_ssb_entry(const char *filename,
5030 + add_wildcard(alias);
5031 + return 1;
5032 + }
5033 +-ADD_TO_DEVTABLE("ssb", ssb_device_id, do_ssb_entry);
5034 +
5035 + /* Looks like: bcma:mNidNrevNclN. */
5036 + static int do_bcma_entry(const char *filename,
5037 +@@ -903,7 +838,6 @@ static int do_bcma_entry(const char *filename,
5038 + add_wildcard(alias);
5039 + return 1;
5040 + }
5041 +-ADD_TO_DEVTABLE("bcma", bcma_device_id, do_bcma_entry);
5042 +
5043 + /* Looks like: virtio:dNvN */
5044 + static int do_virtio_entry(const char *filename, void *symval,
5045 +@@ -919,7 +853,6 @@ static int do_virtio_entry(const char *filename, void *symval,
5046 + add_wildcard(alias);
5047 + return 1;
5048 + }
5049 +-ADD_TO_DEVTABLE("virtio", virtio_device_id, do_virtio_entry);
5050 +
5051 + /*
5052 + * Looks like: vmbus:guid
5053 +@@ -942,7 +875,6 @@ static int do_vmbus_entry(const char *filename, void *symval,
5054 +
5055 + return 1;
5056 + }
5057 +-ADD_TO_DEVTABLE("vmbus", hv_vmbus_device_id, do_vmbus_entry);
5058 +
5059 + /* Looks like: rpmsg:S */
5060 + static int do_rpmsg_entry(const char *filename, void *symval,
5061 +@@ -953,7 +885,6 @@ static int do_rpmsg_entry(const char *filename, void *symval,
5062 +
5063 + return 1;
5064 + }
5065 +-ADD_TO_DEVTABLE("rpmsg", rpmsg_device_id, do_rpmsg_entry);
5066 +
5067 + /* Looks like: i2c:S */
5068 + static int do_i2c_entry(const char *filename, void *symval,
5069 +@@ -964,7 +895,6 @@ static int do_i2c_entry(const char *filename, void *symval,
5070 +
5071 + return 1;
5072 + }
5073 +-ADD_TO_DEVTABLE("i2c", i2c_device_id, do_i2c_entry);
5074 +
5075 + /* Looks like: spi:S */
5076 + static int do_spi_entry(const char *filename, void *symval,
5077 +@@ -975,7 +905,6 @@ static int do_spi_entry(const char *filename, void *symval,
5078 +
5079 + return 1;
5080 + }
5081 +-ADD_TO_DEVTABLE("spi", spi_device_id, do_spi_entry);
5082 +
5083 + static const struct dmifield {
5084 + const char *prefix;
5085 +@@ -1030,7 +959,6 @@ static int do_dmi_entry(const char *filename, void *symval,
5086 + strcat(alias, ":");
5087 + return 1;
5088 + }
5089 +-ADD_TO_DEVTABLE("dmi", dmi_system_id, do_dmi_entry);
5090 +
5091 + static int do_platform_entry(const char *filename,
5092 + void *symval, char *alias)
5093 +@@ -1039,7 +967,6 @@ static int do_platform_entry(const char *filename,
5094 + sprintf(alias, PLATFORM_MODULE_PREFIX "%s", *name);
5095 + return 1;
5096 + }
5097 +-ADD_TO_DEVTABLE("platform", platform_device_id, do_platform_entry);
5098 +
5099 + static int do_mdio_entry(const char *filename,
5100 + void *symval, char *alias)
5101 +@@ -1064,7 +991,6 @@ static int do_mdio_entry(const char *filename,
5102 +
5103 + return 1;
5104 + }
5105 +-ADD_TO_DEVTABLE("mdio", mdio_device_id, do_mdio_entry);
5106 +
5107 + /* Looks like: zorro:iN. */
5108 + static int do_zorro_entry(const char *filename, void *symval,
5109 +@@ -1075,7 +1001,6 @@ static int do_zorro_entry(const char *filename, void *symval,
5110 + ADD(alias, "i", id != ZORRO_WILDCARD, id);
5111 + return 1;
5112 + }
5113 +-ADD_TO_DEVTABLE("zorro", zorro_device_id, do_zorro_entry);
5114 +
5115 + /* looks like: "pnp:dD" */
5116 + static int do_isapnp_entry(const char *filename,
5117 +@@ -1091,7 +1016,6 @@ static int do_isapnp_entry(const char *filename,
5118 + (function >> 12) & 0x0f, (function >> 8) & 0x0f);
5119 + return 1;
5120 + }
5121 +-ADD_TO_DEVTABLE("isapnp", isapnp_device_id, do_isapnp_entry);
5122 +
5123 + /* Looks like: "ipack:fNvNdN". */
5124 + static int do_ipack_entry(const char *filename,
5125 +@@ -1107,7 +1031,6 @@ static int do_ipack_entry(const char *filename,
5126 + add_wildcard(alias);
5127 + return 1;
5128 + }
5129 +-ADD_TO_DEVTABLE("ipack", ipack_device_id, do_ipack_entry);
5130 +
5131 + /*
5132 + * Append a match expression for a single masked hex digit.
5133 +@@ -1178,7 +1101,6 @@ static int do_amba_entry(const char *filename,
5134 +
5135 + return 1;
5136 + }
5137 +-ADD_TO_DEVTABLE("amba", amba_id, do_amba_entry);
5138 +
5139 + /*
5140 + * looks like: "mipscdmm:tN"
5141 +@@ -1194,7 +1116,6 @@ static int do_mips_cdmm_entry(const char *filename,
5142 + sprintf(alias, "mipscdmm:t%02X*", type);
5143 + return 1;
5144 + }
5145 +-ADD_TO_DEVTABLE("mipscdmm", mips_cdmm_device_id, do_mips_cdmm_entry);
5146 +
5147 + /* LOOKS like cpu:type:x86,venVVVVfamFFFFmodMMMM:feature:*,FEAT,*
5148 + * All fields are numbers. It would be nicer to use strings for vendor
5149 +@@ -1219,7 +1140,6 @@ static int do_x86cpu_entry(const char *filename, void *symval,
5150 + sprintf(alias + strlen(alias), "%04X*", feature);
5151 + return 1;
5152 + }
5153 +-ADD_TO_DEVTABLE("x86cpu", x86_cpu_id, do_x86cpu_entry);
5154 +
5155 + /* LOOKS like cpu:type:*:feature:*FEAT* */
5156 + static int do_cpu_entry(const char *filename, void *symval, char *alias)
5157 +@@ -1229,7 +1149,6 @@ static int do_cpu_entry(const char *filename, void *symval, char *alias)
5158 + sprintf(alias, "cpu:type:*:feature:*%04X*", feature);
5159 + return 1;
5160 + }
5161 +-ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry);
5162 +
5163 + /* Looks like: mei:S:uuid:N:* */
5164 + static int do_mei_entry(const char *filename, void *symval,
5165 +@@ -1248,7 +1167,6 @@ static int do_mei_entry(const char *filename, void *symval,
5166 +
5167 + return 1;
5168 + }
5169 +-ADD_TO_DEVTABLE("mei", mei_cl_device_id, do_mei_entry);
5170 +
5171 + /* Looks like: rapidio:vNdNavNadN */
5172 + static int do_rio_entry(const char *filename,
5173 +@@ -1268,7 +1186,6 @@ static int do_rio_entry(const char *filename,
5174 + add_wildcard(alias);
5175 + return 1;
5176 + }
5177 +-ADD_TO_DEVTABLE("rapidio", rio_device_id, do_rio_entry);
5178 +
5179 + /* Looks like: ulpi:vNpN */
5180 + static int do_ulpi_entry(const char *filename, void *symval,
5181 +@@ -1281,7 +1198,6 @@ static int do_ulpi_entry(const char *filename, void *symval,
5182 +
5183 + return 1;
5184 + }
5185 +-ADD_TO_DEVTABLE("ulpi", ulpi_device_id, do_ulpi_entry);
5186 +
5187 + /* Looks like: hdaudio:vNrNaN */
5188 + static int do_hda_entry(const char *filename, void *symval, char *alias)
5189 +@@ -1298,7 +1214,6 @@ static int do_hda_entry(const char *filename, void *symval, char *alias)
5190 + add_wildcard(alias);
5191 + return 1;
5192 + }
5193 +-ADD_TO_DEVTABLE("hdaudio", hda_device_id, do_hda_entry);
5194 +
5195 + /* Looks like: sdw:mNpN */
5196 + static int do_sdw_entry(const char *filename, void *symval, char *alias)
5197 +@@ -1313,7 +1228,6 @@ static int do_sdw_entry(const char *filename, void *symval, char *alias)
5198 + add_wildcard(alias);
5199 + return 1;
5200 + }
5201 +-ADD_TO_DEVTABLE("sdw", sdw_device_id, do_sdw_entry);
5202 +
5203 + /* Looks like: fsl-mc:vNdN */
5204 + static int do_fsl_mc_entry(const char *filename, void *symval,
5205 +@@ -1325,7 +1239,6 @@ static int do_fsl_mc_entry(const char *filename, void *symval,
5206 + sprintf(alias, "fsl-mc:v%08Xd%s", vendor, *obj_type);
5207 + return 1;
5208 + }
5209 +-ADD_TO_DEVTABLE("fslmc", fsl_mc_device_id, do_fsl_mc_entry);
5210 +
5211 + /* Looks like: tbsvc:kSpNvNrN */
5212 + static int do_tbsvc_entry(const char *filename, void *symval, char *alias)
5213 +@@ -1350,7 +1263,6 @@ static int do_tbsvc_entry(const char *filename, void *symval, char *alias)
5214 + add_wildcard(alias);
5215 + return 1;
5216 + }
5217 +-ADD_TO_DEVTABLE("tbsvc", tb_service_id, do_tbsvc_entry);
5218 +
5219 + /* Looks like: typec:idNmN */
5220 + static int do_typec_entry(const char *filename, void *symval, char *alias)
5221 +@@ -1363,7 +1275,6 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
5222 +
5223 + return 1;
5224 + }
5225 +-ADD_TO_DEVTABLE("typec", typec_device_id, do_typec_entry);
5226 +
5227 + /* Does namelen bytes of name exactly match the symbol? */
5228 + static bool sym_is(const char *name, unsigned namelen, const char *symbol)
5229 +@@ -1377,12 +1288,11 @@ static bool sym_is(const char *name, unsigned namelen, const char *symbol)
5230 + static void do_table(void *symval, unsigned long size,
5231 + unsigned long id_size,
5232 + const char *device_id,
5233 +- void *function,
5234 ++ int (*do_entry)(const char *filename, void *symval, char *alias),
5235 + struct module *mod)
5236 + {
5237 + unsigned int i;
5238 + char alias[500];
5239 +- int (*do_entry)(const char *, void *entry, char *alias) = function;
5240 +
5241 + device_id_check(mod->name, device_id, size, id_size, symval);
5242 + /* Leave last one: it's the terminator. */
5243 +@@ -1396,6 +1306,48 @@ static void do_table(void *symval, unsigned long size,
5244 + }
5245 + }
5246 +
5247 ++static const struct devtable devtable[] = {
5248 ++ {"hid", SIZE_hid_device_id, do_hid_entry},
5249 ++ {"ieee1394", SIZE_ieee1394_device_id, do_ieee1394_entry},
5250 ++ {"pci", SIZE_pci_device_id, do_pci_entry},
5251 ++ {"ccw", SIZE_ccw_device_id, do_ccw_entry},
5252 ++ {"ap", SIZE_ap_device_id, do_ap_entry},
5253 ++ {"css", SIZE_css_device_id, do_css_entry},
5254 ++ {"serio", SIZE_serio_device_id, do_serio_entry},
5255 ++ {"acpi", SIZE_acpi_device_id, do_acpi_entry},
5256 ++ {"pcmcia", SIZE_pcmcia_device_id, do_pcmcia_entry},
5257 ++ {"vio", SIZE_vio_device_id, do_vio_entry},
5258 ++ {"input", SIZE_input_device_id, do_input_entry},
5259 ++ {"eisa", SIZE_eisa_device_id, do_eisa_entry},
5260 ++ {"parisc", SIZE_parisc_device_id, do_parisc_entry},
5261 ++ {"sdio", SIZE_sdio_device_id, do_sdio_entry},
5262 ++ {"ssb", SIZE_ssb_device_id, do_ssb_entry},
5263 ++ {"bcma", SIZE_bcma_device_id, do_bcma_entry},
5264 ++ {"virtio", SIZE_virtio_device_id, do_virtio_entry},
5265 ++ {"vmbus", SIZE_hv_vmbus_device_id, do_vmbus_entry},
5266 ++ {"rpmsg", SIZE_rpmsg_device_id, do_rpmsg_entry},
5267 ++ {"i2c", SIZE_i2c_device_id, do_i2c_entry},
5268 ++ {"spi", SIZE_spi_device_id, do_spi_entry},
5269 ++ {"dmi", SIZE_dmi_system_id, do_dmi_entry},
5270 ++ {"platform", SIZE_platform_device_id, do_platform_entry},
5271 ++ {"mdio", SIZE_mdio_device_id, do_mdio_entry},
5272 ++ {"zorro", SIZE_zorro_device_id, do_zorro_entry},
5273 ++ {"isapnp", SIZE_isapnp_device_id, do_isapnp_entry},
5274 ++ {"ipack", SIZE_ipack_device_id, do_ipack_entry},
5275 ++ {"amba", SIZE_amba_id, do_amba_entry},
5276 ++ {"mipscdmm", SIZE_mips_cdmm_device_id, do_mips_cdmm_entry},
5277 ++ {"x86cpu", SIZE_x86_cpu_id, do_x86cpu_entry},
5278 ++ {"cpu", SIZE_cpu_feature, do_cpu_entry},
5279 ++ {"mei", SIZE_mei_cl_device_id, do_mei_entry},
5280 ++ {"rapidio", SIZE_rio_device_id, do_rio_entry},
5281 ++ {"ulpi", SIZE_ulpi_device_id, do_ulpi_entry},
5282 ++ {"hdaudio", SIZE_hda_device_id, do_hda_entry},
5283 ++ {"sdw", SIZE_sdw_device_id, do_sdw_entry},
5284 ++ {"fslmc", SIZE_fsl_mc_device_id, do_fsl_mc_entry},
5285 ++ {"tbsvc", SIZE_tb_service_id, do_tbsvc_entry},
5286 ++ {"typec", SIZE_typec_device_id, do_typec_entry},
5287 ++};
5288 ++
5289 + /* Create MODULE_ALIAS() statements.
5290 + * At this time, we cannot write the actual output C source yet,
5291 + * so we write into the mod->dev_table_buf buffer. */
5292 +@@ -1450,13 +1402,14 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
5293 + else if (sym_is(name, namelen, "pnp_card"))
5294 + do_pnp_card_entries(symval, sym->st_size, mod);
5295 + else {
5296 +- struct devtable **p;
5297 +- INIT_SECTION(__devtable);
5298 ++ int i;
5299 ++
5300 ++ for (i = 0; i < ARRAY_SIZE(devtable); i++) {
5301 ++ const struct devtable *p = &devtable[i];
5302 +
5303 +- for (p = __start___devtable; p < __stop___devtable; p++) {
5304 +- if (sym_is(name, namelen, (*p)->device_id)) {
5305 +- do_table(symval, sym->st_size, (*p)->id_size,
5306 +- (*p)->device_id, (*p)->function, mod);
5307 ++ if (sym_is(name, namelen, p->device_id)) {
5308 ++ do_table(symval, sym->st_size, p->id_size,
5309 ++ p->device_id, p->do_entry, mod);
5310 + break;
5311 + }
5312 + }
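
The file2alias.c change replaces the ADD_TO_DEVTABLE linker-section trick, which needed Mach-O special-casing for Darwin hosts, with an ordinary static array that handle_moddevtable() scans linearly, and it gives do_entry a real function-pointer type instead of void *, so a mismatched handler signature now fails at compile time. The shape of that pattern, reduced to a runnable sketch with made-up entry types:

#include <stdio.h>
#include <string.h>

/* Typed handler instead of void *: wrong signatures no longer compile. */
typedef int (*entry_fn)(const char *filename, void *symval, char *alias);

static int do_foo_entry(const char *f, void *symval, char *alias)
{
	(void)f; (void)symval;
	return sprintf(alias, "foo:*") > 0;
}

static int do_bar_entry(const char *f, void *symval, char *alias)
{
	(void)f; (void)symval;
	return sprintf(alias, "bar:*") > 0;
}

struct devtable {
	const char *device_id;
	unsigned long id_size;
	entry_fn do_entry;
};

/* One flat array replaces per-entry section registration. */
static const struct devtable devtable[] = {
	{ "foo", 16, do_foo_entry },
	{ "bar", 32, do_bar_entry },
};

int main(void)
{
	const char *name = "bar";
	char alias[64];

	for (size_t i = 0; i < sizeof(devtable) / sizeof(devtable[0]); i++) {
		if (!strcmp(name, devtable[i].device_id)) {
			devtable[i].do_entry("mod.o", NULL, alias);
			printf("alias: %s\n", alias);
			break;
		}
	}
	return 0;
}
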
5313 +diff --git a/security/device_cgroup.c b/security/device_cgroup.c
5314 +index cd97929fac66..dc28914fa72e 100644
5315 +--- a/security/device_cgroup.c
5316 ++++ b/security/device_cgroup.c
5317 +@@ -560,7 +560,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
5318 + devcg->behavior == DEVCG_DEFAULT_ALLOW) {
5319 + rc = dev_exception_add(devcg, ex);
5320 + if (rc)
5321 +- break;
5322 ++ return rc;
5323 + } else {
5324 + /*
5325 + * in the other possible cases:
5326 +diff --git a/sound/core/info.c b/sound/core/info.c
5327 +index fe502bc5e6d2..679136fba730 100644
5328 +--- a/sound/core/info.c
5329 ++++ b/sound/core/info.c
5330 +@@ -722,8 +722,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent)
5331 + INIT_LIST_HEAD(&entry->children);
5332 + INIT_LIST_HEAD(&entry->list);
5333 + entry->parent = parent;
5334 +- if (parent)
5335 ++ if (parent) {
5336 ++ mutex_lock(&parent->access);
5337 + list_add_tail(&entry->list, &parent->children);
5338 ++ mutex_unlock(&parent->access);
5339 ++ }
5340 + return entry;
5341 + }
5342 +
5343 +@@ -805,7 +808,12 @@ void snd_info_free_entry(struct snd_info_entry * entry)
5344 + list_for_each_entry_safe(p, n, &entry->children, list)
5345 + snd_info_free_entry(p);
5346 +
5347 +- list_del(&entry->list);
5348 ++ p = entry->parent;
5349 ++ if (p) {
5350 ++ mutex_lock(&p->access);
5351 ++ list_del(&entry->list);
5352 ++ mutex_unlock(&p->access);
5353 ++ }
5354 + kfree(entry->name);
5355 + if (entry->private_free)
5356 + entry->private_free(entry);
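
Both info.c hunks close the same race: an entry was linked into, and later unlinked from, its parent's children list without holding the parent's access mutex, so concurrent create/free of sibling entries could corrupt the list. The fix simply brackets list_add_tail() and list_del() with that mutex. A userspace reduction of the locking rule, with a pthread mutex standing in for parent->access and a trivial singly linked list standing in for list_head:

#include <pthread.h>
#include <stdio.h>

struct entry {
	struct entry *next;		/* sibling link */
	struct entry *parent;
	pthread_mutex_t access;		/* guards this entry's child list */
	struct entry *children;
};

static void add_child(struct entry *parent, struct entry *e)
{
	e->parent = parent;
	pthread_mutex_lock(&parent->access);
	e->next = parent->children;	/* link under the parent's lock */
	parent->children = e;
	pthread_mutex_unlock(&parent->access);
}

static void remove_child(struct entry *e)
{
	struct entry *p = e->parent;
	if (!p)
		return;
	pthread_mutex_lock(&p->access);	/* unlink under the same lock */
	for (struct entry **pp = &p->children; *pp; pp = &(*pp)->next)
		if (*pp == e) { *pp = e->next; break; }
	pthread_mutex_unlock(&p->access);
}

int main(void)
{
	struct entry root = { .access = PTHREAD_MUTEX_INITIALIZER };
	struct entry child = { .access = PTHREAD_MUTEX_INITIALIZER };
	add_child(&root, &child);
	remove_child(&child);
	printf("children empty: %s\n", root.children ? "no" : "yes");
	return 0;
}
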
5357 +diff --git a/sound/core/init.c b/sound/core/init.c
5358 +index 4849c611c0fe..16b7cc7aa66b 100644
5359 +--- a/sound/core/init.c
5360 ++++ b/sound/core/init.c
5361 +@@ -407,14 +407,7 @@ int snd_card_disconnect(struct snd_card *card)
5362 + card->shutdown = 1;
5363 + spin_unlock(&card->files_lock);
5364 +
5365 +- /* phase 1: disable fops (user space) operations for ALSA API */
5366 +- mutex_lock(&snd_card_mutex);
5367 +- snd_cards[card->number] = NULL;
5368 +- clear_bit(card->number, snd_cards_lock);
5369 +- mutex_unlock(&snd_card_mutex);
5370 +-
5371 +- /* phase 2: replace file->f_op with special dummy operations */
5372 +-
5373 ++ /* replace file->f_op with special dummy operations */
5374 + spin_lock(&card->files_lock);
5375 + list_for_each_entry(mfile, &card->files_list, list) {
5376 + /* it's critical part, use endless loop */
5377 +@@ -430,7 +423,7 @@ int snd_card_disconnect(struct snd_card *card)
5378 + }
5379 + spin_unlock(&card->files_lock);
5380 +
5381 +- /* phase 3: notify all connected devices about disconnection */
5382 ++ /* notify all connected devices about disconnection */
5383 + /* at this point, they cannot respond to any calls except release() */
5384 +
5385 + #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
5386 +@@ -446,6 +439,13 @@ int snd_card_disconnect(struct snd_card *card)
5387 + device_del(&card->card_dev);
5388 + card->registered = false;
5389 + }
5390 ++
5391 ++ /* disable fops (user space) operations for ALSA API */
5392 ++ mutex_lock(&snd_card_mutex);
5393 ++ snd_cards[card->number] = NULL;
5394 ++ clear_bit(card->number, snd_cards_lock);
5395 ++ mutex_unlock(&snd_card_mutex);
5396 ++
5397 + #ifdef CONFIG_PM
5398 + wake_up(&card->power_sleep);
5399 + #endif
5400 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5401 +index bd60eb7168fa..0a745d677b1c 100644
5402 +--- a/sound/pci/hda/patch_realtek.c
5403 ++++ b/sound/pci/hda/patch_realtek.c
5404 +@@ -7170,6 +7170,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5405 + {0x12, 0x90a60140},
5406 + {0x14, 0x90170150},
5407 + {0x21, 0x02211020}),
5408 ++ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5409 ++ {0x21, 0x02211020}),
5410 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
5411 + {0x14, 0x90170110},
5412 + {0x21, 0x02211020}),
5413 +@@ -7280,6 +7282,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5414 + {0x21, 0x0221101f}),
5415 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5416 + ALC256_STANDARD_PINS),
5417 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5418 ++ {0x14, 0x90170110},
5419 ++ {0x1b, 0x01011020},
5420 ++ {0x21, 0x0221101f}),
5421 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
5422 + {0x14, 0x90170110},
5423 + {0x1b, 0x90a70130},
5424 +diff --git a/sound/soc/rockchip/rockchip_pcm.c b/sound/soc/rockchip/rockchip_pcm.c
5425 +index 7029e0b85f9e..4ac78d7a4b2d 100644
5426 +--- a/sound/soc/rockchip/rockchip_pcm.c
5427 ++++ b/sound/soc/rockchip/rockchip_pcm.c
5428 +@@ -21,7 +21,8 @@ static const struct snd_pcm_hardware snd_rockchip_hardware = {
5429 + .info = SNDRV_PCM_INFO_MMAP |
5430 + SNDRV_PCM_INFO_MMAP_VALID |
5431 + SNDRV_PCM_INFO_PAUSE |
5432 +- SNDRV_PCM_INFO_RESUME,
5433 ++ SNDRV_PCM_INFO_RESUME |
5434 ++ SNDRV_PCM_INFO_INTERLEAVED,
5435 + .period_bytes_min = 32,
5436 + .period_bytes_max = 8192,
5437 + .periods_min = 1,
5438 +diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
5439 +index acc704bd3998..0b0ef3abc966 100644
5440 +--- a/tools/include/linux/bitops.h
5441 ++++ b/tools/include/linux/bitops.h
5442 +@@ -3,8 +3,6 @@
5443 + #define _TOOLS_LINUX_BITOPS_H_
5444 +
5445 + #include <asm/types.h>
5446 +-#include <linux/compiler.h>
5447 +-
5448 + #ifndef __WORDSIZE
5449 + #define __WORDSIZE (__SIZEOF_LONG__ * 8)
5450 + #endif
5451 +@@ -12,10 +10,9 @@
5452 + #ifndef BITS_PER_LONG
5453 + # define BITS_PER_LONG __WORDSIZE
5454 + #endif
5455 ++#include <linux/bits.h>
5456 ++#include <linux/compiler.h>
5457 +
5458 +-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
5459 +-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
5460 +-#define BITS_PER_BYTE 8
5461 + #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
5462 + #define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
5463 + #define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
5464 +diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
5465 +new file mode 100644
5466 +index 000000000000..2b7b532c1d51
5467 +--- /dev/null
5468 ++++ b/tools/include/linux/bits.h
5469 +@@ -0,0 +1,26 @@
5470 ++/* SPDX-License-Identifier: GPL-2.0 */
5471 ++#ifndef __LINUX_BITS_H
5472 ++#define __LINUX_BITS_H
5473 ++#include <asm/bitsperlong.h>
5474 ++
5475 ++#define BIT(nr) (1UL << (nr))
5476 ++#define BIT_ULL(nr) (1ULL << (nr))
5477 ++#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
5478 ++#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
5479 ++#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
5480 ++#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
5481 ++#define BITS_PER_BYTE 8
5482 ++
5483 ++/*
5484 ++ * Create a contiguous bitmask starting at bit position @l and ending at
5485 ++ * position @h. For example
5486 ++ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
5487 ++ */
5488 ++#define GENMASK(h, l) \
5489 ++ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
5490 ++
5491 ++#define GENMASK_ULL(h, l) \
5492 ++ (((~0ULL) - (1ULL << (l)) + 1) & \
5493 ++ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
5494 ++
5495 ++#endif /* __LINUX_BITS_H */
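
The new tools-side bits.h mirrors the kernel header of the same name. GENMASK(h, l) builds a mask of all bits from l through h inclusive by intersecting "every bit at or above l" (the two's-complement trick ~0 - 2^l + 1) with "every bit at or below h" (~0 shifted right). A quick standalone check of the ULL variant exactly as written above, assuming the usual 64-bit long long:

#include <stdio.h>

#define BITS_PER_LONG_LONG 64

#define GENMASK_ULL(h, l) \
	(((~0ULL) - (1ULL << (l)) + 1) & \
	 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

int main(void)
{
	/* The header's own example: bits 21..39 set. */
	printf("GENMASK_ULL(39, 21) = 0x%016llx\n", GENMASK_ULL(39, 21));
	/* expected: 0x000000ffffe00000 */
	printf("GENMASK_ULL(7, 0)   = 0x%016llx\n", GENMASK_ULL(7, 0));
	/* expected: 0x00000000000000ff */
	return 0;
}
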
5496 +diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
5497 +index 466540ee8ea7..c72cc73a6b09 100755
5498 +--- a/tools/perf/check-headers.sh
5499 ++++ b/tools/perf/check-headers.sh
5500 +@@ -14,6 +14,7 @@ include/uapi/linux/sched.h
5501 + include/uapi/linux/stat.h
5502 + include/uapi/linux/vhost.h
5503 + include/uapi/sound/asound.h
5504 ++include/linux/bits.h
5505 + include/linux/hash.h
5506 + include/uapi/linux/hw_breakpoint.h
5507 + arch/x86/include/asm/disabled-features.h