From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Sat, 27 Apr 2019 17:35:25
Message-Id: 1556386501.dabdcb80da4a893445e4cf5920b5fc3d4f884451.mpagano@gentoo
commit: dabdcb80da4a893445e4cf5920b5fc3d4f884451
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Apr 27 17:35:01 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Apr 27 17:35:01 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dabdcb80

Linux patch 4.14.114

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1113_linux-4.14.114.patch | 4835 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4839 insertions(+)

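The 0000_README entries shown below follow the usual genpatches convention: each
numbered patch is applied in sequence with -p1 on top of a vanilla kernel tree
that already carries the earlier entries. A minimal sketch of how this new entry
would be consumed, with purely illustrative local paths:

    # Hypothetical example: a tree already patched up to 4.14.113
    cd ~/src/linux-4.14
    patch -p1 < ~/src/linux-patches/1113_linux-4.14.114.patch

The first hunk of the patch below bumps SUBLEVEL in the top-level Makefile from
113 to 114 accordingly.
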
diff --git a/0000_README b/0000_README
index 94d78d9..10dfc5f 100644
--- a/0000_README
+++ b/0000_README
@@ -495,6 +495,10 @@ Patch: 1112_4.14.113.patch
From: http://www.kernel.org
Desc: Linux 4.14.113

+Patch: 1113_4.14.114.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.114
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1113_linux-4.14.114.patch b/1113_linux-4.14.114.patch
new file mode 100644
index 0000000..df15879
--- /dev/null
+++ b/1113_linux-4.14.114.patch
@@ -0,0 +1,4835 @@
+diff --git a/Makefile b/Makefile
+index fcfef30ca9a6..47a9f9883bdd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 113
++SUBLEVEL = 114
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+@@ -653,8 +653,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
+ KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
+
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+-KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
+-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
+ else
+ ifdef CONFIG_PROFILE_ALL_BRANCHES
+ KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index b447b4db423a..fd1e722f3821 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -50,7 +50,7 @@ do { \
+ static inline int
+ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
+ {
+- int oldval, ret, tmp;
++ int oldval = 0, ret, tmp;
+ u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
+
+ pagefault_disable();
+diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
+index 3b6e70d085da..8457cdd47f75 100644
+--- a/arch/x86/crypto/poly1305-avx2-x86_64.S
++++ b/arch/x86/crypto/poly1305-avx2-x86_64.S
+@@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2)
+ vpaddq t2,t1,t1
+ vmovq t1x,d4
+
++ # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
++ # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
++ # amount. Careful: we must not assume the carry bits 'd0 >> 26',
++ # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
++ # integers. It's true in a single-block implementation, but not here.
++
+ # d1 += d0 >> 26
+ mov d0,%rax
+ shr $26,%rax
+@@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2)
+ # h0 += (d4 >> 26) * 5
+ mov d4,%rax
+ shr $26,%rax
+- lea (%eax,%eax,4),%eax
+- add %eax,%ebx
++ lea (%rax,%rax,4),%rax
++ add %rax,%rbx
+ # h4 = d4 & 0x3ffffff
+ mov d4,%rax
+ and $0x3ffffff,%eax
+ mov %eax,h4
+
+ # h1 += h0 >> 26
+- mov %ebx,%eax
+- shr $26,%eax
++ mov %rbx,%rax
++ shr $26,%rax
+ add %eax,h1
+ # h0 = h0 & 0x3ffffff
+ andl $0x3ffffff,%ebx
+diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
+index c88c670cb5fc..5851c7418fb7 100644
+--- a/arch/x86/crypto/poly1305-sse2-x86_64.S
++++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
+@@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2)
+ # h0 += (d4 >> 26) * 5
+ mov d4,%rax
+ shr $26,%rax
+- lea (%eax,%eax,4),%eax
+- add %eax,%ebx
++ lea (%rax,%rax,4),%rax
++ add %rax,%rbx
+ # h4 = d4 & 0x3ffffff
+ mov d4,%rax
+ and $0x3ffffff,%eax
+ mov %eax,h4
+
+ # h1 += h0 >> 26
+- mov %ebx,%eax
+- shr $26,%eax
++ mov %rbx,%rax
++ shr $26,%rax
+ add %eax,h1
+ # h0 = h0 & 0x3ffffff
+ andl $0x3ffffff,%ebx
+@@ -520,6 +520,12 @@ ENTRY(poly1305_2block_sse2)
+ paddq t2,t1
+ movq t1,d4
+
++ # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
++ # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
++ # amount. Careful: we must not assume the carry bits 'd0 >> 26',
++ # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
++ # integers. It's true in a single-block implementation, but not here.
++
+ # d1 += d0 >> 26
+ mov d0,%rax
+ shr $26,%rax
+@@ -558,16 +564,16 @@ ENTRY(poly1305_2block_sse2)
+ # h0 += (d4 >> 26) * 5
+ mov d4,%rax
+ shr $26,%rax
+- lea (%eax,%eax,4),%eax
+- add %eax,%ebx
++ lea (%rax,%rax,4),%rax
++ add %rax,%rbx
+ # h4 = d4 & 0x3ffffff
+ mov d4,%rax
+ and $0x3ffffff,%eax
+ mov %eax,h4
+
+ # h1 += h0 >> 26
+- mov %ebx,%eax
+- shr $26,%eax
++ mov %rbx,%rax
++ shr $26,%rax
+ add %eax,h1
+ # h0 = h0 & 0x3ffffff
+ andl $0x3ffffff,%ebx
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index 3e5dd85b019a..263af6312329 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -117,22 +117,39 @@ static __initconst const u64 amd_hw_cache_event_ids
+ };
+
+ /*
+- * AMD Performance Monitor K7 and later.
++ * AMD Performance Monitor K7 and later, up to and including Family 16h:
+ */
+ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
+ {
+- [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
+- [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
+- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
+- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
+- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
+- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
++ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
++ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
++ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
++ [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
++ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
++};
++
++/*
++ * AMD Performance Monitor Family 17h and later:
++ */
++static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
++{
++ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
++ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
++ [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
++ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
+ };
+
+ static u64 amd_pmu_event_map(int hw_event)
+ {
++ if (boot_cpu_data.x86 >= 0x17)
++ return amd_f17h_perfmon_event_map[hw_event];
++
+ return amd_perfmon_event_map[hw_event];
+ }
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index dc8f8b3e6cec..99d45660242e 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3001,7 +3001,7 @@ static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
+ flags &= ~PERF_SAMPLE_TIME;
+ if (!event->attr.exclude_kernel)
+ flags &= ~PERF_SAMPLE_REGS_USER;
+- if (event->attr.sample_regs_user & ~PEBS_REGS)
++ if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
+ flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
+ return flags;
+ }
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index 84b3841c131d..bfe16631fd1d 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -95,25 +95,25 @@ struct amd_nb {
+ PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
+ PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)
+
+-#define PEBS_REGS \
+- (PERF_REG_X86_AX | \
+- PERF_REG_X86_BX | \
+- PERF_REG_X86_CX | \
+- PERF_REG_X86_DX | \
+- PERF_REG_X86_DI | \
+- PERF_REG_X86_SI | \
+- PERF_REG_X86_SP | \
+- PERF_REG_X86_BP | \
+- PERF_REG_X86_IP | \
+- PERF_REG_X86_FLAGS | \
+- PERF_REG_X86_R8 | \
+- PERF_REG_X86_R9 | \
+- PERF_REG_X86_R10 | \
+- PERF_REG_X86_R11 | \
+- PERF_REG_X86_R12 | \
+- PERF_REG_X86_R13 | \
+- PERF_REG_X86_R14 | \
+- PERF_REG_X86_R15)
++#define PEBS_GP_REGS \
++ ((1ULL << PERF_REG_X86_AX) | \
++ (1ULL << PERF_REG_X86_BX) | \
++ (1ULL << PERF_REG_X86_CX) | \
++ (1ULL << PERF_REG_X86_DX) | \
++ (1ULL << PERF_REG_X86_DI) | \
++ (1ULL << PERF_REG_X86_SI) | \
++ (1ULL << PERF_REG_X86_SP) | \
++ (1ULL << PERF_REG_X86_BP) | \
++ (1ULL << PERF_REG_X86_IP) | \
++ (1ULL << PERF_REG_X86_FLAGS) | \
++ (1ULL << PERF_REG_X86_R8) | \
++ (1ULL << PERF_REG_X86_R9) | \
++ (1ULL << PERF_REG_X86_R10) | \
++ (1ULL << PERF_REG_X86_R11) | \
++ (1ULL << PERF_REG_X86_R12) | \
++ (1ULL << PERF_REG_X86_R13) | \
++ (1ULL << PERF_REG_X86_R14) | \
++ (1ULL << PERF_REG_X86_R15))
+
+ /*
+ * Per register state.
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index ec7aedba3d74..5567705e0601 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -271,7 +271,7 @@ static const struct {
+ const char *option;
+ enum spectre_v2_user_cmd cmd;
+ bool secure;
+-} v2_user_options[] __initdata = {
++} v2_user_options[] __initconst = {
+ { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
+ { "off", SPECTRE_V2_USER_CMD_NONE, false },
+ { "on", SPECTRE_V2_USER_CMD_FORCE, true },
+@@ -406,7 +406,7 @@ static const struct {
+ const char *option;
+ enum spectre_v2_mitigation_cmd cmd;
+ bool secure;
+-} mitigation_options[] __initdata = {
++} mitigation_options[] __initconst = {
+ { "off", SPECTRE_V2_CMD_NONE, false },
+ { "on", SPECTRE_V2_CMD_FORCE, true },
+ { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
+@@ -642,7 +642,7 @@ static const char * const ssb_strings[] = {
+ static const struct {
+ const char *option;
+ enum ssb_mitigation_cmd cmd;
+-} ssb_mitigation_options[] __initdata = {
++} ssb_mitigation_options[] __initconst = {
+ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
+ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
+ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 65452d555f05..56cf6c263254 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -553,6 +553,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
+ unsigned long *sara = stack_addr(regs);
+
+ ri->ret_addr = (kprobe_opcode_t *) *sara;
++ ri->fp = sara;
+
+ /* Replace the return addr with trampoline addr */
+ *sara = (unsigned long) &kretprobe_trampoline;
+@@ -754,15 +755,21 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+ kprobe_opcode_t *correct_ret_addr = NULL;
++ void *frame_pointer;
++ bool skipped = false;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+ /* fixup registers */
+ #ifdef CONFIG_X86_64
+ regs->cs = __KERNEL_CS;
++ /* On x86-64, we use pt_regs->sp for return address holder. */
++ frame_pointer = &regs->sp;
+ #else
+ regs->cs = __KERNEL_CS | get_kernel_rpl();
+ regs->gs = 0;
++ /* On x86-32, we use pt_regs->flags for return address holder. */
++ frame_pointer = &regs->flags;
+ #endif
+ regs->ip = trampoline_address;
+ regs->orig_ax = ~0UL;
+@@ -784,8 +791,25 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
++ /*
++ * Return probes must be pushed on this hash list correct
++ * order (same as return order) so that it can be poped
++ * correctly. However, if we find it is pushed it incorrect
++ * order, this means we find a function which should not be
++ * probed, because the wrong order entry is pushed on the
++ * path of processing other kretprobe itself.
++ */
++ if (ri->fp != frame_pointer) {
++ if (!skipped)
++ pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
++ skipped = true;
++ continue;
++ }
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
++ if (skipped)
++ pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
++ ri->rp->kp.addr);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+@@ -803,6 +827,8 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
++ if (ri->fp != frame_pointer)
++ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ if (ri->rp && ri->rp->handler) {
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index a98d1cdd6299..d2ef967bfafb 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -465,10 +465,12 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
+
+ void speculation_ctrl_update(unsigned long tif)
+ {
++ unsigned long flags;
++
+ /* Forced update. Make sure all relevant TIF flags are different */
+- preempt_disable();
++ local_irq_save(flags);
+ __speculation_ctrl_update(~tif, tif);
+- preempt_enable();
++ local_irq_restore(flags);
+ }
+
+ /* Called from seccomp/prctl update */
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 5f758568fc44..2bcadfc5b2f0 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2588,15 +2588,13 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
+ * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
+ * supports long mode.
+ */
+- cr4 = ctxt->ops->get_cr(ctxt, 4);
+ if (emulator_has_longmode(ctxt)) {
+ struct desc_struct cs_desc;
+
+ /* Zero CR4.PCIDE before CR0.PG. */
+- if (cr4 & X86_CR4_PCIDE) {
++ cr4 = ctxt->ops->get_cr(ctxt, 4);
++ if (cr4 & X86_CR4_PCIDE)
+ ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
+- cr4 &= ~X86_CR4_PCIDE;
+- }
+
+ /* A 32-bit code segment is required to clear EFER.LMA. */
+ memset(&cs_desc, 0, sizeof(cs_desc));
+@@ -2610,13 +2608,16 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
+ if (cr0 & X86_CR0_PE)
+ ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
+
+- /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
+- if (cr4 & X86_CR4_PAE)
+- ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
++ if (emulator_has_longmode(ctxt)) {
++ /* Clear CR4.PAE before clearing EFER.LME. */
++ cr4 = ctxt->ops->get_cr(ctxt, 4);
++ if (cr4 & X86_CR4_PAE)
++ ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
+
+- /* And finally go back to 32-bit mode. */
+- efer = 0;
+- ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
++ /* And finally go back to 32-bit mode. */
++ efer = 0;
++ ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
++ }
+
+ smbase = ctxt->ops->get_smbase(ctxt);
+ if (emulator_has_longmode(ctxt))
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index c387047e926a..1296e44fd969 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2211,6 +2211,7 @@ static int pf_interception(struct vcpu_svm *svm)
+ static int db_interception(struct vcpu_svm *svm)
+ {
+ struct kvm_run *kvm_run = svm->vcpu.run;
++ struct kvm_vcpu *vcpu = &svm->vcpu;
+
+ if (!(svm->vcpu.guest_debug &
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
+@@ -2221,6 +2222,8 @@ static int db_interception(struct vcpu_svm *svm)
+
+ if (svm->nmi_singlestep) {
+ disable_nmi_singlestep(svm);
++ /* Make sure we check for pending NMIs upon entry */
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ }
+
+ if (svm->vcpu.guest_debug &
+@@ -4014,14 +4017,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
+ kvm_lapic_reg_write(apic, APIC_ICR, icrl);
+ break;
+ case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
++ int i;
++ struct kvm_vcpu *vcpu;
++ struct kvm *kvm = svm->vcpu.kvm;
+ struct kvm_lapic *apic = svm->vcpu.arch.apic;
+
+ /*
+- * Update ICR high and low, then emulate sending IPI,
+- * which is handled when writing APIC_ICR.
++ * At this point, we expect that the AVIC HW has already
++ * set the appropriate IRR bits on the valid target
++ * vcpus. So, we just need to kick the appropriate vcpu.
+ */
+- kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
+- kvm_lapic_reg_write(apic, APIC_ICR, icrl);
++ kvm_for_each_vcpu(i, vcpu, kvm) {
++ bool m = kvm_apic_match_dest(vcpu, apic,
++ icrl & KVM_APIC_SHORT_MASK,
++ GET_APIC_DEST_FIELD(icrh),
++ icrl & KVM_APIC_DEST_MASK);
++
++ if (m && !avic_vcpu_is_running(vcpu))
++ kvm_vcpu_wake_up(vcpu);
++ }
+ break;
+ }
+ case AVIC_IPI_FAILURE_INVALID_TARGET:
+diff --git a/crypto/testmgr.h b/crypto/testmgr.h
+index fbc0fab5e79e..12835f072614 100644
+--- a/crypto/testmgr.h
++++ b/crypto/testmgr.h
+@@ -4660,7 +4660,49 @@ static const struct hash_testvec poly1305_tv_template[] = {
+ .psize = 80,
+ .digest = "\x13\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+- },
++ }, { /* Regression test for overflow in AVX2 implementation */
++ .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff\xff\xff\xff\xff"
++ "\xff\xff\xff\xff",
++ .psize = 300,
++ .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
++ "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
++ }
+ };
+
+ /*
+diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
+index 32a8e27c5382..cc4e642d3180 100644
+--- a/drivers/char/tpm/tpm_i2c_atmel.c
++++ b/drivers/char/tpm/tpm_i2c_atmel.c
+@@ -69,6 +69,10 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
+ if (status < 0)
+ return status;
+
++ /* The upper layer does not support incomplete sends. */
++ if (status != len)
++ return -E2BIG;
++
+ return 0;
+ }
+
+diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
+index 4afca3968773..e3b8bebfdd30 100644
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -138,7 +138,8 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ ctx->hash_final = 0;
+
+- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
++ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
++ SA_SAVE_IV : SA_NOT_SAVE_IV),
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
+ SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
+index 3f9eee7e555f..8d4d8db244e9 100644
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -645,6 +645,15 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
+ addr = dma_map_page(dev->core_dev->device, sg_page(dst),
+ dst->offset, dst->length, DMA_FROM_DEVICE);
+ }
++
++ if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++
++ crypto4xx_memcpy_from_le32((u32 *)req->iv,
++ pd_uinfo->sr_va->save_iv,
++ crypto_skcipher_ivsize(skcipher));
++ }
++
+ crypto4xx_ret_sg_desc(dev, pd_uinfo);
+ if (ablk_req->base.complete != NULL)
+ ablk_req->base.complete(&ablk_req->base, 0);
+diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+index 1d645c9ab417..cac262a912c1 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+@@ -337,7 +337,8 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
+ },
+ .driver_data = (void *)&sipodev_desc
+- }
++ },
++ { } /* Terminate list */
+ };
+
+
+diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
+index 784636800361..780f886ccbfe 100644
+--- a/drivers/iio/accel/kxcjk-1013.c
++++ b/drivers/iio/accel/kxcjk-1013.c
+@@ -1340,6 +1340,8 @@ static int kxcjk1013_resume(struct device *dev)
+
+ mutex_lock(&data->mutex);
+ ret = kxcjk1013_set_mode(data, OPERATION);
++ if (ret == 0)
++ ret = kxcjk1013_set_range(data, data->range);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index 22c4c17cd996..a1d072ecb717 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
+ if (sigma_delta->info->has_registers) {
+ data[0] = reg << sigma_delta->info->addr_shift;
+ data[0] |= sigma_delta->info->read_mask;
++ data[0] |= sigma_delta->comm;
+ spi_message_add_tail(&t[0], &m);
+ }
+ spi_message_add_tail(&t[1], &m);
+diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
+index cd686179aa92..492f6c8ba735 100644
+--- a/drivers/iio/adc/at91_adc.c
++++ b/drivers/iio/adc/at91_adc.c
+@@ -705,23 +705,29 @@ static int at91_adc_read_raw(struct iio_dev *idev,
+ ret = wait_event_interruptible_timeout(st->wq_data_avail,
+ st->done,
+ msecs_to_jiffies(1000));
+- if (ret == 0)
+- ret = -ETIMEDOUT;
+- if (ret < 0) {
+- mutex_unlock(&st->lock);
+- return ret;
+- }
+-
+- *val = st->last_value;
+
++ /* Disable interrupts, regardless if adc conversion was
++ * successful or not
++ */
+ at91_adc_writel(st, AT91_ADC_CHDR,
+ AT91_ADC_CH(chan->channel));
+ at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
+
+- st->last_value = 0;
+- st->done = false;
++ if (ret > 0) {
++ /* a valid conversion took place */
++ *val = st->last_value;
++ st->last_value = 0;
++ st->done = false;
++ ret = IIO_VAL_INT;
++ } else if (ret == 0) {
++ /* conversion timeout */
++ dev_err(&idev->dev, "ADC Channel %d timeout.\n",
++ chan->channel);
++ ret = -ETIMEDOUT;
++ }
++
+ mutex_unlock(&st->lock);
+- return IIO_VAL_INT;
++ return ret;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->vref_mv;
+diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+index 38e8783e4b05..287fbe08264d 100644
+--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
++++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+@@ -104,9 +104,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
+ * Do not use IIO_DEGREE_TO_RAD to avoid precision
+ * loss. Round to the nearest integer.
+ */
+- *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
+- *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
+- ret = IIO_VAL_FRACTIONAL;
++ *val = 0;
++ *val2 = div_s64(val64 * 3141592653ULL,
++ 180 << (CROS_EC_SENSOR_BITS - 1));
++ ret = IIO_VAL_INT_PLUS_NANO;
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ /*
+diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
+index 6ab1f23e5a79..fe3e42defb33 100644
+--- a/drivers/iio/dac/mcp4725.c
++++ b/drivers/iio/dac/mcp4725.c
+@@ -98,6 +98,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
+
+ inoutbuf[0] = 0x60; /* write EEPROM */
+ inoutbuf[0] |= data->ref_mode << 3;
++ inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0;
+ inoutbuf[1] = data->dac_value >> 4;
+ inoutbuf[2] = (data->dac_value & 0xf) << 4;
+
+diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
+index 821919dd245b..b5a5517e3ce1 100644
+--- a/drivers/iio/gyro/bmg160_core.c
++++ b/drivers/iio/gyro/bmg160_core.c
+@@ -583,11 +583,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return bmg160_get_filter(data, val);
+ case IIO_CHAN_INFO_SCALE:
+- *val = 0;
+ switch (chan->type) {
+ case IIO_TEMP:
+- *val2 = 500000;
+- return IIO_VAL_INT_PLUS_MICRO;
++ *val = 500;
++ return IIO_VAL_INT;
+ case IIO_ANGL_VEL:
+ {
+ int i;
+@@ -595,6 +594,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
+ for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
+ if (bmg160_scale_table[i].dps_range ==
+ data->dps_range) {
++ *val = 0;
+ *val2 = bmg160_scale_table[i].scale;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
+index e0d241a9aa30..a7be4670bf8f 100644
+--- a/drivers/iio/gyro/mpu3050-core.c
++++ b/drivers/iio/gyro/mpu3050-core.c
+@@ -29,7 +29,8 @@
+
+ #include "mpu3050.h"
+
+-#define MPU3050_CHIP_ID 0x69
++#define MPU3050_CHIP_ID 0x68
++#define MPU3050_CHIP_ID_MASK 0x7E
+
+ /*
+ * Register map: anything suffixed *_H is a big-endian high byte and always
+@@ -1178,8 +1179,9 @@ int mpu3050_common_probe(struct device *dev,
+ goto err_power_down;
+ }
+
+- if (val != MPU3050_CHIP_ID) {
+- dev_err(dev, "unsupported chip id %02x\n", (u8)val);
++ if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) {
++ dev_err(dev, "unsupported chip id %02x\n",
++ (u8)(val & MPU3050_CHIP_ID_MASK));
+ ret = -ENODEV;
+ goto err_power_down;
+ }
+diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
+index 78482d456c3b..d50125766093 100644
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
+ const unsigned long *mask;
+ unsigned long *trialmask;
+
+- trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
+- sizeof(*trialmask),
+- GFP_KERNEL);
++ trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
++ sizeof(*trialmask), GFP_KERNEL);
+ if (trialmask == NULL)
+ return -ENOMEM;
+ if (!indio_dev->masklength) {
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index e565fd4fc414..97b7266ee0ff 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -1741,10 +1741,10 @@ EXPORT_SYMBOL(iio_device_register);
+ **/
+ void iio_device_unregister(struct iio_dev *indio_dev)
+ {
+- mutex_lock(&indio_dev->info_exist_lock);
+-
+ cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
+
++ mutex_lock(&indio_dev->info_exist_lock);
++
+ iio_device_unregister_debugfs(indio_dev);
+
+ iio_disable_all_buffers(indio_dev);
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index ff5c4ad37a3a..8c0b80a54e4d 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -425,7 +425,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
+ val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
+ /* the std tuning bits is in ACMD12_ERR for imx6sl */
+- val = readl(host->ioaddr + SDHCI_ACMD12_ERR);
++ val = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+ }
+
+ if (val & ESDHC_MIX_CTRL_EXE_TUNE)
+@@ -490,7 +490,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
+ }
+ writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
+ } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+- u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
++ u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+ u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ if (val & SDHCI_CTRL_TUNED_CLK) {
+ v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
+@@ -508,7 +508,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
+ v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+ }
+
+- writel(v, host->ioaddr + SDHCI_ACMD12_ERR);
++ writel(v, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+ }
+ return;
+@@ -937,9 +937,9 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
+ writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL);
+ writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+- ctrl = readl(host->ioaddr + SDHCI_ACMD12_ERR);
++ ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+ ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+- writel(ctrl, host->ioaddr + SDHCI_ACMD12_ERR);
++ writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+ }
+ }
+ }
+@@ -1303,7 +1303,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
+
+ /* clear tuning bits in case ROM has set it already */
+ writel(0x0, host->ioaddr + ESDHC_MIX_CTRL);
+- writel(0x0, host->ioaddr + SDHCI_ACMD12_ERR);
++ writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+ writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ }
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 0edcc2763f3c..9540fda7fc6b 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -82,8 +82,8 @@ void sdhci_dumpregs(struct sdhci_host *host)
+ SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
+ sdhci_readl(host, SDHCI_INT_ENABLE),
+ sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
+- SDHCI_DUMP("AC12 err: 0x%08x | Slot int: 0x%08x\n",
+- sdhci_readw(host, SDHCI_ACMD12_ERR),
++ SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
++ sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
+ sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
+ SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
+ sdhci_readl(host, SDHCI_CAPABILITIES),
+@@ -790,6 +790,11 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
+ else
+ host->ier = (host->ier & ~dma_irqs) | pio_irqs;
+
++ if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
++ host->ier |= SDHCI_INT_AUTO_CMD_ERR;
++ else
++ host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
++
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+@@ -1002,8 +1007,7 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
+ return (!(host->flags & SDHCI_DEVICE_DEAD) &&
+ ((mrq->cmd && mrq->cmd->error) ||
+ (mrq->sbc && mrq->sbc->error) ||
+- (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
+- (mrq->data->stop && mrq->data->stop->error))) ||
++ (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
+ (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
+ }
+
+@@ -1055,6 +1059,16 @@ static void sdhci_finish_data(struct sdhci_host *host)
+ host->data = NULL;
+ host->data_cmd = NULL;
+
++ /*
++ * The controller needs a reset of internal state machines upon error
++ * conditions.
++ */
++ if (data->error) {
++ if (!host->cmd || host->cmd == data_cmd)
++ sdhci_do_reset(host, SDHCI_RESET_CMD);
++ sdhci_do_reset(host, SDHCI_RESET_DATA);
++ }
++
+ if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
+ (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
+ sdhci_adma_table_post(host, data);
+@@ -1079,17 +1093,6 @@ static void sdhci_finish_data(struct sdhci_host *host)
+ if (data->stop &&
+ (data->error ||
+ !data->mrq->sbc)) {
+-
+- /*
+- * The controller needs a reset of internal state machines
+- * upon error conditions.
+- */
+- if (data->error) {
+- if (!host->cmd || host->cmd == data_cmd)
+- sdhci_do_reset(host, SDHCI_RESET_CMD);
+- sdhci_do_reset(host, SDHCI_RESET_DATA);
+- }
+-
+ /*
+ * 'cap_cmd_during_tfr' request must not use the command line
+ * after mmc_command_done() has been called. It is upper layer's
+@@ -2560,8 +2563,23 @@ static void sdhci_timeout_data_timer(unsigned long data)
+ * *
+ \*****************************************************************************/
+
+-static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
++static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
+ {
++ /* Handle auto-CMD12 error */
++ if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
++ struct mmc_request *mrq = host->data_cmd->mrq;
++ u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
++ int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
++ SDHCI_INT_DATA_TIMEOUT :
++ SDHCI_INT_DATA_CRC;
++
++ /* Treat auto-CMD12 error the same as data error */
++ if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
++ *intmask_p |= data_err_bit;
++ return;
++ }
++ }
++
+ if (!host->cmd) {
+ /*
+ * SDHCI recovers from errors by resetting the cmd and data
+@@ -2583,20 +2601,12 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
+ else
+ host->cmd->error = -EILSEQ;
+
+- /*
+- * If this command initiates a data phase and a response
+- * CRC error is signalled, the card can start transferring
+- * data - the card may have received the command without
+- * error. We must not terminate the mmc_request early.
+- *
+- * If the card did not receive the command or returned an
+- * error which prevented it sending data, the data phase
+- * will time out.
+- */
++ /* Treat data command CRC error the same as data CRC error */
+ if (host->cmd->data &&
+ (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
+ SDHCI_INT_CRC) {
+ host->cmd = NULL;
++ *intmask_p |= SDHCI_INT_DATA_CRC;
+ return;
+ }
+
+@@ -2604,6 +2614,21 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
+ return;
+ }
+
++ /* Handle auto-CMD23 error */
++ if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
++ struct mmc_request *mrq = host->cmd->mrq;
++ u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
++ int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
++ -ETIMEDOUT :
++ -EILSEQ;
++
++ if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
++ mrq->sbc->error = err;
++ sdhci_finish_mrq(host, mrq);
++ return;
++ }
++ }
++
+ if (intmask & SDHCI_INT_RESPONSE)
+ sdhci_finish_command(host);
+ }
+@@ -2824,7 +2849,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
+ }
+
+ if (intmask & SDHCI_INT_CMD_MASK)
+- sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
++ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
+
+ if (intmask & SDHCI_INT_DATA_MASK)
+ sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 1d7d61e25dbf..c0d5458c36d4 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -144,14 +144,15 @@
+ #define SDHCI_INT_DATA_CRC 0x00200000
+ #define SDHCI_INT_DATA_END_BIT 0x00400000
+ #define SDHCI_INT_BUS_POWER 0x00800000
+-#define SDHCI_INT_ACMD12ERR 0x01000000
++#define SDHCI_INT_AUTO_CMD_ERR 0x01000000
+ #define SDHCI_INT_ADMA_ERROR 0x02000000
+
+ #define SDHCI_INT_NORMAL_MASK 0x00007FFF
+ #define SDHCI_INT_ERROR_MASK 0xFFFF8000
+
+ #define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
+- SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
++ SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \
++ SDHCI_INT_AUTO_CMD_ERR)
+ #define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
+ SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
+ SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
+@@ -166,7 +167,11 @@
+
+ #define SDHCI_CQE_INT_MASK (SDHCI_CQE_INT_ERR_MASK | SDHCI_INT_CQE)
+
+-#define SDHCI_ACMD12_ERR 0x3C
++#define SDHCI_AUTO_CMD_STATUS 0x3C
++#define SDHCI_AUTO_CMD_TIMEOUT 0x00000002
++#define SDHCI_AUTO_CMD_CRC 0x00000004
++#define SDHCI_AUTO_CMD_END_BIT 0x00000008
++#define SDHCI_AUTO_CMD_INDEX 0x00000010
+
+ #define SDHCI_HOST_CONTROL2 0x3E
+ #define SDHCI_CTRL_UHS_MASK 0x0007
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 99e60bb5fe07..1edd4ff5382c 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3169,8 +3169,12 @@ static int bond_netdev_event(struct notifier_block *this,
+ return NOTIFY_DONE;
+
+ if (event_dev->flags & IFF_MASTER) {
++ int ret;
++
+ netdev_dbg(event_dev, "IFF_MASTER\n");
+- return bond_master_netdev_event(event, event_dev);
++ ret = bond_master_netdev_event(event, event_dev);
++ if (ret != NOTIFY_DONE)
++ return ret;
+ }
+
+ if (event_dev->flags & IFF_SLAVE) {
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+index 59b62b49ad48..98734a37b6f6 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -29,6 +29,13 @@
+ #define DRV_NAME "thunder-nicvf"
+ #define DRV_VERSION "1.0"
+
++/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
++ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
++ * this value, keeping headroom for the 14 byte Ethernet header and two
++ * VLAN tags (for QinQ)
++ */
++#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)
++
+ /* Supported devices */
+ static const struct pci_device_id nicvf_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+@@ -1454,6 +1461,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
+ struct nicvf *nic = netdev_priv(netdev);
+ int orig_mtu = netdev->mtu;
+
++ /* For now just support only the usual MTU sized frames,
++ * plus some headroom for VLAN, QinQ.
++ */
++ if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
++ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
++ netdev->mtu);
++ return -EINVAL;
++ }
++
+ netdev->mtu = new_mtu;
+
+ if (!netif_running(netdev))
+@@ -1702,8 +1718,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
+ bool bpf_attached = false;
+ int ret = 0;
+
+- /* For now just support only the usual MTU sized frames */
+- if (prog && (dev->mtu > 1500)) {
++ /* For now just support only the usual MTU sized frames,
++ * plus some headroom for VLAN, QinQ.
++ */
++ if (prog && dev->mtu > MAX_XDP_MTU) {
+ netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ dev->mtu);
+ return -EOPNOTSUPP;
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index bb96153f496e..fea141e71705 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1245,6 +1245,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
+ goto err_option_port_add;
+ }
+
++ /* set promiscuity level to new slave */
++ if (dev->flags & IFF_PROMISC) {
++ err = dev_set_promiscuity(port_dev, 1);
++ if (err)
++ goto err_set_slave_promisc;
++ }
++
++ /* set allmulti level to new slave */
++ if (dev->flags & IFF_ALLMULTI) {
++ err = dev_set_allmulti(port_dev, 1);
++ if (err) {
++ if (dev->flags & IFF_PROMISC)
++ dev_set_promiscuity(port_dev, -1);
++ goto err_set_slave_promisc;
++ }
++ }
++
+ netif_addr_lock_bh(dev);
+ dev_uc_sync_multiple(port_dev, dev);
+ dev_mc_sync_multiple(port_dev, dev);
+@@ -1261,6 +1278,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
+
+ return 0;
+
++err_set_slave_promisc:
++ __team_option_inst_del_port(team, port);
++
+ err_option_port_add:
+ team_upper_dev_unlink(team, port);
+
+@@ -1306,6 +1326,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
+
+ team_port_disable(team, port);
+ list_del_rcu(&port->list);
++
++ if (dev->flags & IFF_PROMISC)
++ dev_set_promiscuity(port_dev, -1);
++ if (dev->flags & IFF_ALLMULTI)
++ dev_set_allmulti(port_dev, -1);
++
+ team_upper_dev_unlink(team, port);
+ netdev_rx_handler_unregister(port_dev);
+ team_port_disable_netpoll(port);
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+index 1f38c338ca7a..2a25996d058d 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+@@ -672,7 +672,6 @@ enum rt2x00_state_flags {
+ CONFIG_CHANNEL_HT40,
+ CONFIG_POWERSAVING,
+ CONFIG_HT_DISABLED,
+- CONFIG_QOS_DISABLED,
+ CONFIG_MONITORING,
+
+ /*
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+index 6fe0c6abe0d6..84728c281f46 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+@@ -670,18 +670,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
+ rt2x00dev->intf_associated--;
+
+ rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
+-
+- clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
+ }
+
+- /*
+- * Check for access point which do not support 802.11e . We have to
+- * generate data frames sequence number in S/W for such AP, because
+- * of H/W bug.
+- */
+- if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
+- set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
+-
+ /*
+ * When the erp information has changed, we should perform
+ * additional configuration steps. For all other changes we are done.
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+index e1660b92b20c..1b0f2da8a10d 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+@@ -200,15 +200,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
+ if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
+ /*
+ * rt2800 has a H/W (or F/W) bug, device incorrectly increase
+- * seqno on retransmited data (non-QOS) frames. To workaround
+- * the problem let's generate seqno in software if QOS is
+- * disabled.
++ * seqno on retransmitted data (non-QOS) and management frames.
++ * To workaround the problem let's generate seqno in software.
++ * Except for beacons which are transmitted periodically by H/W
++ * hence hardware has to assign seqno for them.
+ */
+- if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
+- __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+- else
++ if (ieee80211_is_beacon(hdr->frame_control)) {
++ __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+ /* H/W will generate sequence number */
+ return;
++ }
++
++ __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+ }
+
+ /*
+diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
+index 89b1f1af2fd4..31d31aad3de1 100644
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -2164,7 +2164,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
+ FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
+ fc_rport_state(rdata));
+
+- rdata->flags &= ~FC_RP_STARTED;
+ fc_rport_enter_delete(rdata, RPORT_EV_STOP);
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 37d366696d21..c89f0e129f58 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -2050,8 +2050,12 @@ out:
+ blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
+ break;
+ default:
++ if (unlikely(!scsi_device_online(sdev)))
++ scsi_req(req)->result = DID_NO_CONNECT << 16;
++ else
++ scsi_req(req)->result = DID_ERROR << 16;
+ /*
+- * Make sure to release all allocated ressources when
++ * Make sure to release all allocated resources when
+ * we hit an error, as we will never see this command
+ * again.
+ */
+diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
+index 9a0a96329a55..009c5277387b 100644
+--- a/drivers/staging/comedi/drivers/ni_usb6501.c
++++ b/drivers/staging/comedi/drivers/ni_usb6501.c
+@@ -472,10 +472,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
+
+ size = usb_endpoint_maxp(devpriv->ep_tx);
+ devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
+- if (!devpriv->usb_tx_buf) {
+- kfree(devpriv->usb_rx_buf);
++ if (!devpriv->usb_tx_buf)
+ return -ENOMEM;
+- }
+
+ return 0;
+ }
+@@ -527,6 +525,9 @@ static int ni6501_auto_attach(struct comedi_device *dev,
+ if (!devpriv)
+ return -ENOMEM;
+
++ mutex_init(&devpriv->mut);
++ usb_set_intfdata(intf, devpriv);
++
+ ret = ni6501_find_endpoints(dev);
+ if (ret)
+ return ret;
+@@ -535,9 +536,6 @@ static int ni6501_auto_attach(struct comedi_device *dev,
+ if (ret)
+ return ret;
+
+- mutex_init(&devpriv->mut);
+- usb_set_intfdata(intf, devpriv);
+-
+ ret = comedi_alloc_subdevices(dev, 2);
+ if (ret)
+ return ret;
+diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
+index a004aed0147a..1800eb3ae017 100644
+--- a/drivers/staging/comedi/drivers/vmk80xx.c
++++ b/drivers/staging/comedi/drivers/vmk80xx.c
+@@ -691,10 +691,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
+
+ size = usb_endpoint_maxp(devpriv->ep_tx);
+ devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
+- if (!devpriv->usb_tx_buf) {
+- kfree(devpriv->usb_rx_buf);
++ if (!devpriv->usb_tx_buf)
+ return -ENOMEM;
+- }
+
+ return 0;
+ }
+@@ -809,6 +807,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
+
+ devpriv->model = board->model;
+
++ sema_init(&devpriv->limit_sem, 8);
++
+ ret = vmk80xx_find_usb_endpoints(dev);
+ if (ret)
+ return ret;
+@@ -817,8 +817,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
+ if (ret)
+ return ret;
+
+- sema_init(&devpriv->limit_sem, 8);
+-
+ usb_set_intfdata(intf, devpriv);
+
+ if (devpriv->model == VMK8055_MODEL)
+diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
+index 31a195d1bf05..f58c80327ba5 100644
+--- a/drivers/staging/iio/adc/ad7192.c
++++ b/drivers/staging/iio/adc/ad7192.c
+@@ -109,10 +109,10 @@
+ #define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */
+ #define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */
+
+-#define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */
+-#define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */
+-#define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */
+-#define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */
++#define AD7193_CH_AIN1P_AIN2M 0x001 /* AIN1(+) - AIN2(-) */
++#define AD7193_CH_AIN3P_AIN4M 0x002 /* AIN3(+) - AIN4(-) */
++#define AD7193_CH_AIN5P_AIN6M 0x004 /* AIN5(+) - AIN6(-) */
++#define AD7193_CH_AIN7P_AIN8M 0x008 /* AIN7(+) - AIN8(-) */
+ #define AD7193_CH_TEMP 0x100 /* Temp senseor */
+ #define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */
+ #define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */
+diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
+index 70612da64a8b..7ae774ef9da3 100644
+--- a/drivers/staging/iio/meter/ade7854.c
++++ b/drivers/staging/iio/meter/ade7854.c
+@@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644,
+ static IIO_DEV_ATTR_IPEAK(0644,
+ ade7854_read_32bit,
+ ade7854_write_32bit,
+- ADE7854_VPEAK);
++ ADE7854_IPEAK);
+ static IIO_DEV_ATTR_APHCAL(0644,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index d7c22ae5c368..0e93ac888a5f 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -918,8 +918,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
+ u64 start, u64 size, u64 end,
+ u64 userspace_addr, int perm)
+ {
+- struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
++ struct vhost_umem_node *tmp, *node;
+
++ if (!size)
++ return -EFAULT;
++
++ node = kmalloc(sizeof(*node), GFP_ATOMIC);
+ if (!node)
+ return -ENOMEM;
+
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index f29cdb1cdeb7..7b7ab10a9db1 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1189,6 +1189,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
+ }
+
+ struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
++void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr);
+ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
+
+ #define CIFS_CACHE_READ_FLG 1
+@@ -1693,6 +1694,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
+ #endif /* CONFIG_CIFS_ACL */
+
+ void cifs_oplock_break(struct work_struct *work);
++void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
+
+ extern const struct slow_work_ops cifs_oplock_break_ops;
+ extern struct workqueue_struct *cifsiod_wq;
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index cd69c1e9750f..48ea9dfd5f02 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -358,12 +358,30 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
+ return cifs_file;
+ }
+
+-/*
+- * Release a reference on the file private data. This may involve closing
+- * the filehandle out on the server. Must be called without holding
+- * tcon->open_file_lock and cifs_file->file_info_lock.
++/**
++ * cifsFileInfo_put - release a reference of file priv data
++ *
++ * Always potentially wait for oplock handler. See _cifsFileInfo_put().
+ */
+ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
++{
++ _cifsFileInfo_put(cifs_file, true);
++}
++
++/**
++ * _cifsFileInfo_put - release a reference of file priv data
++ *
++ * This may involve closing the filehandle @cifs_file out on the
++ * server. Must be called without holding tcon->open_file_lock and
++ * cifs_file->file_info_lock.
++ *
++ * If @wait_for_oplock_handler is true and we are releasing the last
++ * reference, wait for any running oplock break handler of the file
++ * and cancel any pending one. If calling this function from the
++ * oplock break handler, you need to pass false.
++ *
++ */
++void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
+ {
+ struct inode *inode = d_inode(cifs_file->dentry);
+ struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
+@@ -411,7 +429,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+
+ spin_unlock(&tcon->open_file_lock);
+
+- oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
++ oplock_break_cancelled = wait_oplock_handler ?
++ cancel_work_sync(&cifs_file->oplock_break) : false;
+
+ if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
+ struct TCP_Server_Info *server = tcon->ses->server;
+@@ -4136,6 +4155,7 @@ void cifs_oplock_break(struct work_struct *work)
+ cinode);
+ cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+ }
++ _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
+ cifs_done_oplock_break(cinode);
+ }
+
1449 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
1450 +index bcab30d4a6c7..76f1649ab444 100644
1451 +--- a/fs/cifs/misc.c
1452 ++++ b/fs/cifs/misc.c
1453 +@@ -486,8 +486,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
1454 + CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
1455 + &pCifsInode->flags);
1456 +
1457 +- queue_work(cifsoplockd_wq,
1458 +- &netfile->oplock_break);
1459 ++ cifs_queue_oplock_break(netfile);
1460 + netfile->oplock_break_cancelled = false;
1461 +
1462 + spin_unlock(&tcon->open_file_lock);
1463 +@@ -584,6 +583,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
1464 + spin_unlock(&cinode->writers_lock);
1465 + }
1466 +
1467 ++/**
1468 ++ * cifs_queue_oplock_break - queue the oplock break handler for cfile
1469 ++ *
1470 ++ * This function is called from the demultiplex thread when it
1471 ++ * receives an oplock break for @cfile.
1472 ++ *
1473 ++ * Assumes the tcon->open_file_lock is held.
1474 ++ * Assumes cfile->file_info_lock is NOT held.
1475 ++ */
1476 ++void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
1477 ++{
1478 ++ /*
1479 ++ * Bump the handle refcount now while we hold the
1480 ++ * open_file_lock to enforce the validity of it for the oplock
1481 ++ * break handler. The matching put is done at the end of the
1482 ++ * handler.
1483 ++ */
1484 ++ cifsFileInfo_get(cfile);
1485 ++
1486 ++ queue_work(cifsoplockd_wq, &cfile->oplock_break);
1487 ++}
1488 ++
1489 + void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
1490 + {
1491 + clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
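
The get-before-queue / put-at-end-of-handler discipline introduced by cifs_queue_oplock_break() is a general pattern for deferred work on refcounted objects. A minimal sketch using kref, with hypothetical names:

        #include <linux/workqueue.h>
        #include <linux/kref.h>
        #include <linux/slab.h>

        struct obj {
                struct kref ref;
                struct work_struct work;
        };

        static void obj_release(struct kref *ref)
        {
                kfree(container_of(ref, struct obj, ref));
        }

        static void obj_work_fn(struct work_struct *work)
        {
                struct obj *o = container_of(work, struct obj, work);

                /* ... handle the event ... */
                kref_put(&o->ref, obj_release);  /* matching put */
        }

        static void obj_queue(struct obj *o)
        {
                kref_get(&o->ref);               /* pin the object across the handler */
                queue_work(system_wq, &o->work);
        }
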
1492 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
1493 +index a97a0e0b1a74..31f01f09d25a 100644
1494 +--- a/fs/cifs/smb2misc.c
1495 ++++ b/fs/cifs/smb2misc.c
1496 +@@ -517,7 +517,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
1497 + clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
1498 + &cinode->flags);
1499 +
1500 +- queue_work(cifsoplockd_wq, &cfile->oplock_break);
1501 ++ cifs_queue_oplock_break(cfile);
1502 + kfree(lw);
1503 + return true;
1504 + }
1505 +@@ -661,8 +661,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
1506 + CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
1507 + &cinode->flags);
1508 + spin_unlock(&cfile->file_info_lock);
1509 +- queue_work(cifsoplockd_wq,
1510 +- &cfile->oplock_break);
1511 ++
1512 ++ cifs_queue_oplock_break(cfile);
1513 +
1514 + spin_unlock(&tcon->open_file_lock);
1515 + spin_unlock(&cifs_tcp_ses_lock);
1516 +diff --git a/fs/direct-io.c b/fs/direct-io.c
1517 +index 2c90d541f527..30bf22c989de 100644
1518 +--- a/fs/direct-io.c
1519 ++++ b/fs/direct-io.c
1520 +@@ -219,6 +219,27 @@ static inline struct page *dio_get_page(struct dio *dio,
1521 + return dio->pages[sdio->head];
1522 + }
1523 +
1524 ++/*
1525 ++ * Warn about a page cache invalidation failure during a direct io write.
1526 ++ */
1527 ++void dio_warn_stale_pagecache(struct file *filp)
1528 ++{
1529 ++ static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
1530 ++ char pathname[128];
1531 ++ struct inode *inode = file_inode(filp);
1532 ++ char *path;
1533 ++
1534 ++ errseq_set(&inode->i_mapping->wb_err, -EIO);
1535 ++ if (__ratelimit(&_rs)) {
1536 ++ path = file_path(filp, pathname, sizeof(pathname));
1537 ++ if (IS_ERR(path))
1538 ++ path = "(unknown)";
1539 ++ pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
1540 ++ pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
1541 ++ current->comm);
1542 ++ }
1543 ++}
1544 ++
1545 + /**
1546 + * dio_complete() - called when all DIO BIO I/O has been completed
1547 + * @offset: the byte offset in the file of the completed operation
1548 +@@ -290,7 +311,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
1549 + err = invalidate_inode_pages2_range(dio->inode->i_mapping,
1550 + offset >> PAGE_SHIFT,
1551 + (offset + ret - 1) >> PAGE_SHIFT);
1552 +- WARN_ON_ONCE(err);
1553 ++ if (err)
1554 ++ dio_warn_stale_pagecache(dio->iocb->ki_filp);
1555 + }
1556 +
1557 + if (!(dio->flags & DIO_SKIP_DIO_COUNT))
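
dio_warn_stale_pagecache() bounds its output to one burst per day via the ratelimit helpers; the same shape fits any repeated-failure warning. A sketch under that assumption:

        #include <linux/ratelimit.h>
        #include <linux/printk.h>

        static void warn_rarely(void)
        {
                /* allow DEFAULT_RATELIMIT_BURST messages per 86400 * HZ (one day) */
                static DEFINE_RATELIMIT_STATE(rs, 86400 * HZ,
                                              DEFAULT_RATELIMIT_BURST);

                if (__ratelimit(&rs))
                        pr_crit("repeated failure detected\n");
        }
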
1558 +diff --git a/fs/iomap.c b/fs/iomap.c
1559 +index 8f7673a69273..467d98bf7054 100644
1560 +--- a/fs/iomap.c
1561 ++++ b/fs/iomap.c
1562 +@@ -753,7 +753,8 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
1563 + err = invalidate_inode_pages2_range(inode->i_mapping,
1564 + offset >> PAGE_SHIFT,
1565 + (offset + dio->size - 1) >> PAGE_SHIFT);
1566 +- WARN_ON_ONCE(err);
1567 ++ if (err)
1568 ++ dio_warn_stale_pagecache(iocb->ki_filp);
1569 + }
1570 +
1571 + inode_dio_end(file_inode(iocb->ki_filp));
1572 +@@ -1010,9 +1011,16 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1573 + if (ret)
1574 + goto out_free_dio;
1575 +
1576 ++ /*
1577 ++ * Try to invalidate cache pages for the range we're direct
1578 ++ * writing. If this invalidation fails, tough, the write will
1579 ++ * still work, but racing two incompatible write paths is a
1580 ++ * pretty crazy thing to do, so we don't support it 100%.
1581 ++ */
1582 + ret = invalidate_inode_pages2_range(mapping,
1583 + start >> PAGE_SHIFT, end >> PAGE_SHIFT);
1584 +- WARN_ON_ONCE(ret);
1585 ++ if (ret)
1586 ++ dio_warn_stale_pagecache(iocb->ki_filp);
1587 + ret = 0;
1588 +
1589 + if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
1590 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
1591 +index 5e63c459dc61..309d24118f9a 100644
1592 +--- a/fs/proc/task_mmu.c
1593 ++++ b/fs/proc/task_mmu.c
1594 +@@ -1160,6 +1160,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1595 + count = -EINTR;
1596 + goto out_mm;
1597 + }
1598 ++ /*
1599 ++				 * Avoid modifying vma->vm_flags
1600 ++ * without locked ops while the
1601 ++ * coredump reads the vm_flags.
1602 ++ */
1603 ++ if (!mmget_still_valid(mm)) {
1604 ++ /*
1605 ++					 * Silently return "count"
1606 ++					 * as if get_task_mm()
1607 ++					 * had failed. FIXME: should
1608 ++					 * this function return
1609 ++					 * -ESRCH, the way it does
1610 ++					 * when get_proc_task()
1611 ++					 * fails?
1612 ++ */
1613 ++ up_write(&mm->mmap_sem);
1614 ++ goto out_mm;
1615 ++ }
1616 + for (vma = mm->mmap; vma; vma = vma->vm_next) {
1617 + vma->vm_flags &= ~VM_SOFTDIRTY;
1618 + vma_set_page_prot(vma);
1619 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
1620 +index 5f10052d2671..7a908d683258 100644
1621 +--- a/fs/userfaultfd.c
1622 ++++ b/fs/userfaultfd.c
1623 +@@ -627,6 +627,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
1624 +
1625 + /* the various vma->vm_userfaultfd_ctx still points to it */
1626 + down_write(&mm->mmap_sem);
1627 ++ /* no task can run (and in turn coredump) yet */
1628 ++ VM_WARN_ON(!mmget_still_valid(mm));
1629 + for (vma = mm->mmap; vma; vma = vma->vm_next)
1630 + if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
1631 + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
1632 +@@ -867,6 +869,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
1633 + * taking the mmap_sem for writing.
1634 + */
1635 + down_write(&mm->mmap_sem);
1636 ++ if (!mmget_still_valid(mm))
1637 ++ goto skip_mm;
1638 + prev = NULL;
1639 + for (vma = mm->mmap; vma; vma = vma->vm_next) {
1640 + cond_resched();
1641 +@@ -889,6 +893,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
1642 + vma->vm_flags = new_flags;
1643 + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
1644 + }
1645 ++skip_mm:
1646 + up_write(&mm->mmap_sem);
1647 + mmput(mm);
1648 + wakeup:
1649 +@@ -1327,6 +1332,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1650 + goto out;
1651 +
1652 + down_write(&mm->mmap_sem);
1653 ++ if (!mmget_still_valid(mm))
1654 ++ goto out_unlock;
1655 + vma = find_vma_prev(mm, start, &prev);
1656 + if (!vma)
1657 + goto out_unlock;
1658 +@@ -1514,6 +1521,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1659 + goto out;
1660 +
1661 + down_write(&mm->mmap_sem);
1662 ++ if (!mmget_still_valid(mm))
1663 ++ goto out_unlock;
1664 + vma = find_vma_prev(mm, start, &prev);
1665 + if (!vma)
1666 + goto out_unlock;
1667 +diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
1668 +index ea66f04f46f7..e4265db08e4b 100644
1669 +--- a/fs/xfs/libxfs/xfs_attr.c
1670 ++++ b/fs/xfs/libxfs/xfs_attr.c
1671 +@@ -212,6 +212,7 @@ xfs_attr_set(
1672 + int flags)
1673 + {
1674 + struct xfs_mount *mp = dp->i_mount;
1675 ++ struct xfs_buf *leaf_bp = NULL;
1676 + struct xfs_da_args args;
1677 + struct xfs_defer_ops dfops;
1678 + struct xfs_trans_res tres;
1679 +@@ -327,9 +328,16 @@ xfs_attr_set(
1680 + * GROT: another possible req'mt for a double-split btree op.
1681 + */
1682 + xfs_defer_init(args.dfops, args.firstblock);
1683 +- error = xfs_attr_shortform_to_leaf(&args);
1684 ++ error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
1685 + if (error)
1686 + goto out_defer_cancel;
1687 ++ /*
1688 ++ * Prevent the leaf buffer from being unlocked so that a
1689 ++ * concurrent AIL push cannot grab the half-baked leaf
1690 ++ * buffer and run into problems with the write verifier.
1691 ++ */
1692 ++ xfs_trans_bhold(args.trans, leaf_bp);
1693 ++ xfs_defer_bjoin(args.dfops, leaf_bp);
1694 + xfs_defer_ijoin(args.dfops, dp);
1695 + error = xfs_defer_finish(&args.trans, args.dfops);
1696 + if (error)
1697 +@@ -337,13 +345,14 @@ xfs_attr_set(
1698 +
1699 + /*
1700 + * Commit the leaf transformation. We'll need another (linked)
1701 +- * transaction to add the new attribute to the leaf.
1702 ++ * transaction to add the new attribute to the leaf, which
1703 ++ * means that we have to hold & join the leaf buffer here too.
1704 + */
1705 +-
1706 + error = xfs_trans_roll_inode(&args.trans, dp);
1707 + if (error)
1708 + goto out;
1709 +-
1710 ++ xfs_trans_bjoin(args.trans, leaf_bp);
1711 ++ leaf_bp = NULL;
1712 + }
1713 +
1714 + if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
1715 +@@ -374,8 +383,9 @@ xfs_attr_set(
1716 +
1717 + out_defer_cancel:
1718 + xfs_defer_cancel(&dfops);
1719 +- args.trans = NULL;
1720 + out:
1721 ++ if (leaf_bp)
1722 ++ xfs_trans_brelse(args.trans, leaf_bp);
1723 + if (args.trans)
1724 + xfs_trans_cancel(args.trans);
1725 + xfs_iunlock(dp, XFS_ILOCK_EXCL);
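
The lifecycle of leaf_bp across the two transactions is the crux of this fix. Condensed from the hunks above (error handling elided), the sequence is:

        error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);  /* buffer locked */

        xfs_trans_bhold(args.trans, leaf_bp);  /* keep it locked over commit */
        xfs_defer_bjoin(args.dfops, leaf_bp);  /* relog it on every roll */
        xfs_defer_ijoin(args.dfops, dp);
        error = xfs_defer_finish(&args.trans, args.dfops);

        error = xfs_trans_roll_inode(&args.trans, dp);
        xfs_trans_bjoin(args.trans, leaf_bp);  /* hand it to the new transaction */
        leaf_bp = NULL;                        /* now owned by the transaction */
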
1726 +diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
1727 +index 40e53a4fc0a6..73a541755d5b 100644
1728 +--- a/fs/xfs/libxfs/xfs_attr_leaf.c
1729 ++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
1730 +@@ -739,10 +739,13 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
1731 + }
1732 +
1733 + /*
1734 +- * Convert from using the shortform to the leaf.
1735 ++ * Convert from using the shortform to the leaf. On success, return the
1736 ++ * buffer so that we can keep it locked until we're totally done with it.
1737 + */
1738 + int
1739 +-xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
1740 ++xfs_attr_shortform_to_leaf(
1741 ++ struct xfs_da_args *args,
1742 ++ struct xfs_buf **leaf_bp)
1743 + {
1744 + xfs_inode_t *dp;
1745 + xfs_attr_shortform_t *sf;
1746 +@@ -821,7 +824,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
1747 + sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
1748 + }
1749 + error = 0;
1750 +-
1751 ++ *leaf_bp = bp;
1752 + out:
1753 + kmem_free(tmpbuffer);
1754 + return error;
1755 +diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
1756 +index f7dda0c237b0..894124efb421 100644
1757 +--- a/fs/xfs/libxfs/xfs_attr_leaf.h
1758 ++++ b/fs/xfs/libxfs/xfs_attr_leaf.h
1759 +@@ -48,7 +48,8 @@ void xfs_attr_shortform_create(struct xfs_da_args *args);
1760 + void xfs_attr_shortform_add(struct xfs_da_args *args, int forkoff);
1761 + int xfs_attr_shortform_lookup(struct xfs_da_args *args);
1762 + int xfs_attr_shortform_getvalue(struct xfs_da_args *args);
1763 +-int xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
1764 ++int xfs_attr_shortform_to_leaf(struct xfs_da_args *args,
1765 ++ struct xfs_buf **leaf_bp);
1766 + int xfs_attr_shortform_remove(struct xfs_da_args *args);
1767 + int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
1768 + int xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes);
1769 +diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
1770 +index 072ebfe1d6ae..087fea02c389 100644
1771 +--- a/fs/xfs/libxfs/xfs_defer.c
1772 ++++ b/fs/xfs/libxfs/xfs_defer.c
1773 +@@ -249,6 +249,10 @@ xfs_defer_trans_roll(
1774 + for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
1775 + xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);
1776 +
1777 ++ /* Hold the (previously bjoin'd) buffer locked across the roll. */
1778 ++ for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
1779 ++ xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);
1780 ++
1781 + trace_xfs_defer_trans_roll((*tp)->t_mountp, dop);
1782 +
1783 + /* Roll the transaction. */
1784 +@@ -264,6 +268,12 @@ xfs_defer_trans_roll(
1785 + for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
1786 + xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);
1787 +
1788 ++ /* Rejoin the buffers and dirty them so the log moves forward. */
1789 ++ for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
1790 ++ xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
1791 ++ xfs_trans_bhold(*tp, dop->dop_bufs[i]);
1792 ++ }
1793 ++
1794 + return error;
1795 + }
1796 +
1797 +@@ -295,6 +305,31 @@ xfs_defer_ijoin(
1798 + }
1799 + }
1800 +
1801 ++ ASSERT(0);
1802 ++ return -EFSCORRUPTED;
1803 ++}
1804 ++
1805 ++/*
1806 ++ * Add this buffer to the deferred op. Each joined buffer is relogged
1807 ++ * each time we roll the transaction.
1808 ++ */
1809 ++int
1810 ++xfs_defer_bjoin(
1811 ++ struct xfs_defer_ops *dop,
1812 ++ struct xfs_buf *bp)
1813 ++{
1814 ++ int i;
1815 ++
1816 ++ for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
1817 ++ if (dop->dop_bufs[i] == bp)
1818 ++ return 0;
1819 ++ else if (dop->dop_bufs[i] == NULL) {
1820 ++ dop->dop_bufs[i] = bp;
1821 ++ return 0;
1822 ++ }
1823 ++ }
1824 ++
1825 ++ ASSERT(0);
1826 + return -EFSCORRUPTED;
1827 + }
1828 +
1829 +@@ -493,9 +528,7 @@ xfs_defer_init(
1830 + struct xfs_defer_ops *dop,
1831 + xfs_fsblock_t *fbp)
1832 + {
1833 +- dop->dop_committed = false;
1834 +- dop->dop_low = false;
1835 +- memset(&dop->dop_inodes, 0, sizeof(dop->dop_inodes));
1836 ++ memset(dop, 0, sizeof(struct xfs_defer_ops));
1837 + *fbp = NULLFSBLOCK;
1838 + INIT_LIST_HEAD(&dop->dop_intake);
1839 + INIT_LIST_HEAD(&dop->dop_pending);
1840 +diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
1841 +index d4f046dd44bd..045beacdd37d 100644
1842 +--- a/fs/xfs/libxfs/xfs_defer.h
1843 ++++ b/fs/xfs/libxfs/xfs_defer.h
1844 +@@ -59,6 +59,7 @@ enum xfs_defer_ops_type {
1845 + };
1846 +
1847 + #define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */
1848 ++#define XFS_DEFER_OPS_NR_BUFS 2 /* join up to two buffers */
1849 +
1850 + struct xfs_defer_ops {
1851 + bool dop_committed; /* did any trans commit? */
1852 +@@ -66,8 +67,9 @@ struct xfs_defer_ops {
1853 + struct list_head dop_intake; /* unlogged pending work */
1854 + struct list_head dop_pending; /* logged pending work */
1855 +
1856 +- /* relog these inodes with each roll */
1857 ++ /* relog these with each roll */
1858 + struct xfs_inode *dop_inodes[XFS_DEFER_OPS_NR_INODES];
1859 ++ struct xfs_buf *dop_bufs[XFS_DEFER_OPS_NR_BUFS];
1860 + };
1861 +
1862 + void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
1863 +@@ -77,6 +79,7 @@ void xfs_defer_cancel(struct xfs_defer_ops *dop);
1864 + void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp);
1865 + bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
1866 + int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip);
1867 ++int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp);
1868 +
1869 + /* Description of a deferred type. */
1870 + struct xfs_defer_op_type {
1871 +diff --git a/include/linux/fs.h b/include/linux/fs.h
1872 +index f6a577edec67..dafac283b0ff 100644
1873 +--- a/include/linux/fs.h
1874 ++++ b/include/linux/fs.h
1875 +@@ -2965,6 +2965,7 @@ enum {
1876 + };
1877 +
1878 + void dio_end_io(struct bio *bio);
1879 ++void dio_warn_stale_pagecache(struct file *filp);
1880 +
1881 + ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1882 + struct block_device *bdev, struct iov_iter *iter,
1883 +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
1884 +index bd2684700b74..520702b82134 100644
1885 +--- a/include/linux/kprobes.h
1886 ++++ b/include/linux/kprobes.h
1887 +@@ -198,6 +198,7 @@ struct kretprobe_instance {
1888 + struct kretprobe *rp;
1889 + kprobe_opcode_t *ret_addr;
1890 + struct task_struct *task;
1891 ++ void *fp;
1892 + char data[0];
1893 + };
1894 +
1895 +diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
1896 +index 3d49b91b674d..ef4ae0a545fe 100644
1897 +--- a/include/linux/sched/mm.h
1898 ++++ b/include/linux/sched/mm.h
1899 +@@ -57,6 +57,27 @@ static inline void mmdrop_async(struct mm_struct *mm)
1900 + }
1901 + }
1902 +
1903 ++/*
1904 ++ * This has to be called after a get_task_mm()/mmget_not_zero()
1905 ++ * followed by taking the mmap_sem for writing before modifying the
1906 ++ * vmas or anything the coredump pretends not to change from under it.
1907 ++ *
1908 ++ * NOTE: find_extend_vma() called from GUP context is the only place
1909 ++ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
1910 ++ * for reading and outside the context of the process, so it is also
1911 ++ * the only case that holds the mmap_sem for reading that must call
1912 ++ * this function. Generally, if the mmap_sem is held for reading,
1913 ++ * there is no need for this check after get_task_mm()/mmget_not_zero().
1914 ++ *
1915 ++ * This function can be obsoleted and the check removed once the
1916 ++ * coredump code holds the mmap_sem for writing before invoking
1917 ++ * the ->core_dump methods.
1918 ++ */
1919 ++static inline bool mmget_still_valid(struct mm_struct *mm)
1920 ++{
1921 ++ return likely(!mm->core_state);
1922 ++}
1923 ++
1924 + /**
1925 + * mmget() - Pin the address space associated with a &struct mm_struct.
1926 + * @mm: The address space to pin.
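
Every new caller of mmget_still_valid() in this patch follows the same shape: pin the mm, take mmap_sem for writing, then bail out if a coredump raced in. A minimal sketch of that pattern:

        #include <linux/sched/mm.h>
        #include <linux/mm.h>

        static int modify_vmas(struct task_struct *task)
        {
                struct mm_struct *mm = get_task_mm(task);

                if (!mm)
                        return -ESRCH;

                down_write(&mm->mmap_sem);
                if (!mmget_still_valid(mm))
                        goto out_unlock;  /* coredump in progress: don't touch vmas */

                /* ... safe to modify vma->vm_flags etc. here ... */

        out_unlock:
                up_write(&mm->mmap_sem);
                mmput(mm);
                return 0;  /* silent return on the coredump race, as in the patch */
        }
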
1927 +diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
1928 +index 335cf7851f12..008f64823c41 100644
1929 +--- a/include/net/inet_frag.h
1930 ++++ b/include/net/inet_frag.h
1931 +@@ -77,8 +77,8 @@ struct inet_frag_queue {
1932 + struct timer_list timer;
1933 + spinlock_t lock;
1934 + refcount_t refcnt;
1935 +- struct sk_buff *fragments; /* Used in IPv6. */
1936 +- struct rb_root rb_fragments; /* Used in IPv4. */
1937 ++	struct sk_buff		*fragments;  /* Used in 6lowpan IPv6. */
1938 ++ struct rb_root rb_fragments; /* Used in IPv4/IPv6. */
1939 + struct sk_buff *fragments_tail;
1940 + struct sk_buff *last_run_head;
1941 + ktime_t stamp;
1942 +@@ -153,4 +153,16 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
1943 +
1944 + extern const u8 ip_frag_ecn_table[16];
1945 +
1946 ++/* Return values of inet_frag_queue_insert() */
1947 ++#define IPFRAG_OK 0
1948 ++#define IPFRAG_DUP 1
1949 ++#define IPFRAG_OVERLAP 2
1950 ++int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
1951 ++ int offset, int end);
1952 ++void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
1953 ++ struct sk_buff *parent);
1954 ++void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
1955 ++ void *reasm_data);
1956 ++struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
1957 ++
1958 + #endif
1959 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
1960 +index fa87a62e9bd3..6294d20a5f0e 100644
1961 +--- a/include/net/ipv6.h
1962 ++++ b/include/net/ipv6.h
1963 +@@ -512,35 +512,6 @@ static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
1964 + }
1965 + #endif
1966 +
1967 +-struct inet_frag_queue;
1968 +-
1969 +-enum ip6_defrag_users {
1970 +- IP6_DEFRAG_LOCAL_DELIVER,
1971 +- IP6_DEFRAG_CONNTRACK_IN,
1972 +- __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
1973 +- IP6_DEFRAG_CONNTRACK_OUT,
1974 +- __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
1975 +- IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
1976 +- __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
1977 +-};
1978 +-
1979 +-void ip6_frag_init(struct inet_frag_queue *q, const void *a);
1980 +-extern const struct rhashtable_params ip6_rhash_params;
1981 +-
1982 +-/*
1983 +- * Equivalent of ipv4 struct ip
1984 +- */
1985 +-struct frag_queue {
1986 +- struct inet_frag_queue q;
1987 +-
1988 +- int iif;
1989 +- unsigned int csum;
1990 +- __u16 nhoffset;
1991 +- u8 ecn;
1992 +-};
1993 +-
1994 +-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
1995 +-
1996 + static inline bool ipv6_addr_any(const struct in6_addr *a)
1997 + {
1998 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
1999 +diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
2000 +new file mode 100644
2001 +index 000000000000..28aa9b30aece
2002 +--- /dev/null
2003 ++++ b/include/net/ipv6_frag.h
2004 +@@ -0,0 +1,111 @@
2005 ++/* SPDX-License-Identifier: GPL-2.0 */
2006 ++#ifndef _IPV6_FRAG_H
2007 ++#define _IPV6_FRAG_H
2008 ++#include <linux/kernel.h>
2009 ++#include <net/addrconf.h>
2010 ++#include <net/ipv6.h>
2011 ++#include <net/inet_frag.h>
2012 ++
2013 ++enum ip6_defrag_users {
2014 ++ IP6_DEFRAG_LOCAL_DELIVER,
2015 ++ IP6_DEFRAG_CONNTRACK_IN,
2016 ++ __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
2017 ++ IP6_DEFRAG_CONNTRACK_OUT,
2018 ++ __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
2019 ++ IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
2020 ++ __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
2021 ++};
2022 ++
2023 ++/*
2024 ++ * Equivalent of ipv4 struct ip
2025 ++ */
2026 ++struct frag_queue {
2027 ++ struct inet_frag_queue q;
2028 ++
2029 ++ int iif;
2030 ++ __u16 nhoffset;
2031 ++ u8 ecn;
2032 ++};
2033 ++
2034 ++#if IS_ENABLED(CONFIG_IPV6)
2035 ++static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
2036 ++{
2037 ++ struct frag_queue *fq = container_of(q, struct frag_queue, q);
2038 ++ const struct frag_v6_compare_key *key = a;
2039 ++
2040 ++ q->key.v6 = *key;
2041 ++ fq->ecn = 0;
2042 ++}
2043 ++
2044 ++static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
2045 ++{
2046 ++ return jhash2(data,
2047 ++ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
2048 ++}
2049 ++
2050 ++static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
2051 ++{
2052 ++ const struct inet_frag_queue *fq = data;
2053 ++
2054 ++ return jhash2((const u32 *)&fq->key.v6,
2055 ++ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
2056 ++}
2057 ++
2058 ++static inline int
2059 ++ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
2060 ++{
2061 ++ const struct frag_v6_compare_key *key = arg->key;
2062 ++ const struct inet_frag_queue *fq = ptr;
2063 ++
2064 ++ return !!memcmp(&fq->key, key, sizeof(*key));
2065 ++}
2066 ++
2067 ++static inline void
2068 ++ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
2069 ++{
2070 ++ struct net_device *dev = NULL;
2071 ++ struct sk_buff *head;
2072 ++
2073 ++ rcu_read_lock();
2074 ++ spin_lock(&fq->q.lock);
2075 ++
2076 ++ if (fq->q.flags & INET_FRAG_COMPLETE)
2077 ++ goto out;
2078 ++
2079 ++ inet_frag_kill(&fq->q);
2080 ++
2081 ++ dev = dev_get_by_index_rcu(net, fq->iif);
2082 ++ if (!dev)
2083 ++ goto out;
2084 ++
2085 ++ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
2086 ++ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
2087 ++
2088 ++ /* Don't send error if the first segment did not arrive. */
2089 ++ if (!(fq->q.flags & INET_FRAG_FIRST_IN))
2090 ++ goto out;
2091 ++
2092 ++	/* sk_buff::dev and sk_buff::rbnode share a union, so we
2093 ++ * pull the head out of the tree in order to be able to
2094 ++ * deal with head->dev.
2095 ++ */
2096 ++ head = inet_frag_pull_head(&fq->q);
2097 ++ if (!head)
2098 ++ goto out;
2099 ++
2100 ++ head->dev = dev;
2101 ++ skb_get(head);
2102 ++ spin_unlock(&fq->q.lock);
2103 ++
2104 ++ icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
2105 ++ kfree_skb(head);
2106 ++ goto out_rcu_unlock;
2107 ++
2108 ++out:
2109 ++ spin_unlock(&fq->q.lock);
2110 ++out_rcu_unlock:
2111 ++ rcu_read_unlock();
2112 ++ inet_frag_put(&fq->q);
2113 ++}
2114 ++#endif
2115 ++#endif
2116 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
2117 +index 5cbad4fb9107..ec11bb986a8b 100644
2118 +--- a/kernel/kprobes.c
2119 ++++ b/kernel/kprobes.c
2120 +@@ -703,7 +703,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
2121 + static int reuse_unused_kprobe(struct kprobe *ap)
2122 + {
2123 + struct optimized_kprobe *op;
2124 +- int ret;
2125 +
2126 + BUG_ON(!kprobe_unused(ap));
2127 + /*
2128 +@@ -717,9 +716,8 @@ static int reuse_unused_kprobe(struct kprobe *ap)
2129 + /* Enable the probe again */
2130 + ap->flags &= ~KPROBE_FLAG_DISABLED;
2131 + /* Optimize it again (remove from op->list) */
2132 +- ret = kprobe_optready(ap);
2133 +- if (ret)
2134 +- return ret;
2135 ++ if (!kprobe_optready(ap))
2136 ++ return -EINVAL;
2137 +
2138 + optimize_kprobe(ap);
2139 + return 0;
2140 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
2141 +index e57be7031cb3..bf694c709b96 100644
2142 +--- a/kernel/locking/lockdep.c
2143 ++++ b/kernel/locking/lockdep.c
2144 +@@ -3650,9 +3650,6 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
2145 + unsigned int depth;
2146 + int i;
2147 +
2148 +- if (unlikely(!debug_locks))
2149 +- return 0;
2150 +-
2151 + depth = curr->lockdep_depth;
2152 + /*
2153 + * This function is about (re)setting the class of a held lock,
2154 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2155 +index 9829ede00498..a5d163903835 100644
2156 +--- a/kernel/sched/fair.c
2157 ++++ b/kernel/sched/fair.c
2158 +@@ -4672,12 +4672,15 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2159 + return HRTIMER_NORESTART;
2160 + }
2161 +
2162 ++extern const u64 max_cfs_quota_period;
2163 ++
2164 + static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2165 + {
2166 + struct cfs_bandwidth *cfs_b =
2167 + container_of(timer, struct cfs_bandwidth, period_timer);
2168 + int overrun;
2169 + int idle = 0;
2170 ++ int count = 0;
2171 +
2172 + raw_spin_lock(&cfs_b->lock);
2173 + for (;;) {
2174 +@@ -4685,6 +4688,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2175 + if (!overrun)
2176 + break;
2177 +
2178 ++ if (++count > 3) {
2179 ++ u64 new, old = ktime_to_ns(cfs_b->period);
2180 ++
2181 ++ new = (old * 147) / 128; /* ~115% */
2182 ++ new = min(new, max_cfs_quota_period);
2183 ++
2184 ++ cfs_b->period = ns_to_ktime(new);
2185 ++
2186 ++ /* since max is 1s, this is limited to 1e9^2, which fits in u64 */
2187 ++ cfs_b->quota *= new;
2188 ++ cfs_b->quota = div64_u64(cfs_b->quota, old);
2189 ++
2190 ++ pr_warn_ratelimited(
2191 ++ "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
2192 ++ smp_processor_id(),
2193 ++ div_u64(new, NSEC_PER_USEC),
2194 ++ div_u64(cfs_b->quota, NSEC_PER_USEC));
2195 ++
2196 ++ /* reset count so we don't come right back in here */
2197 ++ count = 0;
2198 ++ }
2199 ++
2200 + idle = do_sched_cfs_period_timer(cfs_b, overrun);
2201 + }
2202 + if (idle)
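
The 147/128 factor grows the period by roughly 14.8% per adjustment while the quota is rescaled by the same ratio, so the quota/period bandwidth fraction is preserved. A worked example, assuming a hypothetical 100 us period with a 50 us quota:

        #include <linux/math64.h>
        #include <linux/time64.h>

        static void example(void)
        {
                u64 old = 100 * NSEC_PER_USEC;       /* 100000 ns */
                u64 new = (old * 147) / 128;         /* 114843 ns, ~115 us */
                u64 quota = 50 * NSEC_PER_USEC;      /* 50000 ns: 50% of the period */

                quota = div64_u64(quota * new, old); /* 57421 ns: still ~50% of new */
        }
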
2203 +diff --git a/kernel/sysctl.c b/kernel/sysctl.c
2204 +index 34a3b8a262a9..f13601a616ad 100644
2205 +--- a/kernel/sysctl.c
2206 ++++ b/kernel/sysctl.c
2207 +@@ -124,6 +124,7 @@ static int zero;
2208 + static int __maybe_unused one = 1;
2209 + static int __maybe_unused two = 2;
2210 + static int __maybe_unused four = 4;
2211 ++static unsigned long zero_ul;
2212 + static unsigned long one_ul = 1;
2213 + static unsigned long long_max = LONG_MAX;
2214 + static int one_hundred = 100;
2215 +@@ -1682,7 +1683,7 @@ static struct ctl_table fs_table[] = {
2216 + .maxlen = sizeof(files_stat.max_files),
2217 + .mode = 0644,
2218 + .proc_handler = proc_doulongvec_minmax,
2219 +- .extra1 = &zero,
2220 ++ .extra1 = &zero_ul,
2221 + .extra2 = &long_max,
2222 + },
2223 + {
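
The underlying bug: proc_doulongvec_minmax() dereferences .extra1/.extra2 as unsigned long, so pointing .extra1 at the int 'zero' reads eight bytes on 64-bit and can yield a garbage minimum for fs.file-max. The corrected entry, in outline:

        static unsigned long zero_ul;           /* 0, at the width the handler expects */

        {
                .procname       = "file-max",
                .data           = &files_stat.max_files,
                .maxlen         = sizeof(files_stat.max_files),
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
                .extra1         = &zero_ul,     /* unsigned long, not int */
                .extra2         = &long_max,
        },
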
2224 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
2225 +index 9937d7cf2a64..3e92852c8b23 100644
2226 +--- a/kernel/trace/ftrace.c
2227 ++++ b/kernel/trace/ftrace.c
2228 +@@ -33,6 +33,7 @@
2229 + #include <linux/list.h>
2230 + #include <linux/hash.h>
2231 + #include <linux/rcupdate.h>
2232 ++#include <linux/kprobes.h>
2233 +
2234 + #include <trace/events/sched.h>
2235 +
2236 +@@ -6035,7 +6036,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
2237 + tr->ops->func = ftrace_stub;
2238 + }
2239 +
2240 +-static inline void
2241 ++static nokprobe_inline void
2242 + __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
2243 + struct ftrace_ops *ignored, struct pt_regs *regs)
2244 + {
2245 +@@ -6098,11 +6099,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
2246 + {
2247 + __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
2248 + }
2249 ++NOKPROBE_SYMBOL(ftrace_ops_list_func);
2250 + #else
2251 + static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
2252 + {
2253 + __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
2254 + }
2255 ++NOKPROBE_SYMBOL(ftrace_ops_no_ops);
2256 + #endif
2257 +
2258 + /*
2259 +@@ -6132,6 +6135,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
2260 + preempt_enable_notrace();
2261 + trace_clear_recursion(bit);
2262 + }
2263 ++NOKPROBE_SYMBOL(ftrace_ops_assist_func);
2264 +
2265 + /**
2266 + * ftrace_ops_get_func - get the function a trampoline should call
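
NOKPROBE_SYMBOL() places a symbol on the kprobes blacklist so it can never be instrumented, and nokprobe_inline forces inlining so no probeable out-of-line copy exists. A minimal sketch of annotating a call chain this way (hypothetical functions):

        #include <linux/kprobes.h>

        static nokprobe_inline void helper(void)
        {
                /* must never be probed: runs on the kprobe handler path */
        }

        static void entry_point(void)
        {
                helper();
        }
        NOKPROBE_SYMBOL(entry_point);
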
2267 +diff --git a/mm/mmap.c b/mm/mmap.c
2268 +index 00dab291e61d..59fd53b41c9c 100644
2269 +--- a/mm/mmap.c
2270 ++++ b/mm/mmap.c
2271 +@@ -45,6 +45,7 @@
2272 + #include <linux/moduleparam.h>
2273 + #include <linux/pkeys.h>
2274 + #include <linux/oom.h>
2275 ++#include <linux/sched/mm.h>
2276 +
2277 + #include <linux/uaccess.h>
2278 + #include <asm/cacheflush.h>
2279 +@@ -2448,7 +2449,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
2280 + vma = find_vma_prev(mm, addr, &prev);
2281 + if (vma && (vma->vm_start <= addr))
2282 + return vma;
2283 +- if (!prev || expand_stack(prev, addr))
2284 ++ /* don't alter vm_end if the coredump is running */
2285 ++ if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
2286 + return NULL;
2287 + if (prev->vm_flags & VM_LOCKED)
2288 + populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2289 +@@ -2474,6 +2476,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
2290 + return vma;
2291 + if (!(vma->vm_flags & VM_GROWSDOWN))
2292 + return NULL;
2293 ++ /* don't alter vm_start if the coredump is running */
2294 ++ if (!mmget_still_valid(mm))
2295 ++ return NULL;
2296 + start = vma->vm_start;
2297 + if (expand_stack(vma, addr))
2298 + return NULL;
2299 +diff --git a/mm/percpu.c b/mm/percpu.c
2300 +index 3074148b7e0d..0c06e2f549a7 100644
2301 +--- a/mm/percpu.c
2302 ++++ b/mm/percpu.c
2303 +@@ -2507,8 +2507,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2304 + ai->groups[group].base_offset = areas[group] - base;
2305 + }
2306 +
2307 +- pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
2308 +- PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
2309 ++ pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
2310 ++ PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
2311 + ai->dyn_size, ai->unit_size);
2312 +
2313 + rc = pcpu_setup_first_chunk(ai, base);
2314 +@@ -2629,8 +2629,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
2315 + }
2316 +
2317 + /* we're ready, commit */
2318 +- pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
2319 +- unit_pages, psize_str, vm.addr, ai->static_size,
2320 ++ pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
2321 ++ unit_pages, psize_str, ai->static_size,
2322 + ai->reserved_size, ai->dyn_size);
2323 +
2324 + rc = pcpu_setup_first_chunk(ai, vm.addr);
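
Dropping the %p arguments here avoids leaking kernel virtual addresses into dmesg (this predates the pointer hashing later kernels apply to %p). Where an address is genuinely useful in a log, %pK honors kptr_restrict; a one-line sketch:

        pr_info("first chunk @%pK\n", base);  /* hidden unless kptr_restrict permits */
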
2325 +diff --git a/mm/vmstat.c b/mm/vmstat.c
2326 +index 6389e876c7a7..28c45c26f901 100644
2327 +--- a/mm/vmstat.c
2328 ++++ b/mm/vmstat.c
2329 +@@ -1201,13 +1201,8 @@ const char * const vmstat_text[] = {
2330 + #endif
2331 + #endif /* CONFIG_MEMORY_BALLOON */
2332 + #ifdef CONFIG_DEBUG_TLBFLUSH
2333 +-#ifdef CONFIG_SMP
2334 + "nr_tlb_remote_flush",
2335 + "nr_tlb_remote_flush_received",
2336 +-#else
2337 +- "", /* nr_tlb_remote_flush */
2338 +- "", /* nr_tlb_remote_flush_received */
2339 +-#endif /* CONFIG_SMP */
2340 + "nr_tlb_local_flush_all",
2341 + "nr_tlb_local_flush_one",
2342 + #endif /* CONFIG_DEBUG_TLBFLUSH */
2343 +diff --git a/net/atm/lec.c b/net/atm/lec.c
2344 +index 9f2365694ad4..85ce89c8a35c 100644
2345 +--- a/net/atm/lec.c
2346 ++++ b/net/atm/lec.c
2347 +@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
2348 +
2349 + static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
2350 + {
2351 +- if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
2352 ++ if (arg < 0 || arg >= MAX_LEC_ITF)
2353 ++ return -EINVAL;
2354 ++ arg = array_index_nospec(arg, MAX_LEC_ITF);
2355 ++ if (!dev_lec[arg])
2356 + return -EINVAL;
2357 + vcc->proto_data = dev_lec[arg];
2358 + return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
2359 +@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
2360 + i = arg;
2361 + if (arg >= MAX_LEC_ITF)
2362 + return -EINVAL;
2363 ++ i = array_index_nospec(arg, MAX_LEC_ITF);
2364 + if (!dev_lec[i]) {
2365 + int size;
2366 +
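
array_index_nospec() clamps an index to zero under CPU speculation when it is out of range, closing the Spectre-v1 window between the bounds check and the dependent array load. The general pattern, as a sketch with a hypothetical table:

        #include <linux/nospec.h>

        #define TABLE_SIZE 48                   /* hypothetical bound */
        static void *table[TABLE_SIZE];

        static void *table_lookup(int idx)
        {
                if (idx < 0 || idx >= TABLE_SIZE)
                        return NULL;
                /* clamp idx under speculation before the dependent load */
                idx = array_index_nospec(idx, TABLE_SIZE);
                return table[idx];
        }
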
2367 +diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
2368 +index 7637f58c1226..10fa84056cb5 100644
2369 +--- a/net/bridge/br_input.c
2370 ++++ b/net/bridge/br_input.c
2371 +@@ -236,13 +236,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
2372 + /* note: already called with rcu_read_lock */
2373 + static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
2374 + {
2375 +- struct net_bridge_port *p = br_port_get_rcu(skb->dev);
2376 +-
2377 + __br_handle_local_finish(skb);
2378 +
2379 +- BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
2380 +- br_pass_frame_up(skb);
2381 +- return 0;
2382 ++ /* return 1 to signal the okfn() was called so it's ok to use the skb */
2383 ++ return 1;
2384 + }
2385 +
2386 + /*
2387 +@@ -318,10 +315,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
2388 + goto forward;
2389 + }
2390 +
2391 +- /* Deliver packet to local host only */
2392 +- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
2393 +- NULL, skb, skb->dev, NULL, br_handle_local_finish);
2394 +- return RX_HANDLER_CONSUMED;
2395 ++	/* The else clause is hit when nf_hook():
2396 ++	 * - returns < 0 (drop/error), or
2397 ++	 * - returns == 0 (stolen/nf_queue).
2398 ++	 * The okfn() therefore returns 1 to signal the skb is ok to pass up.
2399 ++ */
2400 ++ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
2401 ++ dev_net(skb->dev), NULL, skb, skb->dev, NULL,
2402 ++ br_handle_local_finish) == 1) {
2403 ++ return RX_HANDLER_PASS;
2404 ++ } else {
2405 ++ return RX_HANDLER_CONSUMED;
2406 ++ }
2407 + }
2408 +
2409 + forward:
2410 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
2411 +index 8dc5c8d69bcd..e83048cb53ce 100644
2412 +--- a/net/bridge/br_multicast.c
2413 ++++ b/net/bridge/br_multicast.c
2414 +@@ -2119,7 +2119,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
2415 +
2416 + __br_multicast_open(br, query);
2417 +
2418 +- list_for_each_entry(port, &br->port_list, list) {
2419 ++ rcu_read_lock();
2420 ++ list_for_each_entry_rcu(port, &br->port_list, list) {
2421 + if (port->state == BR_STATE_DISABLED ||
2422 + port->state == BR_STATE_BLOCKING)
2423 + continue;
2424 +@@ -2131,6 +2132,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
2425 + br_multicast_enable(&port->ip6_own_query);
2426 + #endif
2427 + }
2428 ++ rcu_read_unlock();
2429 + }
2430 +
2431 + int br_multicast_toggle(struct net_bridge *br, unsigned long val)
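
list_for_each_entry_rcu() must run inside an RCU read-side critical section unless another lock protects the list, which is what the fix above adds around the port walk. The generic shape, as a self-contained sketch:

        #include <linux/rculist.h>

        struct item {
                struct list_head list;
                int val;
        };

        static int sum_items(struct list_head *head)
        {
                struct item *it;
                int sum = 0;

                rcu_read_lock();        /* readers must not sleep in here */
                list_for_each_entry_rcu(it, head, list)
                        sum += it->val;
                rcu_read_unlock();
                return sum;
        }
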
2432 +diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
2433 +index 2cc224106b69..ec7a5da56129 100644
2434 +--- a/net/ieee802154/6lowpan/reassembly.c
2435 ++++ b/net/ieee802154/6lowpan/reassembly.c
2436 +@@ -25,7 +25,7 @@
2437 +
2438 + #include <net/ieee802154_netdev.h>
2439 + #include <net/6lowpan.h>
2440 +-#include <net/ipv6.h>
2441 ++#include <net/ipv6_frag.h>
2442 + #include <net/inet_frag.h>
2443 +
2444 + #include "6lowpan_i.h"
2445 +diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
2446 +index c9ec1603666b..665f11d7388e 100644
2447 +--- a/net/ipv4/fou.c
2448 ++++ b/net/ipv4/fou.c
2449 +@@ -120,6 +120,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
2450 + struct guehdr *guehdr;
2451 + void *data;
2452 + u16 doffset = 0;
2453 ++ u8 proto_ctype;
2454 +
2455 + if (!fou)
2456 + return 1;
2457 +@@ -211,13 +212,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
2458 + if (unlikely(guehdr->control))
2459 + return gue_control_message(skb, guehdr);
2460 +
2461 ++ proto_ctype = guehdr->proto_ctype;
2462 + __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
2463 + skb_reset_transport_header(skb);
2464 +
2465 + if (iptunnel_pull_offloads(skb))
2466 + goto drop;
2467 +
2468 +- return -guehdr->proto_ctype;
2469 ++ return -proto_ctype;
2470 +
2471 + drop:
2472 + kfree_skb(skb);
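
The defect fixed here is a use-after-free in disguise: guehdr points into the skb's header, and iptunnel_pull_offloads() can reallocate that header (it calls skb_unclone() for GSO skbs), leaving guehdr dangling by the time the return value is computed. Copying the field up front, as the hunk does, is the standard cure; in outline:

        u8 proto_ctype = guehdr->proto_ctype;  /* copy before the header may move */

        __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
        skb_reset_transport_header(skb);

        if (iptunnel_pull_offloads(skb))       /* may skb_unclone() and realloc */
                goto drop;

        return -proto_ctype;                   /* uses the saved copy, not guehdr */
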
2473 +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
2474 +index 6ffee9d2b0e5..481cded81b2d 100644
2475 +--- a/net/ipv4/inet_fragment.c
2476 ++++ b/net/ipv4/inet_fragment.c
2477 +@@ -24,6 +24,62 @@
2478 + #include <net/sock.h>
2479 + #include <net/inet_frag.h>
2480 + #include <net/inet_ecn.h>
2481 ++#include <net/ip.h>
2482 ++#include <net/ipv6.h>
2483 ++
2484 ++/* Use skb->cb to track consecutive/adjacent fragments coming at
2485 ++ * the end of the queue. Nodes in the rb-tree queue will
2486 ++ * contain "runs" of one or more adjacent fragments.
2487 ++ *
2488 ++ * Invariants:
2489 ++ * - next_frag is NULL at the tail of a "run";
2490 ++ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
2491 ++ */
2492 ++struct ipfrag_skb_cb {
2493 ++ union {
2494 ++ struct inet_skb_parm h4;
2495 ++ struct inet6_skb_parm h6;
2496 ++ };
2497 ++ struct sk_buff *next_frag;
2498 ++ int frag_run_len;
2499 ++};
2500 ++
2501 ++#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
2502 ++
2503 ++static void fragcb_clear(struct sk_buff *skb)
2504 ++{
2505 ++ RB_CLEAR_NODE(&skb->rbnode);
2506 ++ FRAG_CB(skb)->next_frag = NULL;
2507 ++ FRAG_CB(skb)->frag_run_len = skb->len;
2508 ++}
2509 ++
2510 ++/* Append skb to the last "run". */
2511 ++static void fragrun_append_to_last(struct inet_frag_queue *q,
2512 ++ struct sk_buff *skb)
2513 ++{
2514 ++ fragcb_clear(skb);
2515 ++
2516 ++ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
2517 ++ FRAG_CB(q->fragments_tail)->next_frag = skb;
2518 ++ q->fragments_tail = skb;
2519 ++}
2520 ++
2521 ++/* Create a new "run" with the skb. */
2522 ++static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
2523 ++{
2524 ++ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
2525 ++ fragcb_clear(skb);
2526 ++
2527 ++ if (q->last_run_head)
2528 ++ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
2529 ++ &q->last_run_head->rbnode.rb_right);
2530 ++ else
2531 ++ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
2532 ++ rb_insert_color(&skb->rbnode, &q->rb_fragments);
2533 ++
2534 ++ q->fragments_tail = skb;
2535 ++ q->last_run_head = skb;
2536 ++}
2537 +
2538 + /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
2539 + * Value : 0xff if frame should be dropped.
2540 +@@ -122,6 +178,28 @@ static void inet_frag_destroy_rcu(struct rcu_head *head)
2541 + kmem_cache_free(f->frags_cachep, q);
2542 + }
2543 +
2544 ++unsigned int inet_frag_rbtree_purge(struct rb_root *root)
2545 ++{
2546 ++ struct rb_node *p = rb_first(root);
2547 ++ unsigned int sum = 0;
2548 ++
2549 ++ while (p) {
2550 ++ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
2551 ++
2552 ++ p = rb_next(p);
2553 ++ rb_erase(&skb->rbnode, root);
2554 ++ while (skb) {
2555 ++ struct sk_buff *next = FRAG_CB(skb)->next_frag;
2556 ++
2557 ++ sum += skb->truesize;
2558 ++ kfree_skb(skb);
2559 ++ skb = next;
2560 ++ }
2561 ++ }
2562 ++ return sum;
2563 ++}
2564 ++EXPORT_SYMBOL(inet_frag_rbtree_purge);
2565 ++
2566 + void inet_frag_destroy(struct inet_frag_queue *q)
2567 + {
2568 + struct sk_buff *fp;
2569 +@@ -224,3 +302,218 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
2570 + return fq;
2571 + }
2572 + EXPORT_SYMBOL(inet_frag_find);
2573 ++
2574 ++int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
2575 ++ int offset, int end)
2576 ++{
2577 ++ struct sk_buff *last = q->fragments_tail;
2578 ++
2579 ++	/* RFC 5722, Section 4, amended by Errata ID 3089:
2580 ++	 * when reassembling an IPv6 datagram, if
2581 ++	 * one or more of its constituent fragments is determined to be an
2582 ++ * overlapping fragment, the entire datagram (and any constituent
2583 ++ * fragments) MUST be silently discarded.
2584 ++ *
2585 ++ * Duplicates, however, should be ignored (i.e. skb dropped, but the
2586 ++ * queue/fragments kept for later reassembly).
2587 ++ */
2588 ++ if (!last)
2589 ++ fragrun_create(q, skb); /* First fragment. */
2590 ++ else if (last->ip_defrag_offset + last->len < end) {
2591 ++ /* This is the common case: skb goes to the end. */
2592 ++ /* Detect and discard overlaps. */
2593 ++ if (offset < last->ip_defrag_offset + last->len)
2594 ++ return IPFRAG_OVERLAP;
2595 ++ if (offset == last->ip_defrag_offset + last->len)
2596 ++ fragrun_append_to_last(q, skb);
2597 ++ else
2598 ++ fragrun_create(q, skb);
2599 ++ } else {
2600 ++ /* Binary search. Note that skb can become the first fragment,
2601 ++ * but not the last (covered above).
2602 ++ */
2603 ++ struct rb_node **rbn, *parent;
2604 ++
2605 ++ rbn = &q->rb_fragments.rb_node;
2606 ++ do {
2607 ++ struct sk_buff *curr;
2608 ++ int curr_run_end;
2609 ++
2610 ++ parent = *rbn;
2611 ++ curr = rb_to_skb(parent);
2612 ++ curr_run_end = curr->ip_defrag_offset +
2613 ++ FRAG_CB(curr)->frag_run_len;
2614 ++ if (end <= curr->ip_defrag_offset)
2615 ++ rbn = &parent->rb_left;
2616 ++ else if (offset >= curr_run_end)
2617 ++ rbn = &parent->rb_right;
2618 ++ else if (offset >= curr->ip_defrag_offset &&
2619 ++ end <= curr_run_end)
2620 ++ return IPFRAG_DUP;
2621 ++ else
2622 ++ return IPFRAG_OVERLAP;
2623 ++ } while (*rbn);
2624 ++ /* Here we have parent properly set, and rbn pointing to
2625 ++ * one of its NULL left/right children. Insert skb.
2626 ++ */
2627 ++ fragcb_clear(skb);
2628 ++ rb_link_node(&skb->rbnode, parent, rbn);
2629 ++ rb_insert_color(&skb->rbnode, &q->rb_fragments);
2630 ++ }
2631 ++
2632 ++ skb->ip_defrag_offset = offset;
2633 ++
2634 ++ return IPFRAG_OK;
2635 ++}
2636 ++EXPORT_SYMBOL(inet_frag_queue_insert);
2637 ++
2638 ++void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
2639 ++ struct sk_buff *parent)
2640 ++{
2641 ++ struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
2642 ++ struct sk_buff **nextp;
2643 ++ int delta;
2644 ++
2645 ++ if (head != skb) {
2646 ++ fp = skb_clone(skb, GFP_ATOMIC);
2647 ++ if (!fp)
2648 ++ return NULL;
2649 ++ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
2650 ++ if (RB_EMPTY_NODE(&skb->rbnode))
2651 ++ FRAG_CB(parent)->next_frag = fp;
2652 ++ else
2653 ++ rb_replace_node(&skb->rbnode, &fp->rbnode,
2654 ++ &q->rb_fragments);
2655 ++ if (q->fragments_tail == skb)
2656 ++ q->fragments_tail = fp;
2657 ++ skb_morph(skb, head);
2658 ++ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
2659 ++ rb_replace_node(&head->rbnode, &skb->rbnode,
2660 ++ &q->rb_fragments);
2661 ++ consume_skb(head);
2662 ++ head = skb;
2663 ++ }
2664 ++ WARN_ON(head->ip_defrag_offset != 0);
2665 ++
2666 ++ delta = -head->truesize;
2667 ++
2668 ++ /* Head of list must not be cloned. */
2669 ++ if (skb_unclone(head, GFP_ATOMIC))
2670 ++ return NULL;
2671 ++
2672 ++ delta += head->truesize;
2673 ++ if (delta)
2674 ++ add_frag_mem_limit(q->net, delta);
2675 ++
2676 ++ /* If the first fragment is fragmented itself, we split
2677 ++ * it to two chunks: the first with data and paged part
2678 ++ * and the second, holding only fragments.
2679 ++ */
2680 ++ if (skb_has_frag_list(head)) {
2681 ++ struct sk_buff *clone;
2682 ++ int i, plen = 0;
2683 ++
2684 ++ clone = alloc_skb(0, GFP_ATOMIC);
2685 ++ if (!clone)
2686 ++ return NULL;
2687 ++ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
2688 ++ skb_frag_list_init(head);
2689 ++ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
2690 ++ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
2691 ++ clone->data_len = head->data_len - plen;
2692 ++ clone->len = clone->data_len;
2693 ++ head->truesize += clone->truesize;
2694 ++ clone->csum = 0;
2695 ++ clone->ip_summed = head->ip_summed;
2696 ++ add_frag_mem_limit(q->net, clone->truesize);
2697 ++ skb_shinfo(head)->frag_list = clone;
2698 ++ nextp = &clone->next;
2699 ++ } else {
2700 ++ nextp = &skb_shinfo(head)->frag_list;
2701 ++ }
2702 ++
2703 ++ return nextp;
2704 ++}
2705 ++EXPORT_SYMBOL(inet_frag_reasm_prepare);
2706 ++
2707 ++void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
2708 ++ void *reasm_data)
2709 ++{
2710 ++ struct sk_buff **nextp = (struct sk_buff **)reasm_data;
2711 ++ struct rb_node *rbn;
2712 ++ struct sk_buff *fp;
2713 ++
2714 ++ skb_push(head, head->data - skb_network_header(head));
2715 ++
2716 ++ /* Traverse the tree in order, to build frag_list. */
2717 ++ fp = FRAG_CB(head)->next_frag;
2718 ++ rbn = rb_next(&head->rbnode);
2719 ++ rb_erase(&head->rbnode, &q->rb_fragments);
2720 ++ while (rbn || fp) {
2721 ++ /* fp points to the next sk_buff in the current run;
2722 ++ * rbn points to the next run.
2723 ++ */
2724 ++ /* Go through the current run. */
2725 ++ while (fp) {
2726 ++ *nextp = fp;
2727 ++ nextp = &fp->next;
2728 ++ fp->prev = NULL;
2729 ++ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
2730 ++ fp->sk = NULL;
2731 ++ head->data_len += fp->len;
2732 ++ head->len += fp->len;
2733 ++ if (head->ip_summed != fp->ip_summed)
2734 ++ head->ip_summed = CHECKSUM_NONE;
2735 ++ else if (head->ip_summed == CHECKSUM_COMPLETE)
2736 ++ head->csum = csum_add(head->csum, fp->csum);
2737 ++ head->truesize += fp->truesize;
2738 ++ fp = FRAG_CB(fp)->next_frag;
2739 ++ }
2740 ++ /* Move to the next run. */
2741 ++ if (rbn) {
2742 ++ struct rb_node *rbnext = rb_next(rbn);
2743 ++
2744 ++ fp = rb_to_skb(rbn);
2745 ++ rb_erase(rbn, &q->rb_fragments);
2746 ++ rbn = rbnext;
2747 ++ }
2748 ++ }
2749 ++ sub_frag_mem_limit(q->net, head->truesize);
2750 ++
2751 ++ *nextp = NULL;
2752 ++ head->next = NULL;
2753 ++ head->prev = NULL;
2754 ++ head->tstamp = q->stamp;
2755 ++}
2756 ++EXPORT_SYMBOL(inet_frag_reasm_finish);
2757 ++
2758 ++struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
2759 ++{
2760 ++ struct sk_buff *head;
2761 ++
2762 ++ if (q->fragments) {
2763 ++ head = q->fragments;
2764 ++ q->fragments = head->next;
2765 ++ } else {
2766 ++ struct sk_buff *skb;
2767 ++
2768 ++ head = skb_rb_first(&q->rb_fragments);
2769 ++ if (!head)
2770 ++ return NULL;
2771 ++ skb = FRAG_CB(head)->next_frag;
2772 ++ if (skb)
2773 ++ rb_replace_node(&head->rbnode, &skb->rbnode,
2774 ++ &q->rb_fragments);
2775 ++ else
2776 ++ rb_erase(&head->rbnode, &q->rb_fragments);
2777 ++ memset(&head->rbnode, 0, sizeof(head->rbnode));
2778 ++ barrier();
2779 ++ }
2780 ++ if (head == q->fragments_tail)
2781 ++ q->fragments_tail = NULL;
2782 ++
2783 ++ sub_frag_mem_limit(q->net, head->truesize);
2784 ++
2785 ++ return head;
2786 ++}
2787 ++EXPORT_SYMBOL(inet_frag_pull_head);
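
Callers of inet_frag_queue_insert() are expected to translate its three results into the RFC 5722 policy: keep reassembling, drop only the duplicate, or tear the whole queue down on overlap. The IPv4 code later in this patch does exactly that; in outline:

        switch (inet_frag_queue_insert(q, skb, offset, end)) {
        case IPFRAG_OK:         /* linked into a run; reassembly continues */
                break;
        case IPFRAG_DUP:        /* no new data: drop only this skb */
                kfree_skb(skb);
                break;
        case IPFRAG_OVERLAP:    /* RFC 5722: discard the entire datagram */
                inet_frag_kill(q);
                kfree_skb(skb);
                break;
        }
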
2788 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
2789 +index d95b32af4a0e..5a1d39e32196 100644
2790 +--- a/net/ipv4/ip_fragment.c
2791 ++++ b/net/ipv4/ip_fragment.c
2792 +@@ -57,57 +57,6 @@
2793 + */
2794 + static const char ip_frag_cache_name[] = "ip4-frags";
2795 +
2796 +-/* Use skb->cb to track consecutive/adjacent fragments coming at
2797 +- * the end of the queue. Nodes in the rb-tree queue will
2798 +- * contain "runs" of one or more adjacent fragments.
2799 +- *
2800 +- * Invariants:
2801 +- * - next_frag is NULL at the tail of a "run";
2802 +- * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
2803 +- */
2804 +-struct ipfrag_skb_cb {
2805 +- struct inet_skb_parm h;
2806 +- struct sk_buff *next_frag;
2807 +- int frag_run_len;
2808 +-};
2809 +-
2810 +-#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
2811 +-
2812 +-static void ip4_frag_init_run(struct sk_buff *skb)
2813 +-{
2814 +- BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
2815 +-
2816 +- FRAG_CB(skb)->next_frag = NULL;
2817 +- FRAG_CB(skb)->frag_run_len = skb->len;
2818 +-}
2819 +-
2820 +-/* Append skb to the last "run". */
2821 +-static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
2822 +- struct sk_buff *skb)
2823 +-{
2824 +- RB_CLEAR_NODE(&skb->rbnode);
2825 +- FRAG_CB(skb)->next_frag = NULL;
2826 +-
2827 +- FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
2828 +- FRAG_CB(q->fragments_tail)->next_frag = skb;
2829 +- q->fragments_tail = skb;
2830 +-}
2831 +-
2832 +-/* Create a new "run" with the skb. */
2833 +-static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
2834 +-{
2835 +- if (q->last_run_head)
2836 +- rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
2837 +- &q->last_run_head->rbnode.rb_right);
2838 +- else
2839 +- rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
2840 +- rb_insert_color(&skb->rbnode, &q->rb_fragments);
2841 +-
2842 +- ip4_frag_init_run(skb);
2843 +- q->fragments_tail = skb;
2844 +- q->last_run_head = skb;
2845 +-}
2846 +-
2847 + /* Describe an entry in the "incomplete datagrams" queue. */
2848 + struct ipq {
2849 + struct inet_frag_queue q;
2850 +@@ -212,27 +161,9 @@ static void ip_expire(struct timer_list *t)
2851 + * pull the head out of the tree in order to be able to
2852 + * deal with head->dev.
2853 + */
2854 +- if (qp->q.fragments) {
2855 +- head = qp->q.fragments;
2856 +- qp->q.fragments = head->next;
2857 +- } else {
2858 +- head = skb_rb_first(&qp->q.rb_fragments);
2859 +- if (!head)
2860 +- goto out;
2861 +- if (FRAG_CB(head)->next_frag)
2862 +- rb_replace_node(&head->rbnode,
2863 +- &FRAG_CB(head)->next_frag->rbnode,
2864 +- &qp->q.rb_fragments);
2865 +- else
2866 +- rb_erase(&head->rbnode, &qp->q.rb_fragments);
2867 +- memset(&head->rbnode, 0, sizeof(head->rbnode));
2868 +- barrier();
2869 +- }
2870 +- if (head == qp->q.fragments_tail)
2871 +- qp->q.fragments_tail = NULL;
2872 +-
2873 +- sub_frag_mem_limit(qp->q.net, head->truesize);
2874 +-
2875 ++ head = inet_frag_pull_head(&qp->q);
2876 ++ if (!head)
2877 ++ goto out;
2878 + head->dev = dev_get_by_index_rcu(net, qp->iif);
2879 + if (!head->dev)
2880 + goto out;
2881 +@@ -345,12 +276,10 @@ static int ip_frag_reinit(struct ipq *qp)
2882 + static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
2883 + {
2884 + struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
2885 +- struct rb_node **rbn, *parent;
2886 +- struct sk_buff *skb1, *prev_tail;
2887 +- int ihl, end, skb1_run_end;
2888 ++ int ihl, end, flags, offset;
2889 ++ struct sk_buff *prev_tail;
2890 + struct net_device *dev;
2891 + unsigned int fragsize;
2892 +- int flags, offset;
2893 + int err = -ENOENT;
2894 + u8 ecn;
2895 +
2896 +@@ -382,7 +311,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
2897 + */
2898 + if (end < qp->q.len ||
2899 + ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
2900 +- goto err;
2901 ++ goto discard_qp;
2902 + qp->q.flags |= INET_FRAG_LAST_IN;
2903 + qp->q.len = end;
2904 + } else {
2905 +@@ -394,82 +323,33 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
2906 + if (end > qp->q.len) {
2907 + /* Some bits beyond end -> corruption. */
2908 + if (qp->q.flags & INET_FRAG_LAST_IN)
2909 +- goto err;
2910 ++ goto discard_qp;
2911 + qp->q.len = end;
2912 + }
2913 + }
2914 + if (end == offset)
2915 +- goto err;
2916 ++ goto discard_qp;
2917 +
2918 + err = -ENOMEM;
2919 + if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
2920 +- goto err;
2921 ++ goto discard_qp;
2922 +
2923 + err = pskb_trim_rcsum(skb, end - offset);
2924 + if (err)
2925 +- goto err;
2926 ++ goto discard_qp;
2927 +
2928 + /* Note : skb->rbnode and skb->dev share the same location. */
2929 + dev = skb->dev;
2930 + /* Makes sure compiler wont do silly aliasing games */
2931 + barrier();
2932 +
2933 +- /* RFC5722, Section 4, amended by Errata ID : 3089
2934 +- * When reassembling an IPv6 datagram, if
2935 +- * one or more its constituent fragments is determined to be an
2936 +- * overlapping fragment, the entire datagram (and any constituent
2937 +- * fragments) MUST be silently discarded.
2938 +- *
2939 +- * We do the same here for IPv4 (and increment an snmp counter) but
2940 +- * we do not want to drop the whole queue in response to a duplicate
2941 +- * fragment.
2942 +- */
2943 +-
2944 +- err = -EINVAL;
2945 +- /* Find out where to put this fragment. */
2946 + prev_tail = qp->q.fragments_tail;
2947 +- if (!prev_tail)
2948 +- ip4_frag_create_run(&qp->q, skb); /* First fragment. */
2949 +- else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
2950 +- /* This is the common case: skb goes to the end. */
2951 +- /* Detect and discard overlaps. */
2952 +- if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
2953 +- goto discard_qp;
2954 +- if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
2955 +- ip4_frag_append_to_last_run(&qp->q, skb);
2956 +- else
2957 +- ip4_frag_create_run(&qp->q, skb);
2958 +- } else {
2959 +- /* Binary search. Note that skb can become the first fragment,
2960 +- * but not the last (covered above).
2961 +- */
2962 +- rbn = &qp->q.rb_fragments.rb_node;
2963 +- do {
2964 +- parent = *rbn;
2965 +- skb1 = rb_to_skb(parent);
2966 +- skb1_run_end = skb1->ip_defrag_offset +
2967 +- FRAG_CB(skb1)->frag_run_len;
2968 +- if (end <= skb1->ip_defrag_offset)
2969 +- rbn = &parent->rb_left;
2970 +- else if (offset >= skb1_run_end)
2971 +- rbn = &parent->rb_right;
2972 +- else if (offset >= skb1->ip_defrag_offset &&
2973 +- end <= skb1_run_end)
2974 +- goto err; /* No new data, potential duplicate */
2975 +- else
2976 +- goto discard_qp; /* Found an overlap */
2977 +- } while (*rbn);
2978 +- /* Here we have parent properly set, and rbn pointing to
2979 +- * one of its NULL left/right children. Insert skb.
2980 +- */
2981 +- ip4_frag_init_run(skb);
2982 +- rb_link_node(&skb->rbnode, parent, rbn);
2983 +- rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
2984 +- }
2985 ++ err = inet_frag_queue_insert(&qp->q, skb, offset, end);
2986 ++ if (err)
2987 ++ goto insert_error;
2988 +
2989 + if (dev)
2990 + qp->iif = dev->ifindex;
2991 +- skb->ip_defrag_offset = offset;
2992 +
2993 + qp->q.stamp = skb->tstamp;
2994 + qp->q.meat += skb->len;
2995 +@@ -494,15 +374,24 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
2996 + skb->_skb_refdst = 0UL;
2997 + err = ip_frag_reasm(qp, skb, prev_tail, dev);
2998 + skb->_skb_refdst = orefdst;
2999 ++ if (err)
3000 ++ inet_frag_kill(&qp->q);
3001 + return err;
3002 + }
3003 +
3004 + skb_dst_drop(skb);
3005 + return -EINPROGRESS;
3006 +
3007 ++insert_error:
3008 ++ if (err == IPFRAG_DUP) {
3009 ++ kfree_skb(skb);
3010 ++ return -EINVAL;
3011 ++ }
3012 ++ err = -EINVAL;
3013 ++ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
3014 + discard_qp:
3015 + inet_frag_kill(&qp->q);
3016 +- __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
3017 ++ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
3018 + err:
3019 + kfree_skb(skb);
3020 + return err;
3021 +@@ -514,13 +403,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
3022 + {
3023 + struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
3024 + struct iphdr *iph;
3025 +- struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
3026 +- struct sk_buff **nextp; /* To build frag_list. */
3027 +- struct rb_node *rbn;
3028 +- int len;
3029 +- int ihlen;
3030 +- int delta;
3031 +- int err;
3032 ++ void *reasm_data;
3033 ++ int len, err;
3034 + u8 ecn;
3035 +
3036 + ipq_kill(qp);
3037 +@@ -530,117 +414,23 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
3038 + err = -EINVAL;
3039 + goto out_fail;
3040 + }
3041 +- /* Make the one we just received the head. */
3042 +- if (head != skb) {
3043 +- fp = skb_clone(skb, GFP_ATOMIC);
3044 +- if (!fp)
3045 +- goto out_nomem;
3046 +- FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
3047 +- if (RB_EMPTY_NODE(&skb->rbnode))
3048 +- FRAG_CB(prev_tail)->next_frag = fp;
3049 +- else
3050 +- rb_replace_node(&skb->rbnode, &fp->rbnode,
3051 +- &qp->q.rb_fragments);
3052 +- if (qp->q.fragments_tail == skb)
3053 +- qp->q.fragments_tail = fp;
3054 +- skb_morph(skb, head);
3055 +- FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
3056 +- rb_replace_node(&head->rbnode, &skb->rbnode,
3057 +- &qp->q.rb_fragments);
3058 +- consume_skb(head);
3059 +- head = skb;
3060 +- }
3061 +
3062 +- WARN_ON(head->ip_defrag_offset != 0);
3063 +-
3064 +- /* Allocate a new buffer for the datagram. */
3065 +- ihlen = ip_hdrlen(head);
3066 +- len = ihlen + qp->q.len;
3067 ++ /* Make the one we just received the head. */
3068 ++ reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
3069 ++ if (!reasm_data)
3070 ++ goto out_nomem;
3071 +
3072 ++ len = ip_hdrlen(skb) + qp->q.len;
3073 + err = -E2BIG;
3074 + if (len > 65535)
3075 + goto out_oversize;
3076 +
3077 +- delta = - head->truesize;
3078 +-
3079 +- /* Head of list must not be cloned. */
3080 +- if (skb_unclone(head, GFP_ATOMIC))
3081 +- goto out_nomem;
3082 +-
3083 +- delta += head->truesize;
3084 +- if (delta)
3085 +- add_frag_mem_limit(qp->q.net, delta);
3086 +-
3087 +- /* If the first fragment is fragmented itself, we split
3088 +- * it to two chunks: the first with data and paged part
3089 +- * and the second, holding only fragments. */
3090 +- if (skb_has_frag_list(head)) {
3091 +- struct sk_buff *clone;
3092 +- int i, plen = 0;
3093 +-
3094 +- clone = alloc_skb(0, GFP_ATOMIC);
3095 +- if (!clone)
3096 +- goto out_nomem;
3097 +- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
3098 +- skb_frag_list_init(head);
3099 +- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
3100 +- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
3101 +- clone->len = clone->data_len = head->data_len - plen;
3102 +- head->truesize += clone->truesize;
3103 +- clone->csum = 0;
3104 +- clone->ip_summed = head->ip_summed;
3105 +- add_frag_mem_limit(qp->q.net, clone->truesize);
3106 +- skb_shinfo(head)->frag_list = clone;
3107 +- nextp = &clone->next;
3108 +- } else {
3109 +- nextp = &skb_shinfo(head)->frag_list;
3110 +- }
3111 ++ inet_frag_reasm_finish(&qp->q, skb, reasm_data);
3112 +
3113 +- skb_push(head, head->data - skb_network_header(head));
3114 ++ skb->dev = dev;
3115 ++ IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
3116 +
3117 +- /* Traverse the tree in order, to build frag_list. */
3118 +- fp = FRAG_CB(head)->next_frag;
3119 +- rbn = rb_next(&head->rbnode);
3120 +- rb_erase(&head->rbnode, &qp->q.rb_fragments);
3121 +- while (rbn || fp) {
3122 +- /* fp points to the next sk_buff in the current run;
3123 +- * rbn points to the next run.
3124 +- */
3125 +- /* Go through the current run. */
3126 +- while (fp) {
3127 +- *nextp = fp;
3128 +- nextp = &fp->next;
3129 +- fp->prev = NULL;
3130 +- memset(&fp->rbnode, 0, sizeof(fp->rbnode));
3131 +- fp->sk = NULL;
3132 +- head->data_len += fp->len;
3133 +- head->len += fp->len;
3134 +- if (head->ip_summed != fp->ip_summed)
3135 +- head->ip_summed = CHECKSUM_NONE;
3136 +- else if (head->ip_summed == CHECKSUM_COMPLETE)
3137 +- head->csum = csum_add(head->csum, fp->csum);
3138 +- head->truesize += fp->truesize;
3139 +- fp = FRAG_CB(fp)->next_frag;
3140 +- }
3141 +- /* Move to the next run. */
3142 +- if (rbn) {
3143 +- struct rb_node *rbnext = rb_next(rbn);
3144 +-
3145 +- fp = rb_to_skb(rbn);
3146 +- rb_erase(rbn, &qp->q.rb_fragments);
3147 +- rbn = rbnext;
3148 +- }
3149 +- }
3150 +- sub_frag_mem_limit(qp->q.net, head->truesize);
3151 +-
3152 +- *nextp = NULL;
3153 +- head->next = NULL;
3154 +- head->prev = NULL;
3155 +- head->dev = dev;
3156 +- head->tstamp = qp->q.stamp;
3157 +- IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
3158 +-
3159 +- iph = ip_hdr(head);
3160 ++ iph = ip_hdr(skb);
3161 + iph->tot_len = htons(len);
3162 + iph->tos |= ecn;
3163 +
3164 +@@ -653,7 +443,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
3165 + * from one very small df-fragment and one large non-df frag.
3166 + */
3167 + if (qp->max_df_size == qp->q.max_size) {
3168 +- IPCB(head)->flags |= IPSKB_FRAG_PMTU;
3169 ++ IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
3170 + iph->frag_off = htons(IP_DF);
3171 + } else {
3172 + iph->frag_off = 0;
3173 +@@ -751,28 +541,6 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
3174 + }
3175 + EXPORT_SYMBOL(ip_check_defrag);
3176 +
3177 +-unsigned int inet_frag_rbtree_purge(struct rb_root *root)
3178 +-{
3179 +- struct rb_node *p = rb_first(root);
3180 +- unsigned int sum = 0;
3181 +-
3182 +- while (p) {
3183 +- struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3184 +-
3185 +- p = rb_next(p);
3186 +- rb_erase(&skb->rbnode, root);
3187 +- while (skb) {
3188 +- struct sk_buff *next = FRAG_CB(skb)->next_frag;
3189 +-
3190 +- sum += skb->truesize;
3191 +- kfree_skb(skb);
3192 +- skb = next;
3193 +- }
3194 +- }
3195 +- return sum;
3196 +-}
3197 +-EXPORT_SYMBOL(inet_frag_rbtree_purge);
3198 +-
3199 + #ifdef CONFIG_SYSCTL
3200 + static int dist_min;
3201 +
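(A note on the ip_fragment.c rework above: the open-coded rbtree walk is replaced by inet_frag_queue_insert(), and the new insert_error path distinguishes duplicates from overlaps — IPFRAG_DUP frees only the new skb and leaves the queue intact, while an overlap kills the whole queue, counting REASM_OVERLAPS and then REASMFAILS. A minimal userspace sketch of that dispatch; the names and constants below are invented for illustration, not the kernel's.)

/* Sketch of the insert_error handling introduced above: a duplicate
 * frees only the new skb and keeps the queue; anything else is an
 * overlap, which discards the whole datagram. Invented names. */
#include <stdio.h>

#define IPFRAG_OK      0
#define IPFRAG_DUP     1
#define IPFRAG_OVERLAP 2

static int handle_insert(int err, int *queue_alive)
{
	if (err == IPFRAG_OK)
		return 0;            /* fragment queued, wait for the rest */
	if (err == IPFRAG_DUP)
		return -1;           /* drop duplicate skb, queue survives */
	*queue_alive = 0;            /* overlap: kill the whole queue */
	return -1;
}

int main(void)
{
	int alive = 1;

	handle_insert(IPFRAG_DUP, &alive);
	printf("after dup: queue %s\n", alive ? "alive" : "dead");
	handle_insert(IPFRAG_OVERLAP, &alive);
	printf("after overlap: queue %s\n", alive ? "alive" : "dead");
	return 0;
}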
3202 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3203 +index a1bf87711bfa..c64f062d6323 100644
3204 +--- a/net/ipv4/route.c
3205 ++++ b/net/ipv4/route.c
3206 +@@ -1194,9 +1194,23 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
3207 +
3208 + static void ipv4_link_failure(struct sk_buff *skb)
3209 + {
3210 ++ struct ip_options opt;
3211 + struct rtable *rt;
3212 ++ int res;
3213 +
3214 +- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
3215 ++ /* Recompile ip options since IPCB may not be valid anymore.
3216 ++ */
3217 ++ memset(&opt, 0, sizeof(opt));
3218 ++ opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
3219 ++
3220 ++ rcu_read_lock();
3221 ++ res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
3222 ++ rcu_read_unlock();
3223 ++
3224 ++ if (res)
3225 ++ return;
3226 ++
3227 ++ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
3228 +
3229 + rt = skb_rtable(skb);
3230 + if (rt)
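(The ipv4_link_failure() fix above recompiles the IP options from the header itself, because IPCB(skb) may be stale by the time the route fails; the options area is simply whatever the header carries beyond its fixed 20 bytes, i.e. opt.optlen = ihl*4 - sizeof(struct iphdr). A trivial standalone check of that arithmetic — values here are made up:)

/* Tiny model of the option-length computation used above. */
#include <stdio.h>

int main(void)
{
	unsigned int ihl = 7;                /* header length in 32-bit words */
	unsigned int optlen = ihl * 4 - 20;  /* sizeof(struct iphdr) == 20 */
	printf("ihl=%u -> optlen=%u bytes\n", ihl, optlen); /* 8 bytes */
	return 0;
}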
3231 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3232 +index c8227e07d574..657d33e2ff6a 100644
3233 +--- a/net/ipv4/tcp_input.c
3234 ++++ b/net/ipv4/tcp_input.c
3235 +@@ -389,11 +389,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
3236 + static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
3237 + {
3238 + struct tcp_sock *tp = tcp_sk(sk);
3239 ++ int room;
3240 ++
3241 ++ room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
3242 +
3243 + /* Check #1 */
3244 +- if (tp->rcv_ssthresh < tp->window_clamp &&
3245 +- (int)tp->rcv_ssthresh < tcp_space(sk) &&
3246 +- !tcp_under_memory_pressure(sk)) {
3247 ++ if (room > 0 && !tcp_under_memory_pressure(sk)) {
3248 + int incr;
3249 +
3250 + /* Check #2. Increase window, if skb with such overhead
3251 +@@ -406,8 +407,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
3252 +
3253 + if (incr) {
3254 + incr = max_t(int, incr, 2 * skb->len);
3255 +- tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
3256 +- tp->window_clamp);
3257 ++ tp->rcv_ssthresh += min(room, incr);
3258 + inet_csk(sk)->icsk_ack.quick |= 1;
3259 + }
3260 + }
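(The tcp_grow_window() change above folds the two previous limits into a single term, room = min(window_clamp, tcp_space(sk)) - rcv_ssthresh, and then advances rcv_ssthresh by at most room, so one step can never push it past either bound. A standalone model of the new arithmetic with made-up values, not kernel state:)

/* Clamping model: rcv_ssthresh grows by at most 'room'. */
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	int window_clamp = 65535, tcp_space = 40000, rcv_ssthresh = 35000;
	int skb_len = 1460, incr = 4096; /* hypothetical __tcp_grow_window() result */

	int room = min_int(window_clamp, tcp_space) - rcv_ssthresh; /* 5000 */
	if (room > 0) {
		incr = max_int(incr, 2 * skb_len);   /* 4096 */
		rcv_ssthresh += min_int(room, incr); /* 39096, stays within tcp_space */
	}
	printf("room=%d new rcv_ssthresh=%d\n", room, rcv_ssthresh);
	return 0;
}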
3261 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
3262 +index 237fb04c6716..cb1b4772dac0 100644
3263 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
3264 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
3265 +@@ -33,9 +33,8 @@
3266 +
3267 + #include <net/sock.h>
3268 + #include <net/snmp.h>
3269 +-#include <net/inet_frag.h>
3270 ++#include <net/ipv6_frag.h>
3271 +
3272 +-#include <net/ipv6.h>
3273 + #include <net/protocol.h>
3274 + #include <net/transp_v6.h>
3275 + #include <net/rawv6.h>
3276 +@@ -52,14 +51,6 @@
3277 +
3278 + static const char nf_frags_cache_name[] = "nf-frags";
3279 +
3280 +-struct nf_ct_frag6_skb_cb
3281 +-{
3282 +- struct inet6_skb_parm h;
3283 +- int offset;
3284 +-};
3285 +-
3286 +-#define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb *)((skb)->cb))
3287 +-
3288 + static struct inet_frags nf_frags;
3289 +
3290 + #ifdef CONFIG_SYSCTL
3291 +@@ -145,6 +136,9 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
3292 + }
3293 + #endif
3294 +
3295 ++static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
3296 ++ struct sk_buff *prev_tail, struct net_device *dev);
3297 ++
3298 + static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
3299 + {
3300 + return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
3301 +@@ -159,7 +153,7 @@ static void nf_ct_frag6_expire(struct timer_list *t)
3302 + fq = container_of(frag, struct frag_queue, q);
3303 + net = container_of(fq->q.net, struct net, nf_frag.frags);
3304 +
3305 +- ip6_expire_frag_queue(net, fq);
3306 ++ ip6frag_expire_frag_queue(net, fq);
3307 + }
3308 +
3309 + /* Creation primitives. */
3310 +@@ -186,9 +180,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
3311 + static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
3312 + const struct frag_hdr *fhdr, int nhoff)
3313 + {
3314 +- struct sk_buff *prev, *next;
3315 + unsigned int payload_len;
3316 +- int offset, end;
3317 ++ struct net_device *dev;
3318 ++ struct sk_buff *prev;
3319 ++ int offset, end, err;
3320 + u8 ecn;
3321 +
3322 + if (fq->q.flags & INET_FRAG_COMPLETE) {
3323 +@@ -263,55 +258,19 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
3324 + goto err;
3325 + }
3326 +
3327 +- /* Find out which fragments are in front and at the back of us
3328 +- * in the chain of fragments so far. We must know where to put
3329 +- * this fragment, right?
3330 +- */
3331 ++ /* Note : skb->rbnode and skb->dev share the same location. */
3332 ++ dev = skb->dev;
3333 ++	/* Makes sure compiler won't do silly aliasing games */
3334 ++ barrier();
3335 ++
3336 + prev = fq->q.fragments_tail;
3337 +- if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) {
3338 +- next = NULL;
3339 +- goto found;
3340 +- }
3341 +- prev = NULL;
3342 +- for (next = fq->q.fragments; next != NULL; next = next->next) {
3343 +- if (NFCT_FRAG6_CB(next)->offset >= offset)
3344 +- break; /* bingo! */
3345 +- prev = next;
3346 +- }
3347 ++ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
3348 ++ if (err)
3349 ++ goto insert_error;
3350 +
3351 +-found:
3352 +- /* RFC5722, Section 4:
3353 +- * When reassembling an IPv6 datagram, if
3354 +- * one or more its constituent fragments is determined to be an
3355 +- * overlapping fragment, the entire datagram (and any constituent
3356 +- * fragments, including those not yet received) MUST be silently
3357 +- * discarded.
3358 +- */
3359 ++ if (dev)
3360 ++ fq->iif = dev->ifindex;
3361 +
3362 +- /* Check for overlap with preceding fragment. */
3363 +- if (prev &&
3364 +- (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
3365 +- goto discard_fq;
3366 +-
3367 +- /* Look for overlap with succeeding segment. */
3368 +- if (next && NFCT_FRAG6_CB(next)->offset < end)
3369 +- goto discard_fq;
3370 +-
3371 +- NFCT_FRAG6_CB(skb)->offset = offset;
3372 +-
3373 +- /* Insert this fragment in the chain of fragments. */
3374 +- skb->next = next;
3375 +- if (!next)
3376 +- fq->q.fragments_tail = skb;
3377 +- if (prev)
3378 +- prev->next = skb;
3379 +- else
3380 +- fq->q.fragments = skb;
3381 +-
3382 +- if (skb->dev) {
3383 +- fq->iif = skb->dev->ifindex;
3384 +- skb->dev = NULL;
3385 +- }
3386 + fq->q.stamp = skb->tstamp;
3387 + fq->q.meat += skb->len;
3388 + fq->ecn |= ecn;
3389 +@@ -327,11 +286,25 @@ found:
3390 + fq->q.flags |= INET_FRAG_FIRST_IN;
3391 + }
3392 +
3393 +- return 0;
3394 ++ if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
3395 ++ fq->q.meat == fq->q.len) {
3396 ++ unsigned long orefdst = skb->_skb_refdst;
3397 +
3398 +-discard_fq:
3399 ++ skb->_skb_refdst = 0UL;
3400 ++ err = nf_ct_frag6_reasm(fq, skb, prev, dev);
3401 ++ skb->_skb_refdst = orefdst;
3402 ++ return err;
3403 ++ }
3404 ++
3405 ++ skb_dst_drop(skb);
3406 ++ return -EINPROGRESS;
3407 ++
3408 ++insert_error:
3409 ++ if (err == IPFRAG_DUP)
3410 ++ goto err;
3411 + inet_frag_kill(&fq->q);
3412 + err:
3413 ++ skb_dst_drop(skb);
3414 + return -EINVAL;
3415 + }
3416 +
3417 +@@ -341,147 +314,67 @@ err:
3418 + * It is called with locked fq, and caller must check that
3419 + * queue is eligible for reassembly i.e. it is not COMPLETE,
3420 + * the last and the first frames arrived and all the bits are here.
3421 +- *
3422 +- * returns true if *prev skb has been transformed into the reassembled
3423 +- * skb, false otherwise.
3424 + */
3425 +-static bool
3426 +-nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
3427 ++static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
3428 ++ struct sk_buff *prev_tail, struct net_device *dev)
3429 + {
3430 +- struct sk_buff *fp, *head = fq->q.fragments;
3431 +- int payload_len, delta;
3432 ++ void *reasm_data;
3433 ++ int payload_len;
3434 + u8 ecn;
3435 +
3436 + inet_frag_kill(&fq->q);
3437 +
3438 +- WARN_ON(head == NULL);
3439 +- WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
3440 +-
3441 + ecn = ip_frag_ecn_table[fq->ecn];
3442 + if (unlikely(ecn == 0xff))
3443 +- return false;
3444 ++ goto err;
3445 +
3446 +- /* Unfragmented part is taken from the first segment. */
3447 +- payload_len = ((head->data - skb_network_header(head)) -
3448 ++ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
3449 ++ if (!reasm_data)
3450 ++ goto err;
3451 ++
3452 ++ payload_len = ((skb->data - skb_network_header(skb)) -
3453 + sizeof(struct ipv6hdr) + fq->q.len -
3454 + sizeof(struct frag_hdr));
3455 + if (payload_len > IPV6_MAXPLEN) {
3456 + net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
3457 + payload_len);
3458 +- return false;
3459 +- }
3460 +-
3461 +- delta = - head->truesize;
3462 +-
3463 +- /* Head of list must not be cloned. */
3464 +- if (skb_unclone(head, GFP_ATOMIC))
3465 +- return false;
3466 +-
3467 +- delta += head->truesize;
3468 +- if (delta)
3469 +- add_frag_mem_limit(fq->q.net, delta);
3470 +-
3471 +- /* If the first fragment is fragmented itself, we split
3472 +- * it to two chunks: the first with data and paged part
3473 +- * and the second, holding only fragments. */
3474 +- if (skb_has_frag_list(head)) {
3475 +- struct sk_buff *clone;
3476 +- int i, plen = 0;
3477 +-
3478 +- clone = alloc_skb(0, GFP_ATOMIC);
3479 +- if (clone == NULL)
3480 +- return false;
3481 +-
3482 +- clone->next = head->next;
3483 +- head->next = clone;
3484 +- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
3485 +- skb_frag_list_init(head);
3486 +- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
3487 +- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
3488 +- clone->len = clone->data_len = head->data_len - plen;
3489 +- head->data_len -= clone->len;
3490 +- head->len -= clone->len;
3491 +- clone->csum = 0;
3492 +- clone->ip_summed = head->ip_summed;
3493 +-
3494 +- add_frag_mem_limit(fq->q.net, clone->truesize);
3495 +- }
3496 +-
3497 +- /* morph head into last received skb: prev.
3498 +- *
3499 +- * This allows callers of ipv6 conntrack defrag to continue
3500 +- * to use the last skb(frag) passed into the reasm engine.
3501 +- * The last skb frag 'silently' turns into the full reassembled skb.
3502 +- *
3503 +- * Since prev is also part of q->fragments we have to clone it first.
3504 +- */
3505 +- if (head != prev) {
3506 +- struct sk_buff *iter;
3507 +-
3508 +- fp = skb_clone(prev, GFP_ATOMIC);
3509 +- if (!fp)
3510 +- return false;
3511 +-
3512 +- fp->next = prev->next;
3513 +-
3514 +- iter = head;
3515 +- while (iter) {
3516 +- if (iter->next == prev) {
3517 +- iter->next = fp;
3518 +- break;
3519 +- }
3520 +- iter = iter->next;
3521 +- }
3522 +-
3523 +- skb_morph(prev, head);
3524 +- prev->next = head->next;
3525 +- consume_skb(head);
3526 +- head = prev;
3527 ++ goto err;
3528 + }
3529 +
3530 + /* We have to remove fragment header from datagram and to relocate
3531 + * header in order to calculate ICV correctly. */
3532 +- skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
3533 +- memmove(head->head + sizeof(struct frag_hdr), head->head,
3534 +- (head->data - head->head) - sizeof(struct frag_hdr));
3535 +- head->mac_header += sizeof(struct frag_hdr);
3536 +- head->network_header += sizeof(struct frag_hdr);
3537 +-
3538 +- skb_shinfo(head)->frag_list = head->next;
3539 +- skb_reset_transport_header(head);
3540 +- skb_push(head, head->data - skb_network_header(head));
3541 +-
3542 +- for (fp = head->next; fp; fp = fp->next) {
3543 +- head->data_len += fp->len;
3544 +- head->len += fp->len;
3545 +- if (head->ip_summed != fp->ip_summed)
3546 +- head->ip_summed = CHECKSUM_NONE;
3547 +- else if (head->ip_summed == CHECKSUM_COMPLETE)
3548 +- head->csum = csum_add(head->csum, fp->csum);
3549 +- head->truesize += fp->truesize;
3550 +- fp->sk = NULL;
3551 +- }
3552 +- sub_frag_mem_limit(fq->q.net, head->truesize);
3553 ++ skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
3554 ++ memmove(skb->head + sizeof(struct frag_hdr), skb->head,
3555 ++ (skb->data - skb->head) - sizeof(struct frag_hdr));
3556 ++ skb->mac_header += sizeof(struct frag_hdr);
3557 ++ skb->network_header += sizeof(struct frag_hdr);
3558 ++
3559 ++ skb_reset_transport_header(skb);
3560 +
3561 +- head->ignore_df = 1;
3562 +- head->next = NULL;
3563 +- head->dev = dev;
3564 +- head->tstamp = fq->q.stamp;
3565 +- ipv6_hdr(head)->payload_len = htons(payload_len);
3566 +- ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
3567 +- IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
3568 ++ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
3569 ++
3570 ++ skb->ignore_df = 1;
3571 ++ skb->dev = dev;
3572 ++ ipv6_hdr(skb)->payload_len = htons(payload_len);
3573 ++ ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
3574 ++ IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
3575 +
3576 + /* Yes, and fold redundant checksum back. 8) */
3577 +- if (head->ip_summed == CHECKSUM_COMPLETE)
3578 +- head->csum = csum_partial(skb_network_header(head),
3579 +- skb_network_header_len(head),
3580 +- head->csum);
3581 ++ if (skb->ip_summed == CHECKSUM_COMPLETE)
3582 ++ skb->csum = csum_partial(skb_network_header(skb),
3583 ++ skb_network_header_len(skb),
3584 ++ skb->csum);
3585 +
3586 + fq->q.fragments = NULL;
3587 + fq->q.rb_fragments = RB_ROOT;
3588 + fq->q.fragments_tail = NULL;
3589 ++ fq->q.last_run_head = NULL;
3590 ++
3591 ++ return 0;
3592 +
3593 +- return true;
3594 ++err:
3595 ++ inet_frag_kill(&fq->q);
3596 ++ return -EINVAL;
3597 + }
3598 +
3599 + /*
3600 +@@ -550,7 +443,6 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
3601 + int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
3602 + {
3603 + u16 savethdr = skb->transport_header;
3604 +- struct net_device *dev = skb->dev;
3605 + int fhoff, nhoff, ret;
3606 + struct frag_hdr *fhdr;
3607 + struct frag_queue *fq;
3608 +@@ -573,10 +465,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
3609 + hdr = ipv6_hdr(skb);
3610 + fhdr = (struct frag_hdr *)skb_transport_header(skb);
3611 +
3612 +- if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
3613 +- fhdr->frag_off & htons(IP6_MF))
3614 +- return -EINVAL;
3615 +-
3616 + skb_orphan(skb);
3617 + fq = fq_find(net, fhdr->identification, user, hdr,
3618 + skb->dev ? skb->dev->ifindex : 0);
3619 +@@ -588,24 +476,17 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
3620 + spin_lock_bh(&fq->q.lock);
3621 +
3622 + ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
3623 +- if (ret < 0) {
3624 +- if (ret == -EPROTO) {
3625 +- skb->transport_header = savethdr;
3626 +- ret = 0;
3627 +- }
3628 +- goto out_unlock;
3629 ++ if (ret == -EPROTO) {
3630 ++ skb->transport_header = savethdr;
3631 ++ ret = 0;
3632 + }
3633 +
3634 + /* after queue has assumed skb ownership, only 0 or -EINPROGRESS
3635 + * must be returned.
3636 + */
3637 +- ret = -EINPROGRESS;
3638 +- if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
3639 +- fq->q.meat == fq->q.len &&
3640 +- nf_ct_frag6_reasm(fq, skb, dev))
3641 +- ret = 0;
3642 ++ if (ret)
3643 ++ ret = -EINPROGRESS;
3644 +
3645 +-out_unlock:
3646 + spin_unlock_bh(&fq->q.lock);
3647 + inet_frag_put(&fq->q);
3648 + return ret;
3649 +@@ -641,16 +522,24 @@ static struct pernet_operations nf_ct_net_ops = {
3650 + .exit = nf_ct_net_exit,
3651 + };
3652 +
3653 ++static const struct rhashtable_params nfct_rhash_params = {
3654 ++ .head_offset = offsetof(struct inet_frag_queue, node),
3655 ++ .hashfn = ip6frag_key_hashfn,
3656 ++ .obj_hashfn = ip6frag_obj_hashfn,
3657 ++ .obj_cmpfn = ip6frag_obj_cmpfn,
3658 ++ .automatic_shrinking = true,
3659 ++};
3660 ++
3661 + int nf_ct_frag6_init(void)
3662 + {
3663 + int ret = 0;
3664 +
3665 +- nf_frags.constructor = ip6_frag_init;
3666 ++ nf_frags.constructor = ip6frag_init;
3667 + nf_frags.destructor = NULL;
3668 + nf_frags.qsize = sizeof(struct frag_queue);
3669 + nf_frags.frag_expire = nf_ct_frag6_expire;
3670 + nf_frags.frags_cache_name = nf_frags_cache_name;
3671 +- nf_frags.rhash_params = ip6_rhash_params;
3672 ++ nf_frags.rhash_params = nfct_rhash_params;
3673 + ret = inet_frags_init(&nf_frags);
3674 + if (ret)
3675 + goto out;
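(Both reassembly paths in this patch save skb->dev into a local before handing the skb to inet_frag_queue_insert(), because skb->rbnode and skb->dev occupy the same storage once the skb sits in the rbtree; the barrier() keeps the compiler from reordering around that aliased write. A toy illustration of the hazard — the union below is an invented stand-in, not struct sk_buff:)

/* Two members sharing storage: save one before writing the other. */
#include <stdio.h>

struct fake_skb {
	union {
		void *dev;     /* valid before queueing */
		long rbnode;   /* reused once inserted in the rbtree */
	};
};

int main(void)
{
	struct fake_skb skb = { .dev = (void *)0x1234 };
	void *dev = skb.dev;   /* save first ... */
	skb.rbnode = 42;       /* ... because this overwrites the union */
	printf("saved dev=%p rbnode=%ld\n", dev, skb.rbnode);
	return 0;
}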
3676 +diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
3677 +index b326da59257f..123bfb13a5d1 100644
3678 +--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
3679 ++++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
3680 +@@ -14,8 +14,7 @@
3681 + #include <linux/skbuff.h>
3682 + #include <linux/icmp.h>
3683 + #include <linux/sysctl.h>
3684 +-#include <net/ipv6.h>
3685 +-#include <net/inet_frag.h>
3686 ++#include <net/ipv6_frag.h>
3687 +
3688 + #include <linux/netfilter_ipv6.h>
3689 + #include <linux/netfilter_bridge.h>
3690 +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
3691 +index 2a8c680b67cd..fe797b29ca89 100644
3692 +--- a/net/ipv6/reassembly.c
3693 ++++ b/net/ipv6/reassembly.c
3694 +@@ -57,18 +57,11 @@
3695 + #include <net/rawv6.h>
3696 + #include <net/ndisc.h>
3697 + #include <net/addrconf.h>
3698 +-#include <net/inet_frag.h>
3699 ++#include <net/ipv6_frag.h>
3700 + #include <net/inet_ecn.h>
3701 +
3702 + static const char ip6_frag_cache_name[] = "ip6-frags";
3703 +
3704 +-struct ip6frag_skb_cb {
3705 +- struct inet6_skb_parm h;
3706 +- int offset;
3707 +-};
3708 +-
3709 +-#define FRAG6_CB(skb) ((struct ip6frag_skb_cb *)((skb)->cb))
3710 +-
3711 + static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
3712 + {
3713 + return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
3714 +@@ -76,63 +69,8 @@ static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
3715 +
3716 + static struct inet_frags ip6_frags;
3717 +
3718 +-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
3719 +- struct net_device *dev);
3720 +-
3721 +-void ip6_frag_init(struct inet_frag_queue *q, const void *a)
3722 +-{
3723 +- struct frag_queue *fq = container_of(q, struct frag_queue, q);
3724 +- const struct frag_v6_compare_key *key = a;
3725 +-
3726 +- q->key.v6 = *key;
3727 +- fq->ecn = 0;
3728 +-}
3729 +-EXPORT_SYMBOL(ip6_frag_init);
3730 +-
3731 +-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
3732 +-{
3733 +- struct net_device *dev = NULL;
3734 +- struct sk_buff *head;
3735 +-
3736 +- rcu_read_lock();
3737 +- spin_lock(&fq->q.lock);
3738 +-
3739 +- if (fq->q.flags & INET_FRAG_COMPLETE)
3740 +- goto out;
3741 +-
3742 +- inet_frag_kill(&fq->q);
3743 +-
3744 +- dev = dev_get_by_index_rcu(net, fq->iif);
3745 +- if (!dev)
3746 +- goto out;
3747 +-
3748 +- __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
3749 +- __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
3750 +-
3751 +- /* Don't send error if the first segment did not arrive. */
3752 +- head = fq->q.fragments;
3753 +- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
3754 +- goto out;
3755 +-
3756 +- /* But use as source device on which LAST ARRIVED
3757 +- * segment was received. And do not use fq->dev
3758 +- * pointer directly, device might already disappeared.
3759 +- */
3760 +- head->dev = dev;
3761 +- skb_get(head);
3762 +- spin_unlock(&fq->q.lock);
3763 +-
3764 +- icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
3765 +- kfree_skb(head);
3766 +- goto out_rcu_unlock;
3767 +-
3768 +-out:
3769 +- spin_unlock(&fq->q.lock);
3770 +-out_rcu_unlock:
3771 +- rcu_read_unlock();
3772 +- inet_frag_put(&fq->q);
3773 +-}
3774 +-EXPORT_SYMBOL(ip6_expire_frag_queue);
3775 ++static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
3776 ++ struct sk_buff *prev_tail, struct net_device *dev);
3777 +
3778 + static void ip6_frag_expire(struct timer_list *t)
3779 + {
3780 +@@ -143,7 +81,7 @@ static void ip6_frag_expire(struct timer_list *t)
3781 + fq = container_of(frag, struct frag_queue, q);
3782 + net = container_of(fq->q.net, struct net, ipv6.frags);
3783 +
3784 +- ip6_expire_frag_queue(net, fq);
3785 ++ ip6frag_expire_frag_queue(net, fq);
3786 + }
3787 +
3788 + static struct frag_queue *
3789 +@@ -170,27 +108,29 @@ fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
3790 + }
3791 +
3792 + static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
3793 +- struct frag_hdr *fhdr, int nhoff)
3794 ++ struct frag_hdr *fhdr, int nhoff,
3795 ++ u32 *prob_offset)
3796 + {
3797 +- struct sk_buff *prev, *next;
3798 +- struct net_device *dev;
3799 +- int offset, end, fragsize;
3800 + struct net *net = dev_net(skb_dst(skb)->dev);
3801 ++ int offset, end, fragsize;
3802 ++ struct sk_buff *prev_tail;
3803 ++ struct net_device *dev;
3804 ++ int err = -ENOENT;
3805 + u8 ecn;
3806 +
3807 + if (fq->q.flags & INET_FRAG_COMPLETE)
3808 + goto err;
3809 +
3810 ++ err = -EINVAL;
3811 + offset = ntohs(fhdr->frag_off) & ~0x7;
3812 + end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
3813 + ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
3814 +
3815 + if ((unsigned int)end > IPV6_MAXPLEN) {
3816 +- __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
3817 +- IPSTATS_MIB_INHDRERRORS);
3818 +- icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
3819 +- ((u8 *)&fhdr->frag_off -
3820 +- skb_network_header(skb)));
3821 ++ *prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
3822 ++	/* note that if prob_offset is set, the skb is freed elsewhere;
3823 ++ * we do not free it here.
3824 ++ */
3825 + return -1;
3826 + }
3827 +
3828 +@@ -210,7 +150,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
3829 + */
3830 + if (end < fq->q.len ||
3831 + ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
3832 +- goto err;
3833 ++ goto discard_fq;
3834 + fq->q.flags |= INET_FRAG_LAST_IN;
3835 + fq->q.len = end;
3836 + } else {
3837 +@@ -221,79 +161,42 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
3838 + /* RFC2460 says always send parameter problem in
3839 + * this case. -DaveM
3840 + */
3841 +- __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
3842 +- IPSTATS_MIB_INHDRERRORS);
3843 +- icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
3844 +- offsetof(struct ipv6hdr, payload_len));
3845 ++ *prob_offset = offsetof(struct ipv6hdr, payload_len);
3846 + return -1;
3847 + }
3848 + if (end > fq->q.len) {
3849 + /* Some bits beyond end -> corruption. */
3850 + if (fq->q.flags & INET_FRAG_LAST_IN)
3851 +- goto err;
3852 ++ goto discard_fq;
3853 + fq->q.len = end;
3854 + }
3855 + }
3856 +
3857 + if (end == offset)
3858 +- goto err;
3859 ++ goto discard_fq;
3860 +
3861 ++ err = -ENOMEM;
3862 + /* Point into the IP datagram 'data' part. */
3863 + if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
3864 +- goto err;
3865 +-
3866 +- if (pskb_trim_rcsum(skb, end - offset))
3867 +- goto err;
3868 +-
3869 +- /* Find out which fragments are in front and at the back of us
3870 +- * in the chain of fragments so far. We must know where to put
3871 +- * this fragment, right?
3872 +- */
3873 +- prev = fq->q.fragments_tail;
3874 +- if (!prev || FRAG6_CB(prev)->offset < offset) {
3875 +- next = NULL;
3876 +- goto found;
3877 +- }
3878 +- prev = NULL;
3879 +- for (next = fq->q.fragments; next != NULL; next = next->next) {
3880 +- if (FRAG6_CB(next)->offset >= offset)
3881 +- break; /* bingo! */
3882 +- prev = next;
3883 +- }
3884 +-
3885 +-found:
3886 +- /* RFC5722, Section 4, amended by Errata ID : 3089
3887 +- * When reassembling an IPv6 datagram, if
3888 +- * one or more its constituent fragments is determined to be an
3889 +- * overlapping fragment, the entire datagram (and any constituent
3890 +- * fragments) MUST be silently discarded.
3891 +- */
3892 +-
3893 +- /* Check for overlap with preceding fragment. */
3894 +- if (prev &&
3895 +- (FRAG6_CB(prev)->offset + prev->len) > offset)
3896 + goto discard_fq;
3897 +
3898 +- /* Look for overlap with succeeding segment. */
3899 +- if (next && FRAG6_CB(next)->offset < end)
3900 ++ err = pskb_trim_rcsum(skb, end - offset);
3901 ++ if (err)
3902 + goto discard_fq;
3903 +
3904 +- FRAG6_CB(skb)->offset = offset;
3905 ++ /* Note : skb->rbnode and skb->dev share the same location. */
3906 ++ dev = skb->dev;
3907 ++	/* Makes sure compiler won't do silly aliasing games */
3908 ++ barrier();
3909 +
3910 +- /* Insert this fragment in the chain of fragments. */
3911 +- skb->next = next;
3912 +- if (!next)
3913 +- fq->q.fragments_tail = skb;
3914 +- if (prev)
3915 +- prev->next = skb;
3916 +- else
3917 +- fq->q.fragments = skb;
3918 ++ prev_tail = fq->q.fragments_tail;
3919 ++ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
3920 ++ if (err)
3921 ++ goto insert_error;
3922 +
3923 +- dev = skb->dev;
3924 +- if (dev) {
3925 ++ if (dev)
3926 + fq->iif = dev->ifindex;
3927 +- skb->dev = NULL;
3928 +- }
3929 ++
3930 + fq->q.stamp = skb->tstamp;
3931 + fq->q.meat += skb->len;
3932 + fq->ecn |= ecn;
3933 +@@ -313,44 +216,48 @@ found:
3934 +
3935 + if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
3936 + fq->q.meat == fq->q.len) {
3937 +- int res;
3938 + unsigned long orefdst = skb->_skb_refdst;
3939 +
3940 + skb->_skb_refdst = 0UL;
3941 +- res = ip6_frag_reasm(fq, prev, dev);
3942 ++ err = ip6_frag_reasm(fq, skb, prev_tail, dev);
3943 + skb->_skb_refdst = orefdst;
3944 +- return res;
3945 ++ return err;
3946 + }
3947 +
3948 + skb_dst_drop(skb);
3949 +- return -1;
3950 ++ return -EINPROGRESS;
3951 +
3952 ++insert_error:
3953 ++ if (err == IPFRAG_DUP) {
3954 ++ kfree_skb(skb);
3955 ++ return -EINVAL;
3956 ++ }
3957 ++ err = -EINVAL;
3958 ++ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
3959 ++ IPSTATS_MIB_REASM_OVERLAPS);
3960 + discard_fq:
3961 + inet_frag_kill(&fq->q);
3962 +-err:
3963 + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
3964 + IPSTATS_MIB_REASMFAILS);
3965 ++err:
3966 + kfree_skb(skb);
3967 +- return -1;
3968 ++ return err;
3969 + }
3970 +
3971 + /*
3972 + * Check if this packet is complete.
3973 +- * Returns NULL on failure by any reason, and pointer
3974 +- * to current nexthdr field in reassembled frame.
3975 + *
3976 + * It is called with locked fq, and caller must check that
3977 + * queue is eligible for reassembly i.e. it is not COMPLETE,
3978 + * the last and the first frames arrived and all the bits are here.
3979 + */
3980 +-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
3981 +- struct net_device *dev)
3982 ++static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
3983 ++ struct sk_buff *prev_tail, struct net_device *dev)
3984 + {
3985 + struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
3986 +- struct sk_buff *fp, *head = fq->q.fragments;
3987 +- int payload_len, delta;
3988 + unsigned int nhoff;
3989 +- int sum_truesize;
3990 ++ void *reasm_data;
3991 ++ int payload_len;
3992 + u8 ecn;
3993 +
3994 + inet_frag_kill(&fq->q);
3995 +@@ -359,120 +266,40 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
3996 + if (unlikely(ecn == 0xff))
3997 + goto out_fail;
3998 +
3999 +- /* Make the one we just received the head. */
4000 +- if (prev) {
4001 +- head = prev->next;
4002 +- fp = skb_clone(head, GFP_ATOMIC);
4003 +-
4004 +- if (!fp)
4005 +- goto out_oom;
4006 +-
4007 +- fp->next = head->next;
4008 +- if (!fp->next)
4009 +- fq->q.fragments_tail = fp;
4010 +- prev->next = fp;
4011 +-
4012 +- skb_morph(head, fq->q.fragments);
4013 +- head->next = fq->q.fragments->next;
4014 +-
4015 +- consume_skb(fq->q.fragments);
4016 +- fq->q.fragments = head;
4017 +- }
4018 +-
4019 +- WARN_ON(head == NULL);
4020 +- WARN_ON(FRAG6_CB(head)->offset != 0);
4021 ++ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
4022 ++ if (!reasm_data)
4023 ++ goto out_oom;
4024 +
4025 +- /* Unfragmented part is taken from the first segment. */
4026 +- payload_len = ((head->data - skb_network_header(head)) -
4027 ++ payload_len = ((skb->data - skb_network_header(skb)) -
4028 + sizeof(struct ipv6hdr) + fq->q.len -
4029 + sizeof(struct frag_hdr));
4030 + if (payload_len > IPV6_MAXPLEN)
4031 + goto out_oversize;
4032 +
4033 +- delta = - head->truesize;
4034 +-
4035 +- /* Head of list must not be cloned. */
4036 +- if (skb_unclone(head, GFP_ATOMIC))
4037 +- goto out_oom;
4038 +-
4039 +- delta += head->truesize;
4040 +- if (delta)
4041 +- add_frag_mem_limit(fq->q.net, delta);
4042 +-
4043 +- /* If the first fragment is fragmented itself, we split
4044 +- * it to two chunks: the first with data and paged part
4045 +- * and the second, holding only fragments. */
4046 +- if (skb_has_frag_list(head)) {
4047 +- struct sk_buff *clone;
4048 +- int i, plen = 0;
4049 +-
4050 +- clone = alloc_skb(0, GFP_ATOMIC);
4051 +- if (!clone)
4052 +- goto out_oom;
4053 +- clone->next = head->next;
4054 +- head->next = clone;
4055 +- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
4056 +- skb_frag_list_init(head);
4057 +- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
4058 +- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
4059 +- clone->len = clone->data_len = head->data_len - plen;
4060 +- head->data_len -= clone->len;
4061 +- head->len -= clone->len;
4062 +- clone->csum = 0;
4063 +- clone->ip_summed = head->ip_summed;
4064 +- add_frag_mem_limit(fq->q.net, clone->truesize);
4065 +- }
4066 +-
4067 + /* We have to remove fragment header from datagram and to relocate
4068 + * header in order to calculate ICV correctly. */
4069 + nhoff = fq->nhoffset;
4070 +- skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
4071 +- memmove(head->head + sizeof(struct frag_hdr), head->head,
4072 +- (head->data - head->head) - sizeof(struct frag_hdr));
4073 +- if (skb_mac_header_was_set(head))
4074 +- head->mac_header += sizeof(struct frag_hdr);
4075 +- head->network_header += sizeof(struct frag_hdr);
4076 +-
4077 +- skb_reset_transport_header(head);
4078 +- skb_push(head, head->data - skb_network_header(head));
4079 +-
4080 +- sum_truesize = head->truesize;
4081 +- for (fp = head->next; fp;) {
4082 +- bool headstolen;
4083 +- int delta;
4084 +- struct sk_buff *next = fp->next;
4085 +-
4086 +- sum_truesize += fp->truesize;
4087 +- if (head->ip_summed != fp->ip_summed)
4088 +- head->ip_summed = CHECKSUM_NONE;
4089 +- else if (head->ip_summed == CHECKSUM_COMPLETE)
4090 +- head->csum = csum_add(head->csum, fp->csum);
4091 +-
4092 +- if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
4093 +- kfree_skb_partial(fp, headstolen);
4094 +- } else {
4095 +- if (!skb_shinfo(head)->frag_list)
4096 +- skb_shinfo(head)->frag_list = fp;
4097 +- head->data_len += fp->len;
4098 +- head->len += fp->len;
4099 +- head->truesize += fp->truesize;
4100 +- }
4101 +- fp = next;
4102 +- }
4103 +- sub_frag_mem_limit(fq->q.net, sum_truesize);
4104 ++ skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
4105 ++ memmove(skb->head + sizeof(struct frag_hdr), skb->head,
4106 ++ (skb->data - skb->head) - sizeof(struct frag_hdr));
4107 ++ if (skb_mac_header_was_set(skb))
4108 ++ skb->mac_header += sizeof(struct frag_hdr);
4109 ++ skb->network_header += sizeof(struct frag_hdr);
4110 ++
4111 ++ skb_reset_transport_header(skb);
4112 ++
4113 ++ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
4114 +
4115 +- head->next = NULL;
4116 +- head->dev = dev;
4117 +- head->tstamp = fq->q.stamp;
4118 +- ipv6_hdr(head)->payload_len = htons(payload_len);
4119 +- ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
4120 +- IP6CB(head)->nhoff = nhoff;
4121 +- IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
4122 +- IP6CB(head)->frag_max_size = fq->q.max_size;
4123 ++ skb->dev = dev;
4124 ++ ipv6_hdr(skb)->payload_len = htons(payload_len);
4125 ++ ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
4126 ++ IP6CB(skb)->nhoff = nhoff;
4127 ++ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
4128 ++ IP6CB(skb)->frag_max_size = fq->q.max_size;
4129 +
4130 + /* Yes, and fold redundant checksum back. 8) */
4131 +- skb_postpush_rcsum(head, skb_network_header(head),
4132 +- skb_network_header_len(head));
4133 ++ skb_postpush_rcsum(skb, skb_network_header(skb),
4134 ++ skb_network_header_len(skb));
4135 +
4136 + rcu_read_lock();
4137 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
4138 +@@ -480,6 +307,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
4139 + fq->q.fragments = NULL;
4140 + fq->q.rb_fragments = RB_ROOT;
4141 + fq->q.fragments_tail = NULL;
4142 ++ fq->q.last_run_head = NULL;
4143 + return 1;
4144 +
4145 + out_oversize:
4146 +@@ -491,6 +319,7 @@ out_fail:
4147 + rcu_read_lock();
4148 + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
4149 + rcu_read_unlock();
4150 ++ inet_frag_kill(&fq->q);
4151 + return -1;
4152 + }
4153 +
4154 +@@ -529,22 +358,26 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
4155 + return 1;
4156 + }
4157 +
4158 +- if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
4159 +- fhdr->frag_off & htons(IP6_MF))
4160 +- goto fail_hdr;
4161 +-
4162 + iif = skb->dev ? skb->dev->ifindex : 0;
4163 + fq = fq_find(net, fhdr->identification, hdr, iif);
4164 + if (fq) {
4165 ++ u32 prob_offset = 0;
4166 + int ret;
4167 +
4168 + spin_lock(&fq->q.lock);
4169 +
4170 + fq->iif = iif;
4171 +- ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
4172 ++ ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff,
4173 ++ &prob_offset);
4174 +
4175 + spin_unlock(&fq->q.lock);
4176 + inet_frag_put(&fq->q);
4177 ++ if (prob_offset) {
4178 ++ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
4179 ++ IPSTATS_MIB_INHDRERRORS);
4180 ++ /* icmpv6_param_prob() calls kfree_skb(skb) */
4181 ++ icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
4182 ++ }
4183 + return ret;
4184 + }
4185 +
4186 +@@ -712,42 +545,19 @@ static struct pernet_operations ip6_frags_ops = {
4187 + .exit = ipv6_frags_exit_net,
4188 + };
4189 +
4190 +-static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
4191 +-{
4192 +- return jhash2(data,
4193 +- sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
4194 +-}
4195 +-
4196 +-static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
4197 +-{
4198 +- const struct inet_frag_queue *fq = data;
4199 +-
4200 +- return jhash2((const u32 *)&fq->key.v6,
4201 +- sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
4202 +-}
4203 +-
4204 +-static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
4205 +-{
4206 +- const struct frag_v6_compare_key *key = arg->key;
4207 +- const struct inet_frag_queue *fq = ptr;
4208 +-
4209 +- return !!memcmp(&fq->key, key, sizeof(*key));
4210 +-}
4211 +-
4212 +-const struct rhashtable_params ip6_rhash_params = {
4213 ++static const struct rhashtable_params ip6_rhash_params = {
4214 + .head_offset = offsetof(struct inet_frag_queue, node),
4215 +- .hashfn = ip6_key_hashfn,
4216 +- .obj_hashfn = ip6_obj_hashfn,
4217 +- .obj_cmpfn = ip6_obj_cmpfn,
4218 ++ .hashfn = ip6frag_key_hashfn,
4219 ++ .obj_hashfn = ip6frag_obj_hashfn,
4220 ++ .obj_cmpfn = ip6frag_obj_cmpfn,
4221 + .automatic_shrinking = true,
4222 + };
4223 +-EXPORT_SYMBOL(ip6_rhash_params);
4224 +
4225 + int __init ipv6_frag_init(void)
4226 + {
4227 + int ret;
4228 +
4229 +- ip6_frags.constructor = ip6_frag_init;
4230 ++ ip6_frags.constructor = ip6frag_init;
4231 + ip6_frags.destructor = NULL;
4232 + ip6_frags.qsize = sizeof(struct frag_queue);
4233 + ip6_frags.frag_expire = ip6_frag_expire;
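(In the reassembly.c rework above, ip6_frag_queue() no longer calls icmpv6_param_prob() itself; it reports the offending offset through the prob_offset out-parameter, and ipv6_frag_rcv() sends the ICMPv6 error only after fq->q.lock has been dropped. A minimal sketch of that out-parameter pattern, with invented names:)

/* The worker reports a problem offset instead of acting on it while a
 * lock is held; the caller reacts after unlocking. */
#include <stdio.h>

static int queue_fragment(int hdr_value, unsigned int *prob_offset)
{
	if (hdr_value < 0) {        /* malformed header field */
		*prob_offset = 4;   /* offset of the bad field */
		return -1;          /* caller owns the error reporting */
	}
	return 0;
}

int main(void)
{
	unsigned int prob_offset = 0;

	/* lock(q) ... */
	int ret = queue_fragment(-1, &prob_offset);
	/* ... unlock(q) */

	if (prob_offset)            /* report only after the lock is dropped */
		printf("send param problem at offset %u (ret=%d)\n",
		       prob_offset, ret);
	return 0;
}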
4234 +diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
4235 +index 4d82fe7d627c..284276b3e0b4 100644
4236 +--- a/net/mac80211/driver-ops.h
4237 ++++ b/net/mac80211/driver-ops.h
4238 +@@ -1164,6 +1164,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
4239 + {
4240 + struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
4241 +
4242 ++ if (local->in_reconfig)
4243 ++ return;
4244 ++
4245 + if (!check_sdata_in_driver(sdata))
4246 + return;
4247 +
4248 +diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
4249 +index 285f8797c26a..0171b27a2b81 100644
4250 +--- a/net/openvswitch/conntrack.c
4251 ++++ b/net/openvswitch/conntrack.c
4252 +@@ -23,6 +23,7 @@
4253 + #include <net/netfilter/nf_conntrack_seqadj.h>
4254 + #include <net/netfilter/nf_conntrack_zones.h>
4255 + #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
4256 ++#include <net/ipv6_frag.h>
4257 +
4258 + #ifdef CONFIG_NF_NAT_NEEDED
4259 + #include <linux/netfilter/nf_nat.h>
4260 +diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
4261 +index 29d6699d5a06..55b4c0dc2b93 100644
4262 +--- a/scripts/mod/file2alias.c
4263 ++++ b/scripts/mod/file2alias.c
4264 +@@ -47,49 +47,9 @@ typedef struct {
4265 + struct devtable {
4266 + const char *device_id; /* name of table, __mod_<name>__*_device_table. */
4267 + unsigned long id_size;
4268 +- void *function;
4269 ++ int (*do_entry)(const char *filename, void *symval, char *alias);
4270 + };
4271 +
4272 +-#define ___cat(a,b) a ## b
4273 +-#define __cat(a,b) ___cat(a,b)
4274 +-
4275 +-/* we need some special handling for this host tool running eventually on
4276 +- * Darwin. The Mach-O section handling is a bit different than ELF section
4277 +- * handling. The differnces in detail are:
4278 +- * a) we have segments which have sections
4279 +- * b) we need a API call to get the respective section symbols */
4280 +-#if defined(__MACH__)
4281 +-#include <mach-o/getsect.h>
4282 +-
4283 +-#define INIT_SECTION(name) do { \
4284 +- unsigned long name ## _len; \
4285 +- char *__cat(pstart_,name) = getsectdata("__TEXT", \
4286 +- #name, &__cat(name,_len)); \
4287 +- char *__cat(pstop_,name) = __cat(pstart_,name) + \
4288 +- __cat(name, _len); \
4289 +- __cat(__start_,name) = (void *)__cat(pstart_,name); \
4290 +- __cat(__stop_,name) = (void *)__cat(pstop_,name); \
4291 +- } while (0)
4292 +-#define SECTION(name) __attribute__((section("__TEXT, " #name)))
4293 +-
4294 +-struct devtable **__start___devtable, **__stop___devtable;
4295 +-#else
4296 +-#define INIT_SECTION(name) /* no-op for ELF */
4297 +-#define SECTION(name) __attribute__((section(#name)))
4298 +-
4299 +-/* We construct a table of pointers in an ELF section (pointers generally
4300 +- * go unpadded by gcc). ld creates boundary syms for us. */
4301 +-extern struct devtable *__start___devtable[], *__stop___devtable[];
4302 +-#endif /* __MACH__ */
4303 +-
4304 +-#if !defined(__used)
4305 +-# if __GNUC__ == 3 && __GNUC_MINOR__ < 3
4306 +-# define __used __attribute__((__unused__))
4307 +-# else
4308 +-# define __used __attribute__((__used__))
4309 +-# endif
4310 +-#endif
4311 +-
4312 + /* Define a variable f that holds the value of field f of struct devid
4313 + * based at address m.
4314 + */
4315 +@@ -102,16 +62,6 @@ extern struct devtable *__start___devtable[], *__stop___devtable[];
4316 + #define DEF_FIELD_ADDR(m, devid, f) \
4317 + typeof(((struct devid *)0)->f) *f = ((m) + OFF_##devid##_##f)
4318 +
4319 +-/* Add a table entry. We test function type matches while we're here. */
4320 +-#define ADD_TO_DEVTABLE(device_id, type, function) \
4321 +- static struct devtable __cat(devtable,__LINE__) = { \
4322 +- device_id + 0*sizeof((function)((const char *)NULL, \
4323 +- (void *)NULL, \
4324 +- (char *)NULL)), \
4325 +- SIZE_##type, (function) }; \
4326 +- static struct devtable *SECTION(__devtable) __used \
4327 +- __cat(devtable_ptr,__LINE__) = &__cat(devtable,__LINE__)
4328 +-
4329 + #define ADD(str, sep, cond, field) \
4330 + do { \
4331 + strcat(str, sep); \
4332 +@@ -431,7 +381,6 @@ static int do_hid_entry(const char *filename,
4333 +
4334 + return 1;
4335 + }
4336 +-ADD_TO_DEVTABLE("hid", hid_device_id, do_hid_entry);
4337 +
4338 + /* Looks like: ieee1394:venNmoNspNverN */
4339 + static int do_ieee1394_entry(const char *filename,
4340 +@@ -456,7 +405,6 @@ static int do_ieee1394_entry(const char *filename,
4341 + add_wildcard(alias);
4342 + return 1;
4343 + }
4344 +-ADD_TO_DEVTABLE("ieee1394", ieee1394_device_id, do_ieee1394_entry);
4345 +
4346 + /* Looks like: pci:vNdNsvNsdNbcNscNiN. */
4347 + static int do_pci_entry(const char *filename,
4348 +@@ -500,7 +448,6 @@ static int do_pci_entry(const char *filename,
4349 + add_wildcard(alias);
4350 + return 1;
4351 + }
4352 +-ADD_TO_DEVTABLE("pci", pci_device_id, do_pci_entry);
4353 +
4354 + /* looks like: "ccw:tNmNdtNdmN" */
4355 + static int do_ccw_entry(const char *filename,
4356 +@@ -524,7 +471,6 @@ static int do_ccw_entry(const char *filename,
4357 + add_wildcard(alias);
4358 + return 1;
4359 + }
4360 +-ADD_TO_DEVTABLE("ccw", ccw_device_id, do_ccw_entry);
4361 +
4362 + /* looks like: "ap:tN" */
4363 + static int do_ap_entry(const char *filename,
4364 +@@ -535,7 +481,6 @@ static int do_ap_entry(const char *filename,
4365 + sprintf(alias, "ap:t%02X*", dev_type);
4366 + return 1;
4367 + }
4368 +-ADD_TO_DEVTABLE("ap", ap_device_id, do_ap_entry);
4369 +
4370 + /* looks like: "css:tN" */
4371 + static int do_css_entry(const char *filename,
4372 +@@ -546,7 +491,6 @@ static int do_css_entry(const char *filename,
4373 + sprintf(alias, "css:t%01X", type);
4374 + return 1;
4375 + }
4376 +-ADD_TO_DEVTABLE("css", css_device_id, do_css_entry);
4377 +
4378 + /* Looks like: "serio:tyNprNidNexN" */
4379 + static int do_serio_entry(const char *filename,
4380 +@@ -566,7 +510,6 @@ static int do_serio_entry(const char *filename,
4381 + add_wildcard(alias);
4382 + return 1;
4383 + }
4384 +-ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
4385 +
4386 + /* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or
4387 + * "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if)
4388 +@@ -604,7 +547,6 @@ static int do_acpi_entry(const char *filename,
4389 + }
4390 + return 1;
4391 + }
4392 +-ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
4393 +
4394 + /* looks like: "pnp:dD" */
4395 + static void do_pnp_device_entry(void *symval, unsigned long size,
4396 +@@ -725,7 +667,6 @@ static int do_pcmcia_entry(const char *filename,
4397 + add_wildcard(alias);
4398 + return 1;
4399 + }
4400 +-ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
4401 +
4402 + static int do_vio_entry(const char *filename, void *symval,
4403 + char *alias)
4404 +@@ -745,7 +686,6 @@ static int do_vio_entry(const char *filename, void *symval,
4405 + add_wildcard(alias);
4406 + return 1;
4407 + }
4408 +-ADD_TO_DEVTABLE("vio", vio_device_id, do_vio_entry);
4409 +
4410 + #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
4411 +
4412 +@@ -818,7 +758,6 @@ static int do_input_entry(const char *filename, void *symval,
4413 + do_input(alias, *swbit, 0, INPUT_DEVICE_ID_SW_MAX);
4414 + return 1;
4415 + }
4416 +-ADD_TO_DEVTABLE("input", input_device_id, do_input_entry);
4417 +
4418 + static int do_eisa_entry(const char *filename, void *symval,
4419 + char *alias)
4420 +@@ -830,7 +769,6 @@ static int do_eisa_entry(const char *filename, void *symval,
4421 + strcat(alias, "*");
4422 + return 1;
4423 + }
4424 +-ADD_TO_DEVTABLE("eisa", eisa_device_id, do_eisa_entry);
4425 +
4426 + /* Looks like: parisc:tNhvNrevNsvN */
4427 + static int do_parisc_entry(const char *filename, void *symval,
4428 +@@ -850,7 +788,6 @@ static int do_parisc_entry(const char *filename, void *symval,
4429 + add_wildcard(alias);
4430 + return 1;
4431 + }
4432 +-ADD_TO_DEVTABLE("parisc", parisc_device_id, do_parisc_entry);
4433 +
4434 + /* Looks like: sdio:cNvNdN. */
4435 + static int do_sdio_entry(const char *filename,
4436 +@@ -867,7 +804,6 @@ static int do_sdio_entry(const char *filename,
4437 + add_wildcard(alias);
4438 + return 1;
4439 + }
4440 +-ADD_TO_DEVTABLE("sdio", sdio_device_id, do_sdio_entry);
4441 +
4442 + /* Looks like: ssb:vNidNrevN. */
4443 + static int do_ssb_entry(const char *filename,
4444 +@@ -884,7 +820,6 @@ static int do_ssb_entry(const char *filename,
4445 + add_wildcard(alias);
4446 + return 1;
4447 + }
4448 +-ADD_TO_DEVTABLE("ssb", ssb_device_id, do_ssb_entry);
4449 +
4450 + /* Looks like: bcma:mNidNrevNclN. */
4451 + static int do_bcma_entry(const char *filename,
4452 +@@ -903,7 +838,6 @@ static int do_bcma_entry(const char *filename,
4453 + add_wildcard(alias);
4454 + return 1;
4455 + }
4456 +-ADD_TO_DEVTABLE("bcma", bcma_device_id, do_bcma_entry);
4457 +
4458 + /* Looks like: virtio:dNvN */
4459 + static int do_virtio_entry(const char *filename, void *symval,
4460 +@@ -919,7 +853,6 @@ static int do_virtio_entry(const char *filename, void *symval,
4461 + add_wildcard(alias);
4462 + return 1;
4463 + }
4464 +-ADD_TO_DEVTABLE("virtio", virtio_device_id, do_virtio_entry);
4465 +
4466 + /*
4467 + * Looks like: vmbus:guid
4468 +@@ -942,7 +875,6 @@ static int do_vmbus_entry(const char *filename, void *symval,
4469 +
4470 + return 1;
4471 + }
4472 +-ADD_TO_DEVTABLE("vmbus", hv_vmbus_device_id, do_vmbus_entry);
4473 +
4474 + /* Looks like: i2c:S */
4475 + static int do_i2c_entry(const char *filename, void *symval,
4476 +@@ -953,7 +885,6 @@ static int do_i2c_entry(const char *filename, void *symval,
4477 +
4478 + return 1;
4479 + }
4480 +-ADD_TO_DEVTABLE("i2c", i2c_device_id, do_i2c_entry);
4481 +
4482 + /* Looks like: spi:S */
4483 + static int do_spi_entry(const char *filename, void *symval,
4484 +@@ -964,7 +895,6 @@ static int do_spi_entry(const char *filename, void *symval,
4485 +
4486 + return 1;
4487 + }
4488 +-ADD_TO_DEVTABLE("spi", spi_device_id, do_spi_entry);
4489 +
4490 + static const struct dmifield {
4491 + const char *prefix;
4492 +@@ -1019,7 +949,6 @@ static int do_dmi_entry(const char *filename, void *symval,
4493 + strcat(alias, ":");
4494 + return 1;
4495 + }
4496 +-ADD_TO_DEVTABLE("dmi", dmi_system_id, do_dmi_entry);
4497 +
4498 + static int do_platform_entry(const char *filename,
4499 + void *symval, char *alias)
4500 +@@ -1028,7 +957,6 @@ static int do_platform_entry(const char *filename,
4501 + sprintf(alias, PLATFORM_MODULE_PREFIX "%s", *name);
4502 + return 1;
4503 + }
4504 +-ADD_TO_DEVTABLE("platform", platform_device_id, do_platform_entry);
4505 +
4506 + static int do_mdio_entry(const char *filename,
4507 + void *symval, char *alias)
4508 +@@ -1053,7 +981,6 @@ static int do_mdio_entry(const char *filename,
4509 +
4510 + return 1;
4511 + }
4512 +-ADD_TO_DEVTABLE("mdio", mdio_device_id, do_mdio_entry);
4513 +
4514 + /* Looks like: zorro:iN. */
4515 + static int do_zorro_entry(const char *filename, void *symval,
4516 +@@ -1064,7 +991,6 @@ static int do_zorro_entry(const char *filename, void *symval,
4517 + ADD(alias, "i", id != ZORRO_WILDCARD, id);
4518 + return 1;
4519 + }
4520 +-ADD_TO_DEVTABLE("zorro", zorro_device_id, do_zorro_entry);
4521 +
4522 + /* looks like: "pnp:dD" */
4523 + static int do_isapnp_entry(const char *filename,
4524 +@@ -1080,7 +1006,6 @@ static int do_isapnp_entry(const char *filename,
4525 + (function >> 12) & 0x0f, (function >> 8) & 0x0f);
4526 + return 1;
4527 + }
4528 +-ADD_TO_DEVTABLE("isapnp", isapnp_device_id, do_isapnp_entry);
4529 +
4530 + /* Looks like: "ipack:fNvNdN". */
4531 + static int do_ipack_entry(const char *filename,
4532 +@@ -1096,7 +1021,6 @@ static int do_ipack_entry(const char *filename,
4533 + add_wildcard(alias);
4534 + return 1;
4535 + }
4536 +-ADD_TO_DEVTABLE("ipack", ipack_device_id, do_ipack_entry);
4537 +
4538 + /*
4539 + * Append a match expression for a single masked hex digit.
4540 +@@ -1167,7 +1091,6 @@ static int do_amba_entry(const char *filename,
4541 +
4542 + return 1;
4543 + }
4544 +-ADD_TO_DEVTABLE("amba", amba_id, do_amba_entry);
4545 +
4546 + /*
4547 + * looks like: "mipscdmm:tN"
4548 +@@ -1183,7 +1106,6 @@ static int do_mips_cdmm_entry(const char *filename,
4549 + sprintf(alias, "mipscdmm:t%02X*", type);
4550 + return 1;
4551 + }
4552 +-ADD_TO_DEVTABLE("mipscdmm", mips_cdmm_device_id, do_mips_cdmm_entry);
4553 +
4554 + /* LOOKS like cpu:type:x86,venVVVVfamFFFFmodMMMM:feature:*,FEAT,*
4555 + * All fields are numbers. It would be nicer to use strings for vendor
4556 +@@ -1208,7 +1130,6 @@ static int do_x86cpu_entry(const char *filename, void *symval,
4557 + sprintf(alias + strlen(alias), "%04X*", feature);
4558 + return 1;
4559 + }
4560 +-ADD_TO_DEVTABLE("x86cpu", x86_cpu_id, do_x86cpu_entry);
4561 +
4562 + /* LOOKS like cpu:type:*:feature:*FEAT* */
4563 + static int do_cpu_entry(const char *filename, void *symval, char *alias)
4564 +@@ -1218,7 +1139,6 @@ static int do_cpu_entry(const char *filename, void *symval, char *alias)
4565 + sprintf(alias, "cpu:type:*:feature:*%04X*", feature);
4566 + return 1;
4567 + }
4568 +-ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry);
4569 +
4570 + /* Looks like: mei:S:uuid:N:* */
4571 + static int do_mei_entry(const char *filename, void *symval,
4572 +@@ -1237,7 +1157,6 @@ static int do_mei_entry(const char *filename, void *symval,
4573 +
4574 + return 1;
4575 + }
4576 +-ADD_TO_DEVTABLE("mei", mei_cl_device_id, do_mei_entry);
4577 +
4578 + /* Looks like: rapidio:vNdNavNadN */
4579 + static int do_rio_entry(const char *filename,
4580 +@@ -1257,7 +1176,6 @@ static int do_rio_entry(const char *filename,
4581 + add_wildcard(alias);
4582 + return 1;
4583 + }
4584 +-ADD_TO_DEVTABLE("rapidio", rio_device_id, do_rio_entry);
4585 +
4586 + /* Looks like: ulpi:vNpN */
4587 + static int do_ulpi_entry(const char *filename, void *symval,
4588 +@@ -1270,7 +1188,6 @@ static int do_ulpi_entry(const char *filename, void *symval,
4589 +
4590 + return 1;
4591 + }
4592 +-ADD_TO_DEVTABLE("ulpi", ulpi_device_id, do_ulpi_entry);
4593 +
4594 + /* Looks like: hdaudio:vNrNaN */
4595 + static int do_hda_entry(const char *filename, void *symval, char *alias)
4596 +@@ -1287,7 +1204,6 @@ static int do_hda_entry(const char *filename, void *symval, char *alias)
4597 + add_wildcard(alias);
4598 + return 1;
4599 + }
4600 +-ADD_TO_DEVTABLE("hdaudio", hda_device_id, do_hda_entry);
4601 +
4602 + /* Looks like: fsl-mc:vNdN */
4603 + static int do_fsl_mc_entry(const char *filename, void *symval,
4604 +@@ -1299,7 +1215,6 @@ static int do_fsl_mc_entry(const char *filename, void *symval,
4605 + sprintf(alias, "fsl-mc:v%08Xd%s", vendor, *obj_type);
4606 + return 1;
4607 + }
4608 +-ADD_TO_DEVTABLE("fslmc", fsl_mc_device_id, do_fsl_mc_entry);
4609 +
4610 + /* Does namelen bytes of name exactly match the symbol? */
4611 + static bool sym_is(const char *name, unsigned namelen, const char *symbol)
4612 +@@ -1313,12 +1228,11 @@ static bool sym_is(const char *name, unsigned namelen, const char *symbol)
4613 + static void do_table(void *symval, unsigned long size,
4614 + unsigned long id_size,
4615 + const char *device_id,
4616 +- void *function,
4617 ++ int (*do_entry)(const char *filename, void *symval, char *alias),
4618 + struct module *mod)
4619 + {
4620 + unsigned int i;
4621 + char alias[500];
4622 +- int (*do_entry)(const char *, void *entry, char *alias) = function;
4623 +
4624 + device_id_check(mod->name, device_id, size, id_size, symval);
4625 + /* Leave last one: it's the terminator. */
4626 +@@ -1332,6 +1246,44 @@ static void do_table(void *symval, unsigned long size,
4627 + }
4628 + }
4629 +
4630 ++static const struct devtable devtable[] = {
4631 ++ {"hid", SIZE_hid_device_id, do_hid_entry},
4632 ++ {"ieee1394", SIZE_ieee1394_device_id, do_ieee1394_entry},
4633 ++ {"pci", SIZE_pci_device_id, do_pci_entry},
4634 ++ {"ccw", SIZE_ccw_device_id, do_ccw_entry},
4635 ++ {"ap", SIZE_ap_device_id, do_ap_entry},
4636 ++ {"css", SIZE_css_device_id, do_css_entry},
4637 ++ {"serio", SIZE_serio_device_id, do_serio_entry},
4638 ++ {"acpi", SIZE_acpi_device_id, do_acpi_entry},
4639 ++ {"pcmcia", SIZE_pcmcia_device_id, do_pcmcia_entry},
4640 ++ {"vio", SIZE_vio_device_id, do_vio_entry},
4641 ++ {"input", SIZE_input_device_id, do_input_entry},
4642 ++ {"eisa", SIZE_eisa_device_id, do_eisa_entry},
4643 ++ {"parisc", SIZE_parisc_device_id, do_parisc_entry},
4644 ++ {"sdio", SIZE_sdio_device_id, do_sdio_entry},
4645 ++ {"ssb", SIZE_ssb_device_id, do_ssb_entry},
4646 ++ {"bcma", SIZE_bcma_device_id, do_bcma_entry},
4647 ++ {"virtio", SIZE_virtio_device_id, do_virtio_entry},
4648 ++ {"vmbus", SIZE_hv_vmbus_device_id, do_vmbus_entry},
4649 ++ {"i2c", SIZE_i2c_device_id, do_i2c_entry},
4650 ++ {"spi", SIZE_spi_device_id, do_spi_entry},
4651 ++ {"dmi", SIZE_dmi_system_id, do_dmi_entry},
4652 ++ {"platform", SIZE_platform_device_id, do_platform_entry},
4653 ++ {"mdio", SIZE_mdio_device_id, do_mdio_entry},
4654 ++ {"zorro", SIZE_zorro_device_id, do_zorro_entry},
4655 ++ {"isapnp", SIZE_isapnp_device_id, do_isapnp_entry},
4656 ++ {"ipack", SIZE_ipack_device_id, do_ipack_entry},
4657 ++ {"amba", SIZE_amba_id, do_amba_entry},
4658 ++ {"mipscdmm", SIZE_mips_cdmm_device_id, do_mips_cdmm_entry},
4659 ++ {"x86cpu", SIZE_x86_cpu_id, do_x86cpu_entry},
4660 ++ {"cpu", SIZE_cpu_feature, do_cpu_entry},
4661 ++ {"mei", SIZE_mei_cl_device_id, do_mei_entry},
4662 ++ {"rapidio", SIZE_rio_device_id, do_rio_entry},
4663 ++ {"ulpi", SIZE_ulpi_device_id, do_ulpi_entry},
4664 ++ {"hdaudio", SIZE_hda_device_id, do_hda_entry},
4665 ++ {"fslmc", SIZE_fsl_mc_device_id, do_fsl_mc_entry},
4666 ++};
4667 ++
4668 + /* Create MODULE_ALIAS() statements.
4669 + * At this time, we cannot write the actual output C source yet,
4670 + * so we write into the mod->dev_table_buf buffer. */
4671 +@@ -1386,13 +1338,14 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
4672 + else if (sym_is(name, namelen, "pnp_card"))
4673 + do_pnp_card_entries(symval, sym->st_size, mod);
4674 + else {
4675 +- struct devtable **p;
4676 +- INIT_SECTION(__devtable);
4677 ++ int i;
4678 ++
4679 ++ for (i = 0; i < ARRAY_SIZE(devtable); i++) {
4680 ++ const struct devtable *p = &devtable[i];
4681 +
4682 +- for (p = __start___devtable; p < __stop___devtable; p++) {
4683 +- if (sym_is(name, namelen, (*p)->device_id)) {
4684 +- do_table(symval, sym->st_size, (*p)->id_size,
4685 +- (*p)->device_id, (*p)->function, mod);
4686 ++ if (sym_is(name, namelen, p->device_id)) {
4687 ++ do_table(symval, sym->st_size, p->id_size,
4688 ++ p->device_id, p->do_entry, mod);
4689 + break;
4690 + }
4691 + }
4692 +diff --git a/security/device_cgroup.c b/security/device_cgroup.c
4693 +index 5ef7e5240563..ea014df89428 100644
4694 +--- a/security/device_cgroup.c
4695 ++++ b/security/device_cgroup.c
4696 +@@ -569,7 +569,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
4697 + devcg->behavior == DEVCG_DEFAULT_ALLOW) {
4698 + rc = dev_exception_add(devcg, ex);
4699 + if (rc)
4700 +- break;
4701 ++ return rc;
4702 + } else {
4703 + /*
4704 + * in the other possible cases:
4705 +diff --git a/sound/core/info.c b/sound/core/info.c
4706 +index bcf6a48cc70d..5fb00437507b 100644
4707 +--- a/sound/core/info.c
4708 ++++ b/sound/core/info.c
4709 +@@ -722,8 +722,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent)
4710 + INIT_LIST_HEAD(&entry->children);
4711 + INIT_LIST_HEAD(&entry->list);
4712 + entry->parent = parent;
4713 +- if (parent)
4714 ++ if (parent) {
4715 ++ mutex_lock(&parent->access);
4716 + list_add_tail(&entry->list, &parent->children);
4717 ++ mutex_unlock(&parent->access);
4718 ++ }
4719 + return entry;
4720 + }
4721 +
4722 +@@ -805,7 +808,12 @@ void snd_info_free_entry(struct snd_info_entry * entry)
4723 + list_for_each_entry_safe(p, n, &entry->children, list)
4724 + snd_info_free_entry(p);
4725 +
4726 +- list_del(&entry->list);
4727 ++ p = entry->parent;
4728 ++ if (p) {
4729 ++ mutex_lock(&p->access);
4730 ++ list_del(&entry->list);
4731 ++ mutex_unlock(&p->access);
4732 ++ }
4733 + kfree(entry->name);
4734 + if (entry->private_free)
4735 + entry->private_free(entry);
4736 +diff --git a/sound/core/init.c b/sound/core/init.c
4737 +index 32ebe2f6bc59..dcb9199f5e4f 100644
4738 +--- a/sound/core/init.c
4739 ++++ b/sound/core/init.c
4740 +@@ -406,14 +406,7 @@ int snd_card_disconnect(struct snd_card *card)
4741 + card->shutdown = 1;
4742 + spin_unlock(&card->files_lock);
4743 +
4744 +- /* phase 1: disable fops (user space) operations for ALSA API */
4745 +- mutex_lock(&snd_card_mutex);
4746 +- snd_cards[card->number] = NULL;
4747 +- clear_bit(card->number, snd_cards_lock);
4748 +- mutex_unlock(&snd_card_mutex);
4749 +-
4750 +- /* phase 2: replace file->f_op with special dummy operations */
4751 +-
4752 ++ /* replace file->f_op with special dummy operations */
4753 + spin_lock(&card->files_lock);
4754 + list_for_each_entry(mfile, &card->files_list, list) {
4755 + /* it's critical part, use endless loop */
4756 +@@ -429,7 +422,7 @@ int snd_card_disconnect(struct snd_card *card)
4757 + }
4758 + spin_unlock(&card->files_lock);
4759 +
4760 +- /* phase 3: notify all connected devices about disconnection */
4761 ++ /* notify all connected devices about disconnection */
4762 + /* at this point, they cannot respond to any calls except release() */
4763 +
4764 + #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
4765 +@@ -445,6 +438,13 @@ int snd_card_disconnect(struct snd_card *card)
4766 + device_del(&card->card_dev);
4767 + card->registered = false;
4768 + }
4769 ++
4770 ++ /* disable fops (user space) operations for ALSA API */
4771 ++ mutex_lock(&snd_card_mutex);
4772 ++ snd_cards[card->number] = NULL;
4773 ++ clear_bit(card->number, snd_cards_lock);
4774 ++ mutex_unlock(&snd_card_mutex);
4775 ++
4776 + #ifdef CONFIG_PM
4777 + wake_up(&card->power_sleep);
4778 + #endif
4779 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4780 +index 9637d0bbdeb5..b9e720cb6f02 100644
4781 +--- a/sound/pci/hda/patch_realtek.c
4782 ++++ b/sound/pci/hda/patch_realtek.c
4783 +@@ -6743,6 +6743,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4784 + {0x12, 0x90a60140},
4785 + {0x14, 0x90170150},
4786 + {0x21, 0x02211020}),
4787 ++ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4788 ++ {0x21, 0x02211020}),
4789 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
4790 + {0x14, 0x90170110},
4791 + {0x21, 0x02211020}),
4792 +@@ -6853,6 +6855,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4793 + {0x21, 0x0221101f}),
4794 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4795 + ALC256_STANDARD_PINS),
4796 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4797 ++ {0x14, 0x90170110},
4798 ++ {0x1b, 0x01011020},
4799 ++ {0x21, 0x0221101f}),
4800 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
4801 + {0x14, 0x90170110},
4802 + {0x1b, 0x90a70130},
4803 +diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
4804 +index acc704bd3998..0b0ef3abc966 100644
4805 +--- a/tools/include/linux/bitops.h
4806 ++++ b/tools/include/linux/bitops.h
4807 +@@ -3,8 +3,6 @@
4808 + #define _TOOLS_LINUX_BITOPS_H_
4809 +
4810 + #include <asm/types.h>
4811 +-#include <linux/compiler.h>
4812 +-
4813 + #ifndef __WORDSIZE
4814 + #define __WORDSIZE (__SIZEOF_LONG__ * 8)
4815 + #endif
4816 +@@ -12,10 +10,9 @@
4817 + #ifndef BITS_PER_LONG
4818 + # define BITS_PER_LONG __WORDSIZE
4819 + #endif
4820 ++#include <linux/bits.h>
4821 ++#include <linux/compiler.h>
4822 +
4823 +-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
4824 +-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
4825 +-#define BITS_PER_BYTE 8
4826 + #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
4827 + #define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
4828 + #define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
4829 +diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
4830 +new file mode 100644
4831 +index 000000000000..2b7b532c1d51
4832 +--- /dev/null
4833 ++++ b/tools/include/linux/bits.h
4834 +@@ -0,0 +1,26 @@
4835 ++/* SPDX-License-Identifier: GPL-2.0 */
4836 ++#ifndef __LINUX_BITS_H
4837 ++#define __LINUX_BITS_H
4838 ++#include <asm/bitsperlong.h>
4839 ++
4840 ++#define BIT(nr) (1UL << (nr))
4841 ++#define BIT_ULL(nr) (1ULL << (nr))
4842 ++#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
4843 ++#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
4844 ++#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
4845 ++#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
4846 ++#define BITS_PER_BYTE 8
4847 ++
4848 ++/*
4849 ++ * Create a contiguous bitmask starting at bit position @l and ending at
4850 ++ * position @h. For example
4851 ++ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
4852 ++ */
4853 ++#define GENMASK(h, l) \
4854 ++ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
4855 ++
4856 ++#define GENMASK_ULL(h, l) \
4857 ++ (((~0ULL) - (1ULL << (l)) + 1) & \
4858 ++ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
4859 ++
4860 ++#endif /* __LINUX_BITS_H */
4861 +diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
4862 +index 50cd6228f506..df1dbee8d98d 100755
4863 +--- a/tools/perf/check-headers.sh
4864 ++++ b/tools/perf/check-headers.sh
4865 +@@ -11,6 +11,7 @@ include/uapi/linux/sched.h
4866 + include/uapi/linux/stat.h
4867 + include/uapi/linux/vhost.h
4868 + include/uapi/sound/asound.h
4869 ++include/linux/bits.h
4870 + include/linux/hash.h
4871 + include/uapi/linux/hw_breakpoint.h
4872 + arch/x86/include/asm/disabled-features.h