commit: 3e36410423095c3ba385b2850ebad08d7a842228
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Oct 3 17:45:26 2015 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Oct 3 17:45:26 2015 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3e364104

Linux patch 3.18.22

 0000_README              |    4 +
 1021_linux-3.18.22.patch | 2622 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2626 insertions(+)

diff --git a/0000_README b/0000_README
index 558f14f..7e934be 100644
--- a/0000_README
+++ b/0000_README
@@ -127,6 +127,10 @@ Patch: 1020_linux-3.18.21.patch
 From: http://www.kernel.org
 Desc: Linux 3.18.21
 
+Patch: 1021_linux-3.18.22.patch
+From: http://www.kernel.org
+Desc: Linux 3.18.22
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1021_linux-3.18.22.patch b/1021_linux-3.18.22.patch
new file mode 100644
index 0000000..0049146
--- /dev/null
+++ b/1021_linux-3.18.22.patch
@@ -0,0 +1,2622 @@
+diff --git a/Makefile b/Makefile
+index 6be90fab361b..7adbbbeeb421 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 18
+-SUBLEVEL = 21
++SUBLEVEL = 22
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index 81a02a8762b0..86825f8883de 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+ {
+ 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+ 		inject_abt32(vcpu, false, addr);
+-
+-	inject_abt64(vcpu, false, addr);
++	else
++		inject_abt64(vcpu, false, addr);
+ }
+ 
+ /**
+@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+ {
+ 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+ 		inject_abt32(vcpu, true, addr);
+-
+-	inject_abt64(vcpu, true, addr);
++	else
++		inject_abt64(vcpu, true, addr);
+ }
+ 
+ /**
+@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+ {
+ 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+ 		inject_undef32(vcpu);
+-
+-	inject_undef64(vcpu);
++	else
++		inject_undef64(vcpu);
+ }
+diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
+index 5251565e344b..a6576cf1e6d9 100644
+--- a/arch/mips/kernel/scall64-64.S
++++ b/arch/mips/kernel/scall64-64.S
+@@ -80,7 +80,7 @@ syscall_trace_entry:
+ 	SAVE_STATIC
+ 	move s0, t2
+ 	move a0, sp
+-	daddiu a1, v0, __NR_64_Linux
++	move a1, v0
+ 	jal syscall_trace_enter
+ 
+ 	bltz v0, 2f			# seccomp failed? Skip syscall
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index 77e74398b828..a8eb6575edc0 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
+ 	SAVE_STATIC
+ 	move s0, t2
+ 	move a0, sp
+-	daddiu a1, v0, __NR_N32_Linux
++	move a1, v0
+ 	jal syscall_trace_enter
+ 
+ 	bltz v0, 2f			# seccomp failed? Skip syscall
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index a94b82e8f156..69126184c609 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
+ 	set_ldt(NULL, 0);
+ }
+ 
+-/*
+- * load one particular LDT into the current CPU
+- */
+-static inline void load_LDT_nolock(mm_context_t *pc)
+-{
+-	set_ldt(pc->ldt, pc->size);
+-}
+-
+-static inline void load_LDT(mm_context_t *pc)
+-{
+-	preempt_disable();
+-	load_LDT_nolock(pc);
+-	preempt_enable();
+-}
+-
+ static inline unsigned long get_desc_base(const struct desc_struct *desc)
+ {
+ 	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index 876e74e8eec7..b6b7bc3f5d26 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -9,8 +9,7 @@
+  * we put the segment information here.
+  */
+ typedef struct {
+-	void *ldt;
+-	int size;
++	struct ldt_struct *ldt;
+ 
+ #ifdef CONFIG_X86_64
+ 	/* True if mm supports a task running in 32 bit compatibility mode. */
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 166af2a8e865..23e0625a6183 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -20,6 +20,50 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
+ #endif /* !CONFIG_PARAVIRT */
+ 
+ /*
++ * ldt_structs can be allocated, used, and freed, but they are never
++ * modified while live.
++ */
++struct ldt_struct {
++	/*
++	 * Xen requires page-aligned LDTs with special permissions. This is
++	 * needed to prevent us from installing evil descriptors such as
++	 * call gates. On native, we could merge the ldt_struct and LDT
++	 * allocations, but it's not worth trying to optimize.
++	 */
++	struct desc_struct *entries;
++	int size;
++};
++
++static inline void load_mm_ldt(struct mm_struct *mm)
++{
++	struct ldt_struct *ldt;
++
++	/* lockless_dereference synchronizes with smp_store_release */
++	ldt = lockless_dereference(mm->context.ldt);
++
++	/*
++	 * Any change to mm->context.ldt is followed by an IPI to all
++	 * CPUs with the mm active. The LDT will not be freed until
++	 * after the IPI is handled by all such CPUs. This means that,
++	 * if the ldt_struct changes before we return, the values we see
++	 * will be safe, and the new values will be loaded before we run
++	 * any user code.
++	 *
++	 * NB: don't try to convert this to use RCU without extreme care.
++	 * We would still need IRQs off, because we don't want to change
++	 * the local LDT after an IPI loaded a newer value than the one
++	 * that we can see.
++	 */
++
++	if (unlikely(ldt))
++		set_ldt(ldt->entries, ldt->size);
++	else
++		clear_LDT();
++
++	DEBUG_LOCKS_WARN_ON(preemptible());
++}
++
++/*
+  * Used for LDT copy/destruction.
+  */
+ int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+@@ -55,7 +99,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 
+ 		/* Load the LDT, if the LDT is different: */
+ 		if (unlikely(prev->context.ldt != next->context.ldt))
+-			load_LDT_nolock(&next->context);
++			load_mm_ldt(next);
+ 	}
+ #ifdef CONFIG_SMP
+ 	else {
+@@ -77,7 +121,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 			 */
+ 			load_cr3(next->pgd);
+ 			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+-			load_LDT_nolock(&next->context);
++			load_mm_ldt(next);
+ 		}
+ 	}
+ #endif
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 7bc49c3b9684..e757fcbe90db 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1383,7 +1383,7 @@ void cpu_init(void)
+ 	load_sp0(t, &current->thread);
+ 	set_tss_desc(cpu, t);
+ 	load_TR_desc();
+-	load_LDT(&init_mm.context);
++	load_mm_ldt(&init_mm);
+ 
+ 	clear_all_debug_regs();
+ 	dbg_restore_debug_regs();
+@@ -1426,7 +1426,7 @@ void cpu_init(void)
+ 	load_sp0(t, thread);
+ 	set_tss_desc(cpu, t);
+ 	load_TR_desc();
+-	load_LDT(&init_mm.context);
++	load_mm_ldt(&init_mm);
+ 
+ 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 6b5acd5f4a34..c832e9f54cd6 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -32,6 +32,7 @@
+ #include <asm/smp.h>
+ #include <asm/alternative.h>
+ #include <asm/tlbflush.h>
++#include <asm/mmu_context.h>
+ #include <asm/timer.h>
+ #include <asm/desc.h>
+ #include <asm/ldt.h>
+@@ -1987,21 +1988,25 @@ static unsigned long get_segment_base(unsigned int segment)
+ 	int idx = segment >> 3;
+ 
+ 	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
++		struct ldt_struct *ldt;
++
+ 		if (idx > LDT_ENTRIES)
+ 			return 0;
+ 
+-		if (idx > current->active_mm->context.size)
++		/* IRQs are off, so this synchronizes with smp_store_release */
++		ldt = lockless_dereference(current->active_mm->context.ldt);
++		if (!ldt || idx > ldt->size)
+ 			return 0;
+ 
+-		desc = current->active_mm->context.ldt;
++		desc = &ldt->entries[idx];
+ 	} else {
+ 		if (idx > GDT_ENTRIES)
+ 			return 0;
+ 
+-		desc = raw_cpu_ptr(gdt_page.gdt);
++		desc = raw_cpu_ptr(gdt_page.gdt) + idx;
+ 	}
+ 
+-	return get_desc_base(desc + idx);
++	return get_desc_base(desc);
+ }
+ 
+ #ifdef CONFIG_COMPAT
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index e36d9815ef56..fad5cd9d7c4b 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1459,20 +1459,77 @@ ENTRY(nmi)
+ 	 * a nested NMI that updated the copy interrupt stack frame, a
+ 	 * jump will be made to the repeat_nmi code that will handle the second
+ 	 * NMI.
++	 *
++	 * However, espfix prevents us from directly returning to userspace
++	 * with a single IRET instruction. Similarly, IRET to user mode
++	 * can fault. We therefore handle NMIs from user space like
++	 * other IST entries.
+ 	 */
+ 
+ 	/* Use %rdx as our temp variable throughout */
+ 	pushq_cfi %rdx
+ 	CFI_REL_OFFSET rdx, 0
+ 
++	testb $3, CS-RIP+8(%rsp)
++	jz .Lnmi_from_kernel
++
++	/*
++	 * NMI from user mode. We need to run on the thread stack, but we
++	 * can't go through the normal entry paths: NMIs are masked, and
++	 * we don't want to enable interrupts, because then we'll end
++	 * up in an awkward situation in which IRQs are on but NMIs
++	 * are off.
++	 */
++
++	SWAPGS
++	cld
++	movq %rsp, %rdx
++	movq PER_CPU_VAR(kernel_stack), %rsp
++	addq $KERNEL_STACK_OFFSET, %rsp
++	pushq 5*8(%rdx)		/* pt_regs->ss */
++	pushq 4*8(%rdx)		/* pt_regs->rsp */
++	pushq 3*8(%rdx)		/* pt_regs->flags */
++	pushq 2*8(%rdx)		/* pt_regs->cs */
++	pushq 1*8(%rdx)		/* pt_regs->rip */
++	pushq $-1		/* pt_regs->orig_ax */
++	pushq %rdi		/* pt_regs->di */
++	pushq %rsi		/* pt_regs->si */
++	pushq (%rdx)		/* pt_regs->dx */
++	pushq %rcx		/* pt_regs->cx */
++	pushq %rax		/* pt_regs->ax */
++	pushq %r8		/* pt_regs->r8 */
++	pushq %r9		/* pt_regs->r9 */
++	pushq %r10		/* pt_regs->r10 */
++	pushq %r11		/* pt_regs->r11 */
++	pushq %rbx		/* pt_regs->rbx */
++	pushq %rbp		/* pt_regs->rbp */
++	pushq %r12		/* pt_regs->r12 */
++	pushq %r13		/* pt_regs->r13 */
++	pushq %r14		/* pt_regs->r14 */
++	pushq %r15		/* pt_regs->r15 */
++
+ 	/*
+-	 * If %cs was not the kernel segment, then the NMI triggered in user
+-	 * space, which means it is definitely not nested.
++	 * At this point we no longer need to worry about stack damage
++	 * due to nesting -- we're on the normal thread stack and we're
++	 * done with the NMI stack.
+ 	 */
+-	cmpl $__KERNEL_CS, 16(%rsp)
+-	jne first_nmi
++
++	movq %rsp, %rdi
++	movq $-1, %rsi
++	call do_nmi
+ 
+ 	/*
++	 * Return back to user mode. We must *not* do the normal exit
++	 * work, because we don't want to enable interrupts. Fortunately,
++	 * do_nmi doesn't modify pt_regs.
++	 */
++	SWAPGS
++
++	addq $6*8, %rsp		/* skip bx, bp, and r12-r15 */
++	jmp restore_args
++
++.Lnmi_from_kernel:
++	/*
+ 	 * Check the special variable on the stack to see if NMIs are
+ 	 * executing.
+ 	 */
+@@ -1629,29 +1686,11 @@ end_repeat_nmi:
+ 	call save_paranoid
+ 	DEFAULT_FRAME 0
+ 
+-	/*
+-	 * Save off the CR2 register. If we take a page fault in the NMI then
+-	 * it could corrupt the CR2 value. If the NMI preempts a page fault
+-	 * handler before it was able to read the CR2 register, and then the
+-	 * NMI itself takes a page fault, the page fault that was preempted
+-	 * will read the information from the NMI page fault and not the
+-	 * origin fault. Save it off and restore it if it changes.
+-	 * Use the r12 callee-saved register.
+-	 */
+-	movq %cr2, %r12
+-
+ 	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+ 	movq %rsp,%rdi
+ 	movq $-1,%rsi
+ 	call do_nmi
+ 
+-	/* Did the NMI take a page fault? Restore cr2 if it did */
+-	movq %cr2, %rcx
+-	cmpq %rcx, %r12
+-	je 1f
+-	movq %r12, %cr2
+-1:
+-
+ 	testl %ebx,%ebx		/* swapgs needed? */
+ 	jnz nmi_restore
+ nmi_swapgs:
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index c37886d759cc..2bcc0525f1c1 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -12,6 +12,7 @@
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
++#include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
+ 
+@@ -20,82 +21,82 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
+ 
+-#ifdef CONFIG_SMP
++/* context.lock is held for us, so we don't need any locking. */
+ static void flush_ldt(void *current_mm)
+ {
+-	if (current->active_mm == current_mm)
+-		load_LDT(&current->active_mm->context);
++	mm_context_t *pc;
++
++	if (current->active_mm != current_mm)
++		return;
++
++	pc = &current->active_mm->context;
++	set_ldt(pc->ldt->entries, pc->ldt->size);
+ }
+-#endif
+ 
+-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
++/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
++static struct ldt_struct *alloc_ldt_struct(int size)
+ {
+-	void *oldldt, *newldt;
+-	int oldsize;
+-
+-	if (mincount <= pc->size)
+-		return 0;
+-	oldsize = pc->size;
+-	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
+-			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
+-	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
+-		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
++	struct ldt_struct *new_ldt;
++	int alloc_size;
++
++	if (size > LDT_ENTRIES)
++		return NULL;
++
++	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
++	if (!new_ldt)
++		return NULL;
++
++	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
++	alloc_size = size * LDT_ENTRY_SIZE;
++
++	/*
++	 * Xen is very picky: it requires a page-aligned LDT that has no
++	 * trailing nonzero bytes in any page that contains LDT descriptors.
++	 * Keep it simple: zero the whole allocation and never allocate less
++	 * than PAGE_SIZE.
++	 */
++	if (alloc_size > PAGE_SIZE)
++		new_ldt->entries = vzalloc(alloc_size);
+ 	else
+-		newldt = (void *)__get_free_page(GFP_KERNEL);
+-
+-	if (!newldt)
+-		return -ENOMEM;
++		new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ 
+-	if (oldsize)
+-		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
+-	oldldt = pc->ldt;
+-	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
+-	       (mincount - oldsize) * LDT_ENTRY_SIZE);
++	if (!new_ldt->entries) {
++		kfree(new_ldt);
++		return NULL;
++	}
+ 
+-	paravirt_alloc_ldt(newldt, mincount);
++	new_ldt->size = size;
++	return new_ldt;
++}
+ 
+-#ifdef CONFIG_X86_64
+-	/* CHECKME: Do we really need this ? */
+-	wmb();
+-#endif
+-	pc->ldt = newldt;
+-	wmb();
+-	pc->size = mincount;
+-	wmb();
+-
+-	if (reload) {
+-#ifdef CONFIG_SMP
+-		preempt_disable();
+-		load_LDT(pc);
+-		if (!cpumask_equal(mm_cpumask(current->mm),
+-				   cpumask_of(smp_processor_id())))
+-			smp_call_function(flush_ldt, current->mm, 1);
+-		preempt_enable();
+-#else
+-		load_LDT(pc);
+-#endif
+-	}
+-	if (oldsize) {
+-		paravirt_free_ldt(oldldt, oldsize);
+-		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
+-			vfree(oldldt);
+-		else
+-			put_page(virt_to_page(oldldt));
+-	}
+-	return 0;
++/* After calling this, the LDT is immutable. */
++static void finalize_ldt_struct(struct ldt_struct *ldt)
++{
++	paravirt_alloc_ldt(ldt->entries, ldt->size);
+ }
+ 
+-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++/* context.lock is held */
++static void install_ldt(struct mm_struct *current_mm,
++			struct ldt_struct *ldt)
+ {
+-	int err = alloc_ldt(new, old->size, 0);
+-	int i;
++	/* Synchronizes with lockless_dereference in load_mm_ldt. */
++	smp_store_release(&current_mm->context.ldt, ldt);
++
++	/* Activate the LDT for all CPUs using current_mm. */
++	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
++}
+ 
+-	if (err < 0)
+-		return err;
++static void free_ldt_struct(struct ldt_struct *ldt)
++{
++	if (likely(!ldt))
++		return;
+ 
+-	for (i = 0; i < old->size; i++)
+-		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
+-	return 0;
++	paravirt_free_ldt(ldt->entries, ldt->size);
++	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
++		vfree(ldt->entries);
++	else
++		kfree(ldt->entries);
++	kfree(ldt);
+ }
+ 
+ /*
+@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+  */
+ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ {
++	struct ldt_struct *new_ldt;
+ 	struct mm_struct *old_mm;
+ 	int retval = 0;
+ 
+ 	mutex_init(&mm->context.lock);
+-	mm->context.size = 0;
+ 	old_mm = current->mm;
+-	if (old_mm && old_mm->context.size > 0) {
+-		mutex_lock(&old_mm->context.lock);
+-		retval = copy_ldt(&mm->context, &old_mm->context);
+-		mutex_unlock(&old_mm->context.lock);
++	if (!old_mm) {
++		mm->context.ldt = NULL;
++		return 0;
+ 	}
++
++	mutex_lock(&old_mm->context.lock);
++	if (!old_mm->context.ldt) {
++		mm->context.ldt = NULL;
++		goto out_unlock;
++	}
++
++	new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
++	if (!new_ldt) {
++		retval = -ENOMEM;
++		goto out_unlock;
++	}
++
++	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
++	       new_ldt->size * LDT_ENTRY_SIZE);
++	finalize_ldt_struct(new_ldt);
++
++	mm->context.ldt = new_ldt;
++
++out_unlock:
++	mutex_unlock(&old_mm->context.lock);
+ 	return retval;
+ }
+ 
+@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+  */
+ void destroy_context(struct mm_struct *mm)
+ {
+-	if (mm->context.size) {
+-#ifdef CONFIG_X86_32
+-		/* CHECKME: Can this ever happen ? */
+-		if (mm == current->active_mm)
+-			clear_LDT();
+-#endif
+-		paravirt_free_ldt(mm->context.ldt, mm->context.size);
+-		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
+-			vfree(mm->context.ldt);
+-		else
+-			put_page(virt_to_page(mm->context.ldt));
+-		mm->context.size = 0;
+-	}
++	free_ldt_struct(mm->context.ldt);
++	mm->context.ldt = NULL;
+ }
+ 
+ static int read_ldt(void __user *ptr, unsigned long bytecount)
+ {
+-	int err;
++	int retval;
+ 	unsigned long size;
+ 	struct mm_struct *mm = current->mm;
+ 
+-	if (!mm->context.size)
+-		return 0;
++	mutex_lock(&mm->context.lock);
++
++	if (!mm->context.ldt) {
++		retval = 0;
++		goto out_unlock;
++	}
++
+ 	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
+ 		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
+ 
+-	mutex_lock(&mm->context.lock);
+-	size = mm->context.size * LDT_ENTRY_SIZE;
++	size = mm->context.ldt->size * LDT_ENTRY_SIZE;
+ 	if (size > bytecount)
+ 		size = bytecount;
+ 
+-	err = 0;
+-	if (copy_to_user(ptr, mm->context.ldt, size))
+-		err = -EFAULT;
+-	mutex_unlock(&mm->context.lock);
+-	if (err < 0)
+-		goto error_return;
++	if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
++		retval = -EFAULT;
++		goto out_unlock;
++	}
++
+ 	if (size != bytecount) {
+-		/* zero-fill the rest */
+-		if (clear_user(ptr + size, bytecount - size) != 0) {
+-			err = -EFAULT;
+-			goto error_return;
++		/* Zero-fill the rest and pretend we read bytecount bytes. */
++		if (clear_user(ptr + size, bytecount - size)) {
++			retval = -EFAULT;
++			goto out_unlock;
+ 		}
+ 	}
+-	return bytecount;
+-error_return:
+-	return err;
++	retval = bytecount;
++
++out_unlock:
++	mutex_unlock(&mm->context.lock);
++	return retval;
+ }
+ 
+ static int read_default_ldt(void __user *ptr, unsigned long bytecount)
+@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ 	struct desc_struct ldt;
+ 	int error;
+ 	struct user_desc ldt_info;
++	int oldsize, newsize;
++	struct ldt_struct *new_ldt, *old_ldt;
+ 
+ 	error = -EINVAL;
+ 	if (bytecount != sizeof(ldt_info))
+@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ 		goto out;
+ 	}
+ 
+-	mutex_lock(&mm->context.lock);
+-	if (ldt_info.entry_number >= mm->context.size) {
+-		error = alloc_ldt(&current->mm->context,
+-				  ldt_info.entry_number + 1, 1);
+-		if (error < 0)
+-			goto out_unlock;
+-	}
+-
+-	/* Allow LDTs to be cleared by the user. */
+-	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+-		if (oldmode || LDT_empty(&ldt_info)) {
+-			memset(&ldt, 0, sizeof(ldt));
+-			goto install;
++	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
++	    LDT_empty(&ldt_info)) {
++		/* The user wants to clear the entry. */
++		memset(&ldt, 0, sizeof(ldt));
++	} else {
++		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
++			error = -EINVAL;
++			goto out;
+ 		}
++
++		fill_ldt(&ldt, &ldt_info);
++		if (oldmode)
++			ldt.avl = 0;
+ 	}
+ 
+-	if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+-		error = -EINVAL;
++	mutex_lock(&mm->context.lock);
++
++	old_ldt = mm->context.ldt;
++	oldsize = old_ldt ? old_ldt->size : 0;
++	newsize = max((int)(ldt_info.entry_number + 1), oldsize);
++
++	error = -ENOMEM;
++	new_ldt = alloc_ldt_struct(newsize);
++	if (!new_ldt)
+ 		goto out_unlock;
+-	}
+ 
+-	fill_ldt(&ldt, &ldt_info);
+-	if (oldmode)
+-		ldt.avl = 0;
++	if (old_ldt)
++		memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
++	new_ldt->entries[ldt_info.entry_number] = ldt;
++	finalize_ldt_struct(new_ldt);
+ 
+-	/* Install the new entry ... */
+-install:
+-	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
++	install_ldt(mm, new_ldt);
++	free_ldt_struct(old_ldt);
+ 	error = 0;
+ 
+ out_unlock:
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index c3e985d1751c..5c5ec7d28d9b 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
+ NOKPROBE_SYMBOL(default_do_nmi);
+ 
+ /*
+- * NMIs can hit breakpoints which will cause it to lose its
+- * NMI context with the CPU when the breakpoint does an iret.
+- */
+-#ifdef CONFIG_X86_32
+-/*
+- * For i386, NMIs use the same stack as the kernel, and we can
+- * add a workaround to the iret problem in C (preventing nested
+- * NMIs if an NMI takes a trap). Simply have 3 states the NMI
+- * can be in:
++ * NMIs can hit breakpoints which will cause it to lose its NMI context
++ * with the CPU when the breakpoint or page fault does an IRET.
++ *
++ * As a result, NMIs can nest if NMIs get unmasked due an IRET during
++ * NMI processing. On x86_64, the asm glue protects us from nested NMIs
++ * if the outer NMI came from kernel mode, but we can still nest if the
++ * outer NMI came from user mode.
++ *
++ * To handle these nested NMIs, we have three states:
+  *
+  *  1) not running
+  *  2) executing
+@@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
+  * (Note, the latch is binary, thus multiple NMIs triggering,
+  * when one is running, are ignored. Only one NMI is restarted.)
+  *
+- * If an NMI hits a breakpoint that executes an iret, another
+- * NMI can preempt it. We do not want to allow this new NMI
+- * to run, but we want to execute it when the first one finishes.
+- * We set the state to "latched", and the exit of the first NMI will
+- * perform a dec_return, if the result is zero (NOT_RUNNING), then
+- * it will simply exit the NMI handler. If not, the dec_return
+- * would have set the state to NMI_EXECUTING (what we want it to
+- * be when we are running). In this case, we simply jump back
+- * to rerun the NMI handler again, and restart the 'latched' NMI.
++ * If an NMI executes an iret, another NMI can preempt it. We do not
++ * want to allow this new NMI to run, but we want to execute it when the
++ * first one finishes. We set the state to "latched", and the exit of
++ * the first NMI will perform a dec_return, if the result is zero
++ * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
++ * dec_return would have set the state to NMI_EXECUTING (what we want it
++ * to be when we are running). In this case, we simply jump back to
++ * rerun the NMI handler again, and restart the 'latched' NMI.
+  *
+  * No trap (breakpoint or page fault) should be hit before nmi_restart,
+  * thus there is no race between the first check of state for NOT_RUNNING
+@@ -461,49 +460,36 @@ enum nmi_states {
+ static DEFINE_PER_CPU(enum nmi_states, nmi_state);
+ static DEFINE_PER_CPU(unsigned long, nmi_cr2);
+ 
+-#define nmi_nesting_preprocess(regs) \
+-	do { \
+-		if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \
+-			this_cpu_write(nmi_state, NMI_LATCHED); \
+-			return; \
+-		} \
+-		this_cpu_write(nmi_state, NMI_EXECUTING); \
+-		this_cpu_write(nmi_cr2, read_cr2()); \
+-	} while (0); \
+-	nmi_restart:
+-
+-#define nmi_nesting_postprocess() \
+-	do { \
+-		if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \
+-			write_cr2(this_cpu_read(nmi_cr2)); \
+-		if (this_cpu_dec_return(nmi_state)) \
+-			goto nmi_restart; \
+-	} while (0)
+-#else /* x86_64 */
++#ifdef CONFIG_X86_64
+ /*
+- * In x86_64 things are a bit more difficult. This has the same problem
+- * where an NMI hitting a breakpoint that calls iret will remove the
+- * NMI context, allowing a nested NMI to enter. What makes this more
+- * difficult is that both NMIs and breakpoints have their own stack.
+- * When a new NMI or breakpoint is executed, the stack is set to a fixed
+- * point. If an NMI is nested, it will have its stack set at that same
+- * fixed address that the first NMI had, and will start corrupting the
+- * stack. This is handled in entry_64.S, but the same problem exists with
+- * the breakpoint stack.
++ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
++ * some care, the inner breakpoint will clobber the outer breakpoint's
++ * stack.
+  *
+- * If a breakpoint is being processed, and the debug stack is being used,
+- * if an NMI comes in and also hits a breakpoint, the stack pointer
+- * will be set to the same fixed address as the breakpoint that was
+- * interrupted, causing that stack to be corrupted. To handle this case,
+- * check if the stack that was interrupted is the debug stack, and if
+- * so, change the IDT so that new breakpoints will use the current stack
+- * and not switch to the fixed address. On return of the NMI, switch back
+- * to the original IDT.
++ * If a breakpoint is being processed, and the debug stack is being
++ * used, if an NMI comes in and also hits a breakpoint, the stack
++ * pointer will be set to the same fixed address as the breakpoint that
++ * was interrupted, causing that stack to be corrupted. To handle this
++ * case, check if the stack that was interrupted is the debug stack, and
++ * if so, change the IDT so that new breakpoints will use the current
++ * stack and not switch to the fixed address. On return of the NMI,
++ * switch back to the original IDT.
+  */
+ static DEFINE_PER_CPU(int, update_debug_stack);
++#endif
+ 
+-static inline void nmi_nesting_preprocess(struct pt_regs *regs)
++dotraplinkage notrace __kprobes void
++do_nmi(struct pt_regs *regs, long error_code)
+ {
++	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
++		this_cpu_write(nmi_state, NMI_LATCHED);
++		return;
++	}
++	this_cpu_write(nmi_state, NMI_EXECUTING);
++	this_cpu_write(nmi_cr2, read_cr2());
++nmi_restart:
++
++#ifdef CONFIG_X86_64
+ 	/*
+ 	 * If we interrupted a breakpoint, it is possible that
+ 	 * the nmi handler will have breakpoints too. We need to
+@@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
+ 		debug_stack_set_zero();
+ 		this_cpu_write(update_debug_stack, 1);
+ 	}
+-}
+-
+-static inline void nmi_nesting_postprocess(void)
+-{
+-	if (unlikely(this_cpu_read(update_debug_stack))) {
+-		debug_stack_reset();
+-		this_cpu_write(update_debug_stack, 0);
+-	}
+-}
+ #endif
+ 
+-dotraplinkage notrace void
+-do_nmi(struct pt_regs *regs, long error_code)
+-{
+-	nmi_nesting_preprocess(regs);
+-
+ 	nmi_enter();
+ 
+ 	inc_irq_stat(__nmi_count);
+@@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
+ 
+ 	nmi_exit();
+ 
+-	/* On i386, may loop back to preprocess */
+-	nmi_nesting_postprocess();
++#ifdef CONFIG_X86_64
++	if (unlikely(this_cpu_read(update_debug_stack))) {
++		debug_stack_reset();
++		this_cpu_write(update_debug_stack, 0);
++	}
++#endif
++
++	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
++		write_cr2(this_cpu_read(nmi_cr2));
++	if (this_cpu_dec_return(nmi_state))
++		goto nmi_restart;
+ }
+ NOKPROBE_SYMBOL(do_nmi);
+ 
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 67fcc43577d2..63a4b5092203 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -122,11 +122,11 @@ void __show_regs(struct pt_regs *regs, int all)
+ void release_thread(struct task_struct *dead_task)
+ {
+ 	if (dead_task->mm) {
+-		if (dead_task->mm->context.size) {
++		if (dead_task->mm->context.ldt) {
+ 			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
+ 				dead_task->comm,
+ 				dead_task->mm->context.ldt,
+-				dead_task->mm->context.size);
++				dead_task->mm->context.ldt->size);
+ 			BUG();
+ 		}
+ 	}
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
+index 9b4d51d0c0d0..0ccb53a9fcd9 100644
+--- a/arch/x86/kernel/step.c
++++ b/arch/x86/kernel/step.c
+@@ -5,6 +5,7 @@
+ #include <linux/mm.h>
+ #include <linux/ptrace.h>
+ #include <asm/desc.h>
++#include <asm/mmu_context.h>
+ 
+ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
+ {
+@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+ 		struct desc_struct *desc;
+ 		unsigned long base;
+ 
+-		seg &= ~7UL;
++		seg >>= 3;
+ 
+ 		mutex_lock(&child->mm->context.lock);
+-		if (unlikely((seg >> 3) >= child->mm->context.size))
++		if (unlikely(!child->mm->context.ldt ||
++			     seg >= child->mm->context.ldt->size))
+ 			addr = -1L; /* bogus selector, access would fault */
+ 		else {
+-			desc = child->mm->context.ldt + seg;
++			desc = &child->mm->context.ldt->entries[seg];
+ 			base = get_desc_base(desc);
+ 
+ 			/* 16-bit code segment? */
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 3e32ed5648a0..a13a38830e76 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -23,6 +23,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/fpu-internal.h> /* pcntxt_mask */
+ #include <asm/cpu.h>
++#include <asm/mmu_context.h>
+ 
+ #ifdef CONFIG_X86_32
+ __visible unsigned long saved_context_ebx;
+@@ -154,7 +155,7 @@ static void fix_processor_context(void)
+ 	syscall_init();		/* This sets MSR_*STAR and related */
+ #endif
+ 	load_TR_desc();		/* This does ltr */
+-	load_LDT(&current->active_mm->context);	/* This does lldt */
++	load_mm_ldt(current->active_mm);	/* This does lldt */
+ }
+ 
+ /**
+diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
+index e88fda867a33..484145368a24 100644
+--- a/arch/x86/xen/Kconfig
++++ b/arch/x86/xen/Kconfig
+@@ -8,7 +8,7 @@ config XEN
+ 	select PARAVIRT_CLOCK
+ 	select XEN_HAVE_PVMMU
+ 	depends on X86_64 || (X86_32 && X86_PAE)
+-	depends on X86_TSC
++	depends on X86_LOCAL_APIC && X86_TSC
+ 	help
+ 	  This is the Linux Xen port. Enabling this will allow the
+ 	  kernel to boot in a paravirtualized environment under the
+@@ -17,7 +17,7 @@ config XEN
+ config XEN_DOM0
+ 	def_bool y
+ 	depends on XEN && PCI_XEN && SWIOTLB_XEN
+-	depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
++	depends on X86_IO_APIC && ACPI && PCI
+ 
+ config XEN_PVHVM
+ 	def_bool y
+diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
+index 7322755f337a..4b6e29ac0968 100644
+--- a/arch/x86/xen/Makefile
++++ b/arch/x86/xen/Makefile
+@@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp)
+ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
+ 	time.o xen-asm.o xen-asm_$(BITS).o \
+ 	grant-table.o suspend.o platform-pci-unplug.o \
+-	p2m.o
++	p2m.o apic.o
+ 
+ obj-$(CONFIG_EVENT_TRACING) += trace.o
+ 
+ obj-$(CONFIG_SMP) += smp.o
+ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
+ obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
+-obj-$(CONFIG_XEN_DOM0) += apic.o vga.o
++obj-$(CONFIG_XEN_DOM0) += vga.o
+ obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
+ obj-$(CONFIG_XEN_EFI) += efi.o
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index e180e097a53a..d8d81d1aa1d5 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1772,6 +1772,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ #ifdef CONFIG_X86_32
+ 	i386_start_kernel();
+ #else
++	cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
+ 	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
+ #endif
+ }
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 28c7e0be56e4..566004cc8a5b 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -94,17 +94,15 @@ struct dom0_vga_console_info;
+ 
+ #ifdef CONFIG_XEN_DOM0
+ void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
+-void __init xen_init_apic(void);
+ #else
+ static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
+ 				       size_t size)
+ {
+ }
+-static inline void __init xen_init_apic(void)
+-{
+-}
+ #endif
+ 
++void __init xen_init_apic(void);
++
+ #ifdef CONFIG_XEN_EFI
+ extern void xen_efi_init(void);
+ #else
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index aa02247d227e..d15c34a31633 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
+  * Description:
+  *    Enables a low level driver to set a hard upper limit,
+  *    max_hw_sectors, on the size of requests. max_hw_sectors is set by
+- *    the device driver based upon the combined capabilities of I/O
+- *    controller and storage device.
++ *    the device driver based upon the capabilities of the I/O
++ *    controller.
+  *
+  *    max_sectors is a soft limit imposed by the block layer for
+  *    filesystem type requests. This value can be overridden on a
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 74e18e94bef2..7f15707b4850 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4238,6 +4238,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "Samsung SSD 8*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
++	{ "FCCT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 
+ 	/* devices that don't properly handle TRIM commands */
+ 	{ "SuperSSpeed S238*",	NULL,	ATA_HORKAGE_NOTRIM, },
+diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
+index 9d09c5bb5874..bb39181e4c33 100644
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
+ 	if (!blk)
+ 		return -ENOMEM;
+ 
+-	present = krealloc(rbnode->cache_present,
+-			   BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
+-	if (!present) {
+-		kfree(blk);
+-		return -ENOMEM;
++	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
++		present = krealloc(rbnode->cache_present,
++				   BITS_TO_LONGS(blklen) * sizeof(*present),
++				   GFP_KERNEL);
++		if (!present) {
++			kfree(blk);
++			return -ENOMEM;
++		}
++
++		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
++		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
++		       * sizeof(*present));
++	} else {
++		present = rbnode->cache_present;
+ 	}
+ 
+ 	/* insert the register value in the correct place in the rbnode block */
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 63fc7f06a014..0c858a60dc40 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -350,7 +350,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
+ 		return;
+ 	}
+ 
+-	if (work_pending(&blkif->persistent_purge_work)) {
++	if (work_busy(&blkif->persistent_purge_work)) {
+ 		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
+ 		return;
+ 	}
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 5ac312f6e0be..218c4858f494 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1099,8 +1099,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
+ 				 * Add the used indirect page back to the list of
+ 				 * available pages for indirect grefs.
+ 				 */
+-				indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+-				list_add(&indirect_page->lru, &info->indirect_pages);
++				if (!info->feature_persistent) {
++					indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
++					list_add(&indirect_page->lru, &info->indirect_pages);
++				}
+ 				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
+ 				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
+ 			}
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 08b0da23c4ab..5408450204b0 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
+ 		      state->buflen_1;
+ 	u32 *sh_desc = ctx->sh_desc_fin, *desc;
+ 	dma_addr_t ptr = ctx->sh_desc_fin_dma;
+-	int sec4_sg_bytes;
++	int sec4_sg_bytes, sec4_sg_src_index;
+ 	int digestsize = crypto_ahash_digestsize(ahash);
+ 	struct ahash_edesc *edesc;
+ 	int ret = 0;
+ 	int sh_len;
+ 
+-	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
++	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
++	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
+ 
+ 	/* allocate space for base edesc and hw desc commands, link tables */
+ 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
+ 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+ 						buf, state->buf_dma, buflen,
+ 						last_buflen);
+-	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
++	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
+ 
+ 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ 					    sec4_sg_bytes, DMA_TO_DEVICE);
+diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
+index 0f04d5ead521..4c1991d4ce8b 100644
+--- a/drivers/edac/ppc4xx_edac.c
++++ b/drivers/edac/ppc4xx_edac.c
+@@ -921,7 +921,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
+ 	 */
+ 
+ 	for (row = 0; row < mci->nr_csrows; row++) {
+-		struct csrow_info *csi = &mci->csrows[row];
++		struct csrow_info *csi = mci->csrows[row];
+ 
+ 		/*
+ 		 * Get the configuration settings for this
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index bbcd754dc7d0..ccedb17580f7 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
+ 	struct drm_mode_config *mode_config = &dev->mode_config;
+ 	struct drm_connector *connector;
+ 
++	/* we can race here at startup, some boards seem to trigger
++	 * hotplug irqs when they shouldn't. */
++	if (!rdev->mode_info.mode_config_initialized)
++		return;
++
+ 	mutex_lock(&mode_config->mutex);
+ 	if (mode_config->num_connector) {
+ 		list_for_each_entry(connector, &mode_config->connector_list, head)
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 50b52802f470..8ad66bbd4f28 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -2489,7 +2489,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ 
+ 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
+ 	if (unlikely(ret != 0))
+-		goto out_err;
++		goto out_err_nores;
+ 
+ 	ret = vmw_validate_buffers(dev_priv, sw_context);
+ 	if (unlikely(ret != 0))
+@@ -2533,6 +2533,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ 	vmw_resource_relocations_free(&sw_context->res_relocations);
+ 
+ 	vmw_fifo_commit(dev_priv, command_size);
++	mutex_unlock(&dev_priv->binding_mutex);
+ 
+ 	vmw_query_bo_switch_commit(dev_priv, sw_context);
+ 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+@@ -2548,7 +2549,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ 		DRM_ERROR("Fence submission error. Syncing.\n");
+ 
+ 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
+-	mutex_unlock(&dev_priv->binding_mutex);
+ 
+ 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
+ 				    (void *) fence);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 3603d0cb25d9..ef984eba8396 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -222,6 +222,7 @@
+ #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD	0x0418
+ #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH	0xb19d
+ #define USB_DEVICE_ID_CHICONY_WIRELESS	0x0618
++#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE	0x1053
+ #define USB_DEVICE_ID_CHICONY_WIRELESS2	0x1123
+ #define USB_DEVICE_ID_CHICONY_AK1D	0x1125
+ 
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 509dee2e9b72..a4d1fe64c925 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -69,6 +69,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
+index 8c91fd5eb6fd..3ac9c4194814 100644
+--- a/drivers/isdn/gigaset/ser-gigaset.c
++++ b/drivers/isdn/gigaset/ser-gigaset.c
+@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
+ 	cs->hw.ser->tty = tty;
+ 	atomic_set(&cs->hw.ser->refcnt, 1);
+ 	init_completion(&cs->hw.ser->dead_cmp);
+-
+ 	tty->disc_data = cs;
+ 
++	/* Set the amount of data we're willing to receive per call
++	 * from the hardware driver to half of the input buffer size
++	 * to leave some reserve.
++	 * Note: We don't do flow control towards the hardware driver.
++	 * If more data is received than will fit into the input buffer,
++	 * it will be dropped and an error will be logged. This should
++	 * never happen as the device is slow and the buffer size ample.
++	 */
++	tty->receive_room = RBUFSIZE/2;
++
+ 	/* OK.. Initialization of the datastructures and the HW is done.. Now
+ 	 * startup system and notify the LL that we are ready to run
+ 	 */
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index e9d33ad59df5..3412b86e79fd 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1295,8 +1295,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
+ 		return r;
+ 
+ 	disk_super = dm_block_data(copy);
+-	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
+-	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
++	dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
++	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
+ 	dm_sm_dec_block(pmd->metadata_sm, held_root);
+ 
+ 	return dm_tm_unlock(pmd->tm, copy);
+diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
+index bce7c0784b6b..633c63e7c32f 100644
+--- a/drivers/mfd/arizona-core.c
++++ b/drivers/mfd/arizona-core.c
+@@ -892,10 +892,6 @@ int arizona_dev_init(struct arizona *arizona)
+ 			     arizona->pdata.gpio_defaults[i]);
+ 	}
+ 
+-	pm_runtime_set_autosuspend_delay(arizona->dev, 100);
+-	pm_runtime_use_autosuspend(arizona->dev);
+-	pm_runtime_enable(arizona->dev);
+-
+ 	/* Chip default */
+ 	if (!arizona->pdata.clk32k_src)
+ 		arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
+@@ -992,11 +988,17 @@ int arizona_dev_init(struct arizona *arizona)
+ 			    arizona->pdata.spk_fmt[i]);
+ 	}
+ 
++	pm_runtime_set_active(arizona->dev);
++	pm_runtime_enable(arizona->dev);
++
+ 	/* Set up for interrupts */
+ 	ret = arizona_irq_init(arizona);
+ 	if (ret != 0)
+ 		goto err_reset;
+ 
++	pm_runtime_set_autosuspend_delay(arizona->dev, 100);
++	pm_runtime_use_autosuspend(arizona->dev);
++
+ 	arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
+ 			    arizona_clkgen_err, arizona);
+ 	arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
+@@ -1024,10 +1026,6 @@ int arizona_dev_init(struct arizona *arizona)
+ 		goto err_irq;
+ 	}
+ 
+-#ifdef CONFIG_PM_RUNTIME
+-	regulator_disable(arizona->dcvdd);
+-#endif
+-
+ 	return 0;
+ 
+ err_irq:
+diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
+index f35d4280b2f7..c58fc62545a3 100644
+--- a/drivers/mfd/lpc_ich.c
++++ b/drivers/mfd/lpc_ich.c
+@@ -934,8 +934,8 @@ gpe0_done:
+ 	lpc_ich_enable_gpio_space(dev);
+ 
+ 	lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_GPIO]);
+-	ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
+-			      1, NULL, 0, NULL);
++	ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
++			      &lpc_ich_cells[LPC_GPIO], 1, NULL, 0, NULL);
+ 
+ gpio_done:
+ 	if (acpi_conflict)
+@@ -1008,8 +1008,8 @@ static int lpc_ich_init_wdt(struct pci_dev *dev)
+ 	}
+ 
+ 	lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]);
+-	ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
+-			      1, NULL, 0, NULL);
++	ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
++			      &lpc_ich_cells[LPC_WDT], 1, NULL, 0, NULL);
+ 
+ wdt_done:
+ 	return ret;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1cc06c0e3e92..081dd70813c8 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -622,6 +622,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
+ 	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
+ }
+ 
++static struct slave *bond_get_old_active(struct bonding *bond,
++					 struct slave *new_active)
++{
++	struct slave *slave;
++	struct list_head *iter;
++
++	bond_for_each_slave(bond, slave, iter) {
++		if (slave == new_active)
++			continue;
++
++		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
++			return slave;
++	}
++
++	return NULL;
++}
++
+ /* bond_do_fail_over_mac
+  *
+  * Perform special MAC address swapping for fail_over_mac settings
+@@ -649,6 +666,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
+ 		if (!new_active)
+ 			return;
+ 
++		if (!old_active)
++			old_active = bond_get_old_active(bond, new_active);
++
+ 		if (old_active) {
+ 			ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
+ 			ether_addr_copy(saddr.sa_data,
+@@ -1805,6 +1825,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
+ 		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ 		netdev_info(bond_dev, "Destroying bond %s\n",
+ 			    bond_dev->name);
++		bond_remove_proc_entry(bond);
+ 		unregister_netdevice(bond_dev);
+ 	}
+ 	return ret;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
+index 49290a405903..af67e7d410eb 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
+@@ -568,7 +568,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+ 						continue;
+ 					mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
+ 						 __func__, i, port);
+-					s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
++					s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
+ 					if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+ 						eqe->event.port_change.port =
+ 							cpu_to_be32(
+@@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+ 						continue;
+ 					if (i == mlx4_master_func_num(dev))
+ 						continue;
+-					s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
++					s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
+ 					if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+ 						eqe->event.port_change.port =
+ 							cpu_to_be32(
+diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+index 223eb42992bd..775e7bc292f2 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
+ module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
+ module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
++module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
+ module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
+ 		   bool, 0444);
+ MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 1b3a09473452..30f9ef0c0d4f 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
1463 |
+ if (resp) { |
1464 |
+ resp(sp, fp, arg); |
1465 |
+ res = true; |
1466 |
+- } else if (!IS_ERR(fp)) { |
1467 |
+- fc_frame_free(fp); |
1468 |
+ } |
1469 |
+ |
1470 |
+ spin_lock_bh(&ep->ex_lock); |
1471 |
+@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) |
1472 |
+ * If new exch resp handler is valid then call that |
1473 |
+ * first. |
1474 |
+ */ |
1475 |
+- fc_invoke_resp(ep, sp, fp); |
1476 |
++ if (!fc_invoke_resp(ep, sp, fp)) |
1477 |
++ fc_frame_free(fp); |
1478 |
+ |
1479 |
+ fc_exch_release(ep); |
1480 |
+ return; |
1481 |
+@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) |
1482 |
+ fc_exch_hold(ep); |
1483 |
+ if (!rc) |
1484 |
+ fc_exch_delete(ep); |
1485 |
+- fc_invoke_resp(ep, sp, fp); |
1486 |
++ if (!fc_invoke_resp(ep, sp, fp)) |
1487 |
++ fc_frame_free(fp); |
1488 |
+ if (has_rec) |
1489 |
+ fc_exch_timer_set(ep, ep->r_a_tov); |
1490 |
+ fc_exch_release(ep); |
1491 |
+diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c |
1492 |
+index 1d7e76e8b447..ae6fc1a94568 100644 |
1493 |
+--- a/drivers/scsi/libfc/fc_fcp.c |
1494 |
++++ b/drivers/scsi/libfc/fc_fcp.c |
1495 |
+@@ -1039,11 +1039,26 @@ restart: |
1496 |
+ fc_fcp_pkt_hold(fsp); |
1497 |
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags); |
1498 |
+ |
1499 |
+- if (!fc_fcp_lock_pkt(fsp)) { |
1500 |
++ spin_lock_bh(&fsp->scsi_pkt_lock); |
1501 |
++ if (!(fsp->state & FC_SRB_COMPL)) { |
1502 |
++ fsp->state |= FC_SRB_COMPL; |
1503 |
++ /* |
1504 |
++ * TODO: dropping scsi_pkt_lock and then reacquiring |
1505 |
++ * again around fc_fcp_cleanup_cmd() is required, |
1506 |
++ * since fc_fcp_cleanup_cmd() calls into |
1507 |
++ * fc_seq_set_resp() and that func preempts cpu using |
1508 |
++ * schedule. May be schedule and related code should be |
1509 |
++ * removed instead of unlocking here to avoid scheduling |
1510 |
++ * while atomic bug. |
1511 |
++ */ |
1512 |
++ spin_unlock_bh(&fsp->scsi_pkt_lock); |
1513 |
++ |
1514 |
+ fc_fcp_cleanup_cmd(fsp, error); |
1515 |
++ |
1516 |
++ spin_lock_bh(&fsp->scsi_pkt_lock); |
1517 |
+ fc_io_compl(fsp); |
1518 |
+- fc_fcp_unlock_pkt(fsp); |
1519 |
+ } |
1520 |
++ spin_unlock_bh(&fsp->scsi_pkt_lock); |
1521 |
+ |
1522 |
+ fc_fcp_pkt_release(fsp); |
1523 |
+ spin_lock_irqsave(&si->scsi_queue_lock, flags); |
1524 |
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c |
1525 |
+index 0d8bc6c66650..7854584ebd59 100644 |
1526 |
+--- a/drivers/scsi/libiscsi.c |
1527 |
++++ b/drivers/scsi/libiscsi.c |
1528 |
+@@ -2960,10 +2960,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) |
1529 |
+ { |
1530 |
+ struct iscsi_conn *conn = cls_conn->dd_data; |
1531 |
+ struct iscsi_session *session = conn->session; |
1532 |
+- unsigned long flags; |
1533 |
+ |
1534 |
+ del_timer_sync(&conn->transport_timer); |
1535 |
+ |
1536 |
++ mutex_lock(&session->eh_mutex); |
1537 |
+ spin_lock_bh(&session->frwd_lock); |
1538 |
+ conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; |
1539 |
+ if (session->leadconn == conn) { |
1540 |
+@@ -2975,28 +2975,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) |
1541 |
+ } |
1542 |
+ spin_unlock_bh(&session->frwd_lock); |
1543 |
+ |
1544 |
+- /* |
1545 |
+- * Block until all in-progress commands for this connection |
1546 |
+- * time out or fail. |
1547 |
+- */ |
1548 |
+- for (;;) { |
1549 |
+- spin_lock_irqsave(session->host->host_lock, flags); |
1550 |
+- if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */ |
1551 |
+- spin_unlock_irqrestore(session->host->host_lock, flags); |
1552 |
+- break; |
1553 |
+- } |
1554 |
+- spin_unlock_irqrestore(session->host->host_lock, flags); |
1555 |
+- msleep_interruptible(500); |
1556 |
+- iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): " |
1557 |
+- "host_busy %d host_failed %d\n", |
1558 |
+- atomic_read(&session->host->host_busy), |
1559 |
+- session->host->host_failed); |
1560 |
+- /* |
1561 |
+- * force eh_abort() to unblock |
1562 |
+- */ |
1563 |
+- wake_up(&conn->ehwait); |
1564 |
+- } |
1565 |
+- |
1566 |
+ /* flush queued up work because we free the connection below */ |
1567 |
+ iscsi_suspend_tx(conn); |
1568 |
+ |
1569 |
+@@ -3013,6 +2991,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) |
1570 |
+ if (session->leadconn == conn) |
1571 |
+ session->leadconn = NULL; |
1572 |
+ spin_unlock_bh(&session->frwd_lock); |
1573 |
++ mutex_unlock(&session->eh_mutex); |
1574 |
+ |
1575 |
+ iscsi_destroy_conn(cls_conn); |
1576 |
+ } |
1577 |
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c |
1578 |
+index b99399fe2548..15acc808658f 100644 |
1579 |
+--- a/drivers/scsi/lpfc/lpfc_scsi.c |
1580 |
++++ b/drivers/scsi/lpfc/lpfc_scsi.c |
1581 |
+@@ -3380,7 +3380,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) |
1582 |
+ */ |
1583 |
+ |
1584 |
+ nseg = scsi_dma_map(scsi_cmnd); |
1585 |
+- if (unlikely(!nseg)) |
1586 |
++ if (unlikely(nseg <= 0)) |
1587 |
+ return 1; |
1588 |
+ sgl += 1; |
1589 |
+ /* clear the last flag in the fcp_rsp map entry */ |
1590 |
+diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c |
1591 |
+index 7454498c4091..a2dcf6a54ec6 100644 |
1592 |
+--- a/drivers/scsi/scsi_pm.c |
1593 |
++++ b/drivers/scsi/scsi_pm.c |
1594 |
+@@ -219,15 +219,15 @@ static int sdev_runtime_suspend(struct device *dev) |
1595 |
+ { |
1596 |
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
1597 |
+ struct scsi_device *sdev = to_scsi_device(dev); |
1598 |
+- int err; |
1599 |
++ int err = 0; |
1600 |
+ |
1601 |
+- err = blk_pre_runtime_suspend(sdev->request_queue); |
1602 |
+- if (err) |
1603 |
+- return err; |
1604 |
+- if (pm && pm->runtime_suspend) |
1605 |
++ if (pm && pm->runtime_suspend) { |
1606 |
++ err = blk_pre_runtime_suspend(sdev->request_queue); |
1607 |
++ if (err) |
1608 |
++ return err; |
1609 |
+ err = pm->runtime_suspend(dev); |
1610 |
+- blk_post_runtime_suspend(sdev->request_queue, err); |
1611 |
+- |
1612 |
++ blk_post_runtime_suspend(sdev->request_queue, err); |
1613 |
++ } |
1614 |
+ return err; |
1615 |
+ } |
1616 |
+ |
1617 |
+@@ -250,11 +250,11 @@ static int sdev_runtime_resume(struct device *dev) |
1618 |
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
1619 |
+ int err = 0; |
1620 |
+ |
1621 |
+- blk_pre_runtime_resume(sdev->request_queue); |
1622 |
+- if (pm && pm->runtime_resume) |
1623 |
++ if (pm && pm->runtime_resume) { |
1624 |
++ blk_pre_runtime_resume(sdev->request_queue); |
1625 |
+ err = pm->runtime_resume(dev); |
1626 |
+- blk_post_runtime_resume(sdev->request_queue, err); |
1627 |
+- |
1628 |
++ blk_post_runtime_resume(sdev->request_queue, err); |
1629 |
++ } |
1630 |
+ return err; |
1631 |
+ } |
1632 |
+ |
1633 |
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c |
1634 |
+index ce382e858452..6d931d598d80 100644 |
1635 |
+--- a/drivers/scsi/sd.c |
1636 |
++++ b/drivers/scsi/sd.c |
1637 |
+@@ -2812,9 +2812,9 @@ static int sd_revalidate_disk(struct gendisk *disk) |
1638 |
+ max_xfer = sdkp->max_xfer_blocks; |
1639 |
+ max_xfer <<= ilog2(sdp->sector_size) - 9; |
1640 |
+ |
1641 |
+- max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), |
1642 |
+- max_xfer); |
1643 |
+- blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); |
1644 |
++ sdkp->disk->queue->limits.max_sectors = |
1645 |
++ min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer); |
1646 |
++ |
1647 |
+ set_capacity(disk, sdkp->capacity); |
1648 |
+ sd_config_write_same(sdkp); |
1649 |
+ kfree(buffer); |
1650 |
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h |
1651 |
+index 45c39a37f924..8bc073d297db 100644 |
1652 |
+--- a/include/drm/drm_pciids.h |
1653 |
++++ b/include/drm/drm_pciids.h |
1654 |
+@@ -172,6 +172,7 @@ |
1655 |
+ {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
1656 |
+ {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
1657 |
+ {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
1658 |
++ {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
1659 |
+ {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
1660 |
+ {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
1661 |
+ {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
1662 |
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h |
1663 |
+index 000c5f90f08c..2bd394ed35f6 100644 |
1664 |
+--- a/include/linux/compiler.h |
1665 |
++++ b/include/linux/compiler.h |
1666 |
+@@ -454,6 +454,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s |
1667 |
+ */ |
1668 |
+ #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) |
1669 |
+ |
1670 |
++/** |
1671 |
++ * lockless_dereference() - safely load a pointer for later dereference |
1672 |
++ * @p: The pointer to load |
1673 |
++ * |
1674 |
++ * Similar to rcu_dereference(), but for situations where the pointed-to |
1675 |
++ * object's lifetime is managed by something other than RCU. That |
1676 |
++ * "something other" might be reference counting or simple immortality. |
1677 |
++ */ |
1678 |
++#define lockless_dereference(p) \ |
1679 |
++({ \ |
1680 |
++ typeof(p) _________p1 = ACCESS_ONCE(p); \ |
1681 |
++ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ |
1682 |
++ (_________p1); \ |
1683 |
++}) |
1684 |
++ |
1685 |
+ /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ |
1686 |
+ #ifdef CONFIG_KPROBES |
1687 |
+ # define __kprobes __attribute__((__section__(".kprobes.text"))) |
1688 |
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h |
1689 |
+index 53ff1a752d7e..a4a819ffb2d1 100644 |
1690 |
+--- a/include/linux/rcupdate.h |
1691 |
++++ b/include/linux/rcupdate.h |
1692 |
+@@ -617,21 +617,6 @@ static inline void rcu_preempt_sleep_check(void) |
1693 |
+ #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) |
1694 |
+ |
1695 |
+ /** |
1696 |
+- * lockless_dereference() - safely load a pointer for later dereference |
1697 |
+- * @p: The pointer to load |
1698 |
+- * |
1699 |
+- * Similar to rcu_dereference(), but for situations where the pointed-to |
1700 |
+- * object's lifetime is managed by something other than RCU. That |
1701 |
+- * "something other" might be reference counting or simple immortality. |
1702 |
+- */ |
1703 |
+-#define lockless_dereference(p) \ |
1704 |
+-({ \ |
1705 |
+- typeof(p) _________p1 = ACCESS_ONCE(p); \ |
1706 |
+- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ |
1707 |
+- (_________p1); \ |
1708 |
+-}) |
1709 |
+- |
1710 |
+-/** |
1711 |
+ * rcu_assign_pointer() - assign to RCU-protected pointer |
1712 |
+ * @p: pointer to assign to |
1713 |
+ * @v: value to assign (publish) |
1714 |
+diff --git a/include/net/ip.h b/include/net/ip.h |
1715 |
+index c0c26c3deeb5..d00ebdf14ca4 100644 |
1716 |
+--- a/include/net/ip.h |
1717 |
++++ b/include/net/ip.h |
1718 |
+@@ -160,6 +160,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk) |
1719 |
+ } |
1720 |
+ |
1721 |
+ /* datagram.c */ |
1722 |
++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); |
1723 |
+ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); |
1724 |
+ |
1725 |
+ void ip4_datagram_release_cb(struct sock *sk); |
1726 |
+diff --git a/ipc/sem.c b/ipc/sem.c |
1727 |
+index 53c3310f41c6..85ad28aaf548 100644 |
1728 |
+--- a/ipc/sem.c |
1729 |
++++ b/ipc/sem.c |
1730 |
+@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head) |
1731 |
+ } |
1732 |
+ |
1733 |
+ /* |
1734 |
++ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they |
1735 |
++ * are only control barriers. |
1736 |
++ * The code must pair with spin_unlock(&sem->lock) or |
1737 |
++ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient. |
1738 |
++ * |
1739 |
++ * smp_rmb() is sufficient, as writes cannot pass the control barrier. |
1740 |
++ */ |
1741 |
++#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb() |
1742 |
++ |
1743 |
++/* |
1744 |
+ * Wait until all currently ongoing simple ops have completed. |
1745 |
+ * Caller must own sem_perm.lock. |
1746 |
+ * New simple ops cannot start, because simple ops first check |
1747 |
+@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma) |
1748 |
+ sem = sma->sem_base + i; |
1749 |
+ spin_unlock_wait(&sem->lock); |
1750 |
+ } |
1751 |
++ ipc_smp_acquire__after_spin_is_unlocked(); |
1752 |
+ } |
1753 |
+ |
1754 |
+ /* |
1755 |
+@@ -326,8 +337,13 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, |
1756 |
+ |
1757 |
+ /* Then check that the global lock is free */ |
1758 |
+ if (!spin_is_locked(&sma->sem_perm.lock)) { |
1759 |
+- /* spin_is_locked() is not a memory barrier */ |
1760 |
+- smp_mb(); |
1761 |
++ /* |
1762 |
++ * We need a memory barrier with acquire semantics, |
1763 |
++ * otherwise we can race with another thread that does: |
1764 |
++ * complex_count++; |
1765 |
++ * spin_unlock(sem_perm.lock); |
1766 |
++ */ |
1767 |
++ ipc_smp_acquire__after_spin_is_unlocked(); |
1768 |
+ |
1769 |
+ /* Now repeat the test of complex_count: |
1770 |
+ * It can't change anymore until we drop sem->lock. |
1771 |
+@@ -2067,17 +2083,28 @@ void exit_sem(struct task_struct *tsk) |
1772 |
+ rcu_read_lock(); |
1773 |
+ un = list_entry_rcu(ulp->list_proc.next, |
1774 |
+ struct sem_undo, list_proc); |
1775 |
+- if (&un->list_proc == &ulp->list_proc) |
1776 |
+- semid = -1; |
1777 |
+- else |
1778 |
+- semid = un->semid; |
1779 |
++ if (&un->list_proc == &ulp->list_proc) { |
1780 |
++ /* |
1781 |
++ * We must wait for freeary() before freeing this ulp, |
1782 |
++ * in case we raced with last sem_undo. There is a small |
1783 |
++ * possibility where we exit while freeary() didn't |
1784 |
++ * finish unlocking sem_undo_list. |
1785 |
++ */ |
1786 |
++ spin_unlock_wait(&ulp->lock); |
1787 |
++ rcu_read_unlock(); |
1788 |
++ break; |
1789 |
++ } |
1790 |
++ spin_lock(&ulp->lock); |
1791 |
++ semid = un->semid; |
1792 |
++ spin_unlock(&ulp->lock); |
1793 |
+ |
1794 |
++ /* exit_sem raced with IPC_RMID, nothing to do */ |
1795 |
+ if (semid == -1) { |
1796 |
+ rcu_read_unlock(); |
1797 |
+- break; |
1798 |
++ continue; |
1799 |
+ } |
1800 |
+ |
1801 |
+- sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid); |
1802 |
++ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid); |
1803 |
+ /* exit_sem raced with IPC_RMID, nothing to do */ |
1804 |
+ if (IS_ERR(sma)) { |
1805 |
+ rcu_read_unlock(); |
1806 |
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c |
1807 |
+index 672310e1597e..71b52dd957de 100644 |
1808 |
+--- a/kernel/cpuset.c |
1809 |
++++ b/kernel/cpuset.c |
1810 |
+@@ -1204,7 +1204,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, |
1811 |
+ mutex_unlock(&callback_mutex); |
1812 |
+ |
1813 |
+ /* use trialcs->mems_allowed as a temp variable */ |
1814 |
+- update_nodemasks_hier(cs, &cs->mems_allowed); |
1815 |
++ update_nodemasks_hier(cs, &trialcs->mems_allowed); |
1816 |
+ done: |
1817 |
+ return retval; |
1818 |
+ } |
1819 |
+diff --git a/kernel/events/core.c b/kernel/events/core.c |
1820 |
+index cb86038cad47..ff181a5a5562 100644 |
1821 |
+--- a/kernel/events/core.c |
1822 |
++++ b/kernel/events/core.c |
1823 |
+@@ -3729,28 +3729,21 @@ static void perf_event_for_each(struct perf_event *event, |
1824 |
+ mutex_unlock(&ctx->mutex); |
1825 |
+ } |
1826 |
+ |
1827 |
+-static int perf_event_period(struct perf_event *event, u64 __user *arg) |
1828 |
+-{ |
1829 |
+- struct perf_event_context *ctx = event->ctx; |
1830 |
+- int ret = 0, active; |
1831 |
++struct period_event { |
1832 |
++ struct perf_event *event; |
1833 |
+ u64 value; |
1834 |
++}; |
1835 |
+ |
1836 |
+- if (!is_sampling_event(event)) |
1837 |
+- return -EINVAL; |
1838 |
+- |
1839 |
+- if (copy_from_user(&value, arg, sizeof(value))) |
1840 |
+- return -EFAULT; |
1841 |
+- |
1842 |
+- if (!value) |
1843 |
+- return -EINVAL; |
1844 |
++static int __perf_event_period(void *info) |
1845 |
++{ |
1846 |
++ struct period_event *pe = info; |
1847 |
++ struct perf_event *event = pe->event; |
1848 |
++ struct perf_event_context *ctx = event->ctx; |
1849 |
++ u64 value = pe->value; |
1850 |
++ bool active; |
1851 |
+ |
1852 |
+- raw_spin_lock_irq(&ctx->lock); |
1853 |
++ raw_spin_lock(&ctx->lock); |
1854 |
+ if (event->attr.freq) { |
1855 |
+- if (value > sysctl_perf_event_sample_rate) { |
1856 |
+- ret = -EINVAL; |
1857 |
+- goto unlock; |
1858 |
+- } |
1859 |
+- |
1860 |
+ event->attr.sample_freq = value; |
1861 |
+ } else { |
1862 |
+ event->attr.sample_period = value; |
1863 |
+@@ -3769,11 +3762,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) |
1864 |
+ event->pmu->start(event, PERF_EF_RELOAD); |
1865 |
+ perf_pmu_enable(ctx->pmu); |
1866 |
+ } |
1867 |
++ raw_spin_unlock(&ctx->lock); |
1868 |
+ |
1869 |
+-unlock: |
1870 |
++ return 0; |
1871 |
++} |
1872 |
++ |
1873 |
++static int perf_event_period(struct perf_event *event, u64 __user *arg) |
1874 |
++{ |
1875 |
++ struct period_event pe = { .event = event, }; |
1876 |
++ struct perf_event_context *ctx = event->ctx; |
1877 |
++ struct task_struct *task; |
1878 |
++ u64 value; |
1879 |
++ |
1880 |
++ if (!is_sampling_event(event)) |
1881 |
++ return -EINVAL; |
1882 |
++ |
1883 |
++ if (copy_from_user(&value, arg, sizeof(value))) |
1884 |
++ return -EFAULT; |
1885 |
++ |
1886 |
++ if (!value) |
1887 |
++ return -EINVAL; |
1888 |
++ |
1889 |
++ if (event->attr.freq && value > sysctl_perf_event_sample_rate) |
1890 |
++ return -EINVAL; |
1891 |
++ |
1892 |
++ task = ctx->task; |
1893 |
++ pe.value = value; |
1894 |
++ |
1895 |
++ if (!task) { |
1896 |
++ cpu_function_call(event->cpu, __perf_event_period, &pe); |
1897 |
++ return 0; |
1898 |
++ } |
1899 |
++ |
1900 |
++retry: |
1901 |
++ if (!task_function_call(task, __perf_event_period, &pe)) |
1902 |
++ return 0; |
1903 |
++ |
1904 |
++ raw_spin_lock_irq(&ctx->lock); |
1905 |
++ if (ctx->is_active) { |
1906 |
++ raw_spin_unlock_irq(&ctx->lock); |
1907 |
++ task = ctx->task; |
1908 |
++ goto retry; |
1909 |
++ } |
1910 |
++ |
1911 |
++ __perf_event_period(&pe); |
1912 |
+ raw_spin_unlock_irq(&ctx->lock); |
1913 |
+ |
1914 |
+- return ret; |
1915 |
++ return 0; |
1916 |
+ } |
1917 |
+ |
1918 |
+ static const struct file_operations perf_fops; |
1919 |
+@@ -4398,12 +4433,20 @@ static const struct file_operations perf_fops = { |
1920 |
+ * to user-space before waking everybody up. |
1921 |
+ */ |
1922 |
+ |
1923 |
++static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) |
1924 |
++{ |
1925 |
++ /* only the parent has fasync state */ |
1926 |
++ if (event->parent) |
1927 |
++ event = event->parent; |
1928 |
++ return &event->fasync; |
1929 |
++} |
1930 |
++ |
1931 |
+ void perf_event_wakeup(struct perf_event *event) |
1932 |
+ { |
1933 |
+ ring_buffer_wakeup(event); |
1934 |
+ |
1935 |
+ if (event->pending_kill) { |
1936 |
+- kill_fasync(&event->fasync, SIGIO, event->pending_kill); |
1937 |
++ kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); |
1938 |
+ event->pending_kill = 0; |
1939 |
+ } |
1940 |
+ } |
1941 |
+@@ -5638,7 +5681,7 @@ static int __perf_event_overflow(struct perf_event *event, |
1942 |
+ else |
1943 |
+ perf_event_output(event, data, regs); |
1944 |
+ |
1945 |
+- if (event->fasync && event->pending_kill) { |
1946 |
++ if (*perf_event_fasync(event) && event->pending_kill) { |
1947 |
+ event->pending_wakeup = 1; |
1948 |
+ irq_work_queue(&event->pending); |
1949 |
+ } |
1950 |
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
1951 |
+index da8fa4e4237c..a1d4dfa62023 100644 |
1952 |
+--- a/mm/hugetlb.c |
1953 |
++++ b/mm/hugetlb.c |
1954 |
+@@ -855,6 +855,31 @@ struct hstate *size_to_hstate(unsigned long size) |
1955 |
+ return NULL; |
1956 |
+ } |
1957 |
+ |
1958 |
++/* |
1959 |
++ * Test to determine whether the hugepage is "active/in-use" (i.e. being linked |
1960 |
++ * to hstate->hugepage_activelist.) |
1961 |
++ * |
1962 |
++ * This function can be called for tail pages, but never returns true for them. |
1963 |
++ */ |
1964 |
++bool page_huge_active(struct page *page) |
1965 |
++{ |
1966 |
++ VM_BUG_ON_PAGE(!PageHuge(page), page); |
1967 |
++ return PageHead(page) && PagePrivate(&page[1]); |
1968 |
++} |
1969 |
++ |
1970 |
++/* never called for tail page */ |
1971 |
++static void set_page_huge_active(struct page *page) |
1972 |
++{ |
1973 |
++ VM_BUG_ON_PAGE(!PageHeadHuge(page), page); |
1974 |
++ SetPagePrivate(&page[1]); |
1975 |
++} |
1976 |
++ |
1977 |
++static void clear_page_huge_active(struct page *page) |
1978 |
++{ |
1979 |
++ VM_BUG_ON_PAGE(!PageHeadHuge(page), page); |
1980 |
++ ClearPagePrivate(&page[1]); |
1981 |
++} |
1982 |
++ |
1983 |
+ void free_huge_page(struct page *page) |
1984 |
+ { |
1985 |
+ /* |
1986 |
+@@ -875,6 +900,7 @@ void free_huge_page(struct page *page) |
1987 |
+ ClearPagePrivate(page); |
1988 |
+ |
1989 |
+ spin_lock(&hugetlb_lock); |
1990 |
++ clear_page_huge_active(page); |
1991 |
+ hugetlb_cgroup_uncharge_page(hstate_index(h), |
1992 |
+ pages_per_huge_page(h), page); |
1993 |
+ if (restore_reserve) |
1994 |
+@@ -2884,6 +2910,7 @@ retry_avoidcopy: |
1995 |
+ copy_user_huge_page(new_page, old_page, address, vma, |
1996 |
+ pages_per_huge_page(h)); |
1997 |
+ __SetPageUptodate(new_page); |
1998 |
++ set_page_huge_active(new_page); |
1999 |
+ |
2000 |
+ mmun_start = address & huge_page_mask(h); |
2001 |
+ mmun_end = mmun_start + huge_page_size(h); |
2002 |
+@@ -2995,6 +3022,7 @@ retry: |
2003 |
+ } |
2004 |
+ clear_huge_page(page, address, pages_per_huge_page(h)); |
2005 |
+ __SetPageUptodate(page); |
2006 |
++ set_page_huge_active(page); |
2007 |
+ |
2008 |
+ if (vma->vm_flags & VM_MAYSHARE) { |
2009 |
+ int err; |
2010 |
+@@ -3799,19 +3827,26 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage) |
2011 |
+ |
2012 |
+ bool isolate_huge_page(struct page *page, struct list_head *list) |
2013 |
+ { |
2014 |
++ bool ret = true; |
2015 |
++ |
2016 |
+ VM_BUG_ON_PAGE(!PageHead(page), page); |
2017 |
+- if (!get_page_unless_zero(page)) |
2018 |
+- return false; |
2019 |
+ spin_lock(&hugetlb_lock); |
2020 |
++ if (!page_huge_active(page) || !get_page_unless_zero(page)) { |
2021 |
++ ret = false; |
2022 |
++ goto unlock; |
2023 |
++ } |
2024 |
++ clear_page_huge_active(page); |
2025 |
+ list_move_tail(&page->lru, list); |
2026 |
++unlock: |
2027 |
+ spin_unlock(&hugetlb_lock); |
2028 |
+- return true; |
2029 |
++ return ret; |
2030 |
+ } |
2031 |
+ |
2032 |
+ void putback_active_hugepage(struct page *page) |
2033 |
+ { |
2034 |
+ VM_BUG_ON_PAGE(!PageHead(page), page); |
2035 |
+ spin_lock(&hugetlb_lock); |
2036 |
++ set_page_huge_active(page); |
2037 |
+ list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); |
2038 |
+ spin_unlock(&hugetlb_lock); |
2039 |
+ put_page(page); |
2040 |
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c |
2041 |
+index 22f047fbaa33..715bc57385b9 100644 |
2042 |
+--- a/mm/memory-failure.c |
2043 |
++++ b/mm/memory-failure.c |
2044 |
+@@ -1524,6 +1524,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags) |
2045 |
+ */ |
2046 |
+ ret = __get_any_page(page, pfn, 0); |
2047 |
+ if (!PageLRU(page)) { |
2048 |
++ /* Drop page reference which is from __get_any_page() */ |
2049 |
++ put_page(page); |
2050 |
+ pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", |
2051 |
+ pfn, page->flags); |
2052 |
+ return -EIO; |
2053 |
+@@ -1552,8 +1554,17 @@ static int soft_offline_huge_page(struct page *page, int flags) |
2054 |
+ } |
2055 |
+ unlock_page(hpage); |
2056 |
+ |
2057 |
+- /* Keep page count to indicate a given hugepage is isolated. */ |
2058 |
+- list_move(&hpage->lru, &pagelist); |
2059 |
++ ret = isolate_huge_page(hpage, &pagelist); |
2060 |
++ /* |
2061 |
++ * get_any_page() and isolate_huge_page() takes a refcount each, |
2062 |
++ * so need to drop one here. |
2063 |
++ */ |
2064 |
++ put_page(hpage); |
2065 |
++ if (!ret) { |
2066 |
++ pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn); |
2067 |
++ return -EBUSY; |
2068 |
++ } |
2069 |
++ |
2070 |
+ ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, |
2071 |
+ MIGRATE_SYNC, MR_MEMORY_FAILURE); |
2072 |
+ if (ret) { |
2073 |
+diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c |
2074 |
+index 5df05269d17a..cc641541d38f 100644 |
2075 |
+--- a/net/bridge/br_mdb.c |
2076 |
++++ b/net/bridge/br_mdb.c |
2077 |
+@@ -347,7 +347,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, |
2078 |
+ return -ENOMEM; |
2079 |
+ rcu_assign_pointer(*pp, p); |
2080 |
+ |
2081 |
+- br_mdb_notify(br->dev, port, group, RTM_NEWMDB); |
2082 |
+ return 0; |
2083 |
+ } |
2084 |
+ |
2085 |
+@@ -370,6 +369,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br, |
2086 |
+ if (!p || p->br != br || p->state == BR_STATE_DISABLED) |
2087 |
+ return -EINVAL; |
2088 |
+ |
2089 |
++ memset(&ip, 0, sizeof(ip)); |
2090 |
+ ip.proto = entry->addr.proto; |
2091 |
+ if (ip.proto == htons(ETH_P_IP)) |
2092 |
+ ip.u.ip4 = entry->addr.u.ip4; |
2093 |
+@@ -416,6 +416,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) |
2094 |
+ if (!netif_running(br->dev) || br->multicast_disabled) |
2095 |
+ return -EINVAL; |
2096 |
+ |
2097 |
++ memset(&ip, 0, sizeof(ip)); |
2098 |
+ ip.proto = entry->addr.proto; |
2099 |
+ if (ip.proto == htons(ETH_P_IP)) { |
2100 |
+ if (timer_pending(&br->ip4_other_query.timer)) |
2101 |
+diff --git a/net/core/datagram.c b/net/core/datagram.c |
2102 |
+index fdbc9a81d4c2..3a402a7b20e9 100644 |
2103 |
+--- a/net/core/datagram.c |
2104 |
++++ b/net/core/datagram.c |
2105 |
+@@ -744,7 +744,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) |
2106 |
+ !skb->csum_complete_sw) |
2107 |
+ netdev_rx_csum_fault(skb->dev); |
2108 |
+ } |
2109 |
+- skb->csum_valid = !sum; |
2110 |
++ if (!skb_shared(skb)) |
2111 |
++ skb->csum_valid = !sum; |
2112 |
+ return sum; |
2113 |
+ } |
2114 |
+ EXPORT_SYMBOL(__skb_checksum_complete_head); |
2115 |
+@@ -764,11 +765,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb) |
2116 |
+ netdev_rx_csum_fault(skb->dev); |
2117 |
+ } |
2118 |
+ |
2119 |
+- /* Save full packet checksum */ |
2120 |
+- skb->csum = csum; |
2121 |
+- skb->ip_summed = CHECKSUM_COMPLETE; |
2122 |
+- skb->csum_complete_sw = 1; |
2123 |
+- skb->csum_valid = !sum; |
2124 |
++ if (!skb_shared(skb)) { |
2125 |
++ /* Save full packet checksum */ |
2126 |
++ skb->csum = csum; |
2127 |
++ skb->ip_summed = CHECKSUM_COMPLETE; |
2128 |
++ skb->csum_complete_sw = 1; |
2129 |
++ skb->csum_valid = !sum; |
2130 |
++ } |
2131 |
+ |
2132 |
+ return sum; |
2133 |
+ } |
2134 |
+diff --git a/net/core/dev.c b/net/core/dev.c |
2135 |
+index fb9625874b3c..93612b2e3bbf 100644 |
2136 |
+--- a/net/core/dev.c |
2137 |
++++ b/net/core/dev.c |
2138 |
+@@ -3309,6 +3309,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, |
2139 |
+ local_irq_save(flags); |
2140 |
+ |
2141 |
+ rps_lock(sd); |
2142 |
++ if (!netif_running(skb->dev)) |
2143 |
++ goto drop; |
2144 |
+ qlen = skb_queue_len(&sd->input_pkt_queue); |
2145 |
+ if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { |
2146 |
+ if (skb_queue_len(&sd->input_pkt_queue)) { |
2147 |
+@@ -3330,6 +3332,7 @@ enqueue: |
2148 |
+ goto enqueue; |
2149 |
+ } |
2150 |
+ |
2151 |
++drop: |
2152 |
+ sd->dropped++; |
2153 |
+ rps_unlock(sd); |
2154 |
+ |
2155 |
+@@ -3638,8 +3641,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) |
2156 |
+ |
2157 |
+ pt_prev = NULL; |
2158 |
+ |
2159 |
+- rcu_read_lock(); |
2160 |
+- |
2161 |
+ another_round: |
2162 |
+ skb->skb_iif = skb->dev->ifindex; |
2163 |
+ |
2164 |
+@@ -3649,7 +3650,7 @@ another_round: |
2165 |
+ skb->protocol == cpu_to_be16(ETH_P_8021AD)) { |
2166 |
+ skb = skb_vlan_untag(skb); |
2167 |
+ if (unlikely(!skb)) |
2168 |
+- goto unlock; |
2169 |
++ goto out; |
2170 |
+ } |
2171 |
+ |
2172 |
+ #ifdef CONFIG_NET_CLS_ACT |
2173 |
+@@ -3674,7 +3675,7 @@ skip_taps: |
2174 |
+ #ifdef CONFIG_NET_CLS_ACT |
2175 |
+ skb = handle_ing(skb, &pt_prev, &ret, orig_dev); |
2176 |
+ if (!skb) |
2177 |
+- goto unlock; |
2178 |
++ goto out; |
2179 |
+ ncls: |
2180 |
+ #endif |
2181 |
+ |
2182 |
+@@ -3689,7 +3690,7 @@ ncls: |
2183 |
+ if (vlan_do_receive(&skb)) |
2184 |
+ goto another_round; |
2185 |
+ else if (unlikely(!skb)) |
2186 |
+- goto unlock; |
2187 |
++ goto out; |
2188 |
+ } |
2189 |
+ |
2190 |
+ rx_handler = rcu_dereference(skb->dev->rx_handler); |
2191 |
+@@ -3701,7 +3702,7 @@ ncls: |
2192 |
+ switch (rx_handler(&skb)) { |
2193 |
+ case RX_HANDLER_CONSUMED: |
2194 |
+ ret = NET_RX_SUCCESS; |
2195 |
+- goto unlock; |
2196 |
++ goto out; |
2197 |
+ case RX_HANDLER_ANOTHER: |
2198 |
+ goto another_round; |
2199 |
+ case RX_HANDLER_EXACT: |
2200 |
+@@ -3753,8 +3754,7 @@ drop: |
2201 |
+ ret = NET_RX_DROP; |
2202 |
+ } |
2203 |
+ |
2204 |
+-unlock: |
2205 |
+- rcu_read_unlock(); |
2206 |
++out: |
2207 |
+ return ret; |
2208 |
+ } |
2209 |
+ |
2210 |
+@@ -3785,29 +3785,30 @@ static int __netif_receive_skb(struct sk_buff *skb) |
2211 |
+ |
2212 |
+ static int netif_receive_skb_internal(struct sk_buff *skb) |
2213 |
+ { |
2214 |
++ int ret; |
2215 |
++ |
2216 |
+ net_timestamp_check(netdev_tstamp_prequeue, skb); |
2217 |
+ |
2218 |
+ if (skb_defer_rx_timestamp(skb)) |
2219 |
+ return NET_RX_SUCCESS; |
2220 |
+ |
2221 |
++ rcu_read_lock(); |
2222 |
++ |
2223 |
+ #ifdef CONFIG_RPS |
2224 |
+ if (static_key_false(&rps_needed)) { |
2225 |
+ struct rps_dev_flow voidflow, *rflow = &voidflow; |
2226 |
+- int cpu, ret; |
2227 |
+- |
2228 |
+- rcu_read_lock(); |
2229 |
+- |
2230 |
+- cpu = get_rps_cpu(skb->dev, skb, &rflow); |
2231 |
++ int cpu = get_rps_cpu(skb->dev, skb, &rflow); |
2232 |
+ |
2233 |
+ if (cpu >= 0) { |
2234 |
+ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); |
2235 |
+ rcu_read_unlock(); |
2236 |
+ return ret; |
2237 |
+ } |
2238 |
+- rcu_read_unlock(); |
2239 |
+ } |
2240 |
+ #endif |
2241 |
+- return __netif_receive_skb(skb); |
2242 |
++ ret = __netif_receive_skb(skb); |
2243 |
++ rcu_read_unlock(); |
2244 |
++ return ret; |
2245 |
+ } |
2246 |
+ |
2247 |
+ /** |
2248 |
+@@ -4343,8 +4344,10 @@ static int process_backlog(struct napi_struct *napi, int quota) |
2249 |
+ struct sk_buff *skb; |
2250 |
+ |
2251 |
+ while ((skb = __skb_dequeue(&sd->process_queue))) { |
2252 |
++ rcu_read_lock(); |
2253 |
+ local_irq_enable(); |
2254 |
+ __netif_receive_skb(skb); |
2255 |
++ rcu_read_unlock(); |
2256 |
+ local_irq_disable(); |
2257 |
+ input_queue_head_incr(sd); |
2258 |
+ if (++work >= quota) { |
2259 |
+@@ -5867,6 +5870,7 @@ static void rollback_registered_many(struct list_head *head) |
2260 |
+ unlist_netdevice(dev); |
2261 |
+ |
2262 |
+ dev->reg_state = NETREG_UNREGISTERING; |
2263 |
++ on_each_cpu(flush_backlog, dev, 1); |
2264 |
+ } |
2265 |
+ |
2266 |
+ synchronize_net(); |
2267 |
+@@ -6128,7 +6132,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev) |
2268 |
+ struct netdev_queue *tx; |
2269 |
+ size_t sz = count * sizeof(*tx); |
2270 |
+ |
2271 |
+- BUG_ON(count < 1 || count > 0xffff); |
2272 |
++ if (count < 1 || count > 0xffff) |
2273 |
++ return -EINVAL; |
2274 |
+ |
2275 |
+ tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); |
2276 |
+ if (!tx) { |
2277 |
+@@ -6486,8 +6491,6 @@ void netdev_run_todo(void) |
2278 |
+ |
2279 |
+ dev->reg_state = NETREG_UNREGISTERED; |
2280 |
+ |
2281 |
+- on_each_cpu(flush_backlog, dev, 1); |
2282 |
+- |
2283 |
+ netdev_wait_allrefs(dev); |
2284 |
+ |
2285 |
+ /* paranoia */ |
2286 |
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c |
2287 |
+index 0b320d93fb56..4ff3eacc99f5 100644 |
2288 |
+--- a/net/core/pktgen.c |
2289 |
++++ b/net/core/pktgen.c |
2290 |
+@@ -3490,8 +3490,10 @@ static int pktgen_thread_worker(void *arg) |
2291 |
+ pktgen_rem_thread(t); |
2292 |
+ |
2293 |
+ /* Wait for kthread_stop */ |
2294 |
+- while (!kthread_should_stop()) { |
2295 |
++ for (;;) { |
2296 |
+ set_current_state(TASK_INTERRUPTIBLE); |
2297 |
++ if (kthread_should_stop()) |
2298 |
++ break; |
2299 |
+ schedule(); |
2300 |
+ } |
2301 |
+ __set_current_state(TASK_RUNNING); |
2302 |
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c |
2303 |
+index 90c0e8386116..574fad9cca05 100644 |
2304 |
+--- a/net/ipv4/datagram.c |
2305 |
++++ b/net/ipv4/datagram.c |
2306 |
+@@ -20,7 +20,7 @@ |
2307 |
+ #include <net/route.h> |
2308 |
+ #include <net/tcp_states.h> |
2309 |
+ |
2310 |
+-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2311 |
++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2312 |
+ { |
2313 |
+ struct inet_sock *inet = inet_sk(sk); |
2314 |
+ struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; |
2315 |
+@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2316 |
+ |
2317 |
+ sk_dst_reset(sk); |
2318 |
+ |
2319 |
+- lock_sock(sk); |
2320 |
+- |
2321 |
+ oif = sk->sk_bound_dev_if; |
2322 |
+ saddr = inet->inet_saddr; |
2323 |
+ if (ipv4_is_multicast(usin->sin_addr.s_addr)) { |
2324 |
+@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2325 |
+ sk_dst_set(sk, &rt->dst); |
2326 |
+ err = 0; |
2327 |
+ out: |
2328 |
+- release_sock(sk); |
2329 |
+ return err; |
2330 |
+ } |
2331 |
++EXPORT_SYMBOL(__ip4_datagram_connect); |
2332 |
++ |
2333 |
++int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2334 |
++{ |
2335 |
++ int res; |
2336 |
++ |
2337 |
++ lock_sock(sk); |
2338 |
++ res = __ip4_datagram_connect(sk, uaddr, addr_len); |
2339 |
++ release_sock(sk); |
2340 |
++ return res; |
2341 |
++} |
2342 |
+ EXPORT_SYMBOL(ip4_datagram_connect); |
2343 |
+ |
2344 |
+ /* Because UDP xmit path can manipulate sk_dst_cache without holding |
2345 |
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c |
2346 |
+index b48e03cd6656..9516031847f1 100644 |
2347 |
+--- a/net/ipv4/ip_fragment.c |
2348 |
++++ b/net/ipv4/ip_fragment.c |
2349 |
+@@ -342,7 +342,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) |
2350 |
+ ihl = ip_hdrlen(skb); |
2351 |
+ |
2352 |
+ /* Determine the position of this fragment. */ |
2353 |
+- end = offset + skb->len - ihl; |
2354 |
++ end = offset + skb->len - skb_network_offset(skb) - ihl; |
2355 |
+ err = -EINVAL; |
2356 |
+ |
2357 |
+ /* Is this the final fragment? */ |
2358 |
+@@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) |
2359 |
+ goto err; |
2360 |
+ |
2361 |
+ err = -ENOMEM; |
2362 |
+- if (pskb_pull(skb, ihl) == NULL) |
2363 |
++ if (!pskb_pull(skb, skb_network_offset(skb) + ihl)) |
2364 |
+ goto err; |
2365 |
+ |
2366 |
+ err = pskb_trim_rcsum(skb, end - offset); |
2367 |
+@@ -612,6 +612,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, |
2368 |
+ iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0; |
2369 |
+ iph->tot_len = htons(len); |
2370 |
+ iph->tos |= ecn; |
2371 |
++ |
2372 |
++ ip_send_check(iph); |
2373 |
++ |
2374 |
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); |
2375 |
+ qp->q.fragments = NULL; |
2376 |
+ qp->q.fragments_tail = NULL; |
2377 |
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c |
2378 |
+index 0bb8e141eacc..682257242971 100644 |
2379 |
+--- a/net/ipv4/ip_tunnel.c |
2380 |
++++ b/net/ipv4/ip_tunnel.c |
2381 |
+@@ -587,7 +587,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, |
2382 |
+ EXPORT_SYMBOL(ip_tunnel_encap); |
2383 |
+ |
2384 |
+ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, |
2385 |
+- struct rtable *rt, __be16 df) |
2386 |
++ struct rtable *rt, __be16 df, |
2387 |
++ const struct iphdr *inner_iph) |
2388 |
+ { |
2389 |
+ struct ip_tunnel *tunnel = netdev_priv(dev); |
2390 |
+ int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len; |
2391 |
+@@ -604,7 +605,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, |
2392 |
+ |
2393 |
+ if (skb->protocol == htons(ETH_P_IP)) { |
2394 |
+ if (!skb_is_gso(skb) && |
2395 |
+- (df & htons(IP_DF)) && mtu < pkt_size) { |
2396 |
++ (inner_iph->frag_off & htons(IP_DF)) && |
2397 |
++ mtu < pkt_size) { |
2398 |
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb))); |
2399 |
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); |
2400 |
+ return -E2BIG; |
2401 |
+@@ -738,7 +740,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, |
2402 |
+ goto tx_error; |
2403 |
+ } |
2404 |
+ |
2405 |
+- if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) { |
2406 |
++ if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) { |
2407 |
+ ip_rt_put(rt); |
2408 |
+ goto tx_error; |
2409 |
+ } |
2410 |
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c |
2411 |
+index 11e3945eeac7..e069aeb2cf72 100644 |
2412 |
+--- a/net/ipv6/datagram.c |
2413 |
++++ b/net/ipv6/datagram.c |
2414 |
+@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a) |
2415 |
+ return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); |
2416 |
+ } |
2417 |
+ |
2418 |
+-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2419 |
++static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2420 |
+ { |
2421 |
+ struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; |
2422 |
+ struct inet_sock *inet = inet_sk(sk); |
2423 |
+@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2424 |
+ if (usin->sin6_family == AF_INET) { |
2425 |
+ if (__ipv6_only_sock(sk)) |
2426 |
+ return -EAFNOSUPPORT; |
2427 |
+- err = ip4_datagram_connect(sk, uaddr, addr_len); |
2428 |
++ err = __ip4_datagram_connect(sk, uaddr, addr_len); |
2429 |
+ goto ipv4_connected; |
2430 |
+ } |
2431 |
+ |
2432 |
+@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2433 |
+ sin.sin_addr.s_addr = daddr->s6_addr32[3]; |
2434 |
+ sin.sin_port = usin->sin6_port; |
2435 |
+ |
2436 |
+- err = ip4_datagram_connect(sk, |
2437 |
+- (struct sockaddr *) &sin, |
2438 |
+- sizeof(sin)); |
2439 |
++ err = __ip4_datagram_connect(sk, |
2440 |
++ (struct sockaddr *) &sin, |
2441 |
++ sizeof(sin)); |
2442 |
+ |
2443 |
+ ipv4_connected: |
2444 |
+ if (err) |
2445 |
+@@ -204,6 +204,16 @@ out: |
2446 |
+ fl6_sock_release(flowlabel); |
2447 |
+ return err; |
2448 |
+ } |
2449 |
++ |
2450 |
++int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
2451 |
++{ |
2452 |
++ int res; |
2453 |
++ |
2454 |
++ lock_sock(sk); |
2455 |
++ res = __ip6_datagram_connect(sk, uaddr, addr_len); |
2456 |
++ release_sock(sk); |
2457 |
++ return res; |
2458 |
++} |
2459 |
+ EXPORT_SYMBOL_GPL(ip6_datagram_connect); |
2460 |
+ |
2461 |
+ int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, |
2462 |
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c |
2463 |
+index a3084ab5df6c..ac5e973e9eb5 100644 |
2464 |
+--- a/net/ipv6/ip6_input.c |
2465 |
++++ b/net/ipv6/ip6_input.c |
2466 |
+@@ -329,10 +329,10 @@ int ip6_mc_input(struct sk_buff *skb) |
2467 |
+ if (offset < 0) |
2468 |
+ goto out; |
2469 |
+ |
2470 |
+- if (!ipv6_is_mld(skb, nexthdr, offset)) |
2471 |
+- goto out; |
2472 |
++ if (ipv6_is_mld(skb, nexthdr, offset)) |
2473 |
++ deliver = true; |
2474 |
+ |
2475 |
+- deliver = true; |
2476 |
++ goto out; |
2477 |
+ } |
2478 |
+ /* unknown RA - process it normally */ |
2479 |
+ } |
2480 |
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c |
2481 |
+index c82b2e37e652..6ffd1ebaba93 100644 |
2482 |
+--- a/net/netlink/af_netlink.c |
2483 |
++++ b/net/netlink/af_netlink.c |
2484 |
+@@ -366,25 +366,52 @@ err1: |
2485 |
+ return NULL; |
2486 |
+ } |
2487 |
+ |
2488 |
++ |
2489 |
++static void |
2490 |
++__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec, |
2491 |
++ unsigned int order) |
2492 |
++{ |
2493 |
++ struct netlink_sock *nlk = nlk_sk(sk); |
2494 |
++ struct sk_buff_head *queue; |
2495 |
++ struct netlink_ring *ring; |
2496 |
++ |
2497 |
++ queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; |
2498 |
++ ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; |
2499 |
++ |
2500 |
++ spin_lock_bh(&queue->lock); |
2501 |
++ |
2502 |
++ ring->frame_max = req->nm_frame_nr - 1; |
2503 |
++ ring->head = 0; |
2504 |
++ ring->frame_size = req->nm_frame_size; |
2505 |
++ ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; |
2506 |
++ |
2507 |
++ swap(ring->pg_vec_len, req->nm_block_nr); |
2508 |
++ swap(ring->pg_vec_order, order); |
2509 |
++ swap(ring->pg_vec, pg_vec); |
2510 |
++ |
2511 |
++ __skb_queue_purge(queue); |
2512 |
++ spin_unlock_bh(&queue->lock); |
2513 |
++ |
2514 |
++ WARN_ON(atomic_read(&nlk->mapped)); |
2515 |
++ |
2516 |
++ if (pg_vec) |
2517 |
++ free_pg_vec(pg_vec, order, req->nm_block_nr); |
2518 |
++} |
2519 |
++ |
2520 |
+ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, |
2521 |
+- bool closing, bool tx_ring) |
2522 |
++ bool tx_ring) |
2523 |
+ { |
2524 |
+ struct netlink_sock *nlk = nlk_sk(sk); |
2525 |
+ struct netlink_ring *ring; |
2526 |
+- struct sk_buff_head *queue; |
2527 |
+ void **pg_vec = NULL; |
2528 |
+ unsigned int order = 0; |
2529 |
+- int err; |
2530 |
+ |
2531 |
+ ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; |
2532 |
+- queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; |
2533 |
+ |
2534 |
+- if (!closing) { |
2535 |
+- if (atomic_read(&nlk->mapped)) |
2536 |
+- return -EBUSY; |
2537 |
+- if (atomic_read(&ring->pending)) |
2538 |
+- return -EBUSY; |
2539 |
+- } |
2540 |
++ if (atomic_read(&nlk->mapped)) |
2541 |
++ return -EBUSY; |
2542 |
++ if (atomic_read(&ring->pending)) |
2543 |
++ return -EBUSY; |
2544 |
+ |
2545 |
+ if (req->nm_block_nr) { |
2546 |
+ if (ring->pg_vec != NULL) |
2547 |
+@@ -416,31 +443,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, |
2548 |
+ return -EINVAL; |
2549 |
+ } |
2550 |
+ |
2551 |
+- err = -EBUSY; |
2552 |
+ mutex_lock(&nlk->pg_vec_lock); |
2553 |
+- if (closing || atomic_read(&nlk->mapped) == 0) { |
2554 |
+- err = 0; |
2555 |
+- spin_lock_bh(&queue->lock); |
2556 |
+- |
2557 |
+- ring->frame_max = req->nm_frame_nr - 1; |
2558 |
+- ring->head = 0; |
2559 |
+- ring->frame_size = req->nm_frame_size; |
2560 |
+- ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; |
2561 |
+- |
2562 |
+- swap(ring->pg_vec_len, req->nm_block_nr); |
2563 |
+- swap(ring->pg_vec_order, order); |
2564 |
+- swap(ring->pg_vec, pg_vec); |
2565 |
+- |
2566 |
+- __skb_queue_purge(queue); |
2567 |
+- spin_unlock_bh(&queue->lock); |
2568 |
+- |
2569 |
+- WARN_ON(atomic_read(&nlk->mapped)); |
2570 |
++ if (atomic_read(&nlk->mapped) == 0) { |
2571 |
++ __netlink_set_ring(sk, req, tx_ring, pg_vec, order); |
2572 |
++ mutex_unlock(&nlk->pg_vec_lock); |
2573 |
++ return 0; |
2574 |
+ } |
2575 |
++ |
2576 |
+ mutex_unlock(&nlk->pg_vec_lock); |
2577 |
+ |
2578 |
+ if (pg_vec) |
2579 |
+ free_pg_vec(pg_vec, order, req->nm_block_nr); |
2580 |
+- return err; |
2581 |
++ |
2582 |
++ return -EBUSY; |
2583 |
+ } |
2584 |
+ |
2585 |
+ static void netlink_mm_open(struct vm_area_struct *vma) |
2586 |
+@@ -909,10 +924,10 @@ static void netlink_sock_destruct(struct sock *sk) |
2587 |
+ |
2588 |
+ memset(&req, 0, sizeof(req)); |
2589 |
+ if (nlk->rx_ring.pg_vec) |
2590 |
+- netlink_set_ring(sk, &req, true, false); |
2591 |
++ __netlink_set_ring(sk, &req, false, NULL, 0); |
2592 |
+ memset(&req, 0, sizeof(req)); |
2593 |
+ if (nlk->tx_ring.pg_vec) |
2594 |
+- netlink_set_ring(sk, &req, true, true); |
2595 |
++ __netlink_set_ring(sk, &req, true, NULL, 0); |
2596 |
+ } |
2597 |
+ #endif /* CONFIG_NETLINK_MMAP */ |
2598 |
+ |
2599 |
+@@ -2163,7 +2178,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, |
2600 |
+ return -EINVAL; |
2601 |
+ if (copy_from_user(&req, optval, sizeof(req))) |
2602 |
+ return -EFAULT; |
2603 |
+- err = netlink_set_ring(sk, &req, false, |
2604 |
++ err = netlink_set_ring(sk, &req, |
2605 |
+ optname == NETLINK_TX_RING); |
2606 |
+ break; |
2607 |
+ } |
2608 |
+diff --git a/net/rds/info.c b/net/rds/info.c |
2609 |
+index 9a6b4f66187c..140a44a5f7b7 100644 |
2610 |
+--- a/net/rds/info.c |
2611 |
++++ b/net/rds/info.c |
2612 |
+@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval, |
2613 |
+ |
2614 |
+ /* check for all kinds of wrapping and the like */ |
2615 |
+ start = (unsigned long)optval; |
2616 |
+- if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) { |
2617 |
++ if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) { |
2618 |
+ ret = -EINVAL; |
2619 |
+ goto out; |
2620 |
+ } |
2621 |
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c |
2622 |
+index 51bddc236a15..8224016ebd70 100644 |
2623 |
+--- a/net/tipc/socket.c |
2624 |
++++ b/net/tipc/socket.c |
2625 |
+@@ -1996,6 +1996,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) |
2626 |
+ res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1); |
2627 |
+ if (res) |
2628 |
+ goto exit; |
2629 |
++ security_sk_clone(sock->sk, new_sock->sk); |
2630 |
+ |
2631 |
+ new_sk = new_sock->sk; |
2632 |
+ new_tsock = tipc_sk(new_sk); |
2633 |
+diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl |
2634 |
+index 9cb8522d8d22..f3d3fb42b873 100755 |
2635 |
+--- a/scripts/kconfig/streamline_config.pl |
2636 |
++++ b/scripts/kconfig/streamline_config.pl |
2637 |
+@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.'); |
2638 |
+ my $kconfig = $ARGV[1]; |
2639 |
+ my $lsmod_file = $ENV{'LSMOD'}; |
2640 |
+ |
2641 |
+-my @makefiles = `find $ksource -name Makefile 2>/dev/null`; |
2642 |
++my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`; |
2643 |
+ chomp @makefiles; |
2644 |
+ |
2645 |
+ my %depends; |
2646 |
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c |
2647 |
+index 9ff5050d513a..2c10c9ee36a2 100644 |
2648 |
+--- a/sound/usb/quirks.c |
2649 |
++++ b/sound/usb/quirks.c |
2650 |
+@@ -1258,6 +1258,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, |
2651 |
+ return SNDRV_PCM_FMTBIT_DSD_U32_BE; |
2652 |
+ break; |
2653 |
+ |
2654 |
++ case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */ |
2655 |
+ case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ |
2656 |
+ case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */ |
2657 |
+ if (fp->altsetting == 3) |