commit: 8e7f5205bb1197eb27e5adb6cd52da00a9452499
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jan 31 23:29:43 2016 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jan 31 23:29:43 2016 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8e7f5205

Linux patch 4.1.17

0000_README | 4 +
1016_linux-4.1.17.patch | 4830 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4834 insertions(+)

diff --git a/0000_README b/0000_README
index 1ca97cd..8b9fa0f 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch: 1015_linux-4.1.16.patch
From: http://www.kernel.org
Desc: Linux 4.1.16

+Patch: 1016_linux-4.1.17.patch
+From: http://www.kernel.org
+Desc: Linux 4.1.17
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1016_linux-4.1.17.patch b/1016_linux-4.1.17.patch
new file mode 100644
index 0000000..214c6ac
--- /dev/null
+++ b/1016_linux-4.1.17.patch
@@ -0,0 +1,4830 @@
+diff --git a/Makefile b/Makefile
+index 7609f1dcdcb9..d398dd440bc9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Series 4800
+
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 191dcfab9f60..da09ddcfcc00 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
+ __kvm_flush_dcache_pud(pud);
+ }
+
++static bool kvm_is_device_pfn(unsigned long pfn)
++{
++ return !pfn_valid(pfn);
++}
++
+ /**
+ * stage2_dissolve_pmd() - clear and flush huge PMD entry
+ * @kvm: pointer to kvm structure.
+@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+ /* No need to invalidate the cache for device mappings */
+- if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
++ if (!kvm_is_device_pfn(pte_pfn(old_pte)))
+ kvm_flush_dcache_pte(old_pte);
+
+ put_page(virt_to_page(pte));
+@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+- if (!pte_none(*pte) &&
+- (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE
++ if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
+ kvm_flush_dcache_pte(*pte);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ }
+@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+ return kvm_vcpu_dabt_iswrite(vcpu);
+ }
+
+-static bool kvm_is_device_pfn(unsigned long pfn)
+-{
+- return !pfn_valid(pfn);
+-}
+-
+ /**
+ * stage2_wp_ptes - write protect PMD range
+ * @pmd: pointer to pmd entry
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index e0e23582c8b4..5fe949b084ac 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -162,19 +162,6 @@ static inline int mem_words_used(struct jit_ctx *ctx)
+ return fls(ctx->seen & SEEN_MEM);
+ }
+
+-static inline bool is_load_to_a(u16 inst)
+-{
+- switch (inst) {
+- case BPF_LD | BPF_W | BPF_LEN:
+- case BPF_LD | BPF_W | BPF_ABS:
+- case BPF_LD | BPF_H | BPF_ABS:
+- case BPF_LD | BPF_B | BPF_ABS:
+- return true;
+- default:
+- return false;
+- }
+-}
+-
+ static void jit_fill_hole(void *area, unsigned int size)
+ {
+ u32 *ptr;
+@@ -186,7 +173,6 @@ static void jit_fill_hole(void *area, unsigned int size)
+ static void build_prologue(struct jit_ctx *ctx)
+ {
+ u16 reg_set = saved_regs(ctx);
+- u16 first_inst = ctx->skf->insns[0].code;
+ u16 off;
+
+ #ifdef CONFIG_FRAME_POINTER
+@@ -216,7 +202,7 @@ static void build_prologue(struct jit_ctx *ctx)
+ emit(ARM_MOV_I(r_X, 0), ctx);
+
+ /* do not leak kernel data to userspace */
+- if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
++ if (bpf_needs_clear_a(&ctx->skf->insns[0]))
+ emit(ARM_MOV_I(r_A, 0), ctx);
+
+ /* stack space for the BPF_MEM words */
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index 17e92f05b1fe..3ca894ecf699 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+ *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+ }
+
++/*
++ * vcpu_reg should always be passed a register number coming from a
++ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
++ * with banked registers.
++ */
+ static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+ {
+- if (vcpu_mode_is_32bit(vcpu))
+- return vcpu_reg32(vcpu, reg_num);
+-
+ return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+ }
+
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index d882b833dbdb..608ac6aa497b 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -58,6 +58,12 @@
+ */
+ void ptrace_disable(struct task_struct *child)
+ {
++ /*
++ * This would be better off in core code, but PTRACE_DETACH has
++ * grown its fair share of arch-specific worts and changing it
++ * is likely to cause regressions on obscure architectures.
++ */
++ user_disable_single_step(child);
+ }
+
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index 74753132c3ac..bbdb53b87e13 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -523,6 +523,10 @@ static int c_show(struct seq_file *m, void *v)
+ seq_printf(m, "processor\t: %d\n", i);
+ #endif
+
++ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
++ loops_per_jiffy / (500000UL/HZ),
++ loops_per_jiffy / (5000UL/HZ) % 100);
++
+ /*
+ * Dump out the common processor features in a single line.
+ * Userspace should read the hwcaps with getauxval(AT_HWCAP)
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index 53f1f8dccf6c..357418137db7 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -1,3 +1,4 @@
++#include <linux/ftrace.h>
+ #include <linux/percpu.h>
+ #include <linux/slab.h>
+ #include <asm/cacheflush.h>
+@@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ local_dbg_save(flags);
+
+ /*
++ * Function graph tracer state gets incosistent when the kernel
++ * calls functions that never return (aka suspend finishers) hence
++ * disable graph tracing during their execution.
++ */
++ pause_graph_tracing();
++
++ /*
+ * mm context saved on the stack, it will be restored when
+ * the cpu comes out of reset through the identity mapped
+ * page tables, so that the thread address space is properly
+@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ hw_breakpoint_restore(NULL);
+ }
+
++ unpause_graph_tracing();
++
+ /*
+ * Restore pstate flags. OS lock and mdscr have been already
+ * restored, so from this point onwards, debugging is fully
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index 85c57158dcd9..648112e90ed5 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+
+ /* Note: These now point to the banked copies */
+ *vcpu_spsr(vcpu) = new_spsr_value;
+- *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
++ *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+
+ /* Branch to exception vector */
+ if (sctlr & (1 << 13))
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 5b8b664422d3..cb34eb8bbb9d 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -450,6 +450,9 @@ void __init paging_init(void)
+
+ empty_zero_page = virt_to_page(zero_page);
+
++ /* Ensure the zero page is visible to the page table walker */
++ dsb(ishst);
++
+ /*
+ * TTBR0 is only used for the identity mapping at this stage. Make it
+ * point to zero page to avoid speculatively fetching new entries.
+diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
+index 98a26ce82d26..aee5637ea436 100644
+--- a/arch/arm64/net/bpf_jit.h
++++ b/arch/arm64/net/bpf_jit.h
+@@ -1,7 +1,7 @@
+ /*
+ * BPF JIT compiler for ARM64
+ *
+- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@×××××.com>
++ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@×××××.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -35,6 +35,7 @@
+ aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
+ AARCH64_INSN_BRANCH_COMP_##type)
+ #define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
++#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)
+
+ /* Conditional branch (immediate) */
+ #define A64_COND_BRANCH(cond, offset) \
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index c047598b09e0..6217f80702d2 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -1,7 +1,7 @@
+ /*
+ * BPF JIT compiler for ARM64
+ *
+- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@×××××.com>
++ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@×××××.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -225,6 +225,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+ u8 jmp_cond;
+ s32 jmp_offset;
+
++#define check_imm(bits, imm) do { \
++ if ((((imm) > 0) && ((imm) >> (bits))) || \
++ (((imm) < 0) && (~(imm) >> (bits)))) { \
++ pr_info("[%2d] imm=%d(0x%x) out of range\n", \
++ i, imm, imm); \
++ return -EINVAL; \
++ } \
++} while (0)
++#define check_imm19(imm) check_imm(19, imm)
++#define check_imm26(imm) check_imm(26, imm)
++
+ switch (code) {
+ /* dst = src */
+ case BPF_ALU | BPF_MOV | BPF_X:
+@@ -258,15 +269,33 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+ break;
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_DIV | BPF_X:
+- emit(A64_UDIV(is64, dst, dst, src), ctx);
+- break;
+ case BPF_ALU | BPF_MOD | BPF_X:
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+- ctx->tmp_used = 1;
+- emit(A64_UDIV(is64, tmp, dst, src), ctx);
+- emit(A64_MUL(is64, tmp, tmp, src), ctx);
+- emit(A64_SUB(is64, dst, dst, tmp), ctx);
++ {
++ const u8 r0 = bpf2a64[BPF_REG_0];
++
++ /* if (src == 0) return 0 */
++ jmp_offset = 3; /* skip ahead to else path */
++ check_imm19(jmp_offset);
++ emit(A64_CBNZ(is64, src, jmp_offset), ctx);
++ emit(A64_MOVZ(1, r0, 0, 0), ctx);
++ jmp_offset = epilogue_offset(ctx);
++ check_imm26(jmp_offset);
++ emit(A64_B(jmp_offset), ctx);
++ /* else */
++ switch (BPF_OP(code)) {
++ case BPF_DIV:
++ emit(A64_UDIV(is64, dst, dst, src), ctx);
++ break;
++ case BPF_MOD:
++ ctx->tmp_used = 1;
++ emit(A64_UDIV(is64, tmp, dst, src), ctx);
++ emit(A64_MUL(is64, tmp, tmp, src), ctx);
++ emit(A64_SUB(is64, dst, dst, tmp), ctx);
++ break;
++ }
+ break;
++ }
+ case BPF_ALU | BPF_LSH | BPF_X:
+ case BPF_ALU64 | BPF_LSH | BPF_X:
+ emit(A64_LSLV(is64, dst, dst, src), ctx);
+@@ -393,17 +422,6 @@ emit_bswap_uxt:
+ emit(A64_ASR(is64, dst, dst, imm), ctx);
+ break;
+
+-#define check_imm(bits, imm) do { \
+- if ((((imm) > 0) && ((imm) >> (bits))) || \
+- (((imm) < 0) && (~(imm) >> (bits)))) { \
+- pr_info("[%2d] imm=%d(0x%x) out of range\n", \
+- i, imm, imm); \
+- return -EINVAL; \
+- } \
+-} while (0)
+-#define check_imm19(imm) check_imm(19, imm)
+-#define check_imm26(imm) check_imm(26, imm)
+-
+ /* JUMP off */
+ case BPF_JMP | BPF_JA:
+ jmp_offset = bpf2a64_offset(i + off, i, ctx);
+diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
+index e23fdf2a9c80..d6d27d51d131 100644
+--- a/arch/mips/net/bpf_jit.c
++++ b/arch/mips/net/bpf_jit.c
+@@ -556,19 +556,6 @@ static inline u16 align_sp(unsigned int num)
+ return num;
+ }
+
+-static bool is_load_to_a(u16 inst)
+-{
+- switch (inst) {
+- case BPF_LD | BPF_W | BPF_LEN:
+- case BPF_LD | BPF_W | BPF_ABS:
+- case BPF_LD | BPF_H | BPF_ABS:
+- case BPF_LD | BPF_B | BPF_ABS:
+- return true;
+- default:
+- return false;
+- }
+-}
+-
+ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
+ {
+ int i = 0, real_off = 0;
+@@ -686,7 +673,6 @@ static unsigned int get_stack_depth(struct jit_ctx *ctx)
+
+ static void build_prologue(struct jit_ctx *ctx)
+ {
+- u16 first_inst = ctx->skf->insns[0].code;
+ int sp_off;
+
+ /* Calculate the total offset for the stack pointer */
+@@ -700,7 +686,7 @@ static void build_prologue(struct jit_ctx *ctx)
+ emit_jit_reg_move(r_X, r_zero, ctx);
+
+ /* Do not leak kernel data to userspace */
+- if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
++ if (bpf_needs_clear_a(&ctx->skf->insns[0]))
+ emit_jit_reg_move(r_A, r_zero, ctx);
+ }
+
+diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
+index 4434b54e1d87..78ae5552fdb8 100644
+--- a/arch/mn10300/Kconfig
++++ b/arch/mn10300/Kconfig
+@@ -1,6 +1,7 @@
+ config MN10300
+ def_bool y
+ select HAVE_OPROFILE
++ select HAVE_UID16
+ select GENERIC_IRQ_SHOW
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select HAVE_ARCH_TRACEHOOK
+@@ -37,9 +38,6 @@ config HIGHMEM
+ config NUMA
+ def_bool n
+
+-config UID16
+- def_bool y
+-
+ config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
+index d463c68fe7f0..99897f6645c1 100644
+--- a/arch/powerpc/include/asm/cmpxchg.h
++++ b/arch/powerpc/include/asm/cmpxchg.h
+@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
+ unsigned long prev;
+
+ __asm__ __volatile__(
+- PPC_RELEASE_BARRIER
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: lwarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+ " stwcx. %3,0,%2 \n\
+ bne- 1b"
+- PPC_ACQUIRE_BARRIER
++ PPC_ATOMIC_EXIT_BARRIER
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
+ unsigned long prev;
+
+ __asm__ __volatile__(
+- PPC_RELEASE_BARRIER
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: ldarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+ " stdcx. %3,0,%2 \n\
+ bne- 1b"
+- PPC_ACQUIRE_BARRIER
++ PPC_ATOMIC_EXIT_BARRIER
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+@@ -152,14 +152,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+- PPC_RELEASE_BARRIER
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%2)
+ " stwcx. %4,0,%2\n\
+ bne- 1b"
+- PPC_ACQUIRE_BARRIER
++ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+ 2:"
+ : "=&r" (prev), "+m" (*p)
+@@ -198,13 +198,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+- PPC_RELEASE_BARRIER
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+- PPC_ACQUIRE_BARRIER
++ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+ 2:"
+ : "=&r" (prev), "+m" (*p)
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index af56b5c6c81a..f4f99f01b746 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -108,6 +108,7 @@
+ #define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
+ #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
+ #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
++#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
+ #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
+ #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
+
+diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
+index e682a7143edb..c50868681f9e 100644
+--- a/arch/powerpc/include/asm/synch.h
++++ b/arch/powerpc/include/asm/synch.h
+@@ -44,7 +44,7 @@ static inline void isync(void)
+ MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
+ #define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+ #define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
+-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
++#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
+ #define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
+ #else
+ #define PPC_ACQUIRE_BARRIER
+diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
+index 59dad113897b..c2d21d11c2d2 100644
+--- a/arch/powerpc/include/uapi/asm/elf.h
++++ b/arch/powerpc/include/uapi/asm/elf.h
+@@ -295,6 +295,8 @@ do { \
+ #define R_PPC64_TLSLD 108
+ #define R_PPC64_TOCSAVE 109
+
++#define R_PPC64_ENTRY 118
++
+ #define R_PPC64_REL16 249
+ #define R_PPC64_REL16_LO 250
+ #define R_PPC64_REL16_HI 251
+diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
+index 68384514506b..59663af9315f 100644
+--- a/arch/powerpc/kernel/module_64.c
++++ b/arch/powerpc/kernel/module_64.c
+@@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ */
+ break;
+
++ case R_PPC64_ENTRY:
++ /*
++ * Optimize ELFv2 large code model entry point if
++ * the TOC is within 2GB range of current location.
++ */
++ value = my_r2(sechdrs, me) - (unsigned long)location;
++ if (value + 0x80008000 > 0xffffffff)
++ break;
++ /*
++ * Check for the large code model prolog sequence:
++ * ld r2, ...(r12)
++ * add r2, r2, r12
++ */
++ if ((((uint32_t *)location)[0] & ~0xfffc)
++ != 0xe84c0000)
++ break;
++ if (((uint32_t *)location)[1] != 0x7c426214)
++ break;
++ /*
++ * If found, replace it with:
++ * addis r2, r12, (.TOC.-func)@ha
++ * addi r2, r12, (.TOC.-func)@l
++ */
++ ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
++ ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
++ break;
++
+ case R_PPC64_REL16_HA:
+ /* Subtract location pointer */
+ value -= (unsigned long)location;
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 0596373cd1c3..c8c8275765e7 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
+ msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
+ }
+
++ /*
++ * Use the current MSR TM suspended bit to track if we have
++ * checkpointed state outstanding.
++ * On signal delivery, we'd normally reclaim the checkpointed
++ * state to obtain stack pointer (see:get_tm_stackpointer()).
++ * This will then directly return to userspace without going
++ * through __switch_to(). However, if the stack frame is bad,
++ * we need to exit this thread which calls __switch_to() which
++ * will again attempt to reclaim the already saved tm state.
++ * Hence we need to check that we've not already reclaimed
++ * this state.
++ * We do this using the current MSR, rather tracking it in
++ * some specific thread_struct bit, as it has the additional
++ * benifit of checking for a potential TM bad thing exception.
++ */
++ if (!MSR_TM_SUSPENDED(mfmsr()))
++ return;
++
+ tm_reclaim(thr, thr->regs->msr, cause);
+
+ /* Having done the reclaim, we now have the checkpointed
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index da50e0c9c57e..7356c33dc897 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs,
+ return 1;
+ #endif /* CONFIG_SPE */
+
++ /* Get the top half of the MSR from the user context */
++ if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
++ return 1;
++ msr_hi <<= 32;
++ /* If TM bits are set to the reserved value, it's an invalid context */
++ if (MSR_TM_RESV(msr_hi))
++ return 1;
++ /* Pull in the MSR TM bits from the user context */
++ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+ /* Now, recheckpoint. This loads up all of the checkpointed (older)
+ * registers, including FP and V[S]Rs. After recheckpointing, the
+ * transactional versions should be loaded.
+@@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
+ current->thread.tm_texasr |= TEXASR_FS;
+ /* This loads the checkpointed FP/VEC state, if used */
+ tm_recheckpoint(&current->thread, msr);
+- /* Get the top half of the MSR */
+- if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+- return 1;
+- /* Pull in MSR TM from user context */
+- regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
+
+ /* This loads the speculative FP/VEC state, if used */
+ if (msr & MSR_FP) {
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index c7c24d2e2bdb..164fd6474843 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -427,6 +427,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
+
+ /* get MSR separately, transfer the LE bit if doing signal return */
+ err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
++ /* Don't allow reserved mode. */
++ if (MSR_TM_RESV(msr))
++ return -EINVAL;
++
+ /* pull in MSR TM from user context */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index f1e0e5522e3a..f5b3de7f7fa2 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -210,6 +210,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
+
+ static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
+ {
++ /*
++ * Check for illegal transactional state bit combination
++ * and if we find it, force the TS field to a safe state.
++ */
++ if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
++ msr &= ~MSR_TS_MASK;
+ vcpu->arch.shregs.msr = msr;
+ kvmppc_end_cede(vcpu);
+ }
+diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
+index 17cea18a09d3..264c473c1b3c 100644
+--- a/arch/powerpc/net/bpf_jit_comp.c
++++ b/arch/powerpc/net/bpf_jit_comp.c
+@@ -78,18 +78,9 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
+ PPC_LI(r_X, 0);
+ }
+
+- switch (filter[0].code) {
+- case BPF_RET | BPF_K:
+- case BPF_LD | BPF_W | BPF_LEN:
+- case BPF_LD | BPF_W | BPF_ABS:
+- case BPF_LD | BPF_H | BPF_ABS:
+- case BPF_LD | BPF_B | BPF_ABS:
+- /* first instruction sets A register (or is RET 'constant') */
+- break;
+- default:
+- /* make sure we dont leak kernel information to user */
++ /* make sure we dont leak kernel information to user */
++ if (bpf_needs_clear_a(&filter[0]))
+ PPC_LI(r_A, 0);
+- }
+ }
+
+ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
+index 2241565b0739..b831a2ee32e9 100644
+--- a/arch/powerpc/platforms/powernv/opal.c
++++ b/arch/powerpc/platforms/powernv/opal.c
+@@ -358,7 +358,7 @@ static void opal_handle_message(void)
+
+ /* Sanity check */
+ if (type >= OPAL_MSG_TYPE_MAX) {
+- pr_warning("%s: Unknown message type: %u\n", __func__, type);
++ pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
+ return;
+ }
+ opal_message_do_notify(type, (void *)&msg);
+diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
+index 7931eeeb649a..8109e92cd619 100644
+--- a/arch/sparc/net/bpf_jit_comp.c
++++ b/arch/sparc/net/bpf_jit_comp.c
+@@ -420,22 +420,9 @@ void bpf_jit_compile(struct bpf_prog *fp)
+ }
+ emit_reg_move(O7, r_saved_O7);
+
+- switch (filter[0].code) {
+- case BPF_RET | BPF_K:
+- case BPF_LD | BPF_W | BPF_LEN:
+- case BPF_LD | BPF_W | BPF_ABS:
+- case BPF_LD | BPF_H | BPF_ABS:
+- case BPF_LD | BPF_B | BPF_ABS:
+- /* The first instruction sets the A register (or is
+- * a "RET 'constant'")
+- */
+- break;
+- default:
+- /* Make sure we dont leak kernel information to the
+- * user.
+- */
++ /* Make sure we dont leak kernel information to the user. */
++ if (bpf_needs_clear_a(&filter[0]))
+ emit_clear(r_A); /* A = 0 */
+- }
+
+ for (i = 0; i < flen; i++) {
+ unsigned int K = filter[i].k;
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index 4fa687a47a62..6b8d6e8cd449 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -27,7 +27,7 @@
+ #define BOOT_HEAP_SIZE 0x400000
+ #else /* !CONFIG_KERNEL_BZIP2 */
+
+-#define BOOT_HEAP_SIZE 0x8000
++#define BOOT_HEAP_SIZE 0x10000
+
+ #endif /* !CONFIG_KERNEL_BZIP2 */
+
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 80d67dd80351..73e38f14ddeb 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -104,8 +104,36 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ #endif
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+- /* Re-load page tables */
++ /*
++ * Re-load page tables.
++ *
++ * This logic has an ordering constraint:
++ *
++ * CPU 0: Write to a PTE for 'next'
++ * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
++ * CPU 1: set bit 1 in next's mm_cpumask
++ * CPU 1: load from the PTE that CPU 0 writes (implicit)
++ *
++ * We need to prevent an outcome in which CPU 1 observes
++ * the new PTE value and CPU 0 observes bit 1 clear in
++ * mm_cpumask. (If that occurs, then the IPI will never
++ * be sent, and CPU 0's TLB will contain a stale entry.)
++ *
++ * The bad outcome can occur if either CPU's load is
++ * reordered before that CPU's store, so both CPUs must
++ * execute full barriers to prevent this from happening.
++ *
++ * Thus, switch_mm needs a full barrier between the
++ * store to mm_cpumask and any operation that could load
++ * from next->pgd. TLB fills are special and can happen
++ * due to instruction fetches or for no reason at all,
++ * and neither LOCK nor MFENCE orders them.
++ * Fortunately, load_cr3() is serializing and gives the
++ * ordering guarantee we need.
++ *
++ */
+ load_cr3(next->pgd);
++
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+
+ /* Stop flush ipis for the previous mm */
+@@ -142,10 +170,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ * schedule, protecting us from simultaneous changes.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
++
+ /*
+ * We were in lazy tlb mode and leave_mm disabled
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
++ *
++ * As above, load_cr3() is serializing and orders TLB
++ * fills with respect to the mm_cpumask write.
+ */
+ load_cr3(next->pgd);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 86db4bcd7ce5..0549ae3cb332 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -182,6 +182,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+ },
+ },
++ { /* Handle problems with rebooting on the iMac10,1. */
++ .callback = set_pci_reboot,
++ .ident = "Apple iMac10,1",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
++ },
++ },
+
+ /* ASRock */
+ { /* Handle problems with rebooting on ASRock Q1900DC-ITX */
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index e0fd5f47fbb9..5d2e2e9af1c4 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -667,12 +667,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ signal_setup_done(failed, ksig, stepping);
+ }
+
+-#ifdef CONFIG_X86_32
+-#define NR_restart_syscall __NR_restart_syscall
+-#else /* !CONFIG_X86_32 */
+-#define NR_restart_syscall \
+- test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
+-#endif /* CONFIG_X86_32 */
++static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
++{
++#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
++ return __NR_restart_syscall;
++#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
++ return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
++ __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
++#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
++}
+
+ /*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+@@ -701,7 +704,7 @@ static void do_signal(struct pt_regs *regs)
+ break;
+
+ case -ERESTART_RESTARTBLOCK:
+- regs->ax = NR_restart_syscall;
++ regs->ax = get_nr_restart_syscall(regs);
+ regs->ip -= 2;
+ break;
+ }
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 454ccb082e18..0d039cd268a8 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1106,6 +1106,7 @@ static void init_vmcb(struct vcpu_svm *svm)
+ set_exception_intercept(svm, UD_VECTOR);
+ set_exception_intercept(svm, MC_VECTOR);
+ set_exception_intercept(svm, AC_VECTOR);
++ set_exception_intercept(svm, DB_VECTOR);
+
+ set_intercept(svm, INTERCEPT_INTR);
+ set_intercept(svm, INTERCEPT_NMI);
+@@ -1638,20 +1639,13 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
+ mark_dirty(svm->vmcb, VMCB_SEG);
+ }
+
+-static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
++static void update_bp_intercept(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+- clr_exception_intercept(svm, DB_VECTOR);
+ clr_exception_intercept(svm, BP_VECTOR);
+
+- if (svm->nmi_singlestep)
+- set_exception_intercept(svm, DB_VECTOR);
+-
+ if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
+- if (vcpu->guest_debug &
+- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+- set_exception_intercept(svm, DB_VECTOR);
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+ set_exception_intercept(svm, BP_VECTOR);
+ } else
+@@ -1757,7 +1751,6 @@ static int db_interception(struct vcpu_svm *svm)
+ if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
+ svm->vmcb->save.rflags &=
+ ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+- update_db_bp_intercept(&svm->vcpu);
+ }
+
+ if (svm->vcpu.guest_debug &
+@@ -3751,7 +3744,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
+ */
+ svm->nmi_singlestep = true;
+ svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
+- update_db_bp_intercept(vcpu);
+ }
+
+ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
+@@ -4367,7 +4359,7 @@ static struct kvm_x86_ops svm_x86_ops = {
+ .vcpu_load = svm_vcpu_load,
+ .vcpu_put = svm_vcpu_put,
+
+- .update_db_bp_intercept = update_db_bp_intercept,
++ .update_db_bp_intercept = update_bp_intercept,
+ .get_msr = svm_get_msr,
+ .set_msr = svm_set_msr,
+ .get_segment_base = svm_get_segment_base,
+diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
+index 7c7bc8bef21f..21dda139eb3a 100644
+--- a/arch/x86/kvm/trace.h
++++ b/arch/x86/kvm/trace.h
+@@ -250,7 +250,7 @@ TRACE_EVENT(kvm_inj_virq,
+ #define kvm_trace_sym_exc \
+ EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
+ EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
+- EXS(MF), EXS(MC)
++ EXS(MF), EXS(AC), EXS(MC)
+
+ /*
+ * Tracepoint for kvm interrupt injection:
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index a243854c35d5..945f9e13f1aa 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3652,20 +3652,21 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ if (!is_paging(vcpu)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+ hw_cr4 |= X86_CR4_PSE;
+- /*
+- * SMEP/SMAP is disabled if CPU is in non-paging mode
+- * in hardware. However KVM always uses paging mode to
+- * emulate guest non-paging mode with TDP.
+- * To emulate this behavior, SMEP/SMAP needs to be
+- * manually disabled when guest switches to non-paging
+- * mode.
+- */
+- hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
+ } else if (!(cr4 & X86_CR4_PAE)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+ }
+ }
+
++ if (!enable_unrestricted_guest && !is_paging(vcpu))
++ /*
++ * SMEP/SMAP is disabled if CPU is in non-paging mode in
++ * hardware. However KVM always uses paging mode without
++ * unrestricted guest.
++ * To emulate this behavior, SMEP/SMAP needs to be manually
++ * disabled when guest switches to non-paging mode.
++ */
++ hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
++
+ vmcs_writel(CR4_READ_SHADOW, cr4);
+ vmcs_writel(GUEST_CR4, hw_cr4);
+ return 0;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 47a32f743a91..fed4c84eac44 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -940,7 +940,7 @@ static u32 msrs_to_save[] = {
+ MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
+ #endif
+ MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
+- MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
++ MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
+ };
+
+ static unsigned num_msrs_to_save;
+@@ -4117,16 +4117,17 @@ static void kvm_init_msr_list(void)
+
+ /*
+ * Even MSRs that are valid in the host may not be exposed
+- * to the guests in some cases. We could work around this
+- * in VMX with the generic MSR save/load machinery, but it
+- * is not really worthwhile since it will really only
+- * happen with nested virtualization.
++ * to the guests in some cases.
+ */
+ switch (msrs_to_save[i]) {
+ case MSR_IA32_BNDCFGS:
+ if (!kvm_x86_ops->mpx_supported())
+ continue;
+ break;
++ case MSR_TSC_AUX:
++ if (!kvm_x86_ops->rdtscp_supported())
++ continue;
++ break;
+ default:
+ break;
+ }
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index 4d1c11c07fe1..f738c61bc891 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -120,19 +120,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
+ switch (type) {
+ case REG_TYPE_RM:
+ regno = X86_MODRM_RM(insn->modrm.value);
+- if (X86_REX_B(insn->rex_prefix.value) == 1)
++ if (X86_REX_B(insn->rex_prefix.value))
+ regno += 8;
+ break;
+
+ case REG_TYPE_INDEX:
+ regno = X86_SIB_INDEX(insn->sib.value);
+- if (X86_REX_X(insn->rex_prefix.value) == 1)
++ if (X86_REX_X(insn->rex_prefix.value))
+ regno += 8;
+ break;
+
+ case REG_TYPE_BASE:
+ regno = X86_SIB_BASE(insn->sib.value);
+- if (X86_REX_B(insn->rex_prefix.value) == 1)
++ if (X86_REX_B(insn->rex_prefix.value))
+ regno += 8;
+ break;
+
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 90b924acd982..061e0114005e 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -160,7 +160,10 @@ void flush_tlb_current_task(void)
+ preempt_disable();
+
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
++
++ /* This is an implicit full barrier that synchronizes with switch_mm. */
+ local_flush_tlb();
++
+ trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+@@ -187,17 +190,29 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
+
+ preempt_disable();
+- if (current->active_mm != mm)
++ if (current->active_mm != mm) {
++ /* Synchronize with switch_mm. */
++ smp_mb();
++
+ goto out;
++ }
+
+ if (!current->mm) {
+ leave_mm(smp_processor_id());
++
++ /* Synchronize with switch_mm. */
++ smp_mb();
++
+ goto out;
+ }
+
+ if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+ base_pages_to_flush = (end - start) >> PAGE_SHIFT;
+
++ /*
++ * Both branches below are implicit full barriers (MOV to CR or
++ * INVLPG) that synchronize with switch_mm.
++ */
+ if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+ base_pages_to_flush = TLB_FLUSH_ALL;
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+@@ -227,10 +242,18 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
+ preempt_disable();
+
+ if (current->active_mm == mm) {
+- if (current->mm)
++ if (current->mm) {
++ /*
++ * Implicit full barrier (INVLPG) that synchronizes
++ * with switch_mm.
++ */
+ __flush_tlb_one(start);
+- else
++ } else {
+ leave_mm(smp_processor_id());
++
++ /* Synchronize with switch_mm. */
++ smp_mb();
++ }
+ }
+
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
+index 53b4c0811f4f..6d3415144dab 100644
+--- a/arch/x86/xen/suspend.c
++++ b/arch/x86/xen/suspend.c
+@@ -32,7 +32,8 @@ static void xen_hvm_post_suspend(int suspend_cancelled)
+ {
+ #ifdef CONFIG_XEN_PVHVM
+ int cpu;
+- xen_hvm_init_shared_info();
++ if (!suspend_cancelled)
++ xen_hvm_init_shared_info();
+ xen_callback_vector();
+ xen_unplug_emulated_devices();
+ if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index 8a45e92ff60c..05222706dc66 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -404,18 +404,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
+ return rv;
+ }
+
+-static void start_check_enables(struct smi_info *smi_info)
++static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
++{
++ smi_info->last_timeout_jiffies = jiffies;
++ mod_timer(&smi_info->si_timer, new_val);
++ smi_info->timer_running = true;
++}
++
++/*
++ * Start a new message and (re)start the timer and thread.
++ */
++static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
++ unsigned int size)
++{
++ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
++
++ if (smi_info->thread)
++ wake_up_process(smi_info->thread);
++
++ smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
++}
++
++static void start_check_enables(struct smi_info *smi_info, bool start_timer)
+ {
+ unsigned char msg[2];
+
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+
+- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
++ if (start_timer)
++ start_new_msg(smi_info, msg, 2);
++ else
++ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+ smi_info->si_state = SI_CHECKING_ENABLES;
+ }
+
+-static void start_clear_flags(struct smi_info *smi_info)
++static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
+ {
+ unsigned char msg[3];
+
+@@ -424,7 +448,10 @@ static void start_clear_flags(struct smi_info *smi_info)
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+
+- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
++ if (start_timer)
++ start_new_msg(smi_info, msg, 3);
++ else
++ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+ smi_info->si_state = SI_CLEARING_FLAGS;
+ }
+
+@@ -434,10 +461,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
+ smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+- smi_info->handlers->start_transaction(
+- smi_info->si_sm,
+- smi_info->curr_msg->data,
+- smi_info->curr_msg->data_size);
++ start_new_msg(smi_info, smi_info->curr_msg->data,
++ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_MESSAGES;
+ }
+
+@@ -447,20 +472,11 @@ static void start_getting_events(struct smi_info *smi_info)
+ smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+- smi_info->handlers->start_transaction(
+- smi_info->si_sm,
+- smi_info->curr_msg->data,
+- smi_info->curr_msg->data_size);
++ start_new_msg(smi_info, smi_info->curr_msg->data,
++ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_EVENTS;
+ }
+
+-static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+-{
+- smi_info->last_timeout_jiffies = jiffies;
+- mod_timer(&smi_info->si_timer, new_val);
+- smi_info->timer_running = true;
+-}
+-
+ /*
+ * When we have a situtaion where we run out of memory and cannot
+ * allocate messages, we just leave them in the BMC and run the system
+@@ -470,11 +486,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+ * Note that we cannot just use disable_irq(), since the interrupt may
+ * be shared.
+ */
+-static inline bool disable_si_irq(struct smi_info *smi_info)
++static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
+ {
+ if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+ smi_info->interrupt_disabled = true;
+- start_check_enables(smi_info);
++ start_check_enables(smi_info, start_timer);
+ return true;
+ }
+ return false;
+@@ -484,7 +500,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
+ {
+ if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
+ smi_info->interrupt_disabled = false;
+- start_check_enables(smi_info);
++ start_check_enables(smi_info, true);
+ return true;
+ }
+ return false;
+@@ -502,7 +518,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+- if (!disable_si_irq(smi_info))
++ if (!disable_si_irq(smi_info, true))
+ smi_info->si_state = SI_NORMAL;
+ } else if (enable_si_irq(smi_info)) {
+ ipmi_free_smi_msg(msg);
+@@ -518,7 +534,7 @@ static void handle_flags(struct smi_info *smi_info)
+ /* Watchdog pre-timeout */
+ smi_inc_stat(smi_info, watchdog_pretimeouts);
+
+- start_clear_flags(smi_info);
++ start_clear_flags(smi_info, true);
+ smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+ if (smi_info->intf)
+ ipmi_smi_watchdog_pretimeout(smi_info->intf);
+@@ -870,8 +886,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+- smi_info->handlers->start_transaction(
+- smi_info->si_sm, msg, 2);
++ start_new_msg(smi_info, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+ goto restart;
+ }
+@@ -901,7 +916,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
+ * disable and messages disabled.
+ */
+ if (smi_info->supports_event_msg_buff || smi_info->irq) {
+- start_check_enables(smi_info);
++ start_check_enables(smi_info, true);
+ } else {
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
+@@ -1203,14 +1218,14 @@ static int smi_start_processing(void *send_info,
+
+ new_smi->intf = intf;
+
+- /* Try to claim any interrupts. */
+- if (new_smi->irq_setup)
+- new_smi->irq_setup(new_smi);
+-
+ /* Set up the timer that drives the interface. */
+ setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+ smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
+
++ /* Try to claim any interrupts. */
++ if (new_smi->irq_setup)
++ new_smi->irq_setup(new_smi);
++
+ /*
+ * Check if the user forcefully enabled the daemon.
+ */
+@@ -3515,7 +3530,7 @@ static int try_smi_init(struct smi_info *new_smi)
+ * Start clearing the flags before we enable interrupts or the
+ * timer to avoid racing with the timer.
+ */
+- start_clear_flags(new_smi);
++ start_clear_flags(new_smi, false);
+
+ /*
+ * IRQ is defined to be set when non-zero. req_events will
+@@ -3817,7 +3832,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
+ poll(to_clean);
+ schedule_timeout_uninterruptible(1);
+ }
+- disable_si_irq(to_clean);
++ disable_si_irq(to_clean, false);
+ while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+ poll(to_clean);
+ schedule_timeout_uninterruptible(1);
+diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c |
1294 |
+index 30f522848c73..c19e7fc717c3 100644 |
1295 |
+--- a/drivers/connector/connector.c |
1296 |
++++ b/drivers/connector/connector.c |
1297 |
+@@ -178,26 +178,21 @@ static int cn_call_callback(struct sk_buff *skb) |
1298 |
+ * |
1299 |
+ * It checks skb, netlink header and msg sizes, and calls callback helper. |
1300 |
+ */ |
1301 |
+-static void cn_rx_skb(struct sk_buff *__skb) |
1302 |
++static void cn_rx_skb(struct sk_buff *skb) |
1303 |
+ { |
1304 |
+ struct nlmsghdr *nlh; |
1305 |
+- struct sk_buff *skb; |
1306 |
+ int len, err; |
1307 |
+ |
1308 |
+- skb = skb_get(__skb); |
1309 |
+- |
1310 |
+ if (skb->len >= NLMSG_HDRLEN) { |
1311 |
+ nlh = nlmsg_hdr(skb); |
1312 |
+ len = nlmsg_len(nlh); |
1313 |
+ |
1314 |
+ if (len < (int)sizeof(struct cn_msg) || |
1315 |
+ skb->len < nlh->nlmsg_len || |
1316 |
+- len > CONNECTOR_MAX_MSG_SIZE) { |
1317 |
+- kfree_skb(skb); |
1318 |
++ len > CONNECTOR_MAX_MSG_SIZE) |
1319 |
+ return; |
1320 |
+- } |
1321 |
+ |
1322 |
+- err = cn_call_callback(skb); |
1323 |
++ err = cn_call_callback(skb_get(skb)); |
1324 |
+ if (err < 0) |
1325 |
+ kfree_skb(skb); |
1326 |
+ } |
1327 |
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c |
1328 |
+index 722a925795a2..9ce9dfeb1258 100644 |
1329 |
+--- a/drivers/hid/hid-core.c |
1330 |
++++ b/drivers/hid/hid-core.c |
1331 |
+@@ -1589,7 +1589,7 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) |
1332 |
+ "Multi-Axis Controller" |
1333 |
+ }; |
1334 |
+ const char *type, *bus; |
1335 |
+- char buf[64]; |
1336 |
++ char buf[64] = ""; |
1337 |
+ unsigned int i; |
1338 |
+ int len; |
1339 |
+ int ret; |
1340 |
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
1341 |
+index 8b0178db6a04..b85a8614c128 100644 |
1342 |
+--- a/drivers/iommu/intel-iommu.c |
1343 |
++++ b/drivers/iommu/intel-iommu.c |
1344 |
+@@ -3928,14 +3928,17 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev) |
1345 |
+ dev = pci_physfn(dev); |
1346 |
+ for (bus = dev->bus; bus; bus = bus->parent) { |
1347 |
+ bridge = bus->self; |
1348 |
+- if (!bridge || !pci_is_pcie(bridge) || |
1349 |
++ /* If it's an integrated device, allow ATS */ |
1350 |
++ if (!bridge) |
1351 |
++ return 1; |
1352 |
++ /* Connected via non-PCIe: no ATS */ |
1353 |
++ if (!pci_is_pcie(bridge) || |
1354 |
+ pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) |
1355 |
+ return 0; |
1356 |
++ /* If we found the root port, look it up in the ATSR */ |
1357 |
+ if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) |
1358 |
+ break; |
1359 |
+ } |
1360 |
+- if (!bridge) |
1361 |
+- return 0; |
1362 |
+ |
1363 |
+ rcu_read_lock(); |
1364 |
+ list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) { |
1365 |
+diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
+index c4198fa490bf..9c1e8adaf4fc 100644
+--- a/drivers/isdn/i4l/isdn_ppp.c
++++ b/drivers/isdn/i4l/isdn_ppp.c
+@@ -301,6 +301,8 @@ isdn_ppp_open(int min, struct file *file)
+ is->compflags = 0;
+
+ is->reset = isdn_ppp_ccp_reset_alloc(is);
++ if (!is->reset)
++ return -ENOMEM;
+
+ is->lp = NULL;
+ is->mp_seqno = 0; /* MP sequence number */
+@@ -320,6 +322,10 @@ isdn_ppp_open(int min, struct file *file)
+ * VJ header compression init
+ */
+ is->slcomp = slhc_init(16, 16); /* not necessary for 2. link in bundle */
++ if (IS_ERR(is->slcomp)) {
++ isdn_ppp_ccp_reset_free(is);
++ return PTR_ERR(is->slcomp);
++ }
+ #endif
+ #ifdef CONFIG_IPPP_FILTER
+ is->pass_filter = NULL;
+@@ -567,10 +573,8 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
+ is->maxcid = val;
+ #ifdef CONFIG_ISDN_PPP_VJ
+ sltmp = slhc_init(16, val);
+- if (!sltmp) {
+- printk(KERN_ERR "ippp, can't realloc slhc struct\n");
+- return -ENOMEM;
+- }
++ if (IS_ERR(sltmp))
++ return PTR_ERR(sltmp);
+ if (is->slcomp)
+ slhc_free(is->slcomp);
+ is->slcomp = sltmp;
+diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
+index 084d346fb4c4..e15eef6a94e5 100644
+--- a/drivers/media/platform/vivid/vivid-osd.c
++++ b/drivers/media/platform/vivid/vivid-osd.c
+@@ -85,6 +85,7 @@ static int vivid_fb_ioctl(struct fb_info *info, unsigned cmd, unsigned long arg)
+ case FBIOGET_VBLANK: {
+ struct fb_vblank vblank;
+
++ memset(&vblank, 0, sizeof(vblank));
+ vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT |
+ FB_VBLANK_HAVE_VSYNC;
+ vblank.count = 0;
+diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
+index 4069234abed5..a50750ce511d 100644
+--- a/drivers/media/usb/airspy/airspy.c
++++ b/drivers/media/usb/airspy/airspy.c
+@@ -132,7 +132,7 @@ struct airspy {
+ int urbs_submitted;
+
+ /* USB control message buffer */
+- #define BUF_SIZE 24
++ #define BUF_SIZE 128
+ u8 buf[BUF_SIZE];
+
+ /* Current configuration */
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 16d87bf8ac3c..72ba774df7a7 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1194,7 +1194,6 @@ static int bond_master_upper_dev_link(struct net_device *bond_dev,
+ err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
+ if (err)
+ return err;
+- slave_dev->flags |= IFF_SLAVE;
+ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
+ return 0;
+ }
+@@ -1452,6 +1451,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ }
+ }
+
++ /* set slave flag before open to prevent IPv6 addrconf */
++ slave_dev->flags |= IFF_SLAVE;
++
+ /* open the slave since the application closed it */
+ res = dev_open(slave_dev);
+ if (res) {
+@@ -1712,6 +1714,7 @@ err_close:
+ dev_close(slave_dev);
+
+ err_restore_mac:
++ slave_dev->flags &= ~IFF_SLAVE;
+ if (!bond->params.fail_over_mac ||
+ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+ /* XXX TODO - fom follow mode needs to change master's
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 9d15566521a7..cfe49a07c7c1 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -715,10 +715,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ val &= 0xffff;
+ }
+ vj = slhc_init(val2+1, val+1);
+- if (!vj) {
+- netdev_err(ppp->dev,
+- "PPP: no memory (VJ compressor)\n");
+- err = -ENOMEM;
++ if (IS_ERR(vj)) {
++ err = PTR_ERR(vj);
+ break;
+ }
+ ppp_lock(ppp);
+diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
+index 079f7adfcde5..27ed25252aac 100644
+--- a/drivers/net/slip/slhc.c
++++ b/drivers/net/slip/slhc.c
+@@ -84,8 +84,9 @@ static long decode(unsigned char **cpp);
+ static unsigned char * put16(unsigned char *cp, unsigned short x);
+ static unsigned short pull16(unsigned char **cpp);
+
+-/* Initialize compression data structure
++/* Allocate compression data structure
+ * slots must be in range 0 to 255 (zero meaning no compression)
++ * Returns pointer to structure or ERR_PTR() on error.
+ */
+ struct slcompress *
+ slhc_init(int rslots, int tslots)
+@@ -94,11 +95,14 @@ slhc_init(int rslots, int tslots)
+ register struct cstate *ts;
+ struct slcompress *comp;
+
++ if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
++ return ERR_PTR(-EINVAL);
++
+ comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
+ if (! comp)
+ goto out_fail;
+
+- if ( rslots > 0 && rslots < 256 ) {
++ if (rslots > 0) {
+ size_t rsize = rslots * sizeof(struct cstate);
+ comp->rstate = kzalloc(rsize, GFP_KERNEL);
+ if (! comp->rstate)
+@@ -106,7 +110,7 @@ slhc_init(int rslots, int tslots)
+ comp->rslot_limit = rslots - 1;
+ }
+
+- if ( tslots > 0 && tslots < 256 ) {
++ if (tslots > 0) {
+ size_t tsize = tslots * sizeof(struct cstate);
+ comp->tstate = kzalloc(tsize, GFP_KERNEL);
+ if (! comp->tstate)
+@@ -141,7 +145,7 @@ out_free2:
+ out_free:
+ kfree(comp);
+ out_fail:
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+ }
+
+
+diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
+index 05387b1e2e95..a17d86a57734 100644
+--- a/drivers/net/slip/slip.c
++++ b/drivers/net/slip/slip.c
+@@ -164,7 +164,7 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
+ if (cbuff == NULL)
+ goto err_exit;
+ slcomp = slhc_init(16, 16);
+- if (slcomp == NULL)
++ if (IS_ERR(slcomp))
+ goto err_exit;
+ #endif
+ spin_lock_bh(&sl->lock);
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 6928448f6b7f..2b45d0168c3c 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1845,10 +1845,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+- rcu_read_lock();
+- list_for_each_entry_rcu(port, &team->port_list, list)
++ mutex_lock(&team->lock);
++ list_for_each_entry(port, &team->port_list, list)
+ vlan_vid_del(port->dev, proto, vid);
+- rcu_read_unlock();
++ mutex_unlock(&team->lock);
+
+ return 0;
+ }
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index e4b7a47a825c..5efaa9ab5af5 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -100,7 +100,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = {
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+- .ndo_change_mtu = usbnet_change_mtu,
++ .ndo_change_mtu = cdc_ncm_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid,
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 8067b8fbb0ee..0b481c30979b 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -41,6 +41,7 @@
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+ #include <linux/ctype.h>
++#include <linux/etherdevice.h>
+ #include <linux/ethtool.h>
+ #include <linux/workqueue.h>
+ #include <linux/mii.h>
+@@ -687,6 +688,33 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
+ kfree(ctx);
+ }
+
++/* we need to override the usbnet change_mtu ndo for two reasons:
++ * - respect the negotiated maximum datagram size
++ * - avoid unwanted changes to rx and tx buffers
++ */
++int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
++{
++ struct usbnet *dev = netdev_priv(net);
++ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
++ int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev);
++
++ if (new_mtu <= 0 || new_mtu > maxmtu)
++ return -EINVAL;
++ net->mtu = new_mtu;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu);
++
++static const struct net_device_ops cdc_ncm_netdev_ops = {
++ .ndo_open = usbnet_open,
++ .ndo_stop = usbnet_stop,
++ .ndo_start_xmit = usbnet_start_xmit,
++ .ndo_tx_timeout = usbnet_tx_timeout,
++ .ndo_change_mtu = cdc_ncm_change_mtu,
++ .ndo_set_mac_address = eth_mac_addr,
++ .ndo_validate_addr = eth_validate_addr,
++};
++
+ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
+ {
+ const struct usb_cdc_union_desc *union_desc = NULL;
+@@ -861,6 +889,9 @@ advance:
+ /* add our sysfs attrs */
+ dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;
+
++ /* must handle MTU changes */
++ dev->net->netdev_ops = &cdc_ncm_netdev_ops;
++
+ return 0;
+
+ error2:
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index c8186ffda1a3..2e61a799f32a 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -117,12 +117,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ kfree_skb(skb);
+ goto drop;
+ }
+- /* don't change ip_summed == CHECKSUM_PARTIAL, as that
+- * will cause bad checksum on forwarded packets
+- */
+- if (skb->ip_summed == CHECKSUM_NONE &&
+- rcv->features & NETIF_F_RXCSUM)
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
+ struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 0085b8df83e2..940f78e41993 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2581,7 +2581,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+ {
+ struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
+- struct vxlan_dev *vxlan = netdev_priv(dev);
++ struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ __u32 vni;
+ int err;
+@@ -2714,9 +2714,13 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
+ if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
+ vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
+
+- if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+- vxlan->dst_port, vxlan->flags)) {
+- pr_info("duplicate VNI %u\n", vni);
++ list_for_each_entry(tmp, &vn->vxlan_list, next) {
++ if (tmp->default_dst.remote_vni == vni &&
++ (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
++ tmp->saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
++ tmp->dst_port == vxlan->dst_port &&
++ (tmp->flags & VXLAN_F_RCV_FLAGS) ==
++ (vxlan->flags & VXLAN_F_RCV_FLAGS))
+ return -EEXIST;
+ }
+
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 0866c5dfdf87..5e5b6184e720 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -2007,8 +2007,11 @@ static int __init netback_init(void)
+ if (!xen_domain())
+ return -ENODEV;
+
+- /* Allow as many queues as there are CPUs, by default */
+- xenvif_max_queues = num_online_cpus();
++ /* Allow as many queues as there are CPUs if user has not
++ * specified a value.
++ */
++ if (xenvif_max_queues == 0)
++ xenvif_max_queues = num_online_cpus();
+
+ if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
+ pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 52f081f4dfd5..fd51626e859e 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1710,19 +1710,19 @@ static void xennet_destroy_queues(struct netfront_info *info)
+ }
+
+ static int xennet_create_queues(struct netfront_info *info,
+- unsigned int num_queues)
++ unsigned int *num_queues)
+ {
+ unsigned int i;
+ int ret;
+
+- info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
++ info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
+ GFP_KERNEL);
+ if (!info->queues)
+ return -ENOMEM;
+
+ rtnl_lock();
+
+- for (i = 0; i < num_queues; i++) {
++ for (i = 0; i < *num_queues; i++) {
+ struct netfront_queue *queue = &info->queues[i];
+
+ queue->id = i;
+@@ -1732,7 +1732,7 @@ static int xennet_create_queues(struct netfront_info *info,
+ if (ret < 0) {
+ dev_warn(&info->netdev->dev,
+ "only created %d queues\n", i);
+- num_queues = i;
++ *num_queues = i;
+ break;
+ }
+
+@@ -1742,11 +1742,11 @@ static int xennet_create_queues(struct netfront_info *info,
+ napi_enable(&queue->napi);
+ }
+
+- netif_set_real_num_tx_queues(info->netdev, num_queues);
++ netif_set_real_num_tx_queues(info->netdev, *num_queues);
+
+ rtnl_unlock();
+
+- if (num_queues == 0) {
++ if (*num_queues == 0) {
+ dev_err(&info->netdev->dev, "no queues\n");
+ return -EINVAL;
+ }
+@@ -1792,7 +1792,7 @@ static int talk_to_netback(struct xenbus_device *dev,
+ if (info->queues)
+ xennet_destroy_queues(info);
+
+- err = xennet_create_queues(info, num_queues);
++ err = xennet_create_queues(info, &num_queues);
+ if (err < 0)
+ goto destroy_ring;
+
+@@ -2140,8 +2140,11 @@ static int __init netif_init(void)
+
+ pr_info("Initialising Xen virtual ethernet driver\n");
+
+- /* Allow as many queues as there are CPUs, by default */
+- xennet_max_queues = num_online_cpus();
++ /* Allow as many queues as there are CPUs if user has not
++ * specified a value.
++ */
++ if (xennet_max_queues == 0)
++ xennet_max_queues = num_online_cpus();
+
+ return xenbus_register_frontend(&netfront_driver);
+ }
+diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
+index 761e77bfce5d..e56f1569f6c3 100644
+--- a/drivers/parisc/iommu-helpers.h
++++ b/drivers/parisc/iommu-helpers.h
+@@ -104,7 +104,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+ struct scatterlist *contig_sg; /* contig chunk head */
+ unsigned long dma_offset, dma_len; /* start/len of DMA stream */
+ unsigned int n_mappings = 0;
+- unsigned int max_seg_size = dma_get_max_seg_size(dev);
++ unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
++ (unsigned)DMA_CHUNK_SIZE);
++ unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
++ if (max_seg_boundary) /* check if the addition above didn't overflow */
++ max_seg_size = min(max_seg_size, max_seg_boundary);
+
+ while (nents > 0) {
+
+@@ -138,14 +142,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+
+ /*
+ ** First make sure current dma stream won't
+- ** exceed DMA_CHUNK_SIZE if we coalesce the
++ ** exceed max_seg_size if we coalesce the
+ ** next entry.
+ */
+- if(unlikely(ALIGN(dma_len + dma_offset + startsg->length,
+- IOVP_SIZE) > DMA_CHUNK_SIZE))
+- break;
+-
+- if (startsg->length + dma_len > max_seg_size)
++ if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
++ max_seg_size))
+ break;
+
+ /*
+diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
+index d542e06d6cd3..10e520d6bb75 100644
+--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
++++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
+@@ -1268,6 +1268,7 @@ static int
+ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
+ {
+ struct lov_stripe_md *ulsm = _ulsm;
++ struct lov_oinfo **p;
+ int nob, i;
+
+ nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
+@@ -1277,9 +1278,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
+ if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
+ return -EFAULT;
+
+- for (i = 0; i < lsm->lsm_stripe_count; i++) {
+- if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
+- sizeof(lsm->lsm_oinfo[0])))
++ for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
++ struct lov_oinfo __user *up;
++ if (get_user(up, ulsm->lsm_oinfo + i) ||
++ copy_to_user(up, *p, sizeof(struct lov_oinfo)))
+ return -EFAULT;
+ }
+ return 0;
+@@ -1287,9 +1289,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
+
+ static int
+ echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
+- void *ulsm, int ulsm_nob)
++ struct lov_stripe_md __user *ulsm, int ulsm_nob)
+ {
+ struct echo_client_obd *ec = ed->ed_ec;
++ struct lov_oinfo **p;
+ int i;
+
+ if (ulsm_nob < sizeof(*lsm))
+@@ -1305,11 +1308,10 @@ echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
+ return -EINVAL;
+
+
+- for (i = 0; i < lsm->lsm_stripe_count; i++) {
+- if (copy_from_user(lsm->lsm_oinfo[i],
+- ((struct lov_stripe_md *)ulsm)-> \
+- lsm_oinfo[i],
+- sizeof(lsm->lsm_oinfo[0])))
++ for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
++ struct lov_oinfo __user *up;
++ if (get_user(up, ulsm->lsm_oinfo + i) ||
++ copy_from_user(*p, up, sizeof(struct lov_oinfo)))
+ return -EFAULT;
+ }
+ return 0;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index d68c4a4db682..ee11b301f3da 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1034,10 +1034,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ unsigned delay;
+
+ /* Continue a partial initialization */
+- if (type == HUB_INIT2)
+- goto init2;
+- if (type == HUB_INIT3)
++ if (type == HUB_INIT2 || type == HUB_INIT3) {
++ device_lock(hub->intfdev);
++
++ /* Was the hub disconnected while we were waiting? */
++ if (hub->disconnected) {
++ device_unlock(hub->intfdev);
++ kref_put(&hub->kref, hub_release);
++ return;
++ }
++ if (type == HUB_INIT2)
++ goto init2;
+ goto init3;
++ }
++ kref_get(&hub->kref);
+
+ /* The superspeed hub except for root hub has to use Hub Depth
+ * value as an offset into the route string to locate the bits
+@@ -1235,6 +1245,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ queue_delayed_work(system_power_efficient_wq,
+ &hub->init_work,
+ msecs_to_jiffies(delay));
++ device_unlock(hub->intfdev);
+ return; /* Continues at init3: below */
+ } else {
+ msleep(delay);
+@@ -1256,6 +1267,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ /* Allow autosuspend if it was suppressed */
+ if (type <= HUB_INIT3)
+ usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
++
++ if (type == HUB_INIT2 || type == HUB_INIT3)
++ device_unlock(hub->intfdev);
++
++ kref_put(&hub->kref, hub_release);
+ }
+
+ /* Implement the continuations for the delays above */
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 1e6d7579709e..f6bb118e4501 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4794,8 +4794,16 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+ slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
+ slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
++ /*
++ * refer to section 6.2.2: MTT should be 0 for full speed hub,
++ * but it may be already set to 1 when setup an xHCI virtual
++ * device, so clear it anyway.
++ */
+ if (tt->multi)
+ slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
++ else if (hdev->speed == USB_SPEED_FULL)
++ slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
++
+ if (xhci->hci_version > 0x95) {
+ xhci_dbg(xhci, "xHCI version %x needs hub "
+ "TT think time and number of ports\n",
+@@ -5046,6 +5054,10 @@ static int __init xhci_hcd_init(void)
+ BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
+ /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
+ BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
++
++ if (usb_disabled())
++ return -ENODEV;
++
+ return 0;
+ }
+
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 7d4f51a32e66..59b2126b21a3 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -160,6 +160,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
++ { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
+diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
+index f51a5d52c0ed..ec1b8f2c1183 100644
+--- a/drivers/usb/serial/ipaq.c
++++ b/drivers/usb/serial/ipaq.c
+@@ -531,7 +531,8 @@ static int ipaq_open(struct tty_struct *tty,
+ * through. Since this has a reasonably high failure rate, we retry
+ * several times.
+ */
+- while (retries--) {
++ while (retries) {
++ retries--;
+ result = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
+ 0x1, 0, NULL, 0, 100);
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 4bd23bba816f..ee71baddbb10 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+
+ vma->vm_ops = &gntdev_vmops;
+
+- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
++ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+
+ if (use_ptemod)
+ vma->vm_flags |= VM_DONTCOPY;
+diff --git a/fs/direct-io.c b/fs/direct-io.c
+index 745d2342651a..d83a021a659f 100644
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -1159,6 +1159,16 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ }
+ }
+
++ /* Once we sampled i_size check for reads beyond EOF */
++ dio->i_size = i_size_read(inode);
++ if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
++ if (dio->flags & DIO_LOCKING)
++ mutex_unlock(&inode->i_mutex);
++ kmem_cache_free(dio_cache, dio);
++ retval = 0;
++ goto out;
++ }
++
+ /*
+ * For file extending writes updating i_size before data writeouts
+ * complete can expose uninitialized blocks in dumb filesystems.
+@@ -1212,7 +1222,6 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ sdio.next_block_for_io = -1;
+
+ dio->iocb = iocb;
+- dio->i_size = i_size_read(inode);
+
+ spin_lock_init(&dio->bio_lock);
+ dio->refcount = 1;
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index fa11b3a367be..1ce6e1049a3b 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -428,6 +428,25 @@ static inline void bpf_jit_free(struct bpf_prog *fp)
+
+ #define BPF_ANC BIT(15)
+
++static inline bool bpf_needs_clear_a(const struct sock_filter *first)
++{
++ switch (first->code) {
++ case BPF_RET | BPF_K:
++ case BPF_LD | BPF_W | BPF_LEN:
++ return false;
++
++ case BPF_LD | BPF_W | BPF_ABS:
++ case BPF_LD | BPF_H | BPF_ABS:
++ case BPF_LD | BPF_B | BPF_ABS:
++ if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
++ return true;
++ return false;
++
++ default:
++ return true;
++ }
++}
++
+ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
+ {
+ BUG_ON(ftest->code & BPF_ANC);
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 61f4f2d5c882..9128b4e9f541 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -802,6 +802,7 @@ struct user_struct {
+ unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
+ #endif
+ unsigned long locked_shm; /* How many pages of mlocked shm ? */
++ unsigned long unix_inflight; /* How many files in flight in unix sockets */
+
+ #ifdef CONFIG_KEYS
+ struct key *uid_keyring; /* UID specific keyring */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 4307e20a4a4a..1f17abe23725 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -3320,7 +3320,8 @@ struct skb_gso_cb {
+ int encap_level;
+ __u16 csum_start;
+ };
+-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
++#define SKB_SGO_CB_OFFSET 32
++#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
+
+ static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
+ {
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index 76d1e38aabe1..0c53fd51bf9b 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename,
+ asmlinkage long sys_lchown(const char __user *filename,
+ uid_t user, gid_t group);
+ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
+-#ifdef CONFIG_UID16
++#ifdef CONFIG_HAVE_UID16
+ asmlinkage long sys_chown16(const char __user *filename,
+ old_uid_t user, old_gid_t group);
+ asmlinkage long sys_lchown16(const char __user *filename,
+diff --git a/include/linux/types.h b/include/linux/types.h
+index 8715287c3b1f..69c44d981da3 100644
+--- a/include/linux/types.h
++++ b/include/linux/types.h
+@@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t;
+
+ typedef unsigned long uintptr_t;
+
+-#ifdef CONFIG_UID16
++#ifdef CONFIG_HAVE_UID16
+ /* This is defined by include/asm-{arch}/posix_types.h */
+ typedef __kernel_old_uid_t old_uid_t;
+ typedef __kernel_old_gid_t old_gid_t;
+diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
+index 7c9b484735c5..e7827ae2462c 100644
+--- a/include/linux/usb/cdc_ncm.h
++++ b/include/linux/usb/cdc_ncm.h
+@@ -133,6 +133,7 @@ struct cdc_ncm_ctx {
+ };
+
+ u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
++int cdc_ncm_change_mtu(struct net_device *net, int new_mtu);
+ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
+ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
+ struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
+diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
+index 84b20835b736..0dc0a51da38f 100644
+--- a/include/net/inet_ecn.h
++++ b/include/net/inet_ecn.h
+@@ -111,11 +111,24 @@ static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
+
+ struct ipv6hdr;
+
+-static inline int IP6_ECN_set_ce(struct ipv6hdr *iph)
++/* Note:
++ * IP_ECN_set_ce() has to tweak IPV4 checksum when setting CE,
++ * meaning both changes have no effect on skb->csum if/when CHECKSUM_COMPLETE
++ * In IPv6 case, no checksum compensates the change in IPv6 header,
++ * so we have to update skb->csum.
++ */
++static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
+ {
++ __be32 from, to;
++
+ if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
+ return 0;
+- *(__be32*)iph |= htonl(INET_ECN_CE << 20);
++
++ from = *(__be32 *)iph;
++ to = from | htonl(INET_ECN_CE << 20);
++ *(__be32 *)iph = to;
++ if (skb->ip_summed == CHECKSUM_COMPLETE)
++ skb->csum = csum_add(csum_sub(skb->csum, from), to);
+ return 1;
+ }
+
+@@ -142,7 +155,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
+ case cpu_to_be16(ETH_P_IPV6):
+ if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
+ skb_tail_pointer(skb))
+- return IP6_ECN_set_ce(ipv6_hdr(skb));
++ return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+ break;
+ }
+
+diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
+index 360c4802288d..7682cb2ae237 100644
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -112,7 +112,19 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+ struct inet_hashinfo *hashinfo);
+
+-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
++void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
++ bool rearm);
++
++static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
++{
++ __inet_twsk_schedule(tw, timeo, false);
++}
++
++static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
++{
++ __inet_twsk_schedule(tw, timeo, true);
++}
++
+ void inet_twsk_deschedule(struct inet_timewait_sock *tw);
+
+ void inet_twsk_purge(struct inet_hashinfo *hashinfo,
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 47dcd3aa6e23..141d562064a7 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1019,6 +1019,16 @@ static int check_alu_op(struct reg_state *regs, struct bpf_insn *insn)
+ return -EINVAL;
+ }
+
++ if ((opcode == BPF_LSH || opcode == BPF_RSH ||
++ opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
++ int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
++
++ if (insn->imm < 0 || insn->imm >= size) {
++ verbose("invalid shift %d\n", insn->imm);
++ return -EINVAL;
++ }
++ }
++
+ /* pattern match 'bpf_add Rx, imm' instruction */
+ if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
+ regs[insn->dst_reg].type == FRAME_PTR &&
+diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
+index ac4b96eccade..bd3357e69c5c 100644
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -112,21 +112,17 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
+ }
+
+ /* finally deinitialize the claim */
+-static void batadv_claim_free_rcu(struct rcu_head *rcu)
++static void batadv_claim_release(struct batadv_bla_claim *claim)
+ {
+- struct batadv_bla_claim *claim;
+-
+- claim = container_of(rcu, struct batadv_bla_claim, rcu);
+-
+ batadv_backbone_gw_free_ref(claim->backbone_gw);
+- kfree(claim);
++ kfree_rcu(claim, rcu);
+ }
+
+ /* free a claim, call claim_free_rcu if its the last reference */
+ static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
+ {
+ if (atomic_dec_and_test(&claim->refcount))
+- call_rcu(&claim->rcu, batadv_claim_free_rcu);
++ batadv_claim_release(claim);
+ }
+
+ /**
+diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
+index 1918cd50b62e..b6bff9c1877a 100644
+--- a/net/batman-adv/hard-interface.h
++++ b/net/batman-adv/hard-interface.h
+@@ -64,18 +64,6 @@
+ call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
+ }
+
+-/**
+- * batadv_hardif_free_ref_now - decrement the hard interface refcounter and
+- * possibly free it (without rcu callback)
+- * @hard_iface: the hard interface to free
+- */
+-static inline void
+-batadv_hardif_free_ref_now(struct batadv_hard_iface *hard_iface)
+-{
+- if (atomic_dec_and_test(&hard_iface->refcount))
+- batadv_hardif_free_rcu(&hard_iface->rcu);
+-}
+-
+ static inline struct batadv_hard_iface *
+ batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
+ {
+diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
+index a449195c5b2b..2fbd3a6bde9a 100644
+--- a/net/batman-adv/network-coding.c
++++ b/net/batman-adv/network-coding.c
+@@ -175,28 +175,25 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
+ }
+
+ /**
+- * batadv_nc_node_free_rcu - rcu callback to free an nc node and remove
+- * its refcount on the orig_node
+- * @rcu: rcu pointer of the nc node
++ * batadv_nc_node_release - release nc_node from lists and queue for free after
++ * rcu grace period
++ * @nc_node: the nc node to free
+ */
+-static void batadv_nc_node_free_rcu(struct rcu_head *rcu)
++static void batadv_nc_node_release(struct batadv_nc_node *nc_node)
+ {
+- struct batadv_nc_node *nc_node;
+-
+- nc_node = container_of(rcu, struct batadv_nc_node, rcu);
+ batadv_orig_node_free_ref(nc_node->orig_node);
+- kfree(nc_node);
++ kfree_rcu(nc_node, rcu);
+ }
+
+ /**
+- * batadv_nc_node_free_ref - decrements the nc node refcounter and possibly
+- * frees it
++ * batadv_nc_node_free_ref - decrement the nc node refcounter and possibly
++ * release it
+ * @nc_node: the nc node to free
+ */
+ static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node)
+ {
+ if (atomic_dec_and_test(&nc_node->refcount))
+- call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu);
++ batadv_nc_node_release(nc_node);
+ }
+
+ /**
+diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
+index dfae97408628..77ea1d4de2ba 100644
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -150,86 +150,58 @@ err:
+ }
+
+ /**
+- * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
+- * @rcu: rcu pointer of the neigh_ifinfo object
+- */
+-static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
+-{
+- struct batadv_neigh_ifinfo *neigh_ifinfo;
+-
+- neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
+-
+- if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
+- batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
+-
+- kfree(neigh_ifinfo);
+-}
+-
+-/**
+- * batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
+- * the neigh_ifinfo (without rcu callback)
++ * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
++ * free after rcu grace period
+ * @neigh_ifinfo: the neigh_ifinfo object to release
+ */
+ static void
+-batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
++batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
+ {
+- if (atomic_dec_and_test(&neigh_ifinfo->refcount))
+- batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
++ if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
++ batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
++
++ kfree_rcu(neigh_ifinfo, rcu);
+ }
+
+ /**
+- * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
++ * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
+ * the neigh_ifinfo
+ * @neigh_ifinfo: the neigh_ifinfo object to release
+ */
+ void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
+ {
+ if (atomic_dec_and_test(&neigh_ifinfo->refcount))
+- call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
++ batadv_neigh_ifinfo_release(neigh_ifinfo);
+ }
+
+ /**
+- * batadv_neigh_node_free_rcu - free the neigh_node
+- * @rcu: rcu pointer of the neigh_node
++ * batadv_neigh_node_release - release neigh_node from lists and queue for
++ * free after rcu grace period
++ * @neigh_node: neigh neighbor to free
+ */
+-static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
++static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
+ {
+ struct hlist_node *node_tmp;
+- struct batadv_neigh_node *neigh_node;
+ struct batadv_neigh_ifinfo *neigh_ifinfo;
+
+- neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
+-
+ hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+ &neigh_node->ifinfo_list, list) {
+- batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
++ batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
+ }
+- batadv_hardif_free_ref_now(neigh_node->if_incoming);
++ batadv_hardif_free_ref(neigh_node->if_incoming);
+
+- kfree(neigh_node);
+-}
+-
+-/**
+- * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
+- * and possibly free it (without rcu callback)
+- * @neigh_node: neigh neighbor to free
+- */
+-static void
+-batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
+-{
+- if (atomic_dec_and_test(&neigh_node->refcount))
+- batadv_neigh_node_free_rcu(&neigh_node->rcu);
++ kfree_rcu(neigh_node, rcu);
+ }
+
+ /**
+ * batadv_neigh_node_free_ref - decrement the neighbors refcounter
+- * and possibly free it
++ * and possibly release it
+ * @neigh_node: neigh neighbor to free
+ */
+ void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
+ {
+ if (atomic_dec_and_test(&neigh_node->refcount))
+- call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
++ batadv_neigh_node_release(neigh_node);
+ }
+
+ /**
+@@ -495,108 +467,99 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+ }
+
+ /**
+- * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
+- * @rcu: rcu pointer of the orig_ifinfo object
++ * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
++ * free after rcu grace period
++ * @orig_ifinfo: the orig_ifinfo object to release
+ */
+-static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
++static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
+ {
+- struct batadv_orig_ifinfo *orig_ifinfo;
+ struct batadv_neigh_node *router;
+
+- orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
+-
+ if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
+- batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
++ batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
+
+ /* this is the last reference to this object */
+ router = rcu_dereference_protected(orig_ifinfo->router, true);
+ if (router)
+- batadv_neigh_node_free_ref_now(router);
+- kfree(orig_ifinfo);
++ batadv_neigh_node_free_ref(router);
++
++ kfree_rcu(orig_ifinfo, rcu);
+ }
+
+ /**
+- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
+- * the orig_ifinfo (without rcu callback)
++ * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
++ * the orig_ifinfo
+ * @orig_ifinfo: the orig_ifinfo object to release
+ */
+-static void
+-batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
++void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
+ {
+ if (atomic_dec_and_test(&orig_ifinfo->refcount))
+- batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
++ batadv_orig_ifinfo_release(orig_ifinfo);
+ }
+
+ /**
+- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
+- * the orig_ifinfo
+- * @orig_ifinfo: the orig_ifinfo object to release
++ * batadv_orig_node_free_rcu - free the orig_node
++ * @rcu: rcu pointer of the orig_node
+ */
+-void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
++static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
+ {
+- if (atomic_dec_and_test(&orig_ifinfo->refcount))
+- call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
++ struct batadv_orig_node *orig_node;
++
++ orig_node = container_of(rcu, struct batadv_orig_node, rcu);
++
++ batadv_mcast_purge_orig(orig_node);
++
++ batadv_frag_purge_orig(orig_node, NULL);
++
++ if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
++ orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
++
++ kfree(orig_node->tt_buff);
++ kfree(orig_node);
+ }
+
+-static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
++/**
++ * batadv_orig_node_release - release orig_node from lists and queue for
++ * free after rcu grace period
++ * @orig_node: the orig node to free
++ */
++static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
+ {
+ struct hlist_node *node_tmp;
+ struct batadv_neigh_node *neigh_node;
+- struct batadv_orig_node *orig_node;
+ struct batadv_orig_ifinfo *orig_ifinfo;
+
+- orig_node = container_of(rcu, struct batadv_orig_node, rcu);
+-
+ spin_lock_bh(&orig_node->neigh_list_lock);
+
+ /* for all neighbors towards this originator ... */
+ hlist_for_each_entry_safe(neigh_node, node_tmp,
+ &orig_node->neigh_list, list) {
+ hlist_del_rcu(&neigh_node->list);
+- batadv_neigh_node_free_ref_now(neigh_node);
++ batadv_neigh_node_free_ref(neigh_node);
+ }
+
+ hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
+ &orig_node->ifinfo_list, list) {
+ hlist_del_rcu(&orig_ifinfo->list);
+- batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
++ batadv_orig_ifinfo_free_ref(orig_ifinfo);
+ }
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+- batadv_mcast_purge_orig(orig_node);
+-
+ /* Free nc_nodes */
+ batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
+
+- batadv_frag_purge_orig(orig_node, NULL);
+-
+- if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
+- orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
+-
+- kfree(orig_node->tt_buff);
+- kfree(orig_node);
++ call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
+ }
+
+ /**
+ * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
+- * schedule an rcu callback for freeing it
++ * release it
+ * @orig_node: the orig node to free
+ */
+ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
+ {
+ if (atomic_dec_and_test(&orig_node->refcount))
+- call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
+-}
+-
+-/**
+- * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
+- * possibly free it (without rcu callback)
+- * @orig_node: the orig node to free
+- */
+-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
+-{
+- if (atomic_dec_and_test(&orig_node->refcount))
+- batadv_orig_node_free_rcu(&orig_node->rcu);
++ batadv_orig_node_release(orig_node);
+ }
+
+ void batadv_originator_free(struct batadv_priv *bat_priv)
+diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
+index aa4a43696295..28b751ad549c 100644
+--- a/net/batman-adv/originator.h
++++ b/net/batman-adv/originator.h
+@@ -25,7 +25,6 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
+ void batadv_originator_free(struct batadv_priv *bat_priv);
+ void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
+ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
+-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
+ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
+ const uint8_t *addr);
+ struct batadv_neigh_node *
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 4f2a9d2c56db..ddd62c9af5b4 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -219,20 +219,6 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
+ return count;
+ }
+
+-static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+-{
+- struct batadv_tt_orig_list_entry *orig_entry;
+-
+- orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);