Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Fri, 05 Jan 2018 15:02:34
Message-Id: 1515164540.c72b8737f3a452232dbb3c1eb0fdc621313cff3c.alicef@gentoo
commit:     c72b8737f3a452232dbb3c1eb0fdc621313cff3c
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Jan 5 15:02:20 2018 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Jan 5 15:02:20 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c72b8737

linux kernel 4.14.12

 0000_README              |   4 +
 1011_linux-4.14.12.patch | 514 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 518 insertions(+)

diff --git a/0000_README b/0000_README
index c07cc2b..a10ea98 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-4.14.11.patch
 From: http://www.kernel.org
 Desc: Linux 4.14.11
 
+Patch: 1011_linux-4.14.12.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.12
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1011_linux-4.14.12.patch b/1011_linux-4.14.12.patch
new file mode 100644
index 0000000..8f9d788
--- /dev/null
+++ b/1011_linux-4.14.12.patch
@@ -0,0 +1,514 @@
+diff --git a/Makefile b/Makefile
+index 655887067dc7..20f7d4de0f1c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index 40f17009ec20..98d5358e4041 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -190,8 +190,13 @@ ENTRY(entry_SYSCALL_compat)
+ /* Interrupts are off on entry. */
+ swapgs
+
+- /* Stash user ESP and switch to the kernel stack. */
++ /* Stash user ESP */
+ movl %esp, %r8d
++
++ /* Use %rsp as scratch reg. User ESP is stashed in r8 */
++ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
++
++ /* Switch to the kernel stack */
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+ /* Construct struct pt_regs on stack */
+@@ -219,12 +224,6 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
+ pushq $0 /* pt_regs->r14 = 0 */
+ pushq $0 /* pt_regs->r15 = 0 */
+
+- /*
+- * We just saved %rdi so it is safe to clobber. It is not
+- * preserved during the C calls inside TRACE_IRQS_OFF anyway.
+- */
+- SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+-
+ /*
+ * User mode is traced as though IRQs are on, and SYSENTER
+ * turned them off.
+diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
+index c1688c2d0a12..1f86e1b0a5cd 100644
+--- a/arch/x86/include/asm/unwind.h
++++ b/arch/x86/include/asm/unwind.h
+@@ -56,18 +56,27 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
+
+ #if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
+ /*
+- * WARNING: The entire pt_regs may not be safe to dereference. In some cases,
+- * only the iret frame registers are accessible. Use with caution!
++ * If 'partial' returns true, only the iret frame registers are valid.
+ */
+-static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
++static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
++ bool *partial)
+ {
+ if (unwind_done(state))
+ return NULL;
+
++ if (partial) {
++#ifdef CONFIG_UNWINDER_ORC
++ *partial = !state->full_regs;
++#else
++ *partial = false;
++#endif
++ }
++
+ return state->regs;
+ }
+ #else
+-static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
++static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
++ bool *partial)
+ {
+ return NULL;
+ }
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index f2a94dfb434e..b1be494ab4e8 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -899,8 +899,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+
+ setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+
+- /* Assume for now that ALL x86 CPUs are insecure */
+- setup_force_cpu_bug(X86_BUG_CPU_INSECURE);
++ if (c->x86_vendor != X86_VENDOR_AMD)
++ setup_force_cpu_bug(X86_BUG_CPU_INSECURE);
+
+ fpu__init_system(c);
+
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index 5fa110699ed2..afbecff161d1 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -76,12 +76,23 @@ void show_iret_regs(struct pt_regs *regs)
+ regs->sp, regs->flags);
+ }
+
+-static void show_regs_safe(struct stack_info *info, struct pt_regs *regs)
++static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
++ bool partial)
+ {
+- if (on_stack(info, regs, sizeof(*regs)))
++ /*
++ * These on_stack() checks aren't strictly necessary: the unwind code
++ * has already validated the 'regs' pointer. The checks are done for
++ * ordering reasons: if the registers are on the next stack, we don't
++ * want to print them out yet. Otherwise they'll be shown as part of
++ * the wrong stack. Later, when show_trace_log_lvl() switches to the
++ * next stack, this function will be called again with the same regs so
++ * they can be printed in the right context.
++ */
++ if (!partial && on_stack(info, regs, sizeof(*regs))) {
+ __show_regs(regs, 0);
+- else if (on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
+- IRET_FRAME_SIZE)) {
++
++ } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
++ IRET_FRAME_SIZE)) {
+ /*
+ * When an interrupt or exception occurs in entry code, the
+ * full pt_regs might not have been saved yet. In that case
+@@ -98,11 +109,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ struct stack_info stack_info = {0};
+ unsigned long visit_mask = 0;
+ int graph_idx = 0;
++ bool partial;
+
+ printk("%sCall Trace:\n", log_lvl);
+
+ unwind_start(&state, task, regs, stack);
+ stack = stack ? : get_stack_pointer(task, regs);
++ regs = unwind_get_entry_regs(&state, &partial);
+
+ /*
+ * Iterate through the stacks, starting with the current stack pointer.
+@@ -120,7 +133,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ * - hardirq stack
+ * - entry stack
+ */
+- for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
++ for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+ const char *stack_name;
+
+ if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
+@@ -140,7 +153,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ printk("%s <%s>\n", log_lvl, stack_name);
+
+ if (regs)
+- show_regs_safe(&stack_info, regs);
++ show_regs_if_on_stack(&stack_info, regs, partial);
+
+ /*
+ * Scan the stack, printing any text addresses we find. At the
+@@ -164,7 +177,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+
+ /*
+ * Don't print regs->ip again if it was already printed
+- * by show_regs_safe() below.
++ * by show_regs_if_on_stack().
+ */
+ if (regs && stack == &regs->ip)
+ goto next;
+@@ -199,9 +212,9 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ unwind_next_frame(&state);
+
+ /* if the frame has entry regs, print them */
+- regs = unwind_get_entry_regs(&state);
++ regs = unwind_get_entry_regs(&state, &partial);
+ if (regs)
+- show_regs_safe(&stack_info, regs);
++ show_regs_if_on_stack(&stack_info, regs, partial);
+ }
+
+ if (stack_name)
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 517415978409..3cb2486c47e4 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -47,7 +47,7 @@
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = {
++__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
+ .x86_tss = {
+ /*
+ * .sp0 is only used when entering ring 0 from a lower
+diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
+index 8dabd7bf1673..60244bfaf88f 100644
+--- a/arch/x86/kernel/stacktrace.c
++++ b/arch/x86/kernel/stacktrace.c
+@@ -98,7 +98,7 @@ static int __save_stack_trace_reliable(struct stack_trace *trace,
+ for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
+ unwind_next_frame(&state)) {
+
+- regs = unwind_get_entry_regs(&state);
++ regs = unwind_get_entry_regs(&state, NULL);
+ if (regs) {
+ /*
+ * Kernel mode registers on the stack indicate an
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index bce8aea65606..2da28ba97508 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -367,7 +367,8 @@ static void __init pti_setup_espfix64(void)
+ static void __init pti_clone_entry_text(void)
+ {
+ pti_clone_pmds((unsigned long) __entry_text_start,
+- (unsigned long) __irqentry_text_end, _PAGE_RW);
++ (unsigned long) __irqentry_text_end,
++ _PAGE_RW | _PAGE_GLOBAL);
+ }
+
+ /*
+diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
+index f4c070ea8384..c90fba3ed861 100644
+--- a/drivers/rtc/rtc-m41t80.c
++++ b/drivers/rtc/rtc-m41t80.c
+@@ -154,6 +154,8 @@ struct m41t80_data {
+ struct rtc_device *rtc;
+ #ifdef CONFIG_COMMON_CLK
+ struct clk_hw sqw;
++ unsigned long freq;
++ unsigned int sqwe;
+ #endif
+ };
+
+@@ -443,43 +445,40 @@ static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume);
+ #ifdef CONFIG_COMMON_CLK
+ #define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw)
+
+-static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
+- unsigned long parent_rate)
++static unsigned long m41t80_decode_freq(int setting)
++{
++ return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ :
++ M41T80_SQW_MAX_FREQ >> setting;
++}
++
++static unsigned long m41t80_get_freq(struct m41t80_data *m41t80)
+ {
+- struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
+ struct i2c_client *client = m41t80->client;
+ int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ?
+ M41T80_REG_WDAY : M41T80_REG_SQW;
+ int ret = i2c_smbus_read_byte_data(client, reg_sqw);
+- unsigned long val = M41T80_SQW_MAX_FREQ;
+
+ if (ret < 0)
+ return 0;
++ return m41t80_decode_freq(ret >> 4);
++}
+
+- ret >>= 4;
+- if (ret == 0)
+- val = 0;
+- else if (ret > 1)
+- val = val / (1 << ret);
+-
+- return val;
++static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
++ unsigned long parent_rate)
++{
++ return sqw_to_m41t80_data(hw)->freq;
+ }
+
+ static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+ {
+- int i, freq = M41T80_SQW_MAX_FREQ;
+-
+- if (freq <= rate)
+- return freq;
+-
+- for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) {
+- freq /= 1 << i;
+- if (freq <= rate)
+- return freq;
+- }
+-
+- return 0;
++ if (rate >= M41T80_SQW_MAX_FREQ)
++ return M41T80_SQW_MAX_FREQ;
++ if (rate >= M41T80_SQW_MAX_FREQ / 4)
++ return M41T80_SQW_MAX_FREQ / 4;
++ if (!rate)
++ return 0;
++ return 1 << ilog2(rate);
+ }
+
+ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
+@@ -491,17 +490,12 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
+ M41T80_REG_WDAY : M41T80_REG_SQW;
+ int reg, ret, val = 0;
+
+- if (rate) {
+- if (!is_power_of_2(rate))
+- return -EINVAL;
+- val = ilog2(rate);
+- if (val == ilog2(M41T80_SQW_MAX_FREQ))
+- val = 1;
+- else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1))
+- val = ilog2(M41T80_SQW_MAX_FREQ) - val;
+- else
+- return -EINVAL;
+- }
++ if (rate >= M41T80_SQW_MAX_FREQ)
++ val = 1;
++ else if (rate >= M41T80_SQW_MAX_FREQ / 4)
++ val = 2;
++ else if (rate)
++ val = 15 - ilog2(rate);
+
+ reg = i2c_smbus_read_byte_data(client, reg_sqw);
+ if (reg < 0)
+@@ -510,10 +504,9 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
+ reg = (reg & 0x0f) | (val << 4);
+
+ ret = i2c_smbus_write_byte_data(client, reg_sqw, reg);
+- if (ret < 0)
+- return ret;
+-
+- return -EINVAL;
++ if (!ret)
++ m41t80->freq = m41t80_decode_freq(val);
++ return ret;
+ }
+
+ static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
+@@ -530,7 +523,10 @@ static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
+ else
+ ret &= ~M41T80_ALMON_SQWE;
+
+- return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
++ ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
++ if (!ret)
++ m41t80->sqwe = enable;
++ return ret;
+ }
+
+ static int m41t80_sqw_prepare(struct clk_hw *hw)
+@@ -545,14 +541,7 @@ static void m41t80_sqw_unprepare(struct clk_hw *hw)
+
+ static int m41t80_sqw_is_prepared(struct clk_hw *hw)
+ {
+- struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
+- struct i2c_client *client = m41t80->client;
+- int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
+-
+- if (ret < 0)
+- return ret;
+-
+- return !!(ret & M41T80_ALMON_SQWE);
++ return sqw_to_m41t80_data(hw)->sqwe;
+ }
+
+ static const struct clk_ops m41t80_sqw_ops = {
+@@ -587,6 +576,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80)
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ m41t80->sqw.init = &init;
++ m41t80->freq = m41t80_get_freq(m41t80);
+
+ /* optional override of the clockname */
+ of_property_read_string(node, "clock-output-names", &init.name);
+diff --git a/fs/exec.c b/fs/exec.c
+index 3e14ba25f678..acec119fcc31 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1350,9 +1350,14 @@ void setup_new_exec(struct linux_binprm * bprm)
+
+ current->sas_ss_sp = current->sas_ss_size = 0;
+
+- /* Figure out dumpability. */
++ /*
++ * Figure out dumpability. Note that this checking only of current
++ * is wrong, but userspace depends on it. This should be testing
++ * bprm->secureexec instead.
++ */
+ if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
+- bprm->secureexec)
++ !(uid_eq(current_euid(), current_uid()) &&
++ gid_eq(current_egid(), current_gid())))
+ set_dumpable(current->mm, suid_dumpable);
+ else
+ set_dumpable(current->mm, SUID_DUMP_USER);
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 2a6093840e7e..6bc16bb61b55 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1362,29 +1362,36 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
+ struct net *net = xp_net(policy);
+ int nx;
+ int i, error;
++ xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
++ xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
+ xfrm_address_t tmp;
+
+ for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
+ struct xfrm_state *x;
+- xfrm_address_t *local;
+- xfrm_address_t *remote;
++ xfrm_address_t *remote = daddr;
++ xfrm_address_t *local = saddr;
+ struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
+
+- remote = &tmpl->id.daddr;
+- local = &tmpl->saddr;
+- if (xfrm_addr_any(local, tmpl->encap_family)) {
+- error = xfrm_get_saddr(net, fl->flowi_oif,
+- &tmp, remote,
+- tmpl->encap_family, 0);
+- if (error)
+- goto fail;
+- local = &tmp;
++ if (tmpl->mode == XFRM_MODE_TUNNEL ||
++ tmpl->mode == XFRM_MODE_BEET) {
++ remote = &tmpl->id.daddr;
++ local = &tmpl->saddr;
++ if (xfrm_addr_any(local, tmpl->encap_family)) {
++ error = xfrm_get_saddr(net, fl->flowi_oif,
++ &tmp, remote,
++ tmpl->encap_family, 0);
++ if (error)
++ goto fail;
++ local = &tmp;
++ }
+ }
+
+ x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
+
+ if (x && x->km.state == XFRM_STATE_VALID) {
+ xfrm[nx++] = x;
++ daddr = remote;
++ saddr = local;
+ continue;
+ }
+ if (x) {
+diff --git a/security/commoncap.c b/security/commoncap.c
+index fc46f5b85251..7b01431d1e19 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -348,21 +348,18 @@ static __u32 sansflags(__u32 m)
+ return m & ~VFS_CAP_FLAGS_EFFECTIVE;
+ }
+
+-static bool is_v2header(size_t size, __le32 magic)
++static bool is_v2header(size_t size, const struct vfs_cap_data *cap)
+ {
+- __u32 m = le32_to_cpu(magic);
+ if (size != XATTR_CAPS_SZ_2)
+ return false;
+- return sansflags(m) == VFS_CAP_REVISION_2;
++ return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_2;
+ }
+
+-static bool is_v3header(size_t size, __le32 magic)
++static bool is_v3header(size_t size, const struct vfs_cap_data *cap)
+ {
+- __u32 m = le32_to_cpu(magic);
+-
+ if (size != XATTR_CAPS_SZ_3)
+ return false;
+- return sansflags(m) == VFS_CAP_REVISION_3;
++ return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_3;
+ }
+
+ /*
+@@ -405,7 +402,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
+
+ fs_ns = inode->i_sb->s_user_ns;
+ cap = (struct vfs_cap_data *) tmpbuf;
+- if (is_v2header((size_t) ret, cap->magic_etc)) {
++ if (is_v2header((size_t) ret, cap)) {
+ /* If this is sizeof(vfs_cap_data) then we're ok with the
+ * on-disk value, so return that. */
+ if (alloc)
+@@ -413,7 +410,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
+ else
+ kfree(tmpbuf);
+ return ret;
+- } else if (!is_v3header((size_t) ret, cap->magic_etc)) {
++ } else if (!is_v3header((size_t) ret, cap)) {
+ kfree(tmpbuf);
+ return -EINVAL;
+ }
+@@ -470,9 +467,9 @@ static kuid_t rootid_from_xattr(const void *value, size_t size,
+ return make_kuid(task_ns, rootid);
+ }
+
+-static bool validheader(size_t size, __le32 magic)
++static bool validheader(size_t size, const struct vfs_cap_data *cap)
+ {
+- return is_v2header(size, magic) || is_v3header(size, magic);
++ return is_v2header(size, cap) || is_v3header(size, cap);
+ }
+
+ /*
+@@ -495,7 +492,7 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
+
+ if (!*ivalue)
+ return -EINVAL;
+- if (!validheader(size, cap->magic_etc))
++ if (!validheader(size, cap))
+ return -EINVAL;
+ if (!capable_wrt_inode_uidgid(inode, CAP_SETFCAP))
+ return -EPERM;