
From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Fri, 07 Aug 2020 12:16:59
Message-Id: 1596802602.3da17cd6f017d13a469dfef294997aa61dbd062f.alicef@gentoo
commit: 3da17cd6f017d13a469dfef294997aa61dbd062f
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 7 12:16:28 2020 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Aug 7 12:16:42 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3da17cd6

Linux patch 5.4.57

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |   4 +
 1056_linux-5.4.57.patch | 494 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 498 insertions(+)

diff --git a/0000_README b/0000_README
index 0a219d2..bdf588b 100644
--- a/0000_README
+++ b/0000_README
@@ -267,6 +267,10 @@ Patch: 1055_linux-5.4.56.patch
From: http://www.kernel.org
Desc: Linux 5.4.56

+Patch: 1056_linux-5.4.57.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.57
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1056_linux-5.4.57.patch b/1056_linux-5.4.57.patch
new file mode 100644
index 0000000..44ed76b
--- /dev/null
+++ b/1056_linux-5.4.57.patch
@@ -0,0 +1,494 @@
+diff --git a/Makefile b/Makefile
+index c33fb4eebd4d..dd753ef637fd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 56
++SUBLEVEL = 57
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
+index f44f448537f2..1a3eedbac4a2 100644
+--- a/arch/arm/include/asm/percpu.h
++++ b/arch/arm/include/asm/percpu.h
+@@ -5,6 +5,8 @@
+ #ifndef _ASM_ARM_PERCPU_H_
+ #define _ASM_ARM_PERCPU_H_
+
++#include <asm/thread_info.h>
++
+ /*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
+diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
+index 7a24bad1a58b..076a4157a74f 100644
+--- a/arch/arm64/include/asm/pointer_auth.h
++++ b/arch/arm64/include/asm/pointer_auth.h
+@@ -3,7 +3,6 @@
+ #define __ASM_POINTER_AUTH_H
+
+ #include <linux/bitops.h>
+-#include <linux/random.h>
+
+ #include <asm/cpufeature.h>
+ #include <asm/memory.h>
+@@ -30,6 +29,13 @@ struct ptrauth_keys {
+ struct ptrauth_key apga;
+ };
+
++/*
++ * Only include random.h once ptrauth_keys_* structures are defined
++ * to avoid yet another circular include hell (random.h * ends up
++ * including asm/smp.h, which requires ptrauth_keys_kernel).
++ */
++#include <linux/random.h>
++
+ static inline void ptrauth_keys_init(struct ptrauth_keys *keys)
+ {
+ if (system_supports_address_auth()) {
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 8ff28c14af7e..e877c20e0ee0 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1330,6 +1330,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
+
+ fast_mix(fast_pool);
+ add_interrupt_bench(cycles);
++ this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
+
+ if (unlikely(crng_init == 0)) {
+ if ((fast_pool->count >= 64) &&
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 7e0c77de551b..a284d99a1ee5 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3836,6 +3836,11 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
+ struct inode *inode = mapping->host;
+ size_t count = iov_iter_count(iter);
+ ssize_t ret;
++ loff_t offset = iocb->ki_pos;
++ loff_t size = i_size_read(inode);
++
++ if (offset >= size)
++ return 0;
+
+ /*
+ * Shared inode_lock is enough for us - it protects against concurrent
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 3bf3835d0e86..7aa0d8b5aaf0 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -956,11 +956,14 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
+ #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
+
+ #if defined(CONFIG_BPF_STREAM_PARSER)
+-int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
++int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
++ struct bpf_prog *old, u32 which);
+ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
++int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
+ #else
+ static inline int sock_map_prog_update(struct bpf_map *map,
+- struct bpf_prog *prog, u32 which)
++ struct bpf_prog *prog,
++ struct bpf_prog *old, u32 which)
+ {
+ return -EOPNOTSUPP;
+ }
+@@ -970,6 +973,12 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr,
+ {
+ return -EINVAL;
+ }
++
++static inline int sock_map_prog_detach(const union bpf_attr *attr,
++ enum bpf_prog_type ptype)
++{
++ return -EOPNOTSUPP;
++}
+ #endif
+
+ #if defined(CONFIG_XDP_SOCKETS)
+diff --git a/include/linux/prandom.h b/include/linux/prandom.h
+new file mode 100644
+index 000000000000..aa16e6468f91
+--- /dev/null
++++ b/include/linux/prandom.h
+@@ -0,0 +1,78 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * include/linux/prandom.h
++ *
++ * Include file for the fast pseudo-random 32-bit
++ * generation.
++ */
++#ifndef _LINUX_PRANDOM_H
++#define _LINUX_PRANDOM_H
++
++#include <linux/types.h>
++#include <linux/percpu.h>
++
++u32 prandom_u32(void);
++void prandom_bytes(void *buf, size_t nbytes);
++void prandom_seed(u32 seed);
++void prandom_reseed_late(void);
++
++struct rnd_state {
++ __u32 s1, s2, s3, s4;
++};
++
++DECLARE_PER_CPU(struct rnd_state, net_rand_state);
++
++u32 prandom_u32_state(struct rnd_state *state);
++void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
++void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
++
++#define prandom_init_once(pcpu_state) \
++ DO_ONCE(prandom_seed_full_state, (pcpu_state))
++
++/**
++ * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
++ * @ep_ro: right open interval endpoint
++ *
++ * Returns a pseudo-random number that is in interval [0, ep_ro). Note
++ * that the result depends on PRNG being well distributed in [0, ~0U]
++ * u32 space. Here we use maximally equidistributed combined Tausworthe
++ * generator, that is, prandom_u32(). This is useful when requesting a
++ * random index of an array containing ep_ro elements, for example.
++ *
++ * Returns: pseudo-random number in interval [0, ep_ro)
++ */
++static inline u32 prandom_u32_max(u32 ep_ro)
++{
++ return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
++}
++
++/*
++ * Handle minimum values for seeds
++ */
++static inline u32 __seed(u32 x, u32 m)
++{
++ return (x < m) ? x + m : x;
++}
++
++/**
++ * prandom_seed_state - set seed for prandom_u32_state().
++ * @state: pointer to state structure to receive the seed.
++ * @seed: arbitrary 64-bit value to use as a seed.
++ */
++static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
++{
++ u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
++
++ state->s1 = __seed(i, 2U);
++ state->s2 = __seed(i, 8U);
++ state->s3 = __seed(i, 16U);
++ state->s4 = __seed(i, 128U);
++}
++
++/* Pseudo random number generator from numerical recipes. */
++static inline u32 next_pseudo_random32(u32 seed)
++{
++ return seed * 1664525 + 1013904223;
++}
++
++#endif
+diff --git a/include/linux/random.h b/include/linux/random.h
+index f189c927fdea..5b3ec7d2791f 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -108,61 +108,12 @@ declare_get_random_var_wait(long)
+
+ unsigned long randomize_page(unsigned long start, unsigned long range);
+
+-u32 prandom_u32(void);
+-void prandom_bytes(void *buf, size_t nbytes);
+-void prandom_seed(u32 seed);
+-void prandom_reseed_late(void);
+-
+-struct rnd_state {
+- __u32 s1, s2, s3, s4;
+-};
+-
+-u32 prandom_u32_state(struct rnd_state *state);
+-void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+-void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+-
+-#define prandom_init_once(pcpu_state) \
+- DO_ONCE(prandom_seed_full_state, (pcpu_state))
+-
+-/**
+- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
+- * @ep_ro: right open interval endpoint
+-
+- * Returns a pseudo-random number that is in interval [0, ep_ro). Note
+- * that the result depends on PRNG being well distributed in [0, ~0U]
+- * u32 space. Here we use maximally equidistributed combined Tausworthe
+- * generator, that is, prandom_u32(). This is useful when requesting a
+- * random index of an array containing ep_ro elements, for example.
+- *
+- * Returns: pseudo-random number in interval [0, ep_ro)
+- */
+-static inline u32 prandom_u32_max(u32 ep_ro)
+-{
+- return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
+-}
+-
+ /*
+- * Handle minimum values for seeds
++ * This is designed to be standalone for just prandom
++ * users, but for now we include it from <linux/random.h>
++ * for legacy reasons.
+ */
+-static inline u32 __seed(u32 x, u32 m)
+-{
+- return (x < m) ? x + m : x;
+-}
+-
+-/**
+- * prandom_seed_state - set seed for prandom_u32_state().
+- * @state: pointer to state structure to receive the seed.
+- * @seed: arbitrary 64-bit value to use as a seed.
+- */
+-static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
+-{
+- u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+-
+- state->s1 = __seed(i, 2U);
+- state->s2 = __seed(i, 8U);
+- state->s3 = __seed(i, 16U);
+- state->s4 = __seed(i, 128U);
+-}
++#include <linux/prandom.h>
+
+ #ifdef CONFIG_ARCH_RANDOM
+ # include <asm/archrandom.h>
+@@ -193,10 +144,4 @@ static inline bool arch_has_random_seed(void)
+ }
+ #endif
+
+-/* Pseudo random number generator from numerical recipes. */
+-static inline u32 next_pseudo_random32(u32 seed)
+-{
+- return seed * 1664525 + 1013904223;
+-}
+-
+ #endif /* _LINUX_RANDOM_H */
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index 4bdb5e4bbd6a..20f3550b0b11 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -450,6 +450,19 @@ static inline void psock_set_prog(struct bpf_prog **pprog,
+ bpf_prog_put(prog);
+ }
+
++static inline int psock_replace_prog(struct bpf_prog **pprog,
++ struct bpf_prog *prog,
++ struct bpf_prog *old)
++{
++ if (cmpxchg(pprog, old, prog) != old)
++ return -ENOENT;
++
++ if (old)
++ bpf_prog_put(old);
++
++ return 0;
++}
++
+ static inline void psock_progs_drop(struct sk_psock_progs *progs)
+ {
+ psock_set_prog(&progs->msg_parser, NULL);
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 8bc904f9badb..bf03d04a9e2f 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -2029,10 +2029,10 @@ static int bpf_prog_detach(const union bpf_attr *attr)
+ ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
+ break;
+ case BPF_SK_MSG_VERDICT:
+- return sock_map_get_from_fd(attr, NULL);
++ return sock_map_prog_detach(attr, BPF_PROG_TYPE_SK_MSG);
+ case BPF_SK_SKB_STREAM_PARSER:
+ case BPF_SK_SKB_STREAM_VERDICT:
+- return sock_map_get_from_fd(attr, NULL);
++ return sock_map_prog_detach(attr, BPF_PROG_TYPE_SK_SKB);
+ case BPF_LIRC_MODE2:
+ return lirc_prog_detach(attr);
+ case BPF_FLOW_DISSECTOR:
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 1e9b81a930c0..a3ae244b1bcd 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -43,6 +43,7 @@
+ #include <linux/sched/debug.h>
+ #include <linux/slab.h>
+ #include <linux/compat.h>
++#include <linux/random.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+@@ -1742,6 +1743,13 @@ void update_process_times(int user_tick)
+ scheduler_tick();
+ if (IS_ENABLED(CONFIG_POSIX_TIMERS))
+ run_posix_cpu_timers();
++
++ /* The current CPU might make use of net randoms without receiving IRQs
++ * to renew them often enough. Let's update the net_rand_state from a
++ * non-constant value that's not affine to the number of calls to make
++ * sure it's updated when there's some activity (we don't care in idle).
++ */
++ this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
+ }
+
+ /**
+diff --git a/lib/random32.c b/lib/random32.c
+index 763b920a6206..3d749abb9e80 100644
+--- a/lib/random32.c
++++ b/lib/random32.c
+@@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void)
+ }
+ #endif
+
+-static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
++DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+
+ /**
+ * prandom_u32_state - seeded pseudo-random number generator.
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 6bbc118bf00e..df52061f99f7 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -71,7 +71,42 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
+ map = __bpf_map_get(f);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+- ret = sock_map_prog_update(map, prog, attr->attach_type);
++ ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
+ fdput(f);
+ return ret;
+ }
++
++int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
++{
++ u32 ufd = attr->target_fd;
++ struct bpf_prog *prog;
++ struct bpf_map *map;
++ struct fd f;
++ int ret;
++
++ if (attr->attach_flags)
++ return -EINVAL;
++
++ f = fdget(ufd);
++ map = __bpf_map_get(f);
++ if (IS_ERR(map))
++ return PTR_ERR(map);
++
++ prog = bpf_prog_get(attr->attach_bpf_fd);
++ if (IS_ERR(prog)) {
++ ret = PTR_ERR(prog);
++ goto put_map;
++ }
++
++ if (prog->type != ptype) {
++ ret = -EINVAL;
++ goto put_prog;
++ }
++
++ ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
++put_prog:
++ bpf_prog_put(prog);
++put_map:
+ fdput(f);
+ return ret;
+ }
+@@ -1015,27 +1050,32 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
+ }
+
+ int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
+- u32 which)
++ struct bpf_prog *old, u32 which)
+ {
+ struct sk_psock_progs *progs = sock_map_progs(map);
++ struct bpf_prog **pprog;
+
+ if (!progs)
+ return -EOPNOTSUPP;
+
+ switch (which) {
+ case BPF_SK_MSG_VERDICT:
+- psock_set_prog(&progs->msg_parser, prog);
++ pprog = &progs->msg_parser;
+ break;
+ case BPF_SK_SKB_STREAM_PARSER:
+- psock_set_prog(&progs->skb_parser, prog);
++ pprog = &progs->skb_parser;
+ break;
+ case BPF_SK_SKB_STREAM_VERDICT:
+- psock_set_prog(&progs->skb_verdict, prog);
++ pprog = &progs->skb_verdict;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
++ if (old)
++ return psock_replace_prog(pprog, prog, old);
++
++ psock_set_prog(pprog, prog);
+ return 0;
+ }
+
+diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
+index e1f1becda529..c812f0178b64 100644
+--- a/tools/testing/selftests/bpf/test_maps.c
++++ b/tools/testing/selftests/bpf/test_maps.c
+@@ -793,19 +793,19 @@ static void test_sockmap(unsigned int tasks, void *data)
+ }
+
+ err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER);
+- if (err) {
++ if (!err) {
+ printf("Failed empty parser prog detach\n");
+ goto out_sockmap;
+ }
+
+ err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT);
+- if (err) {
++ if (!err) {
+ printf("Failed empty verdict prog detach\n");
+ goto out_sockmap;
+ }
+
+ err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT);
+- if (err) {
++ if (!err) {
+ printf("Failed empty msg verdict prog detach\n");
+ goto out_sockmap;
+ }
+@@ -1094,19 +1094,19 @@ static void test_sockmap(unsigned int tasks, void *data)
+ assert(status == 0);
+ }
+
+- err = bpf_prog_detach(map_fd_rx, __MAX_BPF_ATTACH_TYPE);
++ err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE);
+ if (!err) {
+ printf("Detached an invalid prog type.\n");
+ goto out_sockmap;
+ }
+
+- err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
++ err = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
+ if (err) {
+ printf("Failed parser prog detach\n");
+ goto out_sockmap;
+ }
+
+- err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
++ err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
+ if (err) {
+ printf("Failed parser prog detach\n");
+ goto out_sockmap;