From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Mon, 30 May 2022 13:59:58
Message-Id: 1653919181.45dd19c15b74d99ba9dcf4e008ade81aaf3e657f.mpagano@gentoo
commit: 45dd19c15b74d99ba9dcf4e008ade81aaf3e657f
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon May 30 13:59:41 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon May 30 13:59:41 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=45dd19c1

Linux patch 5.10.119

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1118_linux-5.10.119.patch | 6681 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6685 insertions(+)

diff --git a/0000_README b/0000_README
index 51c56a3e..32647390 100644
--- a/0000_README
+++ b/0000_README
@@ -515,6 +515,10 @@ Patch: 1117_linux-5.10.118.patch
From: http://www.kernel.org
Desc: Linux 5.10.118

+Patch: 1118_linux-5.10.119.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.119
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1118_linux-5.10.119.patch b/1118_linux-5.10.119.patch
new file mode 100644
index 00000000..7794e863
--- /dev/null
+++ b/1118_linux-5.10.119.patch
@@ -0,0 +1,6681 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 611172f68bb57..5e34deec819fa 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4035,6 +4035,12 @@
+ fully seed the kernel's CRNG. Default is controlled
+ by CONFIG_RANDOM_TRUST_CPU.
+
++ random.trust_bootloader={on,off}
++ [KNL] Enable or disable trusting the use of a
++ seed passed by the bootloader (if available) to
++ fully seed the kernel's CRNG. Default is controlled
++ by CONFIG_RANDOM_TRUST_BOOTLOADER.
++
+ ras=option[,option,...] [KNL] RAS-specific options
+
+ cec_disable [X86]
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index e338306f45873..a4b1ebc2e70b0 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1006,28 +1006,22 @@ This is a directory, with the following entries:
+ * ``boot_id``: a UUID generated the first time this is retrieved, and
+ unvarying after that;
+
++* ``uuid``: a UUID generated every time this is retrieved (this can
++ thus be used to generate UUIDs at will);
++
+ * ``entropy_avail``: the pool's entropy count, in bits;
+
+ * ``poolsize``: the entropy pool size, in bits;
+
+ * ``urandom_min_reseed_secs``: obsolete (used to determine the minimum
+- number of seconds between urandom pool reseeding).
+-
+-* ``uuid``: a UUID generated every time this is retrieved (this can
+- thus be used to generate UUIDs at will);
++ number of seconds between urandom pool reseeding). This file is
++ writable for compatibility purposes, but writing to it has no effect
++ on any RNG behavior;
+
+ * ``write_wakeup_threshold``: when the entropy count drops below this
+ (as a number of bits), processes waiting to write to ``/dev/random``
+- are woken up.
+-
+-If ``drivers/char/random.c`` is built with ``ADD_INTERRUPT_BENCH``
+-defined, these additional entries are present:
+-
+-* ``add_interrupt_avg_cycles``: the average number of cycles between
+- interrupts used to feed the pool;
+-
+-* ``add_interrupt_avg_deviation``: the standard deviation seen on the
+- number of cycles between interrupts used to feed the pool.
++ are woken up. This file is writable for compatibility purposes, but
++ writing to it has no effect on any RNG behavior.
+
+
+ randomize_va_space
+diff --git a/MAINTAINERS b/MAINTAINERS
+index c64c9354c287f..7c118b507912f 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -14671,6 +14671,8 @@ F: arch/mips/generic/board-ranchu.c
+
+ RANDOM NUMBER DRIVER
+ M: "Theodore Ts'o" <tytso@×××.edu>
++M: Jason A. Donenfeld <Jason@×××××.com>
++T: git https://git.kernel.org/pub/scm/linux/kernel/git/crng/random.git
+ S: Maintained
+ F: drivers/char/random.c
+
+diff --git a/Makefile b/Makefile
+index f9210e43121dc..b442cc5bbfc30 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 118
++SUBLEVEL = 119
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/alpha/include/asm/timex.h b/arch/alpha/include/asm/timex.h
+index b565cc6f408e9..f89798da8a147 100644
+--- a/arch/alpha/include/asm/timex.h
++++ b/arch/alpha/include/asm/timex.h
+@@ -28,5 +28,6 @@ static inline cycles_t get_cycles (void)
+ __asm__ __volatile__ ("rpcc %0" : "=r"(ret));
+ return ret;
+ }
++#define get_cycles get_cycles
+
+ #endif
+diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
+index 7c3b3671d6c25..6d1337c169cd3 100644
+--- a/arch/arm/include/asm/timex.h
++++ b/arch/arm/include/asm/timex.h
+@@ -11,5 +11,6 @@
+
+ typedef unsigned long cycles_t;
+ #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
++#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
+
+ #endif
+diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
+index 869a3ac6bf23a..7ccc077a60bed 100644
+--- a/arch/ia64/include/asm/timex.h
++++ b/arch/ia64/include/asm/timex.h
+@@ -39,6 +39,7 @@ get_cycles (void)
+ ret = ia64_getreg(_IA64_REG_AR_ITC);
+ return ret;
+ }
++#define get_cycles get_cycles
+
+ extern void ia64_cpu_local_tick (void);
+ extern unsigned long long ia64_native_sched_clock (void);
+diff --git a/arch/m68k/include/asm/timex.h b/arch/m68k/include/asm/timex.h
+index 6a21d93582805..f4a7a340f4cae 100644
+--- a/arch/m68k/include/asm/timex.h
++++ b/arch/m68k/include/asm/timex.h
+@@ -35,7 +35,7 @@ static inline unsigned long random_get_entropy(void)
+ {
+ if (mach_random_get_entropy)
+ return mach_random_get_entropy();
+- return 0;
++ return random_get_entropy_fallback();
+ }
+ #define random_get_entropy random_get_entropy
+
+diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
+index 8026baf46e729..2e107886f97ac 100644
+--- a/arch/mips/include/asm/timex.h
++++ b/arch/mips/include/asm/timex.h
+@@ -76,25 +76,24 @@ static inline cycles_t get_cycles(void)
+ else
+ return 0; /* no usable counter */
+ }
++#define get_cycles get_cycles
+
+ /*
+ * Like get_cycles - but where c0_count is not available we desperately
+ * use c0_random in an attempt to get at least a little bit of entropy.
+- *
+- * R6000 and R6000A neither have a count register nor a random register.
+- * That leaves no entropy source in the CPU itself.
+ */
+ static inline unsigned long random_get_entropy(void)
+ {
+- unsigned int prid = read_c0_prid();
+- unsigned int imp = prid & PRID_IMP_MASK;
++ unsigned int c0_random;
+
+- if (can_use_mips_counter(prid))
++ if (can_use_mips_counter(read_c0_prid()))
+ return read_c0_count();
+- else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
+- return read_c0_random();
++
++ if (cpu_has_3kex)
++ c0_random = (read_c0_random() >> 8) & 0x3f;
+ else
+- return 0; /* no usable register */
++ c0_random = read_c0_random() & 0x3f;
++ return (random_get_entropy_fallback() << 6) | (0x3f - c0_random);
+ }
+ #define random_get_entropy random_get_entropy
+
+diff --git a/arch/nios2/include/asm/timex.h b/arch/nios2/include/asm/timex.h
+index a769f871b28d9..40a1adc9bd03e 100644
+--- a/arch/nios2/include/asm/timex.h
++++ b/arch/nios2/include/asm/timex.h
+@@ -8,5 +8,8 @@
+ typedef unsigned long cycles_t;
+
+ extern cycles_t get_cycles(void);
++#define get_cycles get_cycles
++
++#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
+
+ #endif
+diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h
+index 06b510f8172e3..b4622cb06a75e 100644
+--- a/arch/parisc/include/asm/timex.h
++++ b/arch/parisc/include/asm/timex.h
+@@ -13,9 +13,10 @@
+
+ typedef unsigned long cycles_t;
+
+-static inline cycles_t get_cycles (void)
++static inline cycles_t get_cycles(void)
+ {
+ return mfctl(16);
+ }
++#define get_cycles get_cycles
+
+ #endif
+diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
+index 95988870a57bc..171602fd358e1 100644
+--- a/arch/powerpc/include/asm/timex.h
++++ b/arch/powerpc/include/asm/timex.h
+@@ -19,6 +19,7 @@ static inline cycles_t get_cycles(void)
+ {
+ return mftb();
+ }
++#define get_cycles get_cycles
+
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_TIMEX_H */
+diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
+index 81de51e6aa32b..a06697846e695 100644
+--- a/arch/riscv/include/asm/timex.h
++++ b/arch/riscv/include/asm/timex.h
+@@ -41,7 +41,7 @@ static inline u32 get_cycles_hi(void)
+ static inline unsigned long random_get_entropy(void)
+ {
+ if (unlikely(clint_time_val == NULL))
+- return 0;
++ return random_get_entropy_fallback();
+ return get_cycles();
+ }
+ #define random_get_entropy() random_get_entropy()
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index 289aaff4d365f..588aa0f2c842c 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -172,6 +172,7 @@ static inline cycles_t get_cycles(void)
+ {
+ return (cycles_t) get_tod_clock() >> 2;
+ }
++#define get_cycles get_cycles
+
+ int get_phys_clock(unsigned long *clock);
+ void init_cpu_timer(void);
+diff --git a/arch/sparc/include/asm/timex_32.h b/arch/sparc/include/asm/timex_32.h
+index 542915b462097..f86326a6f89e0 100644
+--- a/arch/sparc/include/asm/timex_32.h
++++ b/arch/sparc/include/asm/timex_32.h
+@@ -9,8 +9,6 @@
+
+ #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+
+-/* XXX Maybe do something better at some point... -DaveM */
+-typedef unsigned long cycles_t;
+-#define get_cycles() (0)
++#include <asm-generic/timex.h>
+
+ #endif
+diff --git a/arch/um/include/asm/timex.h b/arch/um/include/asm/timex.h
+index e392a9a5bc9bd..9f27176adb26d 100644
+--- a/arch/um/include/asm/timex.h
++++ b/arch/um/include/asm/timex.h
+@@ -2,13 +2,8 @@
+ #ifndef __UM_TIMEX_H
+ #define __UM_TIMEX_H
+
+-typedef unsigned long cycles_t;
+-
+-static inline cycles_t get_cycles (void)
+-{
+- return 0;
+-}
+-
+ #define CLOCK_TICK_RATE (HZ)
+
++#include <asm-generic/timex.h>
++
+ #endif
+diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
+index a31de0c6ccde2..66f25c2938bfe 100644
+--- a/arch/x86/crypto/Makefile
++++ b/arch/x86/crypto/Makefile
+@@ -66,7 +66,9 @@ obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
+ sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
+
+ obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o
+-blake2s-x86_64-y := blake2s-core.o blake2s-glue.o
++blake2s-x86_64-y := blake2s-shash.o
++obj-$(if $(CONFIG_CRYPTO_BLAKE2S_X86),y) += libblake2s-x86_64.o
++libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o
+
+ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
+ ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
+diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c
+index c025a01cf7084..69853c13e8fb0 100644
+--- a/arch/x86/crypto/blake2s-glue.c
++++ b/arch/x86/crypto/blake2s-glue.c
+@@ -5,7 +5,6 @@
+
+ #include <crypto/internal/blake2s.h>
+ #include <crypto/internal/simd.h>
+-#include <crypto/internal/hash.h>
+
+ #include <linux/types.h>
+ #include <linux/jump_label.h>
+@@ -28,9 +27,8 @@ asmlinkage void blake2s_compress_avx512(struct blake2s_state *state,
+ static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3);
+ static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512);
+
+-void blake2s_compress_arch(struct blake2s_state *state,
+- const u8 *block, size_t nblocks,
+- const u32 inc)
++void blake2s_compress(struct blake2s_state *state, const u8 *block,
++ size_t nblocks, const u32 inc)
+ {
+ /* SIMD disables preemption, so relax after processing each page. */
+ BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8);
+@@ -56,147 +54,12 @@ void blake2s_compress_arch(struct blake2s_state *state,
+ block += blocks * BLAKE2S_BLOCK_SIZE;
+ } while (nblocks);
+ }
+-EXPORT_SYMBOL(blake2s_compress_arch);
+-
+-static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key,
+- unsigned int keylen)
+-{
+- struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+-
+- if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE)
+- return -EINVAL;
+-
+- memcpy(tctx->key, key, keylen);
+- tctx->keylen = keylen;
+-
+- return 0;
+-}
+-
+-static int crypto_blake2s_init(struct shash_desc *desc)
+-{
+- struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+- struct blake2s_state *state = shash_desc_ctx(desc);
+- const int outlen = crypto_shash_digestsize(desc->tfm);
+-
+- if (tctx->keylen)
+- blake2s_init_key(state, outlen, tctx->key, tctx->keylen);
+- else
+- blake2s_init(state, outlen);
+-
+- return 0;
+-}
+-
+-static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in,
+- unsigned int inlen)
+-{
+- struct blake2s_state *state = shash_desc_ctx(desc);
+- const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
+-
+- if (unlikely(!inlen))
+- return 0;
+- if (inlen > fill) {
+- memcpy(state->buf + state->buflen, in, fill);
+- blake2s_compress_arch(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
+- state->buflen = 0;
+- in += fill;
+- inlen -= fill;
+- }
+- if (inlen > BLAKE2S_BLOCK_SIZE) {
+- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
+- /* Hash one less (full) block than strictly possible */
+- blake2s_compress_arch(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+- in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+- inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+- }
+- memcpy(state->buf + state->buflen, in, inlen);
+- state->buflen += inlen;
+-
+- return 0;
+-}
+-
+-static int crypto_blake2s_final(struct shash_desc *desc, u8 *out)
+-{
+- struct blake2s_state *state = shash_desc_ctx(desc);
+-
+- blake2s_set_lastblock(state);
+- memset(state->buf + state->buflen, 0,
+- BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
+- blake2s_compress_arch(state, state->buf, 1, state->buflen);
+- cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
+- memcpy(out, state->h, state->outlen);
+- memzero_explicit(state, sizeof(*state));
+-
+- return 0;
+-}
+-
+-static struct shash_alg blake2s_algs[] = {{
+- .base.cra_name = "blake2s-128",
+- .base.cra_driver_name = "blake2s-128-x86",
+- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
+- .base.cra_priority = 200,
+- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
+- .base.cra_module = THIS_MODULE,
+-
+- .digestsize = BLAKE2S_128_HASH_SIZE,
+- .setkey = crypto_blake2s_setkey,
+- .init = crypto_blake2s_init,
+- .update = crypto_blake2s_update,
+- .final = crypto_blake2s_final,
+- .descsize = sizeof(struct blake2s_state),
+-}, {
+- .base.cra_name = "blake2s-160",
+- .base.cra_driver_name = "blake2s-160-x86",
+- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
+- .base.cra_priority = 200,
+- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
+- .base.cra_module = THIS_MODULE,
+-
+- .digestsize = BLAKE2S_160_HASH_SIZE,
+- .setkey = crypto_blake2s_setkey,
+- .init = crypto_blake2s_init,
+- .update = crypto_blake2s_update,
+- .final = crypto_blake2s_final,
+- .descsize = sizeof(struct blake2s_state),
+-}, {
+- .base.cra_name = "blake2s-224",
+- .base.cra_driver_name = "blake2s-224-x86",
+- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
+- .base.cra_priority = 200,
+- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
+- .base.cra_module = THIS_MODULE,
+-
+- .digestsize = BLAKE2S_224_HASH_SIZE,
+- .setkey = crypto_blake2s_setkey,
+- .init = crypto_blake2s_init,
+- .update = crypto_blake2s_update,
+- .final = crypto_blake2s_final,
+- .descsize = sizeof(struct blake2s_state),
+-}, {
+- .base.cra_name = "blake2s-256",
+- .base.cra_driver_name = "blake2s-256-x86",
+- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
+- .base.cra_priority = 200,
+- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
+- .base.cra_module = THIS_MODULE,
+-
+- .digestsize = BLAKE2S_256_HASH_SIZE,
+- .setkey = crypto_blake2s_setkey,
+- .init = crypto_blake2s_init,
+- .update = crypto_blake2s_update,
+- .final = crypto_blake2s_final,
+- .descsize = sizeof(struct blake2s_state),
+-}};
++EXPORT_SYMBOL(blake2s_compress);
+
+ static int __init blake2s_mod_init(void)
+ {
+- if (!boot_cpu_has(X86_FEATURE_SSSE3))
+- return 0;
+-
+- static_branch_enable(&blake2s_use_ssse3);
++ if (boot_cpu_has(X86_FEATURE_SSSE3))
++ static_branch_enable(&blake2s_use_ssse3);
+
+ if (IS_ENABLED(CONFIG_AS_AVX512) &&
+ boot_cpu_has(X86_FEATURE_AVX) &&
+@@ -207,26 +70,9 @@ static int __init blake2s_mod_init(void)
+ XFEATURE_MASK_AVX512, NULL))
+ static_branch_enable(&blake2s_use_avx512);
+
+- return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
+- crypto_register_shashes(blake2s_algs,
+- ARRAY_SIZE(blake2s_algs)) : 0;
+-}
+-
+-static void __exit blake2s_mod_exit(void)
+-{
+- if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
+- crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
++ return 0;
+ }
+
+ module_init(blake2s_mod_init);
+-module_exit(blake2s_mod_exit);
+
+-MODULE_ALIAS_CRYPTO("blake2s-128");
+-MODULE_ALIAS_CRYPTO("blake2s-128-x86");
+-MODULE_ALIAS_CRYPTO("blake2s-160");
+-MODULE_ALIAS_CRYPTO("blake2s-160-x86");
+-MODULE_ALIAS_CRYPTO("blake2s-224");
+-MODULE_ALIAS_CRYPTO("blake2s-224-x86");
+-MODULE_ALIAS_CRYPTO("blake2s-256");
+-MODULE_ALIAS_CRYPTO("blake2s-256-x86");
+ MODULE_LICENSE("GPL v2");
+diff --git a/arch/x86/crypto/blake2s-shash.c b/arch/x86/crypto/blake2s-shash.c
+new file mode 100644
+index 0000000000000..59ae28abe35cc
+--- /dev/null
++++ b/arch/x86/crypto/blake2s-shash.c
+@@ -0,0 +1,77 @@
++// SPDX-License-Identifier: GPL-2.0 OR MIT
++/*
++ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved.
++ */
++
++#include <crypto/internal/blake2s.h>
++#include <crypto/internal/simd.h>
++#include <crypto/internal/hash.h>
++
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sizes.h>
++
++#include <asm/cpufeature.h>
++#include <asm/processor.h>
++
++static int crypto_blake2s_update_x86(struct shash_desc *desc,
++ const u8 *in, unsigned int inlen)
++{
++ return crypto_blake2s_update(desc, in, inlen, false);
++}
++
++static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
++{
++ return crypto_blake2s_final(desc, out, false);
++}
++
++#define BLAKE2S_ALG(name, driver_name, digest_size) \
++ { \
++ .base.cra_name = name, \
++ .base.cra_driver_name = driver_name, \
++ .base.cra_priority = 200, \
++ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
++ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
++ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
++ .base.cra_module = THIS_MODULE, \
++ .digestsize = digest_size, \
++ .setkey = crypto_blake2s_setkey, \
++ .init = crypto_blake2s_init, \
++ .update = crypto_blake2s_update_x86, \
++ .final = crypto_blake2s_final_x86, \
++ .descsize = sizeof(struct blake2s_state), \
++ }
++
++static struct shash_alg blake2s_algs[] = {
++ BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE),
++ BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE),
++ BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE),
++ BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE),
++};
++
++static int __init blake2s_mod_init(void)
++{
++ if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
++ return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
++ return 0;
++}
++
++static void __exit blake2s_mod_exit(void)
++{
++ if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
++ crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
++}
++
++module_init(blake2s_mod_init);
++module_exit(blake2s_mod_exit);
++
++MODULE_ALIAS_CRYPTO("blake2s-128");
++MODULE_ALIAS_CRYPTO("blake2s-128-x86");
++MODULE_ALIAS_CRYPTO("blake2s-160");
++MODULE_ALIAS_CRYPTO("blake2s-160-x86");
++MODULE_ALIAS_CRYPTO("blake2s-224");
++MODULE_ALIAS_CRYPTO("blake2s-224-x86");
++MODULE_ALIAS_CRYPTO("blake2s-256");
++MODULE_ALIAS_CRYPTO("blake2s-256-x86");
++MODULE_LICENSE("GPL v2");
+diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h
+index a4a8b1b16c0c1..956e4145311b1 100644
+--- a/arch/x86/include/asm/timex.h
++++ b/arch/x86/include/asm/timex.h
+@@ -5,6 +5,15 @@
+ #include <asm/processor.h>
+ #include <asm/tsc.h>
+
++static inline unsigned long random_get_entropy(void)
++{
++ if (!IS_ENABLED(CONFIG_X86_TSC) &&
++ !cpu_feature_enabled(X86_FEATURE_TSC))
++ return random_get_entropy_fallback();
++ return rdtsc();
++}
++#define random_get_entropy random_get_entropy
++
+ /* Assume we use the PIT time source for the clock tick */
+ #define CLOCK_TICK_RATE PIT_TICK_RATE
+
+diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
+index 01a300a9700b9..fbdc3d9514943 100644
+--- a/arch/x86/include/asm/tsc.h
++++ b/arch/x86/include/asm/tsc.h
+@@ -20,13 +20,12 @@ extern void disable_TSC(void);
+
+ static inline cycles_t get_cycles(void)
+ {
+-#ifndef CONFIG_X86_TSC
+- if (!boot_cpu_has(X86_FEATURE_TSC))
++ if (!IS_ENABLED(CONFIG_X86_TSC) &&
++ !cpu_feature_enabled(X86_FEATURE_TSC))
+ return 0;
+-#endif
+-
+ return rdtsc();
+ }
++#define get_cycles get_cycles
+
+ extern struct system_counterval_t convert_art_to_tsc(u64 art);
+ extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 65d11711cd7bb..021cd067733e3 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -84,7 +84,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
+ inc_irq_stat(hyperv_stimer0_count);
+ if (hv_stimer0_handler)
+ hv_stimer0_handler();
+- add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0);
++ add_interrupt_randomness(HYPERV_STIMER0_VECTOR);
+ ack_APIC_irq();
+
+ set_irq_regs(old_regs);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index a3ef793fce5f1..6ed6b090be941 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -297,6 +297,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
+
+ atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
+ }
++
++ /* Check if there are APF page ready requests pending */
++ if (enabled)
++ kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
+ }
+
+ static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
+@@ -2260,6 +2264,8 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
+ if (value & MSR_IA32_APICBASE_ENABLE) {
+ kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
+ static_key_slow_dec_deferred(&apic_hw_disabled);
++ /* Check if there are APF page ready requests pending */
++ kvm_make_request(KVM_REQ_APF_READY, vcpu);
+ } else {
+ static_key_slow_inc(&apic_hw_disabled.key);
+ atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 306268f90455f..6096d0f1a62af 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -5178,14 +5178,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
+ uint i;
+
+ if (pcid == kvm_get_active_pcid(vcpu)) {
+- mmu->invlpg(vcpu, gva, mmu->root_hpa);
++ if (mmu->invlpg)
++ mmu->invlpg(vcpu, gva, mmu->root_hpa);
+ tlb_flush = true;
+ }
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+ if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
+ pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
+- mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
++ if (mmu->invlpg)
++ mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+ tlb_flush = true;
+ }
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 4588f73bf59a4..ae18062c26a66 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11146,7 +11146,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
+ if (!kvm_pv_async_pf_enabled(vcpu))
+ return true;
+ else
+- return apf_pageready_slot_free(vcpu);
++ return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
+ }
+
+ void kvm_arch_start_assignment(struct kvm *kvm)
+diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
+index 233ec75e60c69..3f2462f2d0270 100644
+--- a/arch/xtensa/include/asm/timex.h
++++ b/arch/xtensa/include/asm/timex.h
+@@ -29,10 +29,6 @@
+
+ extern unsigned long ccount_freq;
+
+-typedef unsigned long long cycles_t;
+-
+-#define get_cycles() (0)
+-
+ void local_timer_setup(unsigned cpu);
+
+ /*
+@@ -59,4 +55,6 @@ static inline void set_linux_timer (unsigned long ccompare)
+ xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER);
+ }
+
++#include <asm-generic/timex.h>
++
+ #endif /* _XTENSA_TIMEX_H */
745 +index 1157f82dc9cf4..0dee9242491cb 100644
746 +--- a/crypto/Kconfig
747 ++++ b/crypto/Kconfig
748 +@@ -1936,9 +1936,10 @@ config CRYPTO_STATS
749 + config CRYPTO_HASH_INFO
750 + bool
751 +
752 +-source "lib/crypto/Kconfig"
753 + source "drivers/crypto/Kconfig"
754 + source "crypto/asymmetric_keys/Kconfig"
755 + source "certs/Kconfig"
756 +
757 + endif # if CRYPTO
758 ++
759 ++source "lib/crypto/Kconfig"
760 +diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c
761 +index 005783ff45ad0..5f96a21f87883 100644
762 +--- a/crypto/blake2s_generic.c
763 ++++ b/crypto/blake2s_generic.c
764 +@@ -1,149 +1,55 @@
765 + // SPDX-License-Identifier: GPL-2.0 OR MIT
766 + /*
767 ++ * shash interface to the generic implementation of BLAKE2s
768 ++ *
769 + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved.
770 + */
771 +
772 + #include <crypto/internal/blake2s.h>
773 +-#include <crypto/internal/simd.h>
774 + #include <crypto/internal/hash.h>
775 +
776 + #include <linux/types.h>
777 +-#include <linux/jump_label.h>
778 + #include <linux/kernel.h>
779 + #include <linux/module.h>
780 +
781 +-static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key,
782 +- unsigned int keylen)
783 ++static int crypto_blake2s_update_generic(struct shash_desc *desc,
784 ++ const u8 *in, unsigned int inlen)
785 + {
786 +- struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
787 +-
788 +- if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE)
789 +- return -EINVAL;
790 +-
791 +- memcpy(tctx->key, key, keylen);
792 +- tctx->keylen = keylen;
793 +-
794 +- return 0;
795 ++ return crypto_blake2s_update(desc, in, inlen, true);
796 + }
797 +
798 +-static int crypto_blake2s_init(struct shash_desc *desc)
799 ++static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
800 + {
801 +- struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
802 +- struct blake2s_state *state = shash_desc_ctx(desc);
803 +- const int outlen = crypto_shash_digestsize(desc->tfm);
804 +-
805 +- if (tctx->keylen)
806 +- blake2s_init_key(state, outlen, tctx->key, tctx->keylen);
807 +- else
808 +- blake2s_init(state, outlen);
809 +-
810 +- return 0;
811 ++ return crypto_blake2s_final(desc, out, true);
812 + }
813 +
814 +-static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in,
815 +- unsigned int inlen)
816 +-{
817 +- struct blake2s_state *state = shash_desc_ctx(desc);
818 +- const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
819 +-
820 +- if (unlikely(!inlen))
821 +- return 0;
822 +- if (inlen > fill) {
823 +- memcpy(state->buf + state->buflen, in, fill);
824 +- blake2s_compress_generic(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
825 +- state->buflen = 0;
826 +- in += fill;
827 +- inlen -= fill;
828 +- }
829 +- if (inlen > BLAKE2S_BLOCK_SIZE) {
830 +- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
831 +- /* Hash one less (full) block than strictly possible */
832 +- blake2s_compress_generic(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
833 +- in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
834 +- inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
835 ++#define BLAKE2S_ALG(name, driver_name, digest_size) \
836 ++ { \
837 ++ .base.cra_name = name, \
838 ++ .base.cra_driver_name = driver_name, \
839 ++ .base.cra_priority = 100, \
840 ++ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
841 ++ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
842 ++ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
843 ++ .base.cra_module = THIS_MODULE, \
844 ++ .digestsize = digest_size, \
845 ++ .setkey = crypto_blake2s_setkey, \
846 ++ .init = crypto_blake2s_init, \
847 ++ .update = crypto_blake2s_update_generic, \
848 ++ .final = crypto_blake2s_final_generic, \
849 ++ .descsize = sizeof(struct blake2s_state), \
850 + }
851 +- memcpy(state->buf + state->buflen, in, inlen);
852 +- state->buflen += inlen;
853 +-
854 +- return 0;
855 +-}
856 +-
857 +-static int crypto_blake2s_final(struct shash_desc *desc, u8 *out)
858 +-{
859 +- struct blake2s_state *state = shash_desc_ctx(desc);
860 +-
861 +- blake2s_set_lastblock(state);
862 +- memset(state->buf + state->buflen, 0,
863 +- BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
864 +- blake2s_compress_generic(state, state->buf, 1, state->buflen);
865 +- cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
866 +- memcpy(out, state->h, state->outlen);
867 +- memzero_explicit(state, sizeof(*state));
868 +-
869 +- return 0;
870 +-}
871 +-
872 +-static struct shash_alg blake2s_algs[] = {{
873 +- .base.cra_name = "blake2s-128",
874 +- .base.cra_driver_name = "blake2s-128-generic",
875 +- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
876 +- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
877 +- .base.cra_priority = 200,
878 +- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
879 +- .base.cra_module = THIS_MODULE,
880 +-
881 +- .digestsize = BLAKE2S_128_HASH_SIZE,
882 +- .setkey = crypto_blake2s_setkey,
883 +- .init = crypto_blake2s_init,
884 +- .update = crypto_blake2s_update,
885 +- .final = crypto_blake2s_final,
886 +- .descsize = sizeof(struct blake2s_state),
887 +-}, {
888 +- .base.cra_name = "blake2s-160",
889 +- .base.cra_driver_name = "blake2s-160-generic",
890 +- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
891 +- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
892 +- .base.cra_priority = 200,
893 +- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
894 +- .base.cra_module = THIS_MODULE,
895 +-
896 +- .digestsize = BLAKE2S_160_HASH_SIZE,
897 +- .setkey = crypto_blake2s_setkey,
898 +- .init = crypto_blake2s_init,
899 +- .update = crypto_blake2s_update,
900 +- .final = crypto_blake2s_final,
901 +- .descsize = sizeof(struct blake2s_state),
902 +-}, {
903 +- .base.cra_name = "blake2s-224",
904 +- .base.cra_driver_name = "blake2s-224-generic",
905 +- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
906 +- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
907 +- .base.cra_priority = 200,
908 +- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
909 +- .base.cra_module = THIS_MODULE,
910 +-
911 +- .digestsize = BLAKE2S_224_HASH_SIZE,
912 +- .setkey = crypto_blake2s_setkey,
913 +- .init = crypto_blake2s_init,
914 +- .update = crypto_blake2s_update,
915 +- .final = crypto_blake2s_final,
916 +- .descsize = sizeof(struct blake2s_state),
917 +-}, {
918 +- .base.cra_name = "blake2s-256",
919 +- .base.cra_driver_name = "blake2s-256-generic",
920 +- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
921 +- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
922 +- .base.cra_priority = 200,
923 +- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
924 +- .base.cra_module = THIS_MODULE,
925 +
926 +- .digestsize = BLAKE2S_256_HASH_SIZE,
927 +- .setkey = crypto_blake2s_setkey,
928 +- .init = crypto_blake2s_init,
929 +- .update = crypto_blake2s_update,
930 +- .final = crypto_blake2s_final,
931 +- .descsize = sizeof(struct blake2s_state),
932 +-}};
933 ++static struct shash_alg blake2s_algs[] = {
934 ++ BLAKE2S_ALG("blake2s-128", "blake2s-128-generic",
935 ++ BLAKE2S_128_HASH_SIZE),
936 ++ BLAKE2S_ALG("blake2s-160", "blake2s-160-generic",
937 ++ BLAKE2S_160_HASH_SIZE),
938 ++ BLAKE2S_ALG("blake2s-224", "blake2s-224-generic",
939 ++ BLAKE2S_224_HASH_SIZE),
940 ++ BLAKE2S_ALG("blake2s-256", "blake2s-256-generic",
941 ++ BLAKE2S_256_HASH_SIZE),
942 ++};
943 +
944 + static int __init blake2s_mod_init(void)
945 + {
946 +diff --git a/crypto/drbg.c b/crypto/drbg.c
947 +index 3132967a17497..19ea8d6628ffb 100644
948 +--- a/crypto/drbg.c
949 ++++ b/crypto/drbg.c
950 +@@ -1490,12 +1490,13 @@ static int drbg_generate_long(struct drbg_state *drbg,
951 + return 0;
952 + }
953 +
954 +-static void drbg_schedule_async_seed(struct random_ready_callback *rdy)
955 ++static int drbg_schedule_async_seed(struct notifier_block *nb, unsigned long action, void *data)
956 + {
957 +- struct drbg_state *drbg = container_of(rdy, struct drbg_state,
958 ++ struct drbg_state *drbg = container_of(nb, struct drbg_state,
959 + random_ready);
960 +
961 + schedule_work(&drbg->seed_work);
962 ++ return 0;
963 + }
964 +
965 + static int drbg_prepare_hrng(struct drbg_state *drbg)
966 +@@ -1510,10 +1511,8 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
967 +
968 + INIT_WORK(&drbg->seed_work, drbg_async_seed);
969 +
970 +- drbg->random_ready.owner = THIS_MODULE;
971 +- drbg->random_ready.func = drbg_schedule_async_seed;
972 +-
973 +- err = add_random_ready_callback(&drbg->random_ready);
974 ++ drbg->random_ready.notifier_call = drbg_schedule_async_seed;
975 ++ err = register_random_ready_notifier(&drbg->random_ready);
976 +
977 + switch (err) {
978 + case 0:
979 +@@ -1524,7 +1523,7 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
980 + fallthrough;
981 +
982 + default:
983 +- drbg->random_ready.func = NULL;
984 ++ drbg->random_ready.notifier_call = NULL;
985 + return err;
986 + }
987 +
988 +@@ -1628,8 +1627,8 @@ free_everything:
989 + */
990 + static int drbg_uninstantiate(struct drbg_state *drbg)
991 + {
992 +- if (drbg->random_ready.func) {
993 +- del_random_ready_callback(&drbg->random_ready);
994 ++ if (drbg->random_ready.notifier_call) {
995 ++ unregister_random_ready_notifier(&drbg->random_ready);
996 + cancel_work_sync(&drbg->seed_work);
997 + }
998 +
999 +diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
1000 +index a5cc4f3bb1e31..1d94c4625f365 100644
1001 +--- a/drivers/acpi/sysfs.c
1002 ++++ b/drivers/acpi/sysfs.c
1003 +@@ -439,18 +439,29 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
1004 + {
1005 + struct acpi_data_attr *data_attr;
1006 + void __iomem *base;
1007 +- ssize_t rc;
1008 ++ ssize_t size;
1009 +
1010 + data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
1011 ++ size = data_attr->attr.size;
1012 ++
1013 ++ if (offset < 0)
1014 ++ return -EINVAL;
1015 ++
1016 ++ if (offset >= size)
1017 ++ return 0;
1018 +
1019 +- base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
1020 ++ if (count > size - offset)
1021 ++ count = size - offset;
1022 ++
1023 ++ base = acpi_os_map_iomem(data_attr->addr, size);
1024 + if (!base)
1025 + return -ENOMEM;
1026 +- rc = memory_read_from_buffer(buf, count, &offset, base,
1027 +- data_attr->attr.size);
1028 +- acpi_os_unmap_memory(base, data_attr->attr.size);
1029 +
1030 +- return rc;
1031 ++ memcpy_fromio(buf, base + offset, count);
1032 ++
1033 ++ acpi_os_unmap_iomem(base, size);
1034 ++
1035 ++ return count;
1036 + }
1037 +
1038 + static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
1039 +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
1040 +index d229a2d0c0174..3e2703a496328 100644
1041 +--- a/drivers/char/Kconfig
1042 ++++ b/drivers/char/Kconfig
1043 +@@ -495,4 +495,5 @@ config RANDOM_TRUST_BOOTLOADER
1044 + device randomness. Say Y here to assume the entropy provided by the
1045 + booloader is trustworthy so it will be added to the kernel's entropy
1046 + pool. Otherwise, say N here so it will be regarded as device input that
1047 +- only mixes the entropy pool.
1048 ++ only mixes the entropy pool. This can also be configured at boot with
1049 ++ "random.trust_bootloader=on/off".
1050 +diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
1051 +index 8c1c47dd9f464..5749998feaa46 100644
1052 +--- a/drivers/char/hw_random/core.c
1053 ++++ b/drivers/char/hw_random/core.c
1054 +@@ -15,6 +15,7 @@
1055 + #include <linux/err.h>
1056 + #include <linux/fs.h>
1057 + #include <linux/hw_random.h>
1058 ++#include <linux/random.h>
1059 + #include <linux/kernel.h>
1060 + #include <linux/kthread.h>
1061 + #include <linux/sched/signal.h>
1062 +diff --git a/drivers/char/random.c b/drivers/char/random.c
1063 +index 5f541c9465598..00b50ccc9fae6 100644
1064 +--- a/drivers/char/random.c
1065 ++++ b/drivers/char/random.c
1066 +@@ -1,310 +1,26 @@
1067 ++// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
1068 + /*
1069 +- * random.c -- A strong random number generator
1070 +- *
1071 +- * Copyright (C) 2017 Jason A. Donenfeld <Jason@×××××.com>. All
1072 +- * Rights Reserved.
1073 +- *
1074 ++ * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved.
1075 + * Copyright Matt Mackall <mpm@×××××××.com>, 2003, 2004, 2005
1076 +- *
1077 +- * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
1078 +- * rights reserved.
1079 +- *
1080 +- * Redistribution and use in source and binary forms, with or without
1081 +- * modification, are permitted provided that the following conditions
1082 +- * are met:
1083 +- * 1. Redistributions of source code must retain the above copyright
1084 +- * notice, and the entire permission notice in its entirety,
1085 +- * including the disclaimer of warranties.
1086 +- * 2. Redistributions in binary form must reproduce the above copyright
1087 +- * notice, this list of conditions and the following disclaimer in the
1088 +- * documentation and/or other materials provided with the distribution.
1089 +- * 3. The name of the author may not be used to endorse or promote
1090 +- * products derived from this software without specific prior
1091 +- * written permission.
1092 +- *
1093 +- * ALTERNATIVELY, this product may be distributed under the terms of
1094 +- * the GNU General Public License, in which case the provisions of the GPL are
1095 +- * required INSTEAD OF the above restrictions. (This clause is
1096 +- * necessary due to a potential bad interaction between the GPL and
1097 +- * the restrictions contained in a BSD-style copyright.)
1098 +- *
1099 +- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
1100 +- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1101 +- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
1102 +- * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
1103 +- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1104 +- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
1105 +- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
1106 +- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1107 +- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1108 +- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
1109 +- * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
1110 +- * DAMAGE.
1111 +- */
1112 +-
1113 +-/*
1114 +- * (now, with legal B.S. out of the way.....)
1115 +- *
1116 +- * This routine gathers environmental noise from device drivers, etc.,
1117 +- * and returns good random numbers, suitable for cryptographic use.
1118 +- * Besides the obvious cryptographic uses, these numbers are also good
1119 +- * for seeding TCP sequence numbers, and other places where it is
1120 +- * desirable to have numbers which are not only random, but hard to
1121 +- * predict by an attacker.
1122 +- *
1123 +- * Theory of operation
1124 +- * ===================
1125 +- *
1126 +- * Computers are very predictable devices. Hence it is extremely hard
1127 +- * to produce truly random numbers on a computer --- as opposed to
1128 +- * pseudo-random numbers, which can easily generated by using a
1129 +- * algorithm. Unfortunately, it is very easy for attackers to guess
1130 +- * the sequence of pseudo-random number generators, and for some
1131 +- * applications this is not acceptable. So instead, we must try to
1132 +- * gather "environmental noise" from the computer's environment, which
1133 +- * must be hard for outside attackers to observe, and use that to
1134 +- * generate random numbers. In a Unix environment, this is best done
1135 +- * from inside the kernel.
1136 +- *
1137 +- * Sources of randomness from the environment include inter-keyboard
1138 +- * timings, inter-interrupt timings from some interrupts, and other
1139 +- * events which are both (a) non-deterministic and (b) hard for an
1140 +- * outside observer to measure. Randomness from these sources are
1141 +- * added to an "entropy pool", which is mixed using a CRC-like function.
1142 +- * This is not cryptographically strong, but it is adequate assuming
1143 +- * the randomness is not chosen maliciously, and it is fast enough that
1144 +- * the overhead of doing it on every interrupt is very reasonable.
1145 +- * As random bytes are mixed into the entropy pool, the routines keep
1146 +- * an *estimate* of how many bits of randomness have been stored into
1147 +- * the random number generator's internal state.
1148 +- *
1149 +- * When random bytes are desired, they are obtained by taking the SHA
1150 +- * hash of the contents of the "entropy pool". The SHA hash avoids
1151 +- * exposing the internal state of the entropy pool. It is believed to
1152 +- * be computationally infeasible to derive any useful information
1153 +- * about the input of SHA from its output. Even if it is possible to
1154 +- * analyze SHA in some clever way, as long as the amount of data
1155 +- * returned from the generator is less than the inherent entropy in
1156 +- * the pool, the output data is totally unpredictable. For this
1157 +- * reason, the routine decreases its internal estimate of how many
1158 +- * bits of "true randomness" are contained in the entropy pool as it
1159 +- * outputs random numbers.
1160 +- *
1161 +- * If this estimate goes to zero, the routine can still generate
1162 +- * random numbers; however, an attacker may (at least in theory) be
1163 +- * able to infer the future output of the generator from prior
1164 +- * outputs. This requires successful cryptanalysis of SHA, which is
1165 +- * not believed to be feasible, but there is a remote possibility.
1166 +- * Nonetheless, these numbers should be useful for the vast majority
1167 +- * of purposes.
1168 +- *
1169 +- * Exported interfaces ---- output
1170 +- * ===============================
1171 +- *
1172 +- * There are four exported interfaces; two for use within the kernel,
1173 +- * and two or use from userspace.
1174 +- *
1175 +- * Exported interfaces ---- userspace output
1176 +- * -----------------------------------------
1177 +- *
1178 +- * The userspace interfaces are two character devices /dev/random and
1179 +- * /dev/urandom. /dev/random is suitable for use when very high
1180 +- * quality randomness is desired (for example, for key generation or
1181 +- * one-time pads), as it will only return a maximum of the number of
1182 +- * bits of randomness (as estimated by the random number generator)
1183 +- * contained in the entropy pool.
1184 +- *
1185 +- * The /dev/urandom device does not have this limit, and will return
1186 +- * as many bytes as are requested. As more and more random bytes are
1187 +- * requested without giving time for the entropy pool to recharge,
1188 +- * this will result in random numbers that are merely cryptographically
1189 +- * strong. For many applications, however, this is acceptable.
1190 +- *
1191 +- * Exported interfaces ---- kernel output
1192 +- * --------------------------------------
1193 +- *
1194 +- * The primary kernel interface is
1195 +- *
1196 +- * void get_random_bytes(void *buf, int nbytes);
1197 +- *
1198 +- * This interface will return the requested number of random bytes,
1199 +- * and place it in the requested buffer. This is equivalent to a
1200 +- * read from /dev/urandom.
1201 +- *
1202 +- * For less critical applications, there are the functions:
1203 +- *
1204 +- * u32 get_random_u32()
1205 +- * u64 get_random_u64()
1206 +- * unsigned int get_random_int()
1207 +- * unsigned long get_random_long()
1208 +- *
1209 +- * These are produced by a cryptographic RNG seeded from get_random_bytes,
1210 +- * and so do not deplete the entropy pool as much. These are recommended
1211 +- * for most in-kernel operations *if the result is going to be stored in
1212 +- * the kernel*.
1213 +- *
1214 +- * Specifically, the get_random_int() family do not attempt to do
1215 +- * "anti-backtracking". If you capture the state of the kernel (e.g.
1216 +- * by snapshotting the VM), you can figure out previous get_random_int()
1217 +- * return values. But if the value is stored in the kernel anyway,
1218 +- * this is not a problem.
1219 +- *
1220 +- * It *is* safe to expose get_random_int() output to attackers (e.g. as
1221 +- * network cookies); given outputs 1..n, it's not feasible to predict
1222 +- * outputs 0 or n+1. The only concern is an attacker who breaks into
1223 +- * the kernel later; the get_random_int() engine is not reseeded as
1224 +- * often as the get_random_bytes() one.
1225 +- *
1226 +- * get_random_bytes() is needed for keys that need to stay secret after
1227 +- * they are erased from the kernel. For example, any key that will
1228 +- * be wrapped and stored encrypted. And session encryption keys: we'd
1229 +- * like to know that after the session is closed and the keys erased,
1230 +- * the plaintext is unrecoverable to someone who recorded the ciphertext.
1231 +- *
1232 +- * But for network ports/cookies, stack canaries, PRNG seeds, address
1233 +- * space layout randomization, session *authentication* keys, or other
1234 +- * applications where the sensitive data is stored in the kernel in
1235 +- * plaintext for as long as it's sensitive, the get_random_int() family
1236 +- * is just fine.
1237 +- *
1238 +- * Consider ASLR. We want to keep the address space secret from an
1239 +- * outside attacker while the process is running, but once the address
1240 +- * space is torn down, it's of no use to an attacker any more. And it's
1241 +- * stored in kernel data structures as long as it's alive, so worrying
1242 +- * about an attacker's ability to extrapolate it from the get_random_int()
1243 +- * CRNG is silly.
1244 +- *
1245 +- * Even some cryptographic keys are safe to generate with get_random_int().
1246 +- * In particular, keys for SipHash are generally fine. Here, knowledge
1247 +- * of the key authorizes you to do something to a kernel object (inject
1248 +- * packets to a network connection, or flood a hash table), and the
1249 +- * key is stored with the object being protected. Once it goes away,
1250 +- * we no longer care if anyone knows the key.
1251 +- *
1252 +- * prandom_u32()
1253 +- * -------------
1254 +- *
1255 +- * For even weaker applications, see the pseudorandom generator
1256 +- * prandom_u32(), prandom_max(), and prandom_bytes(). If the random
1257 +- * numbers aren't security-critical at all, these are *far* cheaper.
1258 +- * Useful for self-tests, random error simulation, randomized backoffs,
1259 +- * and any other application where you trust that nobody is trying to
1260 +- * maliciously mess with you by guessing the "random" numbers.
1261 +- *
1262 +- * Exported interfaces ---- input
1263 +- * ==============================
1264 +- *
1265 +- * The current exported interfaces for gathering environmental noise
1266 +- * from the devices are:
1267 +- *
1268 +- * void add_device_randomness(const void *buf, unsigned int size);
1269 +- * void add_input_randomness(unsigned int type, unsigned int code,
1270 +- * unsigned int value);
1271 +- * void add_interrupt_randomness(int irq, int irq_flags);
1272 +- * void add_disk_randomness(struct gendisk *disk);
1273 +- *
1274 +- * add_device_randomness() is for adding data to the random pool that
1275 +- * is likely to differ between two devices (or possibly even per boot).
1276 +- * This would be things like MAC addresses or serial numbers, or the
1277 +- * read-out of the RTC. This does *not* add any actual entropy to the
1278 +- * pool, but it initializes the pool to different values for devices
1279 +- * that might otherwise be identical and have very little entropy
1280 +- * available to them (particularly common in the embedded world).
1281 +- *
1282 +- * add_input_randomness() uses the input layer interrupt timing, as well as
1283 +- * the event type information from the hardware.
1284 +- *
1285 +- * add_interrupt_randomness() uses the interrupt timing as random
1286 +- * inputs to the entropy pool. Using the cycle counters and the irq source
1287 +- * as inputs, it feeds the randomness roughly once a second.
1288 +- *
1289 +- * add_disk_randomness() uses what amounts to the seek time of block
1290 +- * layer request events, on a per-disk_devt basis, as input to the
1291 +- * entropy pool. Note that high-speed solid state drives with very low
1292 +- * seek times do not make for good sources of entropy, as their seek
1293 +- * times are usually fairly consistent.
1294 +- *
1295 +- * All of these routines try to estimate how many bits of randomness a
1296 +- * particular randomness source. They do this by keeping track of the
1297 +- * first and second order deltas of the event timings.
1298 +- *
1299 +- * Ensuring unpredictability at system startup
1300 +- * ============================================
1301 +- *
1302 +- * When any operating system starts up, it will go through a sequence
1303 +- * of actions that are fairly predictable by an adversary, especially
1304 +- * if the start-up does not involve interaction with a human operator.
1305 +- * This reduces the actual number of bits of unpredictability in the
1306 +- * entropy pool below the value in entropy_count. In order to
1307 +- * counteract this effect, it helps to carry information in the
1308 +- * entropy pool across shut-downs and start-ups. To do this, put the
1309 +- * following lines an appropriate script which is run during the boot
1310 +- * sequence:
1311 +- *
1312 +- * echo "Initializing random number generator..."
1313 +- * random_seed=/var/run/random-seed
1314 +- * # Carry a random seed from start-up to start-up
1315 +- * # Load and then save the whole entropy pool
1316 +- * if [ -f $random_seed ]; then
1317 +- * cat $random_seed >/dev/urandom
1318 +- * else
1319 +- * touch $random_seed
1320 +- * fi
1321 +- * chmod 600 $random_seed
1322 +- * dd if=/dev/urandom of=$random_seed count=1 bs=512
1323 +- *
1324 +- * and the following lines in an appropriate script which is run as
1325 +- * the system is shutdown:
1326 +- *
1327 +- * # Carry a random seed from shut-down to start-up
1328 +- * # Save the whole entropy pool
1329 +- * echo "Saving random seed..."
1330 +- * random_seed=/var/run/random-seed
1331 +- * touch $random_seed
1332 +- * chmod 600 $random_seed
1333 +- * dd if=/dev/urandom of=$random_seed count=1 bs=512
1334 +- *
1335 +- * For example, on most modern systems using the System V init
1336 +- * scripts, such code fragments would be found in
1337 +- * /etc/rc.d/init.d/random. On older Linux systems, the correct script
1338 +- * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
1339 +- *
1340 +- * Effectively, these commands cause the contents of the entropy pool
1341 +- * to be saved at shut-down time and reloaded into the entropy pool at
1342 +- * start-up. (The 'dd' in the addition to the bootup script is to
1343 +- * make sure that /etc/random-seed is different for every start-up,
1344 +- * even if the system crashes without executing rc.0.) Even with
1345 +- * complete knowledge of the start-up activities, predicting the state
1346 +- * of the entropy pool requires knowledge of the previous history of
1347 +- * the system.
1348 +- *
1349 +- * Configuring the /dev/random driver under Linux
1350 +- * ==============================================
1351 +- *
1352 +- * The /dev/random driver under Linux uses minor numbers 8 and 9 of
1353 +- * the /dev/mem major number (#1). So if your system does not have
1354 +- * /dev/random and /dev/urandom created already, they can be created
1355 +- * by using the commands:
1356 +- *
1357 +- * mknod /dev/random c 1 8
1358 +- * mknod /dev/urandom c 1 9
1359 +- *
1360 +- * Acknowledgements:
1361 +- * =================
1362 +- *
1363 +- * Ideas for constructing this random number generator were derived
1364 +- * from Pretty Good Privacy's random number generator, and from private
1365 +- * discussions with Phil Karn. Colin Plumb provided a faster random
1366 +- * number generator, which speed up the mixing function of the entropy
1367 +- * pool, taken from PGPfone. Dale Worley has also contributed many
1368 +- * useful ideas and suggestions to improve this driver.
1369 +- *
1370 +- * Any flaws in the design are solely my responsibility, and should
1371 +- * not be attributed to the Phil, Colin, or any of authors of PGP.
1372 +- *
1373 +- * Further background information on this topic may be obtained from
1374 +- * RFC 1750, "Randomness Recommendations for Security", by Donald
1375 +- * Eastlake, Steve Crocker, and Jeff Schiller.
1376 ++ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
1377 ++ *
1378 ++ * This driver produces cryptographically secure pseudorandom data. It is divided
1379 ++ * into roughly six sections, each with a section header:
1380 ++ *
1381 ++ * - Initialization and readiness waiting.
1382 ++ * - Fast key erasure RNG, the "crng".
1383 ++ * - Entropy accumulation and extraction routines.
1384 ++ * - Entropy collection routines.
1385 ++ * - Userspace reader/writer interfaces.
1386 ++ * - Sysctl interface.
1387 ++ *
1388 ++ * The high level overview is that there is one input pool, into which
1389 ++ * various pieces of data are hashed. Prior to initialization, some of that
1390 ++ * data is then "credited" as having a certain number of bits of entropy.
1391 ++ * When enough bits of entropy are available, the hash is finalized and
1392 ++ * handed as a key to a stream cipher that expands it indefinitely for
1393 ++ * various consumers. This key is periodically refreshed as the various
1394 ++ * entropy collectors, described below, add data to the input pool.
1395 + */
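The overview above compresses well into code. The following standalone sketch (illustrative only, not part of the patch; toy_pipeline is a hypothetical name) shows the mix/extract/expand pipeline using the same primitives the rewritten driver is built on:

/* Toy mix -> extract -> expand pipeline; not the driver's actual code path. */
static void toy_pipeline(void)
{
	struct blake2s_state pool;
	u8 key[CHACHA_KEY_SIZE];
	u32 state[CHACHA_STATE_WORDS];
	u8 out[CHACHA_BLOCK_SIZE];
	cycles_t sample = random_get_entropy();

	blake2s_init(&pool, sizeof(key));
	blake2s_update(&pool, (const u8 *)&sample, sizeof(sample)); /* mix */
	blake2s_final(&pool, key);                                  /* extract */

	chacha_init_consts(state);                                  /* expand */
	memcpy(&state[4], key, sizeof(key));
	memset(&state[12], 0, sizeof(u32) * 4);
	chacha20_block(state, out); /* out now holds 64 bytes of output */

	memzero_explicit(state, sizeof(state));
	memzero_explicit(key, sizeof(key));
}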
1396 +
1397 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1398 +@@ -327,7 +43,6 @@
1399 + #include <linux/spinlock.h>
1400 + #include <linux/kthread.h>
1401 + #include <linux/percpu.h>
1402 +-#include <linux/fips.h>
1403 + #include <linux/ptrace.h>
1404 + #include <linux/workqueue.h>
1405 + #include <linux/irq.h>
1406 +@@ -335,1503 +50,1082 @@
1407 + #include <linux/syscalls.h>
1408 + #include <linux/completion.h>
1409 + #include <linux/uuid.h>
1410 ++#include <linux/uaccess.h>
1411 ++#include <linux/siphash.h>
1412 ++#include <linux/uio.h>
1413 + #include <crypto/chacha.h>
1414 +-#include <crypto/sha.h>
1415 +-
1416 ++#include <crypto/blake2s.h>
1417 + #include <asm/processor.h>
1418 +-#include <linux/uaccess.h>
1419 + #include <asm/irq.h>
1420 + #include <asm/irq_regs.h>
1421 + #include <asm/io.h>
1422 +
1423 +-#define CREATE_TRACE_POINTS
1424 +-#include <trace/events/random.h>
1425 +-
1426 +-/* #define ADD_INTERRUPT_BENCH */
1427 ++/*********************************************************************
1428 ++ *
1429 ++ * Initialization and readiness waiting.
1430 ++ *
1431 ++ * Much of the RNG infrastructure is devoted to various dependencies
1432 ++ * being able to wait until the RNG has collected enough entropy and
1433 ++ * is ready for safe consumption.
1434 ++ *
1435 ++ *********************************************************************/
1436 +
1437 + /*
1438 +- * Configuration information
1439 ++ * crng_init is protected by base_crng->lock, and only increases
1440 ++ * its value (from empty->early->ready).
1441 + */
1442 +-#define INPUT_POOL_SHIFT 12
1443 +-#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
1444 +-#define OUTPUT_POOL_SHIFT 10
1445 +-#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
1446 +-#define EXTRACT_SIZE 10
1447 +-
1448 ++static enum {
1449 ++ CRNG_EMPTY = 0, /* Little to no entropy collected */
1450 ++ CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
1451 ++ CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
1452 ++} crng_init __read_mostly = CRNG_EMPTY;
1453 ++static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
1454 ++#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
1455 ++/* Various types of waiters for crng_init->CRNG_READY transition. */
1456 ++static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
1457 ++static struct fasync_struct *fasync;
1458 ++static DEFINE_SPINLOCK(random_ready_chain_lock);
1459 ++static RAW_NOTIFIER_HEAD(random_ready_chain);
1460 +
1461 +-#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
1462 ++/* Control how we warn userspace. */
1463 ++static struct ratelimit_state urandom_warning =
1464 ++ RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
1465 ++static int ratelimit_disable __read_mostly =
1466 ++ IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
1467 ++module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
1468 ++MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
1469 +
1470 + /*
1471 +- * To allow fractional bits to be tracked, the entropy_count field is
1472 +- * denominated in units of 1/8th bits.
1473 ++ * Returns whether or not the input pool has been seeded and thus guaranteed
1474 ++ * to supply cryptographically secure random numbers. This applies to: the
1475 ++ * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
1476 ++ * u64,int,long} family of functions.
1477 + *
1478 +- * 2*(ENTROPY_SHIFT + poolbitshift) must be <= 31, or the multiply in
1479 +- * credit_entropy_bits() needs to be 64 bits wide.
1480 ++ * Returns: true if the input pool has been seeded.
1481 ++ * false if the input pool has not been seeded.
1482 + */
1483 +-#define ENTROPY_SHIFT 3
1484 +-#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
1485 ++bool rng_is_initialized(void)
1486 ++{
1487 ++ return crng_ready();
1488 ++}
1489 ++EXPORT_SYMBOL(rng_is_initialized);
1490 +
1491 +-/*
1492 +- * If the entropy count falls under this number of bits, then we
1493 +- * should wake up processes which are selecting or polling on write
1494 +- * access to /dev/random.
1495 +- */
1496 +-static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
1497 ++static void __cold crng_set_ready(struct work_struct *work)
1498 ++{
1499 ++ static_branch_enable(&crng_is_ready);
1500 ++}
1501 ++
1502 ++/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
1503 ++static void try_to_generate_entropy(void);
1504 +
1505 + /*
1506 +- * Originally, we used a primitive polynomial of degree .poolwords
1507 +- * over GF(2). The taps for various sizes are defined below. They
1508 +- * were chosen to be evenly spaced except for the last tap, which is 1
1509 +- * to get the twisting happening as fast as possible.
1510 +- *
1511 +- * For the purposes of better mixing, we use the CRC-32 polynomial as
1512 +- * well to make a (modified) twisted Generalized Feedback Shift
1513 +- * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
1514 +- * generators. ACM Transactions on Modeling and Computer Simulation
1515 +- * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
1516 +- * GFSR generators II. ACM Transactions on Modeling and Computer
1517 +- * Simulation 4:254-266)
1518 +- *
1519 +- * Thanks to Colin Plumb for suggesting this.
1520 +- *
1521 +- * The mixing operation is much less sensitive than the output hash,
1522 +- * where we use SHA-1. All that we want of mixing operation is that
1523 +- * it be a good non-cryptographic hash; i.e. it not produce collisions
1524 +- * when fed "random" data of the sort we expect to see. As long as
1525 +- * the pool state differs for different inputs, we have preserved the
1526 +- * input entropy and done a good job. The fact that an intelligent
1527 +- * attacker can construct inputs that will produce controlled
1528 +- * alterations to the pool's state is not important because we don't
1529 +- * consider such inputs to contribute any randomness. The only
1530 +- * property we need with respect to them is that the attacker can't
1531 +- * increase his/her knowledge of the pool's state. Since all
1532 +- * additions are reversible (knowing the final state and the input,
1533 +- * you can reconstruct the initial state), if an attacker has any
1534 +- * uncertainty about the initial state, he/she can only shuffle that
1535 +- * uncertainty about, but never cause any collisions (which would
1536 +- * decrease the uncertainty).
1537 ++ * Wait for the input pool to be seeded and thus guaranteed to supply
1538 ++ * cryptographically secure random numbers. This applies to: the /dev/urandom
1539 ++ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
1540 ++ * family of functions. Using any of these functions without first calling
1541 ++ * this function forfeits the guarantee of security.
1542 + *
1543 +- * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
1544 +- * Videau in their paper, "The Linux Pseudorandom Number Generator
1545 +- * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their
1546 +- * paper, they point out that we are not using a true Twisted GFSR,
1547 +- * since Matsumoto & Kurita used a trinomial feedback polynomial (that
1548 +- * is, with only three taps, instead of the six that we are using).
1549 +- * As a result, the resulting polynomial is neither primitive nor
1550 +- * irreducible, and hence does not have a maximal period over
1551 +- * GF(2**32). They suggest a slight change to the generator
1552 +- * polynomial which improves the resulting TGFSR polynomial to be
1553 +- * irreducible, which we have made here.
1554 ++ * Returns: 0 if the input pool has been seeded.
1555 ++ * -ERESTARTSYS if the function was interrupted by a signal.
1556 + */
1557 +-static const struct poolinfo {
1558 +- int poolbitshift, poolwords, poolbytes, poolfracbits;
1559 +-#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
1560 +- int tap1, tap2, tap3, tap4, tap5;
1561 +-} poolinfo_table[] = {
1562 +- /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
1563 +- /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
1564 +- { S(128), 104, 76, 51, 25, 1 },
1565 +-};
1566 ++int wait_for_random_bytes(void)
1567 ++{
1568 ++ while (!crng_ready()) {
1569 ++ int ret;
1570 ++
1571 ++ try_to_generate_entropy();
1572 ++ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
1573 ++ if (ret)
1574 ++ return ret > 0 ? 0 : ret;
1575 ++ }
1576 ++ return 0;
1577 ++}
1578 ++EXPORT_SYMBOL(wait_for_random_bytes);
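A consumer that needs the seeding guarantee would follow the pattern below (a hedged sketch; example_generate_key is hypothetical, while wait_for_random_bytes() and get_random_bytes() are the interfaces this patch provides):

/* Hypothetical caller: block until seeded, then draw key material. */
static int example_generate_key(u8 key[CHACHA_KEY_SIZE])
{
	int ret = wait_for_random_bytes(); /* may sleep; -ERESTARTSYS on signal */

	if (ret)
		return ret;
	get_random_bytes(key, CHACHA_KEY_SIZE); /* now backed by a seeded crng */
	return 0;
}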
1579 +
1580 + /*
1581 +- * Static global variables
1582 ++ * Add a callback function that will be invoked when the input
1583 ++ * pool is initialised.
1584 ++ *
1585 ++ * returns: 0 if callback is successfully added
1586 ++ * -EALREADY if pool is already initialised (callback not called)
1587 + */
1588 +-static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
1589 +-static struct fasync_struct *fasync;
1590 +-
1591 +-static DEFINE_SPINLOCK(random_ready_list_lock);
1592 +-static LIST_HEAD(random_ready_list);
1593 ++int __cold register_random_ready_notifier(struct notifier_block *nb)
1594 ++{
1595 ++ unsigned long flags;
1596 ++ int ret = -EALREADY;
1597 +
1598 +-struct crng_state {
1599 +- __u32 state[16];
1600 +- unsigned long init_time;
1601 +- spinlock_t lock;
1602 +-};
1603 ++ if (crng_ready())
1604 ++ return ret;
1605 +
1606 +-static struct crng_state primary_crng = {
1607 +- .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
1608 +-};
1609 ++ spin_lock_irqsave(&random_ready_chain_lock, flags);
1610 ++ if (!crng_ready())
1611 ++ ret = raw_notifier_chain_register(&random_ready_chain, nb);
1612 ++ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
1613 ++ return ret;
1614 ++}
1615 ++EXPORT_SYMBOL(register_random_ready_notifier);
1616 +
1617 + /*
1618 +- * crng_init = 0 --> Uninitialized
1619 +- * 1 --> Initialized
1620 +- * 2 --> Initialized from input_pool
1621 +- *
1622 +- * crng_init is protected by primary_crng->lock, and only increases
1623 +- * its value (from 0->1->2).
1624 ++ * Delete a previously registered readiness callback function.
1625 + */
1626 +-static int crng_init = 0;
1627 +-static bool crng_need_final_init = false;
1628 +-#define crng_ready() (likely(crng_init > 1))
1629 +-static int crng_init_cnt = 0;
1630 +-static unsigned long crng_global_init_time = 0;
1631 +-#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
1632 +-static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
1633 +-static void _crng_backtrack_protect(struct crng_state *crng,
1634 +- __u8 tmp[CHACHA_BLOCK_SIZE], int used);
1635 +-static void process_random_ready_list(void);
1636 +-static void _get_random_bytes(void *buf, int nbytes);
1637 +-
1638 +-static struct ratelimit_state unseeded_warning =
1639 +- RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
1640 +-static struct ratelimit_state urandom_warning =
1641 +- RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
1642 ++int __cold unregister_random_ready_notifier(struct notifier_block *nb)
1643 ++{
1644 ++ unsigned long flags;
1645 ++ int ret;
1646 +
1647 +-static int ratelimit_disable __read_mostly;
1648 ++ spin_lock_irqsave(&random_ready_chain_lock, flags);
1649 ++ ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
1650 ++ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
1651 ++ return ret;
1652 ++}
1653 ++EXPORT_SYMBOL(unregister_random_ready_notifier);
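For contexts that cannot sleep in wait_for_random_bytes(), the notifier pair above is the asynchronous alternative; a minimal sketch (my_rng_ready and my_rng_nb are hypothetical names):

static int my_rng_ready(struct notifier_block *nb, unsigned long action,
			void *data)
{
	/* Runs once, via process_random_ready_list(), when the pool seeds. */
	return NOTIFY_DONE;
}

static struct notifier_block my_rng_nb = { .notifier_call = my_rng_ready };

static void my_init(void)
{
	/* -EALREADY means the pool seeded before we registered. */
	if (register_random_ready_notifier(&my_rng_nb) == -EALREADY)
		my_rng_ready(&my_rng_nb, 0, NULL);
}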
1654 +
1655 +-module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
1656 +-MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
1657 ++static void __cold process_random_ready_list(void)
1658 ++{
1659 ++ unsigned long flags;
1660 +
1661 +-/**********************************************************************
1662 ++ spin_lock_irqsave(&random_ready_chain_lock, flags);
1663 ++ raw_notifier_call_chain(&random_ready_chain, 0, NULL);
1664 ++ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
1665 ++}
1666 ++
1667 ++#define warn_unseeded_randomness() \
1668 ++ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
1669 ++ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
1670 ++ __func__, (void *)_RET_IP_, crng_init)
1671 ++
1672 ++
1673 ++/*********************************************************************
1674 + *
1675 +- * OS independent entropy store. Here are the functions which handle
1676 +- * storing entropy in an entropy pool.
1677 ++ * Fast key erasure RNG, the "crng".
1678 + *
1679 +- **********************************************************************/
1680 ++ * These functions expand entropy from the entropy extractor into
1681 ++ * long streams for external consumption using the "fast key erasure"
1682 ++ * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
1683 ++ *
1684 ++ * There are a few exported interfaces for use by other drivers:
1685 ++ *
1686 ++ * void get_random_bytes(void *buf, size_t len)
1687 ++ * u32 get_random_u32()
1688 ++ * u64 get_random_u64()
1689 ++ * unsigned int get_random_int()
1690 ++ * unsigned long get_random_long()
1691 ++ *
1692 ++ * These interfaces will return the requested number of random bytes
1693 ++ * into the given buffer or as a return value. This is equivalent to
1694 ++ * a read from /dev/urandom. The u32, u64, int, and long family of
1695 ++ * functions may be higher performance for one-off random integers,
1696 ++ * because they do a bit of buffering and do not invoke reseeding
1697 ++ * until the buffer is emptied.
1698 ++ *
1699 ++ *********************************************************************/
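To make the cost note above concrete, a short sketch of the two styles of consumer (example_consumers is a hypothetical function):

static void example_consumers(void)
{
	u32 id = get_random_u32();     /* one-off integer; per-cpu batch */
	u64 cookie = get_random_u64();
	u8 nonce[12];

	get_random_bytes(nonce, sizeof(nonce)); /* arbitrary-length path */
	(void)id;
	(void)cookie;
}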
1700 +
1701 +-struct entropy_store;
1702 +-struct entropy_store {
1703 +- /* read-only data: */
1704 +- const struct poolinfo *poolinfo;
1705 +- __u32 *pool;
1706 +- const char *name;
1707 ++enum {
1708 ++ CRNG_RESEED_START_INTERVAL = HZ,
1709 ++ CRNG_RESEED_INTERVAL = 60 * HZ
1710 ++};
1711 +
1712 +- /* read-write data: */
1713 ++static struct {
1714 ++ u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
1715 ++ unsigned long birth;
1716 ++ unsigned long generation;
1717 + spinlock_t lock;
1718 +- unsigned short add_ptr;
1719 +- unsigned short input_rotate;
1720 +- int entropy_count;
1721 +- unsigned int initialized:1;
1722 +- unsigned int last_data_init:1;
1723 +- __u8 last_data[EXTRACT_SIZE];
1724 ++} base_crng = {
1725 ++ .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
1726 + };
1727 +
1728 +-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1729 +- size_t nbytes, int min, int rsvd);
1730 +-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
1731 +- size_t nbytes, int fips);
1732 +-
1733 +-static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
1734 +-static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
1735 +-
1736 +-static struct entropy_store input_pool = {
1737 +- .poolinfo = &poolinfo_table[0],
1738 +- .name = "input",
1739 +- .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
1740 +- .pool = input_pool_data
1741 ++struct crng {
1742 ++ u8 key[CHACHA_KEY_SIZE];
1743 ++ unsigned long generation;
1744 ++ local_lock_t lock;
1745 + };
1746 +
1747 +-static __u32 const twist_table[8] = {
1748 +- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
1749 +- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
1750 +-
1751 +-/*
1752 +- * This function adds bytes into the entropy "pool". It does not
1753 +- * update the entropy estimate. The caller should call
1754 +- * credit_entropy_bits if this is appropriate.
1755 +- *
1756 +- * The pool is stirred with a primitive polynomial of the appropriate
1757 +- * degree, and then twisted. We twist by three bits at a time because
1758 +- * it's cheap to do so and helps slightly in the expected case where
1759 +- * the entropy is concentrated in the low-order bits.
1760 +- */
1761 +-static void _mix_pool_bytes(struct entropy_store *r, const void *in,
1762 +- int nbytes)
1763 +-{
1764 +- unsigned long i, tap1, tap2, tap3, tap4, tap5;
1765 +- int input_rotate;
1766 +- int wordmask = r->poolinfo->poolwords - 1;
1767 +- const char *bytes = in;
1768 +- __u32 w;
1769 +-
1770 +- tap1 = r->poolinfo->tap1;
1771 +- tap2 = r->poolinfo->tap2;
1772 +- tap3 = r->poolinfo->tap3;
1773 +- tap4 = r->poolinfo->tap4;
1774 +- tap5 = r->poolinfo->tap5;
1775 +-
1776 +- input_rotate = r->input_rotate;
1777 +- i = r->add_ptr;
1778 +-
1779 +- /* mix one byte at a time to simplify size handling and churn faster */
1780 +- while (nbytes--) {
1781 +- w = rol32(*bytes++, input_rotate);
1782 +- i = (i - 1) & wordmask;
1783 +-
1784 +- /* XOR in the various taps */
1785 +- w ^= r->pool[i];
1786 +- w ^= r->pool[(i + tap1) & wordmask];
1787 +- w ^= r->pool[(i + tap2) & wordmask];
1788 +- w ^= r->pool[(i + tap3) & wordmask];
1789 +- w ^= r->pool[(i + tap4) & wordmask];
1790 +- w ^= r->pool[(i + tap5) & wordmask];
1791 +-
1792 +- /* Mix the result back in with a twist */
1793 +- r->pool[i] = (w >> 3) ^ twist_table[w & 7];
1794 +-
1795 +- /*
1796 +- * Normally, we add 7 bits of rotation to the pool.
1797 +- * At the beginning of the pool, add an extra 7 bits
1798 +- * rotation, so that successive passes spread the
1799 +- * input bits across the pool evenly.
1800 +- */
1801 +- input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
1802 +- }
1803 +-
1804 +- r->input_rotate = input_rotate;
1805 +- r->add_ptr = i;
1806 +-}
1807 ++static DEFINE_PER_CPU(struct crng, crngs) = {
1808 ++ .generation = ULONG_MAX,
1809 ++ .lock = INIT_LOCAL_LOCK(crngs.lock),
1810 ++};
1811 +
1812 +-static void __mix_pool_bytes(struct entropy_store *r, const void *in,
1813 +- int nbytes)
1814 +-{
1815 +- trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
1816 +- _mix_pool_bytes(r, in, nbytes);
1817 +-}
1818 ++/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
1819 ++static void extract_entropy(void *buf, size_t len);
1820 +
1821 +-static void mix_pool_bytes(struct entropy_store *r, const void *in,
1822 +- int nbytes)
1823 ++/* This extracts a new crng key from the input pool. */
1824 ++static void crng_reseed(void)
1825 + {
1826 + unsigned long flags;
1827 ++ unsigned long next_gen;
1828 ++ u8 key[CHACHA_KEY_SIZE];
1829 +
1830 +- trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
1831 +- spin_lock_irqsave(&r->lock, flags);
1832 +- _mix_pool_bytes(r, in, nbytes);
1833 +- spin_unlock_irqrestore(&r->lock, flags);
1834 +-}
1835 ++ extract_entropy(key, sizeof(key));
1836 +
1837 +-struct fast_pool {
1838 +- __u32 pool[4];
1839 +- unsigned long last;
1840 +- unsigned short reg_idx;
1841 +- unsigned char count;
1842 +-};
1843 ++ /*
1844 ++ * We copy the new key into the base_crng, overwriting the old one,
1845 ++ * and update the generation counter. We avoid hitting ULONG_MAX,
1846 ++ * because the per-cpu crngs are initialized to ULONG_MAX, so this
1847 ++ * forces new CPUs that come online to always initialize.
1848 ++ */
1849 ++ spin_lock_irqsave(&base_crng.lock, flags);
1850 ++ memcpy(base_crng.key, key, sizeof(base_crng.key));
1851 ++ next_gen = base_crng.generation + 1;
1852 ++ if (next_gen == ULONG_MAX)
1853 ++ ++next_gen;
1854 ++ WRITE_ONCE(base_crng.generation, next_gen);
1855 ++ WRITE_ONCE(base_crng.birth, jiffies);
1856 ++ if (!static_branch_likely(&crng_is_ready))
1857 ++ crng_init = CRNG_READY;
1858 ++ spin_unlock_irqrestore(&base_crng.lock, flags);
1859 ++ memzero_explicit(key, sizeof(key));
1860 ++}
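The generation bookkeeping is easiest to follow with concrete values; an illustrative trace (a note, not code):

/*
 * base_crng.generation: 0 -> 1 -> ... -> ULONG_MAX - 1 -> 0 -> ...
 * (ULONG_MAX itself is skipped). Per-cpu crngs boot at ULONG_MAX, so
 * their first use can never match base_crng.generation and always
 * resyncs in crng_make_state().
 */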
1861 +
1862 + /*
1863 +- * This is a fast mixing routine used by the interrupt randomness
1864 +- * collector. It's hardcoded for a 128-bit pool and assumes that any
1865 +- * locks that might be needed are taken by the caller.
1866 ++ * This generates a ChaCha block using the provided key, and then
1867 ++ * immediately overwrites that key with half the block. It returns
1868 ++ * the resultant ChaCha state to the user, along with the second
1869 ++ * half of the block containing 32 bytes of random data that may
1870 ++ * be used; random_data_len may not be greater than 32.
1871 ++ *
1872 ++ * The returned ChaCha state contains within it a copy of the old
1873 ++ * key value, at index 4, so the state should always be zeroed out
1874 ++ * immediately after use in order to maintain forward secrecy.
1875 ++ * If the state cannot be erased in a timely manner, then it is
1876 ++ * safer to set the random_data parameter to &chacha_state[4] so
1877 ++ * that this function overwrites it before returning.
1878 + */
1879 +-static void fast_mix(struct fast_pool *f)
1880 ++static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
1881 ++ u32 chacha_state[CHACHA_STATE_WORDS],
1882 ++ u8 *random_data, size_t random_data_len)
1883 + {
1884 +- __u32 a = f->pool[0], b = f->pool[1];
1885 +- __u32 c = f->pool[2], d = f->pool[3];
1886 +-
1887 +- a += b; c += d;
1888 +- b = rol32(b, 6); d = rol32(d, 27);
1889 +- d ^= a; b ^= c;
1890 ++ u8 first_block[CHACHA_BLOCK_SIZE];
1891 +
1892 +- a += b; c += d;
1893 +- b = rol32(b, 16); d = rol32(d, 14);
1894 +- d ^= a; b ^= c;
1895 ++ BUG_ON(random_data_len > 32);
1896 +
1897 +- a += b; c += d;
1898 +- b = rol32(b, 6); d = rol32(d, 27);
1899 +- d ^= a; b ^= c;
1900 ++ chacha_init_consts(chacha_state);
1901 ++ memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
1902 ++ memset(&chacha_state[12], 0, sizeof(u32) * 4);
1903 ++ chacha20_block(chacha_state, first_block);
1904 +
1905 +- a += b; c += d;
1906 +- b = rol32(b, 16); d = rol32(d, 14);
1907 +- d ^= a; b ^= c;
1908 +-
1909 +- f->pool[0] = a; f->pool[1] = b;
1910 +- f->pool[2] = c; f->pool[3] = d;
1911 +- f->count++;
1912 ++ memcpy(key, first_block, CHACHA_KEY_SIZE);
1913 ++ memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
1914 ++ memzero_explicit(first_block, sizeof(first_block));
1915 + }
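The erasure itself is just a split of the single 64-byte ChaCha block, which a diagram makes concrete:

/*
 * one chacha20_block() output, 64 bytes:
 *
 *   first_block[ 0..31] -> new key (overwrites the key just used)
 *   first_block[32..63] -> random_data returned to the caller (<= 32)
 *
 * so capturing the state later never reveals earlier outputs.
 */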
1916 +
1917 +-static void process_random_ready_list(void)
1918 +-{
1919 +- unsigned long flags;
1920 +- struct random_ready_callback *rdy, *tmp;
1921 +-
1922 +- spin_lock_irqsave(&random_ready_list_lock, flags);
1923 +- list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
1924 +- struct module *owner = rdy->owner;
1925 +-
1926 +- list_del_init(&rdy->list);
1927 +- rdy->func(rdy);
1928 +- module_put(owner);
1929 ++/*
1930 ++ * Return whether the crng seed is considered to be sufficiently old
1931 ++ * that a reseeding is needed. This happens if the last reseeding
1932 ++ * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
1933 ++ * proportional to the uptime.
1934 ++ */
1935 ++static bool crng_has_old_seed(void)
1936 ++{
1937 ++ static bool early_boot = true;
1938 ++ unsigned long interval = CRNG_RESEED_INTERVAL;
1939 ++
1940 ++ if (unlikely(READ_ONCE(early_boot))) {
1941 ++ time64_t uptime = ktime_get_seconds();
1942 ++ if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
1943 ++ WRITE_ONCE(early_boot, false);
1944 ++ else
1945 ++ interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
1946 ++ (unsigned int)uptime / 2 * HZ);
1947 + }
1948 +- spin_unlock_irqrestore(&random_ready_list_lock, flags);
1949 ++ return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
1950 + }
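The early-boot scaling deserves a worked example (assuming HZ = 100 purely for concreteness):

/*
 * At uptime 10s: interval = max(CRNG_RESEED_START_INTERVAL,
 * 10 / 2 * HZ) = 5 * HZ, i.e. a reseed is due every ~5 seconds.
 * Once uptime reaches 2 * (CRNG_RESEED_INTERVAL / HZ) = 120s,
 * early_boot latches false and the fixed 60 * HZ interval applies.
 */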
1951 +
1952 + /*
1953 +- * Credit (or debit) the entropy store with n bits of entropy.
1954 +- * Use credit_entropy_bits_safe() if the value comes from userspace
1955 +- * or otherwise should be checked for extreme values.
1956 ++ * This function returns a ChaCha state that you may use for generating
1957 ++ * random data. It also returns up to 32 bytes on its own of random data
1958 ++ * that may be used; random_data_len may not be greater than 32.
1959 + */
1960 +-static void credit_entropy_bits(struct entropy_store *r, int nbits)
1961 ++static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
1962 ++ u8 *random_data, size_t random_data_len)
1963 + {
1964 +- int entropy_count, orig, has_initialized = 0;
1965 +- const int pool_size = r->poolinfo->poolfracbits;
1966 +- int nfrac = nbits << ENTROPY_SHIFT;
1967 +-
1968 +- if (!nbits)
1969 +- return;
1970 ++ unsigned long flags;
1971 ++ struct crng *crng;
1972 +
1973 +-retry:
1974 +- entropy_count = orig = READ_ONCE(r->entropy_count);
1975 +- if (nfrac < 0) {
1976 +- /* Debit */
1977 +- entropy_count += nfrac;
1978 +- } else {
1979 +- /*
1980 +- * Credit: we have to account for the possibility of
1981 +- * overwriting already present entropy. Even in the
1982 +- * ideal case of pure Shannon entropy, new contributions
1983 +- * approach the full value asymptotically:
1984 +- *
1985 +- * entropy <- entropy + (pool_size - entropy) *
1986 +- * (1 - exp(-add_entropy/pool_size))
1987 +- *
1988 +- * For add_entropy <= pool_size/2 then
1989 +- * (1 - exp(-add_entropy/pool_size)) >=
1990 +- * (add_entropy/pool_size)*0.7869...
1991 +- * so we can approximate the exponential with
1992 +- * 3/4*add_entropy/pool_size and still be on the
1993 +- * safe side by adding at most pool_size/2 at a time.
1994 +- *
1995 +- * The use of pool_size-2 in the while statement is to
1996 +- * prevent rounding artifacts from making the loop
1997 +- * arbitrarily long; this limits the loop to log2(pool_size)*2
1998 +- * turns no matter how large nbits is.
1999 +- */
2000 +- int pnfrac = nfrac;
2001 +- const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
2002 +- /* The +2 corresponds to the /4 in the denominator */
2003 +-
2004 +- do {
2005 +- unsigned int anfrac = min(pnfrac, pool_size/2);
2006 +- unsigned int add =
2007 +- ((pool_size - entropy_count)*anfrac*3) >> s;
2008 +-
2009 +- entropy_count += add;
2010 +- pnfrac -= anfrac;
2011 +- } while (unlikely(entropy_count < pool_size-2 && pnfrac));
2012 +- }
2013 ++ BUG_ON(random_data_len > 32);
2014 +
2015 +- if (WARN_ON(entropy_count < 0)) {
2016 +- pr_warn("negative entropy/overflow: pool %s count %d\n",
2017 +- r->name, entropy_count);
2018 +- entropy_count = 0;
2019 +- } else if (entropy_count > pool_size)
2020 +- entropy_count = pool_size;
2021 +- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
2022 +- goto retry;
2023 +-
2024 +- if (has_initialized) {
2025 +- r->initialized = 1;
2026 +- kill_fasync(&fasync, SIGIO, POLL_IN);
2027 ++ /*
2028 ++ * For the fast path, we first check whether we're ready without the
2029 ++ * lock, and then re-check once locked. In the case where we're really not
2030 ++ * ready, we do fast key erasure with the base_crng directly, extracting
2031 ++ * when crng_init is CRNG_EMPTY.
2032 ++ */
2033 ++ if (!crng_ready()) {
2034 ++ bool ready;
2035 ++
2036 ++ spin_lock_irqsave(&base_crng.lock, flags);
2037 ++ ready = crng_ready();
2038 ++ if (!ready) {
2039 ++ if (crng_init == CRNG_EMPTY)
2040 ++ extract_entropy(base_crng.key, sizeof(base_crng.key));
2041 ++ crng_fast_key_erasure(base_crng.key, chacha_state,
2042 ++ random_data, random_data_len);
2043 ++ }
2044 ++ spin_unlock_irqrestore(&base_crng.lock, flags);
2045 ++ if (!ready)
2046 ++ return;
2047 + }
2048 +
2049 +- trace_credit_entropy_bits(r->name, nbits,
2050 +- entropy_count >> ENTROPY_SHIFT, _RET_IP_);
2051 ++ /*
2052 ++ * If the base_crng is old enough, we reseed, which in turn bumps the
2053 ++ * generation counter that we check below.
2054 ++ */
2055 ++ if (unlikely(crng_has_old_seed()))
2056 ++ crng_reseed();
2057 +
2058 +- if (r == &input_pool) {
2059 +- int entropy_bits = entropy_count >> ENTROPY_SHIFT;
2060 ++ local_lock_irqsave(&crngs.lock, flags);
2061 ++ crng = raw_cpu_ptr(&crngs);
2062 +
2063 +- if (crng_init < 2) {
2064 +- if (entropy_bits < 128)
2065 +- return;
2066 +- crng_reseed(&primary_crng, r);
2067 +- entropy_bits = ENTROPY_BITS(r);
2068 +- }
2069 ++ /*
2070 ++ * If our per-cpu crng is older than the base_crng, then it means
2071 ++ * somebody reseeded the base_crng. In that case, we do fast key
2072 ++ * erasure on the base_crng, and use its output as the new key
2073 ++ * for our per-cpu crng. This brings us up to date with base_crng.
2074 ++ */
2075 ++ if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
2076 ++ spin_lock(&base_crng.lock);
2077 ++ crng_fast_key_erasure(base_crng.key, chacha_state,
2078 ++ crng->key, sizeof(crng->key));
2079 ++ crng->generation = base_crng.generation;
2080 ++ spin_unlock(&base_crng.lock);
2081 + }
2082 ++
2083 ++ /*
2084 ++ * Finally, when we've made it this far, our per-cpu crng has an up
2085 ++ * to date key, and we can do fast key erasure with it to produce
2086 ++ * some random data and a ChaCha state for the caller. All other
2087 ++ * branches of this function are "unlikely", so most of the time we
2088 ++ * should wind up here immediately.
2089 ++ */
2090 ++ crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
2091 ++ local_unlock_irqrestore(&crngs.lock, flags);
2092 + }
2093 +
2094 +-static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
2095 ++static void _get_random_bytes(void *buf, size_t len)
2096 + {
2097 +- const int nbits_max = r->poolinfo->poolwords * 32;
2098 +-
2099 +- if (nbits < 0)
2100 +- return -EINVAL;
2101 ++ u32 chacha_state[CHACHA_STATE_WORDS];
2102 ++ u8 tmp[CHACHA_BLOCK_SIZE];
2103 ++ size_t first_block_len;
2104 +
2105 +- /* Cap the value to avoid overflows */
2106 +- nbits = min(nbits, nbits_max);
2107 ++ if (!len)
2108 ++ return;
2109 +
2110 +- credit_entropy_bits(r, nbits);
2111 +- return 0;
2112 +-}
2113 ++ first_block_len = min_t(size_t, 32, len);
2114 ++ crng_make_state(chacha_state, buf, first_block_len);
2115 ++ len -= first_block_len;
2116 ++ buf += first_block_len;
2117 +
2118 +-/*********************************************************************
2119 +- *
2120 +- * CRNG using CHACHA20
2121 +- *
2122 +- *********************************************************************/
2123 ++ while (len) {
2124 ++ if (len < CHACHA_BLOCK_SIZE) {
2125 ++ chacha20_block(chacha_state, tmp);
2126 ++ memcpy(buf, tmp, len);
2127 ++ memzero_explicit(tmp, sizeof(tmp));
2128 ++ break;
2129 ++ }
2130 +
2131 +-#define CRNG_RESEED_INTERVAL (300*HZ)
2132 ++ chacha20_block(chacha_state, buf);
2133 ++ if (unlikely(chacha_state[12] == 0))
2134 ++ ++chacha_state[13];
2135 ++ len -= CHACHA_BLOCK_SIZE;
2136 ++ buf += CHACHA_BLOCK_SIZE;
2137 ++ }
2138 +
2139 +-static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
2140 ++ memzero_explicit(chacha_state, sizeof(chacha_state));
2141 ++}
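One subtlety in the loop above: words 12 and 13 of the ChaCha state act as a 64-bit block counter, as this illustrative note spells out:

/*
 * chacha20_block() increments state[12] internally; when it wraps to
 * 0, the "++chacha_state[13]" above carries into the next word, so a
 * single key never repeats a keystream block.
 */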
2142 +
2143 +-#ifdef CONFIG_NUMA
2144 + /*
2145 +- * Hack to deal with crazy userspace programs when they are all trying
2146 +- * to access /dev/urandom in parallel. The programs are almost
2147 +- * certainly doing something terribly wrong, but we'll work around
2148 +- * their brain damage.
2149 ++ * This function is the exported kernel interface. It returns some
2150 ++ * number of good random numbers, suitable for key generation, seeding
2151 ++ * TCP sequence numbers, etc. It does not rely on the hardware random
2152 ++ * number generator. For random bytes direct from the hardware RNG
2153 ++ * (when available), use get_random_bytes_arch(). In order to ensure
2154 ++ * that the randomness provided by this function is okay, the function
2155 ++ * wait_for_random_bytes() should be called and return 0 at least once
2156 ++ * at any point prior.
2157 + */
2158 +-static struct crng_state **crng_node_pool __read_mostly;
2159 +-#endif
2160 +-
2161 +-static void invalidate_batched_entropy(void);
2162 +-static void numa_crng_init(void);
2163 +-
2164 +-static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
2165 +-static int __init parse_trust_cpu(char *arg)
2166 ++void get_random_bytes(void *buf, size_t len)
2167 + {
2168 +- return kstrtobool(arg, &trust_cpu);
2169 ++ warn_unseeded_randomness();
2170 ++ _get_random_bytes(buf, len);
2171 + }
2172 +-early_param("random.trust_cpu", parse_trust_cpu);
2173 ++EXPORT_SYMBOL(get_random_bytes);
2174 +
2175 +-static bool crng_init_try_arch(struct crng_state *crng)
2176 ++static ssize_t get_random_bytes_user(struct iov_iter *iter)
2177 + {
2178 +- int i;
2179 +- bool arch_init = true;
2180 +- unsigned long rv;
2181 +-
2182 +- for (i = 4; i < 16; i++) {
2183 +- if (!arch_get_random_seed_long(&rv) &&
2184 +- !arch_get_random_long(&rv)) {
2185 +- rv = random_get_entropy();
2186 +- arch_init = false;
2187 +- }
2188 +- crng->state[i] ^= rv;
2189 ++ u32 chacha_state[CHACHA_STATE_WORDS];
2190 ++ u8 block[CHACHA_BLOCK_SIZE];
2191 ++ size_t ret = 0, copied;
2192 ++
2193 ++ if (unlikely(!iov_iter_count(iter)))
2194 ++ return 0;
2195 ++
2196 ++ /*
2197 ++ * Immediately overwrite the ChaCha key at index 4 with random
2198 ++ * bytes, in case userspace causes copy_to_user() below to sleep
2199 ++ * forever, so that we still retain forward secrecy in that case.
2200 ++ */
2201 ++ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
2202 ++ /*
2203 ++ * However, if we're doing a read of len <= 32, we don't need to
2204 ++ * use chacha_state after, so we can simply return those bytes to
2205 ++ * the user directly.
2206 ++ */
2207 ++ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
2208 ++ ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
2209 ++ goto out_zero_chacha;
2210 + }
2211 +
2212 +- return arch_init;
2213 +-}
2214 ++ for (;;) {
2215 ++ chacha20_block(chacha_state, block);
2216 ++ if (unlikely(chacha_state[12] == 0))
2217 ++ ++chacha_state[13];
2218 +
2219 +-static bool __init crng_init_try_arch_early(struct crng_state *crng)
2220 +-{
2221 +- int i;
2222 +- bool arch_init = true;
2223 +- unsigned long rv;
2224 +-
2225 +- for (i = 4; i < 16; i++) {
2226 +- if (!arch_get_random_seed_long_early(&rv) &&
2227 +- !arch_get_random_long_early(&rv)) {
2228 +- rv = random_get_entropy();
2229 +- arch_init = false;
2230 ++ copied = copy_to_iter(block, sizeof(block), iter);
2231 ++ ret += copied;
2232 ++ if (!iov_iter_count(iter) || copied != sizeof(block))
2233 ++ break;
2234 ++
2235 ++ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
2236 ++ if (ret % PAGE_SIZE == 0) {
2237 ++ if (signal_pending(current))
2238 ++ break;
2239 ++ cond_resched();
2240 + }
2241 +- crng->state[i] ^= rv;
2242 + }
2243 +
2244 +- return arch_init;
2245 ++ memzero_explicit(block, sizeof(block));
2246 ++out_zero_chacha:
2247 ++ memzero_explicit(chacha_state, sizeof(chacha_state));
2248 ++ return ret ? ret : -EFAULT;
2249 + }
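This function is where userspace reads ultimately land; seen from the other side of the boundary the equivalent is simply (illustrative userspace C, assuming glibc's <sys/random.h>):

#include <sys/random.h>

/* getrandom(2) and /dev/urandom reads both funnel into the kernel
 * path above. */
int read_random(unsigned char *buf, size_t len)
{
	ssize_t n = getrandom(buf, len, 0); /* blocks until the crng is ready */

	return n == (ssize_t)len ? 0 : -1;
}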
2250 +
2251 +-static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
2252 +-{
2253 +- chacha_init_consts(crng->state);
2254 +- _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
2255 +- crng_init_try_arch(crng);
2256 +- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
2257 +-}
2258 ++/*
2259 ++ * Batched entropy returns random integers. The quality of the random
2260 ++ * number is as good as /dev/urandom. In order to ensure that the randomness
2261 ++ * provided by this function is okay, the function wait_for_random_bytes()
2262 ++ * should be called and return 0 at least once at any point prior.
2263 ++ */
2264 +
2265 +-static void __init crng_initialize_primary(struct crng_state *crng)
2266 ++#define DEFINE_BATCHED_ENTROPY(type) \
2267 ++struct batch_ ##type { \
2268 ++ /* \
2269 ++ * We make this 1.5x a ChaCha block, so that we get the \
2270 ++ * remaining 32 bytes from fast key erasure, plus one full \
2271 ++ * block from the detached ChaCha state. We can increase \
2272 ++ * the size of this later if needed so long as we keep the \
2273 ++ * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
2274 ++ */ \
2275 ++ type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
2276 ++ local_lock_t lock; \
2277 ++ unsigned long generation; \
2278 ++ unsigned int position; \
2279 ++}; \
2280 ++ \
2281 ++static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
2282 ++ .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
2283 ++ .position = UINT_MAX \
2284 ++}; \
2285 ++ \
2286 ++type get_random_ ##type(void) \
2287 ++{ \
2288 ++ type ret; \
2289 ++ unsigned long flags; \
2290 ++ struct batch_ ##type *batch; \
2291 ++ unsigned long next_gen; \
2292 ++ \
2293 ++ warn_unseeded_randomness(); \
2294 ++ \
2295 ++ if (!crng_ready()) { \
2296 ++ _get_random_bytes(&ret, sizeof(ret)); \
2297 ++ return ret; \
2298 ++ } \
2299 ++ \
2300 ++ local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
2301 ++ batch = raw_cpu_ptr(&batched_entropy_##type); \
2302 ++ \
2303 ++ next_gen = READ_ONCE(base_crng.generation); \
2304 ++ if (batch->position >= ARRAY_SIZE(batch->entropy) || \
2305 ++ next_gen != batch->generation) { \
2306 ++ _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
2307 ++ batch->position = 0; \
2308 ++ batch->generation = next_gen; \
2309 ++ } \
2310 ++ \
2311 ++ ret = batch->entropy[batch->position]; \
2312 ++ batch->entropy[batch->position] = 0; \
2313 ++ ++batch->position; \
2314 ++ local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
2315 ++ return ret; \
2316 ++} \
2317 ++EXPORT_SYMBOL(get_random_ ##type);
2318 ++
2319 ++DEFINE_BATCHED_ENTROPY(u64)
2320 ++DEFINE_BATCHED_ENTROPY(u32)
2321 ++
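The sizing formula in the macro resolves to small concrete numbers; a worked note:

/*
 * CHACHA_BLOCK_SIZE * 3 / 2 = 96 bytes per batch: 12 entries for
 * get_random_u64(), 24 for get_random_u32(). Entries are zeroed as
 * they are handed out; position = UINT_MAX or a generation bump
 * forces a refill on the next call.
 */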
2322 ++#ifdef CONFIG_SMP
2323 ++/*
2324 ++ * This function is called when the CPU is coming up, with entry
2325 ++ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
2326 ++ */
2327 ++int __cold random_prepare_cpu(unsigned int cpu)
2328 + {
2329 +- chacha_init_consts(crng->state);
2330 +- _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
2331 +- if (crng_init_try_arch_early(crng) && trust_cpu) {
2332 +- invalidate_batched_entropy();
2333 +- numa_crng_init();
2334 +- crng_init = 2;
2335 +- pr_notice("crng done (trusting CPU's manufacturer)\n");
2336 +- }
2337 +- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
2338 +-}
2339 +-
2340 +-static void crng_finalize_init(struct crng_state *crng)
2341 +-{
2342 +- if (crng != &primary_crng || crng_init >= 2)
2343 +- return;
2344 +- if (!system_wq) {
2345 +- /* We can't call numa_crng_init until we have workqueues,
2346 +- * so mark this for processing later. */
2347 +- crng_need_final_init = true;
2348 +- return;
2349 +- }
2350 +-
2351 +- invalidate_batched_entropy();
2352 +- numa_crng_init();
2353 +- crng_init = 2;
2354 +- process_random_ready_list();
2355 +- wake_up_interruptible(&crng_init_wait);
2356 +- kill_fasync(&fasync, SIGIO, POLL_IN);
2357 +- pr_notice("crng init done\n");
2358 +- if (unseeded_warning.missed) {
2359 +- pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
2360 +- unseeded_warning.missed);
2361 +- unseeded_warning.missed = 0;
2362 +- }
2363 +- if (urandom_warning.missed) {
2364 +- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
2365 +- urandom_warning.missed);
2366 +- urandom_warning.missed = 0;
2367 +- }
2368 +-}
2369 +-
2370 +-#ifdef CONFIG_NUMA
2371 +-static void do_numa_crng_init(struct work_struct *work)
2372 +-{
2373 +- int i;
2374 +- struct crng_state *crng;
2375 +- struct crng_state **pool;
2376 +-
2377 +- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
2378 +- for_each_online_node(i) {
2379 +- crng = kmalloc_node(sizeof(struct crng_state),
2380 +- GFP_KERNEL | __GFP_NOFAIL, i);
2381 +- spin_lock_init(&crng->lock);
2382 +- crng_initialize_secondary(crng);
2383 +- pool[i] = crng;
2384 +- }
2385 +- /* pairs with READ_ONCE() in select_crng() */
2386 +- if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
2387 +- for_each_node(i)
2388 +- kfree(pool[i]);
2389 +- kfree(pool);
2390 +- }
2391 +-}
2392 +-
2393 +-static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
2394 +-
2395 +-static void numa_crng_init(void)
2396 +-{
2397 +- schedule_work(&numa_crng_init_work);
2398 +-}
2399 +-
2400 +-static struct crng_state *select_crng(void)
2401 +-{
2402 +- struct crng_state **pool;
2403 +- int nid = numa_node_id();
2404 +-
2405 +- /* pairs with cmpxchg_release() in do_numa_crng_init() */
2406 +- pool = READ_ONCE(crng_node_pool);
2407 +- if (pool && pool[nid])
2408 +- return pool[nid];
2409 +-
2410 +- return &primary_crng;
2411 +-}
2412 +-#else
2413 +-static void numa_crng_init(void) {}
2414 +-
2415 +-static struct crng_state *select_crng(void)
2416 +-{
2417 +- return &primary_crng;
2418 ++ /*
2419 ++ * When the cpu comes back online, immediately invalidate both
2420 ++ * the per-cpu crng and all batches, so that we serve fresh
2421 ++ * randomness.
2422 ++ */
2423 ++ per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
2424 ++ per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
2425 ++ per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
2426 ++ return 0;
2427 + }
2428 + #endif
2429 +
2430 + /*
2431 +- * crng_fast_load() can be called by code in the interrupt service
2432 +- * path. So we can't afford to dilly-dally. Returns the number of
2433 +- * bytes processed from cp.
2434 +- */
2435 +-static size_t crng_fast_load(const char *cp, size_t len)
2436 +-{
2437 +- unsigned long flags;
2438 +- char *p;
2439 +- size_t ret = 0;
2440 +-
2441 +- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
2442 +- return 0;
2443 +- if (crng_init != 0) {
2444 +- spin_unlock_irqrestore(&primary_crng.lock, flags);
2445 +- return 0;
2446 +- }
2447 +- p = (unsigned char *) &primary_crng.state[4];
2448 +- while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
2449 +- p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
2450 +- cp++; crng_init_cnt++; len--; ret++;
2451 +- }
2452 +- spin_unlock_irqrestore(&primary_crng.lock, flags);
2453 +- if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
2454 +- invalidate_batched_entropy();
2455 +- crng_init = 1;
2456 +- pr_notice("fast init done\n");
2457 +- }
2458 +- return ret;
2459 +-}
2460 +-
2461 +-/*
2462 +- * crng_slow_load() is called by add_device_randomness, which has two
2463 +- * attributes. (1) We can't trust that the buffer passed to it is
2464 +- * guaranteed to be unpredictable (so it might not have any entropy at
2465 +- * all), and (2) it doesn't have the performance constraints of
2466 +- * crng_fast_load().
2467 +- *
2468 +- * So we do something more comprehensive which is guaranteed to touch
2469 +- * all of the primary_crng's state, and which uses a LFSR with a
2470 +- * period of 255 as part of the mixing algorithm. Finally, we do
2471 +- * *not* advance crng_init_cnt since the buffer we get may be something
2472 +- * like a fixed DMI table (for example), which might very well be
2473 +- * unique to the machine, but is otherwise unvarying.
2474 +- */
2475 +-static int crng_slow_load(const char *cp, size_t len)
2476 +-{
2477 +- unsigned long flags;
2478 +- static unsigned char lfsr = 1;
2479 +- unsigned char tmp;
2480 +- unsigned i, max = CHACHA_KEY_SIZE;
2481 +- const char * src_buf = cp;
2482 +- char * dest_buf = (char *) &primary_crng.state[4];
2483 +-
2484 +- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
2485 +- return 0;
2486 +- if (crng_init != 0) {
2487 +- spin_unlock_irqrestore(&primary_crng.lock, flags);
2488 +- return 0;
2489 +- }
2490 +- if (len > max)
2491 +- max = len;
2492 +-
2493 +- for (i = 0; i < max ; i++) {
2494 +- tmp = lfsr;
2495 +- lfsr >>= 1;
2496 +- if (tmp & 1)
2497 +- lfsr ^= 0xE1;
2498 +- tmp = dest_buf[i % CHACHA_KEY_SIZE];
2499 +- dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
2500 +- lfsr += (tmp << 3) | (tmp >> 5);
2501 +- }
2502 +- spin_unlock_irqrestore(&primary_crng.lock, flags);
2503 +- return 1;
2504 +-}
2505 +-
2506 +-static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
2507 +-{
2508 +- unsigned long flags;
2509 +- int i, num;
2510 +- union {
2511 +- __u8 block[CHACHA_BLOCK_SIZE];
2512 +- __u32 key[8];
2513 +- } buf;
2514 +-
2515 +- if (r) {
2516 +- num = extract_entropy(r, &buf, 32, 16, 0);
2517 +- if (num == 0)
2518 +- return;
2519 +- } else {
2520 +- _extract_crng(&primary_crng, buf.block);
2521 +- _crng_backtrack_protect(&primary_crng, buf.block,
2522 +- CHACHA_KEY_SIZE);
2523 +- }
2524 +- spin_lock_irqsave(&crng->lock, flags);
2525 +- for (i = 0; i < 8; i++) {
2526 +- unsigned long rv;
2527 +- if (!arch_get_random_seed_long(&rv) &&
2528 +- !arch_get_random_long(&rv))
2529 +- rv = random_get_entropy();
2530 +- crng->state[i+4] ^= buf.key[i] ^ rv;
2531 +- }
2532 +- memzero_explicit(&buf, sizeof(buf));
2533 +- WRITE_ONCE(crng->init_time, jiffies);
2534 +- spin_unlock_irqrestore(&crng->lock, flags);
2535 +- crng_finalize_init(crng);
2536 +-}
2537 +-
2538 +-static void _extract_crng(struct crng_state *crng,
2539 +- __u8 out[CHACHA_BLOCK_SIZE])
2540 +-{
2541 +- unsigned long v, flags, init_time;
2542 +-
2543 +- if (crng_ready()) {
2544 +- init_time = READ_ONCE(crng->init_time);
2545 +- if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
2546 +- time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
2547 +- crng_reseed(crng, crng == &primary_crng ?
2548 +- &input_pool : NULL);
2549 +- }
2550 +- spin_lock_irqsave(&crng->lock, flags);
2551 +- if (arch_get_random_long(&v))
2552 +- crng->state[14] ^= v;
2553 +- chacha20_block(&crng->state[0], out);
2554 +- if (crng->state[12] == 0)
2555 +- crng->state[13]++;
2556 +- spin_unlock_irqrestore(&crng->lock, flags);
2557 +-}
2558 +-
2559 +-static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
2560 +-{
2561 +- _extract_crng(select_crng(), out);
2562 +-}
2563 +-
2564 +-/*
2565 +- * Use the leftover bytes from the CRNG block output (if there is
2566 +- * enough) to mutate the CRNG key to provide backtracking protection.
2567 ++ * This function will use the architecture-specific hardware random
2568 ++ * number generator if it is available. It is not recommended for
2569 ++ * use. Use get_random_bytes() instead. It returns the number of
2570 ++ * bytes filled in.
2571 + */
2572 +-static void _crng_backtrack_protect(struct crng_state *crng,
2573 +- __u8 tmp[CHACHA_BLOCK_SIZE], int used)
2574 ++size_t __must_check get_random_bytes_arch(void *buf, size_t len)
2575 + {
2576 +- unsigned long flags;
2577 +- __u32 *s, *d;
2578 +- int i;
2579 +-
2580 +- used = round_up(used, sizeof(__u32));
2581 +- if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
2582 +- extract_crng(tmp);
2583 +- used = 0;
2584 +- }
2585 +- spin_lock_irqsave(&crng->lock, flags);
2586 +- s = (__u32 *) &tmp[used];
2587 +- d = &crng->state[4];
2588 +- for (i=0; i < 8; i++)
2589 +- *d++ ^= *s++;
2590 +- spin_unlock_irqrestore(&crng->lock, flags);
2591 +-}
2592 ++ size_t left = len;
2593 ++ u8 *p = buf;
2594 +
2595 +-static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
2596 +-{
2597 +- _crng_backtrack_protect(select_crng(), tmp, used);
2598 +-}
2599 +-
2600 +-static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
2601 +-{
2602 +- ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
2603 +- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
2604 +- int large_request = (nbytes > 256);
2605 +-
2606 +- while (nbytes) {
2607 +- if (large_request && need_resched()) {
2608 +- if (signal_pending(current)) {
2609 +- if (ret == 0)
2610 +- ret = -ERESTARTSYS;
2611 +- break;
2612 +- }
2613 +- schedule();
2614 +- }
2615 ++ while (left) {
2616 ++ unsigned long v;
2617 ++ size_t block_len = min_t(size_t, left, sizeof(unsigned long));
2618 +
2619 +- extract_crng(tmp);
2620 +- i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
2621 +- if (copy_to_user(buf, tmp, i)) {
2622 +- ret = -EFAULT;
2623 ++ if (!arch_get_random_long(&v))
2624 + break;
2625 +- }
2626 +
2627 +- nbytes -= i;
2628 +- buf += i;
2629 +- ret += i;
2630 ++ memcpy(p, &v, block_len);
2631 ++ p += block_len;
2632 ++ left -= block_len;
2633 + }
2634 +- crng_backtrack_protect(tmp, i);
2635 +-
2636 +- /* Wipe data just written to memory */
2637 +- memzero_explicit(tmp, sizeof(tmp));
2638 +
2639 +- return ret;
2640 ++ return len - left;
2641 + }
2642 ++EXPORT_SYMBOL(get_random_bytes_arch);
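Since the fill can be short (no hardware RNG, or one that runs dry), a hypothetical caller honors the __must_check return like so:

/* Hypothetical caller: fall back to the crng for any shortfall. */
static void example_arch_seed(u8 seed[16])
{
	size_t got = get_random_bytes_arch(seed, 16);

	if (got < 16)
		get_random_bytes(seed + got, 16 - got);
}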
2643 +
2644 +
2645 +-/*********************************************************************
2646 ++/**********************************************************************
2647 + *
2648 +- * Entropy input management
2649 ++ * Entropy accumulation and extraction routines.
2650 + *
2651 +- *********************************************************************/
2652 ++ * Callers may add entropy via:
2653 ++ *
2654 ++ * static void mix_pool_bytes(const void *buf, size_t len)
2655 ++ *
2656 ++ * After which, if added entropy should be credited:
2657 ++ *
2658 ++ * static void credit_init_bits(size_t bits)
2659 ++ *
2660 ++ * Finally, extract entropy via:
2661 ++ *
2662 ++ * static void extract_entropy(void *buf, size_t len)
2663 ++ *
2664 ++ **********************************************************************/
2665 +
2666 +-/* There is one of these per entropy source */
2667 +-struct timer_rand_state {
2668 +- cycles_t last_time;
2669 +- long last_delta, last_delta2;
2670 ++enum {
2671 ++ POOL_BITS = BLAKE2S_HASH_SIZE * 8,
2672 ++ POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
2673 ++ POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
2674 + };
2675 +
2676 +-#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
2677 ++static struct {
2678 ++ struct blake2s_state hash;
2679 ++ spinlock_t lock;
2680 ++ unsigned int init_bits;
2681 ++} input_pool = {
2682 ++ .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
2683 ++ BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
2684 ++ BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
2685 ++ .hash.outlen = BLAKE2S_HASH_SIZE,
2686 ++ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
2687 ++};
2688 ++
2689 ++static void _mix_pool_bytes(const void *buf, size_t len)
2690 ++{
2691 ++ blake2s_update(&input_pool.hash, buf, len);
2692 ++}
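The xor folded into .hash.h above is just blake2s_init() unrolled into a static initializer; the arithmetic, as an explanatory note (assuming the standard BLAKE2s parameter-block layout):

/*
 * First parameter-block word: digest_length = 32, key_length = 0,
 * fanout = 1, depth = 1 packs to 0x01010020 little-endian, which is
 * exactly 0x01010000 | BLAKE2S_HASH_SIZE as xored into BLAKE2S_IV0.
 */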
2693 +
2694 + /*
2695 +- * Add device- or boot-specific data to the input pool to help
2696 +- * initialize it.
2697 +- *
2698 +- * None of this adds any entropy; it is meant to avoid the problem of
2699 +- * the entropy pool having similar initial state across largely
2700 +- * identical devices.
2701 ++ * This function adds bytes into the input pool. It does not
2702 ++ * update the initialization bit counter; the caller should call
2703 ++ * credit_init_bits if this is appropriate.
2704 + */
2705 +-void add_device_randomness(const void *buf, unsigned int size)
2706 ++static void mix_pool_bytes(const void *buf, size_t len)
2707 + {
2708 +- unsigned long time = random_get_entropy() ^ jiffies;
2709 + unsigned long flags;
2710 +
2711 +- if (!crng_ready() && size)
2712 +- crng_slow_load(buf, size);
2713 +-
2714 +- trace_add_device_randomness(size, _RET_IP_);
2715 + spin_lock_irqsave(&input_pool.lock, flags);
2716 +- _mix_pool_bytes(&input_pool, buf, size);
2717 +- _mix_pool_bytes(&input_pool, &time, sizeof(time));
2718 ++ _mix_pool_bytes(buf, len);
2719 + spin_unlock_irqrestore(&input_pool.lock, flags);
2720 + }
2721 +-EXPORT_SYMBOL(add_device_randomness);
2722 +-
2723 +-static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
2724 +
2725 + /*
2726 +- * This function adds entropy to the entropy "pool" by using timing
2727 +- * delays. It uses the timer_rand_state structure to make an estimate
2728 +- * of how many bits of entropy this call has added to the pool.
2729 +- *
2730 +- * The number "num" is also added to the pool - it should somehow describe
2731 +- * the type of event which just happened. This is currently 0-255 for
2732 +- * keyboard scan codes, and 256 upwards for interrupts.
2733 +- *
2734 ++ * This is an HKDF-like construction for using the hashed collected entropy
2735 ++ * as a PRF key that is then expanded block-by-block.
2736 + */
2737 +-static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
2738 ++static void extract_entropy(void *buf, size_t len)
2739 + {
2740 +- struct entropy_store *r;
2741 ++ unsigned long flags;
2742 ++ u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
2743 + struct {
2744 +- long jiffies;
2745 +- unsigned cycles;
2746 +- unsigned num;
2747 +- } sample;
2748 +- long delta, delta2, delta3;
2749 +-
2750 +- sample.jiffies = jiffies;
2751 +- sample.cycles = random_get_entropy();
2752 +- sample.num = num;
2753 +- r = &input_pool;
2754 +- mix_pool_bytes(r, &sample, sizeof(sample));
2755 +-
2756 +- /*
2757 +- * Calculate number of bits of randomness we probably added.
2758 +- * We take into account the first, second and third-order deltas
2759 +- * in order to make our estimate.
2760 +- */
2761 +- delta = sample.jiffies - READ_ONCE(state->last_time);
2762 +- WRITE_ONCE(state->last_time, sample.jiffies);
2763 +-
2764 +- delta2 = delta - READ_ONCE(state->last_delta);
2765 +- WRITE_ONCE(state->last_delta, delta);
2766 +-
2767 +- delta3 = delta2 - READ_ONCE(state->last_delta2);
2768 +- WRITE_ONCE(state->last_delta2, delta2);
2769 +-
2770 +- if (delta < 0)
2771 +- delta = -delta;
2772 +- if (delta2 < 0)
2773 +- delta2 = -delta2;
2774 +- if (delta3 < 0)
2775 +- delta3 = -delta3;
2776 +- if (delta > delta2)
2777 +- delta = delta2;
2778 +- if (delta > delta3)
2779 +- delta = delta3;
2780 +-
2781 +- /*
2782 +- * delta is now minimum absolute delta.
2783 +- * Round down by 1 bit on general principles,
2784 +- * and limit entropy estimate to 12 bits.
2785 +- */
2786 +- credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
2787 +-}
2788 +-
2789 +-void add_input_randomness(unsigned int type, unsigned int code,
2790 +- unsigned int value)
2791 +-{
2792 +- static unsigned char last_value;
2793 +-
2794 +- /* ignore autorepeat and the like */
2795 +- if (value == last_value)
2796 +- return;
2797 ++ unsigned long rdseed[32 / sizeof(long)];
2798 ++ size_t counter;
2799 ++ } block;
2800 ++ size_t i;
2801 ++
2802 ++ for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
2803 ++ if (!arch_get_random_seed_long(&block.rdseed[i]) &&
2804 ++ !arch_get_random_long(&block.rdseed[i]))
2805 ++ block.rdseed[i] = random_get_entropy();
2806 ++ }
2807 +
2808 +- last_value = value;
2809 +- add_timer_randomness(&input_timer_state,
2810 +- (type << 4) ^ code ^ (code >> 4) ^ value);
2811 +- trace_add_input_randomness(ENTROPY_BITS(&input_pool));
2812 +-}
2813 +-EXPORT_SYMBOL_GPL(add_input_randomness);
2814 ++ spin_lock_irqsave(&input_pool.lock, flags);
2815 +
2816 +-static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
2817 ++ /* seed = HASHPRF(last_key, entropy_input) */
2818 ++ blake2s_final(&input_pool.hash, seed);
2819 +
2820 +-#ifdef ADD_INTERRUPT_BENCH
2821 +-static unsigned long avg_cycles, avg_deviation;
2822 ++ /* next_key = HASHPRF(seed, RDSEED || 0) */
2823 ++ block.counter = 0;
2824 ++ blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
2825 ++ blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
2826 +
2827 +-#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
2828 +-#define FIXED_1_2 (1 << (AVG_SHIFT-1))
2829 ++ spin_unlock_irqrestore(&input_pool.lock, flags);
2830 ++ memzero_explicit(next_key, sizeof(next_key));
2831 ++
2832 ++ while (len) {
2833 ++ i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
2834 ++ /* output = HASHPRF(seed, RDSEED || ++counter) */
2835 ++ ++block.counter;
2836 ++ blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
2837 ++ len -= i;
2838 ++ buf += i;
2839 ++ }
2840 +
2841 +-static void add_interrupt_bench(cycles_t start)
2842 +-{
2843 +- long delta = random_get_entropy() - start;
2844 +-
2845 +- /* Use a weighted moving average */
2846 +- delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
2847 +- avg_cycles += delta;
2848 +- /* And average deviation */
2849 +- delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
2850 +- avg_deviation += delta;
2851 ++ memzero_explicit(seed, sizeof(seed));
2852 ++ memzero_explicit(&block, sizeof(block));
2853 + }
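Written out, the routine above has the familiar extract-then-expand shape; illustrative notation, not code:

/*
 *   seed     = BLAKE2s(pool contents)                   (extract)
 *   next_key = BLAKE2s(key = seed, RDSEED || 0)         (ratchet)
 *   out[n]   = BLAKE2s(key = seed, RDSEED || n), n >= 1 (expand)
 *
 * As in HKDF-Expand, the counter keeps every output block distinct,
 * and re-keying the pool with next_key gives forward secrecy.
 */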
2854 +-#else
2855 +-#define add_interrupt_bench(x)
2856 +-#endif
2857 +-
2858 +-static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
2859 +-{
2860 +- __u32 *ptr = (__u32 *) regs;
2861 +- unsigned int idx;
2862 +
2863 +- if (regs == NULL)
2864 +- return 0;
2865 +- idx = READ_ONCE(f->reg_idx);
2866 +- if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
2867 +- idx = 0;
2868 +- ptr += idx++;
2869 +- WRITE_ONCE(f->reg_idx, idx);
2870 +- return *ptr;
2871 +-}
2872 ++#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
2873 +
2874 +-void add_interrupt_randomness(int irq, int irq_flags)
2875 ++static void __cold _credit_init_bits(size_t bits)
2876 + {
2877 +- struct entropy_store *r;
2878 +- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
2879 +- struct pt_regs *regs = get_irq_regs();
2880 +- unsigned long now = jiffies;
2881 +- cycles_t cycles = random_get_entropy();
2882 +- __u32 c_high, j_high;
2883 +- __u64 ip;
2884 +- unsigned long seed;
2885 +- int credit = 0;
2886 +-
2887 +- if (cycles == 0)
2888 +- cycles = get_reg(fast_pool, regs);
2889 +- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
2890 +- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
2891 +- fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
2892 +- fast_pool->pool[1] ^= now ^ c_high;
2893 +- ip = regs ? instruction_pointer(regs) : _RET_IP_;
2894 +- fast_pool->pool[2] ^= ip;
2895 +- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
2896 +- get_reg(fast_pool, regs);
2897 +-
2898 +- fast_mix(fast_pool);
2899 +- add_interrupt_bench(cycles);
2900 +-
2901 +- if (unlikely(crng_init == 0)) {
2902 +- if ((fast_pool->count >= 64) &&
2903 +- crng_fast_load((char *) fast_pool->pool,
2904 +- sizeof(fast_pool->pool)) > 0) {
2905 +- fast_pool->count = 0;
2906 +- fast_pool->last = now;
2907 +- }
2908 +- return;
2909 +- }
2910 +-
2911 +- if ((fast_pool->count < 64) &&
2912 +- !time_after(now, fast_pool->last + HZ))
2913 +- return;
2914 ++ static struct execute_work set_ready;
2915 ++ unsigned int new, orig, add;
2916 ++ unsigned long flags;
2917 +
2918 +- r = &input_pool;
2919 +- if (!spin_trylock(&r->lock))
2920 ++ if (!bits)
2921 + return;
2922 +
2923 +- fast_pool->last = now;
2924 +- __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
2925 ++ add = min_t(size_t, bits, POOL_BITS);
2926 +
2927 +- /*
2928 +- * If we have architectural seed generator, produce a seed and
2929 +- * add it to the pool. For the sake of paranoia don't let the
2930 +- * architectural seed generator dominate the input from the
2931 +- * interrupt noise.
2932 +- */
2933 +- if (arch_get_random_seed_long(&seed)) {
2934 +- __mix_pool_bytes(r, &seed, sizeof(seed));
2935 +- credit = 1;
2936 ++ do {
2937 ++ orig = READ_ONCE(input_pool.init_bits);
2938 ++ new = min_t(unsigned int, POOL_BITS, orig + add);
2939 ++ } while (cmpxchg(&input_pool.init_bits, orig, new) != orig);
2940 ++
2941 ++ if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
2942 ++ crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
2943 ++ execute_in_process_context(crng_set_ready, &set_ready);
2944 ++ process_random_ready_list();
2945 ++ wake_up_interruptible(&crng_init_wait);
2946 ++ kill_fasync(&fasync, SIGIO, POLL_IN);
2947 ++ pr_notice("crng init done\n");
2948 ++ if (urandom_warning.missed)
2949 ++ pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
2950 ++ urandom_warning.missed);
2951 ++ } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
2952 ++ spin_lock_irqsave(&base_crng.lock, flags);
2953 ++ /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
2954 ++ if (crng_init == CRNG_EMPTY) {
2955 ++ extract_entropy(base_crng.key, sizeof(base_crng.key));
2956 ++ crng_init = CRNG_EARLY;
2957 ++ }
2958 ++ spin_unlock_irqrestore(&base_crng.lock, flags);
2959 + }
2960 +- spin_unlock(&r->lock);
2961 +-
2962 +- fast_pool->count = 0;
2963 +-
2964 +- /* award one bit for the contents of the fast pool */
2965 +- credit_entropy_bits(r, credit + 1);
2966 + }
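The cmpxchg loop above is a lock-free saturating add: concurrent callers can never push init_bits past POOL_BITS, and exactly one caller observes the crossing of a threshold, since each sees its own pre-add value. A hedged userspace analogue using C11 atomics (POOL_BITS and the variable names here are stand-ins, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

#define POOL_BITS 256

static _Atomic unsigned int init_bits;

/* Saturating add; returns the pre-add value so the caller can detect
 * the single thread that crossed a given threshold. */
static unsigned int credit(unsigned int add)
{
    unsigned int orig, new;

    orig = atomic_load(&init_bits);
    do {
        new = orig + add;
        if (new > POOL_BITS)
            new = POOL_BITS;
        /* On failure, orig is reloaded with the current value. */
    } while (!atomic_compare_exchange_weak(&init_bits, &orig, new));
    return orig;
}

int main(void)
{
    printf("before: %u\n", credit(200));
    printf("before: %u\n", credit(200)); /* saturates at 256 */
    printf("now: %u\n", atomic_load(&init_bits));
    return 0;
}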
2967 +-EXPORT_SYMBOL_GPL(add_interrupt_randomness);
2968 +
2969 +-#ifdef CONFIG_BLOCK
2970 +-void add_disk_randomness(struct gendisk *disk)
2971 +-{
2972 +- if (!disk || !disk->random)
2973 +- return;
2974 +- /* first major is 1, so we get >= 0x200 here */
2975 +- add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
2976 +- trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
2977 +-}
2978 +-EXPORT_SYMBOL_GPL(add_disk_randomness);
2979 +-#endif
2980 +
2981 +-/*********************************************************************
2982 ++/**********************************************************************
2983 + *
2984 +- * Entropy extraction routines
2985 ++ * Entropy collection routines.
2986 + *
2987 +- *********************************************************************/
2988 ++ * The following exported functions are used for pushing entropy into
2989 ++ * the above entropy accumulation routines:
2990 ++ *
2991 ++ * void add_device_randomness(const void *buf, size_t len);
2992 ++ * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
2993 ++ * void add_bootloader_randomness(const void *buf, size_t len);
2994 ++ * void add_interrupt_randomness(int irq);
2995 ++ * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
2996 ++ * void add_disk_randomness(struct gendisk *disk);
2997 ++ *
2998 ++ * add_device_randomness() adds data to the input pool that
2999 ++ * is likely to differ between two devices (or possibly even per boot).
3000 ++ * This would be things like MAC addresses or serial numbers, or the
3001 ++ * read-out of the RTC. This does *not* credit any actual entropy to
3002 ++ * the pool, but it initializes the pool to different values for devices
3003 ++ * that might otherwise be identical and have very little entropy
3004 ++ * available to them (particularly common in the embedded world).
3005 ++ *
3006 ++ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
3007 ++ * entropy as specified by the caller. If the entropy pool is full it will
3008 ++ * block until more entropy is needed.
3009 ++ *
3010 ++ * add_bootloader_randomness() is called by bootloader drivers, such as EFI
3011 ++ * and device tree, and credits its input depending on whether or not the
3012 ++ * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
3013 ++ *
3014 ++ * add_interrupt_randomness() uses the interrupt timing as random
3015 ++ * inputs to the entropy pool. Using the cycle counters and the irq source
3016 ++ * as inputs, it feeds the input pool roughly once a second or after 64
3017 ++ * interrupts, crediting 1 bit of entropy for whichever comes first.
3018 ++ *
3019 ++ * add_input_randomness() uses the input layer interrupt timing, as well
3020 ++ * as the event type information from the hardware.
3021 ++ *
3022 ++ * add_disk_randomness() uses what amounts to the seek time of block
3023 ++ * layer request events, on a per-disk_devt basis, as input to the
3024 ++ * entropy pool. Note that high-speed solid state drives with very low
3025 ++ * seek times do not make for good sources of entropy, as their seek
3026 ++ * times are usually fairly consistent.
3027 ++ *
3028 ++ * The last two routines try to estimate how many bits of entropy
3029 ++ * to credit. They do this by keeping track of the first and second
3030 ++ * order deltas of the event timings.
3031 ++ *
3032 ++ **********************************************************************/
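As a sketch of the first of these entry points, a hypothetical module might feed a per-device identifier into the pool at load time; this only de-duplicates otherwise-identical initial states and credits nothing. The identifier and module name are invented for illustration, and the snippet builds only as an out-of-tree module against a kernel with this patch applied.

#include <linux/module.h>
#include <linux/random.h>

static int __init seed_example_init(void)
{
    /* Hypothetical per-device identifier; real drivers pass a MAC
     * address, serial number, RTC readout, and the like. */
    static const u8 serial[] = "EXAMPLE-SN-0001";

    add_device_randomness(serial, sizeof(serial));
    return 0;
}

static void __exit seed_example_exit(void)
{
}

module_init(seed_example_init);
module_exit(seed_example_exit);
MODULE_LICENSE("GPL");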
3033 +
3034 +-/*
3035 +- * This function decides how many bytes to actually take from the
3036 +- * given pool, and also debits the entropy count accordingly.
3037 +- */
3038 +-static size_t account(struct entropy_store *r, size_t nbytes, int min,
3039 +- int reserved)
3040 ++static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
3041 ++static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
3042 ++static int __init parse_trust_cpu(char *arg)
3043 + {
3044 +- int entropy_count, orig, have_bytes;
3045 +- size_t ibytes, nfrac;
3046 +-
3047 +- BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
3048 +-
3049 +- /* Can we pull enough? */
3050 +-retry:
3051 +- entropy_count = orig = READ_ONCE(r->entropy_count);
3052 +- ibytes = nbytes;
3053 +- /* never pull more than available */
3054 +- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
3055 +-
3056 +- if ((have_bytes -= reserved) < 0)
3057 +- have_bytes = 0;
3058 +- ibytes = min_t(size_t, ibytes, have_bytes);
3059 +- if (ibytes < min)
3060 +- ibytes = 0;
3061 +-
3062 +- if (WARN_ON(entropy_count < 0)) {
3063 +- pr_warn("negative entropy count: pool %s count %d\n",
3064 +- r->name, entropy_count);
3065 +- entropy_count = 0;
3066 +- }
3067 +- nfrac = ibytes << (ENTROPY_SHIFT + 3);
3068 +- if ((size_t) entropy_count > nfrac)
3069 +- entropy_count -= nfrac;
3070 +- else
3071 +- entropy_count = 0;
3072 +-
3073 +- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
3074 +- goto retry;
3075 +-
3076 +- trace_debit_entropy(r->name, 8 * ibytes);
3077 +- if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
3078 +- wake_up_interruptible(&random_write_wait);
3079 +- kill_fasync(&fasync, SIGIO, POLL_OUT);
3080 +- }
3081 +-
3082 +- return ibytes;
3083 ++ return kstrtobool(arg, &trust_cpu);
3084 + }
3085 +-
3086 +-/*
3087 +- * This function does the actual extraction for extract_entropy and
3088 +- * extract_entropy_user.
3089 +- *
3090 +- * Note: we assume that .poolwords is a multiple of 16 words.
3091 +- */
3092 +-static void extract_buf(struct entropy_store *r, __u8 *out)
3093 ++static int __init parse_trust_bootloader(char *arg)
3094 + {
3095 +- int i;
3096 +- union {
3097 +- __u32 w[5];
3098 +- unsigned long l[LONGS(20)];
3099 +- } hash;
3100 +- __u32 workspace[SHA1_WORKSPACE_WORDS];
3101 +- unsigned long flags;
3102 +-
3103 +- /*
3104 +- * If we have an architectural hardware random number
3105 +- * generator, use it for SHA's initial vector
3106 +- */
3107 +- sha1_init(hash.w);
3108 +- for (i = 0; i < LONGS(20); i++) {
3109 +- unsigned long v;
3110 +- if (!arch_get_random_long(&v))
3111 +- break;
3112 +- hash.l[i] = v;
3113 +- }
3114 +-
3115 +- /* Generate a hash across the pool, 16 words (512 bits) at a time */
3116 +- spin_lock_irqsave(&r->lock, flags);
3117 +- for (i = 0; i < r->poolinfo->poolwords; i += 16)
3118 +- sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
3119 +-
3120 +- /*
3121 +- * We mix the hash back into the pool to prevent backtracking
3122 +- * attacks (where the attacker knows the state of the pool
3123 +- * plus the current outputs, and attempts to find previous
3124 +- * outputs), unless the hash function can be inverted. By
3125 +- * mixing at least a SHA1 worth of hash data back, we make
3126 +- * brute-forcing the feedback as hard as brute-forcing the
3127 +- * hash.
3128 +- */
3129 +- __mix_pool_bytes(r, hash.w, sizeof(hash.w));
3130 +- spin_unlock_irqrestore(&r->lock, flags);
3131 +-
3132 +- memzero_explicit(workspace, sizeof(workspace));
3133 +-
3134 +- /*
3135 +- * In case the hash function has some recognizable output
3136 +- * pattern, we fold it in half. Thus, we always feed back
3137 +- * twice as much data as we output.
3138 +- */
3139 +- hash.w[0] ^= hash.w[3];
3140 +- hash.w[1] ^= hash.w[4];
3141 +- hash.w[2] ^= rol32(hash.w[2], 16);
3142 +-
3143 +- memcpy(out, &hash, EXTRACT_SIZE);
3144 +- memzero_explicit(&hash, sizeof(hash));
3145 ++ return kstrtobool(arg, &trust_bootloader);
3146 + }
3147 ++early_param("random.trust_cpu", parse_trust_cpu);
3148 ++early_param("random.trust_bootloader", parse_trust_bootloader);
3149 +
3150 +-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
3151 +- size_t nbytes, int fips)
3152 ++/*
3153 ++ * The first collection of entropy occurs at system boot while interrupts
3154 ++ * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
3155 ++ * utsname(), and the command line. Depending on the above configuration knob,
3156 ++ * RDSEED may be considered sufficient for initialization. Note that much
3157 ++ * earlier setup may already have pushed entropy into the input pool by the
3158 ++ * time we get here.
3159 ++ */
3160 ++int __init random_init(const char *command_line)
3161 + {
3162 +- ssize_t ret = 0, i;
3163 +- __u8 tmp[EXTRACT_SIZE];
3164 +- unsigned long flags;
3165 ++ ktime_t now = ktime_get_real();
3166 ++ unsigned int i, arch_bytes;
3167 ++ unsigned long entropy;
3168 +
3169 +- while (nbytes) {
3170 +- extract_buf(r, tmp);
3171 ++#if defined(LATENT_ENTROPY_PLUGIN)
3172 ++ static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
3173 ++ _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
3174 ++#endif
3175 +
3176 +- if (fips) {
3177 +- spin_lock_irqsave(&r->lock, flags);
3178 +- if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
3179 +- panic("Hardware RNG duplicated output!\n");
3180 +- memcpy(r->last_data, tmp, EXTRACT_SIZE);
3181 +- spin_unlock_irqrestore(&r->lock, flags);
3182 ++ for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE;
3183 ++ i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
3184 ++ if (!arch_get_random_seed_long_early(&entropy) &&
3185 ++ !arch_get_random_long_early(&entropy)) {
3186 ++ entropy = random_get_entropy();
3187 ++ arch_bytes -= sizeof(entropy);
3188 + }
3189 +- i = min_t(int, nbytes, EXTRACT_SIZE);
3190 +- memcpy(buf, tmp, i);
3191 +- nbytes -= i;
3192 +- buf += i;
3193 +- ret += i;
3194 ++ _mix_pool_bytes(&entropy, sizeof(entropy));
3195 + }
3196 ++ _mix_pool_bytes(&now, sizeof(now));
3197 ++ _mix_pool_bytes(utsname(), sizeof(*(utsname())));
3198 ++ _mix_pool_bytes(command_line, strlen(command_line));
3199 ++ add_latent_entropy();
3200 +
3201 +- /* Wipe data just returned from memory */
3202 +- memzero_explicit(tmp, sizeof(tmp));
3203 ++ if (crng_ready())
3204 ++ crng_reseed();
3205 ++ else if (trust_cpu)
3206 ++ credit_init_bits(arch_bytes * 8);
3207 +
3208 +- return ret;
3209 ++ return 0;
3210 + }
3211 +
3212 + /*
3213 +- * This function extracts randomness from the "entropy pool", and
3214 +- * returns it in a buffer.
3215 ++ * Add device- or boot-specific data to the input pool to help
3216 ++ * initialize it.
3217 + *
3218 +- * The min parameter specifies the minimum amount we can pull before
3219 +- * failing to avoid races that defeat catastrophic reseeding while the
3220 +- * reserved parameter indicates how much entropy we must leave in the
3221 +- * pool after each pull to avoid starving other readers.
3222 ++ * None of this adds any entropy; it is meant to avoid the problem of
3223 ++ * the entropy pool having similar initial state across largely
3224 ++ * identical devices.
3225 + */
3226 +-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
3227 +- size_t nbytes, int min, int reserved)
3228 ++void add_device_randomness(const void *buf, size_t len)
3229 + {
3230 +- __u8 tmp[EXTRACT_SIZE];
3231 ++ unsigned long entropy = random_get_entropy();
3232 + unsigned long flags;
3233 +
3234 +- /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
3235 +- if (fips_enabled) {
3236 +- spin_lock_irqsave(&r->lock, flags);
3237 +- if (!r->last_data_init) {
3238 +- r->last_data_init = 1;
3239 +- spin_unlock_irqrestore(&r->lock, flags);
3240 +- trace_extract_entropy(r->name, EXTRACT_SIZE,
3241 +- ENTROPY_BITS(r), _RET_IP_);
3242 +- extract_buf(r, tmp);
3243 +- spin_lock_irqsave(&r->lock, flags);
3244 +- memcpy(r->last_data, tmp, EXTRACT_SIZE);
3245 +- }
3246 +- spin_unlock_irqrestore(&r->lock, flags);
3247 +- }
3248 +-
3249 +- trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
3250 +- nbytes = account(r, nbytes, min, reserved);
3251 +-
3252 +- return _extract_entropy(r, buf, nbytes, fips_enabled);
3253 ++ spin_lock_irqsave(&input_pool.lock, flags);
3254 ++ _mix_pool_bytes(&entropy, sizeof(entropy));
3255 ++ _mix_pool_bytes(buf, len);
3256 ++ spin_unlock_irqrestore(&input_pool.lock, flags);
3257 + }
3258 ++EXPORT_SYMBOL(add_device_randomness);
3259 +
3260 +-#define warn_unseeded_randomness(previous) \
3261 +- _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
3262 +-
3263 +-static void _warn_unseeded_randomness(const char *func_name, void *caller,
3264 +- void **previous)
3265 ++/*
3266 ++ * Interface for in-kernel drivers of true hardware RNGs.
3267 ++ * Those devices may produce endless random bits and will be throttled
3268 ++ * when our pool is full.
3269 ++ */
3270 ++void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
3271 + {
3272 +-#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
3273 +- const bool print_once = false;
3274 +-#else
3275 +- static bool print_once __read_mostly;
3276 +-#endif
3277 ++ mix_pool_bytes(buf, len);
3278 ++ credit_init_bits(entropy);
3279 +
3280 +- if (print_once ||
3281 +- crng_ready() ||
3282 +- (previous && (caller == READ_ONCE(*previous))))
3283 +- return;
3284 +- WRITE_ONCE(*previous, caller);
3285 +-#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
3286 +- print_once = true;
3287 +-#endif
3288 +- if (__ratelimit(&unseeded_warning))
3289 +- printk_deferred(KERN_NOTICE "random: %s called from %pS "
3290 +- "with crng_init=%d\n", func_name, caller,
3291 +- crng_init);
3292 ++ /*
3293 ++ * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
3294 ++ * we're not yet initialized.
3295 ++ */
3296 ++ if (!kthread_should_stop() && crng_ready())
3297 ++ schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
3298 + }
3299 ++EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
3300 +
3301 + /*
3302 +- * This function is the exported kernel interface. It returns some
3303 +- * number of good random numbers, suitable for key generation, seeding
3304 +- * TCP sequence numbers, etc. It does not rely on the hardware random
3305 +- * number generator. For random bytes direct from the hardware RNG
3306 +- * (when available), use get_random_bytes_arch(). In order to ensure
3307 +- * that the randomness provided by this function is okay, the function
3308 +- * wait_for_random_bytes() should be called and return 0 at least once
3309 +- * at any point prior.
3310 ++ * Handle random seed passed by bootloader, and credit it if
3311 ++ * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
3312 + */
3313 +-static void _get_random_bytes(void *buf, int nbytes)
3314 ++void __cold add_bootloader_randomness(const void *buf, size_t len)
3315 + {
3316 +- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
3317 +-
3318 +- trace_get_random_bytes(nbytes, _RET_IP_);
3319 +-
3320 +- while (nbytes >= CHACHA_BLOCK_SIZE) {
3321 +- extract_crng(buf);
3322 +- buf += CHACHA_BLOCK_SIZE;
3323 +- nbytes -= CHACHA_BLOCK_SIZE;
3324 +- }
3325 +-
3326 +- if (nbytes > 0) {
3327 +- extract_crng(tmp);
3328 +- memcpy(buf, tmp, nbytes);
3329 +- crng_backtrack_protect(tmp, nbytes);
3330 +- } else
3331 +- crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
3332 +- memzero_explicit(tmp, sizeof(tmp));
3333 ++ mix_pool_bytes(buf, len);
3334 ++ if (trust_bootloader)
3335 ++ credit_init_bits(len * 8);
3336 + }
3337 ++EXPORT_SYMBOL_GPL(add_bootloader_randomness);
3338 +
3339 +-void get_random_bytes(void *buf, int nbytes)
3340 +-{
3341 +- static void *previous;
3342 +-
3343 +- warn_unseeded_randomness(&previous);
3344 +- _get_random_bytes(buf, nbytes);
3345 +-}
3346 +-EXPORT_SYMBOL(get_random_bytes);
3347 ++struct fast_pool {
3348 ++ struct work_struct mix;
3349 ++ unsigned long pool[4];
3350 ++ unsigned long last;
3351 ++ unsigned int count;
3352 ++};
3353 +
3354 ++static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
3355 ++#ifdef CONFIG_64BIT
3356 ++#define FASTMIX_PERM SIPHASH_PERMUTATION
3357 ++ .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
3358 ++#else
3359 ++#define FASTMIX_PERM HSIPHASH_PERMUTATION
3360 ++ .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
3361 ++#endif
3362 ++};
3363 +
3364 + /*
3365 +- * Each time the timer fires, we expect that we got an unpredictable
3366 +- * jump in the cycle counter. Even if the timer is running on another
3367 +- * CPU, the timer activity will be touching the stack of the CPU that is
3368 +- * generating entropy.
3369 +- *
3370 +- * Note that we don't re-arm the timer in the timer itself - we are
3371 +- * happy to be scheduled away, since that just makes the load more
3372 +- * complex, but we do not want the timer to keep ticking unless the
3373 +- * entropy loop is running.
3374 +- *
3375 +- * So the re-arming always happens in the entropy loop itself.
3376 ++ * This is [Half]SipHash-1-x, starting from an empty key. Because
3377 ++ * the key is fixed, it assumes that its inputs are non-malicious,
3378 ++ * and therefore this has no security on its own. s represents the
3379 ++ * four-word SipHash state, while v represents a two-word input.
3380 + */
3381 +-static void entropy_timer(struct timer_list *t)
3382 ++static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
3383 + {
3384 +- credit_entropy_bits(&input_pool, 1);
3385 ++ s[3] ^= v1;
3386 ++ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
3387 ++ s[0] ^= v1;
3388 ++ s[3] ^= v2;
3389 ++ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
3390 ++ s[0] ^= v2;
3391 + }
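For reference, FASTMIX_PERM on 64-bit is the standard SipHash round. A self-contained userspace rendering of the same SipHash-1-x absorption, with the permutation and the "somepseudorandomlygeneratedbytes" initial constants written out (64-bit only; the 32-bit build uses HalfSipHash instead):

#include <stdint.h>
#include <stdio.h>

static inline uint64_t rol64(uint64_t w, unsigned s)
{
    return (w << s) | (w >> (64 - s));
}

/* One SipHash round over the four-word state. */
#define SIPROUND(a, b, c, d) do { \
    a += b; b = rol64(b, 13); b ^= a; a = rol64(a, 32); \
    c += d; d = rol64(d, 16); d ^= c; \
    a += d; d = rol64(d, 21); d ^= a; \
    c += b; b = rol64(b, 17); b ^= c; c = rol64(c, 32); \
} while (0)

/* SipHash-1-x absorption of a two-word input, as in fast_mix(). */
static void fast_mix(uint64_t s[4], uint64_t v1, uint64_t v2)
{
    s[3] ^= v1;
    SIPROUND(s[0], s[1], s[2], s[3]);
    s[0] ^= v1;
    s[3] ^= v2;
    SIPROUND(s[0], s[1], s[2], s[3]);
    s[0] ^= v2;
}

int main(void)
{
    /* "somepseudorandomlygeneratedbytes", the SipHash IV. */
    uint64_t s[4] = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL,
                      0x6c7967656e657261ULL, 0x7465646279746573ULL };

    fast_mix(s, 0x123456789abcdef0ULL, 42);
    printf("%016llx %016llx\n",
           (unsigned long long)s[0], (unsigned long long)s[3]);
    return 0;
}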
3392 +
3393 ++#ifdef CONFIG_SMP
3394 + /*
3395 +- * If we have an actual cycle counter, see if we can
3396 +- * generate enough entropy with timing noise
3397 ++ * This function is called when the CPU has just come online, with
3398 ++ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
3399 + */
3400 +-static void try_to_generate_entropy(void)
3401 ++int __cold random_online_cpu(unsigned int cpu)
3402 + {
3403 +- struct {
3404 +- unsigned long now;
3405 +- struct timer_list timer;
3406 +- } stack;
3407 ++ /*
3408 ++ * During CPU shutdown and before CPU onlining, add_interrupt_
3409 ++ * randomness() may schedule mix_interrupt_randomness(), and
3410 ++ * set the MIX_INFLIGHT flag. However, because the worker can
3411 ++ * be scheduled on a different CPU during this period, that
3412 ++ * flag will never be cleared. For that reason, we zero out
3413 ++ * the flag here, which runs just after workqueues are onlined
3414 ++ * for the CPU again. This also has the effect of setting the
3415 ++ * irq randomness count to zero so that new accumulated irqs
3416 ++ * are fresh.
3417 ++ */
3418 ++ per_cpu_ptr(&irq_randomness, cpu)->count = 0;
3419 ++ return 0;
3420 ++}
3421 ++#endif
3422 +
3423 +- stack.now = random_get_entropy();
3424 ++static void mix_interrupt_randomness(struct work_struct *work)
3425 ++{
3426 ++ struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
3427 ++ /*
3428 ++ * The size of the copied stack pool is explicitly 2 longs so that we
3429 ++ * only ever ingest half of the siphash output each time, retaining
3430 ++ * the other half as the next "key" that carries over. The entropy is
3431 ++ * supposed to be sufficiently dispersed between bits so on average
3432 ++ * we don't wind up "losing" some.
3433 ++ */
3434 ++ unsigned long pool[2];
3435 ++ unsigned int count;
3436 +
3437 +- /* Slow counter - or none. Don't even bother */
3438 +- if (stack.now == random_get_entropy())
3439 ++ /* Check to see if we're running on the wrong CPU due to hotplug. */
3440 ++ local_irq_disable();
3441 ++ if (fast_pool != this_cpu_ptr(&irq_randomness)) {
3442 ++ local_irq_enable();
3443 + return;
3444 +-
3445 +- timer_setup_on_stack(&stack.timer, entropy_timer, 0);
3446 +- while (!crng_ready()) {
3447 +- if (!timer_pending(&stack.timer))
3448 +- mod_timer(&stack.timer, jiffies+1);
3449 +- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
3450 +- schedule();
3451 +- stack.now = random_get_entropy();
3452 + }
3453 +
3454 +- del_timer_sync(&stack.timer);
3455 +- destroy_timer_on_stack(&stack.timer);
3456 +- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
3457 +-}
3458 +-
3459 +-/*
3460 +- * Wait for the urandom pool to be seeded and thus guaranteed to supply
3461 +- * cryptographically secure random numbers. This applies to: the /dev/urandom
3462 +- * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
3463 +- * family of functions. Using any of these functions without first calling
3464 +- * this function forfeits the guarantee of security.
3465 +- *
3466 +- * Returns: 0 if the urandom pool has been seeded.
3467 +- * -ERESTARTSYS if the function was interrupted by a signal.
3468 +- */
3469 +-int wait_for_random_bytes(void)
3470 +-{
3471 +- if (likely(crng_ready()))
3472 +- return 0;
3473 +-
3474 +- do {
3475 +- int ret;
3476 +- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
3477 +- if (ret)
3478 +- return ret > 0 ? 0 : ret;
3479 ++ /*
3480 ++ * Copy the pool to the stack so that the mixer always has a
3481 ++ * consistent view, before we reenable irqs again.
3482 ++ */
3483 ++ memcpy(pool, fast_pool->pool, sizeof(pool));
3484 ++ count = fast_pool->count;
3485 ++ fast_pool->count = 0;
3486 ++ fast_pool->last = jiffies;
3487 ++ local_irq_enable();
3488 +
3489 +- try_to_generate_entropy();
3490 +- } while (!crng_ready());
3491 ++ mix_pool_bytes(pool, sizeof(pool));
3492 ++ credit_init_bits(max(1u, (count & U16_MAX) / 64));
3493 +
3494 +- return 0;
3495 ++ memzero_explicit(pool, sizeof(pool));
3496 + }
3497 +-EXPORT_SYMBOL(wait_for_random_bytes);
3498 +
3499 +-/*
3500 +- * Returns whether or not the urandom pool has been seeded and thus guaranteed
3501 +- * to supply cryptographically secure random numbers. This applies to: the
3502 +- * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
3503 +- * ,u64,int,long} family of functions.
3504 +- *
3505 +- * Returns: true if the urandom pool has been seeded.
3506 +- * false if the urandom pool has not been seeded.
3507 +- */
3508 +-bool rng_is_initialized(void)
3509 +-{
3510 +- return crng_ready();
3511 +-}
3512 +-EXPORT_SYMBOL(rng_is_initialized);
3513 +-
3514 +-/*
3515 +- * Add a callback function that will be invoked when the nonblocking
3516 +- * pool is initialised.
3517 +- *
3518 +- * returns: 0 if callback is successfully added
3519 +- * -EALREADY if pool is already initialised (callback not called)
3520 +- * -ENOENT if module for callback is not alive
3521 +- */
3522 +-int add_random_ready_callback(struct random_ready_callback *rdy)
3523 ++void add_interrupt_randomness(int irq)
3524 + {
3525 +- struct module *owner;
3526 +- unsigned long flags;
3527 +- int err = -EALREADY;
3528 ++ enum { MIX_INFLIGHT = 1U << 31 };
3529 ++ unsigned long entropy = random_get_entropy();
3530 ++ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
3531 ++ struct pt_regs *regs = get_irq_regs();
3532 ++ unsigned int new_count;
3533 +
3534 +- if (crng_ready())
3535 +- return err;
3536 ++ fast_mix(fast_pool->pool, entropy,
3537 ++ (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
3538 ++ new_count = ++fast_pool->count;
3539 +
3540 +- owner = rdy->owner;
3541 +- if (!try_module_get(owner))
3542 +- return -ENOENT;
3543 +-
3544 +- spin_lock_irqsave(&random_ready_list_lock, flags);
3545 +- if (crng_ready())
3546 +- goto out;
3547 +-
3548 +- owner = NULL;
3549 +-
3550 +- list_add(&rdy->list, &random_ready_list);
3551 +- err = 0;
3552 +-
3553 +-out:
3554 +- spin_unlock_irqrestore(&random_ready_list_lock, flags);
3555 ++ if (new_count & MIX_INFLIGHT)
3556 ++ return;
3557 +
3558 +- module_put(owner);
3559 ++ if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ))
3560 ++ return;
3561 +
3562 +- return err;
3563 ++ if (unlikely(!fast_pool->mix.func))
3564 ++ INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
3565 ++ fast_pool->count |= MIX_INFLIGHT;
3566 ++ queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
3567 + }
3568 +-EXPORT_SYMBOL(add_random_ready_callback);
3569 ++EXPORT_SYMBOL_GPL(add_interrupt_randomness);
3570 ++
3571 ++/* There is one of these per entropy source */
3572 ++struct timer_rand_state {
3573 ++ unsigned long last_time;
3574 ++ long last_delta, last_delta2;
3575 ++};
3576 +
3577 + /*
3578 +- * Delete a previously registered readiness callback function.
3579 ++ * This function adds entropy to the entropy "pool" by using timing
3580 ++ * delays. It uses the timer_rand_state structure to make an estimate
3581 ++ * of how many bits of entropy this call has added to the pool. The
3582 ++ * value "num" is also added to the pool; it should somehow describe
3583 ++ * the type of event that just happened.
3584 + */
3585 +-void del_random_ready_callback(struct random_ready_callback *rdy)
3586 ++static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
3587 + {
3588 +- unsigned long flags;
3589 +- struct module *owner = NULL;
3590 ++ unsigned long entropy = random_get_entropy(), now = jiffies, flags;
3591 ++ long delta, delta2, delta3;
3592 ++ unsigned int bits;
3593 +
3594 +- spin_lock_irqsave(&random_ready_list_lock, flags);
3595 +- if (!list_empty(&rdy->list)) {
3596 +- list_del_init(&rdy->list);
3597 +- owner = rdy->owner;
3598 ++ /*
3599 ++ * If we're in a hard IRQ, add_interrupt_randomness() will be called
3600 ++ * sometime after, so mix into the fast pool.
3601 ++ */
3602 ++ if (in_irq()) {
3603 ++ fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
3604 ++ } else {
3605 ++ spin_lock_irqsave(&input_pool.lock, flags);
3606 ++ _mix_pool_bytes(&entropy, sizeof(entropy));
3607 ++ _mix_pool_bytes(&num, sizeof(num));
3608 ++ spin_unlock_irqrestore(&input_pool.lock, flags);
3609 + }
3610 +- spin_unlock_irqrestore(&random_ready_list_lock, flags);
3611 +
3612 +- module_put(owner);
3613 +-}
3614 +-EXPORT_SYMBOL(del_random_ready_callback);
3615 ++ if (crng_ready())
3616 ++ return;
3617 +
3618 +-/*
3619 +- * This function will use the architecture-specific hardware random
3620 +- * number generator if it is available. The arch-specific hw RNG will
3621 +- * almost certainly be faster than what we can do in software, but it
3622 +- * is impossible to verify that it is implemented securely (as
3623 +- * opposed, to, say, the AES encryption of a sequence number using a
3624 +- * key known by the NSA). So it's useful if we need the speed, but
3625 +- * only if we're willing to trust the hardware manufacturer not to
3626 +- * have put in a back door.
3627 +- *
3628 +- * Return number of bytes filled in.
3629 +- */
3630 +-int __must_check get_random_bytes_arch(void *buf, int nbytes)
3631 +-{
3632 +- int left = nbytes;
3633 +- char *p = buf;
3634 ++ /*
3635 ++ * Calculate number of bits of randomness we probably added.
3636 ++ * We take into account the first, second and third-order deltas
3637 ++ * in order to make our estimate.
3638 ++ */
3639 ++ delta = now - READ_ONCE(state->last_time);
3640 ++ WRITE_ONCE(state->last_time, now);
3641 +
3642 +- trace_get_random_bytes_arch(left, _RET_IP_);
3643 +- while (left) {
3644 +- unsigned long v;
3645 +- int chunk = min_t(int, left, sizeof(unsigned long));
3646 ++ delta2 = delta - READ_ONCE(state->last_delta);
3647 ++ WRITE_ONCE(state->last_delta, delta);
3648 +
3649 +- if (!arch_get_random_long(&v))
3650 +- break;
3651 ++ delta3 = delta2 - READ_ONCE(state->last_delta2);
3652 ++ WRITE_ONCE(state->last_delta2, delta2);
3653 +
3654 +- memcpy(p, &v, chunk);
3655 +- p += chunk;
3656 +- left -= chunk;
3657 +- }
3658 ++ if (delta < 0)
3659 ++ delta = -delta;
3660 ++ if (delta2 < 0)
3661 ++ delta2 = -delta2;
3662 ++ if (delta3 < 0)
3663 ++ delta3 = -delta3;
3664 ++ if (delta > delta2)
3665 ++ delta = delta2;
3666 ++ if (delta > delta3)
3667 ++ delta = delta3;
3668 ++
3669 ++ /*
3670 ++ * delta is now minimum absolute delta. Round down by 1 bit
3671 ++ * on general principles, and limit entropy estimate to 11 bits.
3672 ++ */
3673 ++ bits = min(fls(delta >> 1), 11);
3674 +
3675 +- return nbytes - left;
3676 ++ /*
3677 ++ * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
3678 ++ * will run after this, which uses a different crediting scheme of 1 bit
3679 ++ * per every 64 interrupts. In order to let that function do accounting
3680 ++ * close to the one in this function, we credit a full 64/64 bit per bit,
3681 ++ * and then subtract one to account for the extra one added.
3682 ++ */
3683 ++ if (in_irq())
3684 ++ this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
3685 ++ else
3686 ++ _credit_init_bits(bits);
3687 + }
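Boiled down, the estimator takes the minimum of the absolute first-, second- and third-order deltas, discards one bit, and caps the result at 11 bits. A standalone version of just that arithmetic, assuming LP64 and a GCC/Clang builtin for fls():

#include <stdio.h>
#include <stdlib.h>

/* fls(): index of the highest set bit, 1-based; 0 if no bits are set. */
static int fls(long x)
{
    return x ? 64 - __builtin_clzl((unsigned long)x) : 0;
}

struct timer_rand_state {
    long last_time, last_delta, last_delta2;
};

/* Entropy estimate, in bits, for one event at time "now". */
static unsigned int estimate_bits(struct timer_rand_state *s, long now)
{
    long delta = now - s->last_time;
    long delta2 = delta - s->last_delta;
    long delta3 = delta2 - s->last_delta2;
    unsigned int bits;

    s->last_time = now;
    s->last_delta = delta;
    s->last_delta2 = delta2;

    delta = labs(delta);
    delta2 = labs(delta2);
    delta3 = labs(delta3);
    if (delta > delta2)
        delta = delta2;
    if (delta > delta3)
        delta = delta3;

    bits = fls(delta >> 1);
    return bits < 11 ? bits : 11;
}

int main(void)
{
    struct timer_rand_state s = { 0 };
    long t[] = { 100, 233, 401, 517, 900 };

    for (unsigned i = 0; i < sizeof(t) / sizeof(t[0]); ++i)
        printf("event at %ld -> %u bit(s)\n", t[i], estimate_bits(&s, t[i]));
    return 0;
}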
3688 +-EXPORT_SYMBOL(get_random_bytes_arch);
3689 +
3690 +-/*
3691 +- * init_std_data - initialize pool with system data
3692 +- *
3693 +- * @r: pool to initialize
3694 +- *
3695 +- * This function clears the pool's entropy count and mixes some system
3696 +- * data into the pool to prepare it for use. The pool is not cleared
3697 +- * as that can only decrease the entropy in the pool.
3698 +- */
3699 +-static void __init init_std_data(struct entropy_store *r)
3700 ++void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
3701 + {
3702 +- int i;
3703 +- ktime_t now = ktime_get_real();
3704 +- unsigned long rv;
3705 +-
3706 +- mix_pool_bytes(r, &now, sizeof(now));
3707 +- for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
3708 +- if (!arch_get_random_seed_long(&rv) &&
3709 +- !arch_get_random_long(&rv))
3710 +- rv = random_get_entropy();
3711 +- mix_pool_bytes(r, &rv, sizeof(rv));
3712 +- }
3713 +- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
3714 ++ static unsigned char last_value;
3715 ++ static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
3716 ++
3717 ++ /* Ignore autorepeat and the like. */
3718 ++ if (value == last_value)
3719 ++ return;
3720 ++
3721 ++ last_value = value;
3722 ++ add_timer_randomness(&input_timer_state,
3723 ++ (type << 4) ^ code ^ (code >> 4) ^ value);
3724 + }
3725 ++EXPORT_SYMBOL_GPL(add_input_randomness);
3726 +
3727 +-/*
3728 +- * Note that setup_arch() may call add_device_randomness()
3729 +- * long before we get here. This allows seeding of the pools
3730 +- * with some platform dependent data very early in the boot
3731 +- * process. But it limits our options here. We must use
3732 +- * statically allocated structures that already have all
3733 +- * initializations complete at compile time. We should also
3734 +- * take care not to overwrite the precious per platform data
3735 +- * we were given.
3736 +- */
3737 +-int __init rand_initialize(void)
3738 ++#ifdef CONFIG_BLOCK
3739 ++void add_disk_randomness(struct gendisk *disk)
3740 + {
3741 +- init_std_data(&input_pool);
3742 +- if (crng_need_final_init)
3743 +- crng_finalize_init(&primary_crng);
3744 +- crng_initialize_primary(&primary_crng);
3745 +- crng_global_init_time = jiffies;
3746 +- if (ratelimit_disable) {
3747 +- urandom_warning.interval = 0;
3748 +- unseeded_warning.interval = 0;
3749 +- }
3750 +- return 0;
3751 ++ if (!disk || !disk->random)
3752 ++ return;
3753 ++ /* First major is 1, so we get >= 0x200 here. */
3754 ++ add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
3755 + }
3756 ++EXPORT_SYMBOL_GPL(add_disk_randomness);
3757 +
3758 +-#ifdef CONFIG_BLOCK
3759 +-void rand_initialize_disk(struct gendisk *disk)
3760 ++void __cold rand_initialize_disk(struct gendisk *disk)
3761 + {
3762 + struct timer_rand_state *state;
3763 +
3764 +@@ -1847,116 +1141,189 @@ void rand_initialize_disk(struct gendisk *disk)
3765 + }
3766 + #endif
3767 +
3768 +-static ssize_t
3769 +-urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
3770 +- loff_t *ppos)
3771 ++/*
3772 ++ * Each time the timer fires, we expect that we got an unpredictable
3773 ++ * jump in the cycle counter. Even if the timer is running on another
3774 ++ * CPU, the timer activity will be touching the stack of the CPU that is
3775 ++ * generating entropy.
3776 ++ *
3777 ++ * Note that we don't re-arm the timer in the timer itself - we are
3778 ++ * happy to be scheduled away, since that just makes the load more
3779 ++ * complex, but we do not want the timer to keep ticking unless the
3780 ++ * entropy loop is running.
3781 ++ *
3782 ++ * So the re-arming always happens in the entropy loop itself.
3783 ++ */
3784 ++static void __cold entropy_timer(struct timer_list *t)
3785 + {
3786 +- int ret;
3787 +-
3788 +- nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
3789 +- ret = extract_crng_user(buf, nbytes);
3790 +- trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
3791 +- return ret;
3792 ++ credit_init_bits(1);
3793 + }
3794 +
3795 +-static ssize_t
3796 +-urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
3797 ++/*
3798 ++ * If we have an actual cycle counter, see if we can
3799 ++ * generate enough entropy with timing noise
3800 ++ */
3801 ++static void __cold try_to_generate_entropy(void)
3802 + {
3803 +- unsigned long flags;
3804 +- static int maxwarn = 10;
3805 ++ struct {
3806 ++ unsigned long entropy;
3807 ++ struct timer_list timer;
3808 ++ } stack;
3809 ++
3810 ++ stack.entropy = random_get_entropy();
3811 +
3812 +- if (!crng_ready() && maxwarn > 0) {
3813 +- maxwarn--;
3814 +- if (__ratelimit(&urandom_warning))
3815 +- pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
3816 +- current->comm, nbytes);
3817 +- spin_lock_irqsave(&primary_crng.lock, flags);
3818 +- crng_init_cnt = 0;
3819 +- spin_unlock_irqrestore(&primary_crng.lock, flags);
3820 ++ /* Slow counter - or none. Don't even bother */
3821 ++ if (stack.entropy == random_get_entropy())
3822 ++ return;
3823 ++
3824 ++ timer_setup_on_stack(&stack.timer, entropy_timer, 0);
3825 ++ while (!crng_ready() && !signal_pending(current)) {
3826 ++ if (!timer_pending(&stack.timer))
3827 ++ mod_timer(&stack.timer, jiffies + 1);
3828 ++ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
3829 ++ schedule();
3830 ++ stack.entropy = random_get_entropy();
3831 + }
3832 +
3833 +- return urandom_read_nowarn(file, buf, nbytes, ppos);
3834 ++ del_timer_sync(&stack.timer);
3835 ++ destroy_timer_on_stack(&stack.timer);
3836 ++ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
3837 + }
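A rough userspace analogue of the same bet, namely that scheduler activity perturbs a fast clock between adjacent reads, might look like the following; it is purely illustrative and XOR-folds jitter rather than hashing it, so it is no substitute for the kernel's collector:

#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
    uint64_t acc = 0;

    /* Rotate-and-XOR timing jitter across forced reschedules; real
     * entropy extraction would hash, not fold. */
    for (int i = 0; i < 4096; ++i) {
        uint64_t t = now_ns();

        sched_yield();
        acc = (acc << 7 | acc >> 57) ^ (now_ns() - t);
    }
    printf("jitter accumulator: %016llx\n", (unsigned long long)acc);
    return 0;
}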
3838 +
3839 +-static ssize_t
3840 +-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
3841 ++
3842 ++/**********************************************************************
3843 ++ *
3844 ++ * Userspace reader/writer interfaces.
3845 ++ *
3846 ++ * getrandom(2) is the primary modern interface into the RNG and should
3847 ++ * be used in preference to anything else.
3848 ++ *
3849 ++ * Reading from /dev/random has the same functionality as calling
3850 ++ * getrandom(2) with flags=0. In earlier versions, however, it had
3851 ++ * vastly different semantics and should therefore be avoided, to
3852 ++ * prevent backwards compatibility issues.
3853 ++ *
3854 ++ * Reading from /dev/urandom has the same functionality as calling
3855 ++ * getrandom(2) with flags=GRND_INSECURE. Because it does not block
3856 ++ * waiting for the RNG to be ready, it should not be used.
3857 ++ *
3858 ++ * Writing to either /dev/random or /dev/urandom adds entropy to
3859 ++ * the input pool but does not credit it.
3860 ++ *
3861 ++ * Polling on /dev/random indicates when the RNG is initialized, on
3862 ++ * the read side, and when it wants new entropy, on the write side.
3863 ++ *
3864 ++ * Both /dev/random and /dev/urandom have the same set of ioctls for
3865 ++ * adding entropy, getting the entropy count, zeroing the count, and
3866 ++ * reseeding the crng.
3867 ++ *
3868 ++ **********************************************************************/
3869 ++
3870 ++SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
3871 + {
3872 ++ struct iov_iter iter;
3873 ++ struct iovec iov;
3874 + int ret;
3875 +
3876 +- ret = wait_for_random_bytes();
3877 +- if (ret != 0)
3878 ++ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
3879 ++ return -EINVAL;
3880 ++
3881 ++ /*
3882 ++ * Requesting insecure and blocking randomness at the same time makes
3883 ++ * no sense.
3884 ++ */
3885 ++ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
3886 ++ return -EINVAL;
3887 ++
3888 ++ if (!crng_ready() && !(flags & GRND_INSECURE)) {
3889 ++ if (flags & GRND_NONBLOCK)
3890 ++ return -EAGAIN;
3891 ++ ret = wait_for_random_bytes();
3892 ++ if (unlikely(ret))
3893 ++ return ret;
3894 ++ }
3895 ++
3896 ++ ret = import_single_range(READ, ubuf, len, &iov, &iter);
3897 ++ if (unlikely(ret))
3898 + return ret;
3899 +- return urandom_read_nowarn(file, buf, nbytes, ppos);
3900 ++ return get_random_bytes_user(&iter);
3901 + }
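From userspace this syscall is reachable via the <sys/random.h> wrapper (glibc >= 2.25; GRND_INSECURE additionally needs Linux >= 5.6 headers). A minimal consumer that blocks until the CRNG is ready:

#include <stdio.h>
#include <sys/random.h>

int main(void)
{
    unsigned char key[32];

    /* flags=0: block until the CRNG is initialized, then never again. */
    if (getrandom(key, sizeof(key), 0) != sizeof(key)) {
        perror("getrandom");
        return 1;
    }
    for (size_t i = 0; i < sizeof(key); ++i)
        printf("%02x", key[i]);
    printf("\n");
    return 0;
}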
3902 +
3903 +-static __poll_t
3904 +-random_poll(struct file *file, poll_table * wait)
3905 ++static __poll_t random_poll(struct file *file, poll_table *wait)
3906 + {
3907 +- __poll_t mask;
3908 +-
3909 + poll_wait(file, &crng_init_wait, wait);
3910 +- poll_wait(file, &random_write_wait, wait);
3911 +- mask = 0;
3912 +- if (crng_ready())
3913 +- mask |= EPOLLIN | EPOLLRDNORM;
3914 +- if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
3915 +- mask |= EPOLLOUT | EPOLLWRNORM;
3916 +- return mask;
3917 ++ return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
3918 + }
3919 +
3920 +-static int
3921 +-write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
3922 ++static ssize_t write_pool_user(struct iov_iter *iter)
3923 + {
3924 +- size_t bytes;
3925 +- __u32 t, buf[16];
3926 +- const char __user *p = buffer;
3927 ++ u8 block[BLAKE2S_BLOCK_SIZE];
3928 ++ ssize_t ret = 0;
3929 ++ size_t copied;
3930 +
3931 +- while (count > 0) {
3932 +- int b, i = 0;
3933 ++ if (unlikely(!iov_iter_count(iter)))
3934 ++ return 0;
3935 +
3936 +- bytes = min(count, sizeof(buf));
3937 +- if (copy_from_user(&buf, p, bytes))
3938 +- return -EFAULT;
3939 ++ for (;;) {
3940 ++ copied = copy_from_iter(block, sizeof(block), iter);
3941 ++ ret += copied;
3942 ++ mix_pool_bytes(block, copied);
3943 ++ if (!iov_iter_count(iter) || copied != sizeof(block))
3944 ++ break;
3945 +
3946 +- for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
3947 +- if (!arch_get_random_int(&t))
3948 ++ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
3949 ++ if (ret % PAGE_SIZE == 0) {
3950 ++ if (signal_pending(current))
3951 + break;
3952 +- buf[i] ^= t;
3953 ++ cond_resched();
3954 + }
3955 ++ }
3956 ++
3957 ++ memzero_explicit(block, sizeof(block));
3958 ++ return ret ? ret : -EFAULT;
3959 ++}
3960 ++
3961 ++static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
3962 ++{
3963 ++ return write_pool_user(iter);
3964 ++}
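Correspondingly, any process can mix uncredited bytes into the pool just by writing to either device node; crediting requires the RNDADDENTROPY ioctl handled further down. A sketch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    const char seed[] = "some-site-specific-bytes";
    int fd = open("/dev/urandom", O_WRONLY); /* /dev/random works too */

    if (fd < 0 || write(fd, seed, sizeof(seed)) < 0) {
        perror("/dev/urandom");
        return 1;
    }
    close(fd);
    return 0; /* Mixed in, but entropy_avail is unchanged. */
}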
3965 +
3966 +- count -= bytes;
3967 +- p += bytes;
3968 ++static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
3969 ++{
3970 ++ static int maxwarn = 10;
3971 +
3972 +- mix_pool_bytes(r, buf, bytes);
3973 +- cond_resched();
3974 ++ if (!crng_ready()) {
3975 ++ if (!ratelimit_disable && maxwarn <= 0)
3976 ++ ++urandom_warning.missed;
3977 ++ else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
3978 ++ --maxwarn;
3979 ++ pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
3980 ++ current->comm, iov_iter_count(iter));
3981 ++ }
3982 + }
3983 +
3984 +- return 0;
3985 ++ return get_random_bytes_user(iter);
3986 + }
3987 +
3988 +-static ssize_t random_write(struct file *file, const char __user *buffer,
3989 +- size_t count, loff_t *ppos)
3990 ++static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
3991 + {
3992 +- size_t ret;
3993 ++ int ret;
3994 +
3995 +- ret = write_pool(&input_pool, buffer, count);
3996 +- if (ret)
3997 ++ ret = wait_for_random_bytes();
3998 ++ if (ret != 0)
3999 + return ret;
4000 +-
4001 +- return (ssize_t)count;
4002 ++ return get_random_bytes_user(iter);
4003 + }
4004 +
4005 + static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
4006 + {
4007 +- int size, ent_count;
4008 + int __user *p = (int __user *)arg;
4009 +- int retval;
4010 ++ int ent_count;
4011 +
4012 + switch (cmd) {
4013 + case RNDGETENTCNT:
4014 +- /* inherently racy, no point locking */
4015 +- ent_count = ENTROPY_BITS(&input_pool);
4016 +- if (put_user(ent_count, p))
4017 ++ /* Inherently racy, no point locking. */
4018 ++ if (put_user(input_pool.init_bits, p))
4019 + return -EFAULT;
4020 + return 0;
4021 + case RNDADDTOENTCNT:
4022 +@@ -1964,41 +1331,48 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
4023 + return -EPERM;
4024 + if (get_user(ent_count, p))
4025 + return -EFAULT;
4026 +- return credit_entropy_bits_safe(&input_pool, ent_count);
4027 +- case RNDADDENTROPY:
4028 ++ if (ent_count < 0)
4029 ++ return -EINVAL;
4030 ++ credit_init_bits(ent_count);
4031 ++ return 0;
4032 ++ case RNDADDENTROPY: {
4033 ++ struct iov_iter iter;
4034 ++ struct iovec iov;
4035 ++ ssize_t ret;
4036 ++ int len;
4037 ++
4038 + if (!capable(CAP_SYS_ADMIN))
4039 + return -EPERM;
4040 + if (get_user(ent_count, p++))
4041 + return -EFAULT;
4042 + if (ent_count < 0)
4043 + return -EINVAL;
4044 +- if (get_user(size, p++))
4045 ++ if (get_user(len, p++))
4046 ++ return -EFAULT;
4047 ++ ret = import_single_range(WRITE, p, len, &iov, &iter);
4048 ++ if (unlikely(ret))
4049 ++ return ret;
4050 ++ ret = write_pool_user(&iter);
4051 ++ if (unlikely(ret < 0))
4052 ++ return ret;
4053 ++ /* Since we're crediting, enforce that it was all written into the pool. */
4054 ++ if (unlikely(ret != len))
4055 + return -EFAULT;
4056 +- retval = write_pool(&input_pool, (const char __user *)p,
4057 +- size);
4058 +- if (retval < 0)
4059 +- return retval;
4060 +- return credit_entropy_bits_safe(&input_pool, ent_count);
4061 ++ credit_init_bits(ent_count);
4062 ++ return 0;
4063 ++ }
4064 + case RNDZAPENTCNT:
4065 + case RNDCLEARPOOL:
4066 +- /*
4067 +- * Clear the entropy pool counters. We no longer clear
4068 +- * the entropy pool, as that's silly.
4069 +- */
4070 ++ /* No longer has any effect. */
4071 + if (!capable(CAP_SYS_ADMIN))
4072 + return -EPERM;
4073 +- if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
4074 +- wake_up_interruptible(&random_write_wait);
4075 +- kill_fasync(&fasync, SIGIO, POLL_OUT);
4076 +- }
4077 + return 0;
4078 + case RNDRESEEDCRNG:
4079 + if (!capable(CAP_SYS_ADMIN))
4080 + return -EPERM;
4081 +- if (crng_init < 2)
4082 ++ if (!crng_ready())
4083 + return -ENODATA;
4084 +- crng_reseed(&primary_crng, &input_pool);
4085 +- WRITE_ONCE(crng_global_init_time, jiffies - 1);
4086 ++ crng_reseed();
4087 + return 0;
4088 + default:
4089 + return -EINVAL;
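A privileged feeder (in the style of rngd) drives the RNDADDENTROPY path like the following sketch; CAP_SYS_ADMIN is required, and the claim of 64 bits of entropy for these 64 bytes is purely illustrative:

#include <fcntl.h>
#include <linux/random.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
    struct rand_pool_info *info = malloc(sizeof(*info) + 64);
    int fd = open("/dev/random", O_WRONLY);

    if (!info || fd < 0) {
        perror("setup");
        return 1;
    }
    /* Pretend these 64 bytes carry 64 bits of entropy. */
    info->entropy_count = 64;        /* bits to credit */
    info->buf_size = 64;             /* bytes that follow */
    memset(info->buf, 0xA5, 64);

    if (ioctl(fd, RNDADDENTROPY, info) < 0) {
        perror("RNDADDENTROPY");
        return 1;
    }
    close(fd);
    free(info);
    return 0;
}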
4090 +@@ -2011,55 +1385,56 @@ static int random_fasync(int fd, struct file *filp, int on)
4091 + }
4092 +
4093 + const struct file_operations random_fops = {
4094 +- .read = random_read,
4095 +- .write = random_write,
4096 +- .poll = random_poll,
4097 ++ .read_iter = random_read_iter,
4098 ++ .write_iter = random_write_iter,
4099 ++ .poll = random_poll,
4100 + .unlocked_ioctl = random_ioctl,
4101 + .compat_ioctl = compat_ptr_ioctl,
4102 + .fasync = random_fasync,
4103 + .llseek = noop_llseek,
4104 ++ .splice_read = generic_file_splice_read,
4105 ++ .splice_write = iter_file_splice_write,
4106 + };
4107 +
4108 + const struct file_operations urandom_fops = {
4109 +- .read = urandom_read,
4110 +- .write = random_write,
4111 ++ .read_iter = urandom_read_iter,
4112 ++ .write_iter = random_write_iter,
4113 + .unlocked_ioctl = random_ioctl,
4114 + .compat_ioctl = compat_ptr_ioctl,
4115 + .fasync = random_fasync,
4116 + .llseek = noop_llseek,
4117 ++ .splice_read = generic_file_splice_read,
4118 ++ .splice_write = iter_file_splice_write,
4119 + };
4120 +
4121 +-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
4122 +- unsigned int, flags)
4123 +-{
4124 +- int ret;
4125 +-
4126 +- if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
4127 +- return -EINVAL;
4128 +-
4129 +- /*
4130 +- * Requesting insecure and blocking randomness at the same time makes
4131 +- * no sense.
4132 +- */
4133 +- if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
4134 +- return -EINVAL;
4135 +-
4136 +- if (count > INT_MAX)
4137 +- count = INT_MAX;
4138 +-
4139 +- if (!(flags & GRND_INSECURE) && !crng_ready()) {
4140 +- if (flags & GRND_NONBLOCK)
4141 +- return -EAGAIN;
4142 +- ret = wait_for_random_bytes();
4143 +- if (unlikely(ret))
4144 +- return ret;
4145 +- }
4146 +- return urandom_read_nowarn(NULL, buf, count, NULL);
4147 +-}
4148 +
4149 + /********************************************************************
4150 + *
4151 +- * Sysctl interface
4152 ++ * Sysctl interface.
4153 ++ *
4154 ++ * These are partly unused legacy knobs with dummy values to not break
4155 ++ * userspace and partly still useful things. They are usually accessible
4156 ++ * in /proc/sys/kernel/random/ and are as follows:
4157 ++ *
4158 ++ * - boot_id - a UUID representing the current boot.
4159 ++ *
4160 ++ * - uuid - a random UUID, different each time the file is read.
4161 ++ *
4162 ++ * - poolsize - the number of bits of entropy that the input pool can
4163 ++ * hold, tied to the POOL_BITS constant.
4164 ++ *
4165 ++ * - entropy_avail - the number of bits of entropy currently in the
4166 ++ * input pool. Always <= poolsize.
4167 ++ *
4168 ++ * - write_wakeup_threshold - the amount of entropy in the input pool
4169 ++ * below which write polls to /dev/random will unblock, requesting
4170 ++ * more entropy, tied to the POOL_READY_BITS constant. It is writable
4171 ++ * to avoid breaking old userspaces, but writing to it does not
4172 ++ * change any behavior of the RNG.
4173 ++ *
4174 ++ * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
4175 ++ * It is writable to avoid breaking old userspaces, but writing
4176 ++ * to it does not change any behavior of the RNG.
4177 + *
4178 + ********************************************************************/
4179 +
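All of the above live under /proc/sys/kernel/random/ and are readable by any process; a tiny reader:

#include <stdio.h>

static void show(const char *name)
{
    char path[128], line[128];
    FILE *f;

    snprintf(path, sizeof(path), "/proc/sys/kernel/random/%s", name);
    f = fopen(path, "r");
    if (f && fgets(line, sizeof(line), f))
        printf("%-24s %s", name, line); /* fgets keeps the newline */
    if (f)
        fclose(f);
}

int main(void)
{
    show("entropy_avail");
    show("poolsize");
    show("boot_id");
    show("uuid");
    return 0;
}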
4180 +@@ -2067,25 +1442,28 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
4181 +
4182 + #include <linux/sysctl.h>
4183 +
4184 +-static int min_write_thresh;
4185 +-static int max_write_thresh = INPUT_POOL_WORDS * 32;
4186 +-static int random_min_urandom_seed = 60;
4187 +-static char sysctl_bootid[16];
4188 ++static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
4189 ++static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
4190 ++static int sysctl_poolsize = POOL_BITS;
4191 ++static u8 sysctl_bootid[UUID_SIZE];
4192 +
4193 + /*
4194 + * This function is used to return both the bootid UUID, and random
4195 +- * UUID. The difference is in whether table->data is NULL; if it is,
4196 ++ * UUID. The difference is in whether table->data is NULL; if it is,
4197 + * then a new UUID is generated and returned to the user.
4198 +- *
4199 +- * If the user accesses this via the proc interface, the UUID will be
4200 +- * returned as an ASCII string in the standard UUID format; if via the
4201 +- * sysctl system call, as 16 bytes of binary data.
4202 + */
4203 +-static int proc_do_uuid(struct ctl_table *table, int write,
4204 +- void *buffer, size_t *lenp, loff_t *ppos)
4205 +-{
4206 +- struct ctl_table fake_table;
4207 +- unsigned char buf[64], tmp_uuid[16], *uuid;
4208 ++static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
4209 ++ size_t *lenp, loff_t *ppos)
4210 ++{
4211 ++ u8 tmp_uuid[UUID_SIZE], *uuid;
4212 ++ char uuid_string[UUID_STRING_LEN + 1];
4213 ++ struct ctl_table fake_table = {
4214 ++ .data = uuid_string,
4215 ++ .maxlen = UUID_STRING_LEN
4216 ++ };
4217 ++
4218 ++ if (write)
4219 ++ return -EPERM;
4220 +
4221 + uuid = table->data;
4222 + if (!uuid) {
4223 +@@ -2100,32 +1478,17 @@ static int proc_do_uuid(struct ctl_table *table, int write,
4224 + spin_unlock(&bootid_spinlock);
4225 + }
4226 +
4227 +- sprintf(buf, "%pU", uuid);
4228 +-
4229 +- fake_table.data = buf;
4230 +- fake_table.maxlen = sizeof(buf);
4231 +-
4232 +- return proc_dostring(&fake_table, write, buffer, lenp, ppos);
4233 ++ snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
4234 ++ return proc_dostring(&fake_table, 0, buf, lenp, ppos);
4235 + }
4236 +
4237 +-/*
4238 +- * Return entropy available scaled to integral bits
4239 +- */
4240 +-static int proc_do_entropy(struct ctl_table *table, int write,
4241 +- void *buffer, size_t *lenp, loff_t *ppos)
4242 ++/* The same as proc_dointvec, but writes don't change anything. */
4243 ++static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
4244 ++ size_t *lenp, loff_t *ppos)
4245 + {
4246 +- struct ctl_table fake_table;
4247 +- int entropy_count;
4248 +-
4249 +- entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
4250 +-
4251 +- fake_table.data = &entropy_count;
4252 +- fake_table.maxlen = sizeof(entropy_count);
4253 +-
4254 +- return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
4255 ++ return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
4256 + }
4257 +
4258 +-static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
4259 + extern struct ctl_table random_table[];
4260 + struct ctl_table random_table[] = {
4261 + {
4262 +@@ -2137,222 +1500,36 @@ struct ctl_table random_table[] = {
4263 + },
4264 + {
4265 + .procname = "entropy_avail",
4266 ++ .data = &input_pool.init_bits,
4267 + .maxlen = sizeof(int),
4268 + .mode = 0444,
4269 +- .proc_handler = proc_do_entropy,
4270 +- .data = &input_pool.entropy_count,
4271 ++ .proc_handler = proc_dointvec,
4272 + },
4273 + {
4274 + .procname = "write_wakeup_threshold",
4275 +- .data = &random_write_wakeup_bits,
4276 ++ .data = &sysctl_random_write_wakeup_bits,
4277 + .maxlen = sizeof(int),
4278 + .mode = 0644,
4279 +- .proc_handler = proc_dointvec_minmax,
4280 +- .extra1 = &min_write_thresh,
4281 +- .extra2 = &max_write_thresh,
4282 ++ .proc_handler = proc_do_rointvec,
4283 + },
4284 + {
4285 + .procname = "urandom_min_reseed_secs",
4286 +- .data = &random_min_urandom_seed,
4287 ++ .data = &sysctl_random_min_urandom_seed,
4288 + .maxlen = sizeof(int),
4289 + .mode = 0644,
4290 +- .proc_handler = proc_dointvec,
4291 ++ .proc_handler = proc_do_rointvec,
4292 + },
4293 + {
4294 + .procname = "boot_id",
4295 + .data = &sysctl_bootid,
4296 +- .maxlen = 16,
4297 + .mode = 0444,
4298 + .proc_handler = proc_do_uuid,
4299 + },
4300 + {
4301 + .procname = "uuid",
4302 +- .maxlen = 16,
4303 + .mode = 0444,
4304 + .proc_handler = proc_do_uuid,
4305 + },
4306 +-#ifdef ADD_INTERRUPT_BENCH
4307 +- {
4308 +- .procname = "add_interrupt_avg_cycles",
4309 +- .data = &avg_cycles,
4310 +- .maxlen = sizeof(avg_cycles),
4311 +- .mode = 0444,
4312 +- .proc_handler = proc_doulongvec_minmax,
4313 +- },
4314 +- {
4315 +- .procname = "add_interrupt_avg_deviation",
4316 +- .data = &avg_deviation,
4317 +- .maxlen = sizeof(avg_deviation),
4318 +- .mode = 0444,
4319 +- .proc_handler = proc_doulongvec_minmax,
4320 +- },
4321 +-#endif
4322 + { }
4323 + };
4324 +-#endif /* CONFIG_SYSCTL */
4325 +-
4326 +-struct batched_entropy {
4327 +- union {
4328 +- u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
4329 +- u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
4330 +- };
4331 +- unsigned int position;
4332 +- spinlock_t batch_lock;
4333 +-};
4334 +-
4335 +-/*
4336 +- * Get a random word for internal kernel use only. The quality of the random
4337 +- * number is good as /dev/urandom, but there is no backtrack protection, with
4338 +- * the goal of being quite fast and not depleting entropy. In order to ensure
4339 +- * that the randomness provided by this function is okay, the function
4340 +- * wait_for_random_bytes() should be called and return 0 at least once at any
4341 +- * point prior.
4342 +- */
4343 +-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
4344 +- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
4345 +-};
4346 +-
4347 +-u64 get_random_u64(void)
4348 +-{
4349 +- u64 ret;
4350 +- unsigned long flags;
4351 +- struct batched_entropy *batch;
4352 +- static void *previous;
4353 +-
4354 +- warn_unseeded_randomness(&previous);
4355 +-
4356 +- batch = raw_cpu_ptr(&batched_entropy_u64);
4357 +- spin_lock_irqsave(&batch->batch_lock, flags);
4358 +- if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
4359 +- extract_crng((u8 *)batch->entropy_u64);
4360 +- batch->position = 0;
4361 +- }
4362 +- ret = batch->entropy_u64[batch->position++];
4363 +- spin_unlock_irqrestore(&batch->batch_lock, flags);
4364 +- return ret;
4365 +-}
4366 +-EXPORT_SYMBOL(get_random_u64);
4367 +-
4368 +-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
4369 +- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
4370 +-};
4371 +-u32 get_random_u32(void)
4372 +-{
4373 +- u32 ret;
4374 +- unsigned long flags;
4375 +- struct batched_entropy *batch;
4376 +- static void *previous;
4377 +-
4378 +- warn_unseeded_randomness(&previous);
4379 +-
4380 +- batch = raw_cpu_ptr(&batched_entropy_u32);
4381 +- spin_lock_irqsave(&batch->batch_lock, flags);
4382 +- if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
4383 +- extract_crng((u8 *)batch->entropy_u32);
4384 +- batch->position = 0;
4385 +- }
4386 +- ret = batch->entropy_u32[batch->position++];
4387 +- spin_unlock_irqrestore(&batch->batch_lock, flags);
4388 +- return ret;
4389 +-}
4390 +-EXPORT_SYMBOL(get_random_u32);
4391 +-
4392 +-/* It's important to invalidate all potential batched entropy that might
4393 +- * be stored before the crng is initialized, which we can do lazily by
4394 +- * simply resetting the counter to zero so that it's re-extracted on the
4395 +- * next usage. */
4396 +-static void invalidate_batched_entropy(void)
4397 +-{
4398 +- int cpu;
4399 +- unsigned long flags;
4400 +-
4401 +- for_each_possible_cpu (cpu) {
4402 +- struct batched_entropy *batched_entropy;
4403 +-
4404 +- batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
4405 +- spin_lock_irqsave(&batched_entropy->batch_lock, flags);
4406 +- batched_entropy->position = 0;
4407 +- spin_unlock(&batched_entropy->batch_lock);
4408 +-
4409 +- batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
4410 +- spin_lock(&batched_entropy->batch_lock);
4411 +- batched_entropy->position = 0;
4412 +- spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
4413 +- }
4414 +-}
4415 +-
4416 +-/**
4417 +- * randomize_page - Generate a random, page aligned address
4418 +- * @start: The smallest acceptable address the caller will take.
4419 +- * @range: The size of the area, starting at @start, within which the
4420 +- * random address must fall.
4421 +- *
4422 +- * If @start + @range would overflow, @range is capped.
4423 +- *
4424 +- * NOTE: Historical use of randomize_range, which this replaces, presumed that
4425 +- * @start was already page aligned. We now align it regardless.
4426 +- *
4427 +- * Return: A page aligned address within [start, start + range). On error,
4428 +- * @start is returned.
4429 +- */
4430 +-unsigned long
4431 +-randomize_page(unsigned long start, unsigned long range)
4432 +-{
4433 +- if (!PAGE_ALIGNED(start)) {
4434 +- range -= PAGE_ALIGN(start) - start;
4435 +- start = PAGE_ALIGN(start);
4436 +- }
4437 +-
4438 +- if (start > ULONG_MAX - range)
4439 +- range = ULONG_MAX - start;
4440 +-
4441 +- range >>= PAGE_SHIFT;
4442 +-
4443 +- if (range == 0)
4444 +- return start;
4445 +-
4446 +- return start + (get_random_long() % range << PAGE_SHIFT);
4447 +-}
4448 +-
4449 +-/* Interface for in-kernel drivers of true hardware RNGs.
4450 +- * Those devices may produce endless random bits and will be throttled
4451 +- * when our pool is full.
4452 +- */
4453 +-void add_hwgenerator_randomness(const char *buffer, size_t count,
4454 +- size_t entropy)
4455 +-{
4456 +- struct entropy_store *poolp = &input_pool;
4457 +-
4458 +- if (unlikely(crng_init == 0)) {
4459 +- size_t ret = crng_fast_load(buffer, count);
4460 +- count -= ret;
4461 +- buffer += ret;
4462 +- if (!count || crng_init == 0)
4463 +- return;
4464 +- }
4465 +-
4466 +- /* Suspend writing if we're above the trickle threshold.
4467 +- * We'll be woken up again once below random_write_wakeup_thresh,
4468 +- * or when the calling thread is about to terminate.
4469 +- */
4470 +- wait_event_interruptible(random_write_wait,
4471 +- !system_wq || kthread_should_stop() ||
4472 +- ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
4473 +- mix_pool_bytes(poolp, buffer, count);
4474 +- credit_entropy_bits(poolp, entropy);
4475 +-}
4476 +-EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
4477 +-
4478 +-/* Handle random seed passed by bootloader.
4479 +- * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
4480 +- * it would be regarded as device data.
4481 +- * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
4482 +- */
4483 +-void add_bootloader_randomness(const void *buf, unsigned int size)
4484 +-{
4485 +- if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
4486 +- add_hwgenerator_randomness(buf, size, size * 8);
4487 +- else
4488 +- add_device_randomness(buf, size);
4489 +-}
4490 +-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
4491 ++#endif /* CONFIG_SYSCTL */
4492 +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
4493 +index b9ac357e465db..5d820037e2918 100644
4494 +--- a/drivers/hv/vmbus_drv.c
4495 ++++ b/drivers/hv/vmbus_drv.c
4496 +@@ -1351,7 +1351,7 @@ static void vmbus_isr(void)
4497 + tasklet_schedule(&hv_cpu->msg_dpc);
4498 + }
4499 +
4500 +- add_interrupt_randomness(hv_get_vector(), 0);
4501 ++ add_interrupt_randomness(hv_get_vector());
4502 + }
4503 +
4504 + /*
4505 +diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
4506 +index a776bb8e0e093..a24624353f9ed 100644
4507 +--- a/drivers/media/test-drivers/vim2m.c
4508 ++++ b/drivers/media/test-drivers/vim2m.c
4509 +@@ -1325,12 +1325,6 @@ static int vim2m_probe(struct platform_device *pdev)
4510 + vfd->lock = &dev->dev_mutex;
4511 + vfd->v4l2_dev = &dev->v4l2_dev;
4512 +
4513 +- ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
4514 +- if (ret) {
4515 +- v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
4516 +- goto error_v4l2;
4517 +- }
4518 +-
4519 + video_set_drvdata(vfd, dev);
4520 + v4l2_info(&dev->v4l2_dev,
4521 + "Device registered as /dev/video%d\n", vfd->num);
4522 +@@ -1353,12 +1347,20 @@ static int vim2m_probe(struct platform_device *pdev)
4523 + media_device_init(&dev->mdev);
4524 + dev->mdev.ops = &m2m_media_ops;
4525 + dev->v4l2_dev.mdev = &dev->mdev;
4526 ++#endif
4527 ++
4528 ++ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
4529 ++ if (ret) {
4530 ++ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
4531 ++ goto error_m2m;
4532 ++ }
4533 +
4534 ++#ifdef CONFIG_MEDIA_CONTROLLER
4535 + ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
4536 + MEDIA_ENT_F_PROC_VIDEO_SCALER);
4537 + if (ret) {
4538 + v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
4539 +- goto error_dev;
4540 ++ goto error_v4l2;
4541 + }
4542 +
4543 + ret = media_device_register(&dev->mdev);
4544 +@@ -1373,11 +1375,13 @@ static int vim2m_probe(struct platform_device *pdev)
4545 + error_m2m_mc:
4546 + v4l2_m2m_unregister_media_controller(dev->m2m_dev);
4547 + #endif
4548 +-error_dev:
4549 ++error_v4l2:
4550 + video_unregister_device(&dev->vfd);
4551 + /* vim2m_device_release called by video_unregister_device to release various objects */
4552 + return ret;
4553 +-error_v4l2:
4554 ++error_m2m:
4555 ++ v4l2_m2m_release(dev->m2m_dev);
4556 ++error_dev:
4557 + v4l2_device_unregister(&dev->v4l2_dev);
4558 + error_free:
4559 + kfree(dev);
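
The vim2m hunks above are an ordering fix: the video device is now registered only after the m2m device and media-controller state it depends on exist, and the error labels are renamed so that teardown runs in exact reverse order of setup. Below is a minimal sketch of that acquire-in-order, unwind-in-reverse idiom; the resource names are hypothetical, not vim2m's.

/* Probe-style error unwinding: release in reverse order of acquisition.
 * All names here are illustrative only. */
#include <stdio.h>

static int acquire_a(void) { puts("acquire A"); return 0; }
static int acquire_b(void) { puts("acquire B"); return 0; }
static int acquire_c(void) { puts("acquire C"); return -1; /* simulate failure */ }
static void release_a(void) { puts("release A"); }
static void release_b(void) { puts("release B"); }

static int probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto err_out;
	ret = acquire_b();
	if (ret)
		goto err_release_a;
	ret = acquire_c();	/* fails: unwind B, then A */
	if (ret)
		goto err_release_b;
	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
err_out:
	return ret;
}

int main(void) { return probe() ? 1 : 0; }
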
4560 +diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
4561 +index cb865b7ec3750..f208080243055 100644
4562 +--- a/drivers/net/Kconfig
4563 ++++ b/drivers/net/Kconfig
4564 +@@ -80,7 +80,6 @@ config WIREGUARD
4565 + select CRYPTO
4566 + select CRYPTO_LIB_CURVE25519
4567 + select CRYPTO_LIB_CHACHA20POLY1305
4568 +- select CRYPTO_LIB_BLAKE2S
4569 + select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
4570 + select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
4571 + select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
4572 +diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
4573 +index c0cfd9b36c0b5..720952b92e784 100644
4574 +--- a/drivers/net/wireguard/noise.c
4575 ++++ b/drivers/net/wireguard/noise.c
4576 +@@ -302,6 +302,41 @@ void wg_noise_set_static_identity_private_key(
4577 + static_identity->static_public, private_key);
4578 + }
4579 +
4580 ++static void hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen)
4581 ++{
4582 ++ struct blake2s_state state;
4583 ++ u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
4584 ++ u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
4585 ++ int i;
4586 ++
4587 ++ if (keylen > BLAKE2S_BLOCK_SIZE) {
4588 ++ blake2s_init(&state, BLAKE2S_HASH_SIZE);
4589 ++ blake2s_update(&state, key, keylen);
4590 ++ blake2s_final(&state, x_key);
4591 ++ } else
4592 ++ memcpy(x_key, key, keylen);
4593 ++
4594 ++ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
4595 ++ x_key[i] ^= 0x36;
4596 ++
4597 ++ blake2s_init(&state, BLAKE2S_HASH_SIZE);
4598 ++ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
4599 ++ blake2s_update(&state, in, inlen);
4600 ++ blake2s_final(&state, i_hash);
4601 ++
4602 ++ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
4603 ++ x_key[i] ^= 0x5c ^ 0x36;
4604 ++
4605 ++ blake2s_init(&state, BLAKE2S_HASH_SIZE);
4606 ++ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
4607 ++ blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
4608 ++ blake2s_final(&state, i_hash);
4609 ++
4610 ++ memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
4611 ++ memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
4612 ++ memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
4613 ++}
4614 ++
4615 + /* This is Hugo Krawczyk's HKDF:
4616 + * - https://eprint.iacr.org/2010/264.pdf
4617 + * - https://tools.ietf.org/html/rfc5869
4618 +@@ -322,14 +357,14 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
4619 + ((third_len || third_dst) && (!second_len || !second_dst))));
4620 +
4621 + /* Extract entropy from data into secret */
4622 +- blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
4623 ++ hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
4624 +
4625 + if (!first_dst || !first_len)
4626 + goto out;
4627 +
4628 + /* Expand first key: key = secret, data = 0x1 */
4629 + output[0] = 1;
4630 +- blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
4631 ++ hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
4632 + memcpy(first_dst, output, first_len);
4633 +
4634 + if (!second_dst || !second_len)
4635 +@@ -337,8 +372,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
4636 +
4637 + /* Expand second key: key = secret, data = first-key || 0x2 */
4638 + output[BLAKE2S_HASH_SIZE] = 2;
4639 +- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
4640 +- BLAKE2S_HASH_SIZE);
4641 ++ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
4642 + memcpy(second_dst, output, second_len);
4643 +
4644 + if (!third_dst || !third_len)
4645 +@@ -346,8 +380,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
4646 +
4647 + /* Expand third key: key = secret, data = second-key || 0x3 */
4648 + output[BLAKE2S_HASH_SIZE] = 3;
4649 +- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
4650 +- BLAKE2S_HASH_SIZE);
4651 ++ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
4652 + memcpy(third_dst, output, third_len);
4653 +
4654 + out:
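
The hmac() helper added above is plain HMAC (RFC 2104) instantiated with BLAKE2s, open-coded now that the shared blake2s256_hmac() export is gone: the key is padded to the 64-byte block size and XORed with the inner pad 0x36, and after the inner hash the line `x_key[i] ^= 0x5c ^ 0x36` flips the same buffer to the outer pad 0x5c in place. kdf() then uses it for the HKDF (RFC 5869) extract and expand steps, with counter bytes 0x1, 0x2, 0x3. A tiny self-contained check of the pad-flipping identity:

/* Checks the in-place ipad->opad conversion used by hmac() above:
 * (k ^ 0x36) ^ (0x5c ^ 0x36) == k ^ 0x5c for every byte value. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (int k = 0; k < 256; k++) {
		unsigned char x = (unsigned char)k ^ 0x36; /* inner-pad key byte */
		x ^= 0x5c ^ 0x36;                          /* flip to outer pad */
		assert(x == ((unsigned char)k ^ 0x5c));
	}
	puts("ipad/opad conversion holds for all byte values");
	return 0;
}
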
4655 +diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
4656 +index 902ac81699484..083ff72976cf0 100644
4657 +--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
4658 ++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
4659 +@@ -1351,9 +1351,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
4660 +
4661 + sec_len = *(pos++); len -= 1;
4662 +
4663 +- if (sec_len > 0 && sec_len <= len) {
4664 ++ if (sec_len > 0 &&
4665 ++ sec_len <= len &&
4666 ++ sec_len <= 32) {
4667 + ssid[ssid_index].SsidLength = sec_len;
4668 +- memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
4669 ++ memcpy(ssid[ssid_index].Ssid, pos, sec_len);
4670 + /* DBG_871X("%s COMBO_SCAN with specific ssid:%s, %d\n", __func__ */
4671 + /* , ssid[ssid_index].Ssid, ssid[ssid_index].SsidLength); */
4672 + ssid_index++;
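
The rtl8723bs change adds the bound that was missing: sec_len comes straight from the scan request, and Ssid[] is a 32-byte array, so without `sec_len <= 32` the memcpy() could overflow it. A minimal sketch of the full set of checks for a length-prefixed field follows; the names are illustrative, not the driver's.

/* Bounds-checking a length-prefixed field before copying: the length must
 * be positive, fit in the remaining input, and fit in the destination. */
#include <stdio.h>
#include <string.h>

#define SSID_MAX 32

static int parse_ssid(const unsigned char *pos, size_t remaining,
		      unsigned char *dst, size_t *dst_len)
{
	size_t sec_len;

	if (remaining < 1)
		return -1;
	sec_len = *pos++;
	remaining--;

	if (sec_len == 0 || sec_len > remaining || sec_len > SSID_MAX)
		return -1;	/* reject instead of overflowing dst */

	memcpy(dst, pos, sec_len);
	*dst_len = sec_len;
	return 0;
}

int main(void)
{
	unsigned char input[] = { 5, 'h', 'e', 'l', 'l', 'o' };
	unsigned char ssid[SSID_MAX];
	size_t len;

	if (parse_ssid(input, sizeof(input), ssid, &len) == 0)
		printf("parsed %zu-byte SSID\n", len);
	return 0;
}
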
4673 +diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h
4674 +index b471deac28ff8..4e30e1799e614 100644
4675 +--- a/include/crypto/blake2s.h
4676 ++++ b/include/crypto/blake2s.h
4677 +@@ -3,15 +3,14 @@
4678 + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved.
4679 + */
4680 +
4681 +-#ifndef BLAKE2S_H
4682 +-#define BLAKE2S_H
4683 ++#ifndef _CRYPTO_BLAKE2S_H
4684 ++#define _CRYPTO_BLAKE2S_H
4685 +
4686 ++#include <linux/bug.h>
4687 + #include <linux/types.h>
4688 + #include <linux/kernel.h>
4689 + #include <linux/string.h>
4690 +
4691 +-#include <asm/bug.h>
4692 +-
4693 + enum blake2s_lengths {
4694 + BLAKE2S_BLOCK_SIZE = 64,
4695 + BLAKE2S_HASH_SIZE = 32,
4696 +@@ -24,6 +23,7 @@ enum blake2s_lengths {
4697 + };
4698 +
4699 + struct blake2s_state {
4700 ++ /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
4701 + u32 h[8];
4702 + u32 t[2];
4703 + u32 f[2];
4704 +@@ -43,29 +43,34 @@ enum blake2s_iv {
4705 + BLAKE2S_IV7 = 0x5BE0CD19UL,
4706 + };
4707 +
4708 +-void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
4709 +-void blake2s_final(struct blake2s_state *state, u8 *out);
4710 +-
4711 +-static inline void blake2s_init_param(struct blake2s_state *state,
4712 +- const u32 param)
4713 ++static inline void __blake2s_init(struct blake2s_state *state, size_t outlen,
4714 ++ const void *key, size_t keylen)
4715 + {
4716 +- *state = (struct blake2s_state){{
4717 +- BLAKE2S_IV0 ^ param,
4718 +- BLAKE2S_IV1,
4719 +- BLAKE2S_IV2,
4720 +- BLAKE2S_IV3,
4721 +- BLAKE2S_IV4,
4722 +- BLAKE2S_IV5,
4723 +- BLAKE2S_IV6,
4724 +- BLAKE2S_IV7,
4725 +- }};
4726 ++ state->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen);
4727 ++ state->h[1] = BLAKE2S_IV1;
4728 ++ state->h[2] = BLAKE2S_IV2;
4729 ++ state->h[3] = BLAKE2S_IV3;
4730 ++ state->h[4] = BLAKE2S_IV4;
4731 ++ state->h[5] = BLAKE2S_IV5;
4732 ++ state->h[6] = BLAKE2S_IV6;
4733 ++ state->h[7] = BLAKE2S_IV7;
4734 ++ state->t[0] = 0;
4735 ++ state->t[1] = 0;
4736 ++ state->f[0] = 0;
4737 ++ state->f[1] = 0;
4738 ++ state->buflen = 0;
4739 ++ state->outlen = outlen;
4740 ++ if (keylen) {
4741 ++ memcpy(state->buf, key, keylen);
4742 ++ memset(&state->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen);
4743 ++ state->buflen = BLAKE2S_BLOCK_SIZE;
4744 ++ }
4745 + }
4746 +
4747 + static inline void blake2s_init(struct blake2s_state *state,
4748 + const size_t outlen)
4749 + {
4750 +- blake2s_init_param(state, 0x01010000 | outlen);
4751 +- state->outlen = outlen;
4752 ++ __blake2s_init(state, outlen, NULL, 0);
4753 + }
4754 +
4755 + static inline void blake2s_init_key(struct blake2s_state *state,
4756 +@@ -75,12 +80,12 @@ static inline void blake2s_init_key(struct blake2s_state *state,
4757 + WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE ||
4758 + !key || !keylen || keylen > BLAKE2S_KEY_SIZE));
4759 +
4760 +- blake2s_init_param(state, 0x01010000 | keylen << 8 | outlen);
4761 +- memcpy(state->buf, key, keylen);
4762 +- state->buflen = BLAKE2S_BLOCK_SIZE;
4763 +- state->outlen = outlen;
4764 ++ __blake2s_init(state, outlen, key, keylen);
4765 + }
4766 +
4767 ++void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
4768 ++void blake2s_final(struct blake2s_state *state, u8 *out);
4769 ++
4770 + static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
4771 + const size_t outlen, const size_t inlen,
4772 + const size_t keylen)
4773 +@@ -91,16 +96,9 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
4774 + outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE ||
4775 + (!key && keylen)));
4776 +
4777 +- if (keylen)
4778 +- blake2s_init_key(&state, outlen, key, keylen);
4779 +- else
4780 +- blake2s_init(&state, outlen);
4781 +-
4782 ++ __blake2s_init(&state, outlen, key, keylen);
4783 + blake2s_update(&state, in, inlen);
4784 + blake2s_final(&state, out);
4785 + }
4786 +
4787 +-void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
4788 +- const size_t keylen);
4789 +-
4790 +-#endif /* BLAKE2S_H */
4791 ++#endif /* _CRYPTO_BLAKE2S_H */
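
In __blake2s_init() above, h[0] folds the relevant BLAKE2s parameter-block fields into a single word: digest length in the low byte, key length in the next byte, and fanout and depth both fixed at 1 (the 0x01010000). A small standalone check of that encoding:

/* Shows how the parameter word in __blake2s_init() is built:
 * byte 0 = digest length, byte 1 = key length, bytes 2-3 = fanout/depth = 1. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t outlen = 32, keylen = 32;
	uint32_t param = 0x01010000 | keylen << 8 | outlen;

	printf("param = 0x%08" PRIx32 "\n", param); /* prints param = 0x01012020 */
	return 0;
}
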
4792 +diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
4793 +index dabaee6987186..b3ea73b819443 100644
4794 +--- a/include/crypto/chacha.h
4795 ++++ b/include/crypto/chacha.h
4796 +@@ -47,12 +47,19 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
4797 + hchacha_block_generic(state, out, nrounds);
4798 + }
4799 +
4800 ++enum chacha_constants { /* expand 32-byte k */
4801 ++ CHACHA_CONSTANT_EXPA = 0x61707865U,
4802 ++ CHACHA_CONSTANT_ND_3 = 0x3320646eU,
4803 ++ CHACHA_CONSTANT_2_BY = 0x79622d32U,
4804 ++ CHACHA_CONSTANT_TE_K = 0x6b206574U
4805 ++};
4806 ++
4807 + static inline void chacha_init_consts(u32 *state)
4808 + {
4809 +- state[0] = 0x61707865; /* "expa" */
4810 +- state[1] = 0x3320646e; /* "nd 3" */
4811 +- state[2] = 0x79622d32; /* "2-by" */
4812 +- state[3] = 0x6b206574; /* "te k" */
4813 ++ state[0] = CHACHA_CONSTANT_EXPA;
4814 ++ state[1] = CHACHA_CONSTANT_ND_3;
4815 ++ state[2] = CHACHA_CONSTANT_2_BY;
4816 ++ state[3] = CHACHA_CONSTANT_TE_K;
4817 + }
4818 +
4819 + void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
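
The new enum names spell out that the four ChaCha constants are nothing more than the ASCII string "expand 32-byte k" read as little-endian 32-bit words; naming them makes a one-digit typo much harder. A standalone check, assuming a little-endian host:

/* Verifies that the CHACHA_CONSTANT_* values above are "expand 32-byte k"
 * as little-endian u32 words. Assumes a little-endian host. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char sigma[16] = "expand 32-byte k";
	uint32_t w[4];

	memcpy(w, sigma, sizeof(w));
	assert(w[0] == 0x61707865u); /* "expa" */
	assert(w[1] == 0x3320646eu); /* "nd 3" */
	assert(w[2] == 0x79622d32u); /* "2-by" */
	assert(w[3] == 0x6b206574u); /* "te k" */
	puts("sigma constants match");
	return 0;
}
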
4820 +diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
4821 +index c4165126937e4..88e4d145f7cda 100644
4822 +--- a/include/crypto/drbg.h
4823 ++++ b/include/crypto/drbg.h
4824 +@@ -136,7 +136,7 @@ struct drbg_state {
4825 + const struct drbg_state_ops *d_ops;
4826 + const struct drbg_core *core;
4827 + struct drbg_string test_data;
4828 +- struct random_ready_callback random_ready;
4829 ++ struct notifier_block random_ready;
4830 + };
4831 +
4832 + static inline __u8 drbg_statelen(struct drbg_state *drbg)
4833 +diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
4834 +index 74ff77032e526..52363eee2b20e 100644
4835 +--- a/include/crypto/internal/blake2s.h
4836 ++++ b/include/crypto/internal/blake2s.h
4837 +@@ -1,24 +1,129 @@
4838 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */
4839 ++/*
4840 ++ * Helper functions for BLAKE2s implementations.
4841 ++ * Keep this in sync with the corresponding BLAKE2b header.
4842 ++ */
4843 +
4844 +-#ifndef BLAKE2S_INTERNAL_H
4845 +-#define BLAKE2S_INTERNAL_H
4846 ++#ifndef _CRYPTO_INTERNAL_BLAKE2S_H
4847 ++#define _CRYPTO_INTERNAL_BLAKE2S_H
4848 +
4849 + #include <crypto/blake2s.h>
4850 ++#include <crypto/internal/hash.h>
4851 ++#include <linux/string.h>
4852 ++
4853 ++void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
4854 ++ size_t nblocks, const u32 inc);
4855 ++
4856 ++void blake2s_compress(struct blake2s_state *state, const u8 *block,
4857 ++ size_t nblocks, const u32 inc);
4858 ++
4859 ++bool blake2s_selftest(void);
4860 ++
4861 ++static inline void blake2s_set_lastblock(struct blake2s_state *state)
4862 ++{
4863 ++ state->f[0] = -1;
4864 ++}
4865 ++
4866 ++/* Helper functions for BLAKE2s shared by the library and shash APIs */
4867 ++
4868 ++static __always_inline void
4869 ++__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
4870 ++ bool force_generic)
4871 ++{
4872 ++ const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
4873 ++
4874 ++ if (unlikely(!inlen))
4875 ++ return;
4876 ++ if (inlen > fill) {
4877 ++ memcpy(state->buf + state->buflen, in, fill);
4878 ++ if (force_generic)
4879 ++ blake2s_compress_generic(state, state->buf, 1,
4880 ++ BLAKE2S_BLOCK_SIZE);
4881 ++ else
4882 ++ blake2s_compress(state, state->buf, 1,
4883 ++ BLAKE2S_BLOCK_SIZE);
4884 ++ state->buflen = 0;
4885 ++ in += fill;
4886 ++ inlen -= fill;
4887 ++ }
4888 ++ if (inlen > BLAKE2S_BLOCK_SIZE) {
4889 ++ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
4890 ++ /* Hash one less (full) block than strictly possible */
4891 ++ if (force_generic)
4892 ++ blake2s_compress_generic(state, in, nblocks - 1,
4893 ++ BLAKE2S_BLOCK_SIZE);
4894 ++ else
4895 ++ blake2s_compress(state, in, nblocks - 1,
4896 ++ BLAKE2S_BLOCK_SIZE);
4897 ++ in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
4898 ++ inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
4899 ++ }
4900 ++ memcpy(state->buf + state->buflen, in, inlen);
4901 ++ state->buflen += inlen;
4902 ++}
4903 ++
4904 ++static __always_inline void
4905 ++__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic)
4906 ++{
4907 ++ blake2s_set_lastblock(state);
4908 ++ memset(state->buf + state->buflen, 0,
4909 ++ BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
4910 ++ if (force_generic)
4911 ++ blake2s_compress_generic(state, state->buf, 1, state->buflen);
4912 ++ else
4913 ++ blake2s_compress(state, state->buf, 1, state->buflen);
4914 ++ cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
4915 ++ memcpy(out, state->h, state->outlen);
4916 ++}
4917 ++
4918 ++/* Helper functions for shash implementations of BLAKE2s */
4919 +
4920 + struct blake2s_tfm_ctx {
4921 + u8 key[BLAKE2S_KEY_SIZE];
4922 + unsigned int keylen;
4923 + };
4924 +
4925 +-void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
4926 +- size_t nblocks, const u32 inc);
4927 ++static inline int crypto_blake2s_setkey(struct crypto_shash *tfm,
4928 ++ const u8 *key, unsigned int keylen)
4929 ++{
4930 ++ struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
4931 +
4932 +-void blake2s_compress_arch(struct blake2s_state *state,const u8 *block,
4933 +- size_t nblocks, const u32 inc);
4934 ++ if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE)
4935 ++ return -EINVAL;
4936 +
4937 +-static inline void blake2s_set_lastblock(struct blake2s_state *state)
4938 ++ memcpy(tctx->key, key, keylen);
4939 ++ tctx->keylen = keylen;
4940 ++
4941 ++ return 0;
4942 ++}
4943 ++
4944 ++static inline int crypto_blake2s_init(struct shash_desc *desc)
4945 + {
4946 +- state->f[0] = -1;
4947 ++ const struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
4948 ++ struct blake2s_state *state = shash_desc_ctx(desc);
4949 ++ unsigned int outlen = crypto_shash_digestsize(desc->tfm);
4950 ++
4951 ++ __blake2s_init(state, outlen, tctx->key, tctx->keylen);
4952 ++ return 0;
4953 ++}
4954 ++
4955 ++static inline int crypto_blake2s_update(struct shash_desc *desc,
4956 ++ const u8 *in, unsigned int inlen,
4957 ++ bool force_generic)
4958 ++{
4959 ++ struct blake2s_state *state = shash_desc_ctx(desc);
4960 ++
4961 ++ __blake2s_update(state, in, inlen, force_generic);
4962 ++ return 0;
4963 ++}
4964 ++
4965 ++static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out,
4966 ++ bool force_generic)
4967 ++{
4968 ++ struct blake2s_state *state = shash_desc_ctx(desc);
4969 ++
4970 ++ __blake2s_final(state, out, force_generic);
4971 ++ return 0;
4972 + }
4973 +
4974 +-#endif /* BLAKE2S_INTERNAL_H */
4975 ++#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */
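
__blake2s_update() above deliberately compresses one less full block than it could: at least one byte is always left in state->buf so that __blake2s_final() can push the last block through the compression function with the last-block flag set (f[0] = -1) and with the true residual length rather than a full-block increment. A reduced sketch of that buffering rule, with a print stub standing in for blake2s_compress() and an illustrative state layout:

/* "Keep at least one block buffered" rule for a 64-byte-block hash. */
#include <stdio.h>
#include <string.h>

#define BLOCK 64

struct state { unsigned char buf[BLOCK]; size_t buflen; };

static void compress(struct state *s, const unsigned char *blocks,
		     size_t nblocks, size_t inc)
{
	printf("compress %zu block(s), inc %zu\n", nblocks, inc);
	(void)s; (void)blocks;
}

static void update(struct state *s, const unsigned char *in, size_t inlen)
{
	size_t fill = BLOCK - s->buflen;

	if (!inlen)
		return;
	if (inlen > fill) {		/* buffer fills: flush it */
		memcpy(s->buf + s->buflen, in, fill);
		compress(s, s->buf, 1, BLOCK);
		s->buflen = 0;
		in += fill;
		inlen -= fill;
	}
	if (inlen > BLOCK) {		/* hash all but the last full block */
		size_t nblocks = (inlen + BLOCK - 1) / BLOCK;

		compress(s, in, nblocks - 1, BLOCK);
		in += BLOCK * (nblocks - 1);
		inlen -= BLOCK * (nblocks - 1);
	}
	memcpy(s->buf + s->buflen, in, inlen);	/* remainder stays for final */
	s->buflen += inlen;
}

int main(void)
{
	struct state s = { .buflen = 0 };
	unsigned char data[200] = { 0 };

	update(&s, data, sizeof(data));		/* 200 = 64*3 + 8 */
	printf("buffered %zu byte(s) for final\n", s.buflen);
	return 0;
}
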
4976 +diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
4977 +index 8fb893ed205e3..fc945f9df2c1d 100644
4978 +--- a/include/linux/cpuhotplug.h
4979 ++++ b/include/linux/cpuhotplug.h
4980 +@@ -61,6 +61,7 @@ enum cpuhp_state {
4981 + CPUHP_LUSTRE_CFS_DEAD,
4982 + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
4983 + CPUHP_PADATA_DEAD,
4984 ++ CPUHP_RANDOM_PREPARE,
4985 + CPUHP_WORKQUEUE_PREP,
4986 + CPUHP_POWER_NUMA_PREPARE,
4987 + CPUHP_HRTIMERS_PREPARE,
4988 +@@ -187,6 +188,7 @@ enum cpuhp_state {
4989 + CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
4990 + CPUHP_AP_WATCHDOG_ONLINE,
4991 + CPUHP_AP_WORKQUEUE_ONLINE,
4992 ++ CPUHP_AP_RANDOM_ONLINE,
4993 + CPUHP_AP_RCUTREE_ONLINE,
4994 + CPUHP_AP_BASE_CACHEINFO_ONLINE,
4995 + CPUHP_AP_ONLINE_DYN,
4996 +diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
4997 +index 8e6dd908da216..aa1d4da03538b 100644
4998 +--- a/include/linux/hw_random.h
4999 ++++ b/include/linux/hw_random.h
5000 +@@ -60,7 +60,5 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
5001 + /** Unregister a Hardware Random Number Generator driver. */
5002 + extern void hwrng_unregister(struct hwrng *rng);
5003 + extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
5004 +-/** Feed random bits into the pool. */
5005 +-extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
5006 +
5007 + #endif /* LINUX_HWRANDOM_H_ */
5008 +diff --git a/include/linux/mm.h b/include/linux/mm.h
5009 +index 289c26f055cdd..5b4d88faf114a 100644
5010 +--- a/include/linux/mm.h
5011 ++++ b/include/linux/mm.h
5012 +@@ -2585,6 +2585,7 @@ extern int install_special_mapping(struct mm_struct *mm,
5013 + unsigned long flags, struct page **pages);
5014 +
5015 + unsigned long randomize_stack_top(unsigned long stack_top);
5016 ++unsigned long randomize_page(unsigned long start, unsigned long range);
5017 +
5018 + extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
5019 +
5020 +diff --git a/include/linux/prandom.h b/include/linux/prandom.h
5021 +index 056d31317e499..a4aadd2dc153e 100644
5022 +--- a/include/linux/prandom.h
5023 ++++ b/include/linux/prandom.h
5024 +@@ -10,6 +10,7 @@
5025 +
5026 + #include <linux/types.h>
5027 + #include <linux/percpu.h>
5028 ++#include <linux/siphash.h>
5029 +
5030 + u32 prandom_u32(void);
5031 + void prandom_bytes(void *buf, size_t nbytes);
5032 +@@ -27,15 +28,10 @@ DECLARE_PER_CPU(unsigned long, net_rand_noise);
5033 + * The core SipHash round function. Each line can be executed in
5034 + * parallel given enough CPU resources.
5035 + */
5036 +-#define PRND_SIPROUND(v0, v1, v2, v3) ( \
5037 +- v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
5038 +- v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
5039 +- v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
5040 +- v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
5041 +-)
5042 ++#define PRND_SIPROUND(v0, v1, v2, v3) SIPHASH_PERMUTATION(v0, v1, v2, v3)
5043 +
5044 +-#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
5045 +-#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
5046 ++#define PRND_K0 (SIPHASH_CONST_0 ^ SIPHASH_CONST_2)
5047 ++#define PRND_K1 (SIPHASH_CONST_1 ^ SIPHASH_CONST_3)
5048 +
5049 + #elif BITS_PER_LONG == 32
5050 + /*
5051 +@@ -43,14 +39,9 @@ DECLARE_PER_CPU(unsigned long, net_rand_noise);
5052 + * This is weaker, but 32-bit machines are not used for high-traffic
5053 + * applications, so there is less output for an attacker to analyze.
5054 + */
5055 +-#define PRND_SIPROUND(v0, v1, v2, v3) ( \
5056 +- v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \
5057 +- v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
5058 +- v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \
5059 +- v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \
5060 +-)
5061 +-#define PRND_K0 0x6c796765
5062 +-#define PRND_K1 0x74656462
5063 ++#define PRND_SIPROUND(v0, v1, v2, v3) HSIPHASH_PERMUTATION(v0, v1, v2, v3)
5064 ++#define PRND_K0 (HSIPHASH_CONST_0 ^ HSIPHASH_CONST_2)
5065 ++#define PRND_K1 (HSIPHASH_CONST_1 ^ HSIPHASH_CONST_3)
5066 +
5067 + #else
5068 + #error Unsupported BITS_PER_LONG
5069 +diff --git a/include/linux/random.h b/include/linux/random.h
5070 +index f45b8be3e3c4e..917470c4490ac 100644
5071 +--- a/include/linux/random.h
5072 ++++ b/include/linux/random.h
5073 +@@ -1,9 +1,5 @@
5074 + /* SPDX-License-Identifier: GPL-2.0 */
5075 +-/*
5076 +- * include/linux/random.h
5077 +- *
5078 +- * Include file for the random number generator.
5079 +- */
5080 ++
5081 + #ifndef _LINUX_RANDOM_H
5082 + #define _LINUX_RANDOM_H
5083 +
5084 +@@ -14,41 +10,26 @@
5085 +
5086 + #include <uapi/linux/random.h>
5087 +
5088 +-struct random_ready_callback {
5089 +- struct list_head list;
5090 +- void (*func)(struct random_ready_callback *rdy);
5091 +- struct module *owner;
5092 +-};
5093 ++struct notifier_block;
5094 +
5095 +-extern void add_device_randomness(const void *, unsigned int);
5096 +-extern void add_bootloader_randomness(const void *, unsigned int);
5097 ++void add_device_randomness(const void *buf, size_t len);
5098 ++void add_bootloader_randomness(const void *buf, size_t len);
5099 ++void add_input_randomness(unsigned int type, unsigned int code,
5100 ++ unsigned int value) __latent_entropy;
5101 ++void add_interrupt_randomness(int irq) __latent_entropy;
5102 ++void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
5103 +
5104 + #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
5105 + static inline void add_latent_entropy(void)
5106 + {
5107 +- add_device_randomness((const void *)&latent_entropy,
5108 +- sizeof(latent_entropy));
5109 ++ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
5110 + }
5111 + #else
5112 +-static inline void add_latent_entropy(void) {}
5113 +-#endif
5114 +-
5115 +-extern void add_input_randomness(unsigned int type, unsigned int code,
5116 +- unsigned int value) __latent_entropy;
5117 +-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
5118 +-
5119 +-extern void get_random_bytes(void *buf, int nbytes);
5120 +-extern int wait_for_random_bytes(void);
5121 +-extern int __init rand_initialize(void);
5122 +-extern bool rng_is_initialized(void);
5123 +-extern int add_random_ready_callback(struct random_ready_callback *rdy);
5124 +-extern void del_random_ready_callback(struct random_ready_callback *rdy);
5125 +-extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
5126 +-
5127 +-#ifndef MODULE
5128 +-extern const struct file_operations random_fops, urandom_fops;
5129 ++static inline void add_latent_entropy(void) { }
5130 + #endif
5131 +
5132 ++void get_random_bytes(void *buf, size_t len);
5133 ++size_t __must_check get_random_bytes_arch(void *buf, size_t len);
5134 + u32 get_random_u32(void);
5135 + u64 get_random_u64(void);
5136 + static inline unsigned int get_random_int(void)
5137 +@@ -80,36 +61,38 @@ static inline unsigned long get_random_long(void)
5138 +
5139 + static inline unsigned long get_random_canary(void)
5140 + {
5141 +- unsigned long val = get_random_long();
5142 +-
5143 +- return val & CANARY_MASK;
5144 ++ return get_random_long() & CANARY_MASK;
5145 + }
5146 +
5147 ++int __init random_init(const char *command_line);
5148 ++bool rng_is_initialized(void);
5149 ++int wait_for_random_bytes(void);
5150 ++int register_random_ready_notifier(struct notifier_block *nb);
5151 ++int unregister_random_ready_notifier(struct notifier_block *nb);
5152 ++
5153 + /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
5154 + * Returns the result of the call to wait_for_random_bytes. */
5155 +-static inline int get_random_bytes_wait(void *buf, int nbytes)
5156 ++static inline int get_random_bytes_wait(void *buf, size_t nbytes)
5157 + {
5158 + int ret = wait_for_random_bytes();
5159 + get_random_bytes(buf, nbytes);
5160 + return ret;
5161 + }
5162 +
5163 +-#define declare_get_random_var_wait(var) \
5164 +- static inline int get_random_ ## var ## _wait(var *out) { \
5165 ++#define declare_get_random_var_wait(name, ret_type) \
5166 ++ static inline int get_random_ ## name ## _wait(ret_type *out) { \
5167 + int ret = wait_for_random_bytes(); \
5168 + if (unlikely(ret)) \
5169 + return ret; \
5170 +- *out = get_random_ ## var(); \
5171 ++ *out = get_random_ ## name(); \
5172 + return 0; \
5173 + }
5174 +-declare_get_random_var_wait(u32)
5175 +-declare_get_random_var_wait(u64)
5176 +-declare_get_random_var_wait(int)
5177 +-declare_get_random_var_wait(long)
5178 ++declare_get_random_var_wait(u32, u32)
5179 ++declare_get_random_var_wait(u64, u64)

5180 ++declare_get_random_var_wait(int, unsigned int)
5181 ++declare_get_random_var_wait(long, unsigned long)
5182 + #undef declare_get_random_var
5183 +
5184 +-unsigned long randomize_page(unsigned long start, unsigned long range);
5185 +-
5186 + /*
5187 + * This is designed to be standalone for just prandom
5188 + * users, but for now we include it from <linux/random.h>
5189 +@@ -120,22 +103,10 @@ unsigned long randomize_page(unsigned long start, unsigned long range);
5190 + #ifdef CONFIG_ARCH_RANDOM
5191 + # include <asm/archrandom.h>
5192 + #else
5193 +-static inline bool __must_check arch_get_random_long(unsigned long *v)
5194 +-{
5195 +- return false;
5196 +-}
5197 +-static inline bool __must_check arch_get_random_int(unsigned int *v)
5198 +-{
5199 +- return false;
5200 +-}
5201 +-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
5202 +-{
5203 +- return false;
5204 +-}
5205 +-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
5206 +-{
5207 +- return false;
5208 +-}
5209 ++static inline bool __must_check arch_get_random_long(unsigned long *v) { return false; }
5210 ++static inline bool __must_check arch_get_random_int(unsigned int *v) { return false; }
5211 ++static inline bool __must_check arch_get_random_seed_long(unsigned long *v) { return false; }
5212 ++static inline bool __must_check arch_get_random_seed_int(unsigned int *v) { return false; }
5213 + #endif
5214 +
5215 + /*
5216 +@@ -158,4 +129,13 @@ static inline bool __init arch_get_random_long_early(unsigned long *v)
5217 + }
5218 + #endif
5219 +
5220 ++#ifdef CONFIG_SMP
5221 ++int random_prepare_cpu(unsigned int cpu);
5222 ++int random_online_cpu(unsigned int cpu);
5223 ++#endif
5224 ++
5225 ++#ifndef MODULE
5226 ++extern const struct file_operations random_fops, urandom_fops;
5227 ++#endif
5228 ++
5229 + #endif /* _LINUX_RANDOM_H */
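
declare_get_random_var_wait() now takes the output type as a separate second argument so the int and long variants can write through unsigned pointers. Hand-expanding the u32 case gives roughly the following; the userspace stubs merely stand in for the kernel functions:

/* Hand-expanded analog of declare_get_random_var_wait(u32, u32). */
#include <stdint.h>
#include <stdio.h>

static int wait_for_random_bytes(void) { return 0; }	/* stub: always ready */
static uint32_t get_random_u32(void) { return 4; }	/* stub value */

static inline int get_random_u32_wait(uint32_t *out)
{
	int ret = wait_for_random_bytes();

	if (ret)
		return ret;
	*out = get_random_u32();
	return 0;
}

int main(void)
{
	uint32_t v;

	if (get_random_u32_wait(&v) == 0)
		printf("got %u\n", (unsigned int)v);
	return 0;
}
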
5230 +diff --git a/include/linux/security.h b/include/linux/security.h
5231 +index 35355429648e3..330029ef7e894 100644
5232 +--- a/include/linux/security.h
5233 ++++ b/include/linux/security.h
5234 +@@ -121,10 +121,12 @@ enum lockdown_reason {
5235 + LOCKDOWN_DEBUGFS,
5236 + LOCKDOWN_XMON_WR,
5237 + LOCKDOWN_BPF_WRITE_USER,
5238 ++ LOCKDOWN_DBG_WRITE_KERNEL,
5239 + LOCKDOWN_INTEGRITY_MAX,
5240 + LOCKDOWN_KCORE,
5241 + LOCKDOWN_KPROBES,
5242 + LOCKDOWN_BPF_READ,
5243 ++ LOCKDOWN_DBG_READ_KERNEL,
5244 + LOCKDOWN_PERF,
5245 + LOCKDOWN_TRACEFS,
5246 + LOCKDOWN_XMON_RW,
5247 +diff --git a/include/linux/siphash.h b/include/linux/siphash.h
5248 +index 0cda61855d907..0bb5ecd507bef 100644
5249 +--- a/include/linux/siphash.h
5250 ++++ b/include/linux/siphash.h
5251 +@@ -136,4 +136,32 @@ static inline u32 hsiphash(const void *data, size_t len,
5252 + return ___hsiphash_aligned(data, len, key);
5253 + }
5254 +
5255 ++/*
5256 ++ * These macros expose the raw SipHash and HalfSipHash permutations.
5257 ++ * Do not use them directly! If you think you have a use for them,
5258 ++ * be sure to CC the maintainer of this file explaining why.
5259 ++ */
5260 ++
5261 ++#define SIPHASH_PERMUTATION(a, b, c, d) ( \
5262 ++ (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
5263 ++ (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
5264 ++ (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
5265 ++ (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
5266 ++
5267 ++#define SIPHASH_CONST_0 0x736f6d6570736575ULL
5268 ++#define SIPHASH_CONST_1 0x646f72616e646f6dULL
5269 ++#define SIPHASH_CONST_2 0x6c7967656e657261ULL
5270 ++#define SIPHASH_CONST_3 0x7465646279746573ULL
5271 ++
5272 ++#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
5273 ++ (a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \
5274 ++ (c) += (d), (d) = rol32((d), 8), (d) ^= (c), \
5275 ++ (a) += (d), (d) = rol32((d), 7), (d) ^= (a), \
5276 ++ (c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))
5277 ++
5278 ++#define HSIPHASH_CONST_0 0U
5279 ++#define HSIPHASH_CONST_1 0U
5280 ++#define HSIPHASH_CONST_2 0x6c796765U
5281 ++#define HSIPHASH_CONST_3 0x74656462U
5282 ++
5283 + #endif /* _LINUX_SIPHASH_H */
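
SIPHASH_PERMUTATION above is exactly one SipRound from the SipHash paper, and the four SIPHASH_CONST_* words are the ASCII of "somepseudorandomlygeneratedbytes" split into little-endian 64-bit words. A standalone rendering of one round applied to that initial state:

/* One SipHash round, matching SIPHASH_PERMUTATION, applied to the
 * standard initial constants. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t rol64(uint64_t v, unsigned int s)
{
	return (v << s) | (v >> (64 - s));
}

static void sipround(uint64_t *v0, uint64_t *v1, uint64_t *v2, uint64_t *v3)
{
	*v0 += *v1; *v1 = rol64(*v1, 13); *v1 ^= *v0; *v0 = rol64(*v0, 32);
	*v2 += *v3; *v3 = rol64(*v3, 16); *v3 ^= *v2;
	*v0 += *v3; *v3 = rol64(*v3, 21); *v3 ^= *v0;
	*v2 += *v1; *v1 = rol64(*v1, 17); *v1 ^= *v2; *v2 = rol64(*v2, 32);
}

int main(void)
{
	uint64_t v0 = 0x736f6d6570736575ULL, v1 = 0x646f72616e646f6dULL;
	uint64_t v2 = 0x6c7967656e657261ULL, v3 = 0x7465646279746573ULL;

	sipround(&v0, &v1, &v2, &v3);
	printf("%016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 "\n",
	       v0, v1, v2, v3);
	return 0;
}
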
5284 +diff --git a/include/linux/timex.h b/include/linux/timex.h
5285 +index ce08597636705..2efab9a806a9d 100644
5286 +--- a/include/linux/timex.h
5287 ++++ b/include/linux/timex.h
5288 +@@ -62,6 +62,8 @@
5289 + #include <linux/types.h>
5290 + #include <linux/param.h>
5291 +
5292 ++unsigned long random_get_entropy_fallback(void);
5293 ++
5294 + #include <asm/timex.h>
5295 +
5296 + #ifndef random_get_entropy
5297 +@@ -74,8 +76,14 @@
5298 + *
5299 + * By default we use get_cycles() for this purpose, but individual
5300 + * architectures may override this in their asm/timex.h header file.
5301 ++ * If a given arch does not have get_cycles(), then we fall back to
5302 ++ * using random_get_entropy_fallback().
5303 + */
5304 +-#define random_get_entropy() get_cycles()
5305 ++#ifdef get_cycles
5306 ++#define random_get_entropy() ((unsigned long)get_cycles())
5307 ++#else
5308 ++#define random_get_entropy() random_get_entropy_fallback()
5309 ++#endif
5310 + #endif
5311 +
5312 + /*
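
random_get_entropy() now resolves in three steps: an architecture may define it directly in asm/timex.h; failing that, it wraps get_cycles() when that macro exists; and only otherwise does it fall back to random_get_entropy_fallback(), which the timekeeping.c hunk later in this patch implements by reading the raw clocksource. The same compile-time selection, reduced to a standalone program (build with -DGET_CYCLES to take the cycle-counter path; the values are placeholders):

/* Sketch of the #ifdef cascade in the timex.h hunk above. */
#include <stdio.h>

#ifdef GET_CYCLES
static unsigned long get_cycles(void) { return 12345; }	/* stub counter */
#define random_get_entropy() ((unsigned long)get_cycles())
#else
static unsigned long random_get_entropy_fallback(void) { return 67890; }
#define random_get_entropy() random_get_entropy_fallback()
#endif

int main(void)
{
	printf("entropy word: %lu\n", random_get_entropy());
	return 0;
}
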
5313 +diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
5314 +index ca6a3ea9057ec..d4d611064a76f 100644
5315 +--- a/include/net/inet_hashtables.h
5316 ++++ b/include/net/inet_hashtables.h
5317 +@@ -419,7 +419,7 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
5318 + }
5319 +
5320 + int __inet_hash_connect(struct inet_timewait_death_row *death_row,
5321 +- struct sock *sk, u32 port_offset,
5322 ++ struct sock *sk, u64 port_offset,
5323 + int (*check_established)(struct inet_timewait_death_row *,
5324 + struct sock *, __u16,
5325 + struct inet_timewait_sock **));
5326 +diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
5327 +index d7d2495f83c27..dac91aa38c5af 100644
5328 +--- a/include/net/secure_seq.h
5329 ++++ b/include/net/secure_seq.h
5330 +@@ -4,8 +4,8 @@
5331 +
5332 + #include <linux/types.h>
5333 +
5334 +-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
5335 +-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
5336 ++u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
5337 ++u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
5338 + __be16 dport);
5339 + u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
5340 + __be16 sport, __be16 dport);
5341 +diff --git a/include/trace/events/random.h b/include/trace/events/random.h
5342 +deleted file mode 100644
5343 +index 9570a10cb949b..0000000000000
5344 +--- a/include/trace/events/random.h
5345 ++++ /dev/null
5346 +@@ -1,330 +0,0 @@
5347 +-/* SPDX-License-Identifier: GPL-2.0 */
5348 +-#undef TRACE_SYSTEM
5349 +-#define TRACE_SYSTEM random
5350 +-
5351 +-#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
5352 +-#define _TRACE_RANDOM_H
5353 +-
5354 +-#include <linux/writeback.h>
5355 +-#include <linux/tracepoint.h>
5356 +-
5357 +-TRACE_EVENT(add_device_randomness,
5358 +- TP_PROTO(int bytes, unsigned long IP),
5359 +-
5360 +- TP_ARGS(bytes, IP),
5361 +-
5362 +- TP_STRUCT__entry(
5363 +- __field( int, bytes )
5364 +- __field(unsigned long, IP )
5365 +- ),
5366 +-
5367 +- TP_fast_assign(
5368 +- __entry->bytes = bytes;
5369 +- __entry->IP = IP;
5370 +- ),
5371 +-
5372 +- TP_printk("bytes %d caller %pS",
5373 +- __entry->bytes, (void *)__entry->IP)
5374 +-);
5375 +-
5376 +-DECLARE_EVENT_CLASS(random__mix_pool_bytes,
5377 +- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
5378 +-
5379 +- TP_ARGS(pool_name, bytes, IP),
5380 +-
5381 +- TP_STRUCT__entry(
5382 +- __field( const char *, pool_name )
5383 +- __field( int, bytes )
5384 +- __field(unsigned long, IP )
5385 +- ),
5386 +-
5387 +- TP_fast_assign(
5388 +- __entry->pool_name = pool_name;
5389 +- __entry->bytes = bytes;
5390 +- __entry->IP = IP;
5391 +- ),
5392 +-
5393 +- TP_printk("%s pool: bytes %d caller %pS",
5394 +- __entry->pool_name, __entry->bytes, (void *)__entry->IP)
5395 +-);
5396 +-
5397 +-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
5398 +- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
5399 +-
5400 +- TP_ARGS(pool_name, bytes, IP)
5401 +-);
5402 +-
5403 +-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
5404 +- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
5405 +-
5406 +- TP_ARGS(pool_name, bytes, IP)
5407 +-);
5408 +-
5409 +-TRACE_EVENT(credit_entropy_bits,
5410 +- TP_PROTO(const char *pool_name, int bits, int entropy_count,
5411 +- unsigned long IP),
5412 +-
5413 +- TP_ARGS(pool_name, bits, entropy_count, IP),
5414 +-
5415 +- TP_STRUCT__entry(
5416 +- __field( const char *, pool_name )
5417 +- __field( int, bits )
5418 +- __field( int, entropy_count )
5419 +- __field(unsigned long, IP )
5420 +- ),
5421 +-
5422 +- TP_fast_assign(
5423 +- __entry->pool_name = pool_name;
5424 +- __entry->bits = bits;
5425 +- __entry->entropy_count = entropy_count;
5426 +- __entry->IP = IP;
5427 +- ),
5428 +-
5429 +- TP_printk("%s pool: bits %d entropy_count %d caller %pS",
5430 +- __entry->pool_name, __entry->bits,
5431 +- __entry->entropy_count, (void *)__entry->IP)
5432 +-);
5433 +-
5434 +-TRACE_EVENT(push_to_pool,
5435 +- TP_PROTO(const char *pool_name, int pool_bits, int input_bits),
5436 +-
5437 +- TP_ARGS(pool_name, pool_bits, input_bits),
5438 +-
5439 +- TP_STRUCT__entry(
5440 +- __field( const char *, pool_name )
5441 +- __field( int, pool_bits )
5442 +- __field( int, input_bits )
5443 +- ),
5444 +-
5445 +- TP_fast_assign(
5446 +- __entry->pool_name = pool_name;
5447 +- __entry->pool_bits = pool_bits;
5448 +- __entry->input_bits = input_bits;
5449 +- ),
5450 +-
5451 +- TP_printk("%s: pool_bits %d input_pool_bits %d",
5452 +- __entry->pool_name, __entry->pool_bits,
5453 +- __entry->input_bits)
5454 +-);
5455 +-
5456 +-TRACE_EVENT(debit_entropy,
5457 +- TP_PROTO(const char *pool_name, int debit_bits),
5458 +-
5459 +- TP_ARGS(pool_name, debit_bits),
5460 +-
5461 +- TP_STRUCT__entry(
5462 +- __field( const char *, pool_name )
5463 +- __field( int, debit_bits )
5464 +- ),
5465 +-
5466 +- TP_fast_assign(
5467 +- __entry->pool_name = pool_name;
5468 +- __entry->debit_bits = debit_bits;
5469 +- ),
5470 +-
5471 +- TP_printk("%s: debit_bits %d", __entry->pool_name,
5472 +- __entry->debit_bits)
5473 +-);
5474 +-
5475 +-TRACE_EVENT(add_input_randomness,
5476 +- TP_PROTO(int input_bits),
5477 +-
5478 +- TP_ARGS(input_bits),
5479 +-
5480 +- TP_STRUCT__entry(
5481 +- __field( int, input_bits )
5482 +- ),
5483 +-
5484 +- TP_fast_assign(
5485 +- __entry->input_bits = input_bits;
5486 +- ),
5487 +-
5488 +- TP_printk("input_pool_bits %d", __entry->input_bits)
5489 +-);
5490 +-
5491 +-TRACE_EVENT(add_disk_randomness,
5492 +- TP_PROTO(dev_t dev, int input_bits),
5493 +-
5494 +- TP_ARGS(dev, input_bits),
5495 +-
5496 +- TP_STRUCT__entry(
5497 +- __field( dev_t, dev )
5498 +- __field( int, input_bits )
5499 +- ),
5500 +-
5501 +- TP_fast_assign(
5502 +- __entry->dev = dev;
5503 +- __entry->input_bits = input_bits;
5504 +- ),
5505 +-
5506 +- TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev),
5507 +- MINOR(__entry->dev), __entry->input_bits)
5508 +-);
5509 +-
5510 +-TRACE_EVENT(xfer_secondary_pool,
5511 +- TP_PROTO(const char *pool_name, int xfer_bits, int request_bits,
5512 +- int pool_entropy, int input_entropy),
5513 +-
5514 +- TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy,
5515 +- input_entropy),
5516 +-
5517 +- TP_STRUCT__entry(
5518 +- __field( const char *, pool_name )
5519 +- __field( int, xfer_bits )
5520 +- __field( int, request_bits )
5521 +- __field( int, pool_entropy )
5522 +- __field( int, input_entropy )
5523 +- ),
5524 +-
5525 +- TP_fast_assign(
5526 +- __entry->pool_name = pool_name;
5527 +- __entry->xfer_bits = xfer_bits;
5528 +- __entry->request_bits = request_bits;
5529 +- __entry->pool_entropy = pool_entropy;
5530 +- __entry->input_entropy = input_entropy;
5531 +- ),
5532 +-
5533 +- TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d "
5534 +- "input_entropy %d", __entry->pool_name, __entry->xfer_bits,
5535 +- __entry->request_bits, __entry->pool_entropy,
5536 +- __entry->input_entropy)
5537 +-);
5538 +-
5539 +-DECLARE_EVENT_CLASS(random__get_random_bytes,
5540 +- TP_PROTO(int nbytes, unsigned long IP),
5541 +-
5542 +- TP_ARGS(nbytes, IP),
5543 +-
5544 +- TP_STRUCT__entry(
5545 +- __field( int, nbytes )
5546 +- __field(unsigned long, IP )
5547 +- ),
5548 +-
5549 +- TP_fast_assign(
5550 +- __entry->nbytes = nbytes;
5551 +- __entry->IP = IP;
5552 +- ),
5553 +-
5554 +- TP_printk("nbytes %d caller %pS", __entry->nbytes, (void *)__entry->IP)
5555 +-);
5556 +-
5557 +-DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
5558 +- TP_PROTO(int nbytes, unsigned long IP),
5559 +-
5560 +- TP_ARGS(nbytes, IP)
5561 +-);
5562 +-
5563 +-DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
5564 +- TP_PROTO(int nbytes, unsigned long IP),
5565 +-
5566 +- TP_ARGS(nbytes, IP)
5567 +-);
5568 +-
5569 +-DECLARE_EVENT_CLASS(random__extract_entropy,
5570 +- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
5571 +- unsigned long IP),
5572 +-
5573 +- TP_ARGS(pool_name, nbytes, entropy_count, IP),
5574 +-
5575 +- TP_STRUCT__entry(
5576 +- __field( const char *, pool_name )
5577 +- __field( int, nbytes )
5578 +- __field( int, entropy_count )
5579 +- __field(unsigned long, IP )
5580 +- ),
5581 +-
5582 +- TP_fast_assign(
5583 +- __entry->pool_name = pool_name;
5584 +- __entry->nbytes = nbytes;
5585 +- __entry->entropy_count = entropy_count;
5586 +- __entry->IP = IP;
5587 +- ),
5588 +-
5589 +- TP_printk("%s pool: nbytes %d entropy_count %d caller %pS",
5590 +- __entry->pool_name, __entry->nbytes, __entry->entropy_count,
5591 +- (void *)__entry->IP)
5592 +-);
5593 +-
5594 +-
5595 +-DEFINE_EVENT(random__extract_entropy, extract_entropy,
5596 +- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
5597 +- unsigned long IP),
5598 +-
5599 +- TP_ARGS(pool_name, nbytes, entropy_count, IP)
5600 +-);
5601 +-
5602 +-DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
5603 +- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
5604 +- unsigned long IP),
5605 +-
5606 +- TP_ARGS(pool_name, nbytes, entropy_count, IP)
5607 +-);
5608 +-
5609 +-TRACE_EVENT(random_read,
5610 +- TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left),
5611 +-
5612 +- TP_ARGS(got_bits, need_bits, pool_left, input_left),
5613 +-
5614 +- TP_STRUCT__entry(
5615 +- __field( int, got_bits )
5616 +- __field( int, need_bits )
5617 +- __field( int, pool_left )
5618 +- __field( int, input_left )
5619 +- ),
5620 +-
5621 +- TP_fast_assign(
5622 +- __entry->got_bits = got_bits;
5623 +- __entry->need_bits = need_bits;
5624 +- __entry->pool_left = pool_left;
5625 +- __entry->input_left = input_left;
5626 +- ),
5627 +-
5628 +- TP_printk("got_bits %d still_needed_bits %d "
5629 +- "blocking_pool_entropy_left %d input_entropy_left %d",
5630 +- __entry->got_bits, __entry->got_bits, __entry->pool_left,
5631 +- __entry->input_left)
5632 +-);
5633 +-
5634 +-TRACE_EVENT(urandom_read,
5635 +- TP_PROTO(int got_bits, int pool_left, int input_left),
5636 +-
5637 +- TP_ARGS(got_bits, pool_left, input_left),
5638 +-
5639 +- TP_STRUCT__entry(
5640 +- __field( int, got_bits )
5641 +- __field( int, pool_left )
5642 +- __field( int, input_left )
5643 +- ),
5644 +-
5645 +- TP_fast_assign(
5646 +- __entry->got_bits = got_bits;
5647 +- __entry->pool_left = pool_left;
5648 +- __entry->input_left = input_left;
5649 +- ),
5650 +-
5651 +- TP_printk("got_bits %d nonblocking_pool_entropy_left %d "
5652 +- "input_entropy_left %d", __entry->got_bits,
5653 +- __entry->pool_left, __entry->input_left)
5654 +-);
5655 +-
5656 +-TRACE_EVENT(prandom_u32,
5657 +-
5658 +- TP_PROTO(unsigned int ret),
5659 +-
5660 +- TP_ARGS(ret),
5661 +-
5662 +- TP_STRUCT__entry(
5663 +- __field( unsigned int, ret)
5664 +- ),
5665 +-
5666 +- TP_fast_assign(
5667 +- __entry->ret = ret;
5668 +- ),
5669 +-
5670 +- TP_printk("ret=%u" , __entry->ret)
5671 +-);
5672 +-
5673 +-#endif /* _TRACE_RANDOM_H */
5674 +-
5675 +-/* This part must be outside protection */
5676 +-#include <trace/define_trace.h>
5677 +diff --git a/init/main.c b/init/main.c
5678 +index 3526eaec7508f..d8bfe61b5a889 100644
5679 +--- a/init/main.c
5680 ++++ b/init/main.c
5681 +@@ -952,21 +952,18 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
5682 + hrtimers_init();
5683 + softirq_init();
5684 + timekeeping_init();
5685 ++ time_init();
5686 +
5687 + /*
5688 + * For best initial stack canary entropy, prepare it after:
5689 + * - setup_arch() for any UEFI RNG entropy and boot cmdline access
5690 +- * - timekeeping_init() for ktime entropy used in rand_initialize()
5691 +- * - rand_initialize() to get any arch-specific entropy like RDRAND
5692 +- * - add_latent_entropy() to get any latent entropy
5693 +- * - adding command line entropy
5694 ++ * - timekeeping_init() for ktime entropy used in random_init()
5695 ++ * - time_init() for making random_get_entropy() work on some platforms
5696 ++ * - random_init() to initialize the RNG from early entropy sources
5697 + */
5698 +- rand_initialize();
5699 +- add_latent_entropy();
5700 +- add_device_randomness(command_line, strlen(command_line));
5701 ++ random_init(command_line);
5702 + boot_init_stack_canary();
5703 +
5704 +- time_init();
5705 + perf_event_init();
5706 + profile_init();
5707 + call_function_init();
5708 +diff --git a/kernel/cpu.c b/kernel/cpu.c
5709 +index c06ced18f78ad..3c9ee966c56a5 100644
5710 +--- a/kernel/cpu.c
5711 ++++ b/kernel/cpu.c
5712 +@@ -34,6 +34,7 @@
5713 + #include <linux/scs.h>
5714 + #include <linux/percpu-rwsem.h>
5715 + #include <linux/cpuset.h>
5716 ++#include <linux/random.h>
5717 +
5718 + #include <trace/events/power.h>
5719 + #define CREATE_TRACE_POINTS
5720 +@@ -1581,6 +1582,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
5721 + .startup.single = perf_event_init_cpu,
5722 + .teardown.single = perf_event_exit_cpu,
5723 + },
5724 ++ [CPUHP_RANDOM_PREPARE] = {
5725 ++ .name = "random:prepare",
5726 ++ .startup.single = random_prepare_cpu,
5727 ++ .teardown.single = NULL,
5728 ++ },
5729 + [CPUHP_WORKQUEUE_PREP] = {
5730 + .name = "workqueue:prepare",
5731 + .startup.single = workqueue_prepare_cpu,
5732 +@@ -1697,6 +1703,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
5733 + .startup.single = workqueue_online_cpu,
5734 + .teardown.single = workqueue_offline_cpu,
5735 + },
5736 ++ [CPUHP_AP_RANDOM_ONLINE] = {
5737 ++ .name = "random:online",
5738 ++ .startup.single = random_online_cpu,
5739 ++ .teardown.single = NULL,
5740 ++ },
5741 + [CPUHP_AP_RCUTREE_ONLINE] = {
5742 + .name = "RCU/tree:online",
5743 + .startup.single = rcutree_online_cpu,
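
The two new static entries give the RNG one callback while a CPU is being prepared (before workqueues) and another once it is fully online. Reserved enum slots like these are for core code; an ordinary driver would hook the same events dynamically, roughly as in this hedged kernel-side sketch (the callback names and what they do are illustrative only):

/* Hedged sketch: dynamic CPU hotplug registration via CPUHP_AP_ONLINE_DYN. */
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/module.h>

static int my_online(unsigned int cpu)
{
	/* per-CPU setup; runs as the CPU comes online */
	return 0;
}

static int my_offline(unsigned int cpu)
{
	/* per-CPU teardown before the CPU goes away */
	return 0;
}

static int __init my_init(void)
{
	int ret;

	/* DYN states return the allocated slot number (>= 0) on success */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				my_online, my_offline);
	return ret < 0 ? ret : 0;
}
module_init(my_init);
MODULE_LICENSE("GPL");
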
5744 +diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
5745 +index 8661eb2b17711..0f31b22abe8d9 100644
5746 +--- a/kernel/debug/debug_core.c
5747 ++++ b/kernel/debug/debug_core.c
5748 +@@ -56,6 +56,7 @@
5749 + #include <linux/vmacache.h>
5750 + #include <linux/rcupdate.h>
5751 + #include <linux/irq.h>
5752 ++#include <linux/security.h>
5753 +
5754 + #include <asm/cacheflush.h>
5755 + #include <asm/byteorder.h>
5756 +@@ -756,6 +757,29 @@ cpu_master_loop:
5757 + continue;
5758 + kgdb_connected = 0;
5759 + } else {
5760 ++ /*
5761 ++ * This is a brutal way to interfere with the debugger
5762 ++ * and prevent gdb being used to poke at kernel memory.
5763 ++ * This could cause trouble if lockdown is applied when
5764 ++ * there is already an active gdb session. For now the
5765 ++ * answer is simply "don't do that". Typically lockdown
5766 ++ * *will* be applied before the debug core gets started
5767 ++ * so only developers using kgdb for fairly advanced
5768 ++ * early kernel debug can be bitten by this. Hopefully
5769 ++ * they are sophisticated enough to take care of
5770 ++ * themselves, especially with help from the lockdown
5771 ++ * message printed on the console!
5772 ++ */
5773 ++ if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) {
5774 ++ if (IS_ENABLED(CONFIG_KGDB_KDB)) {
5775 ++ /* Switch back to kdb if possible... */
5776 ++ dbg_kdb_mode = 1;
5777 ++ continue;
5778 ++ } else {
5779 ++ /* ... otherwise just bail */
5780 ++ break;
5781 ++ }
5782 ++ }
5783 + error = gdb_serial_stub(ks);
5784 + }
5785 +
5786 +diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
5787 +index 930ac1b25ec7c..4e09fab52faf5 100644
5788 +--- a/kernel/debug/kdb/kdb_main.c
5789 ++++ b/kernel/debug/kdb/kdb_main.c
5790 +@@ -45,6 +45,7 @@
5791 + #include <linux/proc_fs.h>
5792 + #include <linux/uaccess.h>
5793 + #include <linux/slab.h>
5794 ++#include <linux/security.h>
5795 + #include "kdb_private.h"
5796 +
5797 + #undef MODULE_PARAM_PREFIX
5798 +@@ -197,10 +198,62 @@ struct task_struct *kdb_curr_task(int cpu)
5799 + }
5800 +
5801 + /*
5802 +- * Check whether the flags of the current command and the permissions
5803 +- * of the kdb console has allow a command to be run.
5804 ++ * Update the permissions flags (kdb_cmd_enabled) to match the
5805 ++ * current lockdown state.
5806 ++ *
5807 ++ * Within this function the calls to security_locked_down() are "lazy". We
5808 ++ * avoid calling them if the current value of kdb_cmd_enabled already excludes
5809 ++ * flags that might be subject to lockdown. Additionally we deliberately check
5810 ++ * the lockdown flags independently (even though read lockdown implies write
5811 ++ * lockdown) since that results in both simpler code and clearer messages to
5812 ++ * the user on first-time debugger entry.
5813 ++ *
5814 ++ * The permission masks during a read+write lockdown permit the following
5815 ++ * flags: INSPECT, SIGNAL, REBOOT (and ALWAYS_SAFE).
5816 ++ *
5817 ++ * The INSPECT commands are not blocked during lockdown because they are
5818 ++ * not arbitrary memory reads. INSPECT covers the backtrace family (sometimes
5819 ++ * forcing them to have no arguments) and lsmod. These commands do expose
5820 ++ * some kernel state but do not allow the developer seated at the console to
5821 ++ * choose what state is reported. SIGNAL and REBOOT should not be controversial,
5822 ++ * given these are allowed for root during lockdown already.
5823 ++ */
5824 ++static void kdb_check_for_lockdown(void)
5825 ++{
5826 ++ const int write_flags = KDB_ENABLE_MEM_WRITE |
5827 ++ KDB_ENABLE_REG_WRITE |
5828 ++ KDB_ENABLE_FLOW_CTRL;
5829 ++ const int read_flags = KDB_ENABLE_MEM_READ |
5830 ++ KDB_ENABLE_REG_READ;
5831 ++
5832 ++ bool need_to_lockdown_write = false;
5833 ++ bool need_to_lockdown_read = false;
5834 ++
5835 ++ if (kdb_cmd_enabled & (KDB_ENABLE_ALL | write_flags))
5836 ++ need_to_lockdown_write =
5837 ++ security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL);
5838 ++
5839 ++ if (kdb_cmd_enabled & (KDB_ENABLE_ALL | read_flags))
5840 ++ need_to_lockdown_read =
5841 ++ security_locked_down(LOCKDOWN_DBG_READ_KERNEL);
5842 ++
5843 ++ /* De-compose KDB_ENABLE_ALL if required */
5844 ++ if (need_to_lockdown_write || need_to_lockdown_read)
5845 ++ if (kdb_cmd_enabled & KDB_ENABLE_ALL)
5846 ++ kdb_cmd_enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL;
5847 ++
5848 ++ if (need_to_lockdown_write)
5849 ++ kdb_cmd_enabled &= ~write_flags;
5850 ++
5851 ++ if (need_to_lockdown_read)
5852 ++ kdb_cmd_enabled &= ~read_flags;
5853 ++}
5854 ++
5855 ++/*
5856 ++ * Check whether the flags of the current command, the permissions of the kdb
5857 ++ * console and the lockdown state allow a command to be run.
5858 + */
5859 +-static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
5860 ++static bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
5861 + bool no_args)
5862 + {
5863 + /* permissions comes from userspace so needs massaging slightly */
5864 +@@ -1194,6 +1247,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
5865 + kdb_curr_task(raw_smp_processor_id());
5866 +
5867 + KDB_DEBUG_STATE("kdb_local 1", reason);
5868 ++
5869 ++ kdb_check_for_lockdown();
5870 ++
5871 + kdb_go_count = 0;
5872 + if (reason == KDB_REASON_DEBUG) {
5873 + /* special case below */
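
The decomposition step in kdb_check_for_lockdown() above matters because KDB_ENABLE_ALL is a single catch-all bit: clearing only the read or write feature bits from a mask that still contains ALL would leave everything enabled, so ALL is first expanded into the full per-feature mask. The same logic with made-up bit values (the real ones live in include/linux/kdb.h):

/* Sketch of the KDB_ENABLE_ALL decomposition, with illustrative bits. */
#include <stdio.h>

#define EN_MEM_READ  0x01
#define EN_MEM_WRITE 0x02
#define EN_SIGNAL    0x04
#define EN_ALL       0x80	/* catch-all bit, not itself a feature */
#define EN_MASK      (EN_MEM_READ | EN_MEM_WRITE | EN_SIGNAL | EN_ALL)

int main(void)
{
	int enabled = EN_ALL;

	/* Expand the catch-all into individual bits before masking ... */
	if (enabled & EN_ALL)
		enabled = EN_MASK & ~EN_ALL;

	/* ... so clearing write access actually takes effect. */
	enabled &= ~EN_MEM_WRITE;

	printf("enabled = 0x%02x\n", enabled); /* 0x05: read + signal */
	return 0;
}
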
5874 +diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
5875 +index 762a928e18f92..8806444a68550 100644
5876 +--- a/kernel/irq/handle.c
5877 ++++ b/kernel/irq/handle.c
5878 +@@ -195,7 +195,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
5879 +
5880 + retval = __handle_irq_event_percpu(desc, &flags);
5881 +
5882 +- add_interrupt_randomness(desc->irq_data.irq, flags);
5883 ++ add_interrupt_randomness(desc->irq_data.irq);
5884 +
5885 + if (!noirqdebug)
5886 + note_interrupt(desc, retval);
5887 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
5888 +index cc4dc2857a870..e12ce2821dba5 100644
5889 +--- a/kernel/time/timekeeping.c
5890 ++++ b/kernel/time/timekeeping.c
5891 +@@ -17,6 +17,7 @@
5892 + #include <linux/clocksource.h>
5893 + #include <linux/jiffies.h>
5894 + #include <linux/time.h>
5895 ++#include <linux/timex.h>
5896 + #include <linux/tick.h>
5897 + #include <linux/stop_machine.h>
5898 + #include <linux/pvclock_gtod.h>
5899 +@@ -2378,6 +2379,20 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc)
5900 + return 0;
5901 + }
5902 +
5903 ++/**
5904 ++ * random_get_entropy_fallback - Returns the raw clock source value,
5905 ++ * used by random.c for platforms with no valid random_get_entropy().
5906 ++ */
5907 ++unsigned long random_get_entropy_fallback(void)
5908 ++{
5909 ++ struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
5910 ++ struct clocksource *clock = READ_ONCE(tkr->clock);
5911 ++
5912 ++ if (unlikely(timekeeping_suspended || !clock))
5913 ++ return 0;
5914 ++ return clock->read(clock);
5915 ++}
5916 ++EXPORT_SYMBOL_GPL(random_get_entropy_fallback);
5917 +
5918 + /**
5919 + * do_adjtimex() - Accessor function to NTP __do_adjtimex function
5920 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
5921 +index 95f909540587c..3656fa8837834 100644
5922 +--- a/lib/Kconfig.debug
5923 ++++ b/lib/Kconfig.debug
5924 +@@ -1426,8 +1426,7 @@ config WARN_ALL_UNSEEDED_RANDOM
5925 + so architecture maintainers really need to do what they can
5926 + to get the CRNG seeded sooner after the system is booted.
5927 + However, since users cannot do anything actionable to
5928 +- address this, by default the kernel will issue only a single
5929 +- warning for the first use of unseeded randomness.
5930 ++ address this, by default this option is disabled.
5931 +
5932 + Say Y here if you want to receive warnings for all uses of
5933 + unseeded randomness. This will be of use primarily for
5934 +diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
5935 +index 14c032de276e6..af3da5a8bde8d 100644
5936 +--- a/lib/crypto/Kconfig
5937 ++++ b/lib/crypto/Kconfig
5938 +@@ -1,7 +1,5 @@
5939 + # SPDX-License-Identifier: GPL-2.0
5940 +
5941 +-comment "Crypto library routines"
5942 +-
5943 + config CRYPTO_LIB_AES
5944 + tristate
5945 +
5946 +@@ -9,14 +7,14 @@ config CRYPTO_LIB_ARC4
5947 + tristate
5948 +
5949 + config CRYPTO_ARCH_HAVE_LIB_BLAKE2S
5950 +- tristate
5951 ++ bool
5952 + help
5953 + Declares whether the architecture provides an arch-specific
5954 + accelerated implementation of the Blake2s library interface,
5955 + either builtin or as a module.
5956 +
5957 + config CRYPTO_LIB_BLAKE2S_GENERIC
5958 +- tristate
5959 ++ def_bool !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
5960 + help
5961 + This symbol can be depended upon by arch implementations of the
5962 + Blake2s library interface that require the generic code as a
5963 +@@ -24,15 +22,6 @@ config CRYPTO_LIB_BLAKE2S_GENERIC
5964 + implementation is enabled, this implementation serves the users
5965 + of CRYPTO_LIB_BLAKE2S.
5966 +
5967 +-config CRYPTO_LIB_BLAKE2S
5968 +- tristate "BLAKE2s hash function library"
5969 +- depends on CRYPTO_ARCH_HAVE_LIB_BLAKE2S || !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
5970 +- select CRYPTO_LIB_BLAKE2S_GENERIC if CRYPTO_ARCH_HAVE_LIB_BLAKE2S=n
5971 +- help
5972 +- Enable the Blake2s library interface. This interface may be fulfilled
5973 +- by either the generic implementation or an arch-specific one, if one
5974 +- is available and enabled.
5975 +-
5976 + config CRYPTO_ARCH_HAVE_LIB_CHACHA
5977 + tristate
5978 + help
5979 +@@ -51,7 +40,7 @@ config CRYPTO_LIB_CHACHA_GENERIC
5980 + of CRYPTO_LIB_CHACHA.
5981 +
5982 + config CRYPTO_LIB_CHACHA
5983 +- tristate "ChaCha library interface"
5984 ++ tristate
5985 + depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
5986 + select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
5987 + help
5988 +@@ -76,7 +65,7 @@ config CRYPTO_LIB_CURVE25519_GENERIC
5989 + of CRYPTO_LIB_CURVE25519.
5990 +
5991 + config CRYPTO_LIB_CURVE25519
5992 +- tristate "Curve25519 scalar multiplication library"
5993 ++ tristate
5994 + depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
5995 + select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
5996 + help
5997 +@@ -111,7 +100,7 @@ config CRYPTO_LIB_POLY1305_GENERIC
5998 + of CRYPTO_LIB_POLY1305.
5999 +
6000 + config CRYPTO_LIB_POLY1305
6001 +- tristate "Poly1305 library interface"
6002 ++ tristate
6003 + depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
6004 + select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
6005 + help
6006 +@@ -120,7 +109,7 @@ config CRYPTO_LIB_POLY1305
6007 + is available and enabled.
6008 +
6009 + config CRYPTO_LIB_CHACHA20POLY1305
6010 +- tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
6011 ++ tristate
6012 + depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
6013 + depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
6014 + select CRYPTO_LIB_CHACHA
6015 +diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
6016 +index 3a435629d9ce9..26be2bbe09c59 100644
6017 +--- a/lib/crypto/Makefile
6018 ++++ b/lib/crypto/Makefile
6019 +@@ -10,11 +10,10 @@ libaes-y := aes.o
6020 + obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
6021 + libarc4-y := arc4.o
6022 +
6023 +-obj-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += libblake2s-generic.o
6024 +-libblake2s-generic-y += blake2s-generic.o
6025 +-
6026 +-obj-$(CONFIG_CRYPTO_LIB_BLAKE2S) += libblake2s.o
6027 +-libblake2s-y += blake2s.o
6028 ++# blake2s is used by the /dev/random driver which is always builtin
6029 ++obj-y += libblake2s.o
6030 ++libblake2s-y := blake2s.o
6031 ++libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += blake2s-generic.o
6032 +
6033 + obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o
6034 + libchacha20poly1305-y += chacha20poly1305.o
6035 +diff --git a/lib/crypto/blake2s-generic.c b/lib/crypto/blake2s-generic.c
6036 +index 04ff8df245136..75ccb3e633e65 100644
6037 +--- a/lib/crypto/blake2s-generic.c
6038 ++++ b/lib/crypto/blake2s-generic.c
6039 +@@ -37,7 +37,11 @@ static inline void blake2s_increment_counter(struct blake2s_state *state,
6040 + state->t[1] += (state->t[0] < inc);
6041 + }
6042 +
6043 +-void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
6044 ++void blake2s_compress(struct blake2s_state *state, const u8 *block,
6045 ++ size_t nblocks, const u32 inc)
6046 ++ __weak __alias(blake2s_compress_generic);
6047 ++
6048 ++void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
6049 + size_t nblocks, const u32 inc)
6050 + {
6051 + u32 m[16];
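
The generic compressor now doubles as the default blake2s_compress(): the __weak __alias declaration makes blake2s_compress() resolve to blake2s_compress_generic() unless an architecture links in a strong definition of its own. A self-contained sketch of the pattern (names are illustrative):

    /* Weak-alias override pattern: fast_op() falls back to generic_op()
     * unless some other object file provides a strong fast_op(). */
    void generic_op(int x)
    {
            /* portable implementation */
    }
    void fast_op(int x) __weak __alias(generic_op);
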
6052 +diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c
6053 +index 79ef404a990d2..409e4b7287704 100644
6054 +--- a/lib/crypto/blake2s-selftest.c
6055 ++++ b/lib/crypto/blake2s-selftest.c
6056 +@@ -3,7 +3,7 @@
6057 + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved.
6058 + */
6059 +
6060 +-#include <crypto/blake2s.h>
6061 ++#include <crypto/internal/blake2s.h>
6062 + #include <linux/string.h>
6063 +
6064 + /*
6065 +@@ -15,7 +15,6 @@
6066 + * #include <stdio.h>
6067 + *
6068 + * #include <openssl/evp.h>
6069 +- * #include <openssl/hmac.h>
6070 + *
6071 + * #define BLAKE2S_TESTVEC_COUNT 256
6072 + *
6073 +@@ -58,16 +57,6 @@
6074 + * }
6075 + * printf("};\n\n");
6076 + *
6077 +- * printf("static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n");
6078 +- *
6079 +- * HMAC(EVP_blake2s256(), key, sizeof(key), buf, sizeof(buf), hash, NULL);
6080 +- * print_vec(hash, BLAKE2S_OUTBYTES);
6081 +- *
6082 +- * HMAC(EVP_blake2s256(), buf, sizeof(buf), key, sizeof(key), hash, NULL);
6083 +- * print_vec(hash, BLAKE2S_OUTBYTES);
6084 +- *
6085 +- * printf("};\n");
6086 +- *
6087 + * return 0;
6088 + *}
6089 + */
6090 +@@ -554,15 +543,6 @@ static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
6091 + 0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, },
6092 + };
6093 +
6094 +-static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
6095 +- { 0xce, 0xe1, 0x57, 0x69, 0x82, 0xdc, 0xbf, 0x43, 0xad, 0x56, 0x4c, 0x70,
6096 +- 0xed, 0x68, 0x16, 0x96, 0xcf, 0xa4, 0x73, 0xe8, 0xe8, 0xfc, 0x32, 0x79,
6097 +- 0x08, 0x0a, 0x75, 0x82, 0xda, 0x3f, 0x05, 0x11, },
6098 +- { 0x77, 0x2f, 0x0c, 0x71, 0x41, 0xf4, 0x4b, 0x2b, 0xb3, 0xc6, 0xb6, 0xf9,
6099 +- 0x60, 0xde, 0xe4, 0x52, 0x38, 0x66, 0xe8, 0xbf, 0x9b, 0x96, 0xc4, 0x9f,
6100 +- 0x60, 0xd9, 0x24, 0x37, 0x99, 0xd6, 0xec, 0x31, },
6101 +-};
6102 +-
6103 + bool __init blake2s_selftest(void)
6104 + {
6105 + u8 key[BLAKE2S_KEY_SIZE];
6106 +@@ -607,16 +587,5 @@ bool __init blake2s_selftest(void)
6107 + }
6108 + }
6109 +
6110 +- if (success) {
6111 +- blake2s256_hmac(hash, buf, key, sizeof(buf), sizeof(key));
6112 +- success &= !memcmp(hash, blake2s_hmac_testvecs[0], BLAKE2S_HASH_SIZE);
6113 +-
6114 +- blake2s256_hmac(hash, key, buf, sizeof(key), sizeof(buf));
6115 +- success &= !memcmp(hash, blake2s_hmac_testvecs[1], BLAKE2S_HASH_SIZE);
6116 +-
6117 +- if (!success)
6118 +- pr_err("blake2s256_hmac self-test: FAIL\n");
6119 +- }
6120 +-
6121 + return success;
6122 + }
6123 +diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
6124 +index 41025a30c524c..80b194f9a0a09 100644
6125 +--- a/lib/crypto/blake2s.c
6126 ++++ b/lib/crypto/blake2s.c
6127 +@@ -15,98 +15,21 @@
6128 + #include <linux/module.h>
6129 + #include <linux/init.h>
6130 + #include <linux/bug.h>
6131 +-#include <asm/unaligned.h>
6132 +-
6133 +-bool blake2s_selftest(void);
6134 +
6135 + void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
6136 + {
6137 +- const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
6138 +-
6139 +- if (unlikely(!inlen))
6140 +- return;
6141 +- if (inlen > fill) {
6142 +- memcpy(state->buf + state->buflen, in, fill);
6143 +- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S))
6144 +- blake2s_compress_arch(state, state->buf, 1,
6145 +- BLAKE2S_BLOCK_SIZE);
6146 +- else
6147 +- blake2s_compress_generic(state, state->buf, 1,
6148 +- BLAKE2S_BLOCK_SIZE);
6149 +- state->buflen = 0;
6150 +- in += fill;
6151 +- inlen -= fill;
6152 +- }
6153 +- if (inlen > BLAKE2S_BLOCK_SIZE) {
6154 +- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
6155 +- /* Hash one less (full) block than strictly possible */
6156 +- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S))
6157 +- blake2s_compress_arch(state, in, nblocks - 1,
6158 +- BLAKE2S_BLOCK_SIZE);
6159 +- else
6160 +- blake2s_compress_generic(state, in, nblocks - 1,
6161 +- BLAKE2S_BLOCK_SIZE);
6162 +- in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
6163 +- inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
6164 +- }
6165 +- memcpy(state->buf + state->buflen, in, inlen);
6166 +- state->buflen += inlen;
6167 ++ __blake2s_update(state, in, inlen, false);
6168 + }
6169 + EXPORT_SYMBOL(blake2s_update);
6170 +
6171 + void blake2s_final(struct blake2s_state *state, u8 *out)
6172 + {
6173 + WARN_ON(IS_ENABLED(DEBUG) && !out);
6174 +- blake2s_set_lastblock(state);
6175 +- memset(state->buf + state->buflen, 0,
6176 +- BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
6177 +- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S))
6178 +- blake2s_compress_arch(state, state->buf, 1, state->buflen);
6179 +- else
6180 +- blake2s_compress_generic(state, state->buf, 1, state->buflen);
6181 +- cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
6182 +- memcpy(out, state->h, state->outlen);
6183 ++ __blake2s_final(state, out, false);
6184 + memzero_explicit(state, sizeof(*state));
6185 + }
6186 + EXPORT_SYMBOL(blake2s_final);
6187 +
6188 +-void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
6189 +- const size_t keylen)
6190 +-{
6191 +- struct blake2s_state state;
6192 +- u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
6193 +- u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
6194 +- int i;
6195 +-
6196 +- if (keylen > BLAKE2S_BLOCK_SIZE) {
6197 +- blake2s_init(&state, BLAKE2S_HASH_SIZE);
6198 +- blake2s_update(&state, key, keylen);
6199 +- blake2s_final(&state, x_key);
6200 +- } else
6201 +- memcpy(x_key, key, keylen);
6202 +-
6203 +- for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
6204 +- x_key[i] ^= 0x36;
6205 +-
6206 +- blake2s_init(&state, BLAKE2S_HASH_SIZE);
6207 +- blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
6208 +- blake2s_update(&state, in, inlen);
6209 +- blake2s_final(&state, i_hash);
6210 +-
6211 +- for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
6212 +- x_key[i] ^= 0x5c ^ 0x36;
6213 +-
6214 +- blake2s_init(&state, BLAKE2S_HASH_SIZE);
6215 +- blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
6216 +- blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
6217 +- blake2s_final(&state, i_hash);
6218 +-
6219 +- memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
6220 +- memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
6221 +- memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
6222 +-}
6223 +-EXPORT_SYMBOL(blake2s256_hmac);
6224 +-
6225 + static int __init mod_init(void)
6226 + {
6227 + if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
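
blake2s_update() and blake2s_final() shrink to one-line wrappers around __blake2s_update()/__blake2s_final(), with a bool choosing the generic compressor; the buffering logic deleted above moves into those shared helpers (in <crypto/internal/blake2s.h>, per the include change in the selftest). A hedged sketch of the dispatch that flag implies, with the buffering elided:

    /* Sketch: one flag selects arch vs. generic compression. The real
     * helper also carries the block-buffering removed above. */
    static inline void compress(struct blake2s_state *state, const u8 *block,
                                size_t nblocks, u32 inc, bool force_generic)
    {
            if (force_generic)
                    blake2s_compress_generic(state, block, nblocks, inc);
            else
                    blake2s_compress(state, block, nblocks, inc);
    }
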
6228 +diff --git a/lib/random32.c b/lib/random32.c
6229 +index 4d0e05e471d72..f0ab17c2244be 100644
6230 +--- a/lib/random32.c
6231 ++++ b/lib/random32.c
6232 +@@ -39,8 +39,9 @@
6233 + #include <linux/random.h>
6234 + #include <linux/sched.h>
6235 + #include <linux/bitops.h>
6236 ++#include <linux/slab.h>
6237 ++#include <linux/notifier.h>
6238 + #include <asm/unaligned.h>
6239 +-#include <trace/events/random.h>
6240 +
6241 + /**
6242 + * prandom_u32_state - seeded pseudo-random number generator.
6243 +@@ -386,7 +387,6 @@ u32 prandom_u32(void)
6244 + struct siprand_state *state = get_cpu_ptr(&net_rand_state);
6245 + u32 res = siprand_u32(state);
6246 +
6247 +- trace_prandom_u32(res);
6248 + put_cpu_ptr(&net_rand_state);
6249 + return res;
6250 + }
6251 +@@ -552,9 +552,11 @@ static void prandom_reseed(struct timer_list *unused)
6252 + * To avoid worrying about whether it's safe to delay that interrupt
6253 + * long enough to seed all CPUs, just schedule an immediate timer event.
6254 + */
6255 +-static void prandom_timer_start(struct random_ready_callback *unused)
6256 ++static int prandom_timer_start(struct notifier_block *nb,
6257 ++ unsigned long action, void *data)
6258 + {
6259 + mod_timer(&seed_timer, jiffies);
6260 ++ return 0;
6261 + }
6262 +
6263 + #ifdef CONFIG_RANDOM32_SELFTEST
6264 +@@ -618,13 +620,13 @@ core_initcall(prandom32_state_selftest);
6265 + */
6266 + static int __init prandom_init_late(void)
6267 + {
6268 +- static struct random_ready_callback random_ready = {
6269 +- .func = prandom_timer_start
6270 ++ static struct notifier_block random_ready = {
6271 ++ .notifier_call = prandom_timer_start
6272 + };
6273 +- int ret = add_random_ready_callback(&random_ready);
6274 ++ int ret = register_random_ready_notifier(&random_ready);
6275 +
6276 + if (ret == -EALREADY) {
6277 +- prandom_timer_start(&random_ready);
6278 ++ prandom_timer_start(&random_ready, 0, NULL);
6279 + ret = 0;
6280 + }
6281 + return ret;
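
The prandom late-seed hook moves from the removed random_ready_callback API to a random-readiness notifier; register_random_ready_notifier() returns -EALREADY when the CRNG is already seeded, in which case the caller runs the handler itself. The same register-or-run-now idiom, as a sketch:

    /* Sketch of the registration idiom used above. */
    static int on_crng_ready(struct notifier_block *nb,
                             unsigned long action, void *data)
    {
            /* kick work that was waiting for the CRNG */
            return 0;
    }

    static struct notifier_block crng_nb = { .notifier_call = on_crng_ready };

    static int __init my_init(void)
    {
            int ret = register_random_ready_notifier(&crng_nb);

            if (ret == -EALREADY) {         /* already seeded: run now */
                    on_crng_ready(&crng_nb, 0, NULL);
                    ret = 0;
            }
            return ret;
    }
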
6282 +diff --git a/lib/sha1.c b/lib/sha1.c
6283 +index 49257a915bb60..5ad4e49482728 100644
6284 +--- a/lib/sha1.c
6285 ++++ b/lib/sha1.c
6286 +@@ -9,6 +9,7 @@
6287 + #include <linux/kernel.h>
6288 + #include <linux/export.h>
6289 + #include <linux/bitops.h>
6290 ++#include <linux/string.h>
6291 + #include <crypto/sha.h>
6292 + #include <asm/unaligned.h>
6293 +
6294 +@@ -55,7 +56,8 @@
6295 + #define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
6296 + __u32 TEMP = input(t); setW(t, TEMP); \
6297 + E += TEMP + rol32(A,5) + (fn) + (constant); \
6298 +- B = ror32(B, 2); } while (0)
6299 ++ B = ror32(B, 2); \
6300 ++ TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0)
6301 +
6302 + #define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
6303 + #define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
6304 +@@ -84,6 +86,7 @@
6305 + void sha1_transform(__u32 *digest, const char *data, __u32 *array)
6306 + {
6307 + __u32 A, B, C, D, E;
6308 ++ unsigned int i = 0;
6309 +
6310 + A = digest[0];
6311 + B = digest[1];
6312 +@@ -92,94 +95,24 @@ void sha1_transform(__u32 *digest, const char *data, __u32 *array)
6313 + E = digest[4];
6314 +
6315 + /* Round 1 - iterations 0-16 take their input from 'data' */
6316 +- T_0_15( 0, A, B, C, D, E);
6317 +- T_0_15( 1, E, A, B, C, D);
6318 +- T_0_15( 2, D, E, A, B, C);
6319 +- T_0_15( 3, C, D, E, A, B);
6320 +- T_0_15( 4, B, C, D, E, A);
6321 +- T_0_15( 5, A, B, C, D, E);
6322 +- T_0_15( 6, E, A, B, C, D);
6323 +- T_0_15( 7, D, E, A, B, C);
6324 +- T_0_15( 8, C, D, E, A, B);
6325 +- T_0_15( 9, B, C, D, E, A);
6326 +- T_0_15(10, A, B, C, D, E);
6327 +- T_0_15(11, E, A, B, C, D);
6328 +- T_0_15(12, D, E, A, B, C);
6329 +- T_0_15(13, C, D, E, A, B);
6330 +- T_0_15(14, B, C, D, E, A);
6331 +- T_0_15(15, A, B, C, D, E);
6332 ++ for (; i < 16; ++i)
6333 ++ T_0_15(i, A, B, C, D, E);
6334 +
6335 + /* Round 1 - tail. Input from 512-bit mixing array */
6336 +- T_16_19(16, E, A, B, C, D);
6337 +- T_16_19(17, D, E, A, B, C);
6338 +- T_16_19(18, C, D, E, A, B);
6339 +- T_16_19(19, B, C, D, E, A);
6340 ++ for (; i < 20; ++i)
6341 ++ T_16_19(i, A, B, C, D, E);
6342 +
6343 + /* Round 2 */
6344 +- T_20_39(20, A, B, C, D, E);
6345 +- T_20_39(21, E, A, B, C, D);
6346 +- T_20_39(22, D, E, A, B, C);
6347 +- T_20_39(23, C, D, E, A, B);
6348 +- T_20_39(24, B, C, D, E, A);
6349 +- T_20_39(25, A, B, C, D, E);
6350 +- T_20_39(26, E, A, B, C, D);
6351 +- T_20_39(27, D, E, A, B, C);
6352 +- T_20_39(28, C, D, E, A, B);
6353 +- T_20_39(29, B, C, D, E, A);
6354 +- T_20_39(30, A, B, C, D, E);
6355 +- T_20_39(31, E, A, B, C, D);
6356 +- T_20_39(32, D, E, A, B, C);
6357 +- T_20_39(33, C, D, E, A, B);
6358 +- T_20_39(34, B, C, D, E, A);
6359 +- T_20_39(35, A, B, C, D, E);
6360 +- T_20_39(36, E, A, B, C, D);
6361 +- T_20_39(37, D, E, A, B, C);
6362 +- T_20_39(38, C, D, E, A, B);
6363 +- T_20_39(39, B, C, D, E, A);
6364 ++ for (; i < 40; ++i)
6365 ++ T_20_39(i, A, B, C, D, E);
6366 +
6367 + /* Round 3 */
6368 +- T_40_59(40, A, B, C, D, E);
6369 +- T_40_59(41, E, A, B, C, D);
6370 +- T_40_59(42, D, E, A, B, C);
6371 +- T_40_59(43, C, D, E, A, B);
6372 +- T_40_59(44, B, C, D, E, A);
6373 +- T_40_59(45, A, B, C, D, E);
6374 +- T_40_59(46, E, A, B, C, D);
6375 +- T_40_59(47, D, E, A, B, C);
6376 +- T_40_59(48, C, D, E, A, B);
6377 +- T_40_59(49, B, C, D, E, A);
6378 +- T_40_59(50, A, B, C, D, E);
6379 +- T_40_59(51, E, A, B, C, D);
6380 +- T_40_59(52, D, E, A, B, C);
6381 +- T_40_59(53, C, D, E, A, B);
6382 +- T_40_59(54, B, C, D, E, A);
6383 +- T_40_59(55, A, B, C, D, E);
6384 +- T_40_59(56, E, A, B, C, D);
6385 +- T_40_59(57, D, E, A, B, C);
6386 +- T_40_59(58, C, D, E, A, B);
6387 +- T_40_59(59, B, C, D, E, A);
6388 ++ for (; i < 60; ++i)
6389 ++ T_40_59(i, A, B, C, D, E);
6390 +
6391 + /* Round 4 */
6392 +- T_60_79(60, A, B, C, D, E);
6393 +- T_60_79(61, E, A, B, C, D);
6394 +- T_60_79(62, D, E, A, B, C);
6395 +- T_60_79(63, C, D, E, A, B);
6396 +- T_60_79(64, B, C, D, E, A);
6397 +- T_60_79(65, A, B, C, D, E);
6398 +- T_60_79(66, E, A, B, C, D);
6399 +- T_60_79(67, D, E, A, B, C);
6400 +- T_60_79(68, C, D, E, A, B);
6401 +- T_60_79(69, B, C, D, E, A);
6402 +- T_60_79(70, A, B, C, D, E);
6403 +- T_60_79(71, E, A, B, C, D);
6404 +- T_60_79(72, D, E, A, B, C);
6405 +- T_60_79(73, C, D, E, A, B);
6406 +- T_60_79(74, B, C, D, E, A);
6407 +- T_60_79(75, A, B, C, D, E);
6408 +- T_60_79(76, E, A, B, C, D);
6409 +- T_60_79(77, D, E, A, B, C);
6410 +- T_60_79(78, C, D, E, A, B);
6411 +- T_60_79(79, B, C, D, E, A);
6412 ++ for (; i < 80; ++i)
6413 ++ T_60_79(i, A, B, C, D, E);
6414 +
6415 + digest[0] += A;
6416 + digest[1] += B;
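
The eighty hand-unrolled SHA-1 rounds collapse into four loops because SHA_ROUND now rotates the five working variables itself: after computing the new value into E, it shifts E from D, D from C, C from B, B from A, and puts the result in A, so every iteration can pass (A, B, C, D, E) in the same order. The rotation idiom in isolation:

    /* Sketch: rotate five working variables inside the macro so the
     * caller can loop instead of unrolling with permuted arguments. */
    #define ROT5(A, B, C, D, E) do {                                \
            __u32 TEMP = (E);                                       \
            (E) = (D); (D) = (C); (C) = (B); (B) = (A); (A) = TEMP; \
    } while (0)
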
6417 +diff --git a/lib/siphash.c b/lib/siphash.c
6418 +index 025f0cbf6d7a7..b4055b1cc2f67 100644
6419 +--- a/lib/siphash.c
6420 ++++ b/lib/siphash.c
6421 +@@ -18,19 +18,13 @@
6422 + #include <asm/word-at-a-time.h>
6423 + #endif
6424 +
6425 +-#define SIPROUND \
6426 +- do { \
6427 +- v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
6428 +- v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
6429 +- v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
6430 +- v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
6431 +- } while (0)
6432 ++#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
6433 +
6434 + #define PREAMBLE(len) \
6435 +- u64 v0 = 0x736f6d6570736575ULL; \
6436 +- u64 v1 = 0x646f72616e646f6dULL; \
6437 +- u64 v2 = 0x6c7967656e657261ULL; \
6438 +- u64 v3 = 0x7465646279746573ULL; \
6439 ++ u64 v0 = SIPHASH_CONST_0; \
6440 ++ u64 v1 = SIPHASH_CONST_1; \
6441 ++ u64 v2 = SIPHASH_CONST_2; \
6442 ++ u64 v3 = SIPHASH_CONST_3; \
6443 + u64 b = ((u64)(len)) << 56; \
6444 + v3 ^= key->key[1]; \
6445 + v2 ^= key->key[0]; \
6446 +@@ -389,19 +383,13 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
6447 + }
6448 + EXPORT_SYMBOL(hsiphash_4u32);
6449 + #else
6450 +-#define HSIPROUND \
6451 +- do { \
6452 +- v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
6453 +- v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
6454 +- v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
6455 +- v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
6456 +- } while (0)
6457 ++#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3)
6458 +
6459 + #define HPREAMBLE(len) \
6460 +- u32 v0 = 0; \
6461 +- u32 v1 = 0; \
6462 +- u32 v2 = 0x6c796765U; \
6463 +- u32 v3 = 0x74656462U; \
6464 ++ u32 v0 = HSIPHASH_CONST_0; \
6465 ++ u32 v1 = HSIPHASH_CONST_1; \
6466 ++ u32 v2 = HSIPHASH_CONST_2; \
6467 ++ u32 v3 = HSIPHASH_CONST_3; \
6468 + u32 b = ((u32)(len)) << 24; \
6469 + v3 ^= key->key[1]; \
6470 + v2 ^= key->key[0]; \
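
SipHash's magic initializers move behind named macros so random.c can share the permutation and constants. The values below are taken verbatim from the literals deleted above; hedged only in that the defining header (upstream, <linux/siphash.h>) is implied rather than shown in this hunk:

    #define SIPHASH_CONST_0  0x736f6d6570736575ULL  /* "somepseu" */
    #define SIPHASH_CONST_1  0x646f72616e646f6dULL  /* "dorandom" */
    #define SIPHASH_CONST_2  0x6c7967656e657261ULL  /* "lygenera" */
    #define SIPHASH_CONST_3  0x7465646279746573ULL  /* "tedbytes" */

    #define HSIPHASH_CONST_0 0U
    #define HSIPHASH_CONST_1 0U
    #define HSIPHASH_CONST_2 0x6c796765U            /* "lyge" */
    #define HSIPHASH_CONST_3 0x74656462U            /* "tedb" */
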
6471 +diff --git a/lib/vsprintf.c b/lib/vsprintf.c
6472 +index 8ade1a86d8187..daf32a489dc06 100644
6473 +--- a/lib/vsprintf.c
6474 ++++ b/lib/vsprintf.c
6475 +@@ -756,14 +756,16 @@ static void enable_ptr_key_workfn(struct work_struct *work)
6476 +
6477 + static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
6478 +
6479 +-static void fill_random_ptr_key(struct random_ready_callback *unused)
6480 ++static int fill_random_ptr_key(struct notifier_block *nb,
6481 ++ unsigned long action, void *data)
6482 + {
6483 + /* This may be in an interrupt handler. */
6484 + queue_work(system_unbound_wq, &enable_ptr_key_work);
6485 ++ return 0;
6486 + }
6487 +
6488 +-static struct random_ready_callback random_ready = {
6489 +- .func = fill_random_ptr_key
6490 ++static struct notifier_block random_ready = {
6491 ++ .notifier_call = fill_random_ptr_key
6492 + };
6493 +
6494 + static int __init initialize_ptr_random(void)
6495 +@@ -777,7 +779,7 @@ static int __init initialize_ptr_random(void)
6496 + return 0;
6497 + }
6498 +
6499 +- ret = add_random_ready_callback(&random_ready);
6500 ++ ret = register_random_ready_notifier(&random_ready);
6501 + if (!ret) {
6502 + return 0;
6503 + } else if (ret == -EALREADY) {
6504 +diff --git a/mm/util.c b/mm/util.c
6505 +index 8904727607907..ba9643de689ea 100644
6506 +--- a/mm/util.c
6507 ++++ b/mm/util.c
6508 +@@ -331,6 +331,38 @@ unsigned long randomize_stack_top(unsigned long stack_top)
6509 + #endif
6510 + }
6511 +
6512 ++/**
6513 ++ * randomize_page - Generate a random, page aligned address
6514 ++ * @start: The smallest acceptable address the caller will take.
6515 ++ * @range: The size of the area, starting at @start, within which the
6516 ++ * random address must fall.
6517 ++ *
6518 ++ * If @start + @range would overflow, @range is capped.
6519 ++ *
6520 ++ * NOTE: Historical use of randomize_range, which this replaces, presumed that
6521 ++ * @start was already page aligned. We now align it regardless.
6522 ++ *
6523 ++ * Return: A page aligned address within [start, start + range). On error,
6524 ++ * @start is returned.
6525 ++ */
6526 ++unsigned long randomize_page(unsigned long start, unsigned long range)
6527 ++{
6528 ++ if (!PAGE_ALIGNED(start)) {
6529 ++ range -= PAGE_ALIGN(start) - start;
6530 ++ start = PAGE_ALIGN(start);
6531 ++ }
6532 ++
6533 ++ if (start > ULONG_MAX - range)
6534 ++ range = ULONG_MAX - start;
6535 ++
6536 ++ range >>= PAGE_SHIFT;
6537 ++
6538 ++ if (range == 0)
6539 ++ return start;
6540 ++
6541 ++ return start + (get_random_long() % range << PAGE_SHIFT);
6542 ++}
6543 ++
6544 + #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
6545 + unsigned long arch_randomize_brk(struct mm_struct *mm)
6546 + {
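
randomize_page() aligns @start up to a page boundary, caps @range against address-space overflow, and then returns start plus a page-granular offset drawn uniformly from the remaining range. A usage sketch (the base and span here are illustrative):

    /* Illustrative: place a mapping somewhere in a 1 GiB window. */
    #include <linux/sizes.h>

    static unsigned long pick_base(unsigned long floor)
    {
            return randomize_page(floor, SZ_1G);
    }
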
6547 +diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
6548 +index b8a33c841846f..7131cd1fb2ad5 100644
6549 +--- a/net/core/secure_seq.c
6550 ++++ b/net/core/secure_seq.c
6551 +@@ -96,7 +96,7 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
6552 + }
6553 + EXPORT_SYMBOL(secure_tcpv6_seq);
6554 +
6555 +-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
6556 ++u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
6557 + __be16 dport)
6558 + {
6559 + const struct {
6560 +@@ -146,7 +146,7 @@ u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
6561 + }
6562 + EXPORT_SYMBOL_GPL(secure_tcp_seq);
6563 +
6564 +-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
6565 ++u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
6566 + {
6567 + net_secret_init();
6568 + return siphash_4u32((__force u32)saddr, (__force u32)daddr,
6569 +diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
6570 +index 915b8e1bd9efb..44b524136f953 100644
6571 +--- a/net/ipv4/inet_hashtables.c
6572 ++++ b/net/ipv4/inet_hashtables.c
6573 +@@ -504,7 +504,7 @@ not_unique:
6574 + return -EADDRNOTAVAIL;
6575 + }
6576 +
6577 +-static u32 inet_sk_port_offset(const struct sock *sk)
6578 ++static u64 inet_sk_port_offset(const struct sock *sk)
6579 + {
6580 + const struct inet_sock *inet = inet_sk(sk);
6581 +
6582 +@@ -722,8 +722,19 @@ void inet_unhash(struct sock *sk)
6583 + }
6584 + EXPORT_SYMBOL_GPL(inet_unhash);
6585 +
6586 ++/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm
6587 ++ * Note that we use 32bit integers (vs RFC 'short integers')
6588 ++ * because 2^16 is not a multiple of num_ephemeral and this
6589 ++ * property might be used by a clever attacker.
6590 ++ * The RFC claims using TABLE_LENGTH=10 buckets gives an improvement;
6591 ++ * we use 256 instead to give more isolation and
6592 ++ * privacy. This only consumes 1 KB of kernel memory.
6593 ++ */
6594 ++#define INET_TABLE_PERTURB_SHIFT 8
6595 ++static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
6596 ++
6597 + int __inet_hash_connect(struct inet_timewait_death_row *death_row,
6598 +- struct sock *sk, u32 port_offset,
6599 ++ struct sock *sk, u64 port_offset,
6600 + int (*check_established)(struct inet_timewait_death_row *,
6601 + struct sock *, __u16, struct inet_timewait_sock **))
6602 + {
6603 +@@ -735,8 +746,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
6604 + struct inet_bind_bucket *tb;
6605 + u32 remaining, offset;
6606 + int ret, i, low, high;
6607 +- static u32 hint;
6608 + int l3mdev;
6609 ++ u32 index;
6610 +
6611 + if (port) {
6612 + head = &hinfo->bhash[inet_bhashfn(net, port,
6613 +@@ -763,7 +774,12 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
6614 + if (likely(remaining > 1))
6615 + remaining &= ~1U;
6616 +
6617 +- offset = (hint + port_offset) % remaining;
6618 ++ net_get_random_once(table_perturb, sizeof(table_perturb));
6619 ++ index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
6620 ++
6621 ++ offset = READ_ONCE(table_perturb[index]) + port_offset;
6622 ++ offset %= remaining;
6623 ++
6624 + /* In first pass we try ports of @low parity.
6625 + * inet_csk_get_port() does the opposite choice.
6626 + */
6627 +@@ -817,7 +833,7 @@ next_port:
6628 + return -EADDRNOTAVAIL;
6629 +
6630 + ok:
6631 +- hint += i + 2;
6632 ++ WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
6633 +
6634 + /* Head lock still held and bh's disabled */
6635 + inet_bind_hash(sk, tb, port);
6636 +@@ -840,7 +856,7 @@ ok:
6637 + int inet_hash_connect(struct inet_timewait_death_row *death_row,
6638 + struct sock *sk)
6639 + {
6640 +- u32 port_offset = 0;
6641 ++ u64 port_offset = 0;
6642 +
6643 + if (!inet_sk(sk)->inet_num)
6644 + port_offset = inet_sk_port_offset(sk);
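
Taken together, these hunks implement RFC 6056's double-hash algorithm: port_offset, now the full 64-bit siphash of the connection tuple, supplies the starting offset, while a second hash of port_offset picks one of 256 table_perturb[] buckets whose counter perturbs that offset; the bump by i + 2 on success shifts where the next connection in the same bucket begins. The selection arithmetic condensed, without the locking or the even/odd port walk:

    /* Hedged sketch of the double-hash offset computation above. */
    static u32 pick_offset(u64 port_offset, u32 remaining)
    {
            u32 index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
            u32 offset = READ_ONCE(table_perturb[index]) + port_offset;

            return offset % remaining;
    }
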
6645 +diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
6646 +index 0a2e7f2283911..40203255ed88b 100644
6647 +--- a/net/ipv6/inet6_hashtables.c
6648 ++++ b/net/ipv6/inet6_hashtables.c
6649 +@@ -308,7 +308,7 @@ not_unique:
6650 + return -EADDRNOTAVAIL;
6651 + }
6652 +
6653 +-static u32 inet6_sk_port_offset(const struct sock *sk)
6654 ++static u64 inet6_sk_port_offset(const struct sock *sk)
6655 + {
6656 + const struct inet_sock *inet = inet_sk(sk);
6657 +
6658 +@@ -320,7 +320,7 @@ static u32 inet6_sk_port_offset(const struct sock *sk)
6659 + int inet6_hash_connect(struct inet_timewait_death_row *death_row,
6660 + struct sock *sk)
6661 + {
6662 +- u32 port_offset = 0;
6663 ++ u64 port_offset = 0;
6664 +
6665 + if (!inet_sk(sk)->inet_num)
6666 + port_offset = inet6_sk_port_offset(sk);
6667 +diff --git a/security/security.c b/security/security.c
6668 +index d9d42d64f89f2..360706cdababc 100644
6669 +--- a/security/security.c
6670 ++++ b/security/security.c
6671 +@@ -59,10 +59,12 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
6672 + [LOCKDOWN_DEBUGFS] = "debugfs access",
6673 + [LOCKDOWN_XMON_WR] = "xmon write access",
6674 + [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
6675 ++ [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM",
6676 + [LOCKDOWN_INTEGRITY_MAX] = "integrity",
6677 + [LOCKDOWN_KCORE] = "/proc/kcore access",
6678 + [LOCKDOWN_KPROBES] = "use of kprobes",
6679 + [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
6680 ++ [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM",
6681 + [LOCKDOWN_PERF] = "unsafe use of perf",
6682 + [LOCKDOWN_TRACEFS] = "use of tracefs",
6683 + [LOCKDOWN_XMON_RW] = "xmon read and write access",
6684 +diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
6685 +index f8ac96cf38a43..06775519dab00 100644
6686 +--- a/sound/pci/ctxfi/ctatc.c
6687 ++++ b/sound/pci/ctxfi/ctatc.c
6688 +@@ -36,6 +36,7 @@
6689 + | ((IEC958_AES3_CON_FS_48000) << 24))
6690 +
6691 + static const struct snd_pci_quirk subsys_20k1_list[] = {
6692 ++ SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0021, "SB046x", CTSB046X),
6693 + SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0022, "SB055x", CTSB055X),
6694 + SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x002f, "SB055x", CTSB055X),
6695 + SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0029, "SB073x", CTSB073X),
6696 +@@ -64,6 +65,7 @@ static const struct snd_pci_quirk subsys_20k2_list[] = {
6697 +
6698 + static const char *ct_subsys_name[NUM_CTCARDS] = {
6699 + /* 20k1 models */
6700 ++ [CTSB046X] = "SB046x",
6701 + [CTSB055X] = "SB055x",
6702 + [CTSB073X] = "SB073x",
6703 + [CTUAA] = "UAA",
6704 +diff --git a/sound/pci/ctxfi/cthardware.h b/sound/pci/ctxfi/cthardware.h
6705 +index 9e6b83bd432d9..b50d61a08e283 100644
6706 +--- a/sound/pci/ctxfi/cthardware.h
6707 ++++ b/sound/pci/ctxfi/cthardware.h
6708 +@@ -26,8 +26,9 @@ enum CHIPTYP {
6709 +
6710 + enum CTCARDS {
6711 + /* 20k1 models */
6712 ++ CTSB046X,
6713 ++ CT20K1_MODEL_FIRST = CTSB046X,
6714 + CTSB055X,
6715 +- CT20K1_MODEL_FIRST = CTSB055X,
6716 + CTSB073X,
6717 + CTUAA,
6718 + CT20K1_UNKNOWN,