Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Mon, 30 May 2022 14:00:44
Message-Id: 1653919226.684b1f5f8f40f0da8ed67d8c54c59fadf96bf420.mpagano@gentoo
commit: 684b1f5f8f40f0da8ed67d8c54c59fadf96bf420
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon May 30 14:00:26 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon May 30 14:00:26 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=684b1f5f

Linux patch 5.15.44

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1043_linux-5.15.44.patch | 6005 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6009 insertions(+)
diff --git a/0000_README b/0000_README
index 75beaa27..d8201ada 100644
--- a/0000_README
+++ b/0000_README
@@ -215,6 +215,10 @@ Patch: 1042_linux-5.15.43.patch
From: http://www.kernel.org
Desc: Linux 5.15.43

+Patch: 1043_linux-5.15.44.patch
+From: http://www.kernel.org
+Desc: Linux 5.15.44
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1043_linux-5.15.44.patch b/1043_linux-5.15.44.patch
new file mode 100644
index 00000000..9f4dc621
--- /dev/null
+++ b/1043_linux-5.15.44.patch
@@ -0,0 +1,6005 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index bb25aae698609..fd3d14dabadcb 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4307,6 +4307,12 @@
+ fully seed the kernel's CRNG. Default is controlled
+ by CONFIG_RANDOM_TRUST_CPU.
+
++ random.trust_bootloader={on,off}
++ [KNL] Enable or disable trusting the use of a
++ seed passed by the bootloader (if available) to
++ fully seed the kernel's CRNG. Default is controlled
++ by CONFIG_RANDOM_TRUST_BOOTLOADER.
++
+ randomize_kstack_offset=
+ [KNL] Enable or disable kernel stack offset
+ randomization, which provides roughly 5 bits of
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index d6977875c1b76..609b891754081 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1014,28 +1014,22 @@ This is a directory, with the following entries:
+ * ``boot_id``: a UUID generated the first time this is retrieved, and
+ unvarying after that;
+
++* ``uuid``: a UUID generated every time this is retrieved (this can
++ thus be used to generate UUIDs at will);
++
+ * ``entropy_avail``: the pool's entropy count, in bits;
+
+ * ``poolsize``: the entropy pool size, in bits;
+
+ * ``urandom_min_reseed_secs``: obsolete (used to determine the minimum
+- number of seconds between urandom pool reseeding).
+-
+-* ``uuid``: a UUID generated every time this is retrieved (this can
+- thus be used to generate UUIDs at will);
++ number of seconds between urandom pool reseeding). This file is
++ writable for compatibility purposes, but writing to it has no effect
++ on any RNG behavior;
+
+ * ``write_wakeup_threshold``: when the entropy count drops below this
+ (as a number of bits), processes waiting to write to ``/dev/random``
+- are woken up.
+-
+-If ``drivers/char/random.c`` is built with ``ADD_INTERRUPT_BENCH``
+-defined, these additional entries are present:
+-
+-* ``add_interrupt_avg_cycles``: the average number of cycles between
+- interrupts used to feed the pool;
+-
+-* ``add_interrupt_avg_deviation``: the standard deviation seen on the
+- number of cycles between interrupts used to feed the pool.
++ are woken up. This file is writable for compatibility purposes, but
++ writing to it has no effect on any RNG behavior.
+
+
+ randomize_va_space
+diff --git a/MAINTAINERS b/MAINTAINERS
+index c8103e57a70be..942e0b173f2cc 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -15720,6 +15720,8 @@ F: arch/mips/generic/board-ranchu.c
+
+ RANDOM NUMBER DRIVER
+ M: "Theodore Ts'o" <tytso@×××.edu>
++M: Jason A. Donenfeld <Jason@×××××.com>
++T: git https://git.kernel.org/pub/scm/linux/kernel/git/crng/random.git
+ S: Maintained
+ F: drivers/char/random.c
+
+diff --git a/Makefile b/Makefile
+index 6192e6be49c36..b8ce2ba174862 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 43
++SUBLEVEL = 44
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/arch/alpha/include/asm/timex.h b/arch/alpha/include/asm/timex.h
+index b565cc6f408e9..f89798da8a147 100644
+--- a/arch/alpha/include/asm/timex.h
++++ b/arch/alpha/include/asm/timex.h
+@@ -28,5 +28,6 @@ static inline cycles_t get_cycles (void)
+ __asm__ __volatile__ ("rpcc %0" : "=r"(ret));
+ return ret;
+ }
++#define get_cycles get_cycles
+
+ #endif
+diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
+index eafa898ba6a73..0274f81cc8ea0 100644
+--- a/arch/arm/crypto/Makefile
++++ b/arch/arm/crypto/Makefile
+@@ -10,6 +10,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
+ obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
+ obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
+ obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += blake2s-arm.o
++obj-$(if $(CONFIG_CRYPTO_BLAKE2S_ARM),y) += libblake2s-arm.o
+ obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o
+ obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
+ obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o
+@@ -31,7 +32,8 @@ sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
+ sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y)
+ sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o
+ sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y)
+-blake2s-arm-y := blake2s-core.o blake2s-glue.o
++blake2s-arm-y := blake2s-shash.o
++libblake2s-arm-y:= blake2s-core.o blake2s-glue.o
+ blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o
+ sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
+ sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
+diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S
+index 86345751bbf3a..df40e46601f10 100644
+--- a/arch/arm/crypto/blake2s-core.S
++++ b/arch/arm/crypto/blake2s-core.S
+@@ -167,8 +167,8 @@
+ .endm
+
+ //
+-// void blake2s_compress_arch(struct blake2s_state *state,
+-// const u8 *block, size_t nblocks, u32 inc);
++// void blake2s_compress(struct blake2s_state *state,
++// const u8 *block, size_t nblocks, u32 inc);
+ //
+ // Only the first three fields of struct blake2s_state are used:
+ // u32 h[8]; (inout)
+@@ -176,7 +176,7 @@
+ // u32 f[2]; (in)
+ //
+ .align 5
+-ENTRY(blake2s_compress_arch)
++ENTRY(blake2s_compress)
+ push {r0-r2,r4-r11,lr} // keep this an even number
+
+ .Lnext_block:
+@@ -303,4 +303,4 @@ ENTRY(blake2s_compress_arch)
+ str r3, [r12], #4
+ bne 1b
+ b .Lcopy_block_done
+-ENDPROC(blake2s_compress_arch)
++ENDPROC(blake2s_compress)
+diff --git a/arch/arm/crypto/blake2s-glue.c b/arch/arm/crypto/blake2s-glue.c
+index f2cc1e5fc9ec1..0238a70d9581e 100644
+--- a/arch/arm/crypto/blake2s-glue.c
++++ b/arch/arm/crypto/blake2s-glue.c
+@@ -1,78 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * BLAKE2s digest algorithm, ARM scalar implementation
+- *
+- * Copyright 2020 Google LLC
+- */
+
+ #include <crypto/internal/blake2s.h>
+-#include <crypto/internal/hash.h>
+-
+ #include <linux/module.h>
+
+ /* defined in blake2s-core.S */
+-EXPORT_SYMBOL(blake2s_compress_arch);
+-
+-static int crypto_blake2s_update_arm(struct shash_desc *desc,
+- const u8 *in, unsigned int inlen)
+-{
+- return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch);
+-}
+-
+-static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
+-{
+- return crypto_blake2s_final(desc, out, blake2s_compress_arch);
+-}
+-
+-#define BLAKE2S_ALG(name, driver_name, digest_size) \
+- { \
+- .base.cra_name = name, \
+- .base.cra_driver_name = driver_name, \
+- .base.cra_priority = 200, \
+- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
+- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
+- .base.cra_module = THIS_MODULE, \
+- .digestsize = digest_size, \
+- .setkey = crypto_blake2s_setkey, \
+- .init = crypto_blake2s_init, \
+- .update = crypto_blake2s_update_arm, \
+- .final = crypto_blake2s_final_arm, \
+- .descsize = sizeof(struct blake2s_state), \
+- }
+-
+-static struct shash_alg blake2s_arm_algs[] = {
+- BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE),
+- BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE),
+- BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE),
+- BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE),
+-};
+-
+-static int __init blake2s_arm_mod_init(void)
+-{
+- return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
+- crypto_register_shashes(blake2s_arm_algs,
+- ARRAY_SIZE(blake2s_arm_algs)) : 0;
+-}
+-
+-static void __exit blake2s_arm_mod_exit(void)
+-{
+- if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
+- crypto_unregister_shashes(blake2s_arm_algs,
+- ARRAY_SIZE(blake2s_arm_algs));
+-}
+-
+-module_init(blake2s_arm_mod_init);
+-module_exit(blake2s_arm_mod_exit);
+-
+-MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation");
+-MODULE_LICENSE("GPL");
+-MODULE_AUTHOR("Eric Biggers <ebiggers@××××××.com>");
+-MODULE_ALIAS_CRYPTO("blake2s-128");
+-MODULE_ALIAS_CRYPTO("blake2s-128-arm");
+-MODULE_ALIAS_CRYPTO("blake2s-160");
+-MODULE_ALIAS_CRYPTO("blake2s-160-arm");
+-MODULE_ALIAS_CRYPTO("blake2s-224");
+-MODULE_ALIAS_CRYPTO("blake2s-224-arm");
+-MODULE_ALIAS_CRYPTO("blake2s-256");
+-MODULE_ALIAS_CRYPTO("blake2s-256-arm");
++EXPORT_SYMBOL(blake2s_compress);
+diff --git a/arch/arm/crypto/blake2s-shash.c b/arch/arm/crypto/blake2s-shash.c
+new file mode 100644
+index 0000000000000..763c73beea2d0
+--- /dev/null
++++ b/arch/arm/crypto/blake2s-shash.c
+@@ -0,0 +1,75 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * BLAKE2s digest algorithm, ARM scalar implementation
++ *
++ * Copyright 2020 Google LLC
++ */
++
++#include <crypto/internal/blake2s.h>
++#include <crypto/internal/hash.h>
++
++#include <linux/module.h>
++
++static int crypto_blake2s_update_arm(struct shash_desc *desc,
++ const u8 *in, unsigned int inlen)
++{
++ return crypto_blake2s_update(desc, in, inlen, false);
++}
++
++static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
++{
++ return crypto_blake2s_final(desc, out, false);
++}
++
++#define BLAKE2S_ALG(name, driver_name, digest_size) \
++ { \
++ .base.cra_name = name, \
++ .base.cra_driver_name = driver_name, \
++ .base.cra_priority = 200, \
++ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
++ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
++ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
++ .base.cra_module = THIS_MODULE, \
++ .digestsize = digest_size, \
++ .setkey = crypto_blake2s_setkey, \
++ .init = crypto_blake2s_init, \
++ .update = crypto_blake2s_update_arm, \
++ .final = crypto_blake2s_final_arm, \
++ .descsize = sizeof(struct blake2s_state), \
++ }
++
++static struct shash_alg blake2s_arm_algs[] = {
++ BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE),
++ BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE),
++ BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE),
++ BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE),
++};
++
++static int __init blake2s_arm_mod_init(void)
++{
++ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
++ crypto_register_shashes(blake2s_arm_algs,
++ ARRAY_SIZE(blake2s_arm_algs)) : 0;
++}
++
++static void __exit blake2s_arm_mod_exit(void)
++{
++ if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
++ crypto_unregister_shashes(blake2s_arm_algs,
++ ARRAY_SIZE(blake2s_arm_algs));
++}
++
++module_init(blake2s_arm_mod_init);
++module_exit(blake2s_arm_mod_exit);
++
++MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Eric Biggers <ebiggers@××××××.com>");
++MODULE_ALIAS_CRYPTO("blake2s-128");
++MODULE_ALIAS_CRYPTO("blake2s-128-arm");
++MODULE_ALIAS_CRYPTO("blake2s-160");
++MODULE_ALIAS_CRYPTO("blake2s-160-arm");
++MODULE_ALIAS_CRYPTO("blake2s-224");
++MODULE_ALIAS_CRYPTO("blake2s-224-arm");
++MODULE_ALIAS_CRYPTO("blake2s-256");
++MODULE_ALIAS_CRYPTO("blake2s-256-arm");
+diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
+index 7c3b3671d6c25..6d1337c169cd3 100644
+--- a/arch/arm/include/asm/timex.h
++++ b/arch/arm/include/asm/timex.h
+@@ -11,5 +11,6 @@
+
+ typedef unsigned long cycles_t;
+ #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
++#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
+
+ #endif
+diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
+index 869a3ac6bf23a..7ccc077a60bed 100644
+--- a/arch/ia64/include/asm/timex.h
++++ b/arch/ia64/include/asm/timex.h
+@@ -39,6 +39,7 @@ get_cycles (void)
+ ret = ia64_getreg(_IA64_REG_AR_ITC);
+ return ret;
+ }
++#define get_cycles get_cycles
+
+ extern void ia64_cpu_local_tick (void);
+ extern unsigned long long ia64_native_sched_clock (void);
+diff --git a/arch/m68k/include/asm/timex.h b/arch/m68k/include/asm/timex.h
+index 6a21d93582805..f4a7a340f4cae 100644
+--- a/arch/m68k/include/asm/timex.h
++++ b/arch/m68k/include/asm/timex.h
+@@ -35,7 +35,7 @@ static inline unsigned long random_get_entropy(void)
+ {
+ if (mach_random_get_entropy)
+ return mach_random_get_entropy();
+- return 0;
++ return random_get_entropy_fallback();
+ }
+ #define random_get_entropy random_get_entropy
+
+diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
+index 8026baf46e729..2e107886f97ac 100644
+--- a/arch/mips/include/asm/timex.h
++++ b/arch/mips/include/asm/timex.h
+@@ -76,25 +76,24 @@ static inline cycles_t get_cycles(void)
+ else
+ return 0; /* no usable counter */
+ }
++#define get_cycles get_cycles
+
+ /*
+ * Like get_cycles - but where c0_count is not available we desperately
+ * use c0_random in an attempt to get at least a little bit of entropy.
+- *
+- * R6000 and R6000A neither have a count register nor a random register.
+- * That leaves no entropy source in the CPU itself.
+ */
+ static inline unsigned long random_get_entropy(void)
+ {
+- unsigned int prid = read_c0_prid();
+- unsigned int imp = prid & PRID_IMP_MASK;
++ unsigned int c0_random;
+
+- if (can_use_mips_counter(prid))
++ if (can_use_mips_counter(read_c0_prid()))
+ return read_c0_count();
+- else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
+- return read_c0_random();
++
++ if (cpu_has_3kex)
++ c0_random = (read_c0_random() >> 8) & 0x3f;
+ else
+- return 0; /* no usable register */
++ c0_random = read_c0_random() & 0x3f;
++ return (random_get_entropy_fallback() << 6) | (0x3f - c0_random);
+ }
+ #define random_get_entropy random_get_entropy
+
+diff --git a/arch/nios2/include/asm/timex.h b/arch/nios2/include/asm/timex.h
+index a769f871b28d9..40a1adc9bd03e 100644
+--- a/arch/nios2/include/asm/timex.h
++++ b/arch/nios2/include/asm/timex.h
+@@ -8,5 +8,8 @@
+ typedef unsigned long cycles_t;
+
+ extern cycles_t get_cycles(void);
++#define get_cycles get_cycles
++
++#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
+
+ #endif
+diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h
+index 06b510f8172e3..b4622cb06a75e 100644
+--- a/arch/parisc/include/asm/timex.h
++++ b/arch/parisc/include/asm/timex.h
+@@ -13,9 +13,10 @@
+
+ typedef unsigned long cycles_t;
+
+-static inline cycles_t get_cycles (void)
++static inline cycles_t get_cycles(void)
+ {
+ return mfctl(16);
+ }
++#define get_cycles get_cycles
+
+ #endif
+diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
+index fa2e76e4093a3..14b4489de52c5 100644
+--- a/arch/powerpc/include/asm/timex.h
++++ b/arch/powerpc/include/asm/timex.h
+@@ -19,6 +19,7 @@ static inline cycles_t get_cycles(void)
+ {
+ return mftb();
+ }
++#define get_cycles get_cycles
+
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_TIMEX_H */
+diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
+index 507cae273bc62..d6a7428f6248d 100644
+--- a/arch/riscv/include/asm/timex.h
++++ b/arch/riscv/include/asm/timex.h
+@@ -41,7 +41,7 @@ static inline u32 get_cycles_hi(void)
+ static inline unsigned long random_get_entropy(void)
+ {
+ if (unlikely(clint_time_val == NULL))
+- return 0;
++ return random_get_entropy_fallback();
+ return get_cycles();
+ }
+ #define random_get_entropy() random_get_entropy()
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index 50d9b04ecbd14..bc50ee0e91ff1 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -201,6 +201,7 @@ static inline cycles_t get_cycles(void)
+ {
+ return (cycles_t) get_tod_clock() >> 2;
+ }
++#define get_cycles get_cycles
+
+ int get_phys_clock(unsigned long *clock);
+ void init_cpu_timer(void);
+diff --git a/arch/sparc/include/asm/timex_32.h b/arch/sparc/include/asm/timex_32.h
+index 542915b462097..f86326a6f89e0 100644
+--- a/arch/sparc/include/asm/timex_32.h
++++ b/arch/sparc/include/asm/timex_32.h
+@@ -9,8 +9,6 @@
+
+ #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+
+-/* XXX Maybe do something better at some point... -DaveM */
+-typedef unsigned long cycles_t;
+-#define get_cycles() (0)
++#include <asm-generic/timex.h>
+
+ #endif
+diff --git a/arch/um/include/asm/timex.h b/arch/um/include/asm/timex.h
+index e392a9a5bc9bd..9f27176adb26d 100644
+--- a/arch/um/include/asm/timex.h
++++ b/arch/um/include/asm/timex.h
+@@ -2,13 +2,8 @@
+ #ifndef __UM_TIMEX_H
+ #define __UM_TIMEX_H
+
+-typedef unsigned long cycles_t;
+-
+-static inline cycles_t get_cycles (void)
+-{
+- return 0;
+-}
+-
+ #define CLOCK_TICK_RATE (HZ)
+
++#include <asm-generic/timex.h>
++
+ #endif
+diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
+index f307c93fc90a7..c3af959648e62 100644
+--- a/arch/x86/crypto/Makefile
++++ b/arch/x86/crypto/Makefile
+@@ -62,7 +62,9 @@ obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
+ sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
+
+ obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o
+-blake2s-x86_64-y := blake2s-core.o blake2s-glue.o
++blake2s-x86_64-y := blake2s-shash.o
++obj-$(if $(CONFIG_CRYPTO_BLAKE2S_X86),y) += libblake2s-x86_64.o
++libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o
+
+ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
+ ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
+diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c
+index a40365ab301ee..69853c13e8fb0 100644
+--- a/arch/x86/crypto/blake2s-glue.c
++++ b/arch/x86/crypto/blake2s-glue.c
+@@ -5,7 +5,6 @@
+
+ #include <crypto/internal/blake2s.h>
+ #include <crypto/internal/simd.h>
+-#include <crypto/internal/hash.h>
+
+ #include <linux/types.h>
+ #include <linux/jump_label.h>
+@@ -28,9 +27,8 @@ asmlinkage void blake2s_compress_avx512(struct blake2s_state *state,
+ static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3);
+ static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512);
+
+-void blake2s_compress_arch(struct blake2s_state *state,
+- const u8 *block, size_t nblocks,
+- const u32 inc)
++void blake2s_compress(struct blake2s_state *state, const u8 *block,
++ size_t nblocks, const u32 inc)
+ {
+ /* SIMD disables preemption, so relax after processing each page. */
+ BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8);
+@@ -56,49 +54,12 @@ void blake2s_compress_arch(struct blake2s_state *state,
+ block += blocks * BLAKE2S_BLOCK_SIZE;
+ } while (nblocks);
+ }
+-EXPORT_SYMBOL(blake2s_compress_arch);
+-
+-static int crypto_blake2s_update_x86(struct shash_desc *desc,
+- const u8 *in, unsigned int inlen)
+-{
+- return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch);
+-}
+-
+-static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
+-{
+- return crypto_blake2s_final(desc, out, blake2s_compress_arch);
+-}
+-
+-#define BLAKE2S_ALG(name, driver_name, digest_size) \
+- { \
+- .base.cra_name = name, \
+- .base.cra_driver_name = driver_name, \
+- .base.cra_priority = 200, \
+- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
+- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
+- .base.cra_module = THIS_MODULE, \
+- .digestsize = digest_size, \
+- .setkey = crypto_blake2s_setkey, \
+- .init = crypto_blake2s_init, \
+- .update = crypto_blake2s_update_x86, \
+- .final = crypto_blake2s_final_x86, \
+- .descsize = sizeof(struct blake2s_state), \
+- }
+-
+-static struct shash_alg blake2s_algs[] = {
+- BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE),
+- BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE),
+- BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE),
+- BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE),
+-};
++EXPORT_SYMBOL(blake2s_compress);
+
+ static int __init blake2s_mod_init(void)
+ {
+- if (!boot_cpu_has(X86_FEATURE_SSSE3))
+- return 0;
+-
+- static_branch_enable(&blake2s_use_ssse3);
++ if (boot_cpu_has(X86_FEATURE_SSSE3))
++ static_branch_enable(&blake2s_use_ssse3);
+
+ if (IS_ENABLED(CONFIG_AS_AVX512) &&
+ boot_cpu_has(X86_FEATURE_AVX) &&
+@@ -109,26 +70,9 @@ static int __init blake2s_mod_init(void)
+ XFEATURE_MASK_AVX512, NULL))
+ static_branch_enable(&blake2s_use_avx512);
+
+- return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
+- crypto_register_shashes(blake2s_algs,
+- ARRAY_SIZE(blake2s_algs)) : 0;
+-}
+-
+-static void __exit blake2s_mod_exit(void)
+-{
+- if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
+- crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
++ return 0;
+ }
+
+ module_init(blake2s_mod_init);
+-module_exit(blake2s_mod_exit);
+
+-MODULE_ALIAS_CRYPTO("blake2s-128");
+-MODULE_ALIAS_CRYPTO("blake2s-128-x86");
+-MODULE_ALIAS_CRYPTO("blake2s-160");
+-MODULE_ALIAS_CRYPTO("blake2s-160-x86");
+-MODULE_ALIAS_CRYPTO("blake2s-224");
+-MODULE_ALIAS_CRYPTO("blake2s-224-x86");
+-MODULE_ALIAS_CRYPTO("blake2s-256");
+-MODULE_ALIAS_CRYPTO("blake2s-256-x86");
+ MODULE_LICENSE("GPL v2");
+diff --git a/arch/x86/crypto/blake2s-shash.c b/arch/x86/crypto/blake2s-shash.c
+new file mode 100644
+index 0000000000000..59ae28abe35cc
+--- /dev/null
++++ b/arch/x86/crypto/blake2s-shash.c
+@@ -0,0 +1,77 @@
++// SPDX-License-Identifier: GPL-2.0 OR MIT
++/*
++ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved.
++ */
++
++#include <crypto/internal/blake2s.h>
++#include <crypto/internal/simd.h>
++#include <crypto/internal/hash.h>
++
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sizes.h>
++
++#include <asm/cpufeature.h>
++#include <asm/processor.h>
++
++static int crypto_blake2s_update_x86(struct shash_desc *desc,
++ const u8 *in, unsigned int inlen)
++{
++ return crypto_blake2s_update(desc, in, inlen, false);
++}
++
++static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
++{
++ return crypto_blake2s_final(desc, out, false);
++}
++
++#define BLAKE2S_ALG(name, driver_name, digest_size) \
++ { \
++ .base.cra_name = name, \
++ .base.cra_driver_name = driver_name, \
++ .base.cra_priority = 200, \
++ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
++ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
++ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
++ .base.cra_module = THIS_MODULE, \
++ .digestsize = digest_size, \
++ .setkey = crypto_blake2s_setkey, \
++ .init = crypto_blake2s_init, \
++ .update = crypto_blake2s_update_x86, \
++ .final = crypto_blake2s_final_x86, \
++ .descsize = sizeof(struct blake2s_state), \
++ }
++
++static struct shash_alg blake2s_algs[] = {
++ BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE),
++ BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE),
++ BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE),
++ BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE),
++};
++
++static int __init blake2s_mod_init(void)
++{
++ if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
++ return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
++ return 0;
++}
++
++static void __exit blake2s_mod_exit(void)
++{
++ if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
++ crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
++}
++
++module_init(blake2s_mod_init);
++module_exit(blake2s_mod_exit);
++
++MODULE_ALIAS_CRYPTO("blake2s-128");
++MODULE_ALIAS_CRYPTO("blake2s-128-x86");
++MODULE_ALIAS_CRYPTO("blake2s-160");
++MODULE_ALIAS_CRYPTO("blake2s-160-x86");
++MODULE_ALIAS_CRYPTO("blake2s-224");
++MODULE_ALIAS_CRYPTO("blake2s-224-x86");
++MODULE_ALIAS_CRYPTO("blake2s-256");
++MODULE_ALIAS_CRYPTO("blake2s-256-x86");
++MODULE_LICENSE("GPL v2");
+diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h
+index a4a8b1b16c0c1..956e4145311b1 100644
+--- a/arch/x86/include/asm/timex.h
++++ b/arch/x86/include/asm/timex.h
+@@ -5,6 +5,15 @@
+ #include <asm/processor.h>
+ #include <asm/tsc.h>
+
++static inline unsigned long random_get_entropy(void)
++{
++ if (!IS_ENABLED(CONFIG_X86_TSC) &&
++ !cpu_feature_enabled(X86_FEATURE_TSC))
++ return random_get_entropy_fallback();
++ return rdtsc();
++}
++#define random_get_entropy random_get_entropy
++
+ /* Assume we use the PIT time source for the clock tick */
+ #define CLOCK_TICK_RATE PIT_TICK_RATE
+
+diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
+index 01a300a9700b9..fbdc3d9514943 100644
+--- a/arch/x86/include/asm/tsc.h
++++ b/arch/x86/include/asm/tsc.h
+@@ -20,13 +20,12 @@ extern void disable_TSC(void);
+
+ static inline cycles_t get_cycles(void)
+ {
+-#ifndef CONFIG_X86_TSC
+- if (!boot_cpu_has(X86_FEATURE_TSC))
++ if (!IS_ENABLED(CONFIG_X86_TSC) &&
++ !cpu_feature_enabled(X86_FEATURE_TSC))
+ return 0;
+-#endif
+-
+ return rdtsc();
+ }
++#define get_cycles get_cycles
+
+ extern struct system_counterval_t convert_art_to_tsc(u64 art);
+ extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index ef6316fef99ff..ba0efc30fac52 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -79,7 +79,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
+ inc_irq_stat(hyperv_stimer0_count);
+ if (hv_stimer0_handler)
+ hv_stimer0_handler();
+- add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0);
++ add_interrupt_randomness(HYPERV_STIMER0_VECTOR);
+ ack_APIC_irq();
+
+ set_irq_regs(old_regs);
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index c5fd00945e752..3417405077adf 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -5396,14 +5396,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
+ uint i;
+
+ if (pcid == kvm_get_active_pcid(vcpu)) {
+- mmu->invlpg(vcpu, gva, mmu->root_hpa);
++ if (mmu->invlpg)
++ mmu->invlpg(vcpu, gva, mmu->root_hpa);
+ tlb_flush = true;
+ }
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+ if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
+ pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
+- mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
++ if (mmu->invlpg)
++ mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+ tlb_flush = true;
+ }
+ }
+diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
+index 233ec75e60c69..3f2462f2d0270 100644
+--- a/arch/xtensa/include/asm/timex.h
++++ b/arch/xtensa/include/asm/timex.h
+@@ -29,10 +29,6 @@
+
+ extern unsigned long ccount_freq;
+
+-typedef unsigned long long cycles_t;
+-
+-#define get_cycles() (0)
+-
+ void local_timer_setup(unsigned cpu);
+
+ /*
+@@ -59,4 +55,6 @@ static inline void set_linux_timer (unsigned long ccompare)
+ xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER);
+ }
+
++#include <asm-generic/timex.h>
++
+ #endif /* _XTENSA_TIMEX_H */
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index 285f82647d2b7..55718de561375 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -1919,9 +1919,10 @@ config CRYPTO_STATS
+ config CRYPTO_HASH_INFO
+ bool
+
+-source "lib/crypto/Kconfig"
+ source "drivers/crypto/Kconfig"
+ source "crypto/asymmetric_keys/Kconfig"
+ source "certs/Kconfig"
+
+ endif # if CRYPTO
++
++source "lib/crypto/Kconfig"
+diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c
+index 72fe480f9bd67..5f96a21f87883 100644
+--- a/crypto/blake2s_generic.c
++++ b/crypto/blake2s_generic.c
+@@ -15,12 +15,12 @@
+ static int crypto_blake2s_update_generic(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen)
+ {
+- return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic);
++ return crypto_blake2s_update(desc, in, inlen, true);
+ }
+
+ static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
+ {
+- return crypto_blake2s_final(desc, out, blake2s_compress_generic);
++ return crypto_blake2s_final(desc, out, true);
+ }
+
+ #define BLAKE2S_ALG(name, driver_name, digest_size) \
+diff --git a/crypto/drbg.c b/crypto/drbg.c
+index ea85d4a0fe9e9..03c9ef768c227 100644
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1491,12 +1491,13 @@ static int drbg_generate_long(struct drbg_state *drbg,
+ return 0;
+ }
+
+-static void drbg_schedule_async_seed(struct random_ready_callback *rdy)
++static int drbg_schedule_async_seed(struct notifier_block *nb, unsigned long action, void *data)
+ {
+- struct drbg_state *drbg = container_of(rdy, struct drbg_state,
++ struct drbg_state *drbg = container_of(nb, struct drbg_state,
+ random_ready);
+
+ schedule_work(&drbg->seed_work);
++ return 0;
+ }
+
+ static int drbg_prepare_hrng(struct drbg_state *drbg)
+@@ -1511,10 +1512,8 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
+
+ INIT_WORK(&drbg->seed_work, drbg_async_seed);
+
+- drbg->random_ready.owner = THIS_MODULE;
+- drbg->random_ready.func = drbg_schedule_async_seed;
+-
+- err = add_random_ready_callback(&drbg->random_ready);
++ drbg->random_ready.notifier_call = drbg_schedule_async_seed;
++ err = register_random_ready_notifier(&drbg->random_ready);
+
+ switch (err) {
+ case 0:
+@@ -1525,7 +1524,7 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
+ fallthrough;
+
+ default:
+- drbg->random_ready.func = NULL;
++ drbg->random_ready.notifier_call = NULL;
+ return err;
+ }
+
+@@ -1629,8 +1628,8 @@ free_everything:
+ */
+ static int drbg_uninstantiate(struct drbg_state *drbg)
+ {
+- if (drbg->random_ready.func) {
+- del_random_ready_callback(&drbg->random_ready);
++ if (drbg->random_ready.notifier_call) {
++ unregister_random_ready_notifier(&drbg->random_ready);
+ cancel_work_sync(&drbg->seed_work);
+ }
+
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index 00c0ebaab29f7..6e23b76aef5dc 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -415,19 +415,30 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
+ loff_t offset, size_t count)
+ {
+ struct acpi_data_attr *data_attr;
+- void *base;
+- ssize_t rc;
++ void __iomem *base;
++ ssize_t size;
+
+ data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
++ size = data_attr->attr.size;
++
++ if (offset < 0)
++ return -EINVAL;
++
++ if (offset >= size)
++ return 0;
+
+- base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
++ if (count > size - offset)
++ count = size - offset;
++
++ base = acpi_os_map_iomem(data_attr->addr, size);
+ if (!base)
+ return -ENOMEM;
+- rc = memory_read_from_buffer(buf, count, &offset, base,
+- data_attr->attr.size);
+- acpi_os_unmap_memory(base, data_attr->attr.size);
+
+- return rc;
++ memcpy_fromio(buf, base + offset, count);
++
++ acpi_os_unmap_iomem(base, size);
++
++ return count;
+ }
+
+ static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index 740811893c570..55f48375e3fe5 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -449,6 +449,7 @@ config RANDOM_TRUST_BOOTLOADER
+ device randomness. Say Y here to assume the entropy provided by the
+ booloader is trustworthy so it will be added to the kernel's entropy
+ pool. Otherwise, say N here so it will be regarded as device input that
+- only mixes the entropy pool.
++ only mixes the entropy pool. This can also be configured at boot with
++ "random.trust_bootloader=on/off".
+
+ endmenu
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index a3db27916256d..cfb085de876b7 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -15,6 +15,7 @@
+ #include <linux/err.h>
+ #include <linux/fs.h>
+ #include <linux/hw_random.h>
++#include <linux/random.h>
+ #include <linux/kernel.h>
+ #include <linux/kthread.h>
+ #include <linux/sched/signal.h>
981 +diff --git a/drivers/char/random.c b/drivers/char/random.c
982 +index ebe86de9d0acc..ca17a658c2147 100644
983 +--- a/drivers/char/random.c
984 ++++ b/drivers/char/random.c
985 +@@ -1,310 +1,26 @@
986 ++// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
987 + /*
988 +- * random.c -- A strong random number generator
989 +- *
990 +- * Copyright (C) 2017 Jason A. Donenfeld <Jason@×××××.com>. All
991 +- * Rights Reserved.
992 +- *
993 ++ * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved.
994 + * Copyright Matt Mackall <mpm@×××××××.com>, 2003, 2004, 2005
995 +- *
996 +- * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
997 +- * rights reserved.
998 +- *
999 +- * Redistribution and use in source and binary forms, with or without
1000 +- * modification, are permitted provided that the following conditions
1001 +- * are met:
1002 +- * 1. Redistributions of source code must retain the above copyright
1003 +- * notice, and the entire permission notice in its entirety,
1004 +- * including the disclaimer of warranties.
1005 +- * 2. Redistributions in binary form must reproduce the above copyright
1006 +- * notice, this list of conditions and the following disclaimer in the
1007 +- * documentation and/or other materials provided with the distribution.
1008 +- * 3. The name of the author may not be used to endorse or promote
1009 +- * products derived from this software without specific prior
1010 +- * written permission.
1011 +- *
1012 +- * ALTERNATIVELY, this product may be distributed under the terms of
1013 +- * the GNU General Public License, in which case the provisions of the GPL are
1014 +- * required INSTEAD OF the above restrictions. (This clause is
1015 +- * necessary due to a potential bad interaction between the GPL and
1016 +- * the restrictions contained in a BSD-style copyright.)
1017 +- *
1018 +- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
1019 +- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1020 +- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
1021 +- * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
1022 +- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1023 +- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
1024 +- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
1025 +- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1026 +- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1027 +- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
1028 +- * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
1029 +- * DAMAGE.
1030 +- */
1031 +-
1032 +-/*
1033 +- * (now, with legal B.S. out of the way.....)
1034 +- *
1035 +- * This routine gathers environmental noise from device drivers, etc.,
1036 +- * and returns good random numbers, suitable for cryptographic use.
1037 +- * Besides the obvious cryptographic uses, these numbers are also good
1038 +- * for seeding TCP sequence numbers, and other places where it is
1039 +- * desirable to have numbers which are not only random, but hard to
1040 +- * predict by an attacker.
1041 +- *
1042 +- * Theory of operation
1043 +- * ===================
1044 +- *
1045 +- * Computers are very predictable devices. Hence it is extremely hard
1046 +- * to produce truly random numbers on a computer --- as opposed to
1047 +- * pseudo-random numbers, which can easily generated by using a
1048 +- * algorithm. Unfortunately, it is very easy for attackers to guess
1049 +- * the sequence of pseudo-random number generators, and for some
1050 +- * applications this is not acceptable. So instead, we must try to
1051 +- * gather "environmental noise" from the computer's environment, which
1052 +- * must be hard for outside attackers to observe, and use that to
1053 +- * generate random numbers. In a Unix environment, this is best done
1054 +- * from inside the kernel.
1055 +- *
1056 +- * Sources of randomness from the environment include inter-keyboard
1057 +- * timings, inter-interrupt timings from some interrupts, and other
1058 +- * events which are both (a) non-deterministic and (b) hard for an
1059 +- * outside observer to measure. Randomness from these sources are
1060 +- * added to an "entropy pool", which is mixed using a CRC-like function.
1061 +- * This is not cryptographically strong, but it is adequate assuming
1062 +- * the randomness is not chosen maliciously, and it is fast enough that
1063 +- * the overhead of doing it on every interrupt is very reasonable.
1064 +- * As random bytes are mixed into the entropy pool, the routines keep
1065 +- * an *estimate* of how many bits of randomness have been stored into
1066 +- * the random number generator's internal state.
1067 +- *
1068 +- * When random bytes are desired, they are obtained by taking the SHA
1069 +- * hash of the contents of the "entropy pool". The SHA hash avoids
1070 +- * exposing the internal state of the entropy pool. It is believed to
1071 +- * be computationally infeasible to derive any useful information
1072 +- * about the input of SHA from its output. Even if it is possible to
1073 +- * analyze SHA in some clever way, as long as the amount of data
1074 +- * returned from the generator is less than the inherent entropy in
1075 +- * the pool, the output data is totally unpredictable. For this
1076 +- * reason, the routine decreases its internal estimate of how many
1077 +- * bits of "true randomness" are contained in the entropy pool as it
1078 +- * outputs random numbers.
1079 +- *
1080 +- * If this estimate goes to zero, the routine can still generate
1081 +- * random numbers; however, an attacker may (at least in theory) be
1082 +- * able to infer the future output of the generator from prior
1083 +- * outputs. This requires successful cryptanalysis of SHA, which is
1084 +- * not believed to be feasible, but there is a remote possibility.
1085 +- * Nonetheless, these numbers should be useful for the vast majority
1086 +- * of purposes.
1087 +- *
1088 +- * Exported interfaces ---- output
1089 +- * ===============================
1090 +- *
1091 +- * There are four exported interfaces; two for use within the kernel,
1092 +- * and two or use from userspace.
1093 +- *
1094 +- * Exported interfaces ---- userspace output
1095 +- * -----------------------------------------
1096 +- *
1097 +- * The userspace interfaces are two character devices /dev/random and
1098 +- * /dev/urandom. /dev/random is suitable for use when very high
1099 +- * quality randomness is desired (for example, for key generation or
1100 +- * one-time pads), as it will only return a maximum of the number of
1101 +- * bits of randomness (as estimated by the random number generator)
1102 +- * contained in the entropy pool.
1103 +- *
1104 +- * The /dev/urandom device does not have this limit, and will return
1105 +- * as many bytes as are requested. As more and more random bytes are
1106 +- * requested without giving time for the entropy pool to recharge,
1107 +- * this will result in random numbers that are merely cryptographically
1108 +- * strong. For many applications, however, this is acceptable.
1109 +- *
1110 +- * Exported interfaces ---- kernel output
1111 +- * --------------------------------------
1112 +- *
1113 +- * The primary kernel interface is
1114 +- *
1115 +- * void get_random_bytes(void *buf, int nbytes);
1116 +- *
1117 +- * This interface will return the requested number of random bytes,
1118 +- * and place it in the requested buffer. This is equivalent to a
1119 +- * read from /dev/urandom.
1120 +- *
1121 +- * For less critical applications, there are the functions:
1122 +- *
1123 +- * u32 get_random_u32()
1124 +- * u64 get_random_u64()
1125 +- * unsigned int get_random_int()
1126 +- * unsigned long get_random_long()
1127 +- *
1128 +- * These are produced by a cryptographic RNG seeded from get_random_bytes,
1129 +- * and so do not deplete the entropy pool as much. These are recommended
1130 +- * for most in-kernel operations *if the result is going to be stored in
1131 +- * the kernel*.
1132 +- *
1133 +- * Specifically, the get_random_int() family do not attempt to do
1134 +- * "anti-backtracking". If you capture the state of the kernel (e.g.
1135 +- * by snapshotting the VM), you can figure out previous get_random_int()
1136 +- * return values. But if the value is stored in the kernel anyway,
1137 +- * this is not a problem.
1138 +- *
1139 +- * It *is* safe to expose get_random_int() output to attackers (e.g. as
1140 +- * network cookies); given outputs 1..n, it's not feasible to predict
1141 +- * outputs 0 or n+1. The only concern is an attacker who breaks into
1142 +- * the kernel later; the get_random_int() engine is not reseeded as
1143 +- * often as the get_random_bytes() one.
1144 +- *
1145 +- * get_random_bytes() is needed for keys that need to stay secret after
1146 +- * they are erased from the kernel. For example, any key that will
1147 +- * be wrapped and stored encrypted. And session encryption keys: we'd
1148 +- * like to know that after the session is closed and the keys erased,
1149 +- * the plaintext is unrecoverable to someone who recorded the ciphertext.
1150 +- *
1151 +- * But for network ports/cookies, stack canaries, PRNG seeds, address
1152 +- * space layout randomization, session *authentication* keys, or other
1153 +- * applications where the sensitive data is stored in the kernel in
1154 +- * plaintext for as long as it's sensitive, the get_random_int() family
1155 +- * is just fine.
1156 +- *
1157 +- * Consider ASLR. We want to keep the address space secret from an
1158 +- * outside attacker while the process is running, but once the address
1159 +- * space is torn down, it's of no use to an attacker any more. And it's
1160 +- * stored in kernel data structures as long as it's alive, so worrying
1161 +- * about an attacker's ability to extrapolate it from the get_random_int()
1162 +- * CRNG is silly.
1163 +- *
1164 +- * Even some cryptographic keys are safe to generate with get_random_int().
1165 +- * In particular, keys for SipHash are generally fine. Here, knowledge
1166 +- * of the key authorizes you to do something to a kernel object (inject
1167 +- * packets to a network connection, or flood a hash table), and the
1168 +- * key is stored with the object being protected. Once it goes away,
1169 +- * we no longer care if anyone knows the key.
1170 +- *
1171 +- * prandom_u32()
1172 +- * -------------
1173 +- *
1174 +- * For even weaker applications, see the pseudorandom generator
1175 +- * prandom_u32(), prandom_max(), and prandom_bytes(). If the random
1176 +- * numbers aren't security-critical at all, these are *far* cheaper.
1177 +- * Useful for self-tests, random error simulation, randomized backoffs,
1178 +- * and any other application where you trust that nobody is trying to
1179 +- * maliciously mess with you by guessing the "random" numbers.
1180 +- *
1181 +- * Exported interfaces ---- input
1182 +- * ==============================
1183 +- *
1184 +- * The current exported interfaces for gathering environmental noise
1185 +- * from the devices are:
1186 +- *
1187 +- * void add_device_randomness(const void *buf, unsigned int size);
1188 +- * void add_input_randomness(unsigned int type, unsigned int code,
1189 +- * unsigned int value);
1190 +- * void add_interrupt_randomness(int irq, int irq_flags);
1191 +- * void add_disk_randomness(struct gendisk *disk);
1192 +- *
1193 +- * add_device_randomness() is for adding data to the random pool that
1194 +- * is likely to differ between two devices (or possibly even per boot).
1195 +- * This would be things like MAC addresses or serial numbers, or the
1196 +- * read-out of the RTC. This does *not* add any actual entropy to the
1197 +- * pool, but it initializes the pool to different values for devices
1198 +- * that might otherwise be identical and have very little entropy
1199 +- * available to them (particularly common in the embedded world).
1200 +- *
1201 +- * add_input_randomness() uses the input layer interrupt timing, as well as
1202 +- * the event type information from the hardware.
1203 +- *
1204 +- * add_interrupt_randomness() uses the interrupt timing as random
1205 +- * inputs to the entropy pool. Using the cycle counters and the irq source
1206 +- * as inputs, it feeds the randomness roughly once a second.
1207 +- *
1208 +- * add_disk_randomness() uses what amounts to the seek time of block
1209 +- * layer request events, on a per-disk_devt basis, as input to the
1210 +- * entropy pool. Note that high-speed solid state drives with very low
1211 +- * seek times do not make for good sources of entropy, as their seek
1212 +- * times are usually fairly consistent.
1213 +- *
1214 +- * All of these routines try to estimate how many bits of randomness a
1215 +- * particular randomness source. They do this by keeping track of the
1216 +- * first and second order deltas of the event timings.
1217 +- *
1218 +- * Ensuring unpredictability at system startup
1219 +- * ============================================
1220 +- *
1221 +- * When any operating system starts up, it will go through a sequence
1222 +- * of actions that are fairly predictable by an adversary, especially
1223 +- * if the start-up does not involve interaction with a human operator.
1224 +- * This reduces the actual number of bits of unpredictability in the
1225 +- * entropy pool below the value in entropy_count. In order to
1226 +- * counteract this effect, it helps to carry information in the
1227 +- * entropy pool across shut-downs and start-ups. To do this, put the
1228 +- * following lines an appropriate script which is run during the boot
1229 +- * sequence:
1230 +- *
1231 +- * echo "Initializing random number generator..."
1232 +- * random_seed=/var/run/random-seed
1233 +- * # Carry a random seed from start-up to start-up
1234 +- * # Load and then save the whole entropy pool
1235 +- * if [ -f $random_seed ]; then
1236 +- * cat $random_seed >/dev/urandom
1237 +- * else
1238 +- * touch $random_seed
1239 +- * fi
1240 +- * chmod 600 $random_seed
1241 +- * dd if=/dev/urandom of=$random_seed count=1 bs=512
1242 +- *
1243 +- * and the following lines in an appropriate script which is run as
1244 +- * the system is shutdown:
1245 +- *
1246 +- * # Carry a random seed from shut-down to start-up
1247 +- * # Save the whole entropy pool
1248 +- * echo "Saving random seed..."
1249 +- * random_seed=/var/run/random-seed
1250 +- * touch $random_seed
1251 +- * chmod 600 $random_seed
1252 +- * dd if=/dev/urandom of=$random_seed count=1 bs=512
1253 +- *
1254 +- * For example, on most modern systems using the System V init
1255 +- * scripts, such code fragments would be found in
1256 +- * /etc/rc.d/init.d/random. On older Linux systems, the correct script
1257 +- * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
1258 +- *
1259 +- * Effectively, these commands cause the contents of the entropy pool
1260 +- * to be saved at shut-down time and reloaded into the entropy pool at
1261 +- * start-up. (The 'dd' in the addition to the bootup script is to
1262 +- * make sure that /etc/random-seed is different for every start-up,
1263 +- * even if the system crashes without executing rc.0.) Even with
1264 +- * complete knowledge of the start-up activities, predicting the state
1265 +- * of the entropy pool requires knowledge of the previous history of
1266 +- * the system.
1267 +- *
1268 +- * Configuring the /dev/random driver under Linux
1269 +- * ==============================================
1270 +- *
1271 +- * The /dev/random driver under Linux uses minor numbers 8 and 9 of
1272 +- * the /dev/mem major number (#1). So if your system does not have
1273 +- * /dev/random and /dev/urandom created already, they can be created
1274 +- * by using the commands:
1275 +- *
1276 +- * mknod /dev/random c 1 8
1277 +- * mknod /dev/urandom c 1 9
1278 +- *
1279 +- * Acknowledgements:
1280 +- * =================
1281 +- *
1282 +- * Ideas for constructing this random number generator were derived
1283 +- * from Pretty Good Privacy's random number generator, and from private
1284 +- * discussions with Phil Karn. Colin Plumb provided a faster random
1285 +- * number generator, which speed up the mixing function of the entropy
1286 +- * pool, taken from PGPfone. Dale Worley has also contributed many
1287 +- * useful ideas and suggestions to improve this driver.
1288 +- *
1289 +- * Any flaws in the design are solely my responsibility, and should
1290 +- * not be attributed to the Phil, Colin, or any of authors of PGP.
1291 +- *
1292 +- * Further background information on this topic may be obtained from
1293 +- * RFC 1750, "Randomness Recommendations for Security", by Donald
1294 +- * Eastlake, Steve Crocker, and Jeff Schiller.
1295 ++ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
1296 ++ *
1297 ++ * This driver produces cryptographically secure pseudorandom data. It is divided
1298 ++ * into roughly six sections, each with a section header:
1299 ++ *
1300 ++ * - Initialization and readiness waiting.
1301 ++ * - Fast key erasure RNG, the "crng".
1302 ++ * - Entropy accumulation and extraction routines.
1303 ++ * - Entropy collection routines.
1304 ++ * - Userspace reader/writer interfaces.
1305 ++ * - Sysctl interface.
1306 ++ *
1307 ++ * The high level overview is that there is one input pool, into which
1308 ++ * various pieces of data are hashed. Prior to initialization, some of that
1309 ++ * data is then "credited" as having a certain number of bits of entropy.
1310 ++ * When enough bits of entropy are available, the hash is finalized and
1311 ++ * handed as a key to a stream cipher that expands it indefinitely for
1312 ++ * various consumers. This key is periodically refreshed as the various
1313 ++ * entropy collectors, described below, add data to the input pool.
1314 + */
1315 +
1316 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1317 +@@ -327,7 +43,6 @@
1318 + #include <linux/spinlock.h>
1319 + #include <linux/kthread.h>
1320 + #include <linux/percpu.h>
1321 +-#include <linux/fips.h>
1322 + #include <linux/ptrace.h>
1323 + #include <linux/workqueue.h>
1324 + #include <linux/irq.h>
1325 +@@ -335,1479 +50,1082 @@
1326 + #include <linux/syscalls.h>
1327 + #include <linux/completion.h>
1328 + #include <linux/uuid.h>
1329 ++#include <linux/uaccess.h>
1330 ++#include <linux/siphash.h>
1331 ++#include <linux/uio.h>
1332 + #include <crypto/chacha.h>
1333 +-#include <crypto/sha1.h>
1334 +-
1335 ++#include <crypto/blake2s.h>
1336 + #include <asm/processor.h>
1337 +-#include <linux/uaccess.h>
1338 + #include <asm/irq.h>
1339 + #include <asm/irq_regs.h>
1340 + #include <asm/io.h>
1341 +
1342 +-#define CREATE_TRACE_POINTS
1343 +-#include <trace/events/random.h>
1344 +-
1345 +-/* #define ADD_INTERRUPT_BENCH */
1346 ++/*********************************************************************
1347 ++ *
1348 ++ * Initialization and readiness waiting.
1349 ++ *
1350 ++ * Much of the RNG infrastructure is devoted to various dependencies
1351 ++ * being able to wait until the RNG has collected enough entropy and
1352 ++ * is ready for safe consumption.
1353 ++ *
1354 ++ *********************************************************************/
1355 +
1356 + /*
1357 +- * Configuration information
1358 ++ * crng_init is protected by base_crng->lock, and only increases
1359 ++ * its value (from empty->early->ready).
1360 + */
1361 +-#define INPUT_POOL_SHIFT 12
1362 +-#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
1363 +-#define OUTPUT_POOL_SHIFT 10
1364 +-#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
1365 +-#define EXTRACT_SIZE 10
1366 +-
1367 ++static enum {
1368 ++ CRNG_EMPTY = 0, /* Little to no entropy collected */
1369 ++ CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
1370 ++ CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
1371 ++} crng_init __read_mostly = CRNG_EMPTY;
1372 ++static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
1373 ++#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
1374 ++/* Various types of waiters for crng_init->CRNG_READY transition. */
1375 ++static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
1376 ++static struct fasync_struct *fasync;
1377 ++static DEFINE_SPINLOCK(random_ready_chain_lock);
1378 ++static RAW_NOTIFIER_HEAD(random_ready_chain);
1379 +
1380 +-#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
1381 ++/* Control how we warn userspace. */
1382 ++static struct ratelimit_state urandom_warning =
1383 ++ RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
1384 ++static int ratelimit_disable __read_mostly =
1385 ++ IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
1386 ++module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
1387 ++MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
1388 +
1389 + /*
1390 +- * To allow fractional bits to be tracked, the entropy_count field is
1391 +- * denominated in units of 1/8th bits.
1392 ++ * Returns whether or not the input pool has been seeded and thus guaranteed
1393 ++ * to supply cryptographically secure random numbers. This applies to: the
1394 ++ * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
1395 ++ * u64,int,long} family of functions.
1396 + *
1397 +- * 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in
1398 +- * credit_entropy_bits() needs to be 64 bits wide.
1399 ++ * Returns: true if the input pool has been seeded.
1400 ++ * false if the input pool has not been seeded.
1401 + */
1402 +-#define ENTROPY_SHIFT 3
1403 +-#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
1404 ++bool rng_is_initialized(void)
1405 ++{
1406 ++ return crng_ready();
1407 ++}
1408 ++EXPORT_SYMBOL(rng_is_initialized);
1409 +
1410 +-/*
1411 +- * If the entropy count falls under this number of bits, then we
1412 +- * should wake up processes which are selecting or polling on write
1413 +- * access to /dev/random.
1414 +- */
1415 +-static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
1416 ++static void __cold crng_set_ready(struct work_struct *work)
1417 ++{
1418 ++ static_branch_enable(&crng_is_ready);
1419 ++}
1420 ++
1421 ++/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
1422 ++static void try_to_generate_entropy(void);
1423 +
1424 + /*
1425 +- * Originally, we used a primitive polynomial of degree .poolwords
1426 +- * over GF(2). The taps for various sizes are defined below. They
1427 +- * were chosen to be evenly spaced except for the last tap, which is 1
1428 +- * to get the twisting happening as fast as possible.
1429 +- *
1430 +- * For the purposes of better mixing, we use the CRC-32 polynomial as
1431 +- * well to make a (modified) twisted Generalized Feedback Shift
1432 +- * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
1433 +- * generators. ACM Transactions on Modeling and Computer Simulation
1434 +- * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
1435 +- * GFSR generators II. ACM Transactions on Modeling and Computer
1436 +- * Simulation 4:254-266)
1437 +- *
1438 +- * Thanks to Colin Plumb for suggesting this.
1439 +- *
1440 +- * The mixing operation is much less sensitive than the output hash,
1441 +- * where we use SHA-1. All that we want of mixing operation is that
1442 +- * it be a good non-cryptographic hash; i.e. it not produce collisions
1443 +- * when fed "random" data of the sort we expect to see. As long as
1444 +- * the pool state differs for different inputs, we have preserved the
1445 +- * input entropy and done a good job. The fact that an intelligent
1446 +- * attacker can construct inputs that will produce controlled
1447 +- * alterations to the pool's state is not important because we don't
1448 +- * consider such inputs to contribute any randomness. The only
1449 +- * property we need with respect to them is that the attacker can't
1450 +- * increase his/her knowledge of the pool's state. Since all
1451 +- * additions are reversible (knowing the final state and the input,
1452 +- * you can reconstruct the initial state), if an attacker has any
1453 +- * uncertainty about the initial state, he/she can only shuffle that
1454 +- * uncertainty about, but never cause any collisions (which would
1455 +- * decrease the uncertainty).
1456 ++ * Wait for the input pool to be seeded and thus guaranteed to supply
1457 ++ * cryptographically secure random numbers. This applies to: the /dev/urandom
1458 ++ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
1459 ++ * family of functions. Using any of these functions without first calling
1460 ++ * this function forfeits the guarantee of security.
1461 + *
1462 +- * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
1463 +- * Videau in their paper, "The Linux Pseudorandom Number Generator
1464 +- * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their
1465 +- * paper, they point out that we are not using a true Twisted GFSR,
1466 +- * since Matsumoto & Kurita used a trinomial feedback polynomial (that
1467 +- * is, with only three taps, instead of the six that we are using).
1468 +- * As a result, the resulting polynomial is neither primitive nor
1469 +- * irreducible, and hence does not have a maximal period over
1470 +- * GF(2**32). They suggest a slight change to the generator
1471 +- * polynomial which improves the resulting TGFSR polynomial to be
1472 +- * irreducible, which we have made here.
1473 ++ * Returns: 0 if the input pool has been seeded.
1474 ++ * -ERESTARTSYS if the function was interrupted by a signal.
1475 + */
1476 +-static const struct poolinfo {
1477 +- int poolbitshift, poolwords, poolbytes, poolfracbits;
1478 +-#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
1479 +- int tap1, tap2, tap3, tap4, tap5;
1480 +-} poolinfo_table[] = {
1481 +- /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
1482 +- /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
1483 +- { S(128), 104, 76, 51, 25, 1 },
1484 +-};
1485 ++int wait_for_random_bytes(void)
1486 ++{
1487 ++ while (!crng_ready()) {
1488 ++ int ret;
1489 ++
1490 ++ try_to_generate_entropy();
1491 ++ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
1492 ++ if (ret)
1493 ++ return ret > 0 ? 0 : ret;
1494 ++ }
1495 ++ return 0;
1496 ++}
1497 ++EXPORT_SYMBOL(wait_for_random_bytes);
1498 +
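A minimal usage sketch of how the two interfaces pair up (editorial, not part
of the patch; example_keygen is a hypothetical name):

	static int example_keygen(u8 key[32])
	{
		int ret = wait_for_random_bytes(); /* may return -ERESTARTSYS */

		if (ret)
			return ret;
		/* The pool is guaranteed to be seeded past this point. */
		get_random_bytes(key, 32);
		return 0;
	}
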
1499 + /*
1500 +- * Static global variables
1501 ++ * Add a callback function that will be invoked when the input
1502 ++ * pool is initialised.
1503 ++ *
1504 ++ * returns: 0 if callback is successfully added
1505 ++ * -EALREADY if pool is already initialised (callback not called)
1506 + */
1507 +-static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
1508 +-static struct fasync_struct *fasync;
1509 +-
1510 +-static DEFINE_SPINLOCK(random_ready_list_lock);
1511 +-static LIST_HEAD(random_ready_list);
1512 ++int __cold register_random_ready_notifier(struct notifier_block *nb)
1513 ++{
1514 ++ unsigned long flags;
1515 ++ int ret = -EALREADY;
1516 +
1517 +-struct crng_state {
1518 +- __u32 state[16];
1519 +- unsigned long init_time;
1520 +- spinlock_t lock;
1521 +-};
1522 ++ if (crng_ready())
1523 ++ return ret;
1524 +
1525 +-static struct crng_state primary_crng = {
1526 +- .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
1527 +-};
1528 ++ spin_lock_irqsave(&random_ready_chain_lock, flags);
1529 ++ if (!crng_ready())
1530 ++ ret = raw_notifier_chain_register(&random_ready_chain, nb);
1531 ++ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
1532 ++ return ret;
1533 ++}
1534 ++EXPORT_SYMBOL(register_random_ready_notifier);
1535 +
1536 + /*
1537 +- * crng_init = 0 --> Uninitialized
1538 +- * 1 --> Initialized
1539 +- * 2 --> Initialized from input_pool
1540 +- *
1541 +- * crng_init is protected by primary_crng->lock, and only increases
1542 +- * its value (from 0->1->2).
1543 ++ * Delete a previously registered readiness callback function.
1544 + */
1545 +-static int crng_init = 0;
1546 +-static bool crng_need_final_init = false;
1547 +-#define crng_ready() (likely(crng_init > 1))
1548 +-static int crng_init_cnt = 0;
1549 +-static unsigned long crng_global_init_time = 0;
1550 +-#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
1551 +-static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
1552 +-static void _crng_backtrack_protect(struct crng_state *crng,
1553 +- __u8 tmp[CHACHA_BLOCK_SIZE], int used);
1554 +-static void process_random_ready_list(void);
1555 +-static void _get_random_bytes(void *buf, int nbytes);
1556 +-
1557 +-static struct ratelimit_state unseeded_warning =
1558 +- RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
1559 +-static struct ratelimit_state urandom_warning =
1560 +- RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
1561 ++int __cold unregister_random_ready_notifier(struct notifier_block *nb)
1562 ++{
1563 ++ unsigned long flags;
1564 ++ int ret;
1565 +
1566 +-static int ratelimit_disable __read_mostly;
1567 ++ spin_lock_irqsave(&random_ready_chain_lock, flags);
1568 ++ ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
1569 ++ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
1570 ++ return ret;
1571 ++}
1572 ++EXPORT_SYMBOL(unregister_random_ready_notifier);
1573 +
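A sketch of the notifier in use (editorial; the example_* names are
hypothetical, and the callback signature is the standard one from
<linux/notifier.h>):

	static int example_rng_ready(struct notifier_block *nb,
				     unsigned long action, void *data)
	{
		pr_info("entropy pool is now seeded\n");
		return NOTIFY_DONE;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_rng_ready,
	};

	/* register_random_ready_notifier(&example_nb) returns -EALREADY if
	 * the pool is already seeded; in that case the caller can simply
	 * proceed instead of waiting for the callback. */
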
1574 +-module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
1575 +-MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
1576 ++static void __cold process_random_ready_list(void)
1577 ++{
1578 ++ unsigned long flags;
1579 +
1580 +-/**********************************************************************
1581 ++ spin_lock_irqsave(&random_ready_chain_lock, flags);
1582 ++ raw_notifier_call_chain(&random_ready_chain, 0, NULL);
1583 ++ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
1584 ++}
1585 ++
1586 ++#define warn_unseeded_randomness() \
1587 ++ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
1588 ++ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
1589 ++ __func__, (void *)_RET_IP_, crng_init)
1590 ++
1591 ++
1592 ++/*********************************************************************
1593 + *
1594 +- * OS independent entropy store. Here are the functions which handle
1595 +- * storing entropy in an entropy pool.
1596 ++ * Fast key erasure RNG, the "crng".
1597 + *
1598 +- **********************************************************************/
1599 ++ * These functions expand entropy from the entropy extractor into
1600 ++ * long streams for external consumption using the "fast key erasure"
1601 ++ * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
1602 ++ *
1603 ++ * There are a few exported interfaces for use by other drivers:
1604 ++ *
1605 ++ * void get_random_bytes(void *buf, size_t len)
1606 ++ * u32 get_random_u32()
1607 ++ * u64 get_random_u64()
1608 ++ * unsigned int get_random_int()
1609 ++ * unsigned long get_random_long()
1610 ++ *
1611 ++ * These interfaces will return the requested number of random bytes
1612 ++ * into the given buffer or as a return value. This is equivalent to
1613 ++ * a read from /dev/urandom. The u32, u64, int, and long family of
1614 ++ * functions may be higher performance for one-off random integers,
1615 ++ * because they do a bit of buffering and do not invoke reseeding
1616 ++ * until the buffer is emptied.
1617 ++ *
1618 ++ *********************************************************************/
1619 +
1620 +-struct entropy_store;
1621 +-struct entropy_store {
1622 +- /* read-only data: */
1623 +- const struct poolinfo *poolinfo;
1624 +- __u32 *pool;
1625 +- const char *name;
1626 ++enum {
1627 ++ CRNG_RESEED_START_INTERVAL = HZ,
1628 ++ CRNG_RESEED_INTERVAL = 60 * HZ
1629 ++};
1630 +
1631 +- /* read-write data: */
1632 ++static struct {
1633 ++ u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
1634 ++ unsigned long birth;
1635 ++ unsigned long generation;
1636 + spinlock_t lock;
1637 +- unsigned short add_ptr;
1638 +- unsigned short input_rotate;
1639 +- int entropy_count;
1640 +- unsigned int last_data_init:1;
1641 +- __u8 last_data[EXTRACT_SIZE];
1642 ++} base_crng = {
1643 ++ .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
1644 + };
1645 +
1646 +-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1647 +- size_t nbytes, int min, int rsvd);
1648 +-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
1649 +- size_t nbytes, int fips);
1650 +-
1651 +-static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
1652 +-static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
1653 +-
1654 +-static struct entropy_store input_pool = {
1655 +- .poolinfo = &poolinfo_table[0],
1656 +- .name = "input",
1657 +- .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
1658 +- .pool = input_pool_data
1659 ++struct crng {
1660 ++ u8 key[CHACHA_KEY_SIZE];
1661 ++ unsigned long generation;
1662 ++ local_lock_t lock;
1663 + };
1664 +
1665 +-static __u32 const twist_table[8] = {
1666 +- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
1667 +- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
1668 +-
1669 +-/*
1670 +- * This function adds bytes into the entropy "pool". It does not
1671 +- * update the entropy estimate. The caller should call
1672 +- * credit_entropy_bits if this is appropriate.
1673 +- *
1674 +- * The pool is stirred with a primitive polynomial of the appropriate
1675 +- * degree, and then twisted. We twist by three bits at a time because
1676 +- * it's cheap to do so and helps slightly in the expected case where
1677 +- * the entropy is concentrated in the low-order bits.
1678 +- */
1679 +-static void _mix_pool_bytes(struct entropy_store *r, const void *in,
1680 +- int nbytes)
1681 +-{
1682 +- unsigned long i, tap1, tap2, tap3, tap4, tap5;
1683 +- int input_rotate;
1684 +- int wordmask = r->poolinfo->poolwords - 1;
1685 +- const char *bytes = in;
1686 +- __u32 w;
1687 +-
1688 +- tap1 = r->poolinfo->tap1;
1689 +- tap2 = r->poolinfo->tap2;
1690 +- tap3 = r->poolinfo->tap3;
1691 +- tap4 = r->poolinfo->tap4;
1692 +- tap5 = r->poolinfo->tap5;
1693 +-
1694 +- input_rotate = r->input_rotate;
1695 +- i = r->add_ptr;
1696 +-
1697 +- /* mix one byte at a time to simplify size handling and churn faster */
1698 +- while (nbytes--) {
1699 +- w = rol32(*bytes++, input_rotate);
1700 +- i = (i - 1) & wordmask;
1701 +-
1702 +- /* XOR in the various taps */
1703 +- w ^= r->pool[i];
1704 +- w ^= r->pool[(i + tap1) & wordmask];
1705 +- w ^= r->pool[(i + tap2) & wordmask];
1706 +- w ^= r->pool[(i + tap3) & wordmask];
1707 +- w ^= r->pool[(i + tap4) & wordmask];
1708 +- w ^= r->pool[(i + tap5) & wordmask];
1709 +-
1710 +- /* Mix the result back in with a twist */
1711 +- r->pool[i] = (w >> 3) ^ twist_table[w & 7];
1712 +-
1713 +- /*
1714 +- * Normally, we add 7 bits of rotation to the pool.
1715 +- * At the beginning of the pool, add an extra 7 bits
1716 +- * rotation, so that successive passes spread the
1717 +- * input bits across the pool evenly.
1718 +- */
1719 +- input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
1720 +- }
1721 +-
1722 +- r->input_rotate = input_rotate;
1723 +- r->add_ptr = i;
1724 +-}
1725 ++static DEFINE_PER_CPU(struct crng, crngs) = {
1726 ++ .generation = ULONG_MAX,
1727 ++ .lock = INIT_LOCAL_LOCK(crngs.lock),
1728 ++};
1729 +
1730 +-static void __mix_pool_bytes(struct entropy_store *r, const void *in,
1731 +- int nbytes)
1732 +-{
1733 +- trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
1734 +- _mix_pool_bytes(r, in, nbytes);
1735 +-}
1736 ++/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
1737 ++static void extract_entropy(void *buf, size_t len);
1738 +
1739 +-static void mix_pool_bytes(struct entropy_store *r, const void *in,
1740 +- int nbytes)
1741 ++/* This extracts a new crng key from the input pool. */
1742 ++static void crng_reseed(void)
1743 + {
1744 + unsigned long flags;
1745 ++ unsigned long next_gen;
1746 ++ u8 key[CHACHA_KEY_SIZE];
1747 +
1748 +- trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
1749 +- spin_lock_irqsave(&r->lock, flags);
1750 +- _mix_pool_bytes(r, in, nbytes);
1751 +- spin_unlock_irqrestore(&r->lock, flags);
1752 +-}
1753 ++ extract_entropy(key, sizeof(key));
1754 +
1755 +-struct fast_pool {
1756 +- __u32 pool[4];
1757 +- unsigned long last;
1758 +- unsigned short reg_idx;
1759 +- unsigned char count;
1760 +-};
1761 ++ /*
1762 ++ * We copy the new key into the base_crng, overwriting the old one,
1763 ++ * and update the generation counter. We avoid hitting ULONG_MAX,
1764 ++ * because the per-cpu crngs are initialized to ULONG_MAX, so this
1765 ++ * forces new CPUs that come online to always initialize.
1766 ++ */
1767 ++ spin_lock_irqsave(&base_crng.lock, flags);
1768 ++ memcpy(base_crng.key, key, sizeof(base_crng.key));
1769 ++ next_gen = base_crng.generation + 1;
1770 ++ if (next_gen == ULONG_MAX)
1771 ++ ++next_gen;
1772 ++ WRITE_ONCE(base_crng.generation, next_gen);
1773 ++ WRITE_ONCE(base_crng.birth, jiffies);
1774 ++ if (!static_branch_likely(&crng_is_ready))
1775 ++ crng_init = CRNG_READY;
1776 ++ spin_unlock_irqrestore(&base_crng.lock, flags);
1777 ++ memzero_explicit(key, sizeof(key));
1778 ++}
1779 +
1780 + /*
1781 +- * This is a fast mixing routine used by the interrupt randomness
1782 +- * collector. It's hardcoded for an 128 bit pool and assumes that any
1783 +- * locks that might be needed are taken by the caller.
1784 ++ * This generates a ChaCha block using the provided key, and then
1785 ++ * immediately overwrites that key with half the block. It returns
1786 ++ * the resultant ChaCha state to the user, along with the second
1787 ++ * half of the block containing 32 bytes of random data that may
1788 ++ * be used; random_data_len may not be greater than 32.
1789 ++ *
1790 ++ * The returned ChaCha state contains within it a copy of the old
1791 ++ * key value, at index 4, so the state should always be zeroed out
1792 ++ * immediately after using in order to maintain forward secrecy.
1793 ++ * If the state cannot be erased in a timely manner, then it is
1794 ++ * safer to set the random_data parameter to &chacha_state[4] so
1795 ++ * that this function overwrites it before returning.
1796 + */
1797 +-static void fast_mix(struct fast_pool *f)
1798 ++static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
1799 ++ u32 chacha_state[CHACHA_STATE_WORDS],
1800 ++ u8 *random_data, size_t random_data_len)
1801 + {
1802 +- __u32 a = f->pool[0], b = f->pool[1];
1803 +- __u32 c = f->pool[2], d = f->pool[3];
1804 +-
1805 +- a += b; c += d;
1806 +- b = rol32(b, 6); d = rol32(d, 27);
1807 +- d ^= a; b ^= c;
1808 ++ u8 first_block[CHACHA_BLOCK_SIZE];
1809 +
1810 +- a += b; c += d;
1811 +- b = rol32(b, 16); d = rol32(d, 14);
1812 +- d ^= a; b ^= c;
1813 ++ BUG_ON(random_data_len > 32);
1814 +
1815 +- a += b; c += d;
1816 +- b = rol32(b, 6); d = rol32(d, 27);
1817 +- d ^= a; b ^= c;
1818 ++ chacha_init_consts(chacha_state);
1819 ++ memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
1820 ++ memset(&chacha_state[12], 0, sizeof(u32) * 4);
1821 ++ chacha20_block(chacha_state, first_block);
1822 +
1823 +- a += b; c += d;
1824 +- b = rol32(b, 16); d = rol32(d, 14);
1825 +- d ^= a; b ^= c;
1826 +-
1827 +- f->pool[0] = a; f->pool[1] = b;
1828 +- f->pool[2] = c; f->pool[3] = d;
1829 +- f->count++;
1830 ++ memcpy(key, first_block, CHACHA_KEY_SIZE);
1831 ++ memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
1832 ++ memzero_explicit(first_block, sizeof(first_block));
1833 + }
1834 +
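Spelled out, the "safer" pattern described above looks like this (editorial
sketch; key stands for whatever key buffer the caller owns, locking elided;
it is the same trick get_random_bytes_user() below applies via
crng_make_state()):

	u32 chacha_state[CHACHA_STATE_WORDS];

	/* Point random_data at the in-state key copy at index 4, so those
	 * words are overwritten before the function returns. */
	crng_fast_key_erasure(key, chacha_state,
			      (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
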
1835 +-static void process_random_ready_list(void)
1836 +-{
1837 +- unsigned long flags;
1838 +- struct random_ready_callback *rdy, *tmp;
1839 +-
1840 +- spin_lock_irqsave(&random_ready_list_lock, flags);
1841 +- list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
1842 +- struct module *owner = rdy->owner;
1843 +-
1844 +- list_del_init(&rdy->list);
1845 +- rdy->func(rdy);
1846 +- module_put(owner);
1847 ++/*
1848 ++ * Return whether the crng seed is considered to be sufficiently old
1849 ++ * that a reseeding is needed. This happens if the last reseeding
1850 ++ * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
1851 ++ * proportional to the uptime.
1852 ++ */
1853 ++static bool crng_has_old_seed(void)
1854 ++{
1855 ++ static bool early_boot = true;
1856 ++ unsigned long interval = CRNG_RESEED_INTERVAL;
1857 ++
1858 ++ if (unlikely(READ_ONCE(early_boot))) {
1859 ++ time64_t uptime = ktime_get_seconds();
1860 ++ if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
1861 ++ WRITE_ONCE(early_boot, false);
1862 ++ else
1863 ++ interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
1864 ++ (unsigned int)uptime / 2 * HZ);
1865 + }
1866 +- spin_unlock_irqrestore(&random_ready_list_lock, flags);
1867 ++ return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
1868 + }
1869 +
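A worked example of the resulting schedule (editorial): with
CRNG_RESEED_START_INTERVAL = HZ and CRNG_RESEED_INTERVAL = 60 * HZ, a system
10 seconds into boot may reseed every max(1, 10/2) = 5 seconds; once uptime
reaches 60 * 2 = 120 seconds, early_boot flips off and the fixed 60-second
interval applies from then on.
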
1870 + /*
1871 +- * Credit (or debit) the entropy store with n bits of entropy.
1872 +- * Use credit_entropy_bits_safe() if the value comes from userspace
1873 +- * or otherwise should be checked for extreme values.
1874 ++ * This function returns a ChaCha state that you may use for generating
1875 ++ * random data. It also returns up to 32 bytes on its own of random data
1876 ++ * that may be used; random_data_len may not be greater than 32.
1877 + */
1878 +-static void credit_entropy_bits(struct entropy_store *r, int nbits)
1879 ++static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
1880 ++ u8 *random_data, size_t random_data_len)
1881 + {
1882 +- int entropy_count, orig;
1883 +- const int pool_size = r->poolinfo->poolfracbits;
1884 +- int nfrac = nbits << ENTROPY_SHIFT;
1885 ++ unsigned long flags;
1886 ++ struct crng *crng;
1887 +
1888 +- if (!nbits)
1889 +- return;
1890 ++ BUG_ON(random_data_len > 32);
1891 +
1892 +-retry:
1893 +- entropy_count = orig = READ_ONCE(r->entropy_count);
1894 +- if (nfrac < 0) {
1895 +- /* Debit */
1896 +- entropy_count += nfrac;
1897 +- } else {
1898 +- /*
1899 +- * Credit: we have to account for the possibility of
1900 +- * overwriting already present entropy. Even in the
1901 +- * ideal case of pure Shannon entropy, new contributions
1902 +- * approach the full value asymptotically:
1903 +- *
1904 +- * entropy <- entropy + (pool_size - entropy) *
1905 +- * (1 - exp(-add_entropy/pool_size))
1906 +- *
1907 +- * For add_entropy <= pool_size/2 then
1908 +- * (1 - exp(-add_entropy/pool_size)) >=
1909 +- * (add_entropy/pool_size)*0.7869...
1910 +- * so we can approximate the exponential with
1911 +- * 3/4*add_entropy/pool_size and still be on the
1912 +- * safe side by adding at most pool_size/2 at a time.
1913 +- *
1914 +- * The use of pool_size-2 in the while statement is to
1915 +- * prevent rounding artifacts from making the loop
1916 +- * arbitrarily long; this limits the loop to log2(pool_size)*2
1917 +- * turns no matter how large nbits is.
1918 +- */
1919 +- int pnfrac = nfrac;
1920 +- const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
1921 +- /* The +2 corresponds to the /4 in the denominator */
1922 +-
1923 +- do {
1924 +- unsigned int anfrac = min(pnfrac, pool_size/2);
1925 +- unsigned int add =
1926 +- ((pool_size - entropy_count)*anfrac*3) >> s;
1927 +-
1928 +- entropy_count += add;
1929 +- pnfrac -= anfrac;
1930 +- } while (unlikely(entropy_count < pool_size-2 && pnfrac));
1931 ++ /*
1932 ++ * For the fast path, we first check whether we're ready while unlocked,
1933 ++ * and then re-check once locked. In the case where we're really not
1934 ++ * ready, we do fast key erasure with the base_crng directly, extracting
1935 ++ * when crng_init is CRNG_EMPTY.
1936 ++ */
1937 ++ if (!crng_ready()) {
1938 ++ bool ready;
1939 ++
1940 ++ spin_lock_irqsave(&base_crng.lock, flags);
1941 ++ ready = crng_ready();
1942 ++ if (!ready) {
1943 ++ if (crng_init == CRNG_EMPTY)
1944 ++ extract_entropy(base_crng.key, sizeof(base_crng.key));
1945 ++ crng_fast_key_erasure(base_crng.key, chacha_state,
1946 ++ random_data, random_data_len);
1947 ++ }
1948 ++ spin_unlock_irqrestore(&base_crng.lock, flags);
1949 ++ if (!ready)
1950 ++ return;
1951 + }
1952 +
1953 +- if (WARN_ON(entropy_count < 0)) {
1954 +- pr_warn("negative entropy/overflow: pool %s count %d\n",
1955 +- r->name, entropy_count);
1956 +- entropy_count = 0;
1957 +- } else if (entropy_count > pool_size)
1958 +- entropy_count = pool_size;
1959 +- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
1960 +- goto retry;
1961 +-
1962 +- trace_credit_entropy_bits(r->name, nbits,
1963 +- entropy_count >> ENTROPY_SHIFT, _RET_IP_);
1964 ++ /*
1965 ++ * If the base_crng is old enough, we reseed, which in turn bumps the
1966 ++ * generation counter that we check below.
1967 ++ */
1968 ++ if (unlikely(crng_has_old_seed()))
1969 ++ crng_reseed();
1970 +
1971 +- if (r == &input_pool) {
1972 +- int entropy_bits = entropy_count >> ENTROPY_SHIFT;
1973 ++ local_lock_irqsave(&crngs.lock, flags);
1974 ++ crng = raw_cpu_ptr(&crngs);
1975 +
1976 +- if (crng_init < 2 && entropy_bits >= 128)
1977 +- crng_reseed(&primary_crng, r);
1978 ++ /*
1979 ++ * If our per-cpu crng is older than the base_crng, then it means
1980 ++ * somebody reseeded the base_crng. In that case, we do fast key
1981 ++ * erasure on the base_crng, and use its output as the new key
1982 ++ * for our per-cpu crng. This brings us up to date with base_crng.
1983 ++ */
1984 ++ if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
1985 ++ spin_lock(&base_crng.lock);
1986 ++ crng_fast_key_erasure(base_crng.key, chacha_state,
1987 ++ crng->key, sizeof(crng->key));
1988 ++ crng->generation = base_crng.generation;
1989 ++ spin_unlock(&base_crng.lock);
1990 + }
1991 ++
1992 ++ /*
1993 ++ * Finally, when we've made it this far, our per-cpu crng has an
1994 ++ * up-to-date key, and we can do fast key erasure with it to produce
1995 ++ * some random data and a ChaCha state for the caller. All other
1996 ++ * branches of this function are "unlikely", so most of the time we
1997 ++ * should wind up here immediately.
1998 ++ */
1999 ++ crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
2000 ++ local_unlock_irqrestore(&crngs.lock, flags);
2001 + }
2002 +
2003 +-static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
2004 ++static void _get_random_bytes(void *buf, size_t len)
2005 + {
2006 +- const int nbits_max = r->poolinfo->poolwords * 32;
2007 +-
2008 +- if (nbits < 0)
2009 +- return -EINVAL;
2010 ++ u32 chacha_state[CHACHA_STATE_WORDS];
2011 ++ u8 tmp[CHACHA_BLOCK_SIZE];
2012 ++ size_t first_block_len;
2013 +
2014 +- /* Cap the value to avoid overflows */
2015 +- nbits = min(nbits, nbits_max);
2016 ++ if (!len)
2017 ++ return;
2018 +
2019 +- credit_entropy_bits(r, nbits);
2020 +- return 0;
2021 +-}
2022 ++ first_block_len = min_t(size_t, 32, len);
2023 ++ crng_make_state(chacha_state, buf, first_block_len);
2024 ++ len -= first_block_len;
2025 ++ buf += first_block_len;
2026 +
2027 +-/*********************************************************************
2028 +- *
2029 +- * CRNG using CHACHA20
2030 +- *
2031 +- *********************************************************************/
2032 ++ while (len) {
2033 ++ if (len < CHACHA_BLOCK_SIZE) {
2034 ++ chacha20_block(chacha_state, tmp);
2035 ++ memcpy(buf, tmp, len);
2036 ++ memzero_explicit(tmp, sizeof(tmp));
2037 ++ break;
2038 ++ }
2039 +
2040 +-#define CRNG_RESEED_INTERVAL (300*HZ)
2041 ++ chacha20_block(chacha_state, buf);
2042 ++ if (unlikely(chacha_state[12] == 0))
2043 ++ ++chacha_state[13];
2044 ++ len -= CHACHA_BLOCK_SIZE;
2045 ++ buf += CHACHA_BLOCK_SIZE;
2046 ++ }
2047 +
2048 +-static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
2049 ++ memzero_explicit(chacha_state, sizeof(chacha_state));
2050 ++}
2051 +
2052 +-#ifdef CONFIG_NUMA
2053 + /*
2054 +- * Hack to deal with crazy userspace programs when they are all trying
2055 +- * to access /dev/urandom in parallel. The programs are almost
2056 +- * certainly doing something terribly wrong, but we'll work around
2057 +- * their brain damage.
2058 ++ * This function is the exported kernel interface. It returns some
2059 ++ * number of good random numbers, suitable for key generation, seeding
2060 ++ * TCP sequence numbers, etc. It does not rely on the hardware random
2061 ++ * number generator. For random bytes direct from the hardware RNG
2062 ++ * (when available), use get_random_bytes_arch(). In order to ensure
2063 ++ * that the randomness provided by this function is okay, the function
2064 ++ * wait_for_random_bytes() should be called and return 0 at least once
2065 ++ * at any point prior.
2066 + */
2067 +-static struct crng_state **crng_node_pool __read_mostly;
2068 +-#endif
2069 +-
2070 +-static void invalidate_batched_entropy(void);
2071 +-static void numa_crng_init(void);
2072 +-
2073 +-static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
2074 +-static int __init parse_trust_cpu(char *arg)
2075 ++void get_random_bytes(void *buf, size_t len)
2076 + {
2077 +- return kstrtobool(arg, &trust_cpu);
2078 ++ warn_unseeded_randomness();
2079 ++ _get_random_bytes(buf, len);
2080 + }
2081 +-early_param("random.trust_cpu", parse_trust_cpu);
2082 ++EXPORT_SYMBOL(get_random_bytes);
2083 +
2084 +-static bool crng_init_try_arch(struct crng_state *crng)
2085 ++static ssize_t get_random_bytes_user(struct iov_iter *iter)
2086 + {
2087 +- int i;
2088 +- bool arch_init = true;
2089 +- unsigned long rv;
2090 +-
2091 +- for (i = 4; i < 16; i++) {
2092 +- if (!arch_get_random_seed_long(&rv) &&
2093 +- !arch_get_random_long(&rv)) {
2094 +- rv = random_get_entropy();
2095 +- arch_init = false;
2096 +- }
2097 +- crng->state[i] ^= rv;
2098 ++ u32 chacha_state[CHACHA_STATE_WORDS];
2099 ++ u8 block[CHACHA_BLOCK_SIZE];
2100 ++ size_t ret = 0, copied;
2101 ++
2102 ++ if (unlikely(!iov_iter_count(iter)))
2103 ++ return 0;
2104 ++
2105 ++ /*
2106 ++ * Immediately overwrite the ChaCha key at index 4 with random
2107 ++ * bytes, in case userspace causes copy_to_user() below to sleep
2108 ++ * forever, so that we still retain forward secrecy in that case.
2109 ++ */
2110 ++ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
2111 ++ /*
2112 ++ * However, if we're doing a read of len <= 32, we don't need to
2113 ++ * use chacha_state after, so we can simply return those bytes to
2114 ++ * the user directly.
2115 ++ */
2116 ++ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
2117 ++ ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
2118 ++ goto out_zero_chacha;
2119 + }
2120 +
2121 +- return arch_init;
2122 +-}
2123 ++ for (;;) {
2124 ++ chacha20_block(chacha_state, block);
2125 ++ if (unlikely(chacha_state[12] == 0))
2126 ++ ++chacha_state[13];
2127 +
2128 +-static bool __init crng_init_try_arch_early(struct crng_state *crng)
2129 +-{
2130 +- int i;
2131 +- bool arch_init = true;
2132 +- unsigned long rv;
2133 +-
2134 +- for (i = 4; i < 16; i++) {
2135 +- if (!arch_get_random_seed_long_early(&rv) &&
2136 +- !arch_get_random_long_early(&rv)) {
2137 +- rv = random_get_entropy();
2138 +- arch_init = false;
2139 ++ copied = copy_to_iter(block, sizeof(block), iter);
2140 ++ ret += copied;
2141 ++ if (!iov_iter_count(iter) || copied != sizeof(block))
2142 ++ break;
2143 ++
2144 ++ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
2145 ++ if (ret % PAGE_SIZE == 0) {
2146 ++ if (signal_pending(current))
2147 ++ break;
2148 ++ cond_resched();
2149 + }
2150 +- crng->state[i] ^= rv;
2151 + }
2152 +
2153 +- return arch_init;
2154 ++ memzero_explicit(block, sizeof(block));
2155 ++out_zero_chacha:
2156 ++ memzero_explicit(chacha_state, sizeof(chacha_state));
2157 ++ return ret ? ret : -EFAULT;
2158 + }
2159 +
2160 +-static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
2161 +-{
2162 +- chacha_init_consts(crng->state);
2163 +- _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
2164 +- crng_init_try_arch(crng);
2165 +- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
2166 +-}
2167 ++/*
2168 ++ * Batched entropy returns random integers. The quality of the random
2169 ++ * numbers is as good as /dev/urandom's. In order to ensure that the randomness
2170 ++ * provided by this function is okay, the function wait_for_random_bytes()
2171 ++ * should be called and return 0 at least once at any point prior.
2172 ++ */
2173 +
2174 +-static void __init crng_initialize_primary(struct crng_state *crng)
2175 ++#define DEFINE_BATCHED_ENTROPY(type) \
2176 ++struct batch_ ##type { \
2177 ++ /* \
2178 ++ * We make this 1.5x a ChaCha block, so that we get the \
2179 ++ * remaining 32 bytes from fast key erasure, plus one full \
2180 ++ * block from the detached ChaCha state. We can increase \
2181 ++ * the size of this later if needed so long as we keep the \
2182 ++ * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
2183 ++ */ \
2184 ++ type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
2185 ++ local_lock_t lock; \
2186 ++ unsigned long generation; \
2187 ++ unsigned int position; \
2188 ++}; \
2189 ++ \
2190 ++static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
2191 ++ .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
2192 ++ .position = UINT_MAX \
2193 ++}; \
2194 ++ \
2195 ++type get_random_ ##type(void) \
2196 ++{ \
2197 ++ type ret; \
2198 ++ unsigned long flags; \
2199 ++ struct batch_ ##type *batch; \
2200 ++ unsigned long next_gen; \
2201 ++ \
2202 ++ warn_unseeded_randomness(); \
2203 ++ \
2204 ++ if (!crng_ready()) { \
2205 ++ _get_random_bytes(&ret, sizeof(ret)); \
2206 ++ return ret; \
2207 ++ } \
2208 ++ \
2209 ++ local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
2210 ++ batch = raw_cpu_ptr(&batched_entropy_##type); \
2211 ++ \
2212 ++ next_gen = READ_ONCE(base_crng.generation); \
2213 ++ if (batch->position >= ARRAY_SIZE(batch->entropy) || \
2214 ++ next_gen != batch->generation) { \
2215 ++ _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
2216 ++ batch->position = 0; \
2217 ++ batch->generation = next_gen; \
2218 ++ } \
2219 ++ \
2220 ++ ret = batch->entropy[batch->position]; \
2221 ++ batch->entropy[batch->position] = 0; \
2222 ++ ++batch->position; \
2223 ++ local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
2224 ++ return ret; \
2225 ++} \
2226 ++EXPORT_SYMBOL(get_random_ ##type);
2227 ++
2228 ++DEFINE_BATCHED_ENTROPY(u64)
2229 ++DEFINE_BATCHED_ENTROPY(u32)
2230 ++
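A usage sketch for the batched interfaces (editorial; example_jitter is a
hypothetical helper):

	/* A one-off integer comes out of the per-cpu batch, which is much
	 * cheaper than a full get_random_bytes() call. The modulo has a
	 * small bias: fine for jitter, not for cryptographic purposes. */
	static unsigned long example_jitter(unsigned long base, u32 span)
	{
		return base + (get_random_u32() % span);
	}
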
2231 ++#ifdef CONFIG_SMP
2232 ++/*
2233 ++ * This function is called when the CPU is coming up, with entry
2234 ++ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
2235 ++ */
2236 ++int __cold random_prepare_cpu(unsigned int cpu)
2237 + {
2238 +- chacha_init_consts(crng->state);
2239 +- _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
2240 +- if (crng_init_try_arch_early(crng) && trust_cpu) {
2241 +- invalidate_batched_entropy();
2242 +- numa_crng_init();
2243 +- crng_init = 2;
2244 +- pr_notice("crng done (trusting CPU's manufacturer)\n");
2245 +- }
2246 +- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
2247 +-}
2248 +-
2249 +-static void crng_finalize_init(struct crng_state *crng)
2250 +-{
2251 +- if (crng != &primary_crng || crng_init >= 2)
2252 +- return;
2253 +- if (!system_wq) {
2254 +- /* We can't call numa_crng_init until we have workqueues,
2255 +- * so mark this for processing later. */
2256 +- crng_need_final_init = true;
2257 +- return;
2258 +- }
2259 +-
2260 +- invalidate_batched_entropy();
2261 +- numa_crng_init();
2262 +- crng_init = 2;
2263 +- process_random_ready_list();
2264 +- wake_up_interruptible(&crng_init_wait);
2265 +- kill_fasync(&fasync, SIGIO, POLL_IN);
2266 +- pr_notice("crng init done\n");
2267 +- if (unseeded_warning.missed) {
2268 +- pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
2269 +- unseeded_warning.missed);
2270 +- unseeded_warning.missed = 0;
2271 +- }
2272 +- if (urandom_warning.missed) {
2273 +- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
2274 +- urandom_warning.missed);
2275 +- urandom_warning.missed = 0;
2276 +- }
2277 +-}
2278 +-
2279 +-#ifdef CONFIG_NUMA
2280 +-static void do_numa_crng_init(struct work_struct *work)
2281 +-{
2282 +- int i;
2283 +- struct crng_state *crng;
2284 +- struct crng_state **pool;
2285 +-
2286 +- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
2287 +- for_each_online_node(i) {
2288 +- crng = kmalloc_node(sizeof(struct crng_state),
2289 +- GFP_KERNEL | __GFP_NOFAIL, i);
2290 +- spin_lock_init(&crng->lock);
2291 +- crng_initialize_secondary(crng);
2292 +- pool[i] = crng;
2293 +- }
2294 +- /* pairs with READ_ONCE() in select_crng() */
2295 +- if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
2296 +- for_each_node(i)
2297 +- kfree(pool[i]);
2298 +- kfree(pool);
2299 +- }
2300 +-}
2301 +-
2302 +-static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
2303 +-
2304 +-static void numa_crng_init(void)
2305 +-{
2306 +- schedule_work(&numa_crng_init_work);
2307 +-}
2308 +-
2309 +-static struct crng_state *select_crng(void)
2310 +-{
2311 +- struct crng_state **pool;
2312 +- int nid = numa_node_id();
2313 +-
2314 +- /* pairs with cmpxchg_release() in do_numa_crng_init() */
2315 +- pool = READ_ONCE(crng_node_pool);
2316 +- if (pool && pool[nid])
2317 +- return pool[nid];
2318 +-
2319 +- return &primary_crng;
2320 +-}
2321 +-#else
2322 +-static void numa_crng_init(void) {}
2323 +-
2324 +-static struct crng_state *select_crng(void)
2325 +-{
2326 +- return &primary_crng;
2327 ++ /*
2328 ++ * When the cpu comes back online, immediately invalidate both
2329 ++ * the per-cpu crng and all batches, so that we serve fresh
2330 ++ * randomness.
2331 ++ */
2332 ++ per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
2333 ++ per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
2334 ++ per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
2335 ++ return 0;
2336 + }
2337 + #endif
2338 +
2339 + /*
2340 +- * crng_fast_load() can be called by code in the interrupt service
2341 +- * path. So we can't afford to dilly-dally. Returns the number of
2342 +- * bytes processed from cp.
2343 +- */
2344 +-static size_t crng_fast_load(const char *cp, size_t len)
2345 +-{
2346 +- unsigned long flags;
2347 +- char *p;
2348 +- size_t ret = 0;
2349 +-
2350 +- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
2351 +- return 0;
2352 +- if (crng_init != 0) {
2353 +- spin_unlock_irqrestore(&primary_crng.lock, flags);
2354 +- return 0;
2355 +- }
2356 +- p = (unsigned char *) &primary_crng.state[4];
2357 +- while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
2358 +- p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
2359 +- cp++; crng_init_cnt++; len--; ret++;
2360 +- }
2361 +- spin_unlock_irqrestore(&primary_crng.lock, flags);
2362 +- if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
2363 +- invalidate_batched_entropy();
2364 +- crng_init = 1;
2365 +- pr_notice("fast init done\n");
2366 +- }
2367 +- return ret;
2368 +-}
2369 +-
2370 +-/*
2371 +- * crng_slow_load() is called by add_device_randomness, which has two
2372 +- * attributes. (1) We can't trust the buffer passed to it is
2373 +- * guaranteed to be unpredictable (so it might not have any entropy at
2374 +- * all), and (2) it doesn't have the performance constraints of
2375 +- * crng_fast_load().
2376 +- *
2377 +- * So we do something more comprehensive which is guaranteed to touch
2378 +- * all of the primary_crng's state, and which uses a LFSR with a
2379 +- * period of 255 as part of the mixing algorithm. Finally, we do
2380 +- * *not* advance crng_init_cnt since the buffer we may get may be something
2381 +- * like a fixed DMI table (for example), which might very well be
2382 +- * unique to the machine, but is otherwise unvarying.
2383 +- */
2384 +-static int crng_slow_load(const char *cp, size_t len)
2385 +-{
2386 +- unsigned long flags;
2387 +- static unsigned char lfsr = 1;
2388 +- unsigned char tmp;
2389 +- unsigned i, max = CHACHA_KEY_SIZE;
2390 +- const char * src_buf = cp;
2391 +- char * dest_buf = (char *) &primary_crng.state[4];
2392 +-
2393 +- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
2394 +- return 0;
2395 +- if (crng_init != 0) {
2396 +- spin_unlock_irqrestore(&primary_crng.lock, flags);
2397 +- return 0;
2398 +- }
2399 +- if (len > max)
2400 +- max = len;
2401 +-
2402 +- for (i = 0; i < max ; i++) {
2403 +- tmp = lfsr;
2404 +- lfsr >>= 1;
2405 +- if (tmp & 1)
2406 +- lfsr ^= 0xE1;
2407 +- tmp = dest_buf[i % CHACHA_KEY_SIZE];
2408 +- dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
2409 +- lfsr += (tmp << 3) | (tmp >> 5);
2410 +- }
2411 +- spin_unlock_irqrestore(&primary_crng.lock, flags);
2412 +- return 1;
2413 +-}
2414 +-
2415 +-static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
2416 +-{
2417 +- unsigned long flags;
2418 +- int i, num;
2419 +- union {
2420 +- __u8 block[CHACHA_BLOCK_SIZE];
2421 +- __u32 key[8];
2422 +- } buf;
2423 +-
2424 +- if (r) {
2425 +- num = extract_entropy(r, &buf, 32, 16, 0);
2426 +- if (num == 0)
2427 +- return;
2428 +- } else {
2429 +- _extract_crng(&primary_crng, buf.block);
2430 +- _crng_backtrack_protect(&primary_crng, buf.block,
2431 +- CHACHA_KEY_SIZE);
2432 +- }
2433 +- spin_lock_irqsave(&crng->lock, flags);
2434 +- for (i = 0; i < 8; i++) {
2435 +- unsigned long rv;
2436 +- if (!arch_get_random_seed_long(&rv) &&
2437 +- !arch_get_random_long(&rv))
2438 +- rv = random_get_entropy();
2439 +- crng->state[i+4] ^= buf.key[i] ^ rv;
2440 +- }
2441 +- memzero_explicit(&buf, sizeof(buf));
2442 +- WRITE_ONCE(crng->init_time, jiffies);
2443 +- spin_unlock_irqrestore(&crng->lock, flags);
2444 +- crng_finalize_init(crng);
2445 +-}
2446 +-
2447 +-static void _extract_crng(struct crng_state *crng,
2448 +- __u8 out[CHACHA_BLOCK_SIZE])
2449 +-{
2450 +- unsigned long v, flags, init_time;
2451 +-
2452 +- if (crng_ready()) {
2453 +- init_time = READ_ONCE(crng->init_time);
2454 +- if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
2455 +- time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
2456 +- crng_reseed(crng, crng == &primary_crng ?
2457 +- &input_pool : NULL);
2458 +- }
2459 +- spin_lock_irqsave(&crng->lock, flags);
2460 +- if (arch_get_random_long(&v))
2461 +- crng->state[14] ^= v;
2462 +- chacha20_block(&crng->state[0], out);
2463 +- if (crng->state[12] == 0)
2464 +- crng->state[13]++;
2465 +- spin_unlock_irqrestore(&crng->lock, flags);
2466 +-}
2467 +-
2468 +-static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
2469 +-{
2470 +- _extract_crng(select_crng(), out);
2471 +-}
2472 +-
2473 +-/*
2474 +- * Use the leftover bytes from the CRNG block output (if there is
2475 +- * enough) to mutate the CRNG key to provide backtracking protection.
2476 ++ * This function will use the architecture-specific hardware random
2477 ++ * number generator if it is available. It is not recommended for
2478 ++ * use. Use get_random_bytes() instead. It returns the number of
2479 ++ * bytes filled in.
2480 + */
2481 +-static void _crng_backtrack_protect(struct crng_state *crng,
2482 +- __u8 tmp[CHACHA_BLOCK_SIZE], int used)
2483 ++size_t __must_check get_random_bytes_arch(void *buf, size_t len)
2484 + {
2485 +- unsigned long flags;
2486 +- __u32 *s, *d;
2487 +- int i;
2488 +-
2489 +- used = round_up(used, sizeof(__u32));
2490 +- if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
2491 +- extract_crng(tmp);
2492 +- used = 0;
2493 +- }
2494 +- spin_lock_irqsave(&crng->lock, flags);
2495 +- s = (__u32 *) &tmp[used];
2496 +- d = &crng->state[4];
2497 +- for (i=0; i < 8; i++)
2498 +- *d++ ^= *s++;
2499 +- spin_unlock_irqrestore(&crng->lock, flags);
2500 +-}
2501 ++ size_t left = len;
2502 ++ u8 *p = buf;
2503 +
2504 +-static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
2505 +-{
2506 +- _crng_backtrack_protect(select_crng(), tmp, used);
2507 +-}
2508 +-
2509 +-static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
2510 +-{
2511 +- ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
2512 +- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
2513 +- int large_request = (nbytes > 256);
2514 +-
2515 +- while (nbytes) {
2516 +- if (large_request && need_resched()) {
2517 +- if (signal_pending(current)) {
2518 +- if (ret == 0)
2519 +- ret = -ERESTARTSYS;
2520 +- break;
2521 +- }
2522 +- schedule();
2523 +- }
2524 ++ while (left) {
2525 ++ unsigned long v;
2526 ++ size_t block_len = min_t(size_t, left, sizeof(unsigned long));
2527 +
2528 +- extract_crng(tmp);
2529 +- i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
2530 +- if (copy_to_user(buf, tmp, i)) {
2531 +- ret = -EFAULT;
2532 ++ if (!arch_get_random_long(&v))
2533 + break;
2534 +- }
2535 +
2536 +- nbytes -= i;
2537 +- buf += i;
2538 +- ret += i;
2539 ++ memcpy(p, &v, block_len);
2540 ++ p += block_len;
2541 ++ left -= block_len;
2542 + }
2543 +- crng_backtrack_protect(tmp, i);
2544 +-
2545 +- /* Wipe data just written to memory */
2546 +- memzero_explicit(tmp, sizeof(tmp));
2547 +
2548 +- return ret;
2549 ++ return len - left;
2550 + }
2551 ++EXPORT_SYMBOL(get_random_bytes_arch);
2552 +
2553 +
2554 +-/*********************************************************************
2555 ++/**********************************************************************
2556 + *
2557 +- * Entropy input management
2558 ++ * Entropy accumulation and extraction routines.
2559 + *
2560 +- *********************************************************************/
2561 ++ * Callers may add entropy via:
2562 ++ *
2563 ++ * static void mix_pool_bytes(const void *buf, size_t len)
2564 ++ *
2565 ++ * After which, if added entropy should be credited:
2566 ++ *
2567 ++ * static void credit_init_bits(size_t bits)
2568 ++ *
2569 ++ * Finally, extract entropy via:
2570 ++ *
2571 ++ * static void extract_entropy(void *buf, size_t len)
2572 ++ *
2573 ++ **********************************************************************/
2574 +
2575 +-/* There is one of these per entropy source */
2576 +-struct timer_rand_state {
2577 +- cycles_t last_time;
2578 +- long last_delta, last_delta2;
2579 ++enum {
2580 ++ POOL_BITS = BLAKE2S_HASH_SIZE * 8,
2581 ++ POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
2582 ++ POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
2583 ++};
2584 ++
2585 ++static struct {
2586 ++ struct blake2s_state hash;
2587 ++ spinlock_t lock;
2588 ++ unsigned int init_bits;
2589 ++} input_pool = {
2590 ++ .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
2591 ++ BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
2592 ++ BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
2593 ++ .hash.outlen = BLAKE2S_HASH_SIZE,
2594 ++ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
2595 + };
2596 +
2597 +-#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
2598 ++static void _mix_pool_bytes(const void *buf, size_t len)
2599 ++{
2600 ++ blake2s_update(&input_pool.hash, buf, len);
2601 ++}
2602 +
2603 + /*
2604 +- * Add device- or boot-specific data to the input pool to help
2605 +- * initialize it.
2606 +- *
2607 +- * None of this adds any entropy; it is meant to avoid the problem of
2608 +- * the entropy pool having similar initial state across largely
2609 +- * identical devices.
2610 ++ * This function adds bytes into the input pool. It does not
2611 ++ * update the initialization bit counter; the caller should call
2612 ++ * credit_init_bits if this is appropriate.
2613 + */
2614 +-void add_device_randomness(const void *buf, unsigned int size)
2615 ++static void mix_pool_bytes(const void *buf, size_t len)
2616 + {
2617 +- unsigned long time = random_get_entropy() ^ jiffies;
2618 + unsigned long flags;
2619 +
2620 +- if (!crng_ready() && size)
2621 +- crng_slow_load(buf, size);
2622 +-
2623 +- trace_add_device_randomness(size, _RET_IP_);
2624 + spin_lock_irqsave(&input_pool.lock, flags);
2625 +- _mix_pool_bytes(&input_pool, buf, size);
2626 +- _mix_pool_bytes(&input_pool, &time, sizeof(time));
2627 ++ _mix_pool_bytes(buf, len);
2628 + spin_unlock_irqrestore(&input_pool.lock, flags);
2629 + }
2630 +-EXPORT_SYMBOL(add_device_randomness);
2631 +-
2632 +-static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
2633 +
2634 + /*
2635 +- * This function adds entropy to the entropy "pool" by using timing
2636 +- * delays. It uses the timer_rand_state structure to make an estimate
2637 +- * of how many bits of entropy this call has added to the pool.
2638 +- *
2639 +- * The number "num" is also added to the pool - it should somehow describe
2640 +- * the type of event which just happened. This is currently 0-255 for
2641 +- * keyboard scan codes, and 256 upwards for interrupts.
2642 +- *
2643 ++ * This is an HKDF-like construction for using the hashed collected entropy
2644 ++ * as a PRF key that is then expanded block-by-block.
2645 + */
2646 +-static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
2647 ++static void extract_entropy(void *buf, size_t len)
2648 + {
2649 +- struct entropy_store *r;
2650 ++ unsigned long flags;
2651 ++ u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
2652 + struct {
2653 +- long jiffies;
2654 +- unsigned cycles;
2655 +- unsigned num;
2656 +- } sample;
2657 +- long delta, delta2, delta3;
2658 +-
2659 +- sample.jiffies = jiffies;
2660 +- sample.cycles = random_get_entropy();
2661 +- sample.num = num;
2662 +- r = &input_pool;
2663 +- mix_pool_bytes(r, &sample, sizeof(sample));
2664 +-
2665 +- /*
2666 +- * Calculate number of bits of randomness we probably added.
2667 +- * We take into account the first, second and third-order deltas
2668 +- * in order to make our estimate.
2669 +- */
2670 +- delta = sample.jiffies - READ_ONCE(state->last_time);
2671 +- WRITE_ONCE(state->last_time, sample.jiffies);
2672 +-
2673 +- delta2 = delta - READ_ONCE(state->last_delta);
2674 +- WRITE_ONCE(state->last_delta, delta);
2675 +-
2676 +- delta3 = delta2 - READ_ONCE(state->last_delta2);
2677 +- WRITE_ONCE(state->last_delta2, delta2);
2678 ++ unsigned long rdseed[32 / sizeof(long)];
2679 ++ size_t counter;
2680 ++ } block;
2681 ++ size_t i;
2682 ++
2683 ++ for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
2684 ++ if (!arch_get_random_seed_long(&block.rdseed[i]) &&
2685 ++ !arch_get_random_long(&block.rdseed[i]))
2686 ++ block.rdseed[i] = random_get_entropy();
2687 ++ }
2688 +
2689 +- if (delta < 0)
2690 +- delta = -delta;
2691 +- if (delta2 < 0)
2692 +- delta2 = -delta2;
2693 +- if (delta3 < 0)
2694 +- delta3 = -delta3;
2695 +- if (delta > delta2)
2696 +- delta = delta2;
2697 +- if (delta > delta3)
2698 +- delta = delta3;
2699 ++ spin_lock_irqsave(&input_pool.lock, flags);
2700 +
2701 +- /*
2702 +- * delta is now minimum absolute delta.
2703 +- * Round down by 1 bit on general principles,
2704 +- * and limit entropy estimate to 12 bits.
2705 +- */
2706 +- credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
2707 +-}
2708 ++ /* seed = HASHPRF(last_key, entropy_input) */
2709 ++ blake2s_final(&input_pool.hash, seed);
2710 +
2711 +-void add_input_randomness(unsigned int type, unsigned int code,
2712 +- unsigned int value)
2713 +-{
2714 +- static unsigned char last_value;
2715 ++ /* next_key = HASHPRF(seed, RDSEED || 0) */
2716 ++ block.counter = 0;
2717 ++ blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
2718 ++ blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
2719 +
2720 +- /* ignore autorepeat and the like */
2721 +- if (value == last_value)
2722 +- return;
2723 ++ spin_unlock_irqrestore(&input_pool.lock, flags);
2724 ++ memzero_explicit(next_key, sizeof(next_key));
2725 ++
2726 ++ while (len) {
2727 ++ i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
2728 ++ /* output = HASHPRF(seed, RDSEED || ++counter) */
2729 ++ ++block.counter;
2730 ++ blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
2731 ++ len -= i;
2732 ++ buf += i;
2733 ++ }
2734 +
2735 +- last_value = value;
2736 +- add_timer_randomness(&input_timer_state,
2737 +- (type << 4) ^ code ^ (code >> 4) ^ value);
2738 +- trace_add_input_randomness(ENTROPY_BITS(&input_pool));
2739 ++ memzero_explicit(seed, sizeof(seed));
2740 ++ memzero_explicit(&block, sizeof(block));
2741 + }
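Schematically (editorial), writing BLAKE2s_k for keyed BLAKE2s under key k:

	seed     = BLAKE2s_last_key(pool input)   /* finalize running hash */
	next_key = BLAKE2s_seed(rdseed || 0)      /* re-key the input pool */
	out_i    = BLAKE2s_seed(rdseed || i)      /* expand, i = 1, 2, ... */

where rdseed is the 32 bytes gathered above from RDSEED/RDRAND, falling back
to random_get_entropy() when neither is available.
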
2742 +-EXPORT_SYMBOL_GPL(add_input_randomness);
2743 +
2744 +-static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
2745 ++#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
2746 +
2747 +-#ifdef ADD_INTERRUPT_BENCH
2748 +-static unsigned long avg_cycles, avg_deviation;
2749 +-
2750 +-#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
2751 +-#define FIXED_1_2 (1 << (AVG_SHIFT-1))
2752 +-
2753 +-static void add_interrupt_bench(cycles_t start)
2754 ++static void __cold _credit_init_bits(size_t bits)
2755 + {
2756 +- long delta = random_get_entropy() - start;
2757 +-
2758 +- /* Use a weighted moving average */
2759 +- delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
2760 +- avg_cycles += delta;
2761 +- /* And average deviation */
2762 +- delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
2763 +- avg_deviation += delta;
2764 +-}
2765 +-#else
2766 +-#define add_interrupt_bench(x)
2767 +-#endif
2768 ++ static struct execute_work set_ready;
2769 ++ unsigned int new, orig, add;
2770 ++ unsigned long flags;
2771 +
2772 +-static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
2773 +-{
2774 +- __u32 *ptr = (__u32 *) regs;
2775 +- unsigned int idx;
2776 ++ if (!bits)
2777 ++ return;
2778 +
2779 +- if (regs == NULL)
2780 +- return 0;
2781 +- idx = READ_ONCE(f->reg_idx);
2782 +- if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
2783 +- idx = 0;
2784 +- ptr += idx++;
2785 +- WRITE_ONCE(f->reg_idx, idx);
2786 +- return *ptr;
2787 +-}
2788 ++ add = min_t(size_t, bits, POOL_BITS);
2789 +
2790 +-void add_interrupt_randomness(int irq, int irq_flags)
2791 +-{
2792 +- struct entropy_store *r;
2793 +- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
2794 +- struct pt_regs *regs = get_irq_regs();
2795 +- unsigned long now = jiffies;
2796 +- cycles_t cycles = random_get_entropy();
2797 +- __u32 c_high, j_high;
2798 +- __u64 ip;
2799 +-
2800 +- if (cycles == 0)
2801 +- cycles = get_reg(fast_pool, regs);
2802 +- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
2803 +- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
2804 +- fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
2805 +- fast_pool->pool[1] ^= now ^ c_high;
2806 +- ip = regs ? instruction_pointer(regs) : _RET_IP_;
2807 +- fast_pool->pool[2] ^= ip;
2808 +- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
2809 +- get_reg(fast_pool, regs);
2810 +-
2811 +- fast_mix(fast_pool);
2812 +- add_interrupt_bench(cycles);
2813 +-
2814 +- if (unlikely(crng_init == 0)) {
2815 +- if ((fast_pool->count >= 64) &&
2816 +- crng_fast_load((char *) fast_pool->pool,
2817 +- sizeof(fast_pool->pool)) > 0) {
2818 +- fast_pool->count = 0;
2819 +- fast_pool->last = now;
2820 ++ do {
2821 ++ orig = READ_ONCE(input_pool.init_bits);
2822 ++ new = min_t(unsigned int, POOL_BITS, orig + add);
2823 ++ } while (cmpxchg(&input_pool.init_bits, orig, new) != orig);
2824 ++
2825 ++ if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
2826 ++ crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
2827 ++ execute_in_process_context(crng_set_ready, &set_ready);
2828 ++ process_random_ready_list();
2829 ++ wake_up_interruptible(&crng_init_wait);
2830 ++ kill_fasync(&fasync, SIGIO, POLL_IN);
2831 ++ pr_notice("crng init done\n");
2832 ++ if (urandom_warning.missed)
2833 ++ pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
2834 ++ urandom_warning.missed);
2835 ++ } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
2836 ++ spin_lock_irqsave(&base_crng.lock, flags);
2837 ++ /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
2838 ++ if (crng_init == CRNG_EMPTY) {
2839 ++ extract_entropy(base_crng.key, sizeof(base_crng.key));
2840 ++ crng_init = CRNG_EARLY;
2841 + }
2842 +- return;
2843 ++ spin_unlock_irqrestore(&base_crng.lock, flags);
2844 + }
2845 +-
2846 +- if ((fast_pool->count < 64) &&
2847 +- !time_after(now, fast_pool->last + HZ))
2848 +- return;
2849 +-
2850 +- r = &input_pool;
2851 +- if (!spin_trylock(&r->lock))
2852 +- return;
2853 +-
2854 +- fast_pool->last = now;
2855 +- __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
2856 +- spin_unlock(&r->lock);
2857 +-
2858 +- fast_pool->count = 0;
2859 +-
2860 +- /* award one bit for the contents of the fast pool */
2861 +- credit_entropy_bits(r, 1);
2862 + }
2863 +-EXPORT_SYMBOL_GPL(add_interrupt_randomness);
2864 +
2865 +-#ifdef CONFIG_BLOCK
2866 +-void add_disk_randomness(struct gendisk *disk)
2867 +-{
2868 +- if (!disk || !disk->random)
2869 +- return;
2870 +- /* first major is 1, so we get >= 0x200 here */
2871 +- add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
2872 +- trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
2873 +-}
2874 +-EXPORT_SYMBOL_GPL(add_disk_randomness);
2875 +-#endif
2876 +
2877 +-/*********************************************************************
2878 ++/**********************************************************************
2879 + *
2880 +- * Entropy extraction routines
2881 ++ * Entropy collection routines.
2882 + *
2883 +- *********************************************************************/
2884 ++ * The following exported functions are used for pushing entropy into
2885 ++ * the above entropy accumulation routines:
2886 ++ *
2887 ++ * void add_device_randomness(const void *buf, size_t len);
2888 ++ * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
2889 ++ * void add_bootloader_randomness(const void *buf, size_t len);
2890 ++ * void add_interrupt_randomness(int irq);
2891 ++ * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
2892 ++ * void add_disk_randomness(struct gendisk *disk);
2893 ++ *
2894 ++ * add_device_randomness() adds data to the input pool that
2895 ++ * is likely to differ between two devices (or possibly even per boot).
2896 ++ * This would be things like MAC addresses or serial numbers, or the
2897 ++ * read-out of the RTC. This does *not* credit any actual entropy to
2898 ++ * the pool, but it initializes the pool to different values for devices
2899 ++ * that might otherwise be identical and have very little entropy
2900 ++ * available to them (particularly common in the embedded world).
2901 ++ *
2902 ++ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
2903 ++ * entropy as specified by the caller. If the entropy pool is full it will
2904 ++ * block until more entropy is needed.
2905 ++ *
2906 ++ * add_bootloader_randomness() is called by bootloader drivers, such as EFI
2907 ++ * and device tree, and credits its input depending on whether or not the
2908 ++ * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
2909 ++ *
2910 ++ * add_interrupt_randomness() uses the interrupt timing as random
2911 ++ * inputs to the entropy pool. Using the cycle counters and the irq source
2912 ++ * as inputs, it feeds the input pool roughly once a second or after 64
2913 ++ * interrupts, crediting 1 bit of entropy for whichever comes first.
2914 ++ *
2915 ++ * add_input_randomness() uses the input layer interrupt timing, as well
2916 ++ * as the event type information from the hardware.
2917 ++ *
2918 ++ * add_disk_randomness() uses what amounts to the seek time of block
2919 ++ * layer request events, on a per-disk_devt basis, as input to the
2920 ++ * entropy pool. Note that high-speed solid state drives with very low
2921 ++ * seek times do not make for good sources of entropy, as their seek
2922 ++ * times are usually fairly consistent.
2923 ++ *
2924 ++ * The last two routines try to estimate how many bits of entropy
2925 ++ * to credit. They do this by keeping track of the first, second,
2926 ++ * and third order deltas of the event timings.
2927 ++ *
2928 ++ **********************************************************************/
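As a concrete (and purely illustrative) picture of the first of these hooks, a driver probe routine might push identity data in like so; struct example_dev and its fields are invented for this sketch:

    /* Hypothetical driver: diversify the pool with per-device identity
     * data. None of this is credited as entropy. */
    static int example_probe(struct example_dev *dev)
    {
        add_device_randomness(dev->mac_addr, sizeof(dev->mac_addr));
        add_device_randomness(&dev->serial, sizeof(dev->serial));
        return 0;
    }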
2929 +
2930 +-/*
2931 +- * This function decides how many bytes to actually take from the
2932 +- * given pool, and also debits the entropy count accordingly.
2933 +- */
2934 +-static size_t account(struct entropy_store *r, size_t nbytes, int min,
2935 +- int reserved)
2936 ++static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
2937 ++static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
2938 ++static int __init parse_trust_cpu(char *arg)
2939 + {
2940 +- int entropy_count, orig, have_bytes;
2941 +- size_t ibytes, nfrac;
2942 +-
2943 +- BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
2944 +-
2945 +- /* Can we pull enough? */
2946 +-retry:
2947 +- entropy_count = orig = READ_ONCE(r->entropy_count);
2948 +- ibytes = nbytes;
2949 +- /* never pull more than available */
2950 +- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
2951 +-
2952 +- if ((have_bytes -= reserved) < 0)
2953 +- have_bytes = 0;
2954 +- ibytes = min_t(size_t, ibytes, have_bytes);
2955 +- if (ibytes < min)
2956 +- ibytes = 0;
2957 +-
2958 +- if (WARN_ON(entropy_count < 0)) {
2959 +- pr_warn("negative entropy count: pool %s count %d\n",
2960 +- r->name, entropy_count);
2961 +- entropy_count = 0;
2962 +- }
2963 +- nfrac = ibytes << (ENTROPY_SHIFT + 3);
2964 +- if ((size_t) entropy_count > nfrac)
2965 +- entropy_count -= nfrac;
2966 +- else
2967 +- entropy_count = 0;
2968 +-
2969 +- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
2970 +- goto retry;
2971 +-
2972 +- trace_debit_entropy(r->name, 8 * ibytes);
2973 +- if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
2974 +- wake_up_interruptible(&random_write_wait);
2975 +- kill_fasync(&fasync, SIGIO, POLL_OUT);
2976 +- }
2977 +-
2978 +- return ibytes;
2979 ++ return kstrtobool(arg, &trust_cpu);
2980 + }
2981 +-
2982 +-/*
2983 +- * This function does the actual extraction for extract_entropy.
2984 +- *
2985 +- * Note: we assume that .poolwords is a multiple of 16 words.
2986 +- */
2987 +-static void extract_buf(struct entropy_store *r, __u8 *out)
2988 ++static int __init parse_trust_bootloader(char *arg)
2989 + {
2990 +- int i;
2991 +- union {
2992 +- __u32 w[5];
2993 +- unsigned long l[LONGS(20)];
2994 +- } hash;
2995 +- __u32 workspace[SHA1_WORKSPACE_WORDS];
2996 +- unsigned long flags;
2997 +-
2998 +- /*
2999 +- * If we have an architectural hardware random number
3000 +- * generator, use it for SHA's initial vector
3001 +- */
3002 +- sha1_init(hash.w);
3003 +- for (i = 0; i < LONGS(20); i++) {
3004 +- unsigned long v;
3005 +- if (!arch_get_random_long(&v))
3006 +- break;
3007 +- hash.l[i] = v;
3008 +- }
3009 +-
3010 +- /* Generate a hash across the pool, 16 words (512 bits) at a time */
3011 +- spin_lock_irqsave(&r->lock, flags);
3012 +- for (i = 0; i < r->poolinfo->poolwords; i += 16)
3013 +- sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
3014 +-
3015 +- /*
3016 +- * We mix the hash back into the pool to prevent backtracking
3017 +- * attacks (where the attacker knows the state of the pool
3018 +- * plus the current outputs, and attempts to find previous
3019 +- * outputs), unless the hash function can be inverted. By
3020 +- * mixing at least a SHA1 worth of hash data back, we make
3021 +- * brute-forcing the feedback as hard as brute-forcing the
3022 +- * hash.
3023 +- */
3024 +- __mix_pool_bytes(r, hash.w, sizeof(hash.w));
3025 +- spin_unlock_irqrestore(&r->lock, flags);
3026 +-
3027 +- memzero_explicit(workspace, sizeof(workspace));
3028 +-
3029 +- /*
3030 +- * In case the hash function has some recognizable output
3031 +- * pattern, we fold it in half. Thus, we always feed back
3032 +- * twice as much data as we output.
3033 +- */
3034 +- hash.w[0] ^= hash.w[3];
3035 +- hash.w[1] ^= hash.w[4];
3036 +- hash.w[2] ^= rol32(hash.w[2], 16);
3037 +-
3038 +- memcpy(out, &hash, EXTRACT_SIZE);
3039 +- memzero_explicit(&hash, sizeof(hash));
3040 ++ return kstrtobool(arg, &trust_bootloader);
3041 + }
3042 ++early_param("random.trust_cpu", parse_trust_cpu);
3043 ++early_param("random.trust_bootloader", parse_trust_bootloader);
3044 +
3045 +-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
3046 +- size_t nbytes, int fips)
3047 ++/*
3048 ++ * The first collection of entropy occurs at system boot while interrupts
3049 ++ * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
3050 ++ * utsname(), and the command line. Depending on the above configuration knob,
3051 ++ * RDSEED may be considered sufficient for initialization. Note that much
3052 ++ * earlier setup may already have pushed entropy into the input pool by the
3053 ++ * time we get here.
3054 ++ */
3055 ++int __init random_init(const char *command_line)
3056 + {
3057 +- ssize_t ret = 0, i;
3058 +- __u8 tmp[EXTRACT_SIZE];
3059 +- unsigned long flags;
3060 ++ ktime_t now = ktime_get_real();
3061 ++ unsigned int i, arch_bytes;
3062 ++ unsigned long entropy;
3063 +
3064 +- while (nbytes) {
3065 +- extract_buf(r, tmp);
3066 ++#if defined(LATENT_ENTROPY_PLUGIN)
3067 ++ static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
3068 ++ _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
3069 ++#endif
3070 +
3071 +- if (fips) {
3072 +- spin_lock_irqsave(&r->lock, flags);
3073 +- if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
3074 +- panic("Hardware RNG duplicated output!\n");
3075 +- memcpy(r->last_data, tmp, EXTRACT_SIZE);
3076 +- spin_unlock_irqrestore(&r->lock, flags);
3077 ++ for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE;
3078 ++ i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
3079 ++ if (!arch_get_random_seed_long_early(&entropy) &&
3080 ++ !arch_get_random_long_early(&entropy)) {
3081 ++ entropy = random_get_entropy();
3082 ++ arch_bytes -= sizeof(entropy);
3083 + }
3084 +- i = min_t(int, nbytes, EXTRACT_SIZE);
3085 +- memcpy(buf, tmp, i);
3086 +- nbytes -= i;
3087 +- buf += i;
3088 +- ret += i;
3089 ++ _mix_pool_bytes(&entropy, sizeof(entropy));
3090 + }
3091 ++ _mix_pool_bytes(&now, sizeof(now));
3092 ++ _mix_pool_bytes(utsname(), sizeof(*(utsname())));
3093 ++ _mix_pool_bytes(command_line, strlen(command_line));
3094 ++ add_latent_entropy();
3095 +
3096 +- /* Wipe data just returned from memory */
3097 +- memzero_explicit(tmp, sizeof(tmp));
3098 ++ if (crng_ready())
3099 ++ crng_reseed();
3100 ++ else if (trust_cpu)
3101 ++ credit_init_bits(arch_bytes * 8);
3102 +
3103 +- return ret;
3104 ++ return 0;
3105 + }
3106 +
3107 + /*
3108 +- * This function extracts randomness from the "entropy pool", and
3109 +- * returns it in a buffer.
3110 ++ * Add device- or boot-specific data to the input pool to help
3111 ++ * initialize it.
3112 + *
3113 +- * The min parameter specifies the minimum amount we can pull before
3114 +- * failing to avoid races that defeat catastrophic reseeding while the
3115 +- * reserved parameter indicates how much entropy we must leave in the
3116 +- * pool after each pull to avoid starving other readers.
3117 ++ * None of this adds any entropy; it is meant to avoid the problem of
3118 ++ * the entropy pool having similar initial state across largely
3119 ++ * identical devices.
3120 + */
3121 +-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
3122 +- size_t nbytes, int min, int reserved)
3123 ++void add_device_randomness(const void *buf, size_t len)
3124 + {
3125 +- __u8 tmp[EXTRACT_SIZE];
3126 ++ unsigned long entropy = random_get_entropy();
3127 + unsigned long flags;
3128 +
3129 +- /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
3130 +- if (fips_enabled) {
3131 +- spin_lock_irqsave(&r->lock, flags);
3132 +- if (!r->last_data_init) {
3133 +- r->last_data_init = 1;
3134 +- spin_unlock_irqrestore(&r->lock, flags);
3135 +- trace_extract_entropy(r->name, EXTRACT_SIZE,
3136 +- ENTROPY_BITS(r), _RET_IP_);
3137 +- extract_buf(r, tmp);
3138 +- spin_lock_irqsave(&r->lock, flags);
3139 +- memcpy(r->last_data, tmp, EXTRACT_SIZE);
3140 +- }
3141 +- spin_unlock_irqrestore(&r->lock, flags);
3142 +- }
3143 +-
3144 +- trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
3145 +- nbytes = account(r, nbytes, min, reserved);
3146 +-
3147 +- return _extract_entropy(r, buf, nbytes, fips_enabled);
3148 ++ spin_lock_irqsave(&input_pool.lock, flags);
3149 ++ _mix_pool_bytes(&entropy, sizeof(entropy));
3150 ++ _mix_pool_bytes(buf, len);
3151 ++ spin_unlock_irqrestore(&input_pool.lock, flags);
3152 + }
3153 ++EXPORT_SYMBOL(add_device_randomness);
3154 +
3155 +-#define warn_unseeded_randomness(previous) \
3156 +- _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
3157 +-
3158 +-static void _warn_unseeded_randomness(const char *func_name, void *caller,
3159 +- void **previous)
3160 ++/*
3161 ++ * Interface for in-kernel drivers of true hardware RNGs.
3162 ++ * Those devices may produce endless random bits and will be throttled
3163 ++ * when our pool is full.
3164 ++ */
3165 ++void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
3166 + {
3167 +-#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
3168 +- const bool print_once = false;
3169 +-#else
3170 +- static bool print_once __read_mostly;
3171 +-#endif
3172 ++ mix_pool_bytes(buf, len);
3173 ++ credit_init_bits(entropy);
3174 +
3175 +- if (print_once ||
3176 +- crng_ready() ||
3177 +- (previous && (caller == READ_ONCE(*previous))))
3178 +- return;
3179 +- WRITE_ONCE(*previous, caller);
3180 +-#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
3181 +- print_once = true;
3182 +-#endif
3183 +- if (__ratelimit(&unseeded_warning))
3184 +- printk_deferred(KERN_NOTICE "random: %s called from %pS "
3185 +- "with crng_init=%d\n", func_name, caller,
3186 +- crng_init);
3187 ++ /*
3188 ++ * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
3189 ++ * we're not yet initialized.
3190 ++ */
3191 ++ if (!kthread_should_stop() && crng_ready())
3192 ++ schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
3193 + }
3194 ++EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
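A feeder in the style of drivers/char/hw_random might look roughly like the sketch below; example_read_trng() is a made-up device read, and the throttling comes from the sleep inside add_hwgenerator_randomness() itself:

    static int example_hwrng_fill(void *unused)
    {
        u8 buf[32];

        while (!kthread_should_stop()) {
            int n = example_read_trng(buf, sizeof(buf)); /* hypothetical */

            if (n > 0)
                /* Claim full entropy: n bytes -> n * 8 bits. */
                add_hwgenerator_randomness(buf, n, n * 8);
        }
        return 0;
    }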
3195 +
3196 + /*
3197 +- * This function is the exported kernel interface. It returns some
3198 +- * number of good random numbers, suitable for key generation, seeding
3199 +- * TCP sequence numbers, etc. It does not rely on the hardware random
3200 +- * number generator. For random bytes direct from the hardware RNG
3201 +- * (when available), use get_random_bytes_arch(). In order to ensure
3202 +- * that the randomness provided by this function is okay, the function
3203 +- * wait_for_random_bytes() should be called and return 0 at least once
3204 +- * at any point prior.
3205 ++ * Handle random seed passed by bootloader, and credit it if
3206 ++ * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
3207 + */
3208 +-static void _get_random_bytes(void *buf, int nbytes)
3209 ++void __cold add_bootloader_randomness(const void *buf, size_t len)
3210 + {
3211 +- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
3212 +-
3213 +- trace_get_random_bytes(nbytes, _RET_IP_);
3214 +-
3215 +- while (nbytes >= CHACHA_BLOCK_SIZE) {
3216 +- extract_crng(buf);
3217 +- buf += CHACHA_BLOCK_SIZE;
3218 +- nbytes -= CHACHA_BLOCK_SIZE;
3219 +- }
3220 +-
3221 +- if (nbytes > 0) {
3222 +- extract_crng(tmp);
3223 +- memcpy(buf, tmp, nbytes);
3224 +- crng_backtrack_protect(tmp, nbytes);
3225 +- } else
3226 +- crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
3227 +- memzero_explicit(tmp, sizeof(tmp));
3228 ++ mix_pool_bytes(buf, len);
3229 ++ if (trust_bootloader)
3230 ++ credit_init_bits(len * 8);
3231 + }
3232 ++EXPORT_SYMBOL_GPL(add_bootloader_randomness);
3233 +
3234 +-void get_random_bytes(void *buf, int nbytes)
3235 +-{
3236 +- static void *previous;
3237 +-
3238 +- warn_unseeded_randomness(&previous);
3239 +- _get_random_bytes(buf, nbytes);
3240 +-}
3241 +-EXPORT_SYMBOL(get_random_bytes);
3242 ++struct fast_pool {
3243 ++ struct work_struct mix;
3244 ++ unsigned long pool[4];
3245 ++ unsigned long last;
3246 ++ unsigned int count;
3247 ++};
3248 +
3249 ++static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
3250 ++#ifdef CONFIG_64BIT
3251 ++#define FASTMIX_PERM SIPHASH_PERMUTATION
3252 ++ .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
3253 ++#else
3254 ++#define FASTMIX_PERM HSIPHASH_PERMUTATION
3255 ++ .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
3256 ++#endif
3257 ++};
3258 +
3259 + /*
3260 +- * Each time the timer fires, we expect that we got an unpredictable
3261 +- * jump in the cycle counter. Even if the timer is running on another
3262 +- * CPU, the timer activity will be touching the stack of the CPU that is
3263 +- * generating entropy.
3264 +- *
3265 +- * Note that we don't re-arm the timer in the timer itself - we are
3266 +- * happy to be scheduled away, since that just makes the load more
3267 +- * complex, but we do not want the timer to keep ticking unless the
3268 +- * entropy loop is running.
3269 +- *
3270 +- * So the re-arming always happens in the entropy loop itself.
3271 ++ * This is [Half]SipHash-1-x, starting from an empty key. Because
3272 ++ * the key is fixed, it assumes that its inputs are non-malicious,
3273 ++ * and therefore this has no security on its own. s represents the
3274 ++ * four-word SipHash state, while v represents a two-word input.
3275 + */
3276 +-static void entropy_timer(struct timer_list *t)
3277 ++static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
3278 + {
3279 +- credit_entropy_bits(&input_pool, 1);
3280 ++ s[3] ^= v1;
3281 ++ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
3282 ++ s[0] ^= v1;
3283 ++ s[3] ^= v2;
3284 ++ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
3285 ++ s[0] ^= v2;
3286 + }
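For reference, SIPHASH_PERMUTATION expands to the standard SipHash round. A freestanding rendition, with rol64() spelled out since this is outside the kernel:

    static inline unsigned long long rol64(unsigned long long w, unsigned int s)
    {
        return (w << s) | (w >> (64 - s));
    }

    static void siphash_round(unsigned long long v[4])
    {
        v[0] += v[1]; v[1] = rol64(v[1], 13); v[1] ^= v[0]; v[0] = rol64(v[0], 32);
        v[2] += v[3]; v[3] = rol64(v[3], 16); v[3] ^= v[2];
        v[0] += v[3]; v[3] = rol64(v[3], 21); v[3] ^= v[0];
        v[2] += v[1]; v[1] = rol64(v[1], 17); v[1] ^= v[2]; v[2] = rol64(v[2], 32);
    }

fast_mix() runs one such round per injected word (the "1" in SipHash-1-x), cheap enough for hard-IRQ context while still dispersing each input across the whole state.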
3287 +
3288 ++#ifdef CONFIG_SMP
3289 + /*
3290 +- * If we have an actual cycle counter, see if we can
3291 +- * generate enough entropy with timing noise
3292 ++ * This function is called when the CPU has just come online, with
3293 ++ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
3294 + */
3295 +-static void try_to_generate_entropy(void)
3296 ++int __cold random_online_cpu(unsigned int cpu)
3297 + {
3298 +- struct {
3299 +- unsigned long now;
3300 +- struct timer_list timer;
3301 +- } stack;
3302 ++ /*
3303 ++ * During CPU shutdown and before CPU onlining, add_interrupt_
3304 ++ * randomness() may schedule mix_interrupt_randomness(), and
3305 ++ * set the MIX_INFLIGHT flag. However, because the worker can
3306 ++ * be scheduled on a different CPU during this period, that
3307 ++ * flag will never be cleared. For that reason, we zero out
3308 ++ * the flag here, which runs just after workqueues are onlined
3309 ++ * for the CPU again. This also has the effect of setting the
3310 ++ * irq randomness count to zero so that new accumulated irqs
3311 ++ * are fresh.
3312 ++ */
3313 ++ per_cpu_ptr(&irq_randomness, cpu)->count = 0;
3314 ++ return 0;
3315 ++}
3316 ++#endif
3317 +
3318 +- stack.now = random_get_entropy();
3319 ++static void mix_interrupt_randomness(struct work_struct *work)
3320 ++{
3321 ++ struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
3322 ++ /*
3323 ++ * The size of the copied stack pool is explicitly 2 longs so that we
3324 ++ * only ever ingest half of the siphash output each time, retaining
3325 ++ * the other half as the next "key" that carries over. The entropy is
3326 ++ * supposed to be sufficiently dispersed between bits so on average
3327 ++ * we don't wind up "losing" some.
3328 ++ */
3329 ++ unsigned long pool[2];
3330 ++ unsigned int count;
3331 +
3332 +- /* Slow counter - or none. Don't even bother */
3333 +- if (stack.now == random_get_entropy())
3334 ++ /* Check to see if we're running on the wrong CPU due to hotplug. */
3335 ++ local_irq_disable();
3336 ++ if (fast_pool != this_cpu_ptr(&irq_randomness)) {
3337 ++ local_irq_enable();
3338 + return;
3339 +-
3340 +- timer_setup_on_stack(&stack.timer, entropy_timer, 0);
3341 +- while (!crng_ready()) {
3342 +- if (!timer_pending(&stack.timer))
3343 +- mod_timer(&stack.timer, jiffies+1);
3344 +- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
3345 +- schedule();
3346 +- stack.now = random_get_entropy();
3347 + }
3348 +
3349 +- del_timer_sync(&stack.timer);
3350 +- destroy_timer_on_stack(&stack.timer);
3351 +- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
3352 +-}
3353 +-
3354 +-/*
3355 +- * Wait for the urandom pool to be seeded and thus guaranteed to supply
3356 +- * cryptographically secure random numbers. This applies to: the /dev/urandom
3357 +- * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
3358 +- * family of functions. Using any of these functions without first calling
3359 +- * this function forfeits the guarantee of security.
3360 +- *
3361 +- * Returns: 0 if the urandom pool has been seeded.
3362 +- * -ERESTARTSYS if the function was interrupted by a signal.
3363 +- */
3364 +-int wait_for_random_bytes(void)
3365 +-{
3366 +- if (likely(crng_ready()))
3367 +- return 0;
3368 +-
3369 +- do {
3370 +- int ret;
3371 +- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
3372 +- if (ret)
3373 +- return ret > 0 ? 0 : ret;
3374 ++ /*
3375 ++ * Copy the pool to the stack so that the mixer always has a
3376 ++ * consistent view, before we reenable irqs again.
3377 ++ */
3378 ++ memcpy(pool, fast_pool->pool, sizeof(pool));
3379 ++ count = fast_pool->count;
3380 ++ fast_pool->count = 0;
3381 ++ fast_pool->last = jiffies;
3382 ++ local_irq_enable();
3383 +
3384 +- try_to_generate_entropy();
3385 +- } while (!crng_ready());
3386 ++ mix_pool_bytes(pool, sizeof(pool));
3387 ++ credit_init_bits(max(1u, (count & U16_MAX) / 64));
3388 +
3389 +- return 0;
3390 ++ memzero_explicit(pool, sizeof(pool));
3391 + }
3392 +-EXPORT_SYMBOL(wait_for_random_bytes);
3393 +
3394 +-/*
3395 +- * Returns whether or not the urandom pool has been seeded and thus guaranteed
3396 +- * to supply cryptographically secure random numbers. This applies to: the
3397 +- * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
3398 +- * ,u64,int,long} family of functions.
3399 +- *
3400 +- * Returns: true if the urandom pool has been seeded.
3401 +- * false if the urandom pool has not been seeded.
3402 +- */
3403 +-bool rng_is_initialized(void)
3404 ++void add_interrupt_randomness(int irq)
3405 + {
3406 +- return crng_ready();
3407 +-}
3408 +-EXPORT_SYMBOL(rng_is_initialized);
3409 ++ enum { MIX_INFLIGHT = 1U << 31 };
3410 ++ unsigned long entropy = random_get_entropy();
3411 ++ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
3412 ++ struct pt_regs *regs = get_irq_regs();
3413 ++ unsigned int new_count;
3414 +
3415 +-/*
3416 +- * Add a callback function that will be invoked when the nonblocking
3417 +- * pool is initialised.
3418 +- *
3419 +- * returns: 0 if callback is successfully added
3420 +- * -EALREADY if pool is already initialised (callback not called)
3421 +- * -ENOENT if module for callback is not alive
3422 +- */
3423 +-int add_random_ready_callback(struct random_ready_callback *rdy)
3424 +-{
3425 +- struct module *owner;
3426 +- unsigned long flags;
3427 +- int err = -EALREADY;
3428 +-
3429 +- if (crng_ready())
3430 +- return err;
3431 ++ fast_mix(fast_pool->pool, entropy,
3432 ++ (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
3433 ++ new_count = ++fast_pool->count;
3434 +
3435 +- owner = rdy->owner;
3436 +- if (!try_module_get(owner))
3437 +- return -ENOENT;
3438 +-
3439 +- spin_lock_irqsave(&random_ready_list_lock, flags);
3440 +- if (crng_ready())
3441 +- goto out;
3442 +-
3443 +- owner = NULL;
3444 +-
3445 +- list_add(&rdy->list, &random_ready_list);
3446 +- err = 0;
3447 +-
3448 +-out:
3449 +- spin_unlock_irqrestore(&random_ready_list_lock, flags);
3450 ++ if (new_count & MIX_INFLIGHT)
3451 ++ return;
3452 +
3453 +- module_put(owner);
3454 ++ if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ))
3455 ++ return;
3456 +
3457 +- return err;
3458 ++ if (unlikely(!fast_pool->mix.func))
3459 ++ INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
3460 ++ fast_pool->count |= MIX_INFLIGHT;
3461 ++ queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
3462 + }
3463 +-EXPORT_SYMBOL(add_random_ready_callback);
3464 ++EXPORT_SYMBOL_GPL(add_interrupt_randomness);
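The dropped irq_flags argument was unused, which is why callers such as vmbus_isr() (see the hunk further down) simply lose a parameter. The generic call site has roughly this shape, abridged from kernel/irq/handle.c; treat the surrounding details as approximate:

    irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
    {
        irqreturn_t retval = __handle_irq_event_percpu(desc);

        add_interrupt_randomness(desc->irq_data.irq);

        if (!irq_settings_no_debug(desc))
            note_interrupt(desc, retval);
        return retval;
    }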
3465 ++
3466 ++/* There is one of these per entropy source */
3467 ++struct timer_rand_state {
3468 ++ unsigned long last_time;
3469 ++ long last_delta, last_delta2;
3470 ++};
3471 +
3472 + /*
3473 +- * Delete a previously registered readiness callback function.
3474 ++ * This function adds entropy to the entropy "pool" by using timing
3475 ++ * delays. It uses the timer_rand_state structure to make an estimate
3476 ++ * of how many bits of entropy this call has added to the pool. The
3477 ++ * value "num" is also added to the pool; it should somehow describe
3478 ++ * the type of event that just happened.
3479 + */
3480 +-void del_random_ready_callback(struct random_ready_callback *rdy)
3481 ++static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
3482 + {
3483 +- unsigned long flags;
3484 +- struct module *owner = NULL;
3485 ++ unsigned long entropy = random_get_entropy(), now = jiffies, flags;
3486 ++ long delta, delta2, delta3;
3487 ++ unsigned int bits;
3488 +
3489 +- spin_lock_irqsave(&random_ready_list_lock, flags);
3490 +- if (!list_empty(&rdy->list)) {
3491 +- list_del_init(&rdy->list);
3492 +- owner = rdy->owner;
3493 ++ /*
3494 ++ * If we're in a hard IRQ, add_interrupt_randomness() will be called
3495 ++ * sometime after, so mix into the fast pool.
3496 ++ */
3497 ++ if (in_hardirq()) {
3498 ++ fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
3499 ++ } else {
3500 ++ spin_lock_irqsave(&input_pool.lock, flags);
3501 ++ _mix_pool_bytes(&entropy, sizeof(entropy));
3502 ++ _mix_pool_bytes(&num, sizeof(num));
3503 ++ spin_unlock_irqrestore(&input_pool.lock, flags);
3504 + }
3505 +- spin_unlock_irqrestore(&random_ready_list_lock, flags);
3506 +
3507 +- module_put(owner);
3508 +-}
3509 +-EXPORT_SYMBOL(del_random_ready_callback);
3510 ++ if (crng_ready())
3511 ++ return;
3512 +
3513 +-/*
3514 +- * This function will use the architecture-specific hardware random
3515 +- * number generator if it is available. The arch-specific hw RNG will
3516 +- * almost certainly be faster than what we can do in software, but it
3517 +- * is impossible to verify that it is implemented securely (as
3518 +- * opposed, to, say, the AES encryption of a sequence number using a
3519 +- * key known by the NSA). So it's useful if we need the speed, but
3520 +- * only if we're willing to trust the hardware manufacturer not to
3521 +- * have put in a back door.
3522 +- *
3523 +- * Return number of bytes filled in.
3524 +- */
3525 +-int __must_check get_random_bytes_arch(void *buf, int nbytes)
3526 +-{
3527 +- int left = nbytes;
3528 +- char *p = buf;
3529 ++ /*
3530 ++ * Calculate number of bits of randomness we probably added.
3531 ++ * We take into account the first, second and third-order deltas
3532 ++ * in order to make our estimate.
3533 ++ */
3534 ++ delta = now - READ_ONCE(state->last_time);
3535 ++ WRITE_ONCE(state->last_time, now);
3536 +
3537 +- trace_get_random_bytes_arch(left, _RET_IP_);
3538 +- while (left) {
3539 +- unsigned long v;
3540 +- int chunk = min_t(int, left, sizeof(unsigned long));
3541 ++ delta2 = delta - READ_ONCE(state->last_delta);
3542 ++ WRITE_ONCE(state->last_delta, delta);
3543 +
3544 +- if (!arch_get_random_long(&v))
3545 +- break;
3546 ++ delta3 = delta2 - READ_ONCE(state->last_delta2);
3547 ++ WRITE_ONCE(state->last_delta2, delta2);
3548 +
3549 +- memcpy(p, &v, chunk);
3550 +- p += chunk;
3551 +- left -= chunk;
3552 +- }
3553 ++ if (delta < 0)
3554 ++ delta = -delta;
3555 ++ if (delta2 < 0)
3556 ++ delta2 = -delta2;
3557 ++ if (delta3 < 0)
3558 ++ delta3 = -delta3;
3559 ++ if (delta > delta2)
3560 ++ delta = delta2;
3561 ++ if (delta > delta3)
3562 ++ delta = delta3;
3563 ++
3564 ++ /*
3565 ++ * delta is now minimum absolute delta. Round down by 1 bit
3566 ++ * on general principles, and limit entropy estimate to 11 bits.
3567 ++ */
3568 ++ bits = min(fls(delta >> 1), 11);
3569 +
3570 +- return nbytes - left;
3571 ++ /*
3572 ++ * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
3573 ++ * will run after this, which uses a different crediting scheme of 1 bit
3574 ++ * per every 64 interrupts. In order to let that function do accounting
3575 ++ * close to the one in this function, we credit a full 64/64 bit per bit,
3576 ++ * and then subtract one to account for the extra one added.
3577 ++ */
3578 ++ if (in_hardirq())
3579 ++ this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
3580 ++ else
3581 ++ _credit_init_bits(bits);
3582 + }
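Worked example of the estimator above: with a previous delta of 80 and a previous second-order delta of 5, an event landing 100 jiffies after the last one gives delta = 100, delta2 = 100 - 80 = 20, and delta3 = 20 - 5 = 15. The minimum absolute value is 15, so bits = min(fls(15 >> 1), 11) = fls(7) = 3. Outside hard-IRQ context that credits 3 bits directly; inside it, the fast pool count grows by 3 * 64 - 1 = 191, which together with the +1 from the interrupt itself nets 192 counts, i.e. the same 3 bits under the 1-bit-per-64-interrupts rule.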
3583 +-EXPORT_SYMBOL(get_random_bytes_arch);
3584 +
3585 +-/*
3586 +- * init_std_data - initialize pool with system data
3587 +- *
3588 +- * @r: pool to initialize
3589 +- *
3590 +- * This function clears the pool's entropy count and mixes some system
3591 +- * data into the pool to prepare it for use. The pool is not cleared
3592 +- * as that can only decrease the entropy in the pool.
3593 +- */
3594 +-static void __init init_std_data(struct entropy_store *r)
3595 ++void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
3596 + {
3597 +- int i;
3598 +- ktime_t now = ktime_get_real();
3599 +- unsigned long rv;
3600 +-
3601 +- mix_pool_bytes(r, &now, sizeof(now));
3602 +- for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
3603 +- if (!arch_get_random_seed_long(&rv) &&
3604 +- !arch_get_random_long(&rv))
3605 +- rv = random_get_entropy();
3606 +- mix_pool_bytes(r, &rv, sizeof(rv));
3607 +- }
3608 +- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
3609 ++ static unsigned char last_value;
3610 ++ static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
3611 ++
3612 ++ /* Ignore autorepeat and the like. */
3613 ++ if (value == last_value)
3614 ++ return;
3615 ++
3616 ++ last_value = value;
3617 ++ add_timer_randomness(&input_timer_state,
3618 ++ (type << 4) ^ code ^ (code >> 4) ^ value);
3619 + }
3620 ++EXPORT_SYMBOL_GPL(add_input_randomness);
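Drivers do not call this hook themselves; the input core invokes it for (roughly) every event it does not ignore. So a keyboard driver reporting a key press:

    input_report_key(dev, KEY_A, 1);
    input_sync(dev);

ends up funneling add_input_randomness(EV_KEY, KEY_A, 1) into the pool, subject to the duplicate-value filter above.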
3621 +
3622 +-/*
3623 +- * Note that setup_arch() may call add_device_randomness()
3624 +- * long before we get here. This allows seeding of the pools
3625 +- * with some platform dependent data very early in the boot
3626 +- * process. But it limits our options here. We must use
3627 +- * statically allocated structures that already have all
3628 +- * initializations complete at compile time. We should also
3629 +- * take care not to overwrite the precious per platform data
3630 +- * we were given.
3631 +- */
3632 +-int __init rand_initialize(void)
3633 ++#ifdef CONFIG_BLOCK
3634 ++void add_disk_randomness(struct gendisk *disk)
3635 + {
3636 +- init_std_data(&input_pool);
3637 +- if (crng_need_final_init)
3638 +- crng_finalize_init(&primary_crng);
3639 +- crng_initialize_primary(&primary_crng);
3640 +- crng_global_init_time = jiffies;
3641 +- if (ratelimit_disable) {
3642 +- urandom_warning.interval = 0;
3643 +- unseeded_warning.interval = 0;
3644 +- }
3645 +- return 0;
3646 ++ if (!disk || !disk->random)
3647 ++ return;
3648 ++ /* First major is 1, so we get >= 0x200 here. */
3649 ++ add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
3650 + }
3651 ++EXPORT_SYMBOL_GPL(add_disk_randomness);
3652 +
3653 +-#ifdef CONFIG_BLOCK
3654 +-void rand_initialize_disk(struct gendisk *disk)
3655 ++void __cold rand_initialize_disk(struct gendisk *disk)
3656 + {
3657 + struct timer_rand_state *state;
3658 +
3659 +@@ -1823,116 +1141,189 @@ void rand_initialize_disk(struct gendisk *disk)
3660 + }
3661 + #endif
3662 +
3663 +-static ssize_t
3664 +-urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
3665 +- loff_t *ppos)
3666 ++/*
3667 ++ * Each time the timer fires, we expect that we got an unpredictable
3668 ++ * jump in the cycle counter. Even if the timer is running on another
3669 ++ * CPU, the timer activity will be touching the stack of the CPU that is
3670 ++ * generating entropy.
3671 ++ *
3672 ++ * Note that we don't re-arm the timer in the timer itself - we are
3673 ++ * happy to be scheduled away, since that just makes the load more
3674 ++ * complex, but we do not want the timer to keep ticking unless the
3675 ++ * entropy loop is running.
3676 ++ *
3677 ++ * So the re-arming always happens in the entropy loop itself.
3678 ++ */
3679 ++static void __cold entropy_timer(struct timer_list *t)
3680 + {
3681 +- int ret;
3682 +-
3683 +- nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
3684 +- ret = extract_crng_user(buf, nbytes);
3685 +- trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
3686 +- return ret;
3687 ++ credit_init_bits(1);
3688 + }
3689 +
3690 +-static ssize_t
3691 +-urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
3692 ++/*
3693 ++ * If we have an actual cycle counter, see if we can
3694 ++ * generate enough entropy with timing noise
3695 ++ */
3696 ++static void __cold try_to_generate_entropy(void)
3697 + {
3698 +- unsigned long flags;
3699 +- static int maxwarn = 10;
3700 ++ struct {
3701 ++ unsigned long entropy;
3702 ++ struct timer_list timer;
3703 ++ } stack;
3704 ++
3705 ++ stack.entropy = random_get_entropy();
3706 +
3707 +- if (!crng_ready() && maxwarn > 0) {
3708 +- maxwarn--;
3709 +- if (__ratelimit(&urandom_warning))
3710 +- pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
3711 +- current->comm, nbytes);
3712 +- spin_lock_irqsave(&primary_crng.lock, flags);
3713 +- crng_init_cnt = 0;
3714 +- spin_unlock_irqrestore(&primary_crng.lock, flags);
3715 ++ /* Slow counter - or none. Don't even bother */
3716 ++ if (stack.entropy == random_get_entropy())
3717 ++ return;
3718 ++
3719 ++ timer_setup_on_stack(&stack.timer, entropy_timer, 0);
3720 ++ while (!crng_ready() && !signal_pending(current)) {
3721 ++ if (!timer_pending(&stack.timer))
3722 ++ mod_timer(&stack.timer, jiffies + 1);
3723 ++ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
3724 ++ schedule();
3725 ++ stack.entropy = random_get_entropy();
3726 + }
3727 +
3728 +- return urandom_read_nowarn(file, buf, nbytes, ppos);
3729 ++ del_timer_sync(&stack.timer);
3730 ++ destroy_timer_on_stack(&stack.timer);
3731 ++ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
3732 + }
3733 +
3734 +-static ssize_t
3735 +-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
3736 ++
3737 ++/**********************************************************************
3738 ++ *
3739 ++ * Userspace reader/writer interfaces.
3740 ++ *
3741 ++ * getrandom(2) is the primary modern interface into the RNG and should
3742 ++ * be used in preference to anything else.
3743 ++ *
3744 ++ * Reading from /dev/random has the same functionality as calling
3745 ++ * getrandom(2) with flags=0. In earlier versions, however, it had
3746 ++ * vastly different semantics and should therefore be avoided, to
3747 ++ * prevent backwards compatibility issues.
3748 ++ *
3749 ++ * Reading from /dev/urandom has the same functionality as calling
3750 ++ * getrandom(2) with flags=GRND_INSECURE. Because it does not block
3751 ++ * waiting for the RNG to be ready, it should not be used.
3752 ++ *
3753 ++ * Writing to either /dev/random or /dev/urandom adds entropy to
3754 ++ * the input pool but does not credit it.
3755 ++ *
3756 ++ * Polling on /dev/random indicates when the RNG is initialized, on
3757 ++ * the read side, and when it wants new entropy, on the write side.
3758 ++ *
3759 ++ * Both /dev/random and /dev/urandom have the same set of ioctls for
3760 ++ * adding entropy, getting the entropy count, zeroing the count, and
3761 ++ * reseeding the crng.
3762 ++ *
3763 ++ **********************************************************************/
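A minimal userspace use of the preferred interface described above; glibc 2.25+ exposes the wrapper in <sys/random.h>:

    #include <stdio.h>
    #include <sys/random.h>

    int main(void)
    {
        unsigned char key[32];

        /* flags=0: block until the RNG is initialized, then requests of
         * up to 256 bytes return fully. */
        if (getrandom(key, sizeof(key), 0) != (ssize_t)sizeof(key))
            return 1;
        for (size_t i = 0; i < sizeof(key); i++)
            printf("%02x", key[i]);
        putchar('\n');
        return 0;
    }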
3764 ++
3765 ++SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
3766 + {
3767 ++ struct iov_iter iter;
3768 ++ struct iovec iov;
3769 + int ret;
3770 +
3771 +- ret = wait_for_random_bytes();
3772 +- if (ret != 0)
3773 ++ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
3774 ++ return -EINVAL;
3775 ++
3776 ++ /*
3777 ++ * Requesting insecure and blocking randomness at the same time makes
3778 ++ * no sense.
3779 ++ */
3780 ++ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
3781 ++ return -EINVAL;
3782 ++
3783 ++ if (!crng_ready() && !(flags & GRND_INSECURE)) {
3784 ++ if (flags & GRND_NONBLOCK)
3785 ++ return -EAGAIN;
3786 ++ ret = wait_for_random_bytes();
3787 ++ if (unlikely(ret))
3788 ++ return ret;
3789 ++ }
3790 ++
3791 ++ ret = import_single_range(READ, ubuf, len, &iov, &iter);
3792 ++ if (unlikely(ret))
3793 + return ret;
3794 +- return urandom_read_nowarn(file, buf, nbytes, ppos);
3795 ++ return get_random_bytes_user(&iter);
3796 + }
3797 +
3798 +-static __poll_t
3799 +-random_poll(struct file *file, poll_table * wait)
3800 ++static __poll_t random_poll(struct file *file, poll_table *wait)
3801 + {
3802 +- __poll_t mask;
3803 +-
3804 + poll_wait(file, &crng_init_wait, wait);
3805 +- poll_wait(file, &random_write_wait, wait);
3806 +- mask = 0;
3807 +- if (crng_ready())
3808 +- mask |= EPOLLIN | EPOLLRDNORM;
3809 +- if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
3810 +- mask |= EPOLLOUT | EPOLLWRNORM;
3811 +- return mask;
3812 ++ return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
3813 + }
3814 +
3815 +-static int
3816 +-write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
3817 ++static ssize_t write_pool_user(struct iov_iter *iter)
3818 + {
3819 +- size_t bytes;
3820 +- __u32 t, buf[16];
3821 +- const char __user *p = buffer;
3822 ++ u8 block[BLAKE2S_BLOCK_SIZE];
3823 ++ ssize_t ret = 0;
3824 ++ size_t copied;
3825 +
3826 +- while (count > 0) {
3827 +- int b, i = 0;
3828 ++ if (unlikely(!iov_iter_count(iter)))
3829 ++ return 0;
3830 +
3831 +- bytes = min(count, sizeof(buf));
3832 +- if (copy_from_user(&buf, p, bytes))
3833 +- return -EFAULT;
3834 ++ for (;;) {
3835 ++ copied = copy_from_iter(block, sizeof(block), iter);
3836 ++ ret += copied;
3837 ++ mix_pool_bytes(block, copied);
3838 ++ if (!iov_iter_count(iter) || copied != sizeof(block))
3839 ++ break;
3840 +
3841 +- for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
3842 +- if (!arch_get_random_int(&t))
3843 ++ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
3844 ++ if (ret % PAGE_SIZE == 0) {
3845 ++ if (signal_pending(current))
3846 + break;
3847 +- buf[i] ^= t;
3848 ++ cond_resched();
3849 + }
3850 ++ }
3851 ++
3852 ++ memzero_explicit(block, sizeof(block));
3853 ++ return ret ? ret : -EFAULT;
3854 ++}
3855 ++
3856 ++static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
3857 ++{
3858 ++ return write_pool_user(iter);
3859 ++}
3860 +
3861 +- count -= bytes;
3862 +- p += bytes;
3863 ++static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
3864 ++{
3865 ++ static int maxwarn = 10;
3866 +
3867 +- mix_pool_bytes(r, buf, bytes);
3868 +- cond_resched();
3869 ++ if (!crng_ready()) {
3870 ++ if (!ratelimit_disable && maxwarn <= 0)
3871 ++ ++urandom_warning.missed;
3872 ++ else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
3873 ++ --maxwarn;
3874 ++ pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
3875 ++ current->comm, iov_iter_count(iter));
3876 ++ }
3877 + }
3878 +
3879 +- return 0;
3880 ++ return get_random_bytes_user(iter);
3881 + }
3882 +
3883 +-static ssize_t random_write(struct file *file, const char __user *buffer,
3884 +- size_t count, loff_t *ppos)
3885 ++static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
3886 + {
3887 +- size_t ret;
3888 ++ int ret;
3889 +
3890 +- ret = write_pool(&input_pool, buffer, count);
3891 +- if (ret)
3892 ++ ret = wait_for_random_bytes();
3893 ++ if (ret != 0)
3894 + return ret;
3895 +-
3896 +- return (ssize_t)count;
3897 ++ return get_random_bytes_user(iter);
3898 + }
3899 +
3900 + static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
3901 + {
3902 +- int size, ent_count;
3903 + int __user *p = (int __user *)arg;
3904 +- int retval;
3905 ++ int ent_count;
3906 +
3907 + switch (cmd) {
3908 + case RNDGETENTCNT:
3909 +- /* inherently racy, no point locking */
3910 +- ent_count = ENTROPY_BITS(&input_pool);
3911 +- if (put_user(ent_count, p))
3912 ++ /* Inherently racy, no point locking. */
3913 ++ if (put_user(input_pool.init_bits, p))
3914 + return -EFAULT;
3915 + return 0;
3916 + case RNDADDTOENTCNT:
3917 +@@ -1940,41 +1331,48 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
3918 + return -EPERM;
3919 + if (get_user(ent_count, p))
3920 + return -EFAULT;
3921 +- return credit_entropy_bits_safe(&input_pool, ent_count);
3922 +- case RNDADDENTROPY:
3923 ++ if (ent_count < 0)
3924 ++ return -EINVAL;
3925 ++ credit_init_bits(ent_count);
3926 ++ return 0;
3927 ++ case RNDADDENTROPY: {
3928 ++ struct iov_iter iter;
3929 ++ struct iovec iov;
3930 ++ ssize_t ret;
3931 ++ int len;
3932 ++
3933 + if (!capable(CAP_SYS_ADMIN))
3934 + return -EPERM;
3935 + if (get_user(ent_count, p++))
3936 + return -EFAULT;
3937 + if (ent_count < 0)
3938 + return -EINVAL;
3939 +- if (get_user(size, p++))
3940 ++ if (get_user(len, p++))
3941 ++ return -EFAULT;
3942 ++ ret = import_single_range(WRITE, p, len, &iov, &iter);
3943 ++ if (unlikely(ret))
3944 ++ return ret;
3945 ++ ret = write_pool_user(&iter);
3946 ++ if (unlikely(ret < 0))
3947 ++ return ret;
3948 ++ /* Since we're crediting, enforce that it was all written into the pool. */
3949 ++ if (unlikely(ret != len))
3950 + return -EFAULT;
3951 +- retval = write_pool(&input_pool, (const char __user *)p,
3952 +- size);
3953 +- if (retval < 0)
3954 +- return retval;
3955 +- return credit_entropy_bits_safe(&input_pool, ent_count);
3956 ++ credit_init_bits(ent_count);
3957 ++ return 0;
3958 ++ }
3959 + case RNDZAPENTCNT:
3960 + case RNDCLEARPOOL:
3961 +- /*
3962 +- * Clear the entropy pool counters. We no longer clear
3963 +- * the entropy pool, as that's silly.
3964 +- */
3965 ++ /* No longer has any effect. */
3966 + if (!capable(CAP_SYS_ADMIN))
3967 + return -EPERM;
3968 +- if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
3969 +- wake_up_interruptible(&random_write_wait);
3970 +- kill_fasync(&fasync, SIGIO, POLL_OUT);
3971 +- }
3972 + return 0;
3973 + case RNDRESEEDCRNG:
3974 + if (!capable(CAP_SYS_ADMIN))
3975 + return -EPERM;
3976 +- if (crng_init < 2)
3977 ++ if (!crng_ready())
3978 + return -ENODATA;
3979 +- crng_reseed(&primary_crng, &input_pool);
3980 +- WRITE_ONCE(crng_global_init_time, jiffies - 1);
3981 ++ crng_reseed();
3982 + return 0;
3983 + default:
3984 + return -EINVAL;
3985 +@@ -1987,55 +1385,56 @@ static int random_fasync(int fd, struct file *filp, int on)
3986 + }
3987 +
3988 + const struct file_operations random_fops = {
3989 +- .read = random_read,
3990 +- .write = random_write,
3991 +- .poll = random_poll,
3992 ++ .read_iter = random_read_iter,
3993 ++ .write_iter = random_write_iter,
3994 ++ .poll = random_poll,
3995 + .unlocked_ioctl = random_ioctl,
3996 + .compat_ioctl = compat_ptr_ioctl,
3997 + .fasync = random_fasync,
3998 + .llseek = noop_llseek,
3999 ++ .splice_read = generic_file_splice_read,
4000 ++ .splice_write = iter_file_splice_write,
4001 + };
4002 +
4003 + const struct file_operations urandom_fops = {
4004 +- .read = urandom_read,
4005 +- .write = random_write,
4006 ++ .read_iter = urandom_read_iter,
4007 ++ .write_iter = random_write_iter,
4008 + .unlocked_ioctl = random_ioctl,
4009 + .compat_ioctl = compat_ptr_ioctl,
4010 + .fasync = random_fasync,
4011 + .llseek = noop_llseek,
4012 ++ .splice_read = generic_file_splice_read,
4013 ++ .splice_write = iter_file_splice_write,
4014 + };
4015 +
4016 +-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
4017 +- unsigned int, flags)
4018 +-{
4019 +- int ret;
4020 +-
4021 +- if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
4022 +- return -EINVAL;
4023 +-
4024 +- /*
4025 +- * Requesting insecure and blocking randomness at the same time makes
4026 +- * no sense.
4027 +- */
4028 +- if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
4029 +- return -EINVAL;
4030 +-
4031 +- if (count > INT_MAX)
4032 +- count = INT_MAX;
4033 +-
4034 +- if (!(flags & GRND_INSECURE) && !crng_ready()) {
4035 +- if (flags & GRND_NONBLOCK)
4036 +- return -EAGAIN;
4037 +- ret = wait_for_random_bytes();
4038 +- if (unlikely(ret))
4039 +- return ret;
4040 +- }
4041 +- return urandom_read_nowarn(NULL, buf, count, NULL);
4042 +-}
4043 +
4044 + /********************************************************************
4045 + *
4046 +- * Sysctl interface
4047 ++ * Sysctl interface.
4048 ++ *
4049 ++ * These are partly unused legacy knobs with dummy values to not break
4050 ++ * userspace and partly still useful things. They are usually accessible
4051 ++ * in /proc/sys/kernel/random/ and are as follows:
4052 ++ *
4053 ++ * - boot_id - a UUID representing the current boot.
4054 ++ *
4055 ++ * - uuid - a random UUID, different each time the file is read.
4056 ++ *
4057 ++ * - poolsize - the number of bits of entropy that the input pool can
4058 ++ * hold, tied to the POOL_BITS constant.
4059 ++ *
4060 ++ * - entropy_avail - the number of bits of entropy currently in the
4061 ++ * input pool. Always <= poolsize.
4062 ++ *
4063 ++ * - write_wakeup_threshold - the amount of entropy in the input pool
4064 ++ * below which write polls to /dev/random will unblock, requesting
4065 ++ * more entropy, tied to the POOL_READY_BITS constant. It is writable
4066 ++ * to avoid breaking old userspaces, but writing to it does not
4067 ++ * change any behavior of the RNG.
4068 ++ *
4069 ++ * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
4070 ++ * It is writable to avoid breaking old userspaces, but writing
4071 ++ * to it does not change any behavior of the RNG.
4072 + *
4073 + ********************************************************************/
4074 +
4075 +@@ -2043,25 +1442,28 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
4076 +
4077 + #include <linux/sysctl.h>
4078 +
4079 +-static int min_write_thresh;
4080 +-static int max_write_thresh = INPUT_POOL_WORDS * 32;
4081 +-static int random_min_urandom_seed = 60;
4082 +-static char sysctl_bootid[16];
4083 ++static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
4084 ++static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
4085 ++static int sysctl_poolsize = POOL_BITS;
4086 ++static u8 sysctl_bootid[UUID_SIZE];
4087 +
4088 + /*
4089 + * This function is used to return both the bootid UUID and a random
4090 +- * UUID. The difference is in whether table->data is NULL; if it is,
4091 ++ * UUID. The difference is in whether table->data is NULL; if it is,
4092 + * then a new UUID is generated and returned to the user.
4093 +- *
4094 +- * If the user accesses this via the proc interface, the UUID will be
4095 +- * returned as an ASCII string in the standard UUID format; if via the
4096 +- * sysctl system call, as 16 bytes of binary data.
4097 + */
4098 +-static int proc_do_uuid(struct ctl_table *table, int write,
4099 +- void *buffer, size_t *lenp, loff_t *ppos)
4100 +-{
4101 +- struct ctl_table fake_table;
4102 +- unsigned char buf[64], tmp_uuid[16], *uuid;
4103 ++static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
4104 ++ size_t *lenp, loff_t *ppos)
4105 ++{
4106 ++ u8 tmp_uuid[UUID_SIZE], *uuid;
4107 ++ char uuid_string[UUID_STRING_LEN + 1];
4108 ++ struct ctl_table fake_table = {
4109 ++ .data = uuid_string,
4110 ++ .maxlen = UUID_STRING_LEN
4111 ++ };
4112 ++
4113 ++ if (write)
4114 ++ return -EPERM;
4115 +
4116 + uuid = table->data;
4117 + if (!uuid) {
4118 +@@ -2076,32 +1478,17 @@ static int proc_do_uuid(struct ctl_table *table, int write,
4119 + spin_unlock(&bootid_spinlock);
4120 + }
4121 +
4122 +- sprintf(buf, "%pU", uuid);
4123 +-
4124 +- fake_table.data = buf;
4125 +- fake_table.maxlen = sizeof(buf);
4126 +-
4127 +- return proc_dostring(&fake_table, write, buffer, lenp, ppos);
4128 ++ snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
4129 ++ return proc_dostring(&fake_table, 0, buf, lenp, ppos);
4130 + }
4131 +
4132 +-/*
4133 +- * Return entropy available scaled to integral bits
4134 +- */
4135 +-static int proc_do_entropy(struct ctl_table *table, int write,
4136 +- void *buffer, size_t *lenp, loff_t *ppos)
4137 ++/* The same as proc_dointvec, but writes don't change anything. */
4138 ++static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
4139 ++ size_t *lenp, loff_t *ppos)
4140 + {
4141 +- struct ctl_table fake_table;
4142 +- int entropy_count;
4143 +-
4144 +- entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
4145 +-
4146 +- fake_table.data = &entropy_count;
4147 +- fake_table.maxlen = sizeof(entropy_count);
4148 +-
4149 +- return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
4150 ++ return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
4151 + }
4152 +
4153 +-static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
4154 + extern struct ctl_table random_table[];
4155 + struct ctl_table random_table[] = {
4156 + {
4157 +@@ -2113,222 +1500,36 @@ struct ctl_table random_table[] = {
4158 + },
4159 + {
4160 + .procname = "entropy_avail",
4161 ++ .data = &input_pool.init_bits,
4162 + .maxlen = sizeof(int),
4163 + .mode = 0444,
4164 +- .proc_handler = proc_do_entropy,
4165 +- .data = &input_pool.entropy_count,
4166 ++ .proc_handler = proc_dointvec,
4167 + },
4168 + {
4169 + .procname = "write_wakeup_threshold",
4170 +- .data = &random_write_wakeup_bits,
4171 ++ .data = &sysctl_random_write_wakeup_bits,
4172 + .maxlen = sizeof(int),
4173 + .mode = 0644,
4174 +- .proc_handler = proc_dointvec_minmax,
4175 +- .extra1 = &min_write_thresh,
4176 +- .extra2 = &max_write_thresh,
4177 ++ .proc_handler = proc_do_rointvec,
4178 + },
4179 + {
4180 + .procname = "urandom_min_reseed_secs",
4181 +- .data = &random_min_urandom_seed,
4182 ++ .data = &sysctl_random_min_urandom_seed,
4183 + .maxlen = sizeof(int),
4184 + .mode = 0644,
4185 +- .proc_handler = proc_dointvec,
4186 ++ .proc_handler = proc_do_rointvec,
4187 + },
4188 + {
4189 + .procname = "boot_id",
4190 + .data = &sysctl_bootid,
4191 +- .maxlen = 16,
4192 + .mode = 0444,
4193 + .proc_handler = proc_do_uuid,
4194 + },
4195 + {
4196 + .procname = "uuid",
4197 +- .maxlen = 16,
4198 + .mode = 0444,
4199 + .proc_handler = proc_do_uuid,
4200 + },
4201 +-#ifdef ADD_INTERRUPT_BENCH
4202 +- {
4203 +- .procname = "add_interrupt_avg_cycles",
4204 +- .data = &avg_cycles,
4205 +- .maxlen = sizeof(avg_cycles),
4206 +- .mode = 0444,
4207 +- .proc_handler = proc_doulongvec_minmax,
4208 +- },
4209 +- {
4210 +- .procname = "add_interrupt_avg_deviation",
4211 +- .data = &avg_deviation,
4212 +- .maxlen = sizeof(avg_deviation),
4213 +- .mode = 0444,
4214 +- .proc_handler = proc_doulongvec_minmax,
4215 +- },
4216 +-#endif
4217 + { }
4218 + };
4219 +-#endif /* CONFIG_SYSCTL */
4220 +-
4221 +-struct batched_entropy {
4222 +- union {
4223 +- u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
4224 +- u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
4225 +- };
4226 +- unsigned int position;
4227 +- spinlock_t batch_lock;
4228 +-};
4229 +-
4230 +-/*
4231 +- * Get a random word for internal kernel use only. The quality of the random
4232 +- * number is good as /dev/urandom, but there is no backtrack protection, with
4233 +- * the goal of being quite fast and not depleting entropy. In order to ensure
4234 +- * that the randomness provided by this function is okay, the function
4235 +- * wait_for_random_bytes() should be called and return 0 at least once at any
4236 +- * point prior.
4237 +- */
4238 +-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
4239 +- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
4240 +-};
4241 +-
4242 +-u64 get_random_u64(void)
4243 +-{
4244 +- u64 ret;
4245 +- unsigned long flags;
4246 +- struct batched_entropy *batch;
4247 +- static void *previous;
4248 +-
4249 +- warn_unseeded_randomness(&previous);
4250 +-
4251 +- batch = raw_cpu_ptr(&batched_entropy_u64);
4252 +- spin_lock_irqsave(&batch->batch_lock, flags);
4253 +- if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
4254 +- extract_crng((u8 *)batch->entropy_u64);
4255 +- batch->position = 0;
4256 +- }
4257 +- ret = batch->entropy_u64[batch->position++];
4258 +- spin_unlock_irqrestore(&batch->batch_lock, flags);
4259 +- return ret;
4260 +-}
4261 +-EXPORT_SYMBOL(get_random_u64);
4262 +-
4263 +-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
4264 +- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
4265 +-};
4266 +-u32 get_random_u32(void)
4267 +-{
4268 +- u32 ret;
4269 +- unsigned long flags;
4270 +- struct batched_entropy *batch;
4271 +- static void *previous;
4272 +-
4273 +- warn_unseeded_randomness(&previous);
4274 +-
4275 +- batch = raw_cpu_ptr(&batched_entropy_u32);
4276 +- spin_lock_irqsave(&batch->batch_lock, flags);
4277 +- if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
4278 +- extract_crng((u8 *)batch->entropy_u32);
4279 +- batch->position = 0;
4280 +- }
4281 +- ret = batch->entropy_u32[batch->position++];
4282 +- spin_unlock_irqrestore(&batch->batch_lock, flags);
4283 +- return ret;
4284 +-}
4285 +-EXPORT_SYMBOL(get_random_u32);
4286 +-
4287 +-/* It's important to invalidate all potential batched entropy that might
4288 +- * be stored before the crng is initialized, which we can do lazily by
4289 +- * simply resetting the counter to zero so that it's re-extracted on the
4290 +- * next usage. */
4291 +-static void invalidate_batched_entropy(void)
4292 +-{
4293 +- int cpu;
4294 +- unsigned long flags;
4295 +-
4296 +- for_each_possible_cpu (cpu) {
4297 +- struct batched_entropy *batched_entropy;
4298 +-
4299 +- batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
4300 +- spin_lock_irqsave(&batched_entropy->batch_lock, flags);
4301 +- batched_entropy->position = 0;
4302 +- spin_unlock(&batched_entropy->batch_lock);
4303 +-
4304 +- batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
4305 +- spin_lock(&batched_entropy->batch_lock);
4306 +- batched_entropy->position = 0;
4307 +- spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
4308 +- }
4309 +-}
4310 +-
4311 +-/**
4312 +- * randomize_page - Generate a random, page aligned address
4313 +- * @start: The smallest acceptable address the caller will take.
4314 +- * @range: The size of the area, starting at @start, within which the
4315 +- * random address must fall.
4316 +- *
4317 +- * If @start + @range would overflow, @range is capped.
4318 +- *
4319 +- * NOTE: Historical use of randomize_range, which this replaces, presumed that
4320 +- * @start was already page aligned. We now align it regardless.
4321 +- *
4322 +- * Return: A page aligned address within [start, start + range). On error,
4323 +- * @start is returned.
4324 +- */
4325 +-unsigned long
4326 +-randomize_page(unsigned long start, unsigned long range)
4327 +-{
4328 +- if (!PAGE_ALIGNED(start)) {
4329 +- range -= PAGE_ALIGN(start) - start;
4330 +- start = PAGE_ALIGN(start);
4331 +- }
4332 +-
4333 +- if (start > ULONG_MAX - range)
4334 +- range = ULONG_MAX - start;
4335 +-
4336 +- range >>= PAGE_SHIFT;
4337 +-
4338 +- if (range == 0)
4339 +- return start;
4340 +-
4341 +- return start + (get_random_long() % range << PAGE_SHIFT);
4342 +-}
4343 +-
4344 +-/* Interface for in-kernel drivers of true hardware RNGs.
4345 +- * Those devices may produce endless random bits and will be throttled
4346 +- * when our pool is full.
4347 +- */
4348 +-void add_hwgenerator_randomness(const char *buffer, size_t count,
4349 +- size_t entropy)
4350 +-{
4351 +- struct entropy_store *poolp = &input_pool;
4352 +-
4353 +- if (unlikely(crng_init == 0)) {
4354 +- size_t ret = crng_fast_load(buffer, count);
4355 +- count -= ret;
4356 +- buffer += ret;
4357 +- if (!count || crng_init == 0)
4358 +- return;
4359 +- }
4360 +-
4361 +- /* Suspend writing if we're above the trickle threshold.
4362 +- * We'll be woken up again once below random_write_wakeup_thresh,
4363 +- * or when the calling thread is about to terminate.
4364 +- */
4365 +- wait_event_interruptible(random_write_wait,
4366 +- !system_wq || kthread_should_stop() ||
4367 +- ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
4368 +- mix_pool_bytes(poolp, buffer, count);
4369 +- credit_entropy_bits(poolp, entropy);
4370 +-}
4371 +-EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
4372 +-
4373 +-/* Handle random seed passed by bootloader.
4374 +- * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
4375 +- * it would be regarded as device data.
4376 +- * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
4377 +- */
4378 +-void add_bootloader_randomness(const void *buf, unsigned int size)
4379 +-{
4380 +- if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
4381 +- add_hwgenerator_randomness(buf, size, size * 8);
4382 +- else
4383 +- add_device_randomness(buf, size);
4384 +-}
4385 +-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
4386 ++#endif /* CONFIG_SYSCTL */
4387 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
4388 +index 840fd075c56f1..6284db50ec9bf 100644
4389 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
4390 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
4391 +@@ -226,6 +226,17 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
4392 + dev_dbg(dev, "sid 0x%x status 0x%x\n",
4393 + cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
4394 + }
4395 ++ if (privdata->mp2_ops->discovery_status &&
4396 ++ privdata->mp2_ops->discovery_status(privdata) == 0) {
4397 ++ amd_sfh_hid_client_deinit(privdata);
4398 ++ for (i = 0; i < cl_data->num_hid_devices; i++) {
4399 ++ devm_kfree(dev, cl_data->feature_report[i]);
4400 ++ devm_kfree(dev, in_data->input_report[i]);
4401 ++ devm_kfree(dev, cl_data->report_descr[i]);
4402 ++ }
4403 ++ dev_warn(dev, "Failed to discover, sensors not enabled\n");
4404 ++ return -EOPNOTSUPP;
4405 ++ }
4406 + schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
4407 + return 0;
4408 +
4409 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
4410 +index 561bb27f42b10..ae8f1f2536e94 100644
4411 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
4412 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
4413 +@@ -126,6 +126,12 @@ static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
4414 + return 0;
4415 + }
4416 +
4417 ++static int amd_sfh_dis_sts_v2(struct amd_mp2_dev *privdata)
4418 ++{
4419 ++ return (readl(privdata->mmio + AMD_P2C_MSG(1)) &
4420 ++ SENSOR_DISCOVERY_STATUS_MASK) >> SENSOR_DISCOVERY_STATUS_SHIFT;
4421 ++}
4422 ++
4423 + void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
4424 + {
4425 + union sfh_cmd_param cmd_param;
4426 +@@ -241,6 +247,7 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = {
4427 + .response = amd_sfh_wait_response_v2,
4428 + .clear_intr = amd_sfh_clear_intr_v2,
4429 + .init_intr = amd_sfh_irq_init_v2,
4430 ++ .discovery_status = amd_sfh_dis_sts_v2,
4431 + };
4432 +
4433 + static const struct amd_mp2_ops amd_sfh_ops = {
4434 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
4435 +index 00fc083dc1239..2d3203d3daeb3 100644
4436 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
4437 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
4438 +@@ -38,6 +38,9 @@
4439 +
4440 + #define AMD_SFH_IDLE_LOOP 200
4441 +
4442 ++#define SENSOR_DISCOVERY_STATUS_MASK GENMASK(5, 3)
4443 ++#define SENSOR_DISCOVERY_STATUS_SHIFT 3
4444 ++
4445 + /* SFH Command register */
4446 + union sfh_cmd_base {
4447 + u32 ul;
4448 +@@ -142,5 +145,6 @@ struct amd_mp2_ops {
4449 + int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
4450 + void (*clear_intr)(struct amd_mp2_dev *privdata);
4451 + int (*init_intr)(struct amd_mp2_dev *privdata);
4452 ++ int (*discovery_status)(struct amd_mp2_dev *privdata);
4453 + };
4454 + #endif
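The new discovery_status op extracts a three-bit field from the P2C message register with a mask/shift pair. A minimal userspace sketch of the same GENMASK arithmetic; the register value is hypothetical and GENMASK is simplified to 32 bits:

    #include <stdio.h>
    #include <stdint.h>

    /* 32-bit simplification of the kernel's GENMASK(h, l) */
    #define GENMASK(h, l) ((~0U << (l)) & (~0U >> (31 - (h))))

    #define SENSOR_DISCOVERY_STATUS_MASK  GENMASK(5, 3)   /* 0x38 */
    #define SENSOR_DISCOVERY_STATUS_SHIFT 3

    int main(void)
    {
        uint32_t reg = 0x28;  /* hypothetical register read: bits 5:3 = 5 */
        uint32_t sts = (reg & SENSOR_DISCOVERY_STATUS_MASK) >>
                       SENSOR_DISCOVERY_STATUS_SHIFT;

        printf("discovery status = %u\n", sts);  /* prints 5 */
        return 0;
    }

A status of 0 is what the amd_sfh_hid_client_init hunk above treats as "no sensors discovered" before bailing out with -EOPNOTSUPP.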
4455 +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
4456 +index aea8125a4db8e..50d9113f54025 100644
4457 +--- a/drivers/hv/vmbus_drv.c
4458 ++++ b/drivers/hv/vmbus_drv.c
4459 +@@ -1381,7 +1381,7 @@ static void vmbus_isr(void)
4460 + tasklet_schedule(&hv_cpu->msg_dpc);
4461 + }
4462 +
4463 +- add_interrupt_randomness(vmbus_interrupt, 0);
4464 ++ add_interrupt_randomness(vmbus_interrupt);
4465 + }
4466 +
4467 + static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
4468 +diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
4469 +index dd335ae1122b0..44a7d36446c52 100644
4470 +--- a/drivers/net/Kconfig
4471 ++++ b/drivers/net/Kconfig
4472 +@@ -81,7 +81,6 @@ config WIREGUARD
4473 + select CRYPTO
4474 + select CRYPTO_LIB_CURVE25519
4475 + select CRYPTO_LIB_CHACHA20POLY1305
4476 +- select CRYPTO_LIB_BLAKE2S
4477 + select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
4478 + select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
4479 + select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
4480 +diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
4481 +index 2b1873061912d..5581747947e57 100644
4482 +--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
4483 ++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
4484 +@@ -378,7 +378,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
4485 +
4486 + do {
4487 + *xdp = xsk_buff_alloc(rx_ring->xsk_pool);
4488 +- if (!xdp) {
4489 ++ if (!*xdp) {
4490 + ok = false;
4491 + break;
4492 + }
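The one-character ice_xsk.c change fixes an indirection bug: `xdp` is a pointer to the ring slot, so `!xdp` tested the (always non-NULL) iterator rather than the buffer that xsk_buff_alloc() just returned. A contrived standalone reproduction of the same mistake, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    /* stand-in for xsk_buff_alloc(): returns NULL when exhausted */
    static int *alloc_buf(int fail)
    {
        return fail ? NULL : malloc(sizeof(int));
    }

    static int fill_slot(int **slot, int fail)
    {
        *slot = alloc_buf(fail);
        if (!slot)      /* old check: the slot pointer itself, never NULL */
            return -1;
        if (!*slot)     /* fixed check: the allocation result */
            return -1;
        return 0;
    }

    int main(void)
    {
        int *buf;

        printf("fill_slot -> %d\n", fill_slot(&buf, 1));  /* -1, caught */
        free(buf);  /* buf is NULL here; free(NULL) is a no-op */
        return 0;
    }

Without the extra `*`, a failed allocation would be reported as success and the NULL buffer dereferenced later.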
4493 +diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
4494 +index c0cfd9b36c0b5..720952b92e784 100644
4495 +--- a/drivers/net/wireguard/noise.c
4496 ++++ b/drivers/net/wireguard/noise.c
4497 +@@ -302,6 +302,41 @@ void wg_noise_set_static_identity_private_key(
4498 + static_identity->static_public, private_key);
4499 + }
4500 +
4501 ++static void hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen)
4502 ++{
4503 ++ struct blake2s_state state;
4504 ++ u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
4505 ++ u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
4506 ++ int i;
4507 ++
4508 ++ if (keylen > BLAKE2S_BLOCK_SIZE) {
4509 ++ blake2s_init(&state, BLAKE2S_HASH_SIZE);
4510 ++ blake2s_update(&state, key, keylen);
4511 ++ blake2s_final(&state, x_key);
4512 ++ } else
4513 ++ memcpy(x_key, key, keylen);
4514 ++
4515 ++ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
4516 ++ x_key[i] ^= 0x36;
4517 ++
4518 ++ blake2s_init(&state, BLAKE2S_HASH_SIZE);
4519 ++ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
4520 ++ blake2s_update(&state, in, inlen);
4521 ++ blake2s_final(&state, i_hash);
4522 ++
4523 ++ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
4524 ++ x_key[i] ^= 0x5c ^ 0x36;
4525 ++
4526 ++ blake2s_init(&state, BLAKE2S_HASH_SIZE);
4527 ++ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
4528 ++ blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
4529 ++ blake2s_final(&state, i_hash);
4530 ++
4531 ++ memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
4532 ++ memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
4533 ++ memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
4534 ++}
4535 ++
4536 + /* This is Hugo Krawczyk's HKDF:
4537 + * - https://eprint.iacr.org/2010/264.pdf
4538 + * - https://tools.ietf.org/html/rfc5869
4539 +@@ -322,14 +357,14 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
4540 + ((third_len || third_dst) && (!second_len || !second_dst))));
4541 +
4542 + /* Extract entropy from data into secret */
4543 +- blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
4544 ++ hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
4545 +
4546 + if (!first_dst || !first_len)
4547 + goto out;
4548 +
4549 + /* Expand first key: key = secret, data = 0x1 */
4550 + output[0] = 1;
4551 +- blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
4552 ++ hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
4553 + memcpy(first_dst, output, first_len);
4554 +
4555 + if (!second_dst || !second_len)
4556 +@@ -337,8 +372,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
4557 +
4558 + /* Expand second key: key = secret, data = first-key || 0x2 */
4559 + output[BLAKE2S_HASH_SIZE] = 2;
4560 +- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
4561 +- BLAKE2S_HASH_SIZE);
4562 ++ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
4563 + memcpy(second_dst, output, second_len);
4564 +
4565 + if (!third_dst || !third_len)
4566 +@@ -346,8 +380,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
4567 +
4568 + /* Expand third key: key = secret, data = second-key || 0x3 */
4569 + output[BLAKE2S_HASH_SIZE] = 3;
4570 +- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
4571 +- BLAKE2S_HASH_SIZE);
4572 ++ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
4573 + memcpy(third_dst, output, third_len);
4574 +
4575 + out:
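The hmac() helper that noise.c gains here is the standard RFC 2104 construction specialized to BLAKE2s: reduce or zero-pad the key to one block, XOR with the 0x36/0x5c pads, and run two nested hashes. A runnable sketch of the same skeleton; the toy_hash stand-in is NOT cryptographic and exists only so the structure compiles and runs:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define BLOCK_SIZE 64   /* BLAKE2S_BLOCK_SIZE */
    #define HASH_SIZE  32   /* BLAKE2S_HASH_SIZE */

    /* toy stand-in for H(a || b): XOR-folds both inputs into the digest */
    static void toy_hash(uint8_t out[HASH_SIZE], const uint8_t *a, size_t alen,
                         const uint8_t *b, size_t blen)
    {
        size_t i;

        memset(out, 0, HASH_SIZE);
        for (i = 0; i < alen; ++i)
            out[i % HASH_SIZE] ^= a[i];
        for (i = 0; i < blen; ++i)
            out[(alen + i) % HASH_SIZE] ^= b[i];
    }

    static void hmac_sketch(uint8_t out[HASH_SIZE],
                            const uint8_t *in, size_t inlen,
                            const uint8_t *key, size_t keylen)
    {
        uint8_t k[BLOCK_SIZE] = { 0 }, inner[HASH_SIZE];
        size_t i;

        if (keylen > BLOCK_SIZE)        /* long keys are hashed down first */
            toy_hash(k, key, keylen, NULL, 0);
        else
            memcpy(k, key, keylen);

        for (i = 0; i < BLOCK_SIZE; ++i)
            k[i] ^= 0x36;                               /* ipad */
        toy_hash(inner, k, BLOCK_SIZE, in, inlen);      /* H(K^ipad || msg) */

        for (i = 0; i < BLOCK_SIZE; ++i)
            k[i] ^= 0x5c ^ 0x36;                        /* ipad -> opad */
        toy_hash(out, k, BLOCK_SIZE, inner, HASH_SIZE); /* H(K^opad || inner) */
    }

    int main(void)
    {
        uint8_t mac[HASH_SIZE];

        hmac_sketch(mac, (const uint8_t *)"msg", 3, (const uint8_t *)"key", 3);
        printf("mac[0] = %02x\n", mac[0]);
        return 0;
    }

Moving this into WireGuard lets the blake2s library drop blake2s256_hmac(), whose only user this was (see the lib/crypto/blake2s.c hunk below).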
4576 +diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h
4577 +index bc3fb59442ce5..4e30e1799e614 100644
4578 +--- a/include/crypto/blake2s.h
4579 ++++ b/include/crypto/blake2s.h
4580 +@@ -101,7 +101,4 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
4581 + blake2s_final(&state, out);
4582 + }
4583 +
4584 +-void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
4585 +- const size_t keylen);
4586 +-
4587 + #endif /* _CRYPTO_BLAKE2S_H */
4588 +diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
4589 +index dabaee6987186..b3ea73b819443 100644
4590 +--- a/include/crypto/chacha.h
4591 ++++ b/include/crypto/chacha.h
4592 +@@ -47,12 +47,19 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
4593 + hchacha_block_generic(state, out, nrounds);
4594 + }
4595 +
4596 ++enum chacha_constants { /* expand 32-byte k */
4597 ++ CHACHA_CONSTANT_EXPA = 0x61707865U,
4598 ++ CHACHA_CONSTANT_ND_3 = 0x3320646eU,
4599 ++ CHACHA_CONSTANT_2_BY = 0x79622d32U,
4600 ++ CHACHA_CONSTANT_TE_K = 0x6b206574U
4601 ++};
4602 ++
4603 + static inline void chacha_init_consts(u32 *state)
4604 + {
4605 +- state[0] = 0x61707865; /* "expa" */
4606 +- state[1] = 0x3320646e; /* "nd 3" */
4607 +- state[2] = 0x79622d32; /* "2-by" */
4608 +- state[3] = 0x6b206574; /* "te k" */
4609 ++ state[0] = CHACHA_CONSTANT_EXPA;
4610 ++ state[1] = CHACHA_CONSTANT_ND_3;
4611 ++ state[2] = CHACHA_CONSTANT_2_BY;
4612 ++ state[3] = CHACHA_CONSTANT_TE_K;
4613 + }
4614 +
4615 + void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
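The four enum values are the ASCII string "expand 32-byte k" read as little-endian 32-bit words, which is why the removed comments said "expa", "nd 3", "2-by", "te k". A quick check, assuming a little-endian host:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
        const uint32_t c[4] = { 0x61707865U, 0x3320646eU,
                                0x79622d32U, 0x6b206574U };
        char s[17];

        memcpy(s, c, 16);   /* reinterpret the words as bytes */
        s[16] = '\0';
        printf("%s\n", s);  /* prints: expand 32-byte k */
        return 0;
    }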
4616 +diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
4617 +index c4165126937e4..88e4d145f7cda 100644
4618 +--- a/include/crypto/drbg.h
4619 ++++ b/include/crypto/drbg.h
4620 +@@ -136,7 +136,7 @@ struct drbg_state {
4621 + const struct drbg_state_ops *d_ops;
4622 + const struct drbg_core *core;
4623 + struct drbg_string test_data;
4624 +- struct random_ready_callback random_ready;
4625 ++ struct notifier_block random_ready;
4626 + };
4627 +
4628 + static inline __u8 drbg_statelen(struct drbg_state *drbg)
4629 +diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
4630 +index 8e50d487500f2..52363eee2b20e 100644
4631 +--- a/include/crypto/internal/blake2s.h
4632 ++++ b/include/crypto/internal/blake2s.h
4633 +@@ -11,11 +11,11 @@
4634 + #include <crypto/internal/hash.h>
4635 + #include <linux/string.h>
4636 +
4637 +-void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
4638 ++void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
4639 + size_t nblocks, const u32 inc);
4640 +
4641 +-void blake2s_compress_arch(struct blake2s_state *state,const u8 *block,
4642 +- size_t nblocks, const u32 inc);
4643 ++void blake2s_compress(struct blake2s_state *state, const u8 *block,
4644 ++ size_t nblocks, const u32 inc);
4645 +
4646 + bool blake2s_selftest(void);
4647 +
4648 +@@ -24,14 +24,11 @@ static inline void blake2s_set_lastblock(struct blake2s_state *state)
4649 + state->f[0] = -1;
4650 + }
4651 +
4652 +-typedef void (*blake2s_compress_t)(struct blake2s_state *state,
4653 +- const u8 *block, size_t nblocks, u32 inc);
4654 +-
4655 + /* Helper functions for BLAKE2s shared by the library and shash APIs */
4656 +
4657 +-static inline void __blake2s_update(struct blake2s_state *state,
4658 +- const u8 *in, size_t inlen,
4659 +- blake2s_compress_t compress)
4660 ++static __always_inline void
4661 ++__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
4662 ++ bool force_generic)
4663 + {
4664 + const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
4665 +
4666 +@@ -39,7 +36,12 @@ static inline void __blake2s_update(struct blake2s_state *state,
4667 + return;
4668 + if (inlen > fill) {
4669 + memcpy(state->buf + state->buflen, in, fill);
4670 +- (*compress)(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
4671 ++ if (force_generic)
4672 ++ blake2s_compress_generic(state, state->buf, 1,
4673 ++ BLAKE2S_BLOCK_SIZE);
4674 ++ else
4675 ++ blake2s_compress(state, state->buf, 1,
4676 ++ BLAKE2S_BLOCK_SIZE);
4677 + state->buflen = 0;
4678 + in += fill;
4679 + inlen -= fill;
4680 +@@ -47,7 +49,12 @@ static inline void __blake2s_update(struct blake2s_state *state,
4681 + if (inlen > BLAKE2S_BLOCK_SIZE) {
4682 + const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
4683 + /* Hash one less (full) block than strictly possible */
4684 +- (*compress)(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
4685 ++ if (force_generic)
4686 ++ blake2s_compress_generic(state, in, nblocks - 1,
4687 ++ BLAKE2S_BLOCK_SIZE);
4688 ++ else
4689 ++ blake2s_compress(state, in, nblocks - 1,
4690 ++ BLAKE2S_BLOCK_SIZE);
4691 + in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
4692 + inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
4693 + }
4694 +@@ -55,13 +62,16 @@ static inline void __blake2s_update(struct blake2s_state *state,
4695 + state->buflen += inlen;
4696 + }
4697 +
4698 +-static inline void __blake2s_final(struct blake2s_state *state, u8 *out,
4699 +- blake2s_compress_t compress)
4700 ++static __always_inline void
4701 ++__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic)
4702 + {
4703 + blake2s_set_lastblock(state);
4704 + memset(state->buf + state->buflen, 0,
4705 + BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
4706 +- (*compress)(state, state->buf, 1, state->buflen);
4707 ++ if (force_generic)
4708 ++ blake2s_compress_generic(state, state->buf, 1, state->buflen);
4709 ++ else
4710 ++ blake2s_compress(state, state->buf, 1, state->buflen);
4711 + cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
4712 + memcpy(out, state->h, state->outlen);
4713 + }
4714 +@@ -99,20 +109,20 @@ static inline int crypto_blake2s_init(struct shash_desc *desc)
4715 +
4716 + static inline int crypto_blake2s_update(struct shash_desc *desc,
4717 + const u8 *in, unsigned int inlen,
4718 +- blake2s_compress_t compress)
4719 ++ bool force_generic)
4720 + {
4721 + struct blake2s_state *state = shash_desc_ctx(desc);
4722 +
4723 +- __blake2s_update(state, in, inlen, compress);
4724 ++ __blake2s_update(state, in, inlen, force_generic);
4725 + return 0;
4726 + }
4727 +
4728 + static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out,
4729 +- blake2s_compress_t compress)
4730 ++ bool force_generic)
4731 + {
4732 + struct blake2s_state *state = shash_desc_ctx(desc);
4733 +
4734 +- __blake2s_final(state, out, compress);
4735 ++ __blake2s_final(state, out, force_generic);
4736 + return 0;
4737 + }
4738 +
4739 +diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
4740 +index 991911048857a..c88ccc48877d6 100644
4741 +--- a/include/linux/cpuhotplug.h
4742 ++++ b/include/linux/cpuhotplug.h
4743 +@@ -99,6 +99,7 @@ enum cpuhp_state {
4744 + CPUHP_LUSTRE_CFS_DEAD,
4745 + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
4746 + CPUHP_PADATA_DEAD,
4747 ++ CPUHP_RANDOM_PREPARE,
4748 + CPUHP_WORKQUEUE_PREP,
4749 + CPUHP_POWER_NUMA_PREPARE,
4750 + CPUHP_HRTIMERS_PREPARE,
4751 +@@ -238,6 +239,7 @@ enum cpuhp_state {
4752 + CPUHP_AP_PERF_CSKY_ONLINE,
4753 + CPUHP_AP_WATCHDOG_ONLINE,
4754 + CPUHP_AP_WORKQUEUE_ONLINE,
4755 ++ CPUHP_AP_RANDOM_ONLINE,
4756 + CPUHP_AP_RCUTREE_ONLINE,
4757 + CPUHP_AP_BASE_CACHEINFO_ONLINE,
4758 + CPUHP_AP_ONLINE_DYN,
4759 +diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
4760 +index 8e6dd908da216..aa1d4da03538b 100644
4761 +--- a/include/linux/hw_random.h
4762 ++++ b/include/linux/hw_random.h
4763 +@@ -60,7 +60,5 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
4764 + /** Unregister a Hardware Random Number Generator driver. */
4765 + extern void hwrng_unregister(struct hwrng *rng);
4766 + extern void devm_hwrng_unregister(struct device *dev, struct hwrng *rng);
4767 +-/** Feed random bits into the pool. */
4768 +-extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
4769 +
4770 + #endif /* LINUX_HWRANDOM_H_ */
4771 +diff --git a/include/linux/mm.h b/include/linux/mm.h
4772 +index 04345ff97f8ca..85205adcdd0d1 100644
4773 +--- a/include/linux/mm.h
4774 ++++ b/include/linux/mm.h
4775 +@@ -2607,6 +2607,7 @@ extern int install_special_mapping(struct mm_struct *mm,
4776 + unsigned long flags, struct page **pages);
4777 +
4778 + unsigned long randomize_stack_top(unsigned long stack_top);
4779 ++unsigned long randomize_page(unsigned long start, unsigned long range);
4780 +
4781 + extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
4782 +
4783 +diff --git a/include/linux/prandom.h b/include/linux/prandom.h
4784 +index 056d31317e499..a4aadd2dc153e 100644
4785 +--- a/include/linux/prandom.h
4786 ++++ b/include/linux/prandom.h
4787 +@@ -10,6 +10,7 @@
4788 +
4789 + #include <linux/types.h>
4790 + #include <linux/percpu.h>
4791 ++#include <linux/siphash.h>
4792 +
4793 + u32 prandom_u32(void);
4794 + void prandom_bytes(void *buf, size_t nbytes);
4795 +@@ -27,15 +28,10 @@ DECLARE_PER_CPU(unsigned long, net_rand_noise);
4796 + * The core SipHash round function. Each line can be executed in
4797 + * parallel given enough CPU resources.
4798 + */
4799 +-#define PRND_SIPROUND(v0, v1, v2, v3) ( \
4800 +- v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
4801 +- v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
4802 +- v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
4803 +- v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
4804 +-)
4805 ++#define PRND_SIPROUND(v0, v1, v2, v3) SIPHASH_PERMUTATION(v0, v1, v2, v3)
4806 +
4807 +-#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
4808 +-#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
4809 ++#define PRND_K0 (SIPHASH_CONST_0 ^ SIPHASH_CONST_2)
4810 ++#define PRND_K1 (SIPHASH_CONST_1 ^ SIPHASH_CONST_3)
4811 +
4812 + #elif BITS_PER_LONG == 32
4813 + /*
4814 +@@ -43,14 +39,9 @@ DECLARE_PER_CPU(unsigned long, net_rand_noise);
4815 + * This is weaker, but 32-bit machines are not used for high-traffic
4816 + * applications, so there is less output for an attacker to analyze.
4817 + */
4818 +-#define PRND_SIPROUND(v0, v1, v2, v3) ( \
4819 +- v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \
4820 +- v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
4821 +- v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \
4822 +- v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \
4823 +-)
4824 +-#define PRND_K0 0x6c796765
4825 +-#define PRND_K1 0x74656462
4826 ++#define PRND_SIPROUND(v0, v1, v2, v3) HSIPHASH_PERMUTATION(v0, v1, v2, v3)
4827 ++#define PRND_K0 (HSIPHASH_CONST_0 ^ HSIPHASH_CONST_2)
4828 ++#define PRND_K1 (HSIPHASH_CONST_1 ^ HSIPHASH_CONST_3)
4829 +
4830 + #else
4831 + #error Unsupported BITS_PER_LONG
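The rewritten PRND_SIPROUND is not a behavior change: SIPHASH_PERMUTATION sequences the same operations as the old open-coded macro, merely grouping them per variable pair. A standalone check that one round of each produces identical state:

    #include <stdio.h>
    #include <assert.h>
    #include <stdint.h>

    static inline uint64_t rol64(uint64_t w, unsigned int s)
    {
        return (w << s) | (w >> (64 - s));
    }

    /* the removed open-coded round, copied from the old prandom.h */
    #define OLD_ROUND(v0, v1, v2, v3) ( \
        v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
        v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
        v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
        v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) )

    /* the shared macro, copied from the siphash.h hunk later in this patch */
    #define SIPHASH_PERMUTATION(a, b, c, d) ( \
        (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
        (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
        (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
        (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))

    int main(void)
    {
        uint64_t a0 = 1, b0 = 2, c0 = 3, d0 = 4;
        uint64_t a1 = 1, b1 = 2, c1 = 3, d1 = 4;

        OLD_ROUND(a0, b0, c0, d0);
        SIPHASH_PERMUTATION(a1, b1, c1, d1);
        assert(a0 == a1 && b0 == b1 && c0 == c1 && d0 == d1);
        puts("rounds match");
        return 0;
    }

The reordering is legal because the statements that swap places touch disjoint variable pairs, so the data dependencies are unchanged.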
4832 +diff --git a/include/linux/random.h b/include/linux/random.h
4833 +index f45b8be3e3c4e..917470c4490ac 100644
4834 +--- a/include/linux/random.h
4835 ++++ b/include/linux/random.h
4836 +@@ -1,9 +1,5 @@
4837 + /* SPDX-License-Identifier: GPL-2.0 */
4838 +-/*
4839 +- * include/linux/random.h
4840 +- *
4841 +- * Include file for the random number generator.
4842 +- */
4843 ++
4844 + #ifndef _LINUX_RANDOM_H
4845 + #define _LINUX_RANDOM_H
4846 +
4847 +@@ -14,41 +10,26 @@
4848 +
4849 + #include <uapi/linux/random.h>
4850 +
4851 +-struct random_ready_callback {
4852 +- struct list_head list;
4853 +- void (*func)(struct random_ready_callback *rdy);
4854 +- struct module *owner;
4855 +-};
4856 ++struct notifier_block;
4857 +
4858 +-extern void add_device_randomness(const void *, unsigned int);
4859 +-extern void add_bootloader_randomness(const void *, unsigned int);
4860 ++void add_device_randomness(const void *buf, size_t len);
4861 ++void add_bootloader_randomness(const void *buf, size_t len);
4862 ++void add_input_randomness(unsigned int type, unsigned int code,
4863 ++ unsigned int value) __latent_entropy;
4864 ++void add_interrupt_randomness(int irq) __latent_entropy;
4865 ++void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
4866 +
4867 + #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
4868 + static inline void add_latent_entropy(void)
4869 + {
4870 +- add_device_randomness((const void *)&latent_entropy,
4871 +- sizeof(latent_entropy));
4872 ++ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
4873 + }
4874 + #else
4875 +-static inline void add_latent_entropy(void) {}
4876 +-#endif
4877 +-
4878 +-extern void add_input_randomness(unsigned int type, unsigned int code,
4879 +- unsigned int value) __latent_entropy;
4880 +-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
4881 +-
4882 +-extern void get_random_bytes(void *buf, int nbytes);
4883 +-extern int wait_for_random_bytes(void);
4884 +-extern int __init rand_initialize(void);
4885 +-extern bool rng_is_initialized(void);
4886 +-extern int add_random_ready_callback(struct random_ready_callback *rdy);
4887 +-extern void del_random_ready_callback(struct random_ready_callback *rdy);
4888 +-extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
4889 +-
4890 +-#ifndef MODULE
4891 +-extern const struct file_operations random_fops, urandom_fops;
4892 ++static inline void add_latent_entropy(void) { }
4893 + #endif
4894 +
4895 ++void get_random_bytes(void *buf, size_t len);
4896 ++size_t __must_check get_random_bytes_arch(void *buf, size_t len);
4897 + u32 get_random_u32(void);
4898 + u64 get_random_u64(void);
4899 + static inline unsigned int get_random_int(void)
4900 +@@ -80,36 +61,38 @@ static inline unsigned long get_random_long(void)
4901 +
4902 + static inline unsigned long get_random_canary(void)
4903 + {
4904 +- unsigned long val = get_random_long();
4905 +-
4906 +- return val & CANARY_MASK;
4907 ++ return get_random_long() & CANARY_MASK;
4908 + }
4909 +
4910 ++int __init random_init(const char *command_line);
4911 ++bool rng_is_initialized(void);
4912 ++int wait_for_random_bytes(void);
4913 ++int register_random_ready_notifier(struct notifier_block *nb);
4914 ++int unregister_random_ready_notifier(struct notifier_block *nb);
4915 ++
4916 + /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
4917 + * Returns the result of the call to wait_for_random_bytes. */
4918 +-static inline int get_random_bytes_wait(void *buf, int nbytes)
4919 ++static inline int get_random_bytes_wait(void *buf, size_t nbytes)
4920 + {
4921 + int ret = wait_for_random_bytes();
4922 + get_random_bytes(buf, nbytes);
4923 + return ret;
4924 + }
4925 +
4926 +-#define declare_get_random_var_wait(var) \
4927 +- static inline int get_random_ ## var ## _wait(var *out) { \
4928 ++#define declare_get_random_var_wait(name, ret_type) \
4929 ++ static inline int get_random_ ## name ## _wait(ret_type *out) { \
4930 + int ret = wait_for_random_bytes(); \
4931 + if (unlikely(ret)) \
4932 + return ret; \
4933 +- *out = get_random_ ## var(); \
4934 ++ *out = get_random_ ## name(); \
4935 + return 0; \
4936 + }
4937 +-declare_get_random_var_wait(u32)
4938 +-declare_get_random_var_wait(u64)
4939 +-declare_get_random_var_wait(int)
4940 +-declare_get_random_var_wait(long)
4941 ++declare_get_random_var_wait(u32, u32)
4942 ++declare_get_random_var_wait(u64, u64)
4943 ++declare_get_random_var_wait(int, unsigned int)
4944 ++declare_get_random_var_wait(long, unsigned long)
4945 + #undef declare_get_random_var
4946 +
4947 +-unsigned long randomize_page(unsigned long start, unsigned long range);
4948 +-
4949 + /*
4950 + * This is designed to be standalone for just prandom
4951 + * users, but for now we include it from <linux/random.h>
4952 +@@ -120,22 +103,10 @@ unsigned long randomize_page(unsigned long start, unsigned long range);
4953 + #ifdef CONFIG_ARCH_RANDOM
4954 + # include <asm/archrandom.h>
4955 + #else
4956 +-static inline bool __must_check arch_get_random_long(unsigned long *v)
4957 +-{
4958 +- return false;
4959 +-}
4960 +-static inline bool __must_check arch_get_random_int(unsigned int *v)
4961 +-{
4962 +- return false;
4963 +-}
4964 +-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
4965 +-{
4966 +- return false;
4967 +-}
4968 +-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
4969 +-{
4970 +- return false;
4971 +-}
4972 ++static inline bool __must_check arch_get_random_long(unsigned long *v) { return false; }
4973 ++static inline bool __must_check arch_get_random_int(unsigned int *v) { return false; }
4974 ++static inline bool __must_check arch_get_random_seed_long(unsigned long *v) { return false; }
4975 ++static inline bool __must_check arch_get_random_seed_int(unsigned int *v) { return false; }
4976 + #endif
4977 +
4978 + /*
4979 +@@ -158,4 +129,13 @@ static inline bool __init arch_get_random_long_early(unsigned long *v)
4980 + }
4981 + #endif
4982 +
4983 ++#ifdef CONFIG_SMP
4984 ++int random_prepare_cpu(unsigned int cpu);
4985 ++int random_online_cpu(unsigned int cpu);
4986 ++#endif
4987 ++
4988 ++#ifndef MODULE
4989 ++extern const struct file_operations random_fops, urandom_fops;
4990 ++#endif
4991 ++
4992 + #endif /* _LINUX_RANDOM_H */
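declare_get_random_var_wait() now takes the return type as a second parameter because get_random_int() and get_random_long() return unsigned types that no longer match the macro name. A userspace sketch of the expansion, with the kernel entropy calls stubbed out and unlikely() dropped:

    #include <stdio.h>

    /* stand-ins for wait_for_random_bytes() and get_random_int() */
    static int wait_for_random_bytes(void) { return 0; }
    static unsigned int get_random_int(void) { return 4; /* stub */ }

    #define declare_get_random_var_wait(name, ret_type)               \
        static inline int get_random_ ## name ## _wait(ret_type *out) \
        {                                                             \
            int ret = wait_for_random_bytes();                        \
            if (ret)                                                  \
                return ret;                                           \
            *out = get_random_ ## name();                             \
            return 0;                                                 \
        }
    declare_get_random_var_wait(int, unsigned int)

    int main(void)
    {
        unsigned int v;

        if (!get_random_int_wait(&v))   /* expands to the wrapper above */
            printf("got %u\n", v);
        return 0;
    }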
4993 +diff --git a/include/linux/siphash.h b/include/linux/siphash.h
4994 +index 0cda61855d907..0bb5ecd507bef 100644
4995 +--- a/include/linux/siphash.h
4996 ++++ b/include/linux/siphash.h
4997 +@@ -136,4 +136,32 @@ static inline u32 hsiphash(const void *data, size_t len,
4998 + return ___hsiphash_aligned(data, len, key);
4999 + }
5000 +
5001 ++/*
5002 ++ * These macros expose the raw SipHash and HalfSipHash permutations.
5003 ++ * Do not use them directly! If you think you have a use for them,
5004 ++ * be sure to CC the maintainer of this file explaining why.
5005 ++ */
5006 ++
5007 ++#define SIPHASH_PERMUTATION(a, b, c, d) ( \
5008 ++ (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
5009 ++ (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
5010 ++ (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
5011 ++ (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
5012 ++
5013 ++#define SIPHASH_CONST_0 0x736f6d6570736575ULL
5014 ++#define SIPHASH_CONST_1 0x646f72616e646f6dULL
5015 ++#define SIPHASH_CONST_2 0x6c7967656e657261ULL
5016 ++#define SIPHASH_CONST_3 0x7465646279746573ULL
5017 ++
5018 ++#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
5019 ++ (a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \
5020 ++ (c) += (d), (d) = rol32((d), 8), (d) ^= (c), \
5021 ++ (a) += (d), (d) = rol32((d), 7), (d) ^= (a), \
5022 ++ (c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))
5023 ++
5024 ++#define HSIPHASH_CONST_0 0U
5025 ++#define HSIPHASH_CONST_1 0U
5026 ++#define HSIPHASH_CONST_2 0x6c796765U
5027 ++#define HSIPHASH_CONST_3 0x74656462U
5028 ++
5029 + #endif /* _LINUX_SIPHASH_H */
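SIPHASH_CONST_0..3 are the initialization constants from the SipHash reference implementation; read as big-endian ASCII they spell "somepseudorandomlygeneratedbytes". A quick check:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t c[4] = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL,
                                0x6c7967656e657261ULL, 0x7465646279746573ULL };
        int i, j;

        for (i = 0; i < 4; ++i)
            for (j = 56; j >= 0; j -= 8)    /* most significant byte first */
                putchar((int)((c[i] >> j) & 0xff));
        putchar('\n');  /* prints: somepseudorandomlygeneratedbytes */
        return 0;
    }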
5030 +diff --git a/include/linux/timex.h b/include/linux/timex.h
5031 +index 059b18eb1f1fa..3871b06bd302c 100644
5032 +--- a/include/linux/timex.h
5033 ++++ b/include/linux/timex.h
5034 +@@ -62,6 +62,8 @@
5035 + #include <linux/types.h>
5036 + #include <linux/param.h>
5037 +
5038 ++unsigned long random_get_entropy_fallback(void);
5039 ++
5040 + #include <asm/timex.h>
5041 +
5042 + #ifndef random_get_entropy
5043 +@@ -74,8 +76,14 @@
5044 + *
5045 + * By default we use get_cycles() for this purpose, but individual
5046 + * architectures may override this in their asm/timex.h header file.
5047 ++ * If a given arch does not have get_cycles(), then we fall back to
5048 ++ * using random_get_entropy_fallback().
5049 + */
5050 +-#define random_get_entropy() get_cycles()
5051 ++#ifdef get_cycles
5052 ++#define random_get_entropy() ((unsigned long)get_cycles())
5053 ++#else
5054 ++#define random_get_entropy() random_get_entropy_fallback()
5055 ++#endif
5056 + #endif
5057 +
5058 + /*
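random_get_entropy() now resolves in three tiers: an arch-specific macro in asm/timex.h wins outright, then get_cycles() if the architecture defines one, then the new timekeeping fallback. A userspace model of the selection logic; the fake counter is purely illustrative:

    #include <stdio.h>

    /* tier 2: pretend the arch header defined get_cycles() */
    #define get_cycles() fake_cycle_counter()

    unsigned long fake_cycle_counter(void) { return 12345; }
    unsigned long random_get_entropy_fallback(void) { return 0; }

    /* the selection logic from the timex.h hunk above */
    #ifndef random_get_entropy
    #ifdef get_cycles
    #define random_get_entropy() ((unsigned long)get_cycles())
    #else
    #define random_get_entropy() random_get_entropy_fallback()
    #endif
    #endif

    int main(void)
    {
        printf("entropy sample: %lu\n", random_get_entropy());
        return 0;
    }

Deleting the `#define get_cycles` line switches the program to the fallback path, mirroring an architecture with no cycle counter.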
5059 +diff --git a/include/trace/events/random.h b/include/trace/events/random.h
5060 +deleted file mode 100644
5061 +index 3d7b432ca5f31..0000000000000
5062 +--- a/include/trace/events/random.h
5063 ++++ /dev/null
5064 +@@ -1,247 +0,0 @@
5065 +-/* SPDX-License-Identifier: GPL-2.0 */
5066 +-#undef TRACE_SYSTEM
5067 +-#define TRACE_SYSTEM random
5068 +-
5069 +-#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
5070 +-#define _TRACE_RANDOM_H
5071 +-
5072 +-#include <linux/writeback.h>
5073 +-#include <linux/tracepoint.h>
5074 +-
5075 +-TRACE_EVENT(add_device_randomness,
5076 +- TP_PROTO(int bytes, unsigned long IP),
5077 +-
5078 +- TP_ARGS(bytes, IP),
5079 +-
5080 +- TP_STRUCT__entry(
5081 +- __field( int, bytes )
5082 +- __field(unsigned long, IP )
5083 +- ),
5084 +-
5085 +- TP_fast_assign(
5086 +- __entry->bytes = bytes;
5087 +- __entry->IP = IP;
5088 +- ),
5089 +-
5090 +- TP_printk("bytes %d caller %pS",
5091 +- __entry->bytes, (void *)__entry->IP)
5092 +-);
5093 +-
5094 +-DECLARE_EVENT_CLASS(random__mix_pool_bytes,
5095 +- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
5096 +-
5097 +- TP_ARGS(pool_name, bytes, IP),
5098 +-
5099 +- TP_STRUCT__entry(
5100 +- __field( const char *, pool_name )
5101 +- __field( int, bytes )
5102 +- __field(unsigned long, IP )
5103 +- ),
5104 +-
5105 +- TP_fast_assign(
5106 +- __entry->pool_name = pool_name;
5107 +- __entry->bytes = bytes;
5108 +- __entry->IP = IP;
5109 +- ),
5110 +-
5111 +- TP_printk("%s pool: bytes %d caller %pS",
5112 +- __entry->pool_name, __entry->bytes, (void *)__entry->IP)
5113 +-);
5114 +-
5115 +-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
5116 +- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
5117 +-
5118 +- TP_ARGS(pool_name, bytes, IP)
5119 +-);
5120 +-
5121 +-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
5122 +- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
5123 +-
5124 +- TP_ARGS(pool_name, bytes, IP)
5125 +-);
5126 +-
5127 +-TRACE_EVENT(credit_entropy_bits,
5128 +- TP_PROTO(const char *pool_name, int bits, int entropy_count,
5129 +- unsigned long IP),
5130 +-
5131 +- TP_ARGS(pool_name, bits, entropy_count, IP),
5132 +-
5133 +- TP_STRUCT__entry(
5134 +- __field( const char *, pool_name )
5135 +- __field( int, bits )
5136 +- __field( int, entropy_count )
5137 +- __field(unsigned long, IP )
5138 +- ),
5139 +-
5140 +- TP_fast_assign(
5141 +- __entry->pool_name = pool_name;
5142 +- __entry->bits = bits;
5143 +- __entry->entropy_count = entropy_count;
5144 +- __entry->IP = IP;
5145 +- ),
5146 +-
5147 +- TP_printk("%s pool: bits %d entropy_count %d caller %pS",
5148 +- __entry->pool_name, __entry->bits,
5149 +- __entry->entropy_count, (void *)__entry->IP)
5150 +-);
5151 +-
5152 +-TRACE_EVENT(debit_entropy,
5153 +- TP_PROTO(const char *pool_name, int debit_bits),
5154 +-
5155 +- TP_ARGS(pool_name, debit_bits),
5156 +-
5157 +- TP_STRUCT__entry(
5158 +- __field( const char *, pool_name )
5159 +- __field( int, debit_bits )
5160 +- ),
5161 +-
5162 +- TP_fast_assign(
5163 +- __entry->pool_name = pool_name;
5164 +- __entry->debit_bits = debit_bits;
5165 +- ),
5166 +-
5167 +- TP_printk("%s: debit_bits %d", __entry->pool_name,
5168 +- __entry->debit_bits)
5169 +-);
5170 +-
5171 +-TRACE_EVENT(add_input_randomness,
5172 +- TP_PROTO(int input_bits),
5173 +-
5174 +- TP_ARGS(input_bits),
5175 +-
5176 +- TP_STRUCT__entry(
5177 +- __field( int, input_bits )
5178 +- ),
5179 +-
5180 +- TP_fast_assign(
5181 +- __entry->input_bits = input_bits;
5182 +- ),
5183 +-
5184 +- TP_printk("input_pool_bits %d", __entry->input_bits)
5185 +-);
5186 +-
5187 +-TRACE_EVENT(add_disk_randomness,
5188 +- TP_PROTO(dev_t dev, int input_bits),
5189 +-
5190 +- TP_ARGS(dev, input_bits),
5191 +-
5192 +- TP_STRUCT__entry(
5193 +- __field( dev_t, dev )
5194 +- __field( int, input_bits )
5195 +- ),
5196 +-
5197 +- TP_fast_assign(
5198 +- __entry->dev = dev;
5199 +- __entry->input_bits = input_bits;
5200 +- ),
5201 +-
5202 +- TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev),
5203 +- MINOR(__entry->dev), __entry->input_bits)
5204 +-);
5205 +-
5206 +-DECLARE_EVENT_CLASS(random__get_random_bytes,
5207 +- TP_PROTO(int nbytes, unsigned long IP),
5208 +-
5209 +- TP_ARGS(nbytes, IP),
5210 +-
5211 +- TP_STRUCT__entry(
5212 +- __field( int, nbytes )
5213 +- __field(unsigned long, IP )
5214 +- ),
5215 +-
5216 +- TP_fast_assign(
5217 +- __entry->nbytes = nbytes;
5218 +- __entry->IP = IP;
5219 +- ),
5220 +-
5221 +- TP_printk("nbytes %d caller %pS", __entry->nbytes, (void *)__entry->IP)
5222 +-);
5223 +-
5224 +-DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
5225 +- TP_PROTO(int nbytes, unsigned long IP),
5226 +-
5227 +- TP_ARGS(nbytes, IP)
5228 +-);
5229 +-
5230 +-DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
5231 +- TP_PROTO(int nbytes, unsigned long IP),
5232 +-
5233 +- TP_ARGS(nbytes, IP)
5234 +-);
5235 +-
5236 +-DECLARE_EVENT_CLASS(random__extract_entropy,
5237 +- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
5238 +- unsigned long IP),
5239 +-
5240 +- TP_ARGS(pool_name, nbytes, entropy_count, IP),
5241 +-
5242 +- TP_STRUCT__entry(
5243 +- __field( const char *, pool_name )
5244 +- __field( int, nbytes )
5245 +- __field( int, entropy_count )
5246 +- __field(unsigned long, IP )
5247 +- ),
5248 +-
5249 +- TP_fast_assign(
5250 +- __entry->pool_name = pool_name;
5251 +- __entry->nbytes = nbytes;
5252 +- __entry->entropy_count = entropy_count;
5253 +- __entry->IP = IP;
5254 +- ),
5255 +-
5256 +- TP_printk("%s pool: nbytes %d entropy_count %d caller %pS",
5257 +- __entry->pool_name, __entry->nbytes, __entry->entropy_count,
5258 +- (void *)__entry->IP)
5259 +-);
5260 +-
5261 +-
5262 +-DEFINE_EVENT(random__extract_entropy, extract_entropy,
5263 +- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
5264 +- unsigned long IP),
5265 +-
5266 +- TP_ARGS(pool_name, nbytes, entropy_count, IP)
5267 +-);
5268 +-
5269 +-TRACE_EVENT(urandom_read,
5270 +- TP_PROTO(int got_bits, int pool_left, int input_left),
5271 +-
5272 +- TP_ARGS(got_bits, pool_left, input_left),
5273 +-
5274 +- TP_STRUCT__entry(
5275 +- __field( int, got_bits )
5276 +- __field( int, pool_left )
5277 +- __field( int, input_left )
5278 +- ),
5279 +-
5280 +- TP_fast_assign(
5281 +- __entry->got_bits = got_bits;
5282 +- __entry->pool_left = pool_left;
5283 +- __entry->input_left = input_left;
5284 +- ),
5285 +-
5286 +- TP_printk("got_bits %d nonblocking_pool_entropy_left %d "
5287 +- "input_entropy_left %d", __entry->got_bits,
5288 +- __entry->pool_left, __entry->input_left)
5289 +-);
5290 +-
5291 +-TRACE_EVENT(prandom_u32,
5292 +-
5293 +- TP_PROTO(unsigned int ret),
5294 +-
5295 +- TP_ARGS(ret),
5296 +-
5297 +- TP_STRUCT__entry(
5298 +- __field( unsigned int, ret)
5299 +- ),
5300 +-
5301 +- TP_fast_assign(
5302 +- __entry->ret = ret;
5303 +- ),
5304 +-
5305 +- TP_printk("ret=%u" , __entry->ret)
5306 +-);
5307 +-
5308 +-#endif /* _TRACE_RANDOM_H */
5309 +-
5310 +-/* This part must be outside protection */
5311 +-#include <trace/define_trace.h>
5312 +diff --git a/init/main.c b/init/main.c
5313 +index 06b98350ebd24..cf79b5a766cb1 100644
5314 +--- a/init/main.c
5315 ++++ b/init/main.c
5316 +@@ -1041,21 +1041,18 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
5317 + softirq_init();
5318 + timekeeping_init();
5319 + kfence_init();
5320 ++ time_init();
5321 +
5322 + /*
5323 + * For best initial stack canary entropy, prepare it after:
5324 + * - setup_arch() for any UEFI RNG entropy and boot cmdline access
5325 +- * - timekeeping_init() for ktime entropy used in rand_initialize()
5326 +- * - rand_initialize() to get any arch-specific entropy like RDRAND
5327 +- * - add_latent_entropy() to get any latent entropy
5328 +- * - adding command line entropy
5329 ++ * - timekeeping_init() for ktime entropy used in random_init()
5330 ++ * - time_init() for making random_get_entropy() work on some platforms
5331 ++ *   - random_init() to initialize the RNG from early entropy sources
5332 + */
5333 +- rand_initialize();
5334 +- add_latent_entropy();
5335 +- add_device_randomness(command_line, strlen(command_line));
5336 ++ random_init(command_line);
5337 + boot_init_stack_canary();
5338 +
5339 +- time_init();
5340 + perf_event_init();
5341 + profile_init();
5342 + call_function_init();
5343 +diff --git a/kernel/cpu.c b/kernel/cpu.c
5344 +index 5601216eb51bd..da871eb075662 100644
5345 +--- a/kernel/cpu.c
5346 ++++ b/kernel/cpu.c
5347 +@@ -34,6 +34,7 @@
5348 + #include <linux/scs.h>
5349 + #include <linux/percpu-rwsem.h>
5350 + #include <linux/cpuset.h>
5351 ++#include <linux/random.h>
5352 +
5353 + #include <trace/events/power.h>
5354 + #define CREATE_TRACE_POINTS
5355 +@@ -1659,6 +1660,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
5356 + .startup.single = perf_event_init_cpu,
5357 + .teardown.single = perf_event_exit_cpu,
5358 + },
5359 ++ [CPUHP_RANDOM_PREPARE] = {
5360 ++ .name = "random:prepare",
5361 ++ .startup.single = random_prepare_cpu,
5362 ++ .teardown.single = NULL,
5363 ++ },
5364 + [CPUHP_WORKQUEUE_PREP] = {
5365 + .name = "workqueue:prepare",
5366 + .startup.single = workqueue_prepare_cpu,
5367 +@@ -1782,6 +1788,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
5368 + .startup.single = workqueue_online_cpu,
5369 + .teardown.single = workqueue_offline_cpu,
5370 + },
5371 ++ [CPUHP_AP_RANDOM_ONLINE] = {
5372 ++ .name = "random:online",
5373 ++ .startup.single = random_online_cpu,
5374 ++ .teardown.single = NULL,
5375 ++ },
5376 + [CPUHP_AP_RCUTREE_ONLINE] = {
5377 + .name = "RCU/tree:online",
5378 + .startup.single = rcutree_online_cpu,
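The two new entries claim fixed slots in the hotplug state table, so random.c is called early in the prepare phase and again once each CPU is online. Built-in subsystems get static entries like these; drivers without a reserved slot typically use the dynamic registration API instead, roughly like this kernel-side sketch (the mydrv names are hypothetical):

    #include <linux/cpuhotplug.h>

    static int mydrv_cpu_online(unsigned int cpu)
    {
        /* per-CPU bring-up work, runs on each CPU as it comes online */
        return 0;
    }

    static int mydrv_cpu_offline(unsigned int cpu)
    {
        /* per-CPU teardown work */
        return 0;
    }

    static int __init mydrv_init(void)
    {
        /* dynamic slot in the online section; returns the state number */
        int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                    mydrv_cpu_online, mydrv_cpu_offline);

        return ret < 0 ? ret : 0;
    }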
5379 +diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
5380 +index 221d80c31e94c..fca637d4da1a7 100644
5381 +--- a/kernel/irq/handle.c
5382 ++++ b/kernel/irq/handle.c
5383 +@@ -195,7 +195,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
5384 +
5385 + retval = __handle_irq_event_percpu(desc, &flags);
5386 +
5387 +- add_interrupt_randomness(desc->irq_data.irq, flags);
5388 ++ add_interrupt_randomness(desc->irq_data.irq);
5389 +
5390 + if (!irq_settings_no_debug(desc))
5391 + note_interrupt(desc, retval);
5392 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
5393 +index 3b1398fbddaf8..871c912860ed5 100644
5394 +--- a/kernel/time/timekeeping.c
5395 ++++ b/kernel/time/timekeeping.c
5396 +@@ -17,6 +17,7 @@
5397 + #include <linux/clocksource.h>
5398 + #include <linux/jiffies.h>
5399 + #include <linux/time.h>
5400 ++#include <linux/timex.h>
5401 + #include <linux/tick.h>
5402 + #include <linux/stop_machine.h>
5403 + #include <linux/pvclock_gtod.h>
5404 +@@ -2380,6 +2381,20 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc)
5405 + return 0;
5406 + }
5407 +
5408 ++/**
5409 ++ * random_get_entropy_fallback - Returns the raw clock source value,
5410 ++ * used by random.c for platforms with no valid random_get_entropy().
5411 ++ */
5412 ++unsigned long random_get_entropy_fallback(void)
5413 ++{
5414 ++ struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
5415 ++ struct clocksource *clock = READ_ONCE(tkr->clock);
5416 ++
5417 ++ if (unlikely(timekeeping_suspended || !clock))
5418 ++ return 0;
5419 ++ return clock->read(clock);
5420 ++}
5421 ++EXPORT_SYMBOL_GPL(random_get_entropy_fallback);
5422 +
5423 + /**
5424 + * do_adjtimex() - Accessor function to NTP __do_adjtimex function
5425 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
5426 +index 55e89b237b6f8..7fd3fa05379e2 100644
5427 +--- a/lib/Kconfig.debug
5428 ++++ b/lib/Kconfig.debug
5429 +@@ -1559,8 +1559,7 @@ config WARN_ALL_UNSEEDED_RANDOM
5430 + so architecture maintainers really need to do what they can
5431 + to get the CRNG seeded sooner after the system is booted.
5432 + However, since users cannot do anything actionable to
5433 +- address this, by default the kernel will issue only a single
5434 +- warning for the first use of unseeded randomness.
5435 ++ address this, by default this option is disabled.
5436 +
5437 + Say Y here if you want to receive warnings for all uses of
5438 + unseeded randomness. This will be of use primarily for
5439 +diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
5440 +index 545ccbddf6a1d..8620f38e117c0 100644
5441 +--- a/lib/crypto/Kconfig
5442 ++++ b/lib/crypto/Kconfig
5443 +@@ -1,7 +1,5 @@
5444 + # SPDX-License-Identifier: GPL-2.0
5445 +
5446 +-comment "Crypto library routines"
5447 +-
5448 + config CRYPTO_LIB_AES
5449 + tristate
5450 +
5451 +@@ -9,14 +7,14 @@ config CRYPTO_LIB_ARC4
5452 + tristate
5453 +
5454 + config CRYPTO_ARCH_HAVE_LIB_BLAKE2S
5455 +- tristate
5456 ++ bool
5457 + help
5458 + Declares whether the architecture provides an arch-specific
5459 + accelerated implementation of the Blake2s library interface,
5460 + either builtin or as a module.
5461 +
5462 + config CRYPTO_LIB_BLAKE2S_GENERIC
5463 +- tristate
5464 ++ def_bool !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
5465 + help
5466 + This symbol can be depended upon by arch implementations of the
5467 + Blake2s library interface that require the generic code as a
5468 +@@ -24,15 +22,6 @@ config CRYPTO_LIB_BLAKE2S_GENERIC
5469 + implementation is enabled, this implementation serves the users
5470 + of CRYPTO_LIB_BLAKE2S.
5471 +
5472 +-config CRYPTO_LIB_BLAKE2S
5473 +- tristate "BLAKE2s hash function library"
5474 +- depends on CRYPTO_ARCH_HAVE_LIB_BLAKE2S || !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
5475 +- select CRYPTO_LIB_BLAKE2S_GENERIC if CRYPTO_ARCH_HAVE_LIB_BLAKE2S=n
5476 +- help
5477 +- Enable the Blake2s library interface. This interface may be fulfilled
5478 +- by either the generic implementation or an arch-specific one, if one
5479 +- is available and enabled.
5480 +-
5481 + config CRYPTO_ARCH_HAVE_LIB_CHACHA
5482 + tristate
5483 + help
5484 +@@ -51,7 +40,7 @@ config CRYPTO_LIB_CHACHA_GENERIC
5485 + of CRYPTO_LIB_CHACHA.
5486 +
5487 + config CRYPTO_LIB_CHACHA
5488 +- tristate "ChaCha library interface"
5489 ++ tristate
5490 + depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
5491 + select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
5492 + help
5493 +@@ -76,7 +65,7 @@ config CRYPTO_LIB_CURVE25519_GENERIC
5494 + of CRYPTO_LIB_CURVE25519.
5495 +
5496 + config CRYPTO_LIB_CURVE25519
5497 +- tristate "Curve25519 scalar multiplication library"
5498 ++ tristate
5499 + depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
5500 + select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
5501 + help
5502 +@@ -111,7 +100,7 @@ config CRYPTO_LIB_POLY1305_GENERIC
5503 + of CRYPTO_LIB_POLY1305.
5504 +
5505 + config CRYPTO_LIB_POLY1305
5506 +- tristate "Poly1305 library interface"
5507 ++ tristate
5508 + depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
5509 + select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
5510 + help
5511 +@@ -120,7 +109,7 @@ config CRYPTO_LIB_POLY1305
5512 + is available and enabled.
5513 +
5514 + config CRYPTO_LIB_CHACHA20POLY1305
5515 +- tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
5516 ++ tristate
5517 + depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
5518 + depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
5519 + select CRYPTO_LIB_CHACHA
5520 +diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
5521 +index 73205ed269bad..ed43a41f2dcc8 100644
5522 +--- a/lib/crypto/Makefile
5523 ++++ b/lib/crypto/Makefile
5524 +@@ -10,11 +10,10 @@ libaes-y := aes.o
5525 + obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
5526 + libarc4-y := arc4.o
5527 +
5528 +-obj-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += libblake2s-generic.o
5529 +-libblake2s-generic-y += blake2s-generic.o
5530 +-
5531 +-obj-$(CONFIG_CRYPTO_LIB_BLAKE2S) += libblake2s.o
5532 +-libblake2s-y += blake2s.o
5533 ++# blake2s is used by the /dev/random driver which is always builtin
5534 ++obj-y += libblake2s.o
5535 ++libblake2s-y := blake2s.o
5536 ++libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += blake2s-generic.o
5537 +
5538 + obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o
5539 + libchacha20poly1305-y += chacha20poly1305.o
5540 +diff --git a/lib/crypto/blake2s-generic.c b/lib/crypto/blake2s-generic.c
5541 +index 04ff8df245136..75ccb3e633e65 100644
5542 +--- a/lib/crypto/blake2s-generic.c
5543 ++++ b/lib/crypto/blake2s-generic.c
5544 +@@ -37,7 +37,11 @@ static inline void blake2s_increment_counter(struct blake2s_state *state,
5545 + state->t[1] += (state->t[0] < inc);
5546 + }
5547 +
5548 +-void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
5549 ++void blake2s_compress(struct blake2s_state *state, const u8 *block,
5550 ++ size_t nblocks, const u32 inc)
5551 ++ __weak __alias(blake2s_compress_generic);
5552 ++
5553 ++void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
5554 + size_t nblocks, const u32 inc)
5555 + {
5556 + u32 m[16];
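The __weak __alias pair is the trick that replaces the old blake2s_compress_arch indirection: blake2s_compress defaults to the generic code, and an architecture object file that defines a strong blake2s_compress silently overrides it at link time, with no #ifdef. A userspace demo of the same GCC/Clang mechanism, with illustrative names:

    #include <stdio.h>

    void do_compress_generic(void)
    {
        puts("generic implementation");
    }

    /* weak alias: resolves to the generic code unless a strong
     * do_compress() is linked in from elsewhere */
    void do_compress(void) __attribute__((weak, alias("do_compress_generic")));

    int main(void)
    {
        do_compress();  /* prints "generic implementation" here; another
                         * object file defining do_compress() would win */
        return 0;
    }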
5557 +diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c
5558 +index 5d9ea53be9736..409e4b7287704 100644
5559 +--- a/lib/crypto/blake2s-selftest.c
5560 ++++ b/lib/crypto/blake2s-selftest.c
5561 +@@ -15,7 +15,6 @@
5562 + * #include <stdio.h>
5563 + *
5564 + * #include <openssl/evp.h>
5565 +- * #include <openssl/hmac.h>
5566 + *
5567 + * #define BLAKE2S_TESTVEC_COUNT 256
5568 + *
5569 +@@ -58,16 +57,6 @@
5570 + * }
5571 + * printf("};\n\n");
5572 + *
5573 +- * printf("static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n");
5574 +- *
5575 +- * HMAC(EVP_blake2s256(), key, sizeof(key), buf, sizeof(buf), hash, NULL);
5576 +- * print_vec(hash, BLAKE2S_OUTBYTES);
5577 +- *
5578 +- * HMAC(EVP_blake2s256(), buf, sizeof(buf), key, sizeof(key), hash, NULL);
5579 +- * print_vec(hash, BLAKE2S_OUTBYTES);
5580 +- *
5581 +- * printf("};\n");
5582 +- *
5583 + * return 0;
5584 + *}
5585 + */
5586 +@@ -554,15 +543,6 @@ static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
5587 + 0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, },
5588 + };
5589 +
5590 +-static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
5591 +- { 0xce, 0xe1, 0x57, 0x69, 0x82, 0xdc, 0xbf, 0x43, 0xad, 0x56, 0x4c, 0x70,
5592 +- 0xed, 0x68, 0x16, 0x96, 0xcf, 0xa4, 0x73, 0xe8, 0xe8, 0xfc, 0x32, 0x79,
5593 +- 0x08, 0x0a, 0x75, 0x82, 0xda, 0x3f, 0x05, 0x11, },
5594 +- { 0x77, 0x2f, 0x0c, 0x71, 0x41, 0xf4, 0x4b, 0x2b, 0xb3, 0xc6, 0xb6, 0xf9,
5595 +- 0x60, 0xde, 0xe4, 0x52, 0x38, 0x66, 0xe8, 0xbf, 0x9b, 0x96, 0xc4, 0x9f,
5596 +- 0x60, 0xd9, 0x24, 0x37, 0x99, 0xd6, 0xec, 0x31, },
5597 +-};
5598 +-
5599 + bool __init blake2s_selftest(void)
5600 + {
5601 + u8 key[BLAKE2S_KEY_SIZE];
5602 +@@ -607,16 +587,5 @@ bool __init blake2s_selftest(void)
5603 + }
5604 + }
5605 +
5606 +- if (success) {
5607 +- blake2s256_hmac(hash, buf, key, sizeof(buf), sizeof(key));
5608 +- success &= !memcmp(hash, blake2s_hmac_testvecs[0], BLAKE2S_HASH_SIZE);
5609 +-
5610 +- blake2s256_hmac(hash, key, buf, sizeof(key), sizeof(buf));
5611 +- success &= !memcmp(hash, blake2s_hmac_testvecs[1], BLAKE2S_HASH_SIZE);
5612 +-
5613 +- if (!success)
5614 +- pr_err("blake2s256_hmac self-test: FAIL\n");
5615 +- }
5616 +-
5617 + return success;
5618 + }
5619 +diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
5620 +index 4055aa593ec49..c71c09621c09c 100644
5621 +--- a/lib/crypto/blake2s.c
5622 ++++ b/lib/crypto/blake2s.c
5623 +@@ -16,63 +16,20 @@
5624 + #include <linux/init.h>
5625 + #include <linux/bug.h>
5626 +
5627 +-#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
5628 +-# define blake2s_compress blake2s_compress_arch
5629 +-#else
5630 +-# define blake2s_compress blake2s_compress_generic
5631 +-#endif
5632 +-
5633 + void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
5634 + {
5635 +- __blake2s_update(state, in, inlen, blake2s_compress);
5636 ++ __blake2s_update(state, in, inlen, false);
5637 + }
5638 + EXPORT_SYMBOL(blake2s_update);
5639 +
5640 + void blake2s_final(struct blake2s_state *state, u8 *out)
5641 + {
5642 + WARN_ON(IS_ENABLED(DEBUG) && !out);
5643 +- __blake2s_final(state, out, blake2s_compress);
5644 ++ __blake2s_final(state, out, false);
5645 + memzero_explicit(state, sizeof(*state));
5646 + }
5647 + EXPORT_SYMBOL(blake2s_final);
5648 +
5649 +-void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
5650 +- const size_t keylen)
5651 +-{
5652 +- struct blake2s_state state;
5653 +- u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
5654 +- u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
5655 +- int i;
5656 +-
5657 +- if (keylen > BLAKE2S_BLOCK_SIZE) {
5658 +- blake2s_init(&state, BLAKE2S_HASH_SIZE);
5659 +- blake2s_update(&state, key, keylen);
5660 +- blake2s_final(&state, x_key);
5661 +- } else
5662 +- memcpy(x_key, key, keylen);
5663 +-
5664 +- for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
5665 +- x_key[i] ^= 0x36;
5666 +-
5667 +- blake2s_init(&state, BLAKE2S_HASH_SIZE);
5668 +- blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
5669 +- blake2s_update(&state, in, inlen);
5670 +- blake2s_final(&state, i_hash);
5671 +-
5672 +- for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
5673 +- x_key[i] ^= 0x5c ^ 0x36;
5674 +-
5675 +- blake2s_init(&state, BLAKE2S_HASH_SIZE);
5676 +- blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
5677 +- blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
5678 +- blake2s_final(&state, i_hash);
5679 +-
5680 +- memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
5681 +- memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
5682 +- memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
5683 +-}
5684 +-EXPORT_SYMBOL(blake2s256_hmac);
5685 +-
5686 + static int __init blake2s_mod_init(void)
5687 + {
5688 + if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
5689 +diff --git a/lib/random32.c b/lib/random32.c
5690 +index 4d0e05e471d72..f0ab17c2244be 100644
5691 +--- a/lib/random32.c
5692 ++++ b/lib/random32.c
5693 +@@ -39,8 +39,9 @@
5694 + #include <linux/random.h>
5695 + #include <linux/sched.h>
5696 + #include <linux/bitops.h>
5697 ++#include <linux/slab.h>
5698 ++#include <linux/notifier.h>
5699 + #include <asm/unaligned.h>
5700 +-#include <trace/events/random.h>
5701 +
5702 + /**
5703 + * prandom_u32_state - seeded pseudo-random number generator.
5704 +@@ -386,7 +387,6 @@ u32 prandom_u32(void)
5705 + struct siprand_state *state = get_cpu_ptr(&net_rand_state);
5706 + u32 res = siprand_u32(state);
5707 +
5708 +- trace_prandom_u32(res);
5709 + put_cpu_ptr(&net_rand_state);
5710 + return res;
5711 + }
5712 +@@ -552,9 +552,11 @@ static void prandom_reseed(struct timer_list *unused)
5713 + * To avoid worrying about whether it's safe to delay that interrupt
5714 + * long enough to seed all CPUs, just schedule an immediate timer event.
5715 + */
5716 +-static void prandom_timer_start(struct random_ready_callback *unused)
5717 ++static int prandom_timer_start(struct notifier_block *nb,
5718 ++ unsigned long action, void *data)
5719 + {
5720 + mod_timer(&seed_timer, jiffies);
5721 ++ return 0;
5722 + }
5723 +
5724 + #ifdef CONFIG_RANDOM32_SELFTEST
5725 +@@ -618,13 +620,13 @@ core_initcall(prandom32_state_selftest);
5726 + */
5727 + static int __init prandom_init_late(void)
5728 + {
5729 +- static struct random_ready_callback random_ready = {
5730 +- .func = prandom_timer_start
5731 ++ static struct notifier_block random_ready = {
5732 ++ .notifier_call = prandom_timer_start
5733 + };
5734 +- int ret = add_random_ready_callback(&random_ready);
5735 ++ int ret = register_random_ready_notifier(&random_ready);
5736 +
5737 + if (ret == -EALREADY) {
5738 +- prandom_timer_start(&random_ready);
5739 ++ prandom_timer_start(&random_ready, 0, NULL);
5740 + ret = 0;
5741 + }
5742 + return ret;
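Both converted callbacks keep their old bodies and simply return 0 (NOTIFY_DONE); the action and data arguments are unused for the random-ready event. A simplified userspace model of the notifier-chain shape (the real kernel chain adds priorities and locking):

    #include <stdio.h>

    struct notifier_block {
        int (*notifier_call)(struct notifier_block *nb,
                             unsigned long action, void *data);
        struct notifier_block *next;
    };

    /* walk the chain, like the kernel's notifier_call_chain() */
    static void call_chain(struct notifier_block *head,
                           unsigned long action, void *data)
    {
        for (; head; head = head->next)
            head->notifier_call(head, action, data);
    }

    static int on_ready(struct notifier_block *nb,
                        unsigned long action, void *data)
    {
        puts("rng ready: kick the reseed timer");
        return 0;   /* NOTIFY_DONE */
    }

    int main(void)
    {
        struct notifier_block nb = { .notifier_call = on_ready };

        call_chain(&nb, 0, NULL);   /* fires once when the CRNG is ready */
        return 0;
    }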
5743 +diff --git a/lib/sha1.c b/lib/sha1.c
5744 +index 9bd1935a14727..0494766fc574e 100644
5745 +--- a/lib/sha1.c
5746 ++++ b/lib/sha1.c
5747 +@@ -9,6 +9,7 @@
5748 + #include <linux/kernel.h>
5749 + #include <linux/export.h>
5750 + #include <linux/bitops.h>
5751 ++#include <linux/string.h>
5752 + #include <crypto/sha1.h>
5753 + #include <asm/unaligned.h>
5754 +
5755 +@@ -55,7 +56,8 @@
5756 + #define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
5757 + __u32 TEMP = input(t); setW(t, TEMP); \
5758 + E += TEMP + rol32(A,5) + (fn) + (constant); \
5759 +- B = ror32(B, 2); } while (0)
5760 ++ B = ror32(B, 2); \
5761 ++ TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0)
5762 +
5763 + #define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
5764 + #define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
5765 +@@ -84,6 +86,7 @@
5766 + void sha1_transform(__u32 *digest, const char *data, __u32 *array)
5767 + {
5768 + __u32 A, B, C, D, E;
5769 ++ unsigned int i = 0;
5770 +
5771 + A = digest[0];
5772 + B = digest[1];
5773 +@@ -92,94 +95,24 @@ void sha1_transform(__u32 *digest, const char *data, __u32 *array)
5774 + E = digest[4];
5775 +
5776 + /* Round 1 - iterations 0-16 take their input from 'data' */
5777 +- T_0_15( 0, A, B, C, D, E);
5778 +- T_0_15( 1, E, A, B, C, D);
5779 +- T_0_15( 2, D, E, A, B, C);
5780 +- T_0_15( 3, C, D, E, A, B);
5781 +- T_0_15( 4, B, C, D, E, A);
5782 +- T_0_15( 5, A, B, C, D, E);
5783 +- T_0_15( 6, E, A, B, C, D);
5784 +- T_0_15( 7, D, E, A, B, C);
5785 +- T_0_15( 8, C, D, E, A, B);
5786 +- T_0_15( 9, B, C, D, E, A);
5787 +- T_0_15(10, A, B, C, D, E);
5788 +- T_0_15(11, E, A, B, C, D);
5789 +- T_0_15(12, D, E, A, B, C);
5790 +- T_0_15(13, C, D, E, A, B);
5791 +- T_0_15(14, B, C, D, E, A);
5792 +- T_0_15(15, A, B, C, D, E);
5793 ++ for (; i < 16; ++i)
5794 ++ T_0_15(i, A, B, C, D, E);
5795 +
5796 + /* Round 1 - tail. Input from 512-bit mixing array */
5797 +- T_16_19(16, E, A, B, C, D);
5798 +- T_16_19(17, D, E, A, B, C);
5799 +- T_16_19(18, C, D, E, A, B);
5800 +- T_16_19(19, B, C, D, E, A);
5801 ++ for (; i < 20; ++i)
5802 ++ T_16_19(i, A, B, C, D, E);
5803 +
5804 + /* Round 2 */
5805 +- T_20_39(20, A, B, C, D, E);
5806 +- T_20_39(21, E, A, B, C, D);
5807 +- T_20_39(22, D, E, A, B, C);
5808 +- T_20_39(23, C, D, E, A, B);
5809 +- T_20_39(24, B, C, D, E, A);
5810 +- T_20_39(25, A, B, C, D, E);
5811 +- T_20_39(26, E, A, B, C, D);
5812 +- T_20_39(27, D, E, A, B, C);
5813 +- T_20_39(28, C, D, E, A, B);
5814 +- T_20_39(29, B, C, D, E, A);
5815 +- T_20_39(30, A, B, C, D, E);
5816 +- T_20_39(31, E, A, B, C, D);
5817 +- T_20_39(32, D, E, A, B, C);
5818 +- T_20_39(33, C, D, E, A, B);
5819 +- T_20_39(34, B, C, D, E, A);
5820 +- T_20_39(35, A, B, C, D, E);
5821 +- T_20_39(36, E, A, B, C, D);
5822 +- T_20_39(37, D, E, A, B, C);
5823 +- T_20_39(38, C, D, E, A, B);
5824 +- T_20_39(39, B, C, D, E, A);
5825 ++ for (; i < 40; ++i)
5826 ++ T_20_39(i, A, B, C, D, E);
5827 +
5828 + /* Round 3 */
5829 +- T_40_59(40, A, B, C, D, E);
5830 +- T_40_59(41, E, A, B, C, D);
5831 +- T_40_59(42, D, E, A, B, C);
5832 +- T_40_59(43, C, D, E, A, B);
5833 +- T_40_59(44, B, C, D, E, A);
5834 +- T_40_59(45, A, B, C, D, E);
5835 +- T_40_59(46, E, A, B, C, D);
5836 +- T_40_59(47, D, E, A, B, C);
5837 +- T_40_59(48, C, D, E, A, B);
5838 +- T_40_59(49, B, C, D, E, A);
5839 +- T_40_59(50, A, B, C, D, E);
5840 +- T_40_59(51, E, A, B, C, D);
5841 +- T_40_59(52, D, E, A, B, C);
5842 +- T_40_59(53, C, D, E, A, B);
5843 +- T_40_59(54, B, C, D, E, A);
5844 +- T_40_59(55, A, B, C, D, E);
5845 +- T_40_59(56, E, A, B, C, D);
5846 +- T_40_59(57, D, E, A, B, C);
5847 +- T_40_59(58, C, D, E, A, B);
5848 +- T_40_59(59, B, C, D, E, A);
5849 ++ for (; i < 60; ++i)
5850 ++ T_40_59(i, A, B, C, D, E);
5851 +
5852 + /* Round 4 */
5853 +- T_60_79(60, A, B, C, D, E);
5854 +- T_60_79(61, E, A, B, C, D);
5855 +- T_60_79(62, D, E, A, B, C);
5856 +- T_60_79(63, C, D, E, A, B);
5857 +- T_60_79(64, B, C, D, E, A);
5858 +- T_60_79(65, A, B, C, D, E);
5859 +- T_60_79(66, E, A, B, C, D);
5860 +- T_60_79(67, D, E, A, B, C);
5861 +- T_60_79(68, C, D, E, A, B);
5862 +- T_60_79(69, B, C, D, E, A);
5863 +- T_60_79(70, A, B, C, D, E);
5864 +- T_60_79(71, E, A, B, C, D);
5865 +- T_60_79(72, D, E, A, B, C);
5866 +- T_60_79(73, C, D, E, A, B);
5867 +- T_60_79(74, B, C, D, E, A);
5868 +- T_60_79(75, A, B, C, D, E);
5869 +- T_60_79(76, E, A, B, C, D);
5870 +- T_60_79(77, D, E, A, B, C);
5871 +- T_60_79(78, C, D, E, A, B);
5872 +- T_60_79(79, B, C, D, E, A);
5873 ++ for (; i < 80; ++i)
5874 ++ T_60_79(i, A, B, C, D, E);
5875 +
5876 + digest[0] += A;
5877 + digest[1] += B;
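The key to collapsing the eighty unrolled rounds is the `TEMP = E; E = D; ...` cycle appended to SHA_ROUND: instead of permuting the argument order across calls, each round now rotates the five working variables in place. A toy check that the two formulations agree (the round body is simplified to a single mixing step):

    #include <stdio.h>
    #include <assert.h>
    #include <stdint.h>

    /* toy round: reads A, updates E, like SHA_ROUND's "E += ..." step */
    #define ROUND(A, B, C, D, E) do { \
        E += A * 31 + 7; \
    } while (0)

    /* same round plus the in-place variable rotation from the patch */
    #define ROUND_ROT(A, B, C, D, E) do { \
        uint32_t TEMP; \
        ROUND(A, B, C, D, E); \
        TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; \
    } while (0)

    int main(void)
    {
        uint32_t a = 1, b = 2, c = 3, d = 4, e = 5;
        uint32_t A = 1, B = 2, C = 3, D = 4, E = 5;
        int i;

        /* five unrolled, argument-permuted rounds (old style) */
        ROUND(a, b, c, d, e);
        ROUND(e, a, b, c, d);
        ROUND(d, e, a, b, c);
        ROUND(c, d, e, a, b);
        ROUND(b, c, d, e, a);

        /* five looped, variable-rotated rounds (new style) */
        for (i = 0; i < 5; ++i)
            ROUND_ROT(A, B, C, D, E);

        assert(a == A && b == B && c == C && d == D && e == E);
        puts("unrolled and looped rounds agree");
        return 0;
    }

Rolling the rounds back up trades the hand-unrolled scheduling for a much smaller function.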
5878 +diff --git a/lib/siphash.c b/lib/siphash.c
5879 +index 72b9068ab57bf..71d315a6ad623 100644
5880 +--- a/lib/siphash.c
5881 ++++ b/lib/siphash.c
5882 +@@ -18,19 +18,13 @@
5883 + #include <asm/word-at-a-time.h>
5884 + #endif
5885 +
5886 +-#define SIPROUND \
5887 +- do { \
5888 +- v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
5889 +- v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
5890 +- v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
5891 +- v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
5892 +- } while (0)
5893 ++#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
5894 +
5895 + #define PREAMBLE(len) \
5896 +- u64 v0 = 0x736f6d6570736575ULL; \
5897 +- u64 v1 = 0x646f72616e646f6dULL; \
5898 +- u64 v2 = 0x6c7967656e657261ULL; \
5899 +- u64 v3 = 0x7465646279746573ULL; \
5900 ++ u64 v0 = SIPHASH_CONST_0; \
5901 ++ u64 v1 = SIPHASH_CONST_1; \
5902 ++ u64 v2 = SIPHASH_CONST_2; \
5903 ++ u64 v3 = SIPHASH_CONST_3; \
5904 + u64 b = ((u64)(len)) << 56; \
5905 + v3 ^= key->key[1]; \
5906 + v2 ^= key->key[0]; \
5907 +@@ -389,19 +383,13 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
5908 + }
5909 + EXPORT_SYMBOL(hsiphash_4u32);
5910 + #else
5911 +-#define HSIPROUND \
5912 +- do { \
5913 +- v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
5914 +- v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
5915 +- v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
5916 +- v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
5917 +- } while (0)
5918 ++#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3)
5919 +
5920 + #define HPREAMBLE(len) \
5921 +- u32 v0 = 0; \
5922 +- u32 v1 = 0; \
5923 +- u32 v2 = 0x6c796765U; \
5924 +- u32 v3 = 0x74656462U; \
5925 ++ u32 v0 = HSIPHASH_CONST_0; \
5926 ++ u32 v1 = HSIPHASH_CONST_1; \
5927 ++ u32 v2 = HSIPHASH_CONST_2; \
5928 ++ u32 v3 = HSIPHASH_CONST_3; \
5929 + u32 b = ((u32)(len)) << 24; \
5930 + v3 ^= key->key[1]; \
5931 + v2 ^= key->key[0]; \
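
The hunk above replaces lib/siphash.c's open-coded round macro and initialization constants with shared SIPHASH_* definitions, giving the SipHash permutation one source of truth that the CRNG rework elsewhere in this patch can reuse. The removed lines pin down exactly what the shared macros must expand to; reconstructed from them (the actual definitions live in include/linux/siphash.h, outside the quoted context), they presumably read:

	#define SIPHASH_PERMUTATION(a, b, c, d) ( \
		(a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
		(c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
		(a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
		(c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))

	/* "somepseudorandomlygeneratedbytes", per the SipHash paper */
	#define SIPHASH_CONST_0	0x736f6d6570736575ULL
	#define SIPHASH_CONST_1	0x646f72616e646f6dULL
	#define SIPHASH_CONST_2	0x6c7967656e657261ULL
	#define SIPHASH_CONST_3	0x7465646279746573ULL

The HSIPHASH_* counterparts in the second hunk follow the same pattern with rol32 and the removed 32-bit values (0, 0, 0x6c796765, 0x74656462). Written as a comma expression rather than a do/while block, the permutation can be used both as a statement, as here, and inside larger expressions.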
5932 +diff --git a/lib/vsprintf.c b/lib/vsprintf.c
5933 +index a3b73f28b7a9c..a60f0bb2ea902 100644
5934 +--- a/lib/vsprintf.c
5935 ++++ b/lib/vsprintf.c
5936 +@@ -761,14 +761,16 @@ static void enable_ptr_key_workfn(struct work_struct *work)
5937 +
5938 + static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
5939 +
5940 +-static void fill_random_ptr_key(struct random_ready_callback *unused)
5941 ++static int fill_random_ptr_key(struct notifier_block *nb,
5942 ++ unsigned long action, void *data)
5943 + {
5944 + /* This may be in an interrupt handler. */
5945 + queue_work(system_unbound_wq, &enable_ptr_key_work);
5946 ++ return 0;
5947 + }
5948 +
5949 +-static struct random_ready_callback random_ready = {
5950 +- .func = fill_random_ptr_key
5951 ++static struct notifier_block random_ready = {
5952 ++ .notifier_call = fill_random_ptr_key
5953 + };
5954 +
5955 + static int __init initialize_ptr_random(void)
5956 +@@ -782,7 +784,7 @@ static int __init initialize_ptr_random(void)
5957 + return 0;
5958 + }
5959 +
5960 +- ret = add_random_ready_callback(&random_ready);
5961 ++ ret = register_random_ready_notifier(&random_ready);
5962 + if (!ret) {
5963 + return 0;
5964 + } else if (ret == -EALREADY) {
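
This vsprintf.c hunk converts the %p-hashing setup from the removed add_random_ready_callback() interface to the random readiness notifier introduced elsewhere in this patch. The shape of the conversion generalizes to any consumer that must defer work until the CRNG is seeded; a sketch, with the my_* names as hypothetical placeholders:

	static void my_setup_workfn(struct work_struct *work);
	static DECLARE_WORK(my_setup_work, my_setup_workfn);

	/* The notifier may fire in hard-irq context, so only queue work. */
	static int my_random_ready(struct notifier_block *nb,
				   unsigned long action, void *data)
	{
		queue_work(system_unbound_wq, &my_setup_work);
		return 0;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_random_ready,
	};

	static int __init my_init(void)
	{
		int ret = register_random_ready_notifier(&my_nb);

		if (ret == -EALREADY)	/* CRNG already seeded */
			queue_work(system_unbound_wq, &my_setup_work);
		return 0;
	}

The -EALREADY branch mirrors the handling visible at the end of the hunk above: registration fails with -EALREADY when the CRNG is already initialized, in which case the callback will never fire and the caller performs the setup immediately instead.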
5965 +diff --git a/mm/util.c b/mm/util.c
5966 +index ea09dd33ab594..3073de05c2bd4 100644
5967 +--- a/mm/util.c
5968 ++++ b/mm/util.c
5969 +@@ -343,6 +343,38 @@ unsigned long randomize_stack_top(unsigned long stack_top)
5970 + #endif
5971 + }
5972 +
5973 ++/**
5974 ++ * randomize_page - Generate a random, page aligned address
5975 ++ * @start: The smallest acceptable address the caller will take.
5976 ++ * @range: The size of the area, starting at @start, within which the
5977 ++ * random address must fall.
5978 ++ *
5979 ++ * If @start + @range would overflow, @range is capped.
5980 ++ *
5981 ++ * NOTE: Historical use of randomize_range, which this replaces, presumed that
5982 ++ * @start was already page aligned. We now align it regardless.
5983 ++ *
5984 ++ * Return: A page aligned address within [start, start + range). On error,
5985 ++ * @start is returned.
5986 ++ */
5987 ++unsigned long randomize_page(unsigned long start, unsigned long range)
5988 ++{
5989 ++ if (!PAGE_ALIGNED(start)) {
5990 ++ range -= PAGE_ALIGN(start) - start;
5991 ++ start = PAGE_ALIGN(start);
5992 ++ }
5993 ++
5994 ++ if (start > ULONG_MAX - range)
5995 ++ range = ULONG_MAX - start;
5996 ++
5997 ++ range >>= PAGE_SHIFT;
5998 ++
5999 ++ if (range == 0)
6000 ++ return start;
6001 ++
6002 ++ return start + (get_random_long() % range << PAGE_SHIFT);
6003 ++}
6004 ++
6005 + #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
6006 + unsigned long arch_randomize_brk(struct mm_struct *mm)
6007 + {
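
The kernel-doc above fully specifies randomize_page()'s contract; the arithmetic is easiest to see with concrete (hypothetical) numbers. Assuming a 4 KiB PAGE_SIZE, start = 0x10000 and range = 0x4000: start is already aligned, no overflow capping applies, range >> PAGE_SHIFT leaves 4 candidate pages, and get_random_long() % 4 selects one, so the result is one of 0x10000, 0x11000, 0x12000 or 0x13000, always page aligned and strictly below start + range. A typical call site might look like this sketch (hint is a hypothetical caller-supplied base; SZ_1M comes from <linux/sizes.h>):

	/* Pick a page-aligned address in the first 1 MiB at or above hint. */
	unsigned long base = randomize_page(hint, SZ_1M);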
6008 +diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
6009 +index 78f35e88aed6b..fbdb8a3d5b8e5 100644
6010 +--- a/sound/pci/ctxfi/ctatc.c
6011 ++++ b/sound/pci/ctxfi/ctatc.c
6012 +@@ -36,6 +36,7 @@
6013 + | ((IEC958_AES3_CON_FS_48000) << 24))
6014 +
6015 + static const struct snd_pci_quirk subsys_20k1_list[] = {
6016 ++ SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0021, "SB046x", CTSB046X),
6017 + SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0022, "SB055x", CTSB055X),
6018 + SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x002f, "SB055x", CTSB055X),
6019 + SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0029, "SB073x", CTSB073X),
6020 +@@ -64,6 +65,7 @@ static const struct snd_pci_quirk subsys_20k2_list[] = {
6021 +
6022 + static const char *ct_subsys_name[NUM_CTCARDS] = {
6023 + /* 20k1 models */
6024 ++ [CTSB046X] = "SB046x",
6025 + [CTSB055X] = "SB055x",
6026 + [CTSB073X] = "SB073x",
6027 + [CTUAA] = "UAA",
6028 +diff --git a/sound/pci/ctxfi/cthardware.h b/sound/pci/ctxfi/cthardware.h
6029 +index f406b626a28c4..2875cec83b8f2 100644
6030 +--- a/sound/pci/ctxfi/cthardware.h
6031 ++++ b/sound/pci/ctxfi/cthardware.h
6032 +@@ -26,8 +26,9 @@ enum CHIPTYP {
6033 +
6034 + enum CTCARDS {
6035 + /* 20k1 models */
6036 ++ CTSB046X,
6037 ++ CT20K1_MODEL_FIRST = CTSB046X,
6038 + CTSB055X,
6039 +- CT20K1_MODEL_FIRST = CTSB055X,
6040 + CTSB073X,
6041 + CTUAA,
6042 + CT20K1_UNKNOWN,
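
Taken together, the two sound/pci/ctxfi hunks teach the driver about the Creative SB046x (PCI subsystem ID 0x0021) as a 20k1 card: a quirk-table entry, a name-table entry, and a new CTCARDS enumerator. Because CTSB046X is inserted ahead of CTSB055X, the CT20K1_MODEL_FIRST marker must move with it; any range test built on the marker, such as the hypothetical sketch below (not code from this patch), would otherwise exclude the new model:

	static bool is_20k1_model(enum CTCARDS model)
	{
		return model >= CT20K1_MODEL_FIRST && model < CT20K1_UNKNOWN;
	}

Since ct_subsys_name[] in ctatc.c is indexed by this enum, the name-table entry has to land in the matching position, which is why both files change together.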