commit: 4c9bb1563e46363720d3778468b068a8509a2f36
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Mon May 30 13:57:08 2022 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Mon May 30 13:57:08 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4c9bb156

Linux patch 5.18.1

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README             |    4 +
 1000_linux-5.18.1.patch | 2933 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2937 insertions(+)

diff --git a/0000_README b/0000_README
index 298c5715..62ab5b31 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------

+Patch: 1000_linux-5.18.1.patch
+From: http://www.kernel.org
+Desc: Linux 5.18.1
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-5.18.1.patch b/1000_linux-5.18.1.patch
new file mode 100644
index 00000000..679abefd
--- /dev/null
+++ b/1000_linux-5.18.1.patch
@@ -0,0 +1,2933 @@
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 1144ea3229a37..e9c18dabc5523 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -994,6 +994,9 @@ This is a directory, with the following entries:
+ * ``boot_id``: a UUID generated the first time this is retrieved, and
+ unvarying after that;
+
++* ``uuid``: a UUID generated every time this is retrieved (this can
++ thus be used to generate UUIDs at will);
++
+ * ``entropy_avail``: the pool's entropy count, in bits;
+
+ * ``poolsize``: the entropy pool size, in bits;
+@@ -1001,10 +1004,7 @@ This is a directory, with the following entries:
+ * ``urandom_min_reseed_secs``: obsolete (used to determine the minimum
+ number of seconds between urandom pool reseeding). This file is
+ writable for compatibility purposes, but writing to it has no effect
+- on any RNG behavior.
+-
+-* ``uuid``: a UUID generated every time this is retrieved (this can
+- thus be used to generate UUIDs at will);
++ on any RNG behavior;
+
+ * ``write_wakeup_threshold``: when the entropy count drops below this
+ (as a number of bits), processes waiting to write to ``/dev/random``
+diff --git a/Makefile b/Makefile
+index 7d5b0bfe79602..2bb168acb8f43 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 18
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Superb Owl
+
+diff --git a/arch/alpha/include/asm/timex.h b/arch/alpha/include/asm/timex.h
+index b565cc6f408e9..f89798da8a147 100644
+--- a/arch/alpha/include/asm/timex.h
++++ b/arch/alpha/include/asm/timex.h
+@@ -28,5 +28,6 @@ static inline cycles_t get_cycles (void)
+ __asm__ __volatile__ ("rpcc %0" : "=r"(ret));
+ return ret;
+ }
++#define get_cycles get_cycles
+
+ #endif
+diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
+index 7c3b3671d6c25..6d1337c169cd3 100644
+--- a/arch/arm/include/asm/timex.h
++++ b/arch/arm/include/asm/timex.h
+@@ -11,5 +11,6 @@
+
+ typedef unsigned long cycles_t;
+ #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
++#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
+
+ #endif
+diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
+index 869a3ac6bf23a..7ccc077a60bed 100644
+--- a/arch/ia64/include/asm/timex.h
++++ b/arch/ia64/include/asm/timex.h
+@@ -39,6 +39,7 @@ get_cycles (void)
+ ret = ia64_getreg(_IA64_REG_AR_ITC);
+ return ret;
+ }
++#define get_cycles get_cycles
+
+ extern void ia64_cpu_local_tick (void);
+ extern unsigned long long ia64_native_sched_clock (void);
+diff --git a/arch/m68k/include/asm/timex.h b/arch/m68k/include/asm/timex.h
+index 6a21d93582805..f4a7a340f4cae 100644
+--- a/arch/m68k/include/asm/timex.h
++++ b/arch/m68k/include/asm/timex.h
+@@ -35,7 +35,7 @@ static inline unsigned long random_get_entropy(void)
+ {
+ if (mach_random_get_entropy)
+ return mach_random_get_entropy();
+- return 0;
++ return random_get_entropy_fallback();
+ }
+ #define random_get_entropy random_get_entropy
+
+diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
+index 8026baf46e729..2e107886f97ac 100644
+--- a/arch/mips/include/asm/timex.h
++++ b/arch/mips/include/asm/timex.h
+@@ -76,25 +76,24 @@ static inline cycles_t get_cycles(void)
+ else
+ return 0; /* no usable counter */
+ }
++#define get_cycles get_cycles
+
+ /*
+ * Like get_cycles - but where c0_count is not available we desperately
+ * use c0_random in an attempt to get at least a little bit of entropy.
+- *
+- * R6000 and R6000A neither have a count register nor a random register.
+- * That leaves no entropy source in the CPU itself.
+ */
+ static inline unsigned long random_get_entropy(void)
+ {
+- unsigned int prid = read_c0_prid();
+- unsigned int imp = prid & PRID_IMP_MASK;
++ unsigned int c0_random;
+
+- if (can_use_mips_counter(prid))
++ if (can_use_mips_counter(read_c0_prid()))
+ return read_c0_count();
+- else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
+- return read_c0_random();
++
++ if (cpu_has_3kex)
++ c0_random = (read_c0_random() >> 8) & 0x3f;
+ else
+- return 0; /* no usable register */
++ c0_random = read_c0_random() & 0x3f;
++ return (random_get_entropy_fallback() << 6) | (0x3f - c0_random);
+ }
+ #define random_get_entropy random_get_entropy
+
+diff --git a/arch/nios2/include/asm/timex.h b/arch/nios2/include/asm/timex.h
+index a769f871b28d9..40a1adc9bd03e 100644
+--- a/arch/nios2/include/asm/timex.h
++++ b/arch/nios2/include/asm/timex.h
+@@ -8,5 +8,8 @@
+ typedef unsigned long cycles_t;
+
+ extern cycles_t get_cycles(void);
++#define get_cycles get_cycles
++
++#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
+
+ #endif
+diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h
+index 06b510f8172e3..b4622cb06a75e 100644
+--- a/arch/parisc/include/asm/timex.h
++++ b/arch/parisc/include/asm/timex.h
+@@ -13,9 +13,10 @@
+
+ typedef unsigned long cycles_t;
+
+-static inline cycles_t get_cycles (void)
++static inline cycles_t get_cycles(void)
+ {
+ return mfctl(16);
+ }
++#define get_cycles get_cycles
+
+ #endif
+diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
+index fa2e76e4093a3..14b4489de52c5 100644
+--- a/arch/powerpc/include/asm/timex.h
++++ b/arch/powerpc/include/asm/timex.h
+@@ -19,6 +19,7 @@ static inline cycles_t get_cycles(void)
+ {
+ return mftb();
+ }
++#define get_cycles get_cycles
+
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_TIMEX_H */
+diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
+index 507cae273bc62..d6a7428f6248d 100644
+--- a/arch/riscv/include/asm/timex.h
++++ b/arch/riscv/include/asm/timex.h
+@@ -41,7 +41,7 @@ static inline u32 get_cycles_hi(void)
+ static inline unsigned long random_get_entropy(void)
+ {
+ if (unlikely(clint_time_val == NULL))
+- return 0;
++ return random_get_entropy_fallback();
+ return get_cycles();
+ }
+ #define random_get_entropy() random_get_entropy()
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index 2cfce42aa7fc4..ce878e85b6e4e 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -197,6 +197,7 @@ static inline cycles_t get_cycles(void)
+ {
+ return (cycles_t) get_tod_clock() >> 2;
+ }
++#define get_cycles get_cycles
+
+ int get_phys_clock(unsigned long *clock);
+ void init_cpu_timer(void);
+diff --git a/arch/sparc/include/asm/timex_32.h b/arch/sparc/include/asm/timex_32.h
+index 542915b462097..f86326a6f89e0 100644
+--- a/arch/sparc/include/asm/timex_32.h
++++ b/arch/sparc/include/asm/timex_32.h
+@@ -9,8 +9,6 @@
+
+ #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+
+-/* XXX Maybe do something better at some point... -DaveM */
+-typedef unsigned long cycles_t;
+-#define get_cycles() (0)
++#include <asm-generic/timex.h>
+
+ #endif
+diff --git a/arch/um/include/asm/timex.h b/arch/um/include/asm/timex.h
+index e392a9a5bc9bd..9f27176adb26d 100644
+--- a/arch/um/include/asm/timex.h
++++ b/arch/um/include/asm/timex.h
+@@ -2,13 +2,8 @@
+ #ifndef __UM_TIMEX_H
+ #define __UM_TIMEX_H
+
+-typedef unsigned long cycles_t;
+-
+-static inline cycles_t get_cycles (void)
+-{
+- return 0;
+-}
+-
+ #define CLOCK_TICK_RATE (HZ)
+
++#include <asm-generic/timex.h>
++
+ #endif
+diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h
+index a4a8b1b16c0c1..956e4145311b1 100644
+--- a/arch/x86/include/asm/timex.h
++++ b/arch/x86/include/asm/timex.h
+@@ -5,6 +5,15 @@
+ #include <asm/processor.h>
+ #include <asm/tsc.h>
+
++static inline unsigned long random_get_entropy(void)
++{
++ if (!IS_ENABLED(CONFIG_X86_TSC) &&
++ !cpu_feature_enabled(X86_FEATURE_TSC))
++ return random_get_entropy_fallback();
++ return rdtsc();
++}
++#define random_get_entropy random_get_entropy
++
+ /* Assume we use the PIT time source for the clock tick */
+ #define CLOCK_TICK_RATE PIT_TICK_RATE
+
+diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
+index 01a300a9700b9..fbdc3d9514943 100644
+--- a/arch/x86/include/asm/tsc.h
++++ b/arch/x86/include/asm/tsc.h
+@@ -20,13 +20,12 @@ extern void disable_TSC(void);
+
+ static inline cycles_t get_cycles(void)
+ {
+-#ifndef CONFIG_X86_TSC
+- if (!boot_cpu_has(X86_FEATURE_TSC))
++ if (!IS_ENABLED(CONFIG_X86_TSC) &&
++ !cpu_feature_enabled(X86_FEATURE_TSC))
+ return 0;
+-#endif
+-
+ return rdtsc();
+ }
++#define get_cycles get_cycles
+
+ extern struct system_counterval_t convert_art_to_tsc(u64 art);
+ extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
+diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
+index 233ec75e60c69..3f2462f2d0270 100644
+--- a/arch/xtensa/include/asm/timex.h
++++ b/arch/xtensa/include/asm/timex.h
+@@ -29,10 +29,6 @@
+
+ extern unsigned long ccount_freq;
+
+-typedef unsigned long long cycles_t;
+-
+-#define get_cycles() (0)
+-
+ void local_timer_setup(unsigned cpu);
+
+ /*
+@@ -59,4 +55,6 @@ static inline void set_linux_timer (unsigned long ccompare)
+ xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER);
+ }
+
++#include <asm-generic/timex.h>
++
+ #endif /* _XTENSA_TIMEX_H */
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index a4b638bea6f16..cc2fe0618178e 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -415,19 +415,30 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
+ loff_t offset, size_t count)
+ {
+ struct acpi_data_attr *data_attr;
+- void *base;
+- ssize_t rc;
++ void __iomem *base;
++ ssize_t size;
+
+ data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
++ size = data_attr->attr.size;
++
++ if (offset < 0)
++ return -EINVAL;
++
++ if (offset >= size)
++ return 0;
+
+- base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
++ if (count > size - offset)
++ count = size - offset;
++
++ base = acpi_os_map_iomem(data_attr->addr, size);
+ if (!base)
+ return -ENOMEM;
+- rc = memory_read_from_buffer(buf, count, &offset, base,
+- data_attr->attr.size);
+- acpi_os_unmap_memory(base, data_attr->attr.size);
+
+- return rc;
++ memcpy_fromio(buf, base + offset, count);
++
++ acpi_os_unmap_iomem(base, size);
++
++ return count;
+ }
+
+ static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 4c9adb4f3d5d7..7a66eec08e373 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -15,14 +15,12 @@
+ * - Sysctl interface.
+ *
+ * The high level overview is that there is one input pool, into which
+- * various pieces of data are hashed. Some of that data is then "credited" as
+- * having a certain number of bits of entropy. When enough bits of entropy are
+- * available, the hash is finalized and handed as a key to a stream cipher that
+- * expands it indefinitely for various consumers. This key is periodically
+- * refreshed as the various entropy collectors, described below, add data to the
+- * input pool and credit it. There is currently no Fortuna-like scheduler
+- * involved, which can lead to malicious entropy sources causing a premature
+- * reseed, and the entropy estimates are, at best, conservative guesses.
++ * various pieces of data are hashed. Prior to initialization, some of that
++ * data is then "credited" as having a certain number of bits of entropy.
++ * When enough bits of entropy are available, the hash is finalized and
++ * handed as a key to a stream cipher that expands it indefinitely for
++ * various consumers. This key is periodically refreshed as the various
++ * entropy collectors, described below, add data to the input pool.
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -53,6 +51,7 @@
+ #include <linux/completion.h>
+ #include <linux/uuid.h>
+ #include <linux/uaccess.h>
++#include <linux/siphash.h>
+ #include <crypto/chacha.h>
+ #include <crypto/blake2s.h>
+ #include <asm/processor.h>
+@@ -71,27 +70,27 @@
+ *********************************************************************/
+
+ /*
+- * crng_init = 0 --> Uninitialized
+- * 1 --> Initialized
+- * 2 --> Initialized from input_pool
+- *
+ * crng_init is protected by base_crng->lock, and only increases
+- * its value (from 0->1->2).
++ * its value (from empty->early->ready).
+ */
+-static int crng_init = 0;
+-#define crng_ready() (likely(crng_init > 1))
+-/* Various types of waiters for crng_init->2 transition. */
++static enum {
++ CRNG_EMPTY = 0, /* Little to no entropy collected */
++ CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
++ CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
++} crng_init __read_mostly = CRNG_EMPTY;
++static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
++#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
++/* Various types of waiters for crng_init->CRNG_READY transition. */
+ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+ static struct fasync_struct *fasync;
+ static DEFINE_SPINLOCK(random_ready_chain_lock);
+ static RAW_NOTIFIER_HEAD(random_ready_chain);
+
+ /* Control how we warn userspace. */
+-static struct ratelimit_state unseeded_warning =
+- RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
+ static struct ratelimit_state urandom_warning =
+ RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+-static int ratelimit_disable __read_mostly;
++static int ratelimit_disable __read_mostly =
++ IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
+ module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+
+@@ -110,6 +109,11 @@ bool rng_is_initialized(void)
+ }
+ EXPORT_SYMBOL(rng_is_initialized);
+
++static void __cold crng_set_ready(struct work_struct *work)
++{
++ static_branch_enable(&crng_is_ready);
++}
++
+ /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
+ static void try_to_generate_entropy(void);
+
+@@ -144,7 +148,7 @@ EXPORT_SYMBOL(wait_for_random_bytes);
+ * returns: 0 if callback is successfully added
+ * -EALREADY if pool is already initialised (callback not called)
+ */
+-int register_random_ready_notifier(struct notifier_block *nb)
++int __cold register_random_ready_notifier(struct notifier_block *nb)
+ {
+ unsigned long flags;
+ int ret = -EALREADY;
+@@ -162,7 +166,7 @@ int register_random_ready_notifier(struct notifier_block *nb)
+ /*
+ * Delete a previously registered readiness callback function.
+ */
+-int unregister_random_ready_notifier(struct notifier_block *nb)
++int __cold unregister_random_ready_notifier(struct notifier_block *nb)
+ {
+ unsigned long flags;
+ int ret;
+@@ -173,7 +177,7 @@ int unregister_random_ready_notifier(struct notifier_block *nb)
+ return ret;
+ }
+
+-static void process_random_ready_list(void)
++static void __cold process_random_ready_list(void)
+ {
+ unsigned long flags;
+
+@@ -182,28 +186,10 @@ static void process_random_ready_list(void)
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+ }
+
+-#define warn_unseeded_randomness(previous) \
+- _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
+-
+-static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
+-{
+-#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+- const bool print_once = false;
+-#else
+- static bool print_once __read_mostly;
+-#endif
+-
+- if (print_once || crng_ready() ||
+- (previous && (caller == READ_ONCE(*previous))))
+- return;
+- WRITE_ONCE(*previous, caller);
+-#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+- print_once = true;
+-#endif
+- if (__ratelimit(&unseeded_warning))
+- printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
+- func_name, caller, crng_init);
+-}
++#define warn_unseeded_randomness() \
++ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
++ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
++ __func__, (void *)_RET_IP_, crng_init)
+
+
+ /*********************************************************************
+@@ -216,7 +202,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void
+ *
+ * There are a few exported interfaces for use by other drivers:
+ *
+- * void get_random_bytes(void *buf, size_t nbytes)
++ * void get_random_bytes(void *buf, size_t len)
+ * u32 get_random_u32()
+ * u64 get_random_u64()
+ * unsigned int get_random_int()
+@@ -232,8 +218,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void
+ *********************************************************************/
+
+ enum {
+- CRNG_RESEED_INTERVAL = 300 * HZ,
+- CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE
++ CRNG_RESEED_START_INTERVAL = HZ,
++ CRNG_RESEED_INTERVAL = 60 * HZ
+ };
+
+ static struct {
+@@ -256,24 +242,17 @@ static DEFINE_PER_CPU(struct crng, crngs) = {
+ .lock = INIT_LOCAL_LOCK(crngs.lock),
+ };
+
+-/* Used by crng_reseed() to extract a new seed from the input pool. */
+-static bool drain_entropy(void *buf, size_t nbytes, bool force);
++/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
++static void extract_entropy(void *buf, size_t len);
+
+-/*
+- * This extracts a new crng key from the input pool, but only if there is a
+- * sufficient amount of entropy available or force is true, in order to
+- * mitigate bruteforcing of newly added bits.
+- */
+-static void crng_reseed(bool force)
++/* This extracts a new crng key from the input pool. */
++static void crng_reseed(void)
+ {
+ unsigned long flags;
+ unsigned long next_gen;
+ u8 key[CHACHA_KEY_SIZE];
+- bool finalize_init = false;
+
+- /* Only reseed if we can, to prevent brute forcing a small amount of new bits. */
+- if (!drain_entropy(key, sizeof(key), force))
+- return;
++ extract_entropy(key, sizeof(key));
+
+ /*
+ * We copy the new key into the base_crng, overwriting the old one,
+@@ -288,28 +267,10 @@ static void crng_reseed(bool force)
+ ++next_gen;
+ WRITE_ONCE(base_crng.generation, next_gen);
+ WRITE_ONCE(base_crng.birth, jiffies);
+- if (!crng_ready()) {
+- crng_init = 2;
+- finalize_init = true;
+- }
++ if (!static_branch_likely(&crng_is_ready))
++ crng_init = CRNG_READY;
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ memzero_explicit(key, sizeof(key));
+- if (finalize_init) {
+- process_random_ready_list();
+- wake_up_interruptible(&crng_init_wait);
+- kill_fasync(&fasync, SIGIO, POLL_IN);
+- pr_notice("crng init done\n");
+- if (unseeded_warning.missed) {
+- pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
+- unseeded_warning.missed);
+- unseeded_warning.missed = 0;
+- }
+- if (urandom_warning.missed) {
+- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
+- urandom_warning.missed);
+- urandom_warning.missed = 0;
+- }
+- }
+ }
+
+ /*
+@@ -345,10 +306,10 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
+ }
+
+ /*
+- * Return whether the crng seed is considered to be sufficiently
+- * old that a reseeding might be attempted. This happens if the last
+- * reseeding was CRNG_RESEED_INTERVAL ago, or during early boot, at
+- * an interval proportional to the uptime.
++ * Return whether the crng seed is considered to be sufficiently old
++ * that a reseeding is needed. This happens if the last reseeding
++ * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
++ * proportional to the uptime.
+ */
+ static bool crng_has_old_seed(void)
+ {
+@@ -360,10 +321,10 @@ static bool crng_has_old_seed(void)
+ if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
+ WRITE_ONCE(early_boot, false);
+ else
+- interval = max_t(unsigned int, 5 * HZ,
++ interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
+ (unsigned int)uptime / 2 * HZ);
+ }
+- return time_after(jiffies, READ_ONCE(base_crng.birth) + interval);
++ return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
+ }
+
+ /*
+@@ -382,28 +343,31 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+ /*
+ * For the fast path, we check whether we're ready, unlocked first, and
+ * then re-check once locked later. In the case where we're really not
+- * ready, we do fast key erasure with the base_crng directly, because
+- * this is what crng_pre_init_inject() mutates during early init.
++ * ready, we do fast key erasure with the base_crng directly, extracting
++ * when crng_init is CRNG_EMPTY.
+ */
+ if (!crng_ready()) {
+ bool ready;
+
+ spin_lock_irqsave(&base_crng.lock, flags);
+ ready = crng_ready();
+- if (!ready)
++ if (!ready) {
++ if (crng_init == CRNG_EMPTY)
++ extract_entropy(base_crng.key, sizeof(base_crng.key));
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ random_data, random_data_len);
++ }
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ if (!ready)
+ return;
+ }
+
+ /*
+- * If the base_crng is old enough, we try to reseed, which in turn
+- * bumps the generation counter that we check below.
++ * If the base_crng is old enough, we reseed, which in turn bumps the
++ * generation counter that we check below.
+ */
+ if (unlikely(crng_has_old_seed()))
+- crng_reseed(false);
++ crng_reseed();
+
+ local_lock_irqsave(&crngs.lock, flags);
+ crng = raw_cpu_ptr(&crngs);
+@@ -433,68 +397,24 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+ local_unlock_irqrestore(&crngs.lock, flags);
+ }
+
+-/*
+- * This function is for crng_init == 0 only. It loads entropy directly
+- * into the crng's key, without going through the input pool. It is,
+- * generally speaking, not very safe, but we use this only at early
+- * boot time when it's better to have something there rather than
+- * nothing.
+- *
+- * If account is set, then the crng_init_cnt counter is incremented.
+- * This shouldn't be set by functions like add_device_randomness(),
+- * where we can't trust the buffer passed to it is guaranteed to be
+- * unpredictable (so it might not have any entropy at all).
+- */
+-static void crng_pre_init_inject(const void *input, size_t len, bool account)
+-{
+- static int crng_init_cnt = 0;
+- struct blake2s_state hash;
+- unsigned long flags;
+-
+- blake2s_init(&hash, sizeof(base_crng.key));
+-
+- spin_lock_irqsave(&base_crng.lock, flags);
+- if (crng_init != 0) {
+- spin_unlock_irqrestore(&base_crng.lock, flags);
+- return;
+- }
+-
+- blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
+- blake2s_update(&hash, input, len);
+- blake2s_final(&hash, base_crng.key);
+-
+- if (account) {
+- crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
+- if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+- ++base_crng.generation;
+- crng_init = 1;
+- }
+- }
+-
+- spin_unlock_irqrestore(&base_crng.lock, flags);
+-
+- if (crng_init == 1)
+- pr_notice("fast init done\n");
+-}
+-
+-static void _get_random_bytes(void *buf, size_t nbytes)
++static void _get_random_bytes(void *buf, size_t len)
+ {
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 tmp[CHACHA_BLOCK_SIZE];
+- size_t len;
++ size_t first_block_len;
+
+- if (!nbytes)
++ if (!len)
+ return;
+
+- len = min_t(size_t, 32, nbytes);
+- crng_make_state(chacha_state, buf, len);
+- nbytes -= len;
+- buf += len;
++ first_block_len = min_t(size_t, 32, len);
++ crng_make_state(chacha_state, buf, first_block_len);
++ len -= first_block_len;
++ buf += first_block_len;
+
+- while (nbytes) {
+- if (nbytes < CHACHA_BLOCK_SIZE) {
++ while (len) {
++ if (len < CHACHA_BLOCK_SIZE) {
+ chacha20_block(chacha_state, tmp);
+- memcpy(buf, tmp, nbytes);
++ memcpy(buf, tmp, len);
+ memzero_explicit(tmp, sizeof(tmp));
+ break;
+ }
+@@ -502,7 +422,7 @@ static void _get_random_bytes(void *buf, size_t nbytes)
+ chacha20_block(chacha_state, buf);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+- nbytes -= CHACHA_BLOCK_SIZE;
++ len -= CHACHA_BLOCK_SIZE;
+ buf += CHACHA_BLOCK_SIZE;
+ }
+
+@@ -519,22 +439,20 @@ static void _get_random_bytes(void *buf, size_t nbytes)
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
+ */
+-void get_random_bytes(void *buf, size_t nbytes)
++void get_random_bytes(void *buf, size_t len)
+ {
+- static void *previous;
+-
+- warn_unseeded_randomness(&previous);
+- _get_random_bytes(buf, nbytes);
++ warn_unseeded_randomness();
++ _get_random_bytes(buf, len);
+ }
+ EXPORT_SYMBOL(get_random_bytes);
+
+-static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
++static ssize_t get_random_bytes_user(struct iov_iter *iter)
+ {
+- size_t len, left, ret = 0;
+ u32 chacha_state[CHACHA_STATE_WORDS];
+- u8 output[CHACHA_BLOCK_SIZE];
++ u8 block[CHACHA_BLOCK_SIZE];
++ size_t ret = 0, copied;
+
+- if (!nbytes)
++ if (unlikely(!iov_iter_count(iter)))
+ return 0;
+
+ /*
+@@ -548,30 +466,22 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
+ * use chacha_state after, so we can simply return those bytes to
+ * the user directly.
+ */
+- if (nbytes <= CHACHA_KEY_SIZE) {
+- ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes);
++ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
++ ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
+ goto out_zero_chacha;
+ }
+
+ for (;;) {
+- chacha20_block(chacha_state, output);
++ chacha20_block(chacha_state, block);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+
+- len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
+- left = copy_to_user(buf, output, len);
+- if (left) {
+- ret += len - left;
+- break;
+- }
+-
+- buf += len;
+- ret += len;
+- nbytes -= len;
+- if (!nbytes)
++ copied = copy_to_iter(block, sizeof(block), iter);
++ ret += copied;
++ if (!iov_iter_count(iter) || copied != sizeof(block))
+ break;
+
+- BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
++ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
+ if (ret % PAGE_SIZE == 0) {
+ if (signal_pending(current))
+ break;
+@@ -579,7 +489,7 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
+ }
+ }
+
+- memzero_explicit(output, sizeof(output));
++ memzero_explicit(block, sizeof(block));
+ out_zero_chacha:
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ return ret ? ret : -EFAULT;
+@@ -591,98 +501,69 @@ out_zero_chacha:
+ * provided by this function is okay, the function wait_for_random_bytes()
+ * should be called and return 0 at least once at any point prior.
+ */
+-struct batched_entropy {
+- union {
+- /*
+- * We make this 1.5x a ChaCha block, so that we get the
+- * remaining 32 bytes from fast key erasure, plus one full
+- * block from the detached ChaCha state. We can increase
+- * the size of this later if needed so long as we keep the
+- * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
+- */
+- u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
+- u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
+- };
+- local_lock_t lock;
+- unsigned long generation;
+- unsigned int position;
+-};
+-
+
+-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+- .lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
+- .position = UINT_MAX
+-};
+-
+-u64 get_random_u64(void)
+-{
+- u64 ret;
+- unsigned long flags;
+- struct batched_entropy *batch;
+- static void *previous;
+- unsigned long next_gen;
+-
+- warn_unseeded_randomness(&previous);
+-
+- local_lock_irqsave(&batched_entropy_u64.lock, flags);
+- batch = raw_cpu_ptr(&batched_entropy_u64);
+-
+- next_gen = READ_ONCE(base_crng.generation);
+- if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
+- next_gen != batch->generation) {
+- _get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
+- batch->position = 0;
+- batch->generation = next_gen;
+- }
+-
+- ret = batch->entropy_u64[batch->position];
+- batch->entropy_u64[batch->position] = 0;
+- ++batch->position;
+- local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
+- return ret;
+-}
+-EXPORT_SYMBOL(get_random_u64);
+-
+-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+- .lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
+- .position = UINT_MAX
+-};
+-
+-u32 get_random_u32(void)
+-{
+- u32 ret;
+- unsigned long flags;
+- struct batched_entropy *batch;
+- static void *previous;
+- unsigned long next_gen;
+-
+- warn_unseeded_randomness(&previous);
+-
+- local_lock_irqsave(&batched_entropy_u32.lock, flags);
+- batch = raw_cpu_ptr(&batched_entropy_u32);
+-
+- next_gen = READ_ONCE(base_crng.generation);
+- if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
+- next_gen != batch->generation) {
+- _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
+- batch->position = 0;
+- batch->generation = next_gen;
+- }
+-
+- ret = batch->entropy_u32[batch->position];
+- batch->entropy_u32[batch->position] = 0;
+- ++batch->position;
+- local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
+- return ret;
+-}
+-EXPORT_SYMBOL(get_random_u32);
++#define DEFINE_BATCHED_ENTROPY(type) \
++struct batch_ ##type { \
++ /* \
++ * We make this 1.5x a ChaCha block, so that we get the \
++ * remaining 32 bytes from fast key erasure, plus one full \
++ * block from the detached ChaCha state. We can increase \
++ * the size of this later if needed so long as we keep the \
++ * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
++ */ \
++ type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
++ local_lock_t lock; \
++ unsigned long generation; \
++ unsigned int position; \
++}; \
++ \
++static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
++ .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
++ .position = UINT_MAX \
++}; \
++ \
++type get_random_ ##type(void) \
++{ \
++ type ret; \
++ unsigned long flags; \
++ struct batch_ ##type *batch; \
++ unsigned long next_gen; \
++ \
++ warn_unseeded_randomness(); \
++ \
++ if (!crng_ready()) { \
++ _get_random_bytes(&ret, sizeof(ret)); \
++ return ret; \
++ } \
++ \
++ local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
++ batch = raw_cpu_ptr(&batched_entropy_##type); \
++ \
++ next_gen = READ_ONCE(base_crng.generation); \
++ if (batch->position >= ARRAY_SIZE(batch->entropy) || \
++ next_gen != batch->generation) { \
++ _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
++ batch->position = 0; \
++ batch->generation = next_gen; \
++ } \
++ \
++ ret = batch->entropy[batch->position]; \
++ batch->entropy[batch->position] = 0; \
++ ++batch->position; \
++ local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
++ return ret; \
++} \
++EXPORT_SYMBOL(get_random_ ##type);
++
++DEFINE_BATCHED_ENTROPY(u64)
++DEFINE_BATCHED_ENTROPY(u32)
+
+ #ifdef CONFIG_SMP
+ /*
+ * This function is called when the CPU is coming up, with entry
+ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
+ */
+-int random_prepare_cpu(unsigned int cpu)
++int __cold random_prepare_cpu(unsigned int cpu)
+ {
+ /*
+ * When the cpu comes back online, immediately invalidate both
+@@ -696,62 +577,30 @@ int random_prepare_cpu(unsigned int cpu)
+ }
+ #endif
+
+-/**
+- * randomize_page - Generate a random, page aligned address
+- * @start: The smallest acceptable address the caller will take.
+- * @range: The size of the area, starting at @start, within which the
+- * random address must fall.
+- *
+- * If @start + @range would overflow, @range is capped.
+- *
+- * NOTE: Historical use of randomize_range, which this replaces, presumed that
+- * @start was already page aligned. We now align it regardless.
+- *
+- * Return: A page aligned address within [start, start + range). On error,
+- * @start is returned.
+- */
+-unsigned long randomize_page(unsigned long start, unsigned long range)
+-{
+- if (!PAGE_ALIGNED(start)) {
+- range -= PAGE_ALIGN(start) - start;
+- start = PAGE_ALIGN(start);
+- }
+-
+- if (start > ULONG_MAX - range)
+- range = ULONG_MAX - start;
+-
+- range >>= PAGE_SHIFT;
+-
+- if (range == 0)
+- return start;
+-
+- return start + (get_random_long() % range << PAGE_SHIFT);
+-}
+-
+ /*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available. It is not recommended for
+ * use. Use get_random_bytes() instead. It returns the number of
+ * bytes filled in.
+ */
+-size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
++size_t __must_check get_random_bytes_arch(void *buf, size_t len)
+ {
+- size_t left = nbytes;
++ size_t left = len;
+ u8 *p = buf;
+
+ while (left) {
+ unsigned long v;
+- size_t chunk = min_t(size_t, left, sizeof(unsigned long));
++ size_t block_len = min_t(size_t, left, sizeof(unsigned long));
+
+ if (!arch_get_random_long(&v))
+ break;
+
+- memcpy(p, &v, chunk);
+- p += chunk;
+- left -= chunk;
++ memcpy(p, &v, block_len);
++ p += block_len;
++ left -= block_len;
+ }
+
+- return nbytes - left;
++ return len - left;
+ }
+ EXPORT_SYMBOL(get_random_bytes_arch);
+
+@@ -762,33 +611,28 @@ EXPORT_SYMBOL(get_random_bytes_arch);
+ *
+ * Callers may add entropy via:
+ *
+- * static void mix_pool_bytes(const void *in, size_t nbytes)
++ * static void mix_pool_bytes(const void *buf, size_t len)
+ *
+ * After which, if added entropy should be credited:
+ *
+- * static void credit_entropy_bits(size_t nbits)
++ * static void credit_init_bits(size_t bits)
+ *
+- * Finally, extract entropy via these two, with the latter one
+- * setting the entropy count to zero and extracting only if there
+- * is POOL_MIN_BITS entropy credited prior or force is true:
++ * Finally, extract entropy via:
+ *
+- * static void extract_entropy(void *buf, size_t nbytes)
+- * static bool drain_entropy(void *buf, size_t nbytes, bool force)
++ * static void extract_entropy(void *buf, size_t len)
+ *
+ **********************************************************************/
+
+ enum {
+ POOL_BITS = BLAKE2S_HASH_SIZE * 8,
+- POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
++ POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
++ POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
+ };
+
+-/* For notifying userspace should write into /dev/random. */
+-static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+-
+ static struct {
+ struct blake2s_state hash;
+ spinlock_t lock;
+- unsigned int entropy_count;
++ unsigned int init_bits;
+ } input_pool = {
+ .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
+ BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
+@@ -797,48 +641,30 @@ static struct {
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+ };
+
+-static void _mix_pool_bytes(const void *in, size_t nbytes)
++static void _mix_pool_bytes(const void *buf, size_t len)
+ {
+- blake2s_update(&input_pool.hash, in, nbytes);
++ blake2s_update(&input_pool.hash, buf, len);
+ }
+
+ /*
+- * This function adds bytes into the entropy "pool". It does not
+- * update the entropy estimate. The caller should call
+- * credit_entropy_bits if this is appropriate.
++ * This function adds bytes into the input pool. It does not
++ * update the initialization bit counter; the caller should call
++ * credit_init_bits if this is appropriate.
+ */
+-static void mix_pool_bytes(const void *in, size_t nbytes)
++static void mix_pool_bytes(const void *buf, size_t len)
+ {
+ unsigned long flags;
+
+ spin_lock_irqsave(&input_pool.lock, flags);
+- _mix_pool_bytes(in, nbytes);
++ _mix_pool_bytes(buf, len);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+ }
+
+-static void credit_entropy_bits(size_t nbits)
+-{
+- unsigned int entropy_count, orig, add;
+-
+- if (!nbits)
+- return;
+-
+- add = min_t(size_t, nbits, POOL_BITS);
+-
+- do {
+- orig = READ_ONCE(input_pool.entropy_count);
+- entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
+- } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);
+-
+- if (!crng_ready() && entropy_count >= POOL_MIN_BITS)
+- crng_reseed(false);
+-}
+-
+ /*
+ * This is an HKDF-like construction for using the hashed collected entropy
+ * as a PRF key, that's then expanded block-by-block.
+ */
+-static void extract_entropy(void *buf, size_t nbytes)
++static void extract_entropy(void *buf, size_t len)
+ {
+ unsigned long flags;
+ u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
+@@ -867,12 +693,12 @@ static void extract_entropy(void *buf, size_t nbytes)
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+ memzero_explicit(next_key, sizeof(next_key));
+
+- while (nbytes) {
+- i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
++ while (len) {
++ i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
+ /* output = HASHPRF(seed, RDSEED || ++counter) */
+ ++block.counter;
+ blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
+- nbytes -= i;
++ len -= i;
+ buf += i;
+ }
+
+@@ -880,23 +706,43 @@ static void extract_entropy(void *buf, size_t nbytes)
+ memzero_explicit(&block, sizeof(block));
+ }
+
+-/*
+- * First we make sure we have POOL_MIN_BITS of entropy in the pool unless force
+- * is true, and then we set the entropy count to zero (but don't actually touch
+- * any data). Only then can we extract a new key with extract_entropy().
+- */
+-static bool drain_entropy(void *buf, size_t nbytes, bool force)
++#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
++
++static void __cold _credit_init_bits(size_t bits)
+ {
+- unsigned int entropy_count;
++ static struct execute_work set_ready;
++ unsigned int new, orig, add;
++ unsigned long flags;
++
++ if (!bits)
++ return;
++
++ add = min_t(size_t, bits, POOL_BITS);
++
+ do {
+- entropy_count = READ_ONCE(input_pool.entropy_count);
+- if (!force && entropy_count < POOL_MIN_BITS)
+- return false;
+- } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
+- extract_entropy(buf, nbytes);
+- wake_up_interruptible(&random_write_wait);
+- kill_fasync(&fasync, SIGIO, POLL_OUT);
+- return true;
++ orig = READ_ONCE(input_pool.init_bits);
++ new = min_t(unsigned int, POOL_BITS, orig + add);
++ } while (cmpxchg(&input_pool.init_bits, orig, new) != orig);
++
++ if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
++ crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
++ execute_in_process_context(crng_set_ready, &set_ready);
++ process_random_ready_list();
++ wake_up_interruptible(&crng_init_wait);
++ kill_fasync(&fasync, SIGIO, POLL_IN);
++ pr_notice("crng init done\n");
++ if (urandom_warning.missed)
++ pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
++ urandom_warning.missed);
++ } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
++ spin_lock_irqsave(&base_crng.lock, flags);
++ /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
++ if (crng_init == CRNG_EMPTY) {
++ extract_entropy(base_crng.key, sizeof(base_crng.key));
++ crng_init = CRNG_EARLY;
++ }
++ spin_unlock_irqrestore(&base_crng.lock, flags);
++ }
+ }
+
+
+@@ -907,15 +753,13 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force)
+ * The following exported functions are used for pushing entropy into
+ * the above entropy accumulation routines:
+ *
+- * void add_device_randomness(const void *buf, size_t size);
+- * void add_input_randomness(unsigned int type, unsigned int code,
+- * unsigned int value);
+- * void add_disk_randomness(struct gendisk *disk);
+- * void add_hwgenerator_randomness(const void *buffer, size_t count,
+- * size_t entropy);
+- * void add_bootloader_randomness(const void *buf, size_t size);
+- * void add_vmfork_randomness(const void *unique_vm_id, size_t size);
++ * void add_device_randomness(const void *buf, size_t len);
++ * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
++ * void add_bootloader_randomness(const void *buf, size_t len);
++ * void add_vmfork_randomness(const void *unique_vm_id, size_t len);
+ * void add_interrupt_randomness(int irq);
++ * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
++ * void add_disk_randomness(struct gendisk *disk);
+ *
+ * add_device_randomness() adds data to the input pool that
+ * is likely to differ between two devices (or possibly even per boot).
+@@ -925,26 +769,13 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force)
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
+- * add_input_randomness() uses the input layer interrupt timing, as well
+- * as the event type information from the hardware.
+- *
+- * add_disk_randomness() uses what amounts to the seek time of block
+- * layer request events, on a per-disk_devt basis, as input to the
+- * entropy pool. Note that high-speed solid state drives with very low
+- * seek times do not make for good sources of entropy, as their seek
+- * times are usually fairly consistent.
+- *
+- * The above two routines try to estimate how many bits of entropy
+- * to credit. They do this by keeping track of the first and second
+- * order deltas of the event timings.
+- *
+ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
+ * entropy as specified by the caller. If the entropy pool is full it will
+ * block until more entropy is needed.
+ *
+- * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
+- * add_device_randomness(), depending on whether or not the configuration
+- * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
++ * add_bootloader_randomness() is called by bootloader drivers, such as EFI
++ * and device tree, and credits its input depending on whether or not the
++ * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ *
+ * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
+ * representing the current instance of a VM to the pool, without crediting,
+@@ -955,6 +786,19 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force)
+ * as inputs, it feeds the input pool roughly once a second or after 64
+ * interrupts, crediting 1 bit of entropy for whichever comes first.
+ *
++ * add_input_randomness() uses the input layer interrupt timing, as well
++ * as the event type information from the hardware.
++ *
++ * add_disk_randomness() uses what amounts to the seek time of block
++ * layer request events, on a per-disk_devt basis, as input to the
++ * entropy pool. Note that high-speed solid state drives with very low
++ * seek times do not make for good sources of entropy, as their seek
++ * times are usually fairly consistent.
++ *
++ * The last two routines try to estimate how many bits of entropy
++ * to credit. They do this by keeping track of the first and second
++ * order deltas of the event timings.
++ *
+ **********************************************************************/
+
+ static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+@@ -972,46 +816,42 @@ early_param("random.trust_bootloader", parse_trust_bootloader);
+
+ /*
+ * The first collection of entropy occurs at system boot while interrupts
+- * are still turned off. Here we push in RDSEED, a timestamp, and utsname().
+- * Depending on the above configuration knob, RDSEED may be considered
+- * sufficient for initialization. Note that much earlier setup may already
+- * have pushed entropy into the input pool by the time we get here.
++ * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
++ * utsname(), and the command line. Depending on the above configuration knob,
++ * RDSEED may be considered sufficient for initialization. Note that much
++ * earlier setup may already have pushed entropy into the input pool by the
++ * time we get here.
+ */
+-int __init rand_initialize(void)
++int __init random_init(const char *command_line)
+ {
+- size_t i;
+ ktime_t now = ktime_get_real();
+- bool arch_init = true;
+- unsigned long rv;
++ unsigned int i, arch_bytes;
++ unsigned long entropy;
+
+ #if defined(LATENT_ENTROPY_PLUGIN)
+ static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
+ _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
+ #endif
+
+- for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
+- if (!arch_get_random_seed_long_early(&rv) &&
+- !arch_get_random_long_early(&rv)) {
+- rv = random_get_entropy();
+- arch_init = false;
++ for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE;
++ i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
++ if (!arch_get_random_seed_long_early(&entropy) &&
++ !arch_get_random_long_early(&entropy)) {
++ entropy = random_get_entropy();
++ arch_bytes -= sizeof(entropy);
+ }
+- _mix_pool_bytes(&rv, sizeof(rv));
++ _mix_pool_bytes(&entropy, sizeof(entropy));
+ }
+ _mix_pool_bytes(&now, sizeof(now));
+ _mix_pool_bytes(utsname(), sizeof(*(utsname())));
++ _mix_pool_bytes(command_line, strlen(command_line));
++ add_latent_entropy();
+
+- extract_entropy(base_crng.key, sizeof(base_crng.key));
+- ++base_crng.generation;
+-
+- if (arch_init && trust_cpu && !crng_ready()) {
+- crng_init = 2;
+- pr_notice("crng init done (trusting CPU's manufacturer)\n");
+- }
++ if (crng_ready())
++ crng_reseed();
1351 |
++ else if (trust_cpu) |
1352 |
++ credit_init_bits(arch_bytes * 8); |
1353 |
+ |
1354 |
+- if (ratelimit_disable) { |
1355 |
+- urandom_warning.interval = 0; |
1356 |
+- unseeded_warning.interval = 0; |
1357 |
+- } |
1358 |
+ return 0; |
1359 |
+ } |
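The crediting logic above can be pictured with a standalone sketch: only words that really came from the architectural seed source count toward arch_bytes, while fallback timestamps are mixed in but never credited. A minimal userspace analogue, assuming a hypothetical fake_arch_seed() in place of arch_get_random_seed_long_early():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <time.h>

    #define SEED_SIZE 64 /* stands in for BLAKE2S_BLOCK_SIZE */

    /* Hypothetical stand-in for arch_get_random_seed_long_early(); fails
     * half the time to model hardware without a seed instruction. */
    static int fake_arch_seed(unsigned long *v)
    {
        if (rand() & 1)
            return 0;
        *v = ((unsigned long)rand() << 16) ^ (unsigned long)rand();
        return 1;
    }

    int main(void)
    {
        unsigned char seed[SEED_SIZE];
        unsigned int i, arch_bytes = SEED_SIZE;
        unsigned long word;

        srand((unsigned)time(NULL));
        for (i = 0; i < SEED_SIZE; i += sizeof(word)) {
            if (!fake_arch_seed(&word)) {
                /* fallback: a timestamp is mixed in but earns no credit */
                word = (unsigned long)clock();
                arch_bytes -= sizeof(word);
            }
            memcpy(seed + i, &word, sizeof(word));
        }
        printf("would credit %u of %u bits\n", arch_bytes * 8, SEED_SIZE * 8);
        return 0;
    }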
1360 |
+
+@@ -1023,164 +863,46 @@ int __init rand_initialize(void)
+ * the entropy pool having similar initial state across largely
+ * identical devices.
+ */
+-void add_device_randomness(const void *buf, size_t size)
++void add_device_randomness(const void *buf, size_t len)
+ {
+- unsigned long cycles = random_get_entropy();
+- unsigned long flags, now = jiffies;
+-
+- if (crng_init == 0 && size)
+- crng_pre_init_inject(buf, size, false);
++ unsigned long entropy = random_get_entropy();
++ unsigned long flags;
+
+ spin_lock_irqsave(&input_pool.lock, flags);
+- _mix_pool_bytes(&cycles, sizeof(cycles));
+- _mix_pool_bytes(&now, sizeof(now));
+- _mix_pool_bytes(buf, size);
++ _mix_pool_bytes(&entropy, sizeof(entropy));
++ _mix_pool_bytes(buf, len);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+ }
+ EXPORT_SYMBOL(add_device_randomness);
+
+-/* There is one of these per entropy source */
+-struct timer_rand_state {
+- unsigned long last_time;
+- long last_delta, last_delta2;
+-};
+-
+-/*
+- * This function adds entropy to the entropy "pool" by using timing
+- * delays. It uses the timer_rand_state structure to make an estimate
+- * of how many bits of entropy this call has added to the pool.
+- *
+- * The number "num" is also added to the pool - it should somehow describe
+- * the type of event which just happened. This is currently 0-255 for
+- * keyboard scan codes, and 256 upwards for interrupts.
+- */
+-static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
+-{
+- unsigned long cycles = random_get_entropy(), now = jiffies, flags;
+- long delta, delta2, delta3;
+-
+- spin_lock_irqsave(&input_pool.lock, flags);
+- _mix_pool_bytes(&cycles, sizeof(cycles));
+- _mix_pool_bytes(&now, sizeof(now));
+- _mix_pool_bytes(&num, sizeof(num));
+- spin_unlock_irqrestore(&input_pool.lock, flags);
+-
+- /*
+- * Calculate number of bits of randomness we probably added.
+- * We take into account the first, second and third-order deltas
+- * in order to make our estimate.
+- */
+- delta = now - READ_ONCE(state->last_time);
+- WRITE_ONCE(state->last_time, now);
+-
+- delta2 = delta - READ_ONCE(state->last_delta);
+- WRITE_ONCE(state->last_delta, delta);
+-
+- delta3 = delta2 - READ_ONCE(state->last_delta2);
+- WRITE_ONCE(state->last_delta2, delta2);
+-
+- if (delta < 0)
+- delta = -delta;
+- if (delta2 < 0)
+- delta2 = -delta2;
+- if (delta3 < 0)
+- delta3 = -delta3;
+- if (delta > delta2)
+- delta = delta2;
+- if (delta > delta3)
+- delta = delta3;
+-
+- /*
+- * delta is now minimum absolute delta.
+- * Round down by 1 bit on general principles,
+- * and limit entropy estimate to 12 bits.
+- */
+- credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11));
+-}
+-
+-void add_input_randomness(unsigned int type, unsigned int code,
+- unsigned int value)
+-{
+- static unsigned char last_value;
+- static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
+-
+- /* Ignore autorepeat and the like. */
+- if (value == last_value)
+- return;
+-
+- last_value = value;
+- add_timer_randomness(&input_timer_state,
+- (type << 4) ^ code ^ (code >> 4) ^ value);
+-}
+-EXPORT_SYMBOL_GPL(add_input_randomness);
+-
+-#ifdef CONFIG_BLOCK
+-void add_disk_randomness(struct gendisk *disk)
+-{
+- if (!disk || !disk->random)
+- return;
+- /* First major is 1, so we get >= 0x200 here. */
+- add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
+-}
+-EXPORT_SYMBOL_GPL(add_disk_randomness);
+-
+-void rand_initialize_disk(struct gendisk *disk)
+-{
+- struct timer_rand_state *state;
+-
+- /*
+- * If kzalloc returns null, we just won't use that entropy
+- * source.
+- */
+- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+- if (state) {
+- state->last_time = INITIAL_JIFFIES;
+- disk->random = state;
+- }
+-}
+-#endif
+-
+ /*
+ * Interface for in-kernel drivers of true hardware RNGs.
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
+ */
+-void add_hwgenerator_randomness(const void *buffer, size_t count,
+- size_t entropy)
++void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
+ {
+- if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) {
+- crng_pre_init_inject(buffer, count, true);
+- mix_pool_bytes(buffer, count);
+- return;
+- }
++ mix_pool_bytes(buf, len);
++ credit_init_bits(entropy);
+
+ /*
+- * Throttle writing if we're above the trickle threshold.
+- * We'll be woken up again once below POOL_MIN_BITS, when
+- * the calling thread is about to terminate, or once
+- * CRNG_RESEED_INTERVAL has elapsed.
++ * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
++ * we're not yet initialized.
+ */
+- wait_event_interruptible_timeout(random_write_wait,
+- !system_wq || kthread_should_stop() ||
+- input_pool.entropy_count < POOL_MIN_BITS,
+- CRNG_RESEED_INTERVAL);
+- mix_pool_bytes(buffer, count);
+- credit_entropy_bits(entropy);
++ if (!kthread_should_stop() && crng_ready())
++ schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
+ }
+ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
+
+ /*
+- * Handle random seed passed by bootloader.
+- * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
+- * it would be regarded as device data.
+- * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
++ * Handle random seed passed by bootloader, and credit it if
++ * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ */
+-void add_bootloader_randomness(const void *buf, size_t size)
++void __cold add_bootloader_randomness(const void *buf, size_t len)
+ {
++ mix_pool_bytes(buf, len);
+ if (trust_bootloader)
+- add_hwgenerator_randomness(buf, size, size * 8);
+- else
+- add_device_randomness(buf, size);
++ credit_init_bits(len * 8);
+ }
+ EXPORT_SYMBOL_GPL(add_bootloader_randomness);
+
+@@ -1192,11 +914,11 @@ static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
+ * don't credit it, but we do immediately force a reseed after so
+ * that it's used by the crng posthaste.
+ */
+-void add_vmfork_randomness(const void *unique_vm_id, size_t size)
++void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
+ {
+- add_device_randomness(unique_vm_id, size);
++ add_device_randomness(unique_vm_id, len);
+ if (crng_ready()) {
+- crng_reseed(true);
++ crng_reseed();
+ pr_notice("crng reseeded due to virtual machine fork\n");
+ }
+ blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
+@@ -1205,13 +927,13 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t size)
+ EXPORT_SYMBOL_GPL(add_vmfork_randomness);
+ #endif
+
+-int register_random_vmfork_notifier(struct notifier_block *nb)
++int __cold register_random_vmfork_notifier(struct notifier_block *nb)
+ {
+ return blocking_notifier_chain_register(&vmfork_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
+
+-int unregister_random_vmfork_notifier(struct notifier_block *nb)
++int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
+ {
+ return blocking_notifier_chain_unregister(&vmfork_chain, nb);
+ }
+@@ -1223,17 +945,15 @@ struct fast_pool {
+ unsigned long pool[4];
+ unsigned long last;
+ unsigned int count;
+- u16 reg_idx;
+ };
+
+ static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
+ #ifdef CONFIG_64BIT
+- /* SipHash constants */
+- .pool = { 0x736f6d6570736575UL, 0x646f72616e646f6dUL,
+- 0x6c7967656e657261UL, 0x7465646279746573UL }
++#define FASTMIX_PERM SIPHASH_PERMUTATION
++ .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
+ #else
+- /* HalfSipHash constants */
+- .pool = { 0, 0, 0x6c796765U, 0x74656462U }
++#define FASTMIX_PERM HSIPHASH_PERMUTATION
++ .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
+ #endif
+ };
+
+@@ -1241,27 +961,16 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
+ * This is [Half]SipHash-1-x, starting from an empty key. Because
+ * the key is fixed, it assumes that its inputs are non-malicious,
+ * and therefore this has no security on its own. s represents the
+- * 128 or 256-bit SipHash state, while v represents a 128-bit input.
++ * four-word SipHash state, while v represents a two-word input.
+ */
+-static void fast_mix(unsigned long s[4], const unsigned long *v)
++static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
+ {
+- size_t i;
+-
+- for (i = 0; i < 16 / sizeof(long); ++i) {
+- s[3] ^= v[i];
+-#ifdef CONFIG_64BIT
+- s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32);
+- s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2];
+- s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0];
+- s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32);
+-#else
+- s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16);
+- s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2];
+- s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0];
+- s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16);
+-#endif
+- s[0] ^= v[i];
+- }
++ s[3] ^= v1;
++ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
++ s[0] ^= v1;
++ s[3] ^= v2;
++ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
++ s[0] ^= v2;
+ }
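The two FASTMIX_PERM applications above are easy to exercise outside the kernel, since the permutation macro and its constants are defined verbatim in the siphash.h hunk later in this patch. A standalone sketch of the 64-bit variant (the two sample inputs are made up; in the kernel they would be a cycle count and a return address):

    #include <stdint.h>
    #include <stdio.h>

    static inline uint64_t rol64(uint64_t w, unsigned int s)
    {
        return (w << s) | (w >> (64 - s));
    }

    /* copied from the include/linux/siphash.h hunk in this patch */
    #define SIPHASH_PERMUTATION(a, b, c, d) ( \
        (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
        (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
        (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
        (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))

    #define SIPHASH_CONST_0 0x736f6d6570736575ULL
    #define SIPHASH_CONST_1 0x646f72616e646f6dULL
    #define SIPHASH_CONST_2 0x6c7967656e657261ULL
    #define SIPHASH_CONST_3 0x7465646279746573ULL

    /* mirrors the 64-bit fast_mix() above: SipHash-1-x over two words */
    static void fast_mix(uint64_t s[4], uint64_t v1, uint64_t v2)
    {
        s[3] ^= v1;
        SIPHASH_PERMUTATION(s[0], s[1], s[2], s[3]);
        s[0] ^= v1;
        s[3] ^= v2;
        SIPHASH_PERMUTATION(s[0], s[1], s[2], s[3]);
        s[0] ^= v2;
    }

    int main(void)
    {
        uint64_t pool[4] = { SIPHASH_CONST_0, SIPHASH_CONST_1,
                             SIPHASH_CONST_2, SIPHASH_CONST_3 };

        fast_mix(pool, 0x12345678, 0xdeadbeef);
        printf("%016llx %016llx\n", (unsigned long long)pool[0],
               (unsigned long long)pool[3]);
        return 0;
    }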
1630 |
+
+ #ifdef CONFIG_SMP
+@@ -1269,7 +978,7 @@ static void fast_mix(unsigned long s[4], const unsigned long *v)
+ * This function is called when the CPU has just come online, with
+ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
+ */
+-int random_online_cpu(unsigned int cpu)
++int __cold random_online_cpu(unsigned int cpu)
+ {
+ /*
+ * During CPU shutdown and before CPU onlining, add_interrupt_
+@@ -1287,33 +996,18 @@ int random_online_cpu(unsigned int cpu)
+ }
+ #endif
+
+-static unsigned long get_reg(struct fast_pool *f, struct pt_regs *regs)
+-{
+- unsigned long *ptr = (unsigned long *)regs;
+- unsigned int idx;
+-
+- if (regs == NULL)
+- return 0;
+- idx = READ_ONCE(f->reg_idx);
+- if (idx >= sizeof(struct pt_regs) / sizeof(unsigned long))
+- idx = 0;
+- ptr += idx++;
+- WRITE_ONCE(f->reg_idx, idx);
+- return *ptr;
+-}
+-
+ static void mix_interrupt_randomness(struct work_struct *work)
+ {
+ struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
+ /*
+- * The size of the copied stack pool is explicitly 16 bytes so that we
+- * tax mix_pool_byte()'s compression function the same amount on all
+- * platforms. This means on 64-bit we copy half the pool into this,
+- * while on 32-bit we copy all of it. The entropy is supposed to be
+- * sufficiently dispersed between bits that in the sponge-like
+- * half case, on average we don't wind up "losing" some.
++ * The size of the copied stack pool is explicitly 2 longs so that we
++ * only ever ingest half of the siphash output each time, retaining
++ * the other half as the next "key" that carries over. The entropy is
++ * supposed to be sufficiently dispersed between bits so on average
++ * we don't wind up "losing" some.
+ */
+- u8 pool[16];
++ unsigned long pool[2];
++ unsigned int count;
+
+ /* Check to see if we're running on the wrong CPU due to hotplug. */
+ local_irq_disable();
+@@ -1327,17 +1021,13 @@ static void mix_interrupt_randomness(struct work_struct *work)
+ * consistent view, before we reenable irqs again.
+ */
+ memcpy(pool, fast_pool->pool, sizeof(pool));
++ count = fast_pool->count;
+ fast_pool->count = 0;
+ fast_pool->last = jiffies;
+ local_irq_enable();
+
+- if (unlikely(crng_init == 0)) {
+- crng_pre_init_inject(pool, sizeof(pool), true);
+- mix_pool_bytes(pool, sizeof(pool));
+- } else {
+- mix_pool_bytes(pool, sizeof(pool));
+- credit_entropy_bits(1);
+- }
++ mix_pool_bytes(pool, sizeof(pool));
++ credit_init_bits(max(1u, (count & U16_MAX) / 64));
+
+ memzero_explicit(pool, sizeof(pool));
+ }
+@@ -1345,37 +1035,19 @@ static void mix_interrupt_randomness(struct work_struct *work)
+ void add_interrupt_randomness(int irq)
+ {
+ enum { MIX_INFLIGHT = 1U << 31 };
+- unsigned long cycles = random_get_entropy(), now = jiffies;
++ unsigned long entropy = random_get_entropy();
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned int new_count;
+- union {
+- u32 u32[4];
+- u64 u64[2];
+- unsigned long longs[16 / sizeof(long)];
+- } irq_data;
+-
+- if (cycles == 0)
+- cycles = get_reg(fast_pool, regs);
+-
+- if (sizeof(unsigned long) == 8) {
+- irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
+- irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
+- } else {
+- irq_data.u32[0] = cycles ^ irq;
+- irq_data.u32[1] = now;
+- irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
+- irq_data.u32[3] = get_reg(fast_pool, regs);
+- }
+
+- fast_mix(fast_pool->pool, irq_data.longs);
++ fast_mix(fast_pool->pool, entropy,
++ (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
+ new_count = ++fast_pool->count;
+
+ if (new_count & MIX_INFLIGHT)
+ return;
+
+- if (new_count < 64 && (!time_after(now, fast_pool->last + HZ) ||
+- unlikely(crng_init == 0)))
++ if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ))
+ return;
+
+ if (unlikely(!fast_pool->mix.func))
+@@ -1385,6 +1057,126 @@ void add_interrupt_randomness(int irq)
+ }
+ EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+
++/* There is one of these per entropy source */
++struct timer_rand_state {
++ unsigned long last_time;
++ long last_delta, last_delta2;
++};
++
++/*
++ * This function adds entropy to the entropy "pool" by using timing
++ * delays. It uses the timer_rand_state structure to make an estimate
++ * of how many bits of entropy this call has added to the pool. The
++ * value "num" is also added to the pool; it should somehow describe
++ * the type of event that just happened.
++ */
++static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
++{
++ unsigned long entropy = random_get_entropy(), now = jiffies, flags;
++ long delta, delta2, delta3;
++ unsigned int bits;
++
++ /*
++ * If we're in a hard IRQ, add_interrupt_randomness() will be called
++ * sometime after, so mix into the fast pool.
++ */
++ if (in_hardirq()) {
++ fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
++ } else {
++ spin_lock_irqsave(&input_pool.lock, flags);
++ _mix_pool_bytes(&entropy, sizeof(entropy));
++ _mix_pool_bytes(&num, sizeof(num));
++ spin_unlock_irqrestore(&input_pool.lock, flags);
++ }
++
++ if (crng_ready())
++ return;
++
++ /*
++ * Calculate number of bits of randomness we probably added.
++ * We take into account the first, second and third-order deltas
++ * in order to make our estimate.
++ */
++ delta = now - READ_ONCE(state->last_time);
++ WRITE_ONCE(state->last_time, now);
++
++ delta2 = delta - READ_ONCE(state->last_delta);
++ WRITE_ONCE(state->last_delta, delta);
++
++ delta3 = delta2 - READ_ONCE(state->last_delta2);
++ WRITE_ONCE(state->last_delta2, delta2);
++
++ if (delta < 0)
++ delta = -delta;
++ if (delta2 < 0)
++ delta2 = -delta2;
++ if (delta3 < 0)
++ delta3 = -delta3;
++ if (delta > delta2)
++ delta = delta2;
++ if (delta > delta3)
++ delta = delta3;
++
++ /*
++ * delta is now minimum absolute delta. Round down by 1 bit
++ * on general principles, and limit entropy estimate to 11 bits.
++ */
++ bits = min(fls(delta >> 1), 11);
++
++ /*
++ * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
++ * will run after this, which uses a different crediting scheme of 1 bit
++ * per every 64 interrupts. In order to let that function do accounting
++ * close to the one in this function, we credit a full 64/64 bit per bit,
++ * and then subtract one to account for the extra one added.
++ */
++ if (in_hardirq())
++ this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
++ else
++ _credit_init_bits(bits);
++}
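To sanity-check the crediting comment above with concrete numbers: a minimum absolute delta of 9 jiffies gives fls(9 >> 1) = fls(4) = 3 bits; on the hard-IRQ path the fast-pool count then grows by max(1, 3 * 64) - 1 = 191, and together with the interrupt's own increment, mix_interrupt_randomness() later credits max(1, 192 / 64) = 3 bits. A small sketch of that arithmetic, with fls() reimplemented for userspace:

    #include <stdio.h>

    /* minimal fls(): 1-based index of the highest set bit, 0 for 0 */
    static unsigned int fls(unsigned long x)
    {
        unsigned int r = 0;

        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    int main(void)
    {
        unsigned long delta = 9; /* minimum absolute delta, in jiffies */
        unsigned int bits = fls(delta >> 1);
        unsigned int inc, count, credited;

        if (bits > 11)
            bits = 11; /* same cap as the code above */

        inc = (bits ? bits * 64 : 1) - 1;       /* max(1u, bits * 64) - 1 */
        count = inc + 1;                        /* plus the IRQ's own ++count */
        credited = count / 64 ? count / 64 : 1; /* max(1u, count / 64) */
        printf("delta=%lu -> bits=%u -> credited=%u\n", delta, bits, credited);
        return 0;
    }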
1827 |
++
++void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
++{
++ static unsigned char last_value;
++ static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
++
++ /* Ignore autorepeat and the like. */
++ if (value == last_value)
++ return;
++
++ last_value = value;
++ add_timer_randomness(&input_timer_state,
++ (type << 4) ^ code ^ (code >> 4) ^ value);
++}
++EXPORT_SYMBOL_GPL(add_input_randomness);
++
++#ifdef CONFIG_BLOCK
++void add_disk_randomness(struct gendisk *disk)
++{
++ if (!disk || !disk->random)
++ return;
++ /* First major is 1, so we get >= 0x200 here. */
++ add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
++}
++EXPORT_SYMBOL_GPL(add_disk_randomness);
++
++void __cold rand_initialize_disk(struct gendisk *disk)
++{
++ struct timer_rand_state *state;
++
++ /*
++ * If kzalloc returns null, we just won't use that entropy
++ * source.
++ */
++ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
++ if (state) {
++ state->last_time = INITIAL_JIFFIES;
++ disk->random = state;
++ }
++}
++#endif
++
+ /*
+ * Each time the timer fires, we expect that we got an unpredictable
+ * jump in the cycle counter. Even if the timer is running on another
+@@ -1398,40 +1190,40 @@ EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+ *
+ * So the re-arming always happens in the entropy loop itself.
+ */
+-static void entropy_timer(struct timer_list *t)
++static void __cold entropy_timer(struct timer_list *t)
+ {
+- credit_entropy_bits(1);
++ credit_init_bits(1);
+ }
+
+ /*
+ * If we have an actual cycle counter, see if we can
+ * generate enough entropy with timing noise
+ */
+-static void try_to_generate_entropy(void)
++static void __cold try_to_generate_entropy(void)
+ {
+ struct {
+- unsigned long cycles;
++ unsigned long entropy;
+ struct timer_list timer;
+ } stack;
+
+- stack.cycles = random_get_entropy();
++ stack.entropy = random_get_entropy();
+
+ /* Slow counter - or none. Don't even bother */
+- if (stack.cycles == random_get_entropy())
++ if (stack.entropy == random_get_entropy())
+ return;
+
+ timer_setup_on_stack(&stack.timer, entropy_timer, 0);
+ while (!crng_ready() && !signal_pending(current)) {
+ if (!timer_pending(&stack.timer))
+ mod_timer(&stack.timer, jiffies + 1);
+- mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
++ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
+ schedule();
+- stack.cycles = random_get_entropy();
++ stack.entropy = random_get_entropy();
+ }
+
+ del_timer_sync(&stack.timer);
+ destroy_timer_on_stack(&stack.timer);
+- mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
++ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
+ }
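The same idea is easy to observe from userspace: sample a high-resolution clock in a loop and fold the deltas together. This is only a visualization of the jitter being harvested, not a substitute for the kernel's scheme; the rotate-and-XOR fold below is an arbitrary choice for the demo:

    #include <stdio.h>
    #include <time.h>

    /* crude stand-in for random_get_entropy() */
    static unsigned long ticks(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long)ts.tv_sec * 1000000000UL + (unsigned long)ts.tv_nsec;
    }

    int main(void)
    {
        unsigned long pool = 0, prev = ticks();
        int i;

        for (i = 0; i < 1000; i++) {
            unsigned long now = ticks();

            /* rotate the pool and XOR in the timing delta */
            pool = (pool << 7 | pool >> (sizeof(pool) * 8 - 7)) ^ (now - prev);
            prev = now;
        }
        printf("folded jitter: %lx\n", pool);
        return 0;
    }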
1920 |
+
+
+@@ -1463,9 +1255,12 @@ static void try_to_generate_entropy(void)
+ *
+ **********************************************************************/
+
+-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
+- flags)
++SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
+ {
++ struct iov_iter iter;
++ struct iovec iov;
++ int ret;
++
+ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
+ return -EINVAL;
+
+@@ -1476,72 +1271,60 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
+ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
+ return -EINVAL;
+
+- if (count > INT_MAX)
+- count = INT_MAX;
+-
+- if (!(flags & GRND_INSECURE) && !crng_ready()) {
+- int ret;
+-
++ if (!crng_ready() && !(flags & GRND_INSECURE)) {
+ if (flags & GRND_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_for_random_bytes();
+ if (unlikely(ret))
+ return ret;
+ }
+- return get_random_bytes_user(buf, count);
++
++ ret = import_single_range(READ, ubuf, len, &iov, &iter);
++ if (unlikely(ret))
++ return ret;
++ return get_random_bytes_user(&iter);
+ }
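From userspace the reworked syscall is reached through the usual glibc wrapper (available since glibc 2.25); a non-blocking caller sees EAGAIN in errno when the pool is not yet initialized, matching the GRND_NONBLOCK branch above:

    #include <stdio.h>
    #include <errno.h>
    #include <sys/random.h>

    int main(void)
    {
        unsigned char buf[16];
        ssize_t n = getrandom(buf, sizeof(buf), GRND_NONBLOCK);

        if (n < 0 && errno == EAGAIN) {
            /* pool not initialized yet; a blocking call would wait instead */
            fprintf(stderr, "RNG not ready\n");
            return 1;
        }
        for (ssize_t i = 0; i < n; i++)
            printf("%02x", buf[i]);
        printf("\n");
        return 0;
    }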
1961 |
+
+ static __poll_t random_poll(struct file *file, poll_table *wait)
+ {
+- __poll_t mask;
+-
+ poll_wait(file, &crng_init_wait, wait);
+- poll_wait(file, &random_write_wait, wait);
+- mask = 0;
+- if (crng_ready())
+- mask |= EPOLLIN | EPOLLRDNORM;
+- if (input_pool.entropy_count < POOL_MIN_BITS)
+- mask |= EPOLLOUT | EPOLLWRNORM;
+- return mask;
++ return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
+ }
+
+-static int write_pool(const char __user *ubuf, size_t count)
++static ssize_t write_pool_user(struct iov_iter *iter)
+ {
+- size_t len;
+- int ret = 0;
+ u8 block[BLAKE2S_BLOCK_SIZE];
++ ssize_t ret = 0;
++ size_t copied;
+
+- while (count) {
+- len = min(count, sizeof(block));
+- if (copy_from_user(block, ubuf, len)) {
+- ret = -EFAULT;
+- goto out;
++ if (unlikely(!iov_iter_count(iter)))
++ return 0;
++
++ for (;;) {
++ copied = copy_from_iter(block, sizeof(block), iter);
++ ret += copied;
++ mix_pool_bytes(block, copied);
++ if (!iov_iter_count(iter) || copied != sizeof(block))
++ break;
++
++ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
++ if (ret % PAGE_SIZE == 0) {
++ if (signal_pending(current))
++ break;
++ cond_resched();
+ }
+- count -= len;
+- ubuf += len;
+- mix_pool_bytes(block, len);
+- cond_resched();
+ }
+
+-out:
+ memzero_explicit(block, sizeof(block));
+- return ret;
++ return ret ? ret : -EFAULT;
+ }
+
+-static ssize_t random_write(struct file *file, const char __user *buffer,
+- size_t count, loff_t *ppos)
++static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
+ {
+- int ret;
+-
+- ret = write_pool(buffer, count);
+- if (ret)
+- return ret;
+-
+- return (ssize_t)count;
++ return write_pool_user(iter);
+ }
+
+-static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
+- loff_t *ppos)
++static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
+ {
+ static int maxwarn = 10;
+
+@@ -1552,37 +1335,38 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
+ if (!crng_ready())
+ try_to_generate_entropy();
+
+- if (!crng_ready() && maxwarn > 0) {
+- maxwarn--;
+- if (__ratelimit(&urandom_warning))
+- pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
+- current->comm, nbytes);
++ if (!crng_ready()) {
++ if (!ratelimit_disable && maxwarn <= 0)
++ ++urandom_warning.missed;
++ else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
++ --maxwarn;
++ pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
++ current->comm, iov_iter_count(iter));
++ }
+ }
+
+- return get_random_bytes_user(buf, nbytes);
++ return get_random_bytes_user(iter);
+ }
+
+-static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
+- loff_t *ppos)
++static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
+ {
+ int ret;
+
+ ret = wait_for_random_bytes();
+ if (ret != 0)
+ return ret;
+- return get_random_bytes_user(buf, nbytes);
++ return get_random_bytes_user(iter);
+ }
+
+ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ {
+- int size, ent_count;
+ int __user *p = (int __user *)arg;
+- int retval;
++ int ent_count;
+
+ switch (cmd) {
+ case RNDGETENTCNT:
+ /* Inherently racy, no point locking. */
+- if (put_user(input_pool.entropy_count, p))
++ if (put_user(input_pool.init_bits, p))
+ return -EFAULT;
+ return 0;
+ case RNDADDTOENTCNT:
+@@ -1592,41 +1376,46 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ return -EFAULT;
+ if (ent_count < 0)
+ return -EINVAL;
+- credit_entropy_bits(ent_count);
++ credit_init_bits(ent_count);
+ return 0;
+- case RNDADDENTROPY:
++ case RNDADDENTROPY: {
++ struct iov_iter iter;
++ struct iovec iov;
++ ssize_t ret;
++ int len;
++
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ent_count, p++))
+ return -EFAULT;
+ if (ent_count < 0)
+ return -EINVAL;
+- if (get_user(size, p++))
++ if (get_user(len, p++))
+ return -EFAULT;
+- retval = write_pool((const char __user *)p, size);
+- if (retval < 0)
+- return retval;
+- credit_entropy_bits(ent_count);
++ ret = import_single_range(WRITE, p, len, &iov, &iter);
++ if (unlikely(ret))
++ return ret;
++ ret = write_pool_user(&iter);
++ if (unlikely(ret < 0))
++ return ret;
++ /* Since we're crediting, enforce that it was all written into the pool. */
++ if (unlikely(ret != len))
++ return -EFAULT;
++ credit_init_bits(ent_count);
+ return 0;
++ }
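The RNDADDENTROPY path is what seed-loading daemons typically exercise. A minimal sketch using the UAPI struct rand_pool_info (the seed bytes here are placeholders; a real caller would load bytes saved at the previous shutdown, and the ioctl requires CAP_SYS_ADMIN):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/random.h>

    int main(void)
    {
        unsigned char seed[32] = { 0 }; /* placeholder seed material */
        struct rand_pool_info *info;
        int fd = open("/dev/random", O_WRONLY);

        if (fd < 0)
            return 1;
        info = malloc(sizeof(*info) + sizeof(seed));
        if (!info)
            return 1;
        info->entropy_count = sizeof(seed) * 8; /* bits to credit */
        info->buf_size = sizeof(seed);
        memcpy(info->buf, seed, sizeof(seed));
        if (ioctl(fd, RNDADDENTROPY, info) != 0)
            perror("RNDADDENTROPY");
        free(info);
        close(fd);
        return 0;
    }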
2129 |
+ case RNDZAPENTCNT:
+ case RNDCLEARPOOL:
+- /*
+- * Clear the entropy pool counters. We no longer clear
+- * the entropy pool, as that's silly.
+- */
++ /* No longer has any effect. */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+- if (xchg(&input_pool.entropy_count, 0) >= POOL_MIN_BITS) {
+- wake_up_interruptible(&random_write_wait);
+- kill_fasync(&fasync, SIGIO, POLL_OUT);
+- }
+ return 0;
+ case RNDRESEEDCRNG:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (!crng_ready())
+ return -ENODATA;
+- crng_reseed(false);
++ crng_reseed();
+ return 0;
+ default:
+ return -EINVAL;
+@@ -1639,22 +1428,26 @@ static int random_fasync(int fd, struct file *filp, int on)
+ }
+
+ const struct file_operations random_fops = {
+- .read = random_read,
+- .write = random_write,
++ .read_iter = random_read_iter,
++ .write_iter = random_write_iter,
+ .poll = random_poll,
+ .unlocked_ioctl = random_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .fasync = random_fasync,
+ .llseek = noop_llseek,
++ .splice_read = generic_file_splice_read,
++ .splice_write = iter_file_splice_write,
+ };
+
+ const struct file_operations urandom_fops = {
+- .read = urandom_read,
+- .write = random_write,
++ .read_iter = urandom_read_iter,
++ .write_iter = random_write_iter,
+ .unlocked_ioctl = random_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .fasync = random_fasync,
+ .llseek = noop_llseek,
++ .splice_read = generic_file_splice_read,
++ .splice_write = iter_file_splice_write,
+ };
+
+
+@@ -1678,7 +1471,7 @@ const struct file_operations urandom_fops = {
+ *
+ * - write_wakeup_threshold - the amount of entropy in the input pool
+ * below which write polls to /dev/random will unblock, requesting
+- * more entropy, tied to the POOL_MIN_BITS constant. It is writable
++ * more entropy, tied to the POOL_READY_BITS constant. It is writable
+ * to avoid breaking old userspaces, but writing to it does not
+ * change any behavior of the RNG.
+ *
+@@ -1693,7 +1486,7 @@ const struct file_operations urandom_fops = {
+ #include <linux/sysctl.h>
+
+ static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
+-static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS;
++static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
+ static int sysctl_poolsize = POOL_BITS;
+ static u8 sysctl_bootid[UUID_SIZE];
+
+@@ -1702,7 +1495,7 @@ static u8 sysctl_bootid[UUID_SIZE];
+ * UUID. The difference is in whether table->data is NULL; if it is,
+ * then a new UUID is generated and returned to the user.
+ */
+-static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
++static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
+ size_t *lenp, loff_t *ppos)
+ {
+ u8 tmp_uuid[UUID_SIZE], *uuid;
+@@ -1729,14 +1522,14 @@ static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
+ }
+
+ snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
+- return proc_dostring(&fake_table, 0, buffer, lenp, ppos);
++ return proc_dostring(&fake_table, 0, buf, lenp, ppos);
+ }
+
+ /* The same as proc_dointvec, but writes don't change anything. */
+-static int proc_do_rointvec(struct ctl_table *table, int write, void *buffer,
++static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
+ size_t *lenp, loff_t *ppos)
+ {
+- return write ? 0 : proc_dointvec(table, 0, buffer, lenp, ppos);
++ return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
+ }
+
+ static struct ctl_table random_table[] = {
+@@ -1749,7 +1542,7 @@ static struct ctl_table random_table[] = {
+ },
+ {
+ .procname = "entropy_avail",
+- .data = &input_pool.entropy_count,
++ .data = &input_pool.init_bits,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+@@ -1783,8 +1576,8 @@ static struct ctl_table random_table[] = {
+ };
+
+ /*
+- * rand_initialize() is called before sysctl_init(),
+- * so we cannot call register_sysctl_init() in rand_initialize()
++ * random_init() is called before sysctl_init(),
++ * so we cannot call register_sysctl_init() in random_init()
+ */
+ static int __init random_sysctls_init(void)
+ {
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index c5de0ec4f9d03..444acd9e2cd6a 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -227,6 +227,17 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ dev_dbg(dev, "sid 0x%x status 0x%x\n",
+ cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
+ }
++ if (privdata->mp2_ops->discovery_status &&
++ privdata->mp2_ops->discovery_status(privdata) == 0) {
++ amd_sfh_hid_client_deinit(privdata);
++ for (i = 0; i < cl_data->num_hid_devices; i++) {
++ devm_kfree(dev, cl_data->feature_report[i]);
++ devm_kfree(dev, in_data->input_report[i]);
++ devm_kfree(dev, cl_data->report_descr[i]);
++ }
++ dev_warn(dev, "Failed to discover, sensors not enabled\n");
++ return -EOPNOTSUPP;
++ }
+ schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ return 0;
+
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+index 6b5fd90b0bd1b..e18a4efd8839e 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+@@ -130,6 +130,12 @@ static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
+ return 0;
+ }
+
++static int amd_sfh_dis_sts_v2(struct amd_mp2_dev *privdata)
++{
++ return (readl(privdata->mmio + AMD_P2C_MSG(1)) &
++ SENSOR_DISCOVERY_STATUS_MASK) >> SENSOR_DISCOVERY_STATUS_SHIFT;
++}
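The mask-and-shift above is the standard GENMASK() pattern for pulling a bit-field out of a register. A self-contained sketch with a 32-bit GENMASK work-alike and an invented register value:

    #include <stdio.h>

    /* same shape as the kernel's GENMASK() for 32-bit values */
    #define GENMASK32(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))

    #define SENSOR_DISCOVERY_STATUS_MASK  GENMASK32(5, 3)
    #define SENSOR_DISCOVERY_STATUS_SHIFT 3

    int main(void)
    {
        unsigned int reg = 0x28; /* pretend P2C register value */
        unsigned int sts = (reg & SENSOR_DISCOVERY_STATUS_MASK) >>
                           SENSOR_DISCOVERY_STATUS_SHIFT;

        printf("discovery status = %u\n", sts); /* bits 5:3 of 0x28 -> 5 */
        return 0;
    }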
2284 |
++
+ void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
+ {
+ union sfh_cmd_param cmd_param;
+@@ -245,6 +251,7 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = {
+ .response = amd_sfh_wait_response_v2,
+ .clear_intr = amd_sfh_clear_intr_v2,
+ .init_intr = amd_sfh_irq_init_v2,
++ .discovery_status = amd_sfh_dis_sts_v2,
+ };
+
+ static const struct amd_mp2_ops amd_sfh_ops = {
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+index 97b99861fae25..9aa88a91ac8d1 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+@@ -39,6 +39,9 @@
+
+ #define AMD_SFH_IDLE_LOOP 200
+
++#define SENSOR_DISCOVERY_STATUS_MASK GENMASK(5, 3)
++#define SENSOR_DISCOVERY_STATUS_SHIFT 3
++
+ /* SFH Command register */
+ union sfh_cmd_base {
+ u32 ul;
+@@ -143,5 +146,6 @@ struct amd_mp2_ops {
+ int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
+ void (*clear_intr)(struct amd_mp2_dev *privdata);
+ int (*init_intr)(struct amd_mp2_dev *privdata);
++ int (*discovery_status)(struct amd_mp2_dev *privdata);
+ };
+ #endif
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 9f44254af8ce9..b0183450e484b 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2677,6 +2677,7 @@ extern int install_special_mapping(struct mm_struct *mm,
+ unsigned long flags, struct page **pages);
+
+ unsigned long randomize_stack_top(unsigned long stack_top);
++unsigned long randomize_page(unsigned long start, unsigned long range);
+
+ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+
+diff --git a/include/linux/prandom.h b/include/linux/prandom.h
+index 056d31317e499..a4aadd2dc153e 100644
+--- a/include/linux/prandom.h
++++ b/include/linux/prandom.h
+@@ -10,6 +10,7 @@
+
+ #include <linux/types.h>
+ #include <linux/percpu.h>
++#include <linux/siphash.h>
+
+ u32 prandom_u32(void);
+ void prandom_bytes(void *buf, size_t nbytes);
+@@ -27,15 +28,10 @@ DECLARE_PER_CPU(unsigned long, net_rand_noise);
+ * The core SipHash round function. Each line can be executed in
+ * parallel given enough CPU resources.
+ */
+-#define PRND_SIPROUND(v0, v1, v2, v3) ( \
+- v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
+- v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
+- v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
+- v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
+-)
++#define PRND_SIPROUND(v0, v1, v2, v3) SIPHASH_PERMUTATION(v0, v1, v2, v3)
+
+-#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
+-#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
++#define PRND_K0 (SIPHASH_CONST_0 ^ SIPHASH_CONST_2)
++#define PRND_K1 (SIPHASH_CONST_1 ^ SIPHASH_CONST_3)
+
+ #elif BITS_PER_LONG == 32
+ /*
+@@ -43,14 +39,9 @@ DECLARE_PER_CPU(unsigned long, net_rand_noise);
+ * This is weaker, but 32-bit machines are not used for high-traffic
+ * applications, so there is less output for an attacker to analyze.
+ */
+-#define PRND_SIPROUND(v0, v1, v2, v3) ( \
+- v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \
+- v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
+- v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \
+- v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \
+-)
+-#define PRND_K0 0x6c796765
+-#define PRND_K1 0x74656462
++#define PRND_SIPROUND(v0, v1, v2, v3) HSIPHASH_PERMUTATION(v0, v1, v2, v3)
++#define PRND_K0 (HSIPHASH_CONST_0 ^ HSIPHASH_CONST_2)
++#define PRND_K1 (HSIPHASH_CONST_1 ^ HSIPHASH_CONST_3)
+
+ #else
+ #error Unsupported BITS_PER_LONG
+diff --git a/include/linux/random.h b/include/linux/random.h
+index f673fbb838b35..4364de2300be6 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -12,45 +12,33 @@
+
+ struct notifier_block;
+
+-extern void add_device_randomness(const void *, size_t);
+-extern void add_bootloader_randomness(const void *, size_t);
++void add_device_randomness(const void *buf, size_t len);
++void add_bootloader_randomness(const void *buf, size_t len);
++void add_input_randomness(unsigned int type, unsigned int code,
++ unsigned int value) __latent_entropy;
++void add_interrupt_randomness(int irq) __latent_entropy;
++void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
+
+ #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+ static inline void add_latent_entropy(void)
+ {
+- add_device_randomness((const void *)&latent_entropy,
+- sizeof(latent_entropy));
++ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
+ }
+ #else
+-static inline void add_latent_entropy(void) {}
++static inline void add_latent_entropy(void) { }
+ #endif
+
+-extern void add_input_randomness(unsigned int type, unsigned int code,
+- unsigned int value) __latent_entropy;
+-extern void add_interrupt_randomness(int irq) __latent_entropy;
+-extern void add_hwgenerator_randomness(const void *buffer, size_t count,
+- size_t entropy);
+ #if IS_ENABLED(CONFIG_VMGENID)
+-extern void add_vmfork_randomness(const void *unique_vm_id, size_t size);
+-extern int register_random_vmfork_notifier(struct notifier_block *nb);
+-extern int unregister_random_vmfork_notifier(struct notifier_block *nb);
++void add_vmfork_randomness(const void *unique_vm_id, size_t len);
++int register_random_vmfork_notifier(struct notifier_block *nb);
++int unregister_random_vmfork_notifier(struct notifier_block *nb);
+ #else
+ static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
+ static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
+ #endif
+
+-extern void get_random_bytes(void *buf, size_t nbytes);
+-extern int wait_for_random_bytes(void);
+-extern int __init rand_initialize(void);
+-extern bool rng_is_initialized(void);
+-extern int register_random_ready_notifier(struct notifier_block *nb);
+-extern int unregister_random_ready_notifier(struct notifier_block *nb);
+-extern size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes);
+-
+-#ifndef MODULE
+-extern const struct file_operations random_fops, urandom_fops;
+-#endif
+-
++void get_random_bytes(void *buf, size_t len);
++size_t __must_check get_random_bytes_arch(void *buf, size_t len);
+ u32 get_random_u32(void);
+ u64 get_random_u64(void);
+ static inline unsigned int get_random_int(void)
+@@ -82,11 +70,15 @@ static inline unsigned long get_random_long(void)
+
+ static inline unsigned long get_random_canary(void)
+ {
+- unsigned long val = get_random_long();
+-
+- return val & CANARY_MASK;
++ return get_random_long() & CANARY_MASK;
+ }
+
++int __init random_init(const char *command_line);
++bool rng_is_initialized(void);
++int wait_for_random_bytes(void);
++int register_random_ready_notifier(struct notifier_block *nb);
++int unregister_random_ready_notifier(struct notifier_block *nb);
++
+ /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
+ * Returns the result of the call to wait_for_random_bytes. */
+ static inline int get_random_bytes_wait(void *buf, size_t nbytes)
+@@ -96,22 +88,20 @@ static inline int get_random_bytes_wait(void *buf, size_t nbytes)
+ return ret;
+ }
+
+-#define declare_get_random_var_wait(var) \
+- static inline int get_random_ ## var ## _wait(var *out) { \
++#define declare_get_random_var_wait(name, ret_type) \
++ static inline int get_random_ ## name ## _wait(ret_type *out) { \
+ int ret = wait_for_random_bytes(); \
+ if (unlikely(ret)) \
+ return ret; \
+- *out = get_random_ ## var(); \
++ *out = get_random_ ## name(); \
+ return 0; \
+ }
+-declare_get_random_var_wait(u32)
+-declare_get_random_var_wait(u64)
+-declare_get_random_var_wait(int)
+-declare_get_random_var_wait(long)
++declare_get_random_var_wait(u32, u32)
++declare_get_random_var_wait(u64, u32)
++declare_get_random_var_wait(int, unsigned int)
++declare_get_random_var_wait(long, unsigned long)
+ #undef declare_get_random_var
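The reworked macro now takes the wrapper name and return type separately, which is what lets get_random_int_wait() return an unsigned int while still calling get_random_int(). A userspace sketch of the same token-pasting pattern, with stub primitives standing in for the kernel's and unlikely() dropped:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* userspace stand-ins for the kernel primitives */
    static int wait_for_random_bytes(void) { return 0; } /* pretend ready */
    static uint32_t get_random_u32(void) { return (uint32_t)rand(); }

    /* same two-parameter shape as the macro in this hunk */
    #define declare_get_random_var_wait(name, ret_type) \
        static inline int get_random_ ## name ## _wait(ret_type *out) { \
            int ret = wait_for_random_bytes(); \
            if (ret) \
                return ret; \
            *out = get_random_ ## name(); \
            return 0; \
        }
    declare_get_random_var_wait(u32, uint32_t)

    int main(void)
    {
        uint32_t v;

        if (get_random_u32_wait(&v) == 0)
            printf("%08x\n", v);
        return 0;
    }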
2484 |
+
+-unsigned long randomize_page(unsigned long start, unsigned long range);
+-
+ /*
+ * This is designed to be standalone for just prandom
+ * users, but for now we include it from <linux/random.h>
+@@ -122,22 +112,10 @@ unsigned long randomize_page(unsigned long start, unsigned long range);
+ #ifdef CONFIG_ARCH_RANDOM
+ # include <asm/archrandom.h>
+ #else
+-static inline bool __must_check arch_get_random_long(unsigned long *v)
+-{
+- return false;
+-}
+-static inline bool __must_check arch_get_random_int(unsigned int *v)
+-{
+- return false;
+-}
+-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+-{
+- return false;
+-}
+-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+-{
+- return false;
+-}
++static inline bool __must_check arch_get_random_long(unsigned long *v) { return false; }
++static inline bool __must_check arch_get_random_int(unsigned int *v) { return false; }
++static inline bool __must_check arch_get_random_seed_long(unsigned long *v) { return false; }
++static inline bool __must_check arch_get_random_seed_int(unsigned int *v) { return false; }
+ #endif
+
+ /*
+@@ -161,8 +139,12 @@ static inline bool __init arch_get_random_long_early(unsigned long *v)
+ #endif
+
+ #ifdef CONFIG_SMP
+-extern int random_prepare_cpu(unsigned int cpu);
+-extern int random_online_cpu(unsigned int cpu);
++int random_prepare_cpu(unsigned int cpu);
++int random_online_cpu(unsigned int cpu);
++#endif
++
++#ifndef MODULE
++extern const struct file_operations random_fops, urandom_fops;
+ #endif
+
+ #endif /* _LINUX_RANDOM_H */
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 25b3ef71f495e..7fc4e9f49f542 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -121,10 +121,12 @@ enum lockdown_reason {
+ LOCKDOWN_DEBUGFS,
+ LOCKDOWN_XMON_WR,
+ LOCKDOWN_BPF_WRITE_USER,
++ LOCKDOWN_DBG_WRITE_KERNEL,
+ LOCKDOWN_INTEGRITY_MAX,
+ LOCKDOWN_KCORE,
+ LOCKDOWN_KPROBES,
+ LOCKDOWN_BPF_READ_KERNEL,
++ LOCKDOWN_DBG_READ_KERNEL,
+ LOCKDOWN_PERF,
+ LOCKDOWN_TRACEFS,
+ LOCKDOWN_XMON_RW,
+diff --git a/include/linux/siphash.h b/include/linux/siphash.h
+index cce8a9acc76cb..3af1428da5597 100644
+--- a/include/linux/siphash.h
++++ b/include/linux/siphash.h
+@@ -138,4 +138,32 @@ static inline u32 hsiphash(const void *data, size_t len,
+ return ___hsiphash_aligned(data, len, key);
+ }
+
++/*
++ * These macros expose the raw SipHash and HalfSipHash permutations.
++ * Do not use them directly! If you think you have a use for them,
++ * be sure to CC the maintainer of this file explaining why.
++ */
++
++#define SIPHASH_PERMUTATION(a, b, c, d) ( \
++ (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
++ (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
++ (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
++ (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
++
++#define SIPHASH_CONST_0 0x736f6d6570736575ULL
++#define SIPHASH_CONST_1 0x646f72616e646f6dULL
++#define SIPHASH_CONST_2 0x6c7967656e657261ULL
++#define SIPHASH_CONST_3 0x7465646279746573ULL
++
++#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
++ (a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \
++ (c) += (d), (d) = rol32((d), 8), (d) ^= (c), \
++ (a) += (d), (d) = rol32((d), 7), (d) ^= (a), \
++ (c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))
++
++#define HSIPHASH_CONST_0 0U
++#define HSIPHASH_CONST_1 0U
++#define HSIPHASH_CONST_2 0x6c796765U
++#define HSIPHASH_CONST_3 0x74656462U
++
+ #endif /* _LINUX_SIPHASH_H */
+diff --git a/include/linux/timex.h b/include/linux/timex.h
+index 5745c90c88005..3871b06bd302c 100644
+--- a/include/linux/timex.h
++++ b/include/linux/timex.h
+@@ -62,6 +62,8 @@
+ #include <linux/types.h>
+ #include <linux/param.h>
+
++unsigned long random_get_entropy_fallback(void);
++
+ #include <asm/timex.h>
+
+ #ifndef random_get_entropy
+@@ -74,8 +76,14 @@
+ *
+ * By default we use get_cycles() for this purpose, but individual
+ * architectures may override this in their asm/timex.h header file.
++ * If a given arch does not have get_cycles(), then we fallback to
++ * using random_get_entropy_fallback().
+ */
++#ifdef get_cycles
+ #define random_get_entropy() ((unsigned long)get_cycles())
++#else
++#define random_get_entropy() random_get_entropy_fallback()
++#endif
+ #endif
+
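The fallback dispatch is plain preprocessor logic, so it can be checked in isolation. In the sketch below, get_cycles is a stand-in macro; commenting out its #define switches random_get_entropy() to the fallback path, mirroring the #ifdef above:

    #include <stdio.h>

    /* comment out this #define to exercise the fallback path */
    #define get_cycles() 42UL

    static unsigned long random_get_entropy_fallback(void) { return 7; }

    #ifdef get_cycles
    #define random_get_entropy() ((unsigned long)get_cycles())
    #else
    #define random_get_entropy() random_get_entropy_fallback()
    #endif

    int main(void)
    {
        printf("%lu\n", random_get_entropy());
        return 0;
    }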
2613 |
+ /*
+diff --git a/init/main.c b/init/main.c
+index 98182c3c2c4b3..f057c49f1d9d8 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -1035,21 +1035,18 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
+ softirq_init();
+ timekeeping_init();
+ kfence_init();
++ time_init();
+
+ /*
+ * For best initial stack canary entropy, prepare it after:
+ * - setup_arch() for any UEFI RNG entropy and boot cmdline access
+- * - timekeeping_init() for ktime entropy used in rand_initialize()
+- * - rand_initialize() to get any arch-specific entropy like RDRAND
+- * - add_latent_entropy() to get any latent entropy
+- * - adding command line entropy
++ * - timekeeping_init() for ktime entropy used in random_init()
++ * - time_init() for making random_get_entropy() work on some platforms
++ * - random_init() to initialize the RNG from early entropy sources
+ */
+- rand_initialize();
+- add_latent_entropy();
+- add_device_randomness(command_line, strlen(command_line));
++ random_init(command_line);
+ boot_init_stack_canary();
+
+- time_init();
+ perf_event_init();
+ profile_init();
+ call_function_init();
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index da06a5553835b..7beceb447211d 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -53,6 +53,7 @@
+ #include <linux/vmacache.h>
+ #include <linux/rcupdate.h>
+ #include <linux/irq.h>
++#include <linux/security.h>
+
+ #include <asm/cacheflush.h>
+ #include <asm/byteorder.h>
+@@ -752,6 +753,29 @@ cpu_master_loop:
+ continue;
+ kgdb_connected = 0;
+ } else {
++ /*
++ * This is a brutal way to interfere with the debugger
++ * and prevent gdb being used to poke at kernel memory.
++ * This could cause trouble if lockdown is applied when
++ * there is already an active gdb session. For now the
++ * answer is simply "don't do that". Typically lockdown
++ * *will* be applied before the debug core gets started
++ * so only developers using kgdb for fairly advanced
++ * early kernel debug can be bitten by this. Hopefully
++ * they are sophisticated enough to take care of
++ * themselves, especially with help from the lockdown
++ * message printed on the console!
++ */
++ if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) {
++ if (IS_ENABLED(CONFIG_KGDB_KDB)) {
++ /* Switch back to kdb if possible... */
++ dbg_kdb_mode = 1;
++ continue;
++ } else {
++ /* ... otherwise just bail */
++ break;
++ }
++ }
+ error = gdb_serial_stub(ks);
+ }
+
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 0852a537dad4c..ead4da9471270 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -45,6 +45,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/uaccess.h>
+ #include <linux/slab.h>
++#include <linux/security.h>
+ #include "kdb_private.h"
+
+ #undef MODULE_PARAM_PREFIX
+@@ -166,10 +167,62 @@ struct task_struct *kdb_curr_task(int cpu)
+ }
+
+ /*
+- * Check whether the flags of the current command and the permissions
+- * of the kdb console has allow a command to be run.
++ * Update the permissions flags (kdb_cmd_enabled) to match the
++ * current lockdown state.
++ *
++ * Within this function the calls to security_locked_down() are "lazy". We
++ * avoid calling them if the current value of kdb_cmd_enabled already excludes
++ * flags that might be subject to lockdown. Additionally we deliberately check
++ * the lockdown flags independently (even though read lockdown implies write
++ * lockdown) since that results in both simpler code and clearer messages to
++ * the user on first-time debugger entry.
++ *
++ * The permission masks during a read+write lockdown permit the following
++ * flags: INSPECT, SIGNAL, REBOOT (and ALWAYS_SAFE).
++ *
++ * The INSPECT commands are not blocked during lockdown because they are
++ * not arbitrary memory reads. INSPECT covers the backtrace family (sometimes
++ * forcing them to have no arguments) and lsmod. These commands do expose
++ * some kernel state but do not allow the developer seated at the console to
++ * choose what state is reported. SIGNAL and REBOOT should not be controversial,
++ * given these are allowed for root during lockdown already.
++ */
++static void kdb_check_for_lockdown(void)
++{
++ const int write_flags = KDB_ENABLE_MEM_WRITE |
++ KDB_ENABLE_REG_WRITE |
++ KDB_ENABLE_FLOW_CTRL;
++ const int read_flags = KDB_ENABLE_MEM_READ |
++ KDB_ENABLE_REG_READ;
++
++ bool need_to_lockdown_write = false;
++ bool need_to_lockdown_read = false;
++
++ if (kdb_cmd_enabled & (KDB_ENABLE_ALL | write_flags))
++ need_to_lockdown_write =
++ security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL);
++
++ if (kdb_cmd_enabled & (KDB_ENABLE_ALL | read_flags))
++ need_to_lockdown_read =
++ security_locked_down(LOCKDOWN_DBG_READ_KERNEL);
++
++ /* De-compose KDB_ENABLE_ALL if required */
++ if (need_to_lockdown_write || need_to_lockdown_read)
++ if (kdb_cmd_enabled & KDB_ENABLE_ALL)
++ kdb_cmd_enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL;
++
++ if (need_to_lockdown_write)
++ kdb_cmd_enabled &= ~write_flags;
++
++ if (need_to_lockdown_read)
++ kdb_cmd_enabled &= ~read_flags;
++}
2755 |
++ |
2756 |
++/* |
2757 |
++ * Check whether the flags of the current command, the permissions of the kdb |
2758 |
++ * console and the lockdown state allow a command to be run. |
2759 |
+ */ |
2760 |
+-static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, |
2761 |
++static bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, |
2762 |
+ bool no_args) |
2763 |
+ { |
2764 |
+ /* permissions comes from userspace so needs massaging slightly */ |
2765 |
+@@ -1180,6 +1233,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, |
2766 |
+ kdb_curr_task(raw_smp_processor_id()); |
2767 |
+ |
2768 |
+ KDB_DEBUG_STATE("kdb_local 1", reason); |
2769 |
++ |
2770 |
++ kdb_check_for_lockdown(); |
2771 |
++ |
2772 |
+ kdb_go_count = 0; |
2773 |
+ if (reason == KDB_REASON_DEBUG) { |
2774 |
+ /* special case below */ |
2775 |
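
To make kdb_check_for_lockdown()'s mask arithmetic concrete, here is a small standalone program; the flag values are illustrative placeholders, not the kernel's kdb.h definitions:

#include <stdio.h>

/* Illustrative stand-ins for the KDB_ENABLE_* flags */
#define ENABLE_ALL		(1 << 0)
#define ENABLE_MEM_READ		(1 << 1)
#define ENABLE_MEM_WRITE	(1 << 2)
#define ENABLE_MASK		(ENABLE_ALL | ENABLE_MEM_READ | ENABLE_MEM_WRITE)

int main(void)
{
	int enabled = ENABLE_ALL;	/* "everything" shorthand */
	int lockdown_write = 1;		/* pretend write lockdown is active */
	int lockdown_read = 0;

	/* De-compose ALL into individual flags before masking any out */
	if ((lockdown_write || lockdown_read) && (enabled & ENABLE_ALL))
		enabled = ENABLE_MASK & ~ENABLE_ALL;
	if (lockdown_write)
		enabled &= ~ENABLE_MEM_WRITE;
	if (lockdown_read)
		enabled &= ~ENABLE_MEM_READ;

	printf("enabled = %#x\n", enabled);	/* read survives, write is gone */
	return 0;
}
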
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 3b1398fbddaf8..871c912860ed5 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -17,6 +17,7 @@
+ #include <linux/clocksource.h>
+ #include <linux/jiffies.h>
+ #include <linux/time.h>
++#include <linux/timex.h>
+ #include <linux/tick.h>
+ #include <linux/stop_machine.h>
+ #include <linux/pvclock_gtod.h>
+@@ -2380,6 +2381,20 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc)
+ return 0;
+ }
+ 
++/**
++ * random_get_entropy_fallback - Returns the raw clock source value,
++ * used by random.c for platforms with no valid random_get_entropy().
++ */
++unsigned long random_get_entropy_fallback(void)
++{
++ struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
++ struct clocksource *clock = READ_ONCE(tkr->clock);
++ 
++ if (unlikely(timekeeping_suspended || !clock))
++ return 0;
++ return clock->read(clock);
++}
++EXPORT_SYMBOL_GPL(random_get_entropy_fallback);
+ 
+ /**
+ * do_adjtimex() - Accessor function to NTP __do_adjtimex function
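
random_get_entropy_fallback() deliberately returns 0 both while timekeeping is suspended and before a clocksource is registered, so early or suspended callers get a harmless (if entropy-free) sample instead of a crash. A minimal userspace sketch of that defensive-read shape, with types and state simplified from the kernel's:

#include <stdio.h>

struct clocksource {
	unsigned long (*read)(const struct clocksource *cs);
};

static int timekeeping_suspended;		/* set across suspend */
static struct clocksource *current_clock;	/* NULL until registration */

static unsigned long entropy_fallback(void)
{
	const struct clocksource *clock = current_clock;

	/* Reading a suspended or missing clocksource would be unsafe;
	 * 0 simply means "no entropy from this sample". */
	if (timekeeping_suspended || !clock)
		return 0;
	return clock->read(clock);
}

static unsigned long fake_read(const struct clocksource *cs)
{
	(void)cs;
	return 42;	/* pretend counter value */
}

int main(void)
{
	static struct clocksource cs = { .read = fake_read };

	printf("%lu\n", entropy_fallback());	/* 0: no clock yet */
	current_clock = &cs;
	printf("%lu\n", entropy_fallback());	/* 42 */
	return 0;
}
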
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 075cd25363ac3..7e282970177a8 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1616,8 +1616,7 @@ config WARN_ALL_UNSEEDED_RANDOM
+ so architecture maintainers really need to do what they can
+ to get the CRNG seeded sooner after the system is booted.
+ However, since users cannot do anything actionable to
+- address this, by default the kernel will issue only a single
+- warning for the first use of unseeded randomness.
++ address this, by default this option is disabled.
+ 
+ Say Y here if you want to receive warnings for all uses of
+ unseeded randomness. This will be of use primarily for
+diff --git a/lib/siphash.c b/lib/siphash.c
+index 72b9068ab57bf..71d315a6ad623 100644
+--- a/lib/siphash.c
++++ b/lib/siphash.c
+@@ -18,19 +18,13 @@
+ #include <asm/word-at-a-time.h>
+ #endif
+ 
+-#define SIPROUND \
+- do { \
+- v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
+- v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
+- v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
+- v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
+- } while (0)
++#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
+ 
+ #define PREAMBLE(len) \
+- u64 v0 = 0x736f6d6570736575ULL; \
+- u64 v1 = 0x646f72616e646f6dULL; \
+- u64 v2 = 0x6c7967656e657261ULL; \
+- u64 v3 = 0x7465646279746573ULL; \
++ u64 v0 = SIPHASH_CONST_0; \
++ u64 v1 = SIPHASH_CONST_1; \
++ u64 v2 = SIPHASH_CONST_2; \
++ u64 v3 = SIPHASH_CONST_3; \
+ u64 b = ((u64)(len)) << 56; \
+ v3 ^= key->key[1]; \
+ v2 ^= key->key[0]; \
+@@ -389,19 +383,13 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+ }
+ EXPORT_SYMBOL(hsiphash_4u32);
+ #else
+-#define HSIPROUND \
+- do { \
+- v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
+- v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
+- v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
+- v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
+- } while (0)
++#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3)
+ 
+ #define HPREAMBLE(len) \
+- u32 v0 = 0; \
+- u32 v1 = 0; \
+- u32 v2 = 0x6c796765U; \
+- u32 v3 = 0x74656462U; \
++ u32 v0 = HSIPHASH_CONST_0; \
++ u32 v1 = HSIPHASH_CONST_1; \
++ u32 v2 = HSIPHASH_CONST_2; \
++ u32 v3 = HSIPHASH_CONST_3; \
+ u32 b = ((u32)(len)) << 24; \
+ v3 ^= key->key[1]; \
+ v2 ^= key->key[0]; \
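
The SIPROUND/HSIPROUND rewrites above change no arithmetic; they just name the round via shared SIPHASH_PERMUTATION/HSIPHASH_PERMUTATION macros (added to the siphash headers elsewhere in this patch) so random.c can reuse them. A self-contained sketch of one 64-bit round, reconstructed from the rotation amounts visible in the deleted lines:

#include <stdint.h>
#include <stdio.h>

static inline uint64_t rol64(uint64_t v, unsigned int s)
{
	return (v << s) | (v >> (64 - s));
}

/* Same quarter-round the deleted SIPROUND macro spelled out inline */
#define SIPHASH_PERMUTATION(a, b, c, d) ( \
	(a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
	(c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
	(a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
	(c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))

int main(void)
{
	/* The IVs are ASCII for "somepseudorandomlygeneratedbytes" */
	uint64_t v0 = 0x736f6d6570736575ULL, v1 = 0x646f72616e646f6dULL;
	uint64_t v2 = 0x6c7967656e657261ULL, v3 = 0x7465646279746573ULL;

	SIPHASH_PERMUTATION(v0, v1, v2, v3);	/* one SIPROUND */
	printf("v0=%016llx v3=%016llx\n",
	       (unsigned long long)v0, (unsigned long long)v3);
	return 0;
}
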
+diff --git a/mm/util.c b/mm/util.c
+index 3492a9e81aa3a..ac63e5ca8b211 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -343,6 +343,38 @@ unsigned long randomize_stack_top(unsigned long stack_top)
+ #endif
+ }
+ 
++/**
++ * randomize_page - Generate a random, page aligned address
++ * @start: The smallest acceptable address the caller will take.
++ * @range: The size of the area, starting at @start, within which the
++ * random address must fall.
++ *
++ * If @start + @range would overflow, @range is capped.
++ *
++ * NOTE: Historical use of randomize_range, which this replaces, presumed that
++ * @start was already page aligned. We now align it regardless.
++ *
++ * Return: A page aligned address within [start, start + range). On error,
++ * @start is returned.
++ */
++unsigned long randomize_page(unsigned long start, unsigned long range)
++{
++ if (!PAGE_ALIGNED(start)) {
++ range -= PAGE_ALIGN(start) - start;
++ start = PAGE_ALIGN(start);
++ }
++ 
++ if (start > ULONG_MAX - range)
++ range = ULONG_MAX - start;
++ 
++ range >>= PAGE_SHIFT;
++ 
++ if (range == 0)
++ return start;
++ 
++ return start + (get_random_long() % range << PAGE_SHIFT);
++}
++ 
+ #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ {
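
randomize_page()'s arithmetic is easy to misread, so a compilable userspace analogue follows; the PAGE_SHIFT value and the stubbed get_random_long() are assumptions for illustration only:

#include <limits.h>
#include <stdio.h>

#define PAGE_SHIFT	12UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PAGE_ALIGNED(x)	(((x) & (PAGE_SIZE - 1)) == 0)

static unsigned long get_random_long(void) { return 0x1234567UL; }	/* stub */

static unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;	/* shrink by the alignment gap */
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;	/* cap so start + range cannot wrap */

	range >>= PAGE_SHIFT;	/* count whole pages in the window */

	if (range == 0)
		return start;

	/* (random % pages) picks a page index; << PAGE_SHIFT rescales it */
	return start + (get_random_long() % range << PAGE_SHIFT);
}

int main(void)
{
	printf("%#lx\n", randomize_page(0x10001UL, 0x100000UL));
	return 0;
}
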
+diff --git a/security/security.c b/security/security.c
+index b7cf5cbfdc677..aaf6566deb9f0 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -59,10 +59,12 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
+ [LOCKDOWN_DEBUGFS] = "debugfs access",
+ [LOCKDOWN_XMON_WR] = "xmon write access",
+ [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
++ [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM",
+ [LOCKDOWN_INTEGRITY_MAX] = "integrity",
+ [LOCKDOWN_KCORE] = "/proc/kcore access",
+ [LOCKDOWN_KPROBES] = "use of kprobes",
+ [LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
++ [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM",
+ [LOCKDOWN_PERF] = "unsafe use of perf",
+ [LOCKDOWN_TRACEFS] = "use of tracefs",
+ [LOCKDOWN_XMON_RW] = "xmon read and write access",
+diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
+index 78f35e88aed6b..fbdb8a3d5b8e5 100644
+--- a/sound/pci/ctxfi/ctatc.c
++++ b/sound/pci/ctxfi/ctatc.c
+@@ -36,6 +36,7 @@
+ | ((IEC958_AES3_CON_FS_48000) << 24))
+ 
+ static const struct snd_pci_quirk subsys_20k1_list[] = {
++ SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0021, "SB046x", CTSB046X),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0022, "SB055x", CTSB055X),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x002f, "SB055x", CTSB055X),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0029, "SB073x", CTSB073X),
+@@ -64,6 +65,7 @@ static const struct snd_pci_quirk subsys_20k2_list[] = {
+ 
+ static const char *ct_subsys_name[NUM_CTCARDS] = {
+ /* 20k1 models */
++ [CTSB046X] = "SB046x",
+ [CTSB055X] = "SB055x",
+ [CTSB073X] = "SB073x",
+ [CTUAA] = "UAA",
+diff --git a/sound/pci/ctxfi/cthardware.h b/sound/pci/ctxfi/cthardware.h
+index f406b626a28c4..2875cec83b8f2 100644
+--- a/sound/pci/ctxfi/cthardware.h
++++ b/sound/pci/ctxfi/cthardware.h
+@@ -26,8 +26,9 @@ enum CHIPTYP {
+ 
+ enum CTCARDS {
+ /* 20k1 models */
++ CTSB046X,
++ CT20K1_MODEL_FIRST = CTSB046X,
+ CTSB055X,
+- CT20K1_MODEL_FIRST = CTSB055X,
+ CTSB073X,
+ CTUAA,
+ CT20K1_UNKNOWN,