author    Jason A. Donenfeld <Jason@zx2c4.com>    2022-05-15 00:22:05 +0200
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2022-05-30 09:29:17 +0200
commit    6244da28c6b3f7b413e32197acf5bd06dbe2ecb5 (patch)
tree      1f064bf2075e600d33052ad5c8f9a88380d01e0f
parent    64cb7f01ddd289fefbd2a8051c288aaf5d50a9c7 (diff)
download  linux-stable-6244da28c6b3f7b413e32197acf5bd06dbe2ecb5.tar.gz
random: unify batched entropy implementations
commit 3092adcef3ffd2ef59634998297ca8358461ebce upstream.

There are currently two separate batched entropy implementations, for u32 and u64, with nearly identical code, with the goal of avoiding unaligned memory accesses and letting the buffers be used more efficiently. Having to maintain these two functions independently is a bit of a hassle though, considering that they always need to be kept in sync.

This commit factors them out into a type-generic macro, so that the expansion produces the same code as before, such that diffing the assembly shows no differences. This will also make it easier in the future to add u16 and u8 batches.

This was initially tested using an always_inline function and letting gcc constant fold the type size in, but the code gen was less efficient, and in general it was more verbose and harder to follow. So this patch goes with the boring macro solution, similar to what's already done for the _wait functions in random.h.

Cc: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
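For readers less familiar with the token-pasting idiom the patch relies on, below is a minimal user-space sketch of the same pattern. It is an illustration, not the kernel code: fill_bytes(), get_uint64_t(), and get_uint32_t() are made-up names, and the locking, buffer zeroing, and generation tracking of the real implementation are omitted.

#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for _get_random_bytes(); a real caller would draw from a CSPRNG. */
static void fill_bytes(void *buf, size_t len)
{
	memset(buf, 0xAA, len); /* placeholder "entropy" for the sketch */
}

/*
 * One expansion emits a per-type batch struct plus its accessor, the same
 * shape that DEFINE_BATCHED_ENTROPY() produces in the patch below.
 */
#define DEFINE_BATCHED(type)						\
struct batch_ ##type {							\
	type entropy[64 / sizeof(type)];				\
	unsigned int position;						\
};									\
									\
/* position starts at UINT_MAX so the first call forces a refill. */	\
static struct batch_ ##type batched_ ##type = { .position = UINT_MAX };	\
									\
type get_ ##type(void)							\
{									\
	struct batch_ ##type *batch = &batched_ ##type;			\
									\
	if (batch->position >= sizeof(batch->entropy) / sizeof(type)) { \
		fill_bytes(batch->entropy, sizeof(batch->entropy));	\
		batch->position = 0;					\
	}								\
	return batch->entropy[batch->position++];			\
}

DEFINE_BATCHED(uint64_t)
DEFINE_BATCHED(uint32_t)

int main(void)
{
	printf("%" PRIu64 " %" PRIu32 "\n", get_uint64_t(), get_uint32_t());
	return 0;
}

As in the patch, each macro expansion compiles to the same code one would have written by hand for that type, while the shared logic lives in one place. This is the "boring macro solution" the message refers to, chosen over an always_inline function parameterized on the type size because the macro gave better code generation.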
-rw-r--r--  drivers/char/random.c  147
1 file changed, 55 insertions, 92 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index dfff00c43da8..c41e372955c3 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -511,99 +511,62 @@ out_zero_chacha:
* provided by this function is okay, the function wait_for_random_bytes()
* should be called and return 0 at least once at any point prior.
*/
-struct batched_entropy {
- union {
- /*
- * We make this 1.5x a ChaCha block, so that we get the
- * remaining 32 bytes from fast key erasure, plus one full
- * block from the detached ChaCha state. We can increase
- * the size of this later if needed so long as we keep the
- * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
- */
- u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
- u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
- };
- local_lock_t lock;
- unsigned long generation;
- unsigned int position;
-};
-
-
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
- .lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
- .position = UINT_MAX
-};
-
-u64 get_random_u64(void)
-{
- u64 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- unsigned long next_gen;
-
- warn_unseeded_randomness();
-
- if (!crng_ready()) {
- _get_random_bytes(&ret, sizeof(ret));
- return ret;
- }
-
- local_lock_irqsave(&batched_entropy_u64.lock, flags);
- batch = raw_cpu_ptr(&batched_entropy_u64);
-
- next_gen = READ_ONCE(base_crng.generation);
- if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
- next_gen != batch->generation) {
- _get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
- batch->position = 0;
- batch->generation = next_gen;
- }
- ret = batch->entropy_u64[batch->position];
- batch->entropy_u64[batch->position] = 0;
- ++batch->position;
- local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u64);
-
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
- .lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
- .position = UINT_MAX
-};
-
-u32 get_random_u32(void)
-{
- u32 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- unsigned long next_gen;
-
- warn_unseeded_randomness();
-
- if (!crng_ready()) {
- _get_random_bytes(&ret, sizeof(ret));
- return ret;
- }
-
- local_lock_irqsave(&batched_entropy_u32.lock, flags);
- batch = raw_cpu_ptr(&batched_entropy_u32);
-
- next_gen = READ_ONCE(base_crng.generation);
- if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
- next_gen != batch->generation) {
- _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
- batch->position = 0;
- batch->generation = next_gen;
- }
-
- ret = batch->entropy_u32[batch->position];
- batch->entropy_u32[batch->position] = 0;
- ++batch->position;
- local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u32);
+#define DEFINE_BATCHED_ENTROPY(type) \
+struct batch_ ##type { \
+ /* \
+ * We make this 1.5x a ChaCha block, so that we get the \
+ * remaining 32 bytes from fast key erasure, plus one full \
+ * block from the detached ChaCha state. We can increase \
+ * the size of this later if needed so long as we keep the \
+ * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
+ */ \
+ type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
+ local_lock_t lock; \
+ unsigned long generation; \
+ unsigned int position; \
+}; \
+ \
+static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
+ .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
+ .position = UINT_MAX \
+}; \
+ \
+type get_random_ ##type(void) \
+{ \
+ type ret; \
+ unsigned long flags; \
+ struct batch_ ##type *batch; \
+ unsigned long next_gen; \
+ \
+ warn_unseeded_randomness(); \
+ \
+ if (!crng_ready()) { \
+ _get_random_bytes(&ret, sizeof(ret)); \
+ return ret; \
+ } \
+ \
+ local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
+ batch = raw_cpu_ptr(&batched_entropy_##type); \
+ \
+ next_gen = READ_ONCE(base_crng.generation); \
+ if (batch->position >= ARRAY_SIZE(batch->entropy) || \
+ next_gen != batch->generation) { \
+ _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
+ batch->position = 0; \
+ batch->generation = next_gen; \
+ } \
+ \
+ ret = batch->entropy[batch->position]; \
+ batch->entropy[batch->position] = 0; \
+ ++batch->position; \
+ local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
+ return ret; \
+} \
+EXPORT_SYMBOL(get_random_ ##type);
+
+DEFINE_BATCHED_ENTROPY(u64)
+DEFINE_BATCHED_ENTROPY(u32)
#ifdef CONFIG_SMP
/*