Message-ID: <20170814173515.n3vcpz37xh33ayuq@pd.tnic>
Date: Mon, 14 Aug 2017 19:35:15 +0200
From: Borislav Petkov <bp@...en8.de>
To: x86-ml <x86@...nel.org>
Cc: Theodore Ts'o <tytso@....edu>,
"Jason A. Donenfeld" <Jason@...c4.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
lkml <linux-kernel@...r.kernel.org>
Subject: early x86 unseeded randomness
Hi,
how about we address that unseeded randomness usage during early boot by
falling back on the TSC on x86? I mean, we already do that for the stack
canary value anyway...
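
Not part of the patch, just to make the intent concrete: below is a sketch of
what a caller needing a random value early would do with the crng_ready()
helper exported further down. early_random_long() is a made-up name used only
for illustration.

/*
 * Sketch only: fall back to the TSC while the CRNG is still unseeded.
 * early_random_long() is a hypothetical helper, not in the patch.
 */
#include <linux/random.h>	/* get_random_bytes(), crng_ready() */
#include <asm/msr.h>		/* rdtsc() */

static unsigned long early_random_long(void)
{
	unsigned long val;

	if (crng_ready())
		/* CRNG is seeded: use the real thing. */
		get_random_bytes(&val, sizeof(val));
	else
		/* Too early in boot: use the TSC instead. */
		val = rdtsc();

	return val;
}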
---
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 8abedf1d650e..e636ac6f8418 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -71,7 +71,11 @@ static __always_inline void boot_init_stack_canary(void)
 	 * there it already has some randomness on most systems. Later
 	 * on during the bootup the random pool has true entropy too.
 	 */
-	get_random_bytes(&canary, sizeof(canary));
+	if (crng_ready())
+		get_random_bytes(&canary, sizeof(canary));
+	else
+		canary = rdtsc();
+
 	tsc = rdtsc();
 	canary += tsc + (tsc << 32UL);
 	canary &= CANARY_MASK;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 3b9e220621f8..859009daf345 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -526,8 +526,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		va_align.mask = (upperbit - 1) & PAGE_MASK;
 		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
 
-		/* A random value per boot for bit slice [12:upper_bit) */
-		va_align.bits = get_random_int() & va_align.mask;
+		/* A pseudo-random value per boot for bit slice [12:upper_bit) */
+		va_align.bits = rdtsc() & va_align.mask;
 	}
 
 	if (cpu_has(c, X86_FEATURE_MWAITX))
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8ad92707e45f..887cca606d7b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -428,7 +428,6 @@ struct crng_state primary_crng = {
  * its value (from 0->1->2).
  */
 static int crng_init = 0;
-#define crng_ready() (likely(crng_init > 0))
 static int crng_init_cnt = 0;
 #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
 static void _extract_crng(struct crng_state *crng,
@@ -497,6 +496,11 @@ static __u32 const twist_table[8] = {
 	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
 	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
 
+bool crng_ready(void)
+{
+	return likely(crng_init > 0);
+}
+
 /*
  * This function adds bytes into the entropy "pool". It does not
  * update the entropy estimate. The caller should call
diff --git a/include/linux/random.h b/include/linux/random.h
index eafea6a09361..18035ba94e43 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -197,4 +197,6 @@ static inline u32 next_pseudo_random32(u32 seed)
 	return seed * 1664525 + 1013904223;
 }
 
+extern bool crng_ready(void);
+
 #endif /* _LINUX_RANDOM_H */
--
Regards/Gruss,
Boris.
Good mailing practices for 400: avoid top-posting and trim the reply.