Perhaps calling OpenSSL routines, something like the programmatic equivalent of:
openssl rand NUM_BYTES | head -c NUM_BYTES > /dev/null
which should run faster than reading from /dev/random and /dev/urandom.
Here's some test code:
/* randombytes.c */
#include <stdlib.h>
#include <stdio.h>
#include <openssl/rand.h>

/*
   compile with:
   gcc -Wall randombytes.c -o randombytes -lcrypto
*/

int main(int argc, char **argv)
{
    unsigned char *random_bytes = NULL;
    int length = 0;

    if (argc == 2)
        length = atoi(argv[1]);
    else {
        fprintf(stderr, "usage: randombytes number_of_bytes\n");
        return EXIT_FAILURE;
    }

    if (length <= 0) {
        fprintf(stderr, "number_of_bytes must be a positive integer...\n");
        return EXIT_FAILURE;
    }

    random_bytes = malloc((size_t)length);
    if (!random_bytes) {
        fprintf(stderr, "could not allocate space for random_bytes...\n");
        return EXIT_FAILURE;
    }

    /* RAND_bytes() returns 1 on success */
    if (RAND_bytes(random_bytes, length) != 1) {
        fprintf(stderr, "could not get random bytes...\n");
        free(random_bytes);
        return EXIT_FAILURE;
    }

    /* the buffer is raw binary and may contain NUL bytes,
       so write it with fwrite() rather than printing it as a string */
    fwrite(random_bytes, 1, (size_t)length, stdout);

    free(random_bytes);
    return EXIT_SUCCESS;
}
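As an aside, when RAND_bytes() fails it records the reason on OpenSSL's error queue. Here's a minimal sketch of printing that reason with ERR_get_error() and ERR_error_string() from <openssl/err.h>; the helper name report_rand_failure is my own, and on OpenSSL versions before 1.1.0 you would also need to call ERR_load_crypto_strings() first to get readable messages:

/* sketch: report why RAND_bytes() failed, via OpenSSL's error queue */
#include <openssl/err.h>
#include <stdio.h>

static void report_rand_failure(void)
{
    unsigned long err = ERR_get_error();  /* earliest error code, or 0 if the queue is empty */
    if (err)
        fprintf(stderr, "RAND_bytes failed: %s\n", ERR_error_string(err, NULL));
    else
        fprintf(stderr, "RAND_bytes failed: no error code available\n");
}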
Here's how it performs on a Mac OS X 10.7.3 system (1.7 GHz i5, 4 GB), relative to /dev/urandom and OpenSSL's openssl binary:
$ time ./randombytes 100000000 > /dev/null
real 0m6.902s
user 0m6.842s
sys 0m0.059s
$ time cat /dev/urandom | head -c 100000000 > /dev/null
real 0m9.391s
user 0m0.050s
sys 0m9.326s
$ time openssl rand 100000000 | head -c 100000000 > /dev/null
real 0m7.060s
user 0m7.050s
sys 0m0.118s
The randombytes binary is about 27% faster than reading bytes from /dev/urandom and about 2% faster than openssl rand (comparing the real times above: 6.902 s against 9.391 s and 7.060 s).
You could profile other approaches in a similar fashion.
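For example, to take the cat | head pipeline out of the /dev/urandom measurement, you could read the device in bulk from C. Here's a minimal sketch; the 64 KB chunk size and the urandombytes name are my own choices, not anything measured above:

/* urandombytes.c -- read N bytes from /dev/urandom in bulk */
#include <stdlib.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    if (argc != 2) {
        fprintf(stderr, "usage: urandombytes number_of_bytes\n");
        return EXIT_FAILURE;
    }

    long remaining = atol(argv[1]);
    if (remaining <= 0) {
        fprintf(stderr, "number_of_bytes must be a positive integer\n");
        return EXIT_FAILURE;
    }

    FILE *in = fopen("/dev/urandom", "rb");
    if (!in) {
        fprintf(stderr, "could not open /dev/urandom...\n");
        return EXIT_FAILURE;
    }

    unsigned char buf[65536];  /* 64 KB chunks */
    while (remaining > 0) {
        size_t want = remaining < (long)sizeof(buf) ? (size_t)remaining : sizeof(buf);
        size_t got = fread(buf, 1, want, in);
        if (got == 0)
            break;  /* read error or EOF */
        fwrite(buf, 1, got, stdout);
        remaining -= (long)got;
    }

    fclose(in);
    return EXIT_SUCCESS;
}

Timed the same way as the others:

$ time ./urandombytes 100000000 > /dev/null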