author     Daniel Mueller <deso@posteo.net>    2019-01-02 21:14:10 -0800
committer  Daniel Mueller <deso@posteo.net>    2019-01-02 21:14:10 -0800
commit     ecf3474223ca3d16a10f12dc2272e3b0ed72c1bb (patch)
tree       03134a683791176b49ef5c92e8d6acd24c3b5a9b /rand/src/rngs
parent     686f61b75055ecb02baf9d9449525ae447a3bed1 (diff)
Update nitrokey crate to 0.2.3
This change updates the nitrokey crate to version 0.2.3. This version bumps the rand crate used to 0.6.1, which in turn requires an additional set of dependencies.

Import subrepo nitrokey/:nitrokey at b3e2adc5bb1300441ca74cc7672617c042f3ea31
Import subrepo rand/:rand at 73613ff903512e9503e41cc8ba9eae76269dc598
Import subrepo rustc_version/:rustc_version at 0294f2ba2018bf7be672abd53db351ce5055fa02
Import subrepo semver-parser/:semver-parser at 750da9b11a04125231b1fb293866ca036845acee
Import subrepo semver/:semver at 5eb6db94fa03f4d5c64a625a56188f496be47598
Diffstat (limited to 'rand/src/rngs')
-rw-r--r--  rand/src/rngs/adapter/mod.rs          15
-rw-r--r--  rand/src/rngs/adapter/read.rs        137
-rw-r--r--  rand/src/rngs/adapter/reseeding.rs   370
-rw-r--r--  rand/src/rngs/entropy.rs             297
-rw-r--r--  rand/src/rngs/jitter.rs              885
-rw-r--r--  rand/src/rngs/mock.rs                 59
-rw-r--r--  rand/src/rngs/mod.rs                 217
-rw-r--r--  rand/src/rngs/os.rs                 1275
-rw-r--r--  rand/src/rngs/small.rs               105
-rw-r--r--  rand/src/rngs/std.rs                  81
-rw-r--r--  rand/src/rngs/thread.rs              135
11 files changed, 3576 insertions, 0 deletions
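For orientation, the files listed above form the `rand::rngs` module tree of rand 0.6. A minimal sketch of how downstream code might use the generators they provide (a hedged illustration of the rand 0.6 API as vendored below, assuming the default `std` feature; error handling is illustrative only):

```rust
extern crate rand;

use rand::rngs::mock::StepRng;
use rand::rngs::{EntropyRng, OsRng, StdRng};
use rand::{RngCore, SeedableRng};

fn main() {
    // OsRng reads random data straight from the operating system.
    if let Ok(mut os_rng) = OsRng::new() {
        let _word = os_rng.next_u64();
    }

    // EntropyRng tries OsRng first and falls back to JitterRng; it is meant
    // for seeding algorithmic generators such as StdRng.
    let mut entropy = EntropyRng::new();
    if let Ok(mut std_rng) = StdRng::from_rng(&mut entropy) {
        let _sample = std_rng.next_u32();
    }

    // StepRng is a deterministic mock generator for tests: 2, 3, 4, ...
    let mut mock = StepRng::new(2, 1);
    assert_eq!(mock.next_u64(), 2);
}
```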
diff --git a/rand/src/rngs/adapter/mod.rs b/rand/src/rngs/adapter/mod.rs
new file mode 100644
index 0000000..60b832e
--- /dev/null
+++ b/rand/src/rngs/adapter/mod.rs
@@ -0,0 +1,15 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Wrappers / adapters forming RNGs
+
+#[cfg(feature="std")] #[doc(hidden)] pub mod read;
+mod reseeding;
+
+#[cfg(feature="std")] pub use self::read::ReadRng;
+pub use self::reseeding::ReseedingRng;
diff --git a/rand/src/rngs/adapter/read.rs b/rand/src/rngs/adapter/read.rs
new file mode 100644
index 0000000..30b6de6
--- /dev/null
+++ b/rand/src/rngs/adapter/read.rs
@@ -0,0 +1,137 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A wrapper around any Read to treat it as an RNG.
+
+use std::io::Read;
+
+use rand_core::{RngCore, Error, ErrorKind, impls};
+
+
+/// An RNG that reads random bytes straight from any type supporting
+/// `std::io::Read`, for example files.
+///
+/// This will work best with an infinite reader, but that is not required.
+///
+/// This can be used with `/dev/urandom` on Unix but it is recommended to use
+/// [`OsRng`] instead.
+///
+/// # Panics
+///
+/// `ReadRng` uses `std::io::Read::read_exact`, which retries on interrupts. All other
+/// errors from the underlying reader, including when it does not have enough
+/// data, will only be reported through [`try_fill_bytes`]. The other
+/// [`RngCore`] methods will panic in case of an error.
+///
+/// # Example
+///
+/// ```
+/// use rand::Rng;
+/// use rand::rngs::adapter::ReadRng;
+///
+/// let data = vec![1, 2, 3, 4, 5, 6, 7, 8];
+/// let mut rng = ReadRng::new(&data[..]);
+/// println!("{:x}", rng.gen::<u32>());
+/// ```
+///
+/// [`OsRng`]: ../struct.OsRng.html
+/// [`RngCore`]: ../../trait.RngCore.html
+/// [`try_fill_bytes`]: ../../trait.RngCore.html#tymethod.try_fill_bytes
+#[derive(Debug)]
+pub struct ReadRng<R> {
+ reader: R
+}
+
+impl<R: Read> ReadRng<R> {
+ /// Create a new `ReadRng` from a `Read`.
+ pub fn new(r: R) -> ReadRng<R> {
+ ReadRng {
+ reader: r
+ }
+ }
+}
+
+impl<R: Read> RngCore for ReadRng<R> {
+ fn next_u32(&mut self) -> u32 {
+ impls::next_u32_via_fill(self)
+ }
+
+ fn next_u64(&mut self) -> u64 {
+ impls::next_u64_via_fill(self)
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.try_fill_bytes(dest).unwrap_or_else(|err|
+ panic!("reading random bytes from Read implementation failed; error: {}", err));
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ if dest.len() == 0 { return Ok(()); }
+ // Use `std::io::Read::read_exact`, which retries on `ErrorKind::Interrupted`.
+ self.reader.read_exact(dest).map_err(|err| {
+ match err.kind() {
+ ::std::io::ErrorKind::UnexpectedEof => Error::with_cause(
+ ErrorKind::Unavailable,
+ "not enough bytes available, reached end of source", err),
+ _ => Error::with_cause(ErrorKind::Unavailable,
+ "error reading from Read source", err)
+ }
+ })
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::ReadRng;
+ use {RngCore, ErrorKind};
+
+ #[test]
+ fn test_reader_rng_u64() {
+ // The bytes are read in the target's native representation; compare against
+ // big-endian constants to avoid endianness concerns.
+ let v = vec![0u8, 0, 0, 0, 0, 0, 0, 1,
+ 0 , 0, 0, 0, 0, 0, 0, 2,
+ 0, 0, 0, 0, 0, 0, 0, 3];
+ let mut rng = ReadRng::new(&v[..]);
+
+ assert_eq!(rng.next_u64(), 1_u64.to_be());
+ assert_eq!(rng.next_u64(), 2_u64.to_be());
+ assert_eq!(rng.next_u64(), 3_u64.to_be());
+ }
+
+ #[test]
+ fn test_reader_rng_u32() {
+ let v = vec![0u8, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3];
+ let mut rng = ReadRng::new(&v[..]);
+
+ assert_eq!(rng.next_u32(), 1_u32.to_be());
+ assert_eq!(rng.next_u32(), 2_u32.to_be());
+ assert_eq!(rng.next_u32(), 3_u32.to_be());
+ }
+
+ #[test]
+ fn test_reader_rng_fill_bytes() {
+ let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
+ let mut w = [0u8; 8];
+
+ let mut rng = ReadRng::new(&v[..]);
+ rng.fill_bytes(&mut w);
+
+ assert!(v == w);
+ }
+
+ #[test]
+ fn test_reader_rng_insufficient_bytes() {
+ let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
+ let mut w = [0u8; 9];
+
+ let mut rng = ReadRng::new(&v[..]);
+
+ assert!(rng.try_fill_bytes(&mut w).err().unwrap().kind == ErrorKind::Unavailable);
+ }
+}
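As a small usage sketch for the adapter above, complementing its tests: running out of input is only reported through `try_fill_bytes`, while the other `RngCore` methods panic. The in-memory byte slice stands in for an arbitrary `Read` source (a hedged illustration, not code from this repository):

```rust
extern crate rand;

use rand::RngCore;
use rand::rngs::adapter::ReadRng;

fn main() {
    let data = [1u8, 2, 3, 4, 5, 6, 7, 8];
    let mut rng = ReadRng::new(&data[..]);

    // Eight bytes are available, so this request succeeds...
    let mut buf = [0u8; 8];
    rng.try_fill_bytes(&mut buf).unwrap();
    assert_eq!(buf, data);

    // ...but the source is now exhausted; a further request fails with
    // ErrorKind::Unavailable instead of panicking (fill_bytes would panic).
    let mut more = [0u8; 4];
    assert!(rng.try_fill_bytes(&mut more).is_err());
}
```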
diff --git a/rand/src/rngs/adapter/reseeding.rs b/rand/src/rngs/adapter/reseeding.rs
new file mode 100644
index 0000000..016afab
--- /dev/null
+++ b/rand/src/rngs/adapter/reseeding.rs
@@ -0,0 +1,370 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A wrapper around another PRNG that reseeds it after it
+//! generates a certain number of random bytes.
+
+use core::mem::size_of;
+
+use rand_core::{RngCore, CryptoRng, SeedableRng, Error, ErrorKind};
+use rand_core::block::{BlockRngCore, BlockRng};
+
+/// A wrapper around any PRNG that implements [`BlockRngCore`], that adds the
+/// ability to reseed it.
+///
+/// `ReseedingRng` reseeds the underlying PRNG in the following cases:
+///
+/// - On a manual call to [`reseed()`].
+/// - After `clone()`, the clone will be reseeded on first use.
+/// - After a process is forked, the RNG in the child process is reseeded within
+/// the next few generated values, depending on the block size of the
+/// underlying PRNG. For [`ChaChaCore`] and [`Hc128Core`] this is a maximum of
+/// 15 `u32` values before reseeding.
+/// - After the PRNG has generated a configurable number of random bytes.
+///
+/// # When should reseeding after a fixed number of generated bytes be used?
+///
+/// Reseeding after a fixed number of generated bytes is never strictly
+/// *necessary*. Cryptographic PRNGs don't have a limited number of bytes they
+/// can output, or at least not a limit reachable in any practical way. There is
+/// no such thing as 'running out of entropy'.
+///
+/// Occasionally reseeding can be seen as some form of 'security in depth'. Even
+/// if in the future a cryptographic weakness is found in the CSPRNG being used,
+/// or a flaw in the implementation, occasionally reseeding should make
+/// exploiting it much more difficult or even impossible.
+///
+/// Use [`ReseedingRng::new`] with a `threshold` of `0` to disable reseeding
+/// after a fixed number of generated bytes.
+///
+/// # Error handling
+///
+/// Although unlikely, reseeding the wrapped PRNG can fail. `ReseedingRng` will
+/// never panic but try to handle the error intelligently through some
+/// combination of retrying and delaying reseeding until later.
+/// If handling the source error fails, `ReseedingRng` will continue generating
+/// data from the wrapped PRNG without reseeding.
+///
+/// Manually calling [`reseed()`] will not have this retry or delay logic, but
+/// reports the error.
+///
+/// # Example
+///
+/// ```
+/// # extern crate rand;
+/// # extern crate rand_chacha;
+/// # fn main() {
+/// use rand::prelude::*;
+/// use rand_chacha::ChaChaCore; // Internal part of ChaChaRng that
+/// // implements BlockRngCore
+/// use rand::rngs::OsRng;
+/// use rand::rngs::adapter::ReseedingRng;
+///
+/// let prng = ChaChaCore::from_entropy();
+// FIXME: it is better to use EntropyRng as reseeder, but that doesn't implement
+// clone yet.
+/// let reseeder = OsRng::new().unwrap();
+/// let mut reseeding_rng = ReseedingRng::new(prng, 0, reseeder);
+///
+/// println!("{}", reseeding_rng.gen::<u64>());
+///
+/// let mut cloned_rng = reseeding_rng.clone();
+/// assert!(reseeding_rng.gen::<u64>() != cloned_rng.gen::<u64>());
+/// # }
+/// ```
+///
+/// [`ChaChaCore`]: ../../../rand_chacha/struct.ChaChaCore.html
+/// [`Hc128Core`]: ../../../rand_hc/struct.Hc128Core.html
+/// [`BlockRngCore`]: ../../../rand_core/block/trait.BlockRngCore.html
+/// [`ReseedingRng::new`]: struct.ReseedingRng.html#method.new
+/// [`reseed()`]: struct.ReseedingRng.html#method.reseed
+#[derive(Debug)]
+pub struct ReseedingRng<R, Rsdr>(BlockRng<ReseedingCore<R, Rsdr>>)
+where R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore;
+
+impl<R, Rsdr> ReseedingRng<R, Rsdr>
+where R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore
+{
+ /// Create a new `ReseedingRng` from an existing PRNG, combined with a RNG
+ /// to use as reseeder.
+ ///
+ /// `threshold` sets the number of generated bytes after which to reseed the
+ /// PRNG. Set it to zero to never reseed based on the number of generated
+ /// values.
+ pub fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
+ ReseedingRng(BlockRng::new(ReseedingCore::new(rng, threshold, reseeder)))
+ }
+
+ /// Reseed the internal PRNG.
+ pub fn reseed(&mut self) -> Result<(), Error> {
+ self.0.core.reseed()
+ }
+}
+
+// TODO: this should be implemented for any type where the inner type
+// implements RngCore, but we can't specify that because ReseedingCore is private
+impl<R, Rsdr: RngCore> RngCore for ReseedingRng<R, Rsdr>
+where R: BlockRngCore<Item = u32> + SeedableRng,
+ <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
+{
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest)
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl<R, Rsdr> Clone for ReseedingRng<R, Rsdr>
+where R: BlockRngCore + SeedableRng + Clone,
+ Rsdr: RngCore + Clone
+{
+ fn clone(&self) -> ReseedingRng<R, Rsdr> {
+ // Recreating `BlockRng` seems easier than cloning it and resetting
+ // the index.
+ ReseedingRng(BlockRng::new(self.0.core.clone()))
+ }
+}
+
+impl<R, Rsdr> CryptoRng for ReseedingRng<R, Rsdr>
+where R: BlockRngCore + SeedableRng + CryptoRng,
+ Rsdr: RngCore + CryptoRng {}
+
+#[derive(Debug)]
+struct ReseedingCore<R, Rsdr> {
+ inner: R,
+ reseeder: Rsdr,
+ threshold: i64,
+ bytes_until_reseed: i64,
+ fork_counter: usize,
+}
+
+impl<R, Rsdr> BlockRngCore for ReseedingCore<R, Rsdr>
+where R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore
+{
+ type Item = <R as BlockRngCore>::Item;
+ type Results = <R as BlockRngCore>::Results;
+
+ fn generate(&mut self, results: &mut Self::Results) {
+ let global_fork_counter = fork::get_fork_counter();
+ if self.bytes_until_reseed <= 0 ||
+ self.is_forked(global_fork_counter) {
+ // We get better performance by directly returning from a separate,
+ // non-inlined function than by calling `reseed` here and continuing
+ // with the rest of this function.
+ return self.reseed_and_generate(results, global_fork_counter);
+ }
+ let num_bytes = results.as_ref().len() * size_of::<Self::Item>();
+ self.bytes_until_reseed -= num_bytes as i64;
+ self.inner.generate(results);
+ }
+}
+
+impl<R, Rsdr> ReseedingCore<R, Rsdr>
+where R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore
+{
+ /// Create a new `ReseedingCore`.
+ fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
+ use ::core::i64::MAX;
+ fork::register_fork_handler();
+
+ // Because generating more than `i64::MAX` bytes takes centuries on
+ // current hardware, we just clamp the threshold to that value.
+ // A threshold of 0 indicates no limit, so we also map it to that
+ // value.
+ let threshold =
+ if threshold == 0 { MAX }
+ else if threshold <= MAX as u64 { threshold as i64 }
+ else { MAX };
+
+ ReseedingCore {
+ inner: rng,
+ reseeder,
+ threshold: threshold as i64,
+ bytes_until_reseed: threshold as i64,
+ fork_counter: 0,
+ }
+ }
+
+ /// Reseed the internal PRNG.
+ fn reseed(&mut self) -> Result<(), Error> {
+ R::from_rng(&mut self.reseeder).map(|result| {
+ self.bytes_until_reseed = self.threshold;
+ self.inner = result
+ })
+ }
+
+ fn is_forked(&self, global_fork_counter: usize) -> bool {
+ // In theory, on 32-bit platforms, it is possible for
+ // `global_fork_counter` to wrap around after ~4e9 forks.
+ //
+ // This check will detect a fork in the normal case where
+ // `fork_counter < global_fork_counter`, and also when the difference
+ // between both is greater than `isize::MAX` (wrapped around).
+ //
+ // It will still fail to detect a fork if there have been more than
+ // `isize::MAX` forks, without any reseed in between. Seems unlikely
+ // enough.
+ (self.fork_counter.wrapping_sub(global_fork_counter) as isize) < 0
+ }
+
+ #[inline(never)]
+ fn reseed_and_generate(&mut self,
+ results: &mut <Self as BlockRngCore>::Results,
+ global_fork_counter: usize)
+ {
+ if self.is_forked(global_fork_counter) {
+ info!("Fork detected, reseeding RNG");
+ } else {
+ trace!("Reseeding RNG (periodic reseed)");
+ }
+
+ let num_bytes =
+ results.as_ref().len() * size_of::<<R as BlockRngCore>::Item>();
+
+ let threshold = if let Err(e) = self.reseed() {
+ let delay = match e.kind {
+ ErrorKind::Transient => num_bytes as i64,
+ kind @ _ if kind.should_retry() => self.threshold >> 8,
+ _ => self.threshold,
+ };
+ warn!("Reseeding RNG delayed reseeding by {} bytes due to \
+ error from source: {}", delay, e);
+ delay
+ } else {
+ self.fork_counter = global_fork_counter;
+ self.threshold
+ };
+
+ self.bytes_until_reseed = threshold - num_bytes as i64;
+ self.inner.generate(results);
+ }
+}
+
+impl<R, Rsdr> Clone for ReseedingCore<R, Rsdr>
+where R: BlockRngCore + SeedableRng + Clone,
+ Rsdr: RngCore + Clone
+{
+ fn clone(&self) -> ReseedingCore<R, Rsdr> {
+ ReseedingCore {
+ inner: self.inner.clone(),
+ reseeder: self.reseeder.clone(),
+ threshold: self.threshold,
+ bytes_until_reseed: 0, // reseed clone on first use
+ fork_counter: self.fork_counter,
+ }
+ }
+}
+
+impl<R, Rsdr> CryptoRng for ReseedingCore<R, Rsdr>
+where R: BlockRngCore + SeedableRng + CryptoRng,
+ Rsdr: RngCore + CryptoRng {}
+
+
+#[cfg(all(feature="std", unix, not(target_os="emscripten")))]
+mod fork {
+ extern crate libc;
+
+ use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+ use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT};
+
+ // Fork protection
+ //
+ // We implement fork protection on Unix using `pthread_atfork`.
+ // When the process is forked, we increment `RESEEDING_RNG_FORK_COUNTER`.
+ // Every `ReseedingRng` stores the last known value of the static in
+ // `fork_counter`. If the cached `fork_counter` is less than
+ // `RESEEDING_RNG_FORK_COUNTER`, it is time to reseed this RNG.
+ //
+ // If reseeding fails, we don't deal with this by setting a delay, but just
+ // don't update `fork_counter`, so a reseed is attempted as soon as
+ // possible.
+
+ static RESEEDING_RNG_FORK_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
+
+ pub fn get_fork_counter() -> usize {
+ RESEEDING_RNG_FORK_COUNTER.load(Ordering::Relaxed)
+ }
+
+ static FORK_HANDLER_REGISTERED: AtomicBool = ATOMIC_BOOL_INIT;
+
+ extern fn fork_handler() {
+ // Note: fetch_add is defined to wrap on overflow
+ // (which is what we want).
+ RESEEDING_RNG_FORK_COUNTER.fetch_add(1, Ordering::Relaxed);
+ }
+
+ pub fn register_fork_handler() {
+ if FORK_HANDLER_REGISTERED.load(Ordering::Relaxed) == false {
+ unsafe { libc::pthread_atfork(None, None, Some(fork_handler)) };
+ FORK_HANDLER_REGISTERED.store(true, Ordering::Relaxed);
+ }
+ }
+}
+
+#[cfg(not(all(feature="std", unix, not(target_os="emscripten"))))]
+mod fork {
+ pub fn get_fork_counter() -> usize { 0 }
+ pub fn register_fork_handler() {}
+}
+
+
+#[cfg(test)]
+mod test {
+ use {Rng, SeedableRng};
+ use rand_chacha::ChaChaCore;
+ use rngs::mock::StepRng;
+ use super::ReseedingRng;
+
+ #[test]
+ fn test_reseeding() {
+ let mut zero = StepRng::new(0, 0);
+ let rng = ChaChaCore::from_rng(&mut zero).unwrap();
+ let mut reseeding = ReseedingRng::new(rng, 32*4, zero);
+
+ // Currently we only support arrays up to length 32.
+ // TODO: cannot generate seq via Rng::gen because it uses different alg
+ let mut buf = [0u32; 32]; // Needs to be a multiple of the RNGs result
+ // size to test exactly.
+ reseeding.fill(&mut buf);
+ let seq = buf;
+ for _ in 0..10 {
+ reseeding.fill(&mut buf);
+ assert_eq!(buf, seq);
+ }
+ }
+
+ #[test]
+ fn test_clone_reseeding() {
+ let mut zero = StepRng::new(0, 0);
+ let rng = ChaChaCore::from_rng(&mut zero).unwrap();
+ let mut rng1 = ReseedingRng::new(rng, 32*4, zero);
+
+ let first: u32 = rng1.gen();
+ for _ in 0..10 { let _ = rng1.gen::<u32>(); }
+
+ let mut rng2 = rng1.clone();
+ assert_eq!(first, rng2.gen::<u32>());
+ }
+}
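As a complement to the doc example above, which disables byte-count reseeding by passing a threshold of 0, here is a minimal sketch of periodic reseeding; the 32 KiB threshold is an arbitrary illustrative value and the unwrap on `OsRng::new()` is for brevity only:

```rust
extern crate rand;
extern crate rand_chacha;

use rand::prelude::*;
use rand::rngs::OsRng;
use rand::rngs::adapter::ReseedingRng;
use rand_chacha::ChaChaCore;

fn main() {
    // Reseed the ChaCha core from OsRng after every 32 KiB of output,
    // in addition to the automatic reseed after clone() or a fork.
    let prng = ChaChaCore::from_entropy();
    let reseeder = OsRng::new().unwrap();
    let mut rng = ReseedingRng::new(prng, 32 * 1024, reseeder);

    let value: u64 = rng.gen();
    println!("{}", value);
}
```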
diff --git a/rand/src/rngs/entropy.rs b/rand/src/rngs/entropy.rs
new file mode 100644
index 0000000..8736324
--- /dev/null
+++ b/rand/src/rngs/entropy.rs
@@ -0,0 +1,297 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Entropy generator, or wrapper around external generators
+
+use rand_core::{RngCore, CryptoRng, Error, ErrorKind, impls};
+#[allow(unused)]
+use rngs;
+
+/// An interface returning random data from external source(s), provided
+/// specifically for securely seeding algorithmic generators (PRNGs).
+///
+/// Where possible, `EntropyRng` retrieves random data from the operating
+/// system's interface for random numbers ([`OsRng`]); if that fails it will
+/// fall back to the [`JitterRng`] entropy collector. In the latter case it will
+/// still try to use [`OsRng`] on the next usage.
+///
+/// If no secure source of entropy is available, `EntropyRng` will panic on use;
+/// i.e. it should never output predictable data.
+///
+/// This is either a little slow ([`OsRng`] requires a system call) or extremely
+/// slow ([`JitterRng`] must use significant CPU time to generate sufficient
+/// jitter); for better performance it is common to seed a local PRNG from
+/// external entropy then primarily use the local PRNG ([`thread_rng`] is
+/// provided as a convenient, local, automatically-seeded CSPRNG).
+///
+/// # Panics
+///
+/// On most systems, like Windows, Linux, macOS and *BSD on common hardware, it
+/// is highly unlikely for both [`OsRng`] and [`JitterRng`] to fail. But on
+/// combinations like WebAssembly without Emscripten or stdweb, both sources are
+/// unavailable. If both sources fail, only [`try_fill_bytes`] is able to
+/// report the error, and only the one from `OsRng`. The other [`RngCore`]
+/// methods will panic in case of an error.
+///
+/// [`OsRng`]: struct.OsRng.html
+/// [`JitterRng`]: jitter/struct.JitterRng.html
+/// [`thread_rng`]: ../fn.thread_rng.html
+/// [`RngCore`]: ../trait.RngCore.html
+/// [`try_fill_bytes`]: ../trait.RngCore.html#tymethod.try_fill_bytes
+#[derive(Debug)]
+pub struct EntropyRng {
+ source: Source,
+}
+
+#[derive(Debug)]
+enum Source {
+ Os(Os),
+ Custom(Custom),
+ Jitter(Jitter),
+ None,
+}
+
+impl EntropyRng {
+ /// Create a new `EntropyRng`.
+ ///
+ /// This method will do no system calls or other initialization routines;
+ /// those are done on first use. This is done to make `new` infallible,
+ /// and `try_fill_bytes` the only place to report errors.
+ pub fn new() -> Self {
+ EntropyRng { source: Source::None }
+ }
+}
+
+impl Default for EntropyRng {
+ fn default() -> Self {
+ EntropyRng::new()
+ }
+}
+
+impl RngCore for EntropyRng {
+ fn next_u32(&mut self) -> u32 {
+ impls::next_u32_via_fill(self)
+ }
+
+ fn next_u64(&mut self) -> u64 {
+ impls::next_u64_via_fill(self)
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.try_fill_bytes(dest).unwrap_or_else(|err|
+ panic!("all entropy sources failed; first error: {}", err))
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let mut reported_error = None;
+
+ if let Source::Os(ref mut os_rng) = self.source {
+ match os_rng.fill(dest) {
+ Ok(()) => return Ok(()),
+ Err(err) => {
+ warn!("EntropyRng: OsRng failed \
+ [trying other entropy sources]: {}", err);
+ reported_error = Some(err);
+ },
+ }
+ } else if Os::is_supported() {
+ match Os::new_and_fill(dest) {
+ Ok(os_rng) => {
+ debug!("EntropyRng: using OsRng");
+ self.source = Source::Os(os_rng);
+ return Ok(());
+ },
+ Err(err) => { reported_error = reported_error.or(Some(err)) },
+ }
+ }
+
+ if let Source::Custom(ref mut rng) = self.source {
+ match rng.fill(dest) {
+ Ok(()) => return Ok(()),
+ Err(err) => {
+ warn!("EntropyRng: custom entropy source failed \
+ [trying other entropy sources]: {}", err);
+ reported_error = Some(err);
+ },
+ }
+ } else if Custom::is_supported() {
+ match Custom::new_and_fill(dest) {
+ Ok(custom) => {
+ debug!("EntropyRng: using custom entropy source");
+ self.source = Source::Custom(custom);
+ return Ok(());
+ },
+ Err(err) => { reported_error = reported_error.or(Some(err)) },
+ }
+ }
+
+ if let Source::Jitter(ref mut jitter_rng) = self.source {
+ match jitter_rng.fill(dest) {
+ Ok(()) => return Ok(()),
+ Err(err) => {
+ warn!("EntropyRng: JitterRng failed: {}", err);
+ reported_error = Some(err);
+ },
+ }
+ } else if Jitter::is_supported() {
+ match Jitter::new_and_fill(dest) {
+ Ok(jitter_rng) => {
+ debug!("EntropyRng: using JitterRng");
+ self.source = Source::Jitter(jitter_rng);
+ return Ok(());
+ },
+ Err(err) => { reported_error = reported_error.or(Some(err)) },
+ }
+ }
+
+ if let Some(err) = reported_error {
+ Err(Error::with_cause(ErrorKind::Unavailable,
+ "All entropy sources failed",
+ err))
+ } else {
+ Err(Error::new(ErrorKind::Unavailable,
+ "No entropy sources available"))
+ }
+ }
+}
+
+impl CryptoRng for EntropyRng {}
+
+
+
+trait EntropySource {
+ fn new_and_fill(dest: &mut [u8]) -> Result<Self, Error>
+ where Self: Sized;
+
+ fn fill(&mut self, dest: &mut [u8]) -> Result<(), Error>;
+
+ fn is_supported() -> bool { true }
+}
+
+#[allow(unused)]
+#[derive(Clone, Debug)]
+struct NoSource;
+
+#[allow(unused)]
+impl EntropySource for NoSource {
+ fn new_and_fill(dest: &mut [u8]) -> Result<Self, Error> {
+ Err(Error::new(ErrorKind::Unavailable, "Source not supported"))
+ }
+
+ fn fill(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ unreachable!()
+ }
+
+ fn is_supported() -> bool { false }
+}
+
+
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+#[derive(Clone, Debug)]
+pub struct Os(rngs::OsRng);
+
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+impl EntropySource for Os {
+ fn new_and_fill(dest: &mut [u8]) -> Result<Self, Error> {
+ let mut rng = rngs::OsRng::new()?;
+ rng.try_fill_bytes(dest)?;
+ Ok(Os(rng))
+ }
+
+ fn fill(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+#[cfg(not(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+))))]
+type Os = NoSource;
+
+
+type Custom = NoSource;
+
+
+#[cfg(not(target_arch = "wasm32"))]
+#[derive(Clone, Debug)]
+pub struct Jitter(rngs::JitterRng);
+
+#[cfg(not(target_arch = "wasm32"))]
+impl EntropySource for Jitter {
+ fn new_and_fill(dest: &mut [u8]) -> Result<Self, Error> {
+ let mut rng = rngs::JitterRng::new()?;
+ rng.try_fill_bytes(dest)?;
+ Ok(Jitter(rng))
+ }
+
+ fn fill(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+#[cfg(target_arch = "wasm32")]
+type Jitter = NoSource;
+
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_entropy() {
+ let mut rng = EntropyRng::new();
+ let n = (rng.next_u32() ^ rng.next_u32()).count_ones();
+ assert!(n >= 2); // p(failure) approx 1e-7
+ }
+}
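A minimal sketch of the intended use described above: `EntropyRng` as a seeding source for an algorithmic generator rather than a bulk generator (the choice of `StdRng` is just an example; the sketch assumes the `std` feature):

```rust
extern crate rand;

use rand::rngs::{EntropyRng, StdRng};
use rand::{Rng, SeedableRng};

fn main() {
    // Errors from the underlying sources only surface through fallible
    // calls such as from_rng / try_fill_bytes; the other RngCore methods
    // would panic if both OsRng and JitterRng fail.
    let mut entropy = EntropyRng::new();
    match StdRng::from_rng(&mut entropy) {
        Ok(mut rng) => println!("{}", rng.gen::<u64>()),
        Err(err) => eprintln!("no entropy source available: {}", err),
    }
}
```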
diff --git a/rand/src/rngs/jitter.rs b/rand/src/rngs/jitter.rs
new file mode 100644
index 0000000..3e93477
--- /dev/null
+++ b/rand/src/rngs/jitter.rs
@@ -0,0 +1,885 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// Based on jitterentropy-library, http://www.chronox.de/jent.html.
+// Copyright Stephan Mueller <smueller@chronox.de>, 2014 - 2017.
+//
+// With permission from Stephan Mueller to relicense the Rust translation under
+// the MIT license.
+
+//! Non-physical true random number generator based on timing jitter.
+
+// Note: the C implementation of `Jitterentropy` relies on being compiled
+// without optimizations. This implementation goes through lengths to make the
+// compiler not optimize out code which does influence timing jitter, but is
+// technically dead code.
+
+use rand_core::{RngCore, CryptoRng, Error, ErrorKind, impls};
+
+use core::{fmt, mem, ptr};
+#[cfg(all(feature="std", not(target_arch = "wasm32")))]
+use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+
+const MEMORY_BLOCKS: usize = 64;
+const MEMORY_BLOCKSIZE: usize = 32;
+const MEMORY_SIZE: usize = MEMORY_BLOCKS * MEMORY_BLOCKSIZE;
+
+/// A true random number generator based on jitter in the CPU execution time,
+/// and jitter in memory access time.
+///
+/// This is a true random number generator, as opposed to pseudo-random
+/// generators. Random numbers generated by `JitterRng` can be seen as fresh
+/// entropy. A consequence is that it is orders of magnitude slower than [`OsRng`]
+/// and PRNGs (about 10<sup>3</sup>..10<sup>6</sup> slower).
+///
+/// There are very few situations where using this RNG is appropriate. Only very
+/// few applications require true entropy. A normal PRNG can be statistically
+/// indistinguishable from true randomness, and a cryptographic PRNG should be
+/// just as impossible to predict.
+///
+/// Use of `JitterRng` is recommended for initializing cryptographic PRNGs when
+/// [`OsRng`] is not available.
+///
+/// `JitterRng` can be used without the standard library, but not conveniently:
+/// you must provide a high-precision timer and carefully follow the
+/// instructions of [`new_with_timer`].
+///
+/// This implementation is based on
+/// [Jitterentropy](http://www.chronox.de/jent.html) version 2.1.0.
+///
+/// Note: There is no accurate timer available on Wasm platforms, to help
+/// prevent fingerprinting or timing side-channel attacks. Therefore
+/// [`JitterRng::new()`] is not available on Wasm.
+///
+/// # Quality testing
+///
+/// [`JitterRng::new()`] has built-in, but limited, quality testing; however,
+/// before using `JitterRng` on untested hardware, or after changes that could
+/// affect how the code is optimized (such as a new LLVM version), it is
+/// recommended to run the much more stringent
+/// [NIST SP 800-90B Entropy Estimation Suite](
+/// https://github.com/usnistgov/SP800-90B_EntropyAssessment).
+///
+/// Use the following code using [`timer_stats`] to collect the data:
+///
+/// ```no_run
+/// use rand::rngs::JitterRng;
+/// #
+/// # use std::error::Error;
+/// # use std::fs::File;
+/// # use std::io::Write;
+/// #
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// let mut rng = JitterRng::new()?;
+///
+/// // 1_000_000 results are required for the
+/// // NIST SP 800-90B Entropy Estimation Suite
+/// const ROUNDS: usize = 1_000_000;
+/// let mut deltas_variable: Vec<u8> = Vec::with_capacity(ROUNDS);
+/// let mut deltas_minimal: Vec<u8> = Vec::with_capacity(ROUNDS);
+///
+/// for _ in 0..ROUNDS {
+/// deltas_variable.push(rng.timer_stats(true) as u8);
+/// deltas_minimal.push(rng.timer_stats(false) as u8);
+/// }
+///
+/// // Write out after the statistics collection loop, to not disturb the
+/// // test results.
+/// File::create("jitter_rng_var.bin")?.write(&deltas_variable)?;
+/// File::create("jitter_rng_min.bin")?.write(&deltas_minimal)?;
+/// #
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// This will produce two files: `jitter_rng_var.bin` and `jitter_rng_min.bin`.
+/// Run the Entropy Estimation Suite in three configurations, as outlined below.
+/// Every run has two steps. One step to produce an estimation, another to
+/// validate the estimation.
+///
+/// 1. Estimate the expected amount of entropy that is at least available with
+/// each round of the entropy collector. This number should be greater than
+/// the amount estimated with `64 / test_timer()`.
+/// ```sh
+/// python noniid_main.py -v jitter_rng_var.bin 8
+/// restart.py -v jitter_rng_var.bin 8 <min-entropy>
+/// ```
+/// 2. Estimate the expected amount of entropy that is available in the last 4
+/// bits of the timer delta after running noise sources. Note that a value of
+/// `3.70` is the minimum estimated entropy for true randomness.
+/// ```sh
+/// python noniid_main.py -v -u 4 jitter_rng_var.bin 4
+/// restart.py -v -u 4 jitter_rng_var.bin 4 <min-entropy>
+/// ```
+/// 3. Estimate the expected amount of entropy that is available to the entropy
+/// collector if both noise sources only run their minimal number of times.
+/// This measures the absolute worst-case, and gives a lower bound for the
+/// available entropy.
+/// ```sh
+/// python noniid_main.py -v -u 4 jitter_rng_min.bin 4
+/// restart.py -v -u 4 jitter_rng_min.bin 4 <min-entropy>
+/// ```
+///
+/// [`OsRng`]: struct.OsRng.html
+/// [`JitterRng::new()`]: struct.JitterRng.html#method.new
+/// [`new_with_timer`]: struct.JitterRng.html#method.new_with_timer
+/// [`timer_stats`]: struct.JitterRng.html#method.timer_stats
+pub struct JitterRng {
+ data: u64, // Actual random number
+ // Number of rounds to run the entropy collector per 64 bits
+ rounds: u8,
+ // Timer used by `measure_jitter`
+ timer: fn() -> u64,
+ // Memory for the Memory Access noise source
+ mem_prev_index: u16,
+ // Make `next_u32` not waste 32 bits
+ data_half_used: bool,
+}
+
+// Note: `JitterRng` maintains a small 64-bit entropy pool. With every
+// `generate` 64 new bits should be integrated in the pool. If a round of
+// `generate` were to collect less than the expected 64 bit, then the returned
+// value, and the new state of the entropy pool, would be in some way related to
+// the initial state. It is therefore better if the initial state of the entropy
+// pool is different on each call to `generate`. This has a few implications:
+// - `generate` should be called once before using `JitterRng` to produce the
+// first usable value (this is done by default in `new`);
+// - We do not zero the entropy pool after generating a result. The reference
+// implementation also does not support zeroing, but recommends generating a
+// new value without using it if you want to protect a previously generated
+// 'secret' value from someone inspecting the memory;
+// - Implementing `Clone` seems acceptable, as it would not cause the systematic
+// bias a constant might cause. Only instead of one value that could be
+// potentially related to the same initial state, there are now two.
+
+// Entropy collector state.
+// These values are not necessary to preserve across runs.
+struct EcState {
+ // Previous time stamp to determine the timer delta
+ prev_time: u64,
+ // Deltas used for the stuck test
+ last_delta: i32,
+ last_delta2: i32,
+ // Memory for the Memory Access noise source
+ mem: [u8; MEMORY_SIZE],
+}
+
+impl EcState {
+ // Stuck test by checking the:
+ // - 1st derivative of the jitter measurement (time delta)
+ // - 2nd derivative of the jitter measurement (delta of time deltas)
+ // - 3rd derivative of the jitter measurement (delta of delta of time
+ // deltas)
+ //
+ // All values must always be non-zero.
+ // This test is a heuristic to see whether the last measurement holds
+ // entropy.
+ fn stuck(&mut self, current_delta: i32) -> bool {
+ let delta2 = self.last_delta - current_delta;
+ let delta3 = delta2 - self.last_delta2;
+
+ self.last_delta = current_delta;
+ self.last_delta2 = delta2;
+
+ current_delta == 0 || delta2 == 0 || delta3 == 0
+ }
+}
+
+// Custom Debug implementation that does not expose the internal state
+impl fmt::Debug for JitterRng {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "JitterRng {{}}")
+ }
+}
+
+impl Clone for JitterRng {
+ fn clone(&self) -> JitterRng {
+ JitterRng {
+ data: self.data,
+ rounds: self.rounds,
+ timer: self.timer,
+ mem_prev_index: self.mem_prev_index,
+ // The 32 bits that may still be unused from the previous round are
+ // for the original to use, not for the clone.
+ data_half_used: false,
+ }
+ }
+}
+
+/// An error that can occur when [`JitterRng::test_timer`] fails.
+///
+/// [`JitterRng::test_timer`]: struct.JitterRng.html#method.test_timer
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum TimerError {
+ /// No timer available.
+ NoTimer,
+ /// Timer too coarse to use as an entropy source.
+ CoarseTimer,
+ /// Timer is not monotonically increasing.
+ NotMonotonic,
+ /// Variations of deltas of time too small.
+ TinyVariantions,
+ /// Too many stuck results (indicating no added entropy).
+ TooManyStuck,
+ #[doc(hidden)]
+ __Nonexhaustive,
+}
+
+impl TimerError {
+ fn description(&self) -> &'static str {
+ match *self {
+ TimerError::NoTimer => "no timer available",
+ TimerError::CoarseTimer => "coarse timer",
+ TimerError::NotMonotonic => "timer not monotonic",
+ TimerError::TinyVariantions => "time delta variations too small",
+ TimerError::TooManyStuck => "too many stuck results",
+ TimerError::__Nonexhaustive => unreachable!(),
+ }
+ }
+}
+
+impl fmt::Display for TimerError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}", self.description())
+ }
+}
+
+#[cfg(feature="std")]
+impl ::std::error::Error for TimerError {
+ fn description(&self) -> &str {
+ self.description()
+ }
+}
+
+impl From<TimerError> for Error {
+ fn from(err: TimerError) -> Error {
+ // Timer check is already quite permissive of failures so we don't
+ // expect false-positive failures, i.e. any error is irrecoverable.
+ Error::with_cause(ErrorKind::Unavailable,
+ "timer jitter failed basic quality tests", err)
+ }
+}
+
+// Initialise to zero; must be positive
+#[cfg(all(feature="std", not(target_arch = "wasm32")))]
+static JITTER_ROUNDS: AtomicUsize = ATOMIC_USIZE_INIT;
+
+impl JitterRng {
+ /// Create a new `JitterRng`. Makes use of `std::time` for a timer, or a
+ /// platform-specific function with higher accuracy if necessary and
+ /// available.
+ ///
+ /// During initialization CPU execution timing jitter is measured a few
+ /// hundred times. If this does not pass basic quality tests, an error is
+ /// returned. The test result is cached to make subsequent calls faster.
+ #[cfg(all(feature="std", not(target_arch = "wasm32")))]
+ pub fn new() -> Result<JitterRng, TimerError> {
+ let mut state = JitterRng::new_with_timer(platform::get_nstime);
+ let mut rounds = JITTER_ROUNDS.load(Ordering::Relaxed) as u8;
+ if rounds == 0 {
+ // No result yet: run test.
+ // This allows the timer test to run multiple times; we don't care.
+ rounds = state.test_timer()?;
+ JITTER_ROUNDS.store(rounds as usize, Ordering::Relaxed);
+ info!("JitterRng: using {} rounds per u64 output", rounds);
+ }
+ state.set_rounds(rounds);
+
+ // Fill `data` with a non-zero value.
+ state.gen_entropy();
+ Ok(state)
+ }
+
+ /// Create a new `JitterRng`.
+ /// A custom timer can be supplied, making it possible to use `JitterRng` in
+ /// `no_std` environments.
+ ///
+ /// The timer must have nanosecond precision.
+ ///
+ /// This method is more low-level than `new()`. It is the responsibility of
+ /// the caller to run [`test_timer`] before using any numbers generated with
+ /// `JitterRng`, and optionally call [`set_rounds`]. Also it is important to
+ /// consume at least one `u64` before using the first result to initialize
+ /// the entropy collection pool.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use rand::{Rng, Error};
+ /// use rand::rngs::JitterRng;
+ ///
+ /// # fn try_inner() -> Result<(), Error> {
+ /// fn get_nstime() -> u64 {
+ /// use std::time::{SystemTime, UNIX_EPOCH};
+ ///
+ /// let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
+ /// // The correct way to calculate the current time is
+ /// // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64`
+ /// // But this is faster, and the difference in terms of entropy is
+ /// // negligible (log2(10^9) == 29.9).
+ /// dur.as_secs() << 30 | dur.subsec_nanos() as u64
+ /// }
+ ///
+ /// let mut rng = JitterRng::new_with_timer(get_nstime);
+ /// let rounds = rng.test_timer()?;
+ /// rng.set_rounds(rounds); // optional
+ /// let _ = rng.gen::<u64>();
+ ///
+ /// // Ready for use
+ /// let v: u64 = rng.gen();
+ /// # Ok(())
+ /// # }
+ ///
+ /// # let _ = try_inner();
+ /// ```
+ ///
+ /// [`test_timer`]: struct.JitterRng.html#method.test_timer
+ /// [`set_rounds`]: struct.JitterRng.html#method.set_rounds
+ pub fn new_with_timer(timer: fn() -> u64) -> JitterRng {
+ JitterRng {
+ data: 0,
+ rounds: 64,
+ timer,
+ mem_prev_index: 0,
+ data_half_used: false,
+ }
+ }
+
+ /// Configures how many rounds are used to generate each 64-bit value.
+ /// This must be greater than zero, and has a big impact on performance
+ /// and output quality.
+ ///
+ /// [`new_with_timer`] conservatively uses 64 rounds, but often fewer rounds
+ /// can be used. The `test_timer()` function returns the minimum number of
+ /// rounds required for full strength (platform dependent), so one may use
+ /// `rng.set_rounds(rng.test_timer()?);` or cache the value.
+ ///
+ /// [`new_with_timer`]: struct.JitterRng.html#method.new_with_timer
+ pub fn set_rounds(&mut self, rounds: u8) {
+ assert!(rounds > 0);
+ self.rounds = rounds;
+ }
+
+ // Calculate a random loop count used for the next round of an entropy
+ // collection, based on bits from a fresh value from the timer.
+ //
+ // The timer is folded to produce a number that contains at most `n_bits`
+ // bits.
+ //
+ // Note: A constant should be added to the resulting random loop count to
+ // prevent loops that run 0 times.
+ #[inline(never)]
+ fn random_loop_cnt(&mut self, n_bits: u32) -> u32 {
+ let mut rounds = 0;
+
+ let mut time = (self.timer)();
+ // Mix with the current state of the random number to balance the random
+ // loop counter a bit more.
+ time ^= self.data;
+
+ // We fold the time value as much as possible to ensure that as many
+ // bits of the time stamp are included as possible.
+ let folds = (64 + n_bits - 1) / n_bits;
+ let mask = (1 << n_bits) - 1;
+ for _ in 0..folds {
+ rounds ^= time & mask;
+ time >>= n_bits;
+ }
+
+ rounds as u32
+ }
+
+ // CPU jitter noise source
+ // Noise source based on the CPU execution time jitter
+ //
+ // This function injects the individual bits of the time value into the
+ // entropy pool using an LFSR.
+ //
+ // The code is deliberately inefficient with respect to the bit shifting.
+ // This function not only acts as a folding operation, but this function's
+ // execution is used to measure the CPU execution time jitter. Any change to
+ // the loop in this function implies that careful retesting must be done.
+ #[inline(never)]
+ fn lfsr_time(&mut self, time: u64, var_rounds: bool) {
+ fn lfsr(mut data: u64, time: u64) -> u64 {
+ for i in 1..65 {
+ let mut tmp = time << (64 - i);
+ tmp >>= 64 - 1;
+
+ // Fibonacci LFSR with polynomial of
+ // x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is
+ // primitive according to
+ // http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf
+ // (the shift values are the polynomial values minus one
+ // due to counting bits from 0 to 63). As the current
+ // position is always the LSB, the polynomial only needs
+ // to shift data in from the left without wrap.
+ data ^= tmp;
+ data ^= (data >> 63) & 1;
+ data ^= (data >> 60) & 1;
+ data ^= (data >> 55) & 1;
+ data ^= (data >> 30) & 1;
+ data ^= (data >> 27) & 1;
+ data ^= (data >> 22) & 1;
+ data = data.rotate_left(1);
+ }
+ data
+ }
+
+ // Note: in the reference implementation only the last round affects
+ // `self.data`, all the other results are ignored. To make sure the
+ // other rounds are not optimised out, we first run all but the last
+ // round on a throw-away value instead of the real `self.data`.
+ let mut lfsr_loop_cnt = 0;
+ if var_rounds { lfsr_loop_cnt = self.random_loop_cnt(4) };
+
+ let mut throw_away: u64 = 0;
+ for _ in 0..lfsr_loop_cnt {
+ throw_away = lfsr(throw_away, time);
+ }
+ black_box(throw_away);
+
+ self.data = lfsr(self.data, time);
+ }
+
+ // Memory Access noise source
+ // This is a noise source based on variations in memory access times
+ //
+ // This function performs memory accesses which will add to the timing
+ // variations due to an unknown amount of CPU wait states that need to be
+ // added when accessing memory. The memory size should be larger than the L1
+ // caches as outlined in the documentation and the associated testing.
+ //
+ // The L1 cache has a very high bandwidth, albeit its access rate is usually
+ // slower than accessing CPU registers. Therefore, L1 accesses only add
+ // minimal variations as the CPU hardly has to wait. Starting with L2,
+ // significant variations are added because L2 typically does not belong to
+ // the CPU any more and therefore a wider range of CPU wait states is
+ // necessary for accesses. L3 and real memory accesses have even a wider
+ // range of wait states. However, to reliably access either L3 or memory,
+ // the `self.mem` memory must be quite large which is usually not desirable.
+ #[inline(never)]
+ fn memaccess(&mut self, mem: &mut [u8; MEMORY_SIZE], var_rounds: bool) {
+ let mut acc_loop_cnt = 128;
+ if var_rounds { acc_loop_cnt += self.random_loop_cnt(4) };
+
+ let mut index = self.mem_prev_index as usize;
+ for _ in 0..acc_loop_cnt {
+ // Addition of memblocksize - 1 to index with wrap around logic to
+ // ensure that every memory location is hit evenly.
+ // The modulus also allows the compiler to remove the indexing
+ // bounds check.
+ index = (index + MEMORY_BLOCKSIZE - 1) % MEMORY_SIZE;
+
+ // memory access: just add 1 to one byte
+ // memory access implies read from and write to memory location
+ mem[index] = mem[index].wrapping_add(1);
+ }
+ self.mem_prev_index = index as u16;
+ }
+
+ // This is the heart of the entropy generation: calculate time deltas and
+ // use the CPU jitter in the time deltas. The jitter is injected into the
+ // entropy pool.
+ //
+ // Ensure that `ec.prev_time` is primed before using the output of this
+ // function. This can be done by calling this function and not using its
+ // result.
+ fn measure_jitter(&mut self, ec: &mut EcState) -> Option<()> {
+ // Invoke one noise source before time measurement to add variations
+ self.memaccess(&mut ec.mem, true);
+
+ // Get time stamp and calculate time delta to previous
+ // invocation to measure the timing variations
+ let time = (self.timer)();
+ // Note: wrapping_sub combined with a cast to `i64` generates a correct
+ // delta, even in the unlikely case this is a timer that is not strictly
+ // monotonic.
+ let current_delta = time.wrapping_sub(ec.prev_time) as i64 as i32;
+ ec.prev_time = time;
+
+ // Call the next noise source which also injects the data
+ self.lfsr_time(current_delta as u64, true);
+
+ // Check whether we have a stuck measurement (i.e. does the last
+ // measurement hold entropy?).
+ if ec.stuck(current_delta) { return None };
+
+ // Rotate the data buffer by a prime number (any odd number would
+ // do) to ensure that every bit position of the input time stamp
+ // has an even chance of being merged with a bit position in the
+ // entropy pool. We do not use one here as the adjacent bits in
+ // successive time deltas may have some form of dependency. The
+ // chosen value of 7 implies that the low 7 bits of the next
+ // time delta value are concatenated with the current time delta.
+ self.data = self.data.rotate_left(7);
+
+ Some(())
+ }
+
+ // Shuffle the pool a bit by mixing some value with a bijective function
+ // (XOR) into the pool.
+ //
+ // The function generates a mixer value that depends on the bits set and
+ // the location of the set bits in the random number generated by the
+ // entropy source. Therefore, based on the generated random number, this
+ // mixer value can have 2^64 different values. That mixer value is
+ // initialized with the first two SHA-1 constants. After obtaining the
+ // mixer value, it is XORed into the random number.
+ //
+ // The mixer value is not assumed to contain any entropy. But due to the
+ // XOR operation, it can also not destroy any entropy present in the
+ // entropy pool.
+ #[inline(never)]
+ fn stir_pool(&mut self) {
+ // This constant is derived from the first two 32 bit initialization
+ // vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1
+ // The order does not really matter as we do not rely on the specific
+ // numbers. We just pick the SHA-1 constants as they have a good mix of
+ // set and unset bits.
+ const CONSTANT: u64 = 0x67452301efcdab89;
+
+ // The start value of the mixer variable is derived from the third
+ // and fourth 32 bit initialization vector of SHA-1 as defined in
+ // FIPS 180-4 section 5.3.1
+ let mut mixer = 0x98badcfe10325476;
+
+ // This is a constant time function to prevent leaking timing
+ // information about the random number.
+ // The normal code is:
+ // ```
+ // for i in 0..64 {
+ // if ((self.data >> i) & 1) == 1 { mixer ^= CONSTANT; }
+ // }
+ // ```
+ // This is a bit fragile, as LLVM really wants to use branches here, and
+ // we rely on it to not recognise the opportunity.
+ for i in 0..64 {
+ let apply = (self.data >> i) & 1;
+ let mask = !apply.wrapping_sub(1);
+ mixer ^= CONSTANT & mask;
+ mixer = mixer.rotate_left(1);
+ }
+
+ self.data ^= mixer;
+ }
+
+ fn gen_entropy(&mut self) -> u64 {
+ trace!("JitterRng: collecting entropy");
+
+ // Prime `ec.prev_time`, and run the noise sources to make sure the
+ // first loop round collects the expected entropy.
+ let mut ec = EcState {
+ prev_time: (self.timer)(),
+ last_delta: 0,
+ last_delta2: 0,
+ mem: [0; MEMORY_SIZE],
+ };
+ let _ = self.measure_jitter(&mut ec);
+
+ for _ in 0..self.rounds {
+ // If a stuck measurement is received, repeat measurement
+ // Note: we do not guard against an infinite loop, that would mean
+ // the timer suddenly became broken.
+ while self.measure_jitter(&mut ec).is_none() {}
+ }
+
+ // Do a single read from `self.mem` to make sure the Memory Access noise
+ // source is not optimised out.
+ black_box(ec.mem[0]);
+
+ self.stir_pool();
+ self.data
+ }
+
+ /// Basic quality tests on the timer, by measuring CPU timing jitter a few
+ /// hundred times.
+ ///
+ /// If successful, this will return the estimated number of rounds necessary
+ /// to collect 64 bits of entropy. Otherwise a [`TimerError`] with the cause
+ /// of the failure will be returned.
+ ///
+ /// [`TimerError`]: enum.TimerError.html
+ pub fn test_timer(&mut self) -> Result<u8, TimerError> {
+ debug!("JitterRng: testing timer ...");
+ // We could add a check for system capabilities such as `clock_getres`
+ // or check for `CONFIG_X86_TSC`, but it does not make much sense as the
+ // following sanity checks verify that we have a high-resolution timer.
+
+ let mut delta_sum = 0;
+ let mut old_delta = 0;
+
+ let mut time_backwards = 0;
+ let mut count_mod = 0;
+ let mut count_stuck = 0;
+
+ let mut ec = EcState {
+ prev_time: (self.timer)(),
+ last_delta: 0,
+ last_delta2: 0,
+ mem: [0; MEMORY_SIZE],
+ };
+
+ // TESTLOOPCOUNT needs some loops to identify edge systems.
+ // 100 is definitely too little.
+ const TESTLOOPCOUNT: u64 = 300;
+ const CLEARCACHE: u64 = 100;
+
+ for i in 0..(CLEARCACHE + TESTLOOPCOUNT) {
+ // Measure time delta of core entropy collection logic
+ let time = (self.timer)();
+ self.memaccess(&mut ec.mem, true);
+ self.lfsr_time(time, true);
+ let time2 = (self.timer)();
+
+ // Test whether timer works
+ if time == 0 || time2 == 0 {
+ return Err(TimerError::NoTimer);
+ }
+ let delta = time2.wrapping_sub(time) as i64 as i32;
+
+ // Test whether timer is fine grained enough to provide delta even
+ // when called shortly after each other -- this implies that we also
+ // have a high resolution timer
+ if delta == 0 {
+ return Err(TimerError::CoarseTimer);
+ }
+
+ // Up to here we did not modify any variable that will be
+ // evaluated later, but we already performed some work. Thus we
+ // already have had an impact on the caches, branch prediction,
+ // etc. with the goal to clear it to get the worst case
+ // measurements.
+ if i < CLEARCACHE { continue; }
+
+ if ec.stuck(delta) { count_stuck += 1; }
+
+ // Test whether we have an increasing timer.
+ if !(time2 > time) { time_backwards += 1; }
+
+ // Count the number of times the counter increases in steps of 100ns
+ // or greater.
+ if (delta % 100) == 0 { count_mod += 1; }
+
+ // Ensure that we have a varying delta timer which is necessary for
+ // the calculation of entropy -- perform this check only after the
+ // first loop is executed as we need to prime the old_delta value
+ delta_sum += (delta - old_delta).abs() as u64;
+ old_delta = delta;
+ }
+
+ // Do a single read from `self.mem` to make sure the Memory Access noise
+ // source is not optimised out.
+ black_box(ec.mem[0]);
+
+ // We allow the time to run backwards for up to three times.
+ // This can happen if the clock is being adjusted by NTP operations.
+ // If such an operation just happens to interfere with our test, it
+ // should not fail. The value of 3 should cover the NTP case being
+ // performed during our test run.
+ if time_backwards > 3 {
+ return Err(TimerError::NotMonotonic);
+ }
+
+ // Test that the available amount of entropy per round does not get too
+ // low. We expect 1 bit of entropy per round as a reasonable minimum
+ // (although less is possible, it means the collector loop has to run
+ // much more often).
+ // `assert!(delta_average >= log2(1))`
+ // `assert!(delta_sum / TESTLOOPCOUNT >= 1)`
+ // `assert!(delta_sum >= TESTLOOPCOUNT)`
+ if delta_sum < TESTLOOPCOUNT {
+ return Err(TimerError::TinyVariantions);
+ }
+
+ // Ensure that we have variations in the time stamp below 100 for at
+ // least 10% of all checks -- on some platforms, the counter increments
+ // in multiples of 100, but not always
+ if count_mod > (TESTLOOPCOUNT * 9 / 10) {
+ return Err(TimerError::CoarseTimer);
+ }
+
+ // If we have more than 90% stuck results, then this Jitter RNG is
+ // likely to not work well.
+ if count_stuck > (TESTLOOPCOUNT * 9 / 10) {
+ return Err(TimerError::TooManyStuck);
+ }
+
+ // Estimate the number of `measure_jitter` rounds necessary for 64 bits
+ // of entropy.
+ //
+ // We don't try very hard to come up with a good estimate of the
+ // available bits of entropy per round here for two reasons:
+ // 1. Simple estimates of the available bits (like Shannon entropy) are
+ // too optimistic.
+ // 2. Unless we want to waste a lot of time during initialization, only
+ // a small number of samples is available.
+ //
+ // Therefore we use a very simple and conservative estimate:
+ // `let bits_of_entropy = log2(delta_average) / 2`.
+ //
+ // The number of rounds `measure_jitter` should run to collect 64 bits
+ // of entropy is `64 / bits_of_entropy`.
+ let delta_average = delta_sum / TESTLOOPCOUNT;
+
+ if delta_average >= 16 {
+ let log2 = 64 - delta_average.leading_zeros();
+ // Do something similar to roundup(64/(log2/2)):
+ Ok( ((64u32 * 2 + log2 - 1) / log2) as u8)
+ } else {
+ // For values < 16 the rounding error becomes too large, use a
+ // lookup table.
+ // Values 0 and 1 are invalid, and filtered out by the
+ // `delta_sum < TESTLOOPCOUNT` test above.
+ let log2_lookup = [0, 0, 128, 81, 64, 56, 50, 46,
+ 43, 41, 39, 38, 36, 35, 34, 33];
+ Ok(log2_lookup[delta_average as usize])
+ }
+ }
+
+ /// Statistical test: return the timer delta of one normal run of the
+ /// `JitterRng` entropy collector.
+ ///
+ /// Setting `var_rounds` to `true` will execute the memory access and the
+ /// CPU jitter noise sources a variable number of times (just like a real
+ /// `JitterRng` round).
+ ///
+ /// Setting `var_rounds` to `false` will execute the noise sources the
+ /// minimal number of times. This can be used to measure the minimum amount
+ /// of entropy one round of the entropy collector can collect in the worst
+ /// case.
+ ///
+ /// See [Quality testing](struct.JitterRng.html#quality-testing) on how to
+ /// use `timer_stats` to test the quality of `JitterRng`.
+ pub fn timer_stats(&mut self, var_rounds: bool) -> i64 {
+ let mut mem = [0; MEMORY_SIZE];
+
+ let time = (self.timer)();
+ self.memaccess(&mut mem, var_rounds);
+ self.lfsr_time(time, var_rounds);
+ let time2 = (self.timer)();
+ time2.wrapping_sub(time) as i64
+ }
+}
+
+#[cfg(feature="std")]
+mod platform {
+ #[cfg(not(any(target_os = "macos", target_os = "ios",
+ target_os = "windows",
+ target_arch = "wasm32")))]
+ pub fn get_nstime() -> u64 {
+ use std::time::{SystemTime, UNIX_EPOCH};
+
+ let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
+ // The correct way to calculate the current time is
+ // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64`
+ // But this is faster, and the difference in terms of entropy is
+ // negligible (log2(10^9) == 29.9).
+ dur.as_secs() << 30 | dur.subsec_nanos() as u64
+ }
+
+ #[cfg(any(target_os = "macos", target_os = "ios"))]
+ pub fn get_nstime() -> u64 {
+ extern crate libc;
+ // On Mac OS and iOS std::time::SystemTime only has 1000ns resolution.
+ // We use `mach_absolute_time` instead. This provides a CPU dependent
+ // unit; to get real nanoseconds the result should be multiplied by
+ // numer/denom from `mach_timebase_info`.
+ // But we are not interested in the exact nanoseconds, just entropy. So
+ // we use the raw result.
+ unsafe { libc::mach_absolute_time() }
+ }
+
+ #[cfg(target_os = "windows")]
+ pub fn get_nstime() -> u64 {
+ extern crate winapi;
+ unsafe {
+ let mut t = super::mem::zeroed();
+ winapi::um::profileapi::QueryPerformanceCounter(&mut t);
+ *t.QuadPart() as u64
+ }
+ }
+}
+
+// A function that is opaque to the optimizer to assist in avoiding dead-code
+// elimination. Taken from `bencher`.
+fn black_box<T>(dummy: T) -> T {
+ unsafe {
+ let ret = ptr::read_volatile(&dummy);
+ mem::forget(dummy);
+ ret
+ }
+}
+
+impl RngCore for JitterRng {
+ fn next_u32(&mut self) -> u32 {
+ // We want to use both parts of the generated entropy
+ if self.data_half_used {
+ self.data_half_used = false;
+ (self.data >> 32) as u32
+ } else {
+ self.data = self.next_u64();
+ self.data_half_used = true;
+ self.data as u32
+ }
+ }
+
+ fn next_u64(&mut self) -> u64 {
+ self.data_half_used = false;
+ self.gen_entropy()
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ // Fill using `next_u32`. This is faster for filling small slices (four
+ // bytes or less), while the overhead is negligible.
+ //
+ // This is done especially for wrappers that implement `next_u32`
+ // themselves via `fill_bytes`.
+ impls::fill_bytes_via_next(self, dest)
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ Ok(self.fill_bytes(dest))
+ }
+}
+
+impl CryptoRng for JitterRng {}
+
+#[cfg(test)]
+mod test_jitter_init {
+ use super::JitterRng;
+
+ #[cfg(all(feature="std", not(target_arch = "wasm32")))]
+ #[test]
+ fn test_jitter_init() {
+ use RngCore;
+ // Because this is a debug build, measurements here are not representative
+ // of the final release build.
+ // Don't fail this test if initializing `JitterRng` fails because of a
+ // bad timer (the timer from the standard library may not have enough
+ // accuracy on all platforms).
+ match JitterRng::new() {
+ Ok(ref mut rng) => {
+ // false positives are possible, but extremely unlikely
+ assert!(rng.next_u32() | rng.next_u32() != 0);
+ },
+ Err(_) => {},
+ }
+ }
+
+ #[test]
+ fn test_jitter_bad_timer() {
+ fn bad_timer() -> u64 { 0 }
+ let mut rng = JitterRng::new_with_timer(bad_timer);
+ assert!(rng.test_timer().is_err());
+ }
+}
diff --git a/rand/src/rngs/mock.rs b/rand/src/rngs/mock.rs
new file mode 100644
index 0000000..3c9a994
--- /dev/null
+++ b/rand/src/rngs/mock.rs
@@ -0,0 +1,59 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Mock random number generator
+
+use rand_core::{RngCore, Error, impls};
+
+/// A simple implementation of `RngCore` for testing purposes.
+///
+/// This generates an arithmetic sequence (i.e. adds a constant each step)
+/// over a `u64` number, using wrapping arithmetic. If the increment is 0
+/// the generator yields a constant.
+///
+/// ```
+/// use rand::Rng;
+/// use rand::rngs::mock::StepRng;
+///
+/// let mut my_rng = StepRng::new(2, 1);
+/// let sample: [u64; 3] = my_rng.gen();
+/// assert_eq!(sample, [2, 3, 4]);
+/// ```
+#[derive(Debug, Clone)]
+pub struct StepRng {
+ v: u64,
+ a: u64,
+}
+
+impl StepRng {
+ /// Create a `StepRng`, yielding an arithmetic sequence starting with
+ /// `initial` and incremented by `increment` each time.
+ pub fn new(initial: u64, increment: u64) -> Self {
+ StepRng { v: initial, a: increment }
+ }
+}
+
+impl RngCore for StepRng {
+ fn next_u32(&mut self) -> u32 {
+ self.next_u64() as u32
+ }
+
+ fn next_u64(&mut self) -> u64 {
+ let result = self.v;
+ self.v = self.v.wrapping_add(self.a);
+ result
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ impls::fill_bytes_via_next(self, dest);
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ Ok(self.fill_bytes(dest))
+ }
+}
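A small sketch of the intended use of `StepRng`: drive code that is generic over `RngCore` with a predictable sequence so a test becomes deterministic. The `roll` function is a made-up example, not part of the crate.

```rust
extern crate rand;

use rand::RngCore;
use rand::rngs::mock::StepRng;

// Hypothetical function under test: a die roll driven by any RNG.
fn roll<R: RngCore>(rng: &mut R) -> u64 {
    rng.next_u64() % 6 + 1
}

fn main() {
    // Yields 10, 11, 12, ... so every roll is predictable.
    let mut rng = StepRng::new(10, 1);
    assert_eq!(roll(&mut rng), 5); // 10 % 6 + 1
    assert_eq!(roll(&mut rng), 6); // 11 % 6 + 1
    assert_eq!(roll(&mut rng), 1); // 12 % 6 + 1
}
```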
diff --git a/rand/src/rngs/mod.rs b/rand/src/rngs/mod.rs
new file mode 100644
index 0000000..70c4506
--- /dev/null
+++ b/rand/src/rngs/mod.rs
@@ -0,0 +1,217 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Random number generators and adapters for common usage:
+//!
+//! - [`ThreadRng`], a fast, secure, auto-seeded thread-local generator
+//! - [`StdRng`] and [`SmallRng`], algorithms to cover typical usage
+//! - [`EntropyRng`], [`OsRng`] and [`JitterRng`] as entropy sources
+//! - [`mock::StepRng`] as a simple counter for tests
+//! - [`adapter::ReadRng`] to read from a file/stream
+//! - [`adapter::ReseedingRng`] to reseed a PRNG on clone / process fork etc.
+//!
+//! # Background — Random number generators (RNGs)
+//!
+//! Computers are inherently deterministic, so to get *random* numbers one
+//! either has to use a hardware generator or collect bits of *entropy* from
+//! various sources (e.g. event timestamps, or jitter). This is a relatively
+//! slow and complicated operation.
+//!
+//! Generally the operating system will collect some entropy, remove bias, and
+//! use that to seed its own PRNG; [`OsRng`] provides an interface to this.
+//! [`JitterRng`] is an entropy collector included with Rand that measures
+//! jitter in the CPU execution time, and jitter in memory access time.
+//! [`EntropyRng`] is a wrapper that uses the best entropy source that is
+//! available.
+//!
+//! ## Pseudo-random number generators
+//!
+//! What is commonly used instead of "true" random number generators are
+//! *pseudo-random number generators* (PRNGs), deterministic algorithms that
+//! produce an infinite stream of pseudo-random numbers from a small random
+//! seed. PRNGs are faster, and have better provable properties. The numbers
+//! produced can be statistically of very high quality and can be impossible to
+//! predict. (They can also have obvious correlations and be trivial to predict;
+//! quality varies.)
+//!
+//! There are two different types of PRNGs: those developed for simulations
+//! and statistics, and those developed for use in cryptography; the latter are
+//! called Cryptographically Secure PRNGs (CSPRNG or CPRNG). Both types can
+//! have good statistical quality but the latter also have to be impossible to
+//! predict, even after seeing many previous output values. Rand provides a good
+//! default algorithm from each class:
+//!
+//! - [`SmallRng`] is a PRNG chosen for low memory usage, high performance and
+//! good statistical quality.
+//! - [`StdRng`] is a CSPRNG chosen for good performance and trust of security
+//! (based on reviews, maturity and usage). The current algorithm is HC-128,
+//! which is one of the recommendations by ECRYPT's eSTREAM project.
+//!
+//! The above PRNGs do not cover all use-cases; more algorithms can be found in
+//! the [`prng` module], as well as in several other crates. For example, you
+//! may want a CSPRNG with significantly lower memory usage than [`StdRng`]
+//! while being less concerned about performance, in which case [`ChaChaRng`]
+//! is a good choice.
+//!
+//! One complexity is that the internal state of a PRNG must change with every
+//! generated number. For APIs this generally means a mutable reference to the
+//! state of the PRNG has to be passed around.
+//!
+//! A solution is [`ThreadRng`]. This is a thread-local implementation of
+//! [`StdRng`] with automatic seeding on first use. It is the best choice if you
+//! "just" want a convenient, secure, fast random number source. Use via the
+//! [`thread_rng`] function, which gets a reference to the current thread's
+//! local instance.
+//!
+//! ## Seeding
+//!
+//! As mentioned above, PRNGs require a random seed in order to produce random
+//! output. This is especially important for CSPRNGs, which are still
+//! deterministic algorithms and thus can only be secure if their seed value is
+//! also secure. To seed a PRNG, use one of:
+//!
+//! - [`FromEntropy::from_entropy`]; this is the most convenient way to seed
+//! with fresh, secure random data.
+//! - [`SeedableRng::from_rng`]; this allows seeding from another PRNG or
+//! from an entropy source such as [`EntropyRng`].
+//! - [`SeedableRng::from_seed`]; this is mostly useful if you wish to be able
+//! to reproduce the output sequence by using a fixed seed. (Don't use
+//! [`StdRng`] or [`SmallRng`] in this case since different algorithms may be
+//! used by future versions of Rand; use an algorithm from the
+//! [`prng` module].)
+//!
+//! ## Conclusion
+//!
+//! - [`thread_rng`] is what you often want to use.
+//! - If you want more control, flexibility, or better performance, use
+//! [`StdRng`], [`SmallRng`] or an algorithm from the [`prng` module].
+//! - Use [`FromEntropy::from_entropy`] to seed new PRNGs.
+//! - If you need reproducibility, use [`SeedableRng::from_seed`] combined with
+//! a named PRNG.
+//!
+//! More information and notes on cryptographic security can be found
+//! in the [`prng` module].
+//!
+//! ## Examples
+//!
+//! Examples of seeding PRNGs:
+//!
+//! ```
+//! use rand::prelude::*;
+//! # use rand::Error;
+//!
+//! // StdRng seeded securely by the OS or local entropy collector:
+//! let mut rng = StdRng::from_entropy();
+//! # let v: u32 = rng.gen();
+//!
+//! // SmallRng seeded from thread_rng:
+//! # fn try_inner() -> Result<(), Error> {
+//! let mut rng = SmallRng::from_rng(thread_rng())?;
+//! # let v: u32 = rng.gen();
+//! # Ok(())
+//! # }
+//! # try_inner().unwrap();
+//!
+//! // SmallRng seeded by a constant, for deterministic results:
+//! let seed = [1,2,3,4, 5,6,7,8, 9,10,11,12, 13,14,15,16]; // byte array
+//! let mut rng = SmallRng::from_seed(seed);
+//! # let v: u32 = rng.gen();
+//! ```
+//!
+//!
+//! # Implementing custom RNGs
+//!
+//! If you want to implement a custom RNG, see the [`rand_core`] crate. The RNG
+//! will have to implement the [`RngCore`] trait, which the [`Rng`] trait is
+//! built on top of.
+//!
+//! If the RNG needs seeding, also implement the [`SeedableRng`] trait.
+//!
+//! [`CryptoRng`] is a marker trait that cryptographically secure PRNGs can
+//! implement.
+//!
+//!
+// This module:
+//! [`ThreadRng`]: struct.ThreadRng.html
+//! [`StdRng`]: struct.StdRng.html
+//! [`SmallRng`]: struct.SmallRng.html
+//! [`EntropyRng`]: struct.EntropyRng.html
+//! [`OsRng`]: struct.OsRng.html
+//! [`JitterRng`]: struct.JitterRng.html
+// Other traits and functions:
+//! [`rand_core`]: https://crates.io/crates/rand_core
+//! [`prng` module]: ../prng/index.html
+//! [`CryptoRng`]: ../trait.CryptoRng.html
+//! [`FromEntropy`]: ../trait.FromEntropy.html
+//! [`FromEntropy::from_entropy`]: ../trait.FromEntropy.html#tymethod.from_entropy
+//! [`RngCore`]: ../trait.RngCore.html
+//! [`Rng`]: ../trait.Rng.html
+//! [`SeedableRng`]: ../trait.SeedableRng.html
+//! [`SeedableRng::from_rng`]: ../trait.SeedableRng.html#tymethod.from_rng
+//! [`SeedableRng::from_seed`]: ../trait.SeedableRng.html#tymethod.from_seed
+//! [`thread_rng`]: ../fn.thread_rng.html
+//! [`mock::StepRng`]: mock/struct.StepRng.html
+//! [`adapter::ReadRng`]: adapter/struct.ReadRng.html
+//! [`adapter::ReseedingRng`]: adapter/struct.ReseedingRng.html
+//! [`ChaChaRng`]: ../../rand_chacha/struct.ChaChaRng.html
+
+pub mod adapter;
+
+#[cfg(feature="std")] mod entropy;
+mod jitter;
+pub mod mock; // Public so we don't export `StepRng` directly, making it a bit
+ // more clear it is intended for testing.
+mod small;
+mod std;
+#[cfg(feature="std")] pub(crate) mod thread;
+
+
+pub use self::jitter::{JitterRng, TimerError};
+#[cfg(feature="std")] pub use self::entropy::EntropyRng;
+
+pub use self::small::SmallRng;
+pub use self::std::StdRng;
+#[cfg(feature="std")] pub use self::thread::ThreadRng;
+
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+mod os;
+
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+pub use self::os::OsRng;
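A minimal sketch of the "Implementing custom RNGs" note in the module documentation above: a toy generator built on `rand_core`. The xorshift-style update is purely illustrative and makes no statistical or cryptographic claims.

```rust
extern crate rand_core;

use rand_core::{impls, Error, RngCore, SeedableRng};

/// A toy RNG; do not use for statistics or cryptography.
pub struct ToyRng {
    state: u64,
}

impl RngCore for ToyRng {
    fn next_u32(&mut self) -> u32 {
        self.next_u64() as u32
    }

    fn next_u64(&mut self) -> u64 {
        // xorshift64 step: simple, but weak.
        self.state ^= self.state << 13;
        self.state ^= self.state >> 7;
        self.state ^= self.state << 17;
        self.state
    }

    fn fill_bytes(&mut self, dest: &mut [u8]) {
        impls::fill_bytes_via_next(self, dest)
    }

    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
        Ok(self.fill_bytes(dest))
    }
}

impl SeedableRng for ToyRng {
    type Seed = [u8; 8];

    fn from_seed(seed: Self::Seed) -> Self {
        let mut state = 0u64;
        for &b in seed.iter() {
            state = (state << 8) | u64::from(b);
        }
        // A zero state would make xorshift return zero forever.
        ToyRng { state: state.max(1) }
    }
}
```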
diff --git a/rand/src/rngs/os.rs b/rand/src/rngs/os.rs
new file mode 100644
index 0000000..e609c50
--- /dev/null
+++ b/rand/src/rngs/os.rs
@@ -0,0 +1,1275 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013-2015 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Interface to the random number generator of the operating system.
+
+use std::fmt;
+use rand_core::{CryptoRng, RngCore, Error, impls};
+
+/// A random number generator that retrieves randomness straight from the
+/// operating system.
+///
+/// This is the preferred external source of entropy for most applications.
+/// Commonly it is used to initialize a user-space RNG, which can then be used
+/// to generate random values with much less overhead than `OsRng`.
+///
+/// You may prefer to use [`EntropyRng`] instead of `OsRng`. It is unlikely, but
+/// not impossible, for `OsRng` to fail. In such cases [`EntropyRng`]
+/// falls back on a good alternative entropy source.
+///
+/// `OsRng::new()` is guaranteed to be very cheap (after the first successful
+/// call), and will never consume more than one file handle per process.
+///
+/// # Platform sources
+///
+/// | OS | interface
+/// |------------------|---------------------------------------------------------
+/// | Linux, Android | [`getrandom`][1] system call if available, otherwise [`/dev/urandom`][2] after reading from `/dev/random` once
+/// | Windows | [`RtlGenRandom`][3]
+/// | macOS, iOS | [`SecRandomCopyBytes`][4]
+/// | FreeBSD | [`kern.arandom`][5]
+/// | OpenBSD, Bitrig | [`getentropy`][6]
+/// | NetBSD | [`/dev/urandom`][7] after reading from `/dev/random` once
+/// | Dragonfly BSD | [`/dev/random`][8]
+/// | Solaris, illumos | [`getrandom`][9] system call if available, otherwise [`/dev/random`][10]
+/// | Fuchsia OS | [`cprng_draw`][11]
+/// | Redox | [`rand:`][12]
+/// | CloudABI | [`random_get`][13]
+/// | Haiku | `/dev/random` (identical to `/dev/urandom`)
+/// | Web browsers | [`Crypto.getRandomValues`][14] (see [Support for WebAssembly and asm.js][16])
+/// | Node.js | [`crypto.randomBytes`][15] (see [Support for WebAssembly and asm.js][16])
+///
+/// Rand doesn't have a blanket implementation for all Unix-like operating
+/// systems that reads from `/dev/urandom`. This ensures all supported operating
+/// systems are using the recommended interface and respect maximum buffer
+/// sizes.
+///
+/// ## Support for WebAssembly and asm.js
+///
+/// The three Emscripten targets `asmjs-unknown-emscripten`,
+/// `wasm32-unknown-emscripten` and `wasm32-experimental-emscripten` use
+/// Emscripten's emulation of `/dev/random` on web browsers and Node.js.
+///
+/// The bare Wasm target `wasm32-unknown-unknown` tries to call the JavaScript
+/// methods directly, using either `stdweb` in combination with `cargo-web` or
+/// `wasm-bindgen` depending on what features are activated for this crate.
+///
+/// ## Early boot
+///
+/// It is possible that early in the boot process the OS hasn't had enough time
+/// yet to collect entropy to securely seed its RNG, especially on virtual
+/// machines.
+///
+/// Some operating systems always block the thread until the RNG is securely
+/// seeded. This can take anywhere from a few seconds to more than a minute.
+/// Others make a best effort to reuse a seed from before the last shutdown,
+/// and document little beyond that.
+///
+/// A few (Linux, NetBSD and Solaris) offer a choice between blocking and
+/// getting an error. With `try_fill_bytes` we choose to get the error
+/// ([`ErrorKind::NotReady`]), while the other methods use a blocking interface.
+///
+/// On Linux (when the `getrandom` system call is not available) and on NetBSD
+/// reading from `/dev/urandom` never blocks, even when the OS hasn't collected
+/// enough entropy yet. As a countermeasure we try to do a single read from
+/// `/dev/random` until we know the OS RNG is initialized (and store this in a
+/// global static).
+///
+/// # Panics
+///
+/// `OsRng` is extremely unlikely to fail if `OsRng::new()`, and one read from
+/// it, were successful. But in case it does fail, only [`try_fill_bytes`] is
+/// able to report the cause. Depending on the error the other [`RngCore`]
+/// methods will retry several times, and panic if the error persists.
+///
+/// [`EntropyRng`]: struct.EntropyRng.html
+/// [`RngCore`]: ../trait.RngCore.html
+/// [`try_fill_bytes`]: ../trait.RngCore.html#tymethod.try_fill_bytes
+/// [`ErrorKind::NotReady`]: ../enum.ErrorKind.html#variant.NotReady
+///
+/// [1]: http://man7.org/linux/man-pages/man2/getrandom.2.html
+/// [2]: http://man7.org/linux/man-pages/man4/urandom.4.html
+/// [3]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx
+/// [4]: https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc
+/// [5]: https://www.freebsd.org/cgi/man.cgi?query=random&sektion=4
+/// [6]: https://man.openbsd.org/getentropy.2
+/// [7]: http://netbsd.gw.com/cgi-bin/man-cgi?random+4+NetBSD-current
+/// [8]: https://leaf.dragonflybsd.org/cgi/web-man?command=random&section=4
+/// [9]: https://docs.oracle.com/cd/E88353_01/html/E37841/getrandom-2.html
+/// [10]: https://docs.oracle.com/cd/E86824_01/html/E54777/random-7d.html
+/// [11]: https://fuchsia.googlesource.com/zircon/+/HEAD/docs/syscalls/cprng_draw.md
+/// [12]: https://github.com/redox-os/randd/blob/master/src/main.rs
+/// [13]: https://github.com/NuxiNL/cloudabi/blob/v0.20/cloudabi.txt#L1826
+/// [14]: https://www.w3.org/TR/WebCryptoAPI/#Crypto-method-getRandomValues
+/// [15]: https://nodejs.org/api/crypto.html#crypto_crypto_randombytes_size_callback
+/// [16]: #support-for-webassembly-and-asmjs
+
+
+#[derive(Clone)]
+pub struct OsRng(imp::OsRng);
+
+impl fmt::Debug for OsRng {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+impl OsRng {
+ /// Create a new `OsRng`.
+ pub fn new() -> Result<OsRng, Error> {
+ imp::OsRng::new().map(OsRng)
+ }
+}
+
+impl CryptoRng for OsRng {}
+
+impl RngCore for OsRng {
+ fn next_u32(&mut self) -> u32 {
+ impls::next_u32_via_fill(self)
+ }
+
+ fn next_u64(&mut self) -> u64 {
+ impls::next_u64_via_fill(self)
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ use std::{time, thread};
+
+ // We cannot return Err(..), so we try to handle before panicking.
+ const MAX_RETRY_PERIOD: u32 = 10; // max 10s
+ const WAIT_DUR_MS: u32 = 100; // retry every 100ms
+ let wait_dur = time::Duration::from_millis(WAIT_DUR_MS as u64);
+ const RETRY_LIMIT: u32 = (MAX_RETRY_PERIOD * 1000) / WAIT_DUR_MS;
+ const TRANSIENT_RETRIES: u32 = 8;
+ let mut err_count = 0;
+ let mut error_logged = false;
+
+ // Maybe block until the OS RNG is initialized
+ let mut read = 0;
+ if let Ok(n) = self.0.test_initialized(dest, true) { read = n };
+ let dest = &mut dest[read..];
+
+ loop {
+ if let Err(e) = self.try_fill_bytes(dest) {
+ if err_count >= RETRY_LIMIT {
+ error!("OsRng failed too many times; last error: {}", e);
+ panic!("OsRng failed too many times; last error: {}", e);
+ }
+
+ if e.kind.should_wait() {
+ if !error_logged {
+ warn!("OsRng failed; waiting up to {}s and retrying. Error: {}",
+ MAX_RETRY_PERIOD, e);
+ error_logged = true;
+ }
+ err_count += 1;
+ thread::sleep(wait_dur);
+ continue;
+ } else if e.kind.should_retry() {
+ if !error_logged {
+ warn!("OsRng failed; retrying up to {} times. Error: {}",
+ TRANSIENT_RETRIES, e);
+ error_logged = true;
+ }
+ err_count += (RETRY_LIMIT + TRANSIENT_RETRIES - 1)
+ / TRANSIENT_RETRIES; // round up
+ continue;
+ } else {
+ error!("OsRng failed: {}", e);
+ panic!("OsRng fatal error: {}", e);
+ }
+ }
+
+ break;
+ }
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ // Some systems do not support reading 0 random bytes.
+ // (And why waste a system call?)
+ if dest.len() == 0 { return Ok(()); }
+
+ let read = self.0.test_initialized(dest, false)?;
+ let dest = &mut dest[read..];
+
+ let max = self.0.max_chunk_size();
+ if dest.len() <= max {
+ trace!("OsRng: reading {} bytes via {}",
+ dest.len(), self.0.method_str());
+ } else {
+ trace!("OsRng: reading {} bytes via {} in {} chunks of {} bytes",
+ dest.len(), self.0.method_str(), (dest.len() + max - 1) / max, max);
+ }
+ for slice in dest.chunks_mut(max) {
+ self.0.fill_chunk(slice)?;
+ }
+ Ok(())
+ }
+}
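A short usage sketch tying in the "Early boot" notes above: fill key material through `try_fill_bytes`, so a not-yet-seeded OS RNG surfaces as an error instead of blocking or panicking. The `os_key` helper is illustrative, not part of the crate.

```rust
extern crate rand;

use rand::rngs::OsRng;
use rand::{Error, RngCore};

fn os_key() -> Result<[u8; 32], Error> {
    let mut rng = OsRng::new()?;
    let mut key = [0u8; 32];
    // Unlike `fill_bytes`, this reports errors (e.g. `ErrorKind::NotReady`
    // early in boot) instead of retrying and eventually panicking.
    rng.try_fill_bytes(&mut key)?;
    Ok(key)
}

fn main() {
    match os_key() {
        Ok(key) => println!("got {} random bytes from the OS", key.len()),
        Err(e) => eprintln!("OS RNG unavailable: {}", e),
    }
}
```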
+
+trait OsRngImpl where Self: Sized {
+ // Create a new `OsRng` platform interface.
+ fn new() -> Result<Self, Error>;
+
+ // Fill a chunk with random bytes.
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error>;
+
+ // Test whether the OS RNG is initialized. This method may not be possible
+ // to support cheaply (or at all) on all operating systems.
+ //
+ // If `blocking` is set, this will cause the OS to block execution until
+ // its RNG is initialized.
+ //
+ // Random values that are read during this test are stored in `dest`; the
+ // number of bytes read is returned.
+ fn test_initialized(&mut self, _dest: &mut [u8], _blocking: bool)
+ -> Result<usize, Error> { Ok(0) }
+
+ // Maximum chunk size supported.
+ fn max_chunk_size(&self) -> usize { ::core::usize::MAX }
+
+ // Name of the OS interface (used for logging).
+ fn method_str(&self) -> &'static str;
+}
+
+
+
+
+// Helper functions to read from a random device such as `/dev/urandom`.
+//
+// All instances use a single internal file handle, to prevent possible
+// exhaustion of file descriptors.
+#[cfg(any(target_os = "linux", target_os = "android",
+ target_os = "netbsd", target_os = "dragonfly",
+ target_os = "solaris", target_os = "redox",
+ target_os = "haiku", target_os = "emscripten"))]
+mod random_device {
+ use {Error, ErrorKind};
+ use std::fs::File;
+ use std::io;
+ use std::io::Read;
+ use std::sync::{Once, Mutex, ONCE_INIT};
+
+ // TODO: remove outer Option when `Mutex::new(None)` is a constant expression
+ static mut READ_RNG_FILE: Option<Mutex<Option<File>>> = None;
+ static READ_RNG_ONCE: Once = ONCE_INIT;
+
+ #[allow(unused)]
+ pub fn open<F>(path: &'static str, open_fn: F) -> Result<(), Error>
+ where F: Fn(&'static str) -> Result<File, io::Error>
+ {
+ READ_RNG_ONCE.call_once(|| {
+ unsafe { READ_RNG_FILE = Some(Mutex::new(None)) }
+ });
+
+ // We try opening the file outside the `call_once` fn because we cannot
+ // clone the error, thus we must retry on failure.
+
+ let mutex = unsafe { READ_RNG_FILE.as_ref().unwrap() };
+ let mut guard = mutex.lock().unwrap();
+ if (*guard).is_none() {
+ info!("OsRng: opening random device {}", path);
+ let file = open_fn(path).map_err(map_err)?;
+ *guard = Some(file);
+ };
+ Ok(())
+ }
+
+ pub fn read(dest: &mut [u8]) -> Result<(), Error> {
+ // We expect this function only to be used after `random_device::open`
+ // was successful. Therefore we can assume that our memory was set with a
+ // valid object.
+ let mutex = unsafe { READ_RNG_FILE.as_ref().unwrap() };
+ let mut guard = mutex.lock().unwrap();
+ let file = (*guard).as_mut().unwrap();
+
+ // Use `std::io::read_exact`, which retries on `ErrorKind::Interrupted`.
+ file.read_exact(dest).map_err(|err| {
+ Error::with_cause(ErrorKind::Unavailable,
+ "error reading random device", err)
+ })
+
+ }
+
+ pub fn map_err(err: io::Error) -> Error {
+ match err.kind() {
+ io::ErrorKind::Interrupted =>
+ Error::new(ErrorKind::Transient, "interrupted"),
+ io::ErrorKind::WouldBlock =>
+ Error::with_cause(ErrorKind::NotReady,
+ "OS RNG not yet seeded", err),
+ _ => Error::with_cause(ErrorKind::Unavailable,
+ "error while opening random device", err)
+ }
+ }
+}
+
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+mod imp {
+ extern crate libc;
+
+ use {Error, ErrorKind};
+ use super::random_device;
+ use super::OsRngImpl;
+
+ use std::io;
+ use std::io::Read;
+ use std::fs::{File, OpenOptions};
+ use std::os::unix::fs::OpenOptionsExt;
+ use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
+ use std::sync::{Once, ONCE_INIT};
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng {
+ method: OsRngMethod,
+ initialized: bool,
+ }
+
+ #[derive(Clone, Debug)]
+ enum OsRngMethod {
+ GetRandom,
+ RandomDevice,
+ }
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ if is_getrandom_available() {
+ return Ok(OsRng { method: OsRngMethod::GetRandom,
+ initialized: false });
+ }
+ random_device::open("/dev/urandom", &|p| File::open(p))?;
+ Ok(OsRng { method: OsRngMethod::RandomDevice, initialized: false })
+ }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ match self.method {
+ OsRngMethod::GetRandom => getrandom_try_fill(dest, false),
+ OsRngMethod::RandomDevice => random_device::read(dest),
+ }
+ }
+
+ fn test_initialized(&mut self, dest: &mut [u8], blocking: bool)
+ -> Result<usize, Error>
+ {
+ static OS_RNG_INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
+ if !self.initialized {
+ self.initialized = OS_RNG_INITIALIZED.load(Ordering::Relaxed);
+ }
+ if self.initialized { return Ok(0); }
+
+ let result = match self.method {
+ OsRngMethod::GetRandom => {
+ getrandom_try_fill(dest, blocking)?;
+ Ok(dest.len())
+ }
+ OsRngMethod::RandomDevice => {
+ info!("OsRng: testing random device /dev/random");
+ let mut file = OpenOptions::new()
+ .read(true)
+ .custom_flags(if blocking { 0 } else { libc::O_NONBLOCK })
+ .open("/dev/random")
+ .map_err(random_device::map_err)?;
+ file.read(&mut dest[..1]).map_err(random_device::map_err)?;
+ Ok(1)
+ }
+ };
+ OS_RNG_INITIALIZED.store(true, Ordering::Relaxed);
+ self.initialized = true;
+ result
+ }
+
+ fn method_str(&self) -> &'static str {
+ match self.method {
+ OsRngMethod::GetRandom => "getrandom",
+ OsRngMethod::RandomDevice => "/dev/urandom",
+ }
+ }
+ }
+
+ #[cfg(target_arch = "x86_64")]
+ const NR_GETRANDOM: libc::c_long = 318;
+ #[cfg(target_arch = "x86")]
+ const NR_GETRANDOM: libc::c_long = 355;
+ #[cfg(target_arch = "arm")]
+ const NR_GETRANDOM: libc::c_long = 384;
+ #[cfg(target_arch = "aarch64")]
+ const NR_GETRANDOM: libc::c_long = 278;
+ #[cfg(target_arch = "s390x")]
+ const NR_GETRANDOM: libc::c_long = 349;
+ #[cfg(target_arch = "powerpc")]
+ const NR_GETRANDOM: libc::c_long = 359;
+ #[cfg(target_arch = "powerpc64")]
+ const NR_GETRANDOM: libc::c_long = 359;
+ #[cfg(target_arch = "mips")] // old ABI
+ const NR_GETRANDOM: libc::c_long = 4353;
+ #[cfg(target_arch = "mips64")]
+ const NR_GETRANDOM: libc::c_long = 5313;
+ #[cfg(target_arch = "sparc")]
+ const NR_GETRANDOM: libc::c_long = 347;
+ #[cfg(target_arch = "sparc64")]
+ const NR_GETRANDOM: libc::c_long = 347;
+ #[cfg(not(any(target_arch = "x86_64", target_arch = "x86",
+ target_arch = "arm", target_arch = "aarch64",
+ target_arch = "s390x", target_arch = "powerpc",
+ target_arch = "powerpc64", target_arch = "mips",
+ target_arch = "mips64", target_arch = "sparc",
+ target_arch = "sparc64")))]
+ const NR_GETRANDOM: libc::c_long = 0;
+
+ fn getrandom(buf: &mut [u8], blocking: bool) -> libc::c_long {
+ const GRND_NONBLOCK: libc::c_uint = 0x0001;
+
+ if NR_GETRANDOM == 0 { return -1 };
+
+ unsafe {
+ libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(),
+ if blocking { 0 } else { GRND_NONBLOCK })
+ }
+ }
+
+ fn getrandom_try_fill(dest: &mut [u8], blocking: bool) -> Result<(), Error> {
+ let mut read = 0;
+ while read < dest.len() {
+ let result = getrandom(&mut dest[read..], blocking);
+ if result == -1 {
+ let err = io::Error::last_os_error();
+ let kind = err.kind();
+ if kind == io::ErrorKind::Interrupted {
+ continue;
+ } else if kind == io::ErrorKind::WouldBlock {
+ return Err(Error::with_cause(
+ ErrorKind::NotReady,
+ "getrandom not ready",
+ err,
+ ));
+ } else {
+ return Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "unexpected getrandom error",
+ err,
+ ));
+ }
+ } else {
+ read += result as usize;
+ }
+ }
+ Ok(())
+ }
+
+ fn is_getrandom_available() -> bool {
+ static CHECKER: Once = ONCE_INIT;
+ static AVAILABLE: AtomicBool = ATOMIC_BOOL_INIT;
+
+ if NR_GETRANDOM == 0 { return false };
+
+ CHECKER.call_once(|| {
+ debug!("OsRng: testing getrandom");
+ let mut buf: [u8; 0] = [];
+ let result = getrandom(&mut buf, false);
+ let available = if result == -1 {
+ let err = io::Error::last_os_error().raw_os_error();
+ err != Some(libc::ENOSYS)
+ } else {
+ true
+ };
+ AVAILABLE.store(available, Ordering::Relaxed);
+ info!("OsRng: using {}", if available { "getrandom" } else { "/dev/urandom" });
+ });
+
+ AVAILABLE.load(Ordering::Relaxed)
+ }
+}
+
+
+#[cfg(target_os = "netbsd")]
+mod imp {
+ use Error;
+ use super::random_device;
+ use super::OsRngImpl;
+
+ use std::fs::File;
+ use std::io::Read;
+ use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng { initialized: bool }
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ random_device::open("/dev/urandom", &|p| File::open(p))?;
+ Ok(OsRng { initialized: false })
+ }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ random_device::read(dest)
+ }
+
+ // Read a single byte from `/dev/random` to determine if the OS RNG is
+ // already seeded. NetBSD always blocks if not yet ready.
+ fn test_initialized(&mut self, dest: &mut [u8], _blocking: bool)
+ -> Result<usize, Error>
+ {
+ static OS_RNG_INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
+ if !self.initialized {
+ self.initialized = OS_RNG_INITIALIZED.load(Ordering::Relaxed);
+ }
+ if self.initialized { return Ok(0); }
+
+ info!("OsRng: testing random device /dev/random");
+ let mut file =
+ File::open("/dev/random").map_err(random_device::map_err)?;
+ file.read(&mut dest[..1]).map_err(random_device::map_err)?;
+
+ OS_RNG_INITIALIZED.store(true, Ordering::Relaxed);
+ self.initialized = true;
+ Ok(1)
+ }
+
+ fn method_str(&self) -> &'static str { "/dev/urandom" }
+ }
+}
+
+
+#[cfg(any(target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten"))]
+mod imp {
+ use Error;
+ use super::random_device;
+ use super::OsRngImpl;
+ use std::fs::File;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng();
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ random_device::open("/dev/random", &|p| File::open(p))?;
+ Ok(OsRng())
+ }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ random_device::read(dest)
+ }
+
+ #[cfg(target_os = "emscripten")]
+ fn max_chunk_size(&self) -> usize {
+ // `Crypto.getRandomValues` documents `dest` should be at most 65536
+ // bytes. `crypto.randomBytes` documents: "To minimize threadpool
+ // task length variation, partition large randomBytes requests when
+ // doing so as part of fulfilling a client request."
+ 65536
+ }
+
+ fn method_str(&self) -> &'static str { "/dev/random" }
+ }
+}
+
+
+// Read from `/dev/random`, with chunks of limited size (1040 bytes).
+// `/dev/random` uses the Hash_DRBG with SHA512 algorithm from NIST SP 800-90A.
+// `/dev/urandom` uses the FIPS 186-2 algorithm, which is considered less
+// secure. We choose to read from `/dev/random`.
+//
+// Since Solaris 11.3 the `getrandom` syscall is available. To make sure we can
+// compile on both Solaris and on OpenSolaris derivatives that do not have the
+// function, we do a direct syscall instead of calling a library function.
+//
+// We have no way to differentiate between Solaris, illumos, SmartOS, etc.
+#[cfg(target_os = "solaris")]
+mod imp {
+ extern crate libc;
+
+ use {Error, ErrorKind};
+ use super::random_device;
+ use super::OsRngImpl;
+
+ use std::io;
+ use std::io::Read;
+ use std::fs::{File, OpenOptions};
+ use std::os::unix::fs::OpenOptionsExt;
+ use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng {
+ method: OsRngMethod,
+ initialized: bool,
+ }
+
+ #[derive(Clone, Debug)]
+ enum OsRngMethod {
+ GetRandom,
+ RandomDevice,
+ }
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ if is_getrandom_available() {
+ return Ok(OsRng { method: OsRngMethod::GetRandom,
+ initialized: false });
+ }
+ let open = |p| OpenOptions::new()
+ .read(true)
+ .custom_flags(libc::O_NONBLOCK)
+ .open(p);
+ random_device::open("/dev/random", &open)?;
+ Ok(OsRng { method: OsRngMethod::RandomDevice, initialized: false })
+ }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ match self.method {
+ OsRngMethod::GetRandom => getrandom_try_fill(dest, false),
+ OsRngMethod::RandomDevice => random_device::read(dest),
+ }
+ }
+
+ fn test_initialized(&mut self, dest: &mut [u8], blocking: bool)
+ -> Result<usize, Error>
+ {
+ static OS_RNG_INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
+ if !self.initialized {
+ self.initialized = OS_RNG_INITIALIZED.load(Ordering::Relaxed);
+ }
+ if self.initialized { return Ok(0); }
+
+ let chunk_len = ::core::cmp::min(1024, dest.len());
+ let dest = &mut dest[..chunk_len];
+
+ match self.method {
+ OsRngMethod::GetRandom => getrandom_try_fill(dest, blocking)?,
+ OsRngMethod::RandomDevice => {
+ if blocking {
+ info!("OsRng: testing random device /dev/random");
+ // We already have a non-blocking handle, but now need a
+ // blocking one. Not much choice except opening it twice
+ let mut file = File::open("/dev/random")
+ .map_err(random_device::map_err)?;
+ file.read(dest).map_err(random_device::map_err)?;
+ } else {
+ self.fill_chunk(dest)?;
+ }
+ }
+ };
+ OS_RNG_INITIALIZED.store(true, Ordering::Relaxed);
+ self.initialized = true;
+ Ok(chunk_len)
+ }
+
+ fn max_chunk_size(&self) -> usize {
+ // The documentation says 1024 is the maximum for getrandom, but
+ // 1040 for /dev/random.
+ 1024
+ }
+
+ fn method_str(&self) -> &'static str {
+ match self.method {
+ OsRngMethod::GetRandom => "getrandom",
+ OsRngMethod::RandomDevice => "/dev/random",
+ }
+ }
+ }
+
+ fn getrandom(buf: &mut [u8], blocking: bool) -> libc::c_long {
+ extern "C" {
+ fn syscall(number: libc::c_long, ...) -> libc::c_long;
+ }
+
+ const SYS_GETRANDOM: libc::c_long = 143;
+ const GRND_NONBLOCK: libc::c_uint = 0x0001;
+ const GRND_RANDOM: libc::c_uint = 0x0002;
+
+ unsafe {
+ syscall(SYS_GETRANDOM, buf.as_mut_ptr(), buf.len(),
+ if blocking { 0 } else { GRND_NONBLOCK } | GRND_RANDOM)
+ }
+ }
+
+ fn getrandom_try_fill(dest: &mut [u8], blocking: bool) -> Result<(), Error> {
+ let result = getrandom(dest, blocking);
+ if result == -1 || result == 0 {
+ let err = io::Error::last_os_error();
+ let kind = err.kind();
+ if kind == io::ErrorKind::WouldBlock {
+ return Err(Error::with_cause(
+ ErrorKind::NotReady,
+ "getrandom not ready",
+ err,
+ ));
+ } else {
+ return Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "unexpected getrandom error",
+ err,
+ ));
+ }
+ } else if result != dest.len() as i64 {
+ return Err(Error::new(ErrorKind::Unavailable,
+ "unexpected getrandom error"));
+ }
+ Ok(())
+ }
+
+ fn is_getrandom_available() -> bool {
+ use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
+ use std::sync::{Once, ONCE_INIT};
+
+ static CHECKER: Once = ONCE_INIT;
+ static AVAILABLE: AtomicBool = ATOMIC_BOOL_INIT;
+
+ CHECKER.call_once(|| {
+ debug!("OsRng: testing getrandom");
+ let mut buf: [u8; 0] = [];
+ let result = getrandom(&mut buf, false);
+ let available = if result == -1 {
+ let err = io::Error::last_os_error().raw_os_error();
+ err != Some(libc::ENOSYS)
+ } else {
+ true
+ };
+ AVAILABLE.store(available, Ordering::Relaxed);
+ info!("OsRng: using {}", if available { "getrandom" } else { "/dev/random" });
+ });
+
+ AVAILABLE.load(Ordering::Relaxed)
+ }
+}
+
+
+#[cfg(target_os = "cloudabi")]
+mod imp {
+ extern crate cloudabi;
+
+ use std::io;
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng;
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> { Ok(OsRng) }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let errno = unsafe { cloudabi::random_get(dest) };
+ if errno == cloudabi::errno::SUCCESS {
+ Ok(())
+ } else {
+ // Cloudlibc provides its own `strerror` implementation so we
+ // can use `from_raw_os_error` here.
+ Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "random_get() system call failed",
+ io::Error::from_raw_os_error(errno as i32),
+ ))
+ }
+ }
+
+ fn method_str(&self) -> &'static str { "cloudabi::random_get" }
+ }
+}
+
+
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+mod imp {
+ extern crate libc;
+
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ use std::io;
+ use self::libc::{c_int, size_t};
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng;
+
+ enum SecRandom {}
+
+ #[allow(non_upper_case_globals)]
+ const kSecRandomDefault: *const SecRandom = 0 as *const SecRandom;
+
+ #[link(name = "Security", kind = "framework")]
+ extern {
+ fn SecRandomCopyBytes(rnd: *const SecRandom,
+ count: size_t, bytes: *mut u8) -> c_int;
+ }
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> { Ok(OsRng) }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let ret = unsafe {
+ SecRandomCopyBytes(kSecRandomDefault,
+ dest.len() as size_t,
+ dest.as_mut_ptr())
+ };
+ if ret == -1 {
+ Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "couldn't generate random bytes",
+ io::Error::last_os_error()))
+ } else {
+ Ok(())
+ }
+ }
+
+ fn method_str(&self) -> &'static str { "SecRandomCopyBytes" }
+ }
+}
+
+
+#[cfg(target_os = "freebsd")]
+mod imp {
+ extern crate libc;
+
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ use std::ptr;
+ use std::io;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng;
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> { Ok(OsRng) }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let mib = [libc::CTL_KERN, libc::KERN_ARND];
+ let mut len = dest.len();
+ let ret = unsafe {
+ libc::sysctl(mib.as_ptr(), mib.len() as libc::c_uint,
+ dest.as_mut_ptr() as *mut _, &mut len,
+ ptr::null(), 0)
+ };
+ if ret == -1 || len != dest.len() {
+ return Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "kern.arandom sysctl failed",
+ io::Error::last_os_error()));
+ }
+ Ok(())
+ }
+
+ fn max_chunk_size(&self) -> usize { 256 }
+
+ fn method_str(&self) -> &'static str { "kern.arandom" }
+ }
+}
+
+
+#[cfg(any(target_os = "openbsd", target_os = "bitrig"))]
+mod imp {
+ extern crate libc;
+
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ use std::io;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng;
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> { Ok(OsRng) }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let ret = unsafe {
+ libc::getentropy(dest.as_mut_ptr() as *mut libc::c_void, dest.len())
+ };
+ if ret == -1 {
+ return Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "getentropy failed",
+ io::Error::last_os_error()));
+ }
+ Ok(())
+ }
+
+ fn max_chunk_size(&self) -> usize { 256 }
+
+ fn method_str(&self) -> &'static str { "getentropy" }
+ }
+}
+
+
+#[cfg(target_os = "redox")]
+mod imp {
+ use Error;
+ use super::random_device;
+ use super::OsRngImpl;
+ use std::fs::File;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng();
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ random_device::open("rand:", &|p| File::open(p))?;
+ Ok(OsRng())
+ }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ random_device::read(dest)
+ }
+
+ fn method_str(&self) -> &'static str { "'rand:'" }
+ }
+}
+
+
+#[cfg(target_os = "fuchsia")]
+mod imp {
+ extern crate fuchsia_zircon;
+
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng;
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> { Ok(OsRng) }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let mut read = 0;
+ while read < dest.len() {
+ match fuchsia_zircon::cprng_draw(&mut dest[read..]) {
+ Ok(actual) => read += actual,
+ Err(e) => {
+ return Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "cprng_draw failed",
+ e.into_io_error()));
+ }
+ };
+ }
+ Ok(())
+ }
+
+ fn max_chunk_size(&self) -> usize {
+ fuchsia_zircon::sys::ZX_CPRNG_DRAW_MAX_LEN
+ }
+
+ fn method_str(&self) -> &'static str { "cprng_draw" }
+ }
+}
+
+
+#[cfg(windows)]
+mod imp {
+ extern crate winapi;
+
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ use std::io;
+
+ use self::winapi::shared::minwindef::ULONG;
+ use self::winapi::um::ntsecapi::RtlGenRandom;
+ use self::winapi::um::winnt::PVOID;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng;
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> { Ok(OsRng) }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let ret = unsafe {
+ RtlGenRandom(dest.as_mut_ptr() as PVOID, dest.len() as ULONG)
+ };
+ if ret == 0 {
+ return Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "couldn't generate random bytes",
+ io::Error::last_os_error()));
+ }
+ Ok(())
+ }
+
+ fn max_chunk_size(&self) -> usize { <ULONG>::max_value() as usize }
+
+ fn method_str(&self) -> &'static str { "RtlGenRandom" }
+ }
+}
+
+
+#[cfg(all(target_arch = "wasm32",
+ not(target_os = "emscripten"),
+ feature = "stdweb"))]
+mod imp {
+ use std::mem;
+ use stdweb::unstable::TryInto;
+ use stdweb::web::error::Error as WebError;
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ #[derive(Clone, Debug)]
+ enum OsRngMethod {
+ Browser,
+ Node
+ }
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng(OsRngMethod);
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ let result = js! {
+ try {
+ if (
+ typeof self === "object" &&
+ typeof self.crypto === "object" &&
+ typeof self.crypto.getRandomValues === "function"
+ ) {
+ return { success: true, ty: 1 };
+ }
+
+ if (typeof require("crypto").randomBytes === "function") {
+ return { success: true, ty: 2 };
+ }
+
+ return { success: false, error: new Error("not supported") };
+ } catch(err) {
+ return { success: false, error: err };
+ }
+ };
+
+ if js!{ return @{ result.as_ref() }.success } == true {
+ let ty = js!{ return @{ result }.ty };
+
+ if ty == 1 { Ok(OsRng(OsRngMethod::Browser)) }
+ else if ty == 2 { Ok(OsRng(OsRngMethod::Node)) }
+ else { unreachable!() }
+ } else {
+ let err: WebError = js!{ return @{ result }.error }.try_into().unwrap();
+ Err(Error::with_cause(ErrorKind::Unavailable, "WASM Error", err))
+ }
+ }
+
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ assert_eq!(mem::size_of::<usize>(), 4);
+
+ let len = dest.len() as u32;
+ let ptr = dest.as_mut_ptr() as i32;
+
+ let result = match self.0 {
+ OsRngMethod::Browser => js! {
+ try {
+ let array = new Uint8Array(@{ len });
+ self.crypto.getRandomValues(array);
+ HEAPU8.set(array, @{ ptr });
+
+ return { success: true };
+ } catch(err) {
+ return { success: false, error: err };
+ }
+ },
+ OsRngMethod::Node => js! {
+ try {
+ let bytes = require("crypto").randomBytes(@{ len });
+ HEAPU8.set(new Uint8Array(bytes), @{ ptr });
+
+ return { success: true };
+ } catch(err) {
+ return { success: false, error: err };
+ }
+ }
+ };
+
+ if js!{ return @{ result.as_ref() }.success } == true {
+ Ok(())
+ } else {
+ let err: WebError = js!{ return @{ result }.error }.try_into().unwrap();
+ Err(Error::with_cause(ErrorKind::Unexpected, "WASM Error", err))
+ }
+ }
+
+ fn max_chunk_size(&self) -> usize { 65536 }
+
+ fn method_str(&self) -> &'static str {
+ match self.0 {
+ OsRngMethod::Browser => "Crypto.getRandomValues",
+ OsRngMethod::Node => "crypto.randomBytes",
+ }
+ }
+ }
+}
+
+#[cfg(all(target_arch = "wasm32",
+ not(target_os = "emscripten"),
+ not(feature = "stdweb"),
+ feature = "wasm-bindgen"))]
+mod imp {
+ use __wbg_shims::*;
+
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ #[derive(Clone, Debug)]
+ pub enum OsRng {
+ Node(NodeCrypto),
+ Browser(BrowserCrypto),
+ }
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ // First up we need to detect if we're running in node.js or a
+ // browser. To do this we get ahold of the `this` object (in a bit
+ // of a roundabout fashion).
+ //
+ // Once we have `this` we look at its `self` property, which is
+ // only defined on the web (either a main window or web worker).
+ let this = Function::new("return this").call(&JsValue::undefined());
+ assert!(this != JsValue::undefined());
+ let this = This::from(this);
+ let is_browser = this.self_() != JsValue::undefined();
+
+ if !is_browser {
+ return Ok(OsRng::Node(node_require("crypto")))
+ }
+
+ // If `self` is defined then we're in a browser somehow (main window
+ // or web worker). Here we want to try to use
+ // `crypto.getRandomValues`, but if `crypto` isn't defined we assume
+ // we're in an older web browser and the OS RNG isn't available.
+ let crypto = this.crypto();
+ if crypto.is_undefined() {
+ let msg = "self.crypto is undefined";
+ return Err(Error::new(ErrorKind::Unavailable, msg))
+ }
+
+ // Test if `crypto.getRandomValues` is undefined as well
+ let crypto: BrowserCrypto = crypto.into();
+ if crypto.get_random_values_fn().is_undefined() {
+ let msg = "crypto.getRandomValues is undefined";
+ return Err(Error::new(ErrorKind::Unavailable, msg))
+ }
+
+ // Ok! `self.crypto.getRandomValues` is a defined value, so let's
+ // assume we can do browser crypto.
+ Ok(OsRng::Browser(crypto))
+ }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ match *self {
+ OsRng::Node(ref n) => n.random_fill_sync(dest),
+ OsRng::Browser(ref n) => n.get_random_values(dest),
+ }
+ Ok(())
+ }
+
+ fn max_chunk_size(&self) -> usize {
+ match *self {
+ OsRng::Node(_) => usize::max_value(),
+ OsRng::Browser(_) => {
+ // see https://developer.mozilla.org/en-US/docs/Web/API/Crypto/getRandomValues
+ //
+ // where it says:
+ //
+ // > A QuotaExceededError DOMException is thrown if the
+ // > requested length is greater than 65536 bytes.
+ 65536
+ }
+ }
+ }
+
+ fn method_str(&self) -> &'static str {
+ match *self {
+ OsRng::Node(_) => "crypto.randomFillSync",
+ OsRng::Browser(_) => "crypto.getRandomValues",
+ }
+ }
+ }
+}
+
+
+#[cfg(test)]
+mod test {
+ use RngCore;
+ use super::OsRng;
+
+ #[test]
+ fn test_os_rng() {
+ let mut r = OsRng::new().unwrap();
+
+ r.next_u32();
+ r.next_u64();
+
+ let mut v1 = [0u8; 1000];
+ r.fill_bytes(&mut v1);
+
+ let mut v2 = [0u8; 1000];
+ r.fill_bytes(&mut v2);
+
+ let mut n_diff_bits = 0;
+ for i in 0..v1.len() {
+ n_diff_bits += (v1[i] ^ v2[i]).count_ones();
+ }
+
+ // Check at least 1 bit per byte differs. p(failure) < 1e-1000 with random input.
+ assert!(n_diff_bits >= v1.len() as u32);
+ }
+
+ #[test]
+ fn test_os_rng_empty() {
+ let mut r = OsRng::new().unwrap();
+
+ let mut empty = [0u8; 0];
+ r.fill_bytes(&mut empty);
+ }
+
+ #[test]
+ fn test_os_rng_huge() {
+ let mut r = OsRng::new().unwrap();
+
+ let mut huge = [0u8; 100_000];
+ r.fill_bytes(&mut huge);
+ }
+
+ #[cfg(not(any(target_arch = "wasm32", target_arch = "asmjs")))]
+ #[test]
+ fn test_os_rng_tasks() {
+ use std::sync::mpsc::channel;
+ use std::thread;
+
+ let mut txs = vec!();
+ for _ in 0..20 {
+ let (tx, rx) = channel();
+ txs.push(tx);
+
+ thread::spawn(move|| {
+ // wait until all the tasks are ready to go.
+ rx.recv().unwrap();
+
+ // deschedule to attempt to interleave things as much
+ // as possible (XXX: is this a good test?)
+ let mut r = OsRng::new().unwrap();
+ thread::yield_now();
+ let mut v = [0u8; 1000];
+
+ for _ in 0..100 {
+ r.next_u32();
+ thread::yield_now();
+ r.next_u64();
+ thread::yield_now();
+ r.fill_bytes(&mut v);
+ thread::yield_now();
+ }
+ });
+ }
+
+ // start all the tasks
+ for tx in txs.iter() {
+ tx.send(()).unwrap();
+ }
+ }
+}
diff --git a/rand/src/rngs/small.rs b/rand/src/rngs/small.rs
new file mode 100644
index 0000000..e74a83e
--- /dev/null
+++ b/rand/src/rngs/small.rs
@@ -0,0 +1,105 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A small fast RNG
+
+use {RngCore, SeedableRng, Error};
+
+#[cfg(all(rust_1_26, target_pointer_width = "64"))]
+type Rng = ::rand_pcg::Pcg64Mcg;
+#[cfg(not(all(rust_1_26, target_pointer_width = "64")))]
+type Rng = ::rand_pcg::Pcg32;
+
+/// An RNG recommended when small state, cheap initialization and good
+/// performance are required. The PRNG algorithm in `SmallRng` is chosen to be
+/// efficient on the current platform, **without consideration for cryptography
+/// or security**. The size of its state is much smaller than for [`StdRng`].
+///
+/// Reproducibility of output from this generator is, however, not guaranteed;
+/// future library versions may use a different internal generator with
+/// different output. Further, this generator may not be portable and can
+/// produce different output depending on the architecture. If you require
+/// reproducible output, use a named RNG. Refer to the documentation on the
+/// [`prng` module](../prng/index.html).
+///
+/// The current algorithm is [`Pcg64Mcg`] on 64-bit platforms with Rust version
+/// 1.26 and later, or [`Pcg32`] otherwise.
+///
+/// # Examples
+///
+/// Initializing `SmallRng` with a random seed can be done using [`FromEntropy`]:
+///
+/// ```
+/// # use rand::Rng;
+/// use rand::FromEntropy;
+/// use rand::rngs::SmallRng;
+///
+/// // Create small, cheap to initialize and fast RNG with a random seed.
+/// // The randomness is supplied by the operating system.
+/// let mut small_rng = SmallRng::from_entropy();
+/// # let v: u32 = small_rng.gen();
+/// ```
+///
+/// When initializing a lot of `SmallRng`s, using [`thread_rng`] can be more
+/// efficient:
+///
+/// ```
+/// use std::iter;
+/// use rand::{SeedableRng, thread_rng};
+/// use rand::rngs::SmallRng;
+///
+/// // Create a big, expensive to initialize and slower, but unpredictable RNG.
+/// // This is cached and done only once per thread.
+/// let mut thread_rng = thread_rng();
+/// // Create small, cheap to initialize and fast RNGs with random seeds.
+/// // One can generally assume this won't fail.
+/// let rngs: Vec<SmallRng> = iter::repeat(())
+/// .map(|()| SmallRng::from_rng(&mut thread_rng).unwrap())
+/// .take(10)
+/// .collect();
+/// ```
+///
+/// [`FromEntropy`]: ../trait.FromEntropy.html
+/// [`StdRng`]: struct.StdRng.html
+/// [`thread_rng`]: ../fn.thread_rng.html
+/// [`Pcg64Mcg`]: ../../rand_pcg/type.Pcg64Mcg.html
+/// [`Pcg32`]: ../../rand_pcg/type.Pcg32.html
+#[derive(Clone, Debug)]
+pub struct SmallRng(Rng);
+
+impl RngCore for SmallRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl SeedableRng for SmallRng {
+ type Seed = <Rng as SeedableRng>::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ SmallRng(Rng::from_seed(seed))
+ }
+
+ fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> {
+ Rng::from_rng(rng).map(SmallRng)
+ }
+}
diff --git a/rand/src/rngs/std.rs b/rand/src/rngs/std.rs
new file mode 100644
index 0000000..ce1658b
--- /dev/null
+++ b/rand/src/rngs/std.rs
@@ -0,0 +1,81 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The standard RNG
+
+use {RngCore, CryptoRng, Error, SeedableRng};
+use rand_hc::Hc128Rng;
+
+/// The standard RNG. The PRNG algorithm in `StdRng` is chosen to be efficient
+/// on the current platform, to be statistically strong and unpredictable
+/// (meaning a cryptographically secure PRNG).
+///
+/// The current algorithm used on all platforms is [HC-128].
+///
+/// Reproducibility of output from this generator is, however, not guaranteed;
+/// future library versions may use a different internal generator with
+/// different output. Further, this generator may not be portable and can
+/// produce different output depending on the architecture. If you require
+/// reproducible output, use a named RNG, for example [`ChaChaRng`].
+///
+/// [HC-128]: ../../rand_hc/struct.Hc128Rng.html
+/// [`ChaChaRng`]: ../../rand_chacha/struct.ChaChaRng.html
+#[derive(Clone, Debug)]
+pub struct StdRng(Hc128Rng);
+
+impl RngCore for StdRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl SeedableRng for StdRng {
+ type Seed = <Hc128Rng as SeedableRng>::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ StdRng(Hc128Rng::from_seed(seed))
+ }
+
+ fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> {
+ Hc128Rng::from_rng(rng).map(StdRng)
+ }
+}
+
+impl CryptoRng for StdRng {}
+
+
+#[cfg(test)]
+mod test {
+ use {RngCore, SeedableRng};
+ use rngs::StdRng;
+
+ #[test]
+ fn test_stdrng_construction() {
+ let seed = [1,0,0,0, 23,0,0,0, 200,1,0,0, 210,30,0,0,
+ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0];
+ let mut rng1 = StdRng::from_seed(seed);
+ assert_eq!(rng1.next_u64(), 15759097995037006553);
+
+ let mut rng2 = StdRng::from_rng(rng1).unwrap();
+ assert_eq!(rng2.next_u64(), 6766915756997287454);
+ }
+}
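Since the docs above recommend a named algorithm when reproducibility matters, here is a minimal sketch using `ChaChaRng` from the separate `rand_chacha` crate (an extra dependency assumed for this example) instead of `StdRng`.

```rust
extern crate rand;
extern crate rand_chacha;

use rand::{RngCore, SeedableRng};
use rand_chacha::ChaChaRng;

fn main() {
    let seed = [42u8; 32];
    let mut a = ChaChaRng::from_seed(seed);
    let mut b = ChaChaRng::from_seed(seed);
    // Same seed, same named algorithm: the output streams match, which is the
    // property `StdRng` deliberately does not promise across versions.
    assert_eq!(a.next_u64(), b.next_u64());
}
```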
diff --git a/rand/src/rngs/thread.rs b/rand/src/rngs/thread.rs
new file mode 100644
index 0000000..ff772e3
--- /dev/null
+++ b/rand/src/rngs/thread.rs
@@ -0,0 +1,135 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Thread-local random number generator
+
+use std::cell::UnsafeCell;
+
+use {RngCore, CryptoRng, SeedableRng, Error};
+use rngs::adapter::ReseedingRng;
+use rngs::EntropyRng;
+use rand_hc::Hc128Core;
+
+// Rationale for using `UnsafeCell` in `ThreadRng`:
+//
+// Previously we used a `RefCell`, with an overhead of ~15%. There will only
+// ever be one mutable reference to the interior of the `UnsafeCell`, because
+// we only have such a reference inside `next_u32`, `next_u64`, etc. Within a
+// single thread (which is the definition of `ThreadRng`), there will only ever
+// be one of these methods active at a time.
+//
+// A possible scenario where there could be multiple mutable references is if
+// `ThreadRng` is used inside `next_u32` and co. But the implementation is
+// completely under our control. We just have to ensure none of them use
+// `ThreadRng` internally, which is nonsensical anyway. We should also never run
+// `ThreadRng` in destructors of its implementation, which is also nonsensical.
+//
+// The additional `Rc` is not strictly necessary, and could be removed. For now
+// it ensures `ThreadRng` stays `!Send` and `!Sync`, and implements `Clone`.
+
+
+// Number of generated bytes after which to reseed `ThreadRng`.
+//
+// The time it takes to reseed HC-128 is roughly equivalent to generating 7 KiB.
+// We pick a threshold here that is large enough to not reduce the average
+// performance too much, but also small enough to not make reseeding something
+// that basically never happens.
+const THREAD_RNG_RESEED_THRESHOLD: u64 = 32*1024*1024; // 32 MiB
+
+/// The type returned by [`thread_rng`], essentially just a reference to the
+/// PRNG in thread-local memory.
+///
+/// `ThreadRng` uses [`ReseedingRng`] wrapping the same PRNG as [`StdRng`],
+/// which is reseeded after generating 32 MiB of random data. A single instance
+/// is cached per thread and the returned `ThreadRng` is a reference to this
+/// instance — hence `ThreadRng` is neither `Send` nor `Sync` but is safe to use
+/// within a single thread. This RNG is seeded and reseeded via [`EntropyRng`]
+/// as required.
+///
+/// Note that the reseeding is done as an extra precaution against entropy
+/// leaks and is in theory unnecessary — to predict `ThreadRng`'s output, an
+/// attacker would have to either determine most of the RNG's seed or internal
+/// state, or crack the algorithm used.
+///
+/// Like [`StdRng`], `ThreadRng` is a cryptographically secure PRNG. The current
+/// algorithm used is [HC-128], which is an array-based PRNG that trades memory
+/// usage for better performance. This makes it similar to ISAAC, the algorithm
+/// used in `ThreadRng` before rand 0.5.
+///
+/// Cloning this handle just produces a new reference to the same thread-local
+/// generator.
+///
+/// [`thread_rng`]: ../fn.thread_rng.html
+/// [`ReseedingRng`]: adapter/struct.ReseedingRng.html
+/// [`StdRng`]: struct.StdRng.html
+/// [`EntropyRng`]: struct.EntropyRng.html
+/// [HC-128]: ../../rand_hc/struct.Hc128Rng.html
+#[derive(Clone, Debug)]
+pub struct ThreadRng {
+ // use of raw pointer implies type is neither Send nor Sync
+ rng: *mut ReseedingRng<Hc128Core, EntropyRng>,
+}
+
+thread_local!(
+ static THREAD_RNG_KEY: UnsafeCell<ReseedingRng<Hc128Core, EntropyRng>> = {
+ let mut entropy_source = EntropyRng::new();
+ let r = Hc128Core::from_rng(&mut entropy_source).unwrap_or_else(|err|
+ panic!("could not initialize thread_rng: {}", err));
+ let rng = ReseedingRng::new(r,
+ THREAD_RNG_RESEED_THRESHOLD,
+ entropy_source);
+ UnsafeCell::new(rng)
+ }
+);
+
+/// Retrieve the lazily-initialized thread-local random number
+/// generator, seeded by the system. Intended to be used in method
+/// chaining style, e.g. `thread_rng().gen::<i32>()`, or cached locally, e.g.
+/// `let mut rng = thread_rng();`.
+///
+/// For more information see [`ThreadRng`].
+///
+/// [`ThreadRng`]: rngs/struct.ThreadRng.html
+pub fn thread_rng() -> ThreadRng {
+ ThreadRng { rng: THREAD_RNG_KEY.with(|t| t.get()) }
+}
+
+impl RngCore for ThreadRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ unsafe { (*self.rng).next_u32() }
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ unsafe { (*self.rng).next_u64() }
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ unsafe { (*self.rng).fill_bytes(dest) }
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ unsafe { (*self.rng).try_fill_bytes(dest) }
+ }
+}
+
+impl CryptoRng for ThreadRng {}
+
+
+#[cfg(test)]
+mod test {
+ #[test]
+ #[cfg(not(feature="stdweb"))]
+ fn test_thread_rng() {
+ use Rng;
+ let mut r = ::thread_rng();
+ r.gen::<i32>();
+ assert_eq!(r.gen_range(0, 1), 0);
+ }
+}
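A short sketch of the usage style described in the `ThreadRng` docs above: method chaining for a one-off value, or caching the handle when several values are drawn.

```rust
extern crate rand;

use rand::{thread_rng, Rng};

fn main() {
    // One-off value via method chaining.
    let flip: bool = thread_rng().gen();

    // Cache the handle when drawing several values.
    let mut rng = thread_rng();
    let die = rng.gen_range(1, 7); // uniform in 1..=6
    let x: f64 = rng.gen();        // uniform in [0, 1)
    println!("flip = {}, die = {}, x = {}", flip, die, x);
}
```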