path: root/rand/rand_core/src
author    Daniel Mueller <deso@posteo.net>  2019-01-02 21:14:10 -0800
committer Daniel Mueller <deso@posteo.net>  2019-01-02 21:14:10 -0800
commit    ecf3474223ca3d16a10f12dc2272e3b0ed72c1bb (patch)
tree      03134a683791176b49ef5c92e8d6acd24c3b5a9b /rand/rand_core/src
parent    686f61b75055ecb02baf9d9449525ae447a3bed1 (diff)
Update nitrokey crate to 0.2.3
This change updates the nitrokey crate to version 0.2.3. This version bumps
the rand crate used to 0.6.1, which in turn requires an additional set of
dependencies.

Import subrepo nitrokey/:nitrokey at b3e2adc5bb1300441ca74cc7672617c042f3ea31
Import subrepo rand/:rand at 73613ff903512e9503e41cc8ba9eae76269dc598
Import subrepo rustc_version/:rustc_version at 0294f2ba2018bf7be672abd53db351ce5055fa02
Import subrepo semver-parser/:semver-parser at 750da9b11a04125231b1fb293866ca036845acee
Import subrepo semver/:semver at 5eb6db94fa03f4d5c64a625a56188f496be47598
Diffstat (limited to 'rand/rand_core/src')
-rw-r--r--  rand/rand_core/src/block.rs   508
-rw-r--r--  rand/rand_core/src/error.rs   177
-rw-r--r--  rand/rand_core/src/impls.rs   165
-rw-r--r--  rand/rand_core/src/le.rs       68
-rw-r--r--  rand/rand_core/src/lib.rs     486
5 files changed, 1404 insertions, 0 deletions
diff --git a/rand/rand_core/src/block.rs b/rand/rand_core/src/block.rs
new file mode 100644
index 0000000..de480e4
--- /dev/null
+++ b/rand/rand_core/src/block.rs
@@ -0,0 +1,508 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The `BlockRngCore` trait and implementation helpers
+//!
+//! The [`BlockRngCore`] trait exists to assist in the implementation of RNGs
+//! which generate a block of data in a cache instead of returning generated
+//! values directly.
+//!
+//! Usage of this trait is optional, but provides two advantages:
+//! implementations only need to concern themselves with generation of the
+//! block, not the various [`RngCore`] methods (especially [`fill_bytes`], where
+//! the optimal implementations are not trivial), and this allows
+//! [`ReseedingRng`] to perform periodic reseeding with very low overhead.
+//!
+//! # Example
+//!
+//! ```no_run
+//! use rand_core::{RngCore, SeedableRng};
+//! use rand_core::block::{BlockRngCore, BlockRng};
+//!
+//! struct MyRngCore;
+//!
+//! impl BlockRngCore for MyRngCore {
+//!     type Item = u32;
+//!     type Results = [u32; 16];
+//!
+//!     fn generate(&mut self, results: &mut Self::Results) {
+//!         unimplemented!()
+//!     }
+//! }
+//!
+//! impl SeedableRng for MyRngCore {
+//!     type Seed = [u8; 32];
+//!     fn from_seed(seed: Self::Seed) -> Self {
+//!         unimplemented!()
+//!     }
+//! }
+//!
+//! // optionally, also implement CryptoRng for MyRngCore
+//!
+//! // Final RNG.
+//! type MyRng = BlockRng<MyRngCore>;
+//!
+//! let mut rng = MyRng::from_seed([0u8; 32]);
+//! println!("first value: {}", rng.next_u32());
+//! ```
+//!
+//! [`BlockRngCore`]: trait.BlockRngCore.html
+//! [`RngCore`]: ../trait.RngCore.html
+//! [`fill_bytes`]: ../trait.RngCore.html#tymethod.fill_bytes
+//! [`ReseedingRng`]: ../../rand/rngs/adapter/struct.ReseedingRng.html
+
+use core::convert::AsRef;
+use core::fmt;
+use {RngCore, CryptoRng, SeedableRng, Error};
+use impls::{fill_via_u32_chunks, fill_via_u64_chunks};
+
+/// A trait for RNGs which do not generate random numbers individually, but in
+/// blocks (typically `[u32; N]`). This technique is commonly used by
+/// cryptographic RNGs to improve performance.
+///
+/// See the [module documentation](index.html) for details.
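+///
+/// # Example
+///
+/// A deliberately non-random sketch (an editor's illustration; `CounterCore`
+/// is a hypothetical type, not part of this crate) showing just what the
+/// trait requires:
+///
+/// ```
+/// use rand_core::block::BlockRngCore;
+///
+/// struct CounterCore(u32);
+///
+/// impl BlockRngCore for CounterCore {
+///     type Item = u32;
+///     type Results = [u32; 16];
+///
+///     fn generate(&mut self, results: &mut Self::Results) {
+///         for r in results.as_mut() {
+///             self.0 = self.0.wrapping_add(1);
+///             *r = self.0;
+///         }
+///     }
+/// }
+/// ```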
+pub trait BlockRngCore {
+ /// Results element type, e.g. `u32`.
+ type Item;
+
+ /// Results type. This is the 'block' an RNG implementing `BlockRngCore`
+ /// generates, which will usually be an array like `[u32; 16]`.
+ type Results: AsRef<[Self::Item]> + AsMut<[Self::Item]> + Default;
+
+ /// Generate a new block of results.
+ fn generate(&mut self, results: &mut Self::Results);
+}
+
+
+/// A wrapper type implementing [`RngCore`] for some type implementing
+/// [`BlockRngCore`] with `u32` array buffer; i.e. this can be used to implement
+/// a full RNG from just a `generate` function.
+///
+/// The `core` field may be accessed directly but the results buffer may not.
+/// PRNG implementations can simply use a type alias
+/// (`pub type MyRng = BlockRng<MyRngCore>;`) but might prefer to use a
+/// wrapper type (`pub struct MyRng(BlockRng<MyRngCore>);`); the latter must
+/// re-implement `RngCore` but hides the implementation details and allows
+/// extra functionality to be defined on the RNG
+/// (e.g. `impl MyRng { fn set_stream(...){...} }`).
+///
+/// `BlockRng` has heavily optimized implementations of the [`RngCore`] methods
+/// reading values from the results buffer, as well as
+/// calling [`BlockRngCore::generate`] directly on the output array when
+/// [`fill_bytes`] / [`try_fill_bytes`] is called on a large array. These methods
+/// also handle the bookkeeping of when to generate a new batch of values.
+///
+/// No whole generated `u32` values are thrown away and all values are consumed
+/// in-order. [`next_u32`] simply takes the next available `u32` value.
+/// [`next_u64`] is implemented by combining two `u32` values, least
+/// significant first. [`fill_bytes`] and [`try_fill_bytes`] consume a whole
+/// number of `u32` values, converting each `u32` to a byte slice in
+/// little-endian order. If the requested byte length is not a multiple of 4,
+/// some bytes will be discarded.
+///
+/// See also [`BlockRng64`] which uses `u64` array buffers. Currently there is
+/// no direct support for other buffer types.
+///
+/// For easy initialization `BlockRng` also implements [`SeedableRng`].
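+///
+/// # Example
+///
+/// A hedged sketch (an editor's illustration) assuming some `MyRngCore`
+/// implementing [`BlockRngCore`] and [`SeedableRng`], as in the module
+/// documentation:
+///
+/// ```ignore
+/// use rand_core::{RngCore, SeedableRng};
+/// use rand_core::block::BlockRng;
+///
+/// let mut rng = BlockRng::<MyRngCore>::from_seed([0u8; 32]);
+/// let first = rng.next_u32();  // generates a block, returns results[0]
+/// let second = rng.next_u32(); // served from the same buffered block
+/// ```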
+///
+/// [`BlockRngCore`]: trait.BlockRngCore.html
+/// [`BlockRngCore::generate`]: trait.BlockRngCore.html#tymethod.generate
+/// [`BlockRng64`]: struct.BlockRng64.html
+/// [`RngCore`]: ../trait.RngCore.html
+/// [`next_u32`]: ../trait.RngCore.html#tymethod.next_u32
+/// [`next_u64`]: ../trait.RngCore.html#tymethod.next_u64
+/// [`fill_bytes`]: ../trait.RngCore.html#tymethod.fill_bytes
+/// [`try_fill_bytes`]: ../trait.RngCore.html#tymethod.try_fill_bytes
+/// [`SeedableRng`]: ../trait.SeedableRng.html
+#[derive(Clone)]
+#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
+pub struct BlockRng<R: BlockRngCore + ?Sized> {
+ results: R::Results,
+ index: usize,
+ /// The *core* part of the RNG, implementing the `generate` function.
+ pub core: R,
+}
+
+// Custom Debug implementation that does not expose the contents of `results`.
+impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng<R> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("BlockRng")
+ .field("core", &self.core)
+ .field("result_len", &self.results.as_ref().len())
+ .field("index", &self.index)
+ .finish()
+ }
+}
+
+impl<R: BlockRngCore> BlockRng<R> {
+ /// Create a new `BlockRng` from an existing RNG implementing
+ /// `BlockRngCore`. Results will be generated on first use.
+ pub fn new(core: R) -> BlockRng<R> {
+ let results_empty = R::Results::default();
+ BlockRng {
+ core,
+ index: results_empty.as_ref().len(),
+ results: results_empty,
+ }
+ }
+
+ /// Get the index into the result buffer.
+ ///
+ /// If this is equal to or larger than the size of the result buffer then
+ /// the buffer is "empty" and `generate()` must be called to produce new
+ /// results.
+ pub fn index(&self) -> usize {
+ self.index
+ }
+
+ /// Reset the number of available results.
+ /// This will force a new set of results to be generated on next use.
+ pub fn reset(&mut self) {
+ self.index = self.results.as_ref().len();
+ }
+
+ /// Generate a new set of results immediately, setting the index to the
+ /// given value.
+ pub fn generate_and_set(&mut self, index: usize) {
+ assert!(index < self.results.as_ref().len());
+ self.core.generate(&mut self.results);
+ self.index = index;
+ }
+}
+
+impl<R: BlockRngCore<Item=u32>> RngCore for BlockRng<R>
+where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
+{
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ if self.index >= self.results.as_ref().len() {
+ self.generate_and_set(0);
+ }
+
+ let value = self.results.as_ref()[self.index];
+ self.index += 1;
+ value
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ let read_u64 = |results: &[u32], index| {
+ if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
+ // requires little-endian CPU supporting unaligned reads:
+ unsafe { *(&results[index] as *const u32 as *const u64) }
+ } else {
+ let x = u64::from(results[index]);
+ let y = u64::from(results[index + 1]);
+ (y << 32) | x
+ }
+ };
+
+ let len = self.results.as_ref().len();
+
+ let index = self.index;
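+ // Three cases: at least two `u32` values are available (fast path); the
+ // buffer is exhausted (refill, then read); or exactly one value remains
+ // (combine it with the first value of a freshly generated block).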
+ if index < len-1 {
+ self.index += 2;
+ // Read a u64 from the current index.
+ read_u64(self.results.as_ref(), index)
+ } else if index >= len {
+ self.generate_and_set(2);
+ read_u64(self.results.as_ref(), 0)
+ } else {
+ let x = u64::from(self.results.as_ref()[len-1]);
+ self.generate_and_set(1);
+ let y = u64::from(self.results.as_ref()[0]);
+ (y << 32) | x
+ }
+ }
+
+ // As an optimization we try to write directly into the output buffer.
+ // This is only enabled for little-endian platforms where unaligned writes
+ // are known to be safe and fast.
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ let mut filled = 0;
+
+ // Continue filling from the current set of results
+ if self.index < self.results.as_ref().len() {
+ let (consumed_u32, filled_u8) =
+ fill_via_u32_chunks(&self.results.as_ref()[self.index..],
+ dest);
+
+ self.index += consumed_u32;
+ filled += filled_u8;
+ }
+
+ let len_remainder =
+ (dest.len() - filled) % (self.results.as_ref().len() * 4);
+ let end_direct = dest.len() - len_remainder;
+
+ while filled < end_direct {
+ let dest_u32: &mut R::Results = unsafe {
+ &mut *(dest[filled..].as_mut_ptr() as
+ *mut <R as BlockRngCore>::Results)
+ };
+ self.core.generate(dest_u32);
+ filled += self.results.as_ref().len() * 4;
+ self.index = self.results.as_ref().len();
+ }
+
+ if len_remainder > 0 {
+ self.core.generate(&mut self.results);
+ let (consumed_u32, _) =
+ fill_via_u32_chunks(self.results.as_ref(),
+ &mut dest[filled..]);
+
+ self.index = consumed_u32;
+ }
+ }
+
+ #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ let mut read_len = 0;
+ while read_len < dest.len() {
+ if self.index >= self.results.as_ref().len() {
+ self.generate_and_set(0);
+ }
+ let (consumed_u32, filled_u8) =
+ fill_via_u32_chunks(&self.results.as_ref()[self.index..],
+ &mut dest[read_len..]);
+
+ self.index += consumed_u32;
+ read_len += filled_u8;
+ }
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.fill_bytes(dest);
+ Ok(())
+ }
+}
+
+impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> {
+ type Seed = R::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ Self::new(R::from_seed(seed))
+ }
+
+ fn seed_from_u64(seed: u64) -> Self {
+ Self::new(R::seed_from_u64(seed))
+ }
+
+ fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
+ Ok(Self::new(R::from_rng(rng)?))
+ }
+}
+
+
+
+/// A wrapper type implementing [`RngCore`] for some type implementing
+/// [`BlockRngCore`] with `u64` array buffer; i.e. this can be used to implement
+/// a full RNG from just a `generate` function.
+///
+/// This is similar to [`BlockRng`], but specialized for algorithms that operate
+/// on `u64` values.
+///
+/// No whole generated `u64` values are thrown away and all values are consumed
+/// in-order. [`next_u64`] simply takes the next available `u64` value.
+/// [`next_u32`] is however a bit special: it consumes only half of a `u64`,
+/// leaving the other half in the buffer. If the next call is to [`next_u32`],
+/// that remaining half is consumed; however, both [`next_u64`] and
+/// [`fill_bytes`] discard any half-consumed `u64` when called.
+///
+/// [`fill_bytes`] and [`try_fill_bytes`] consume a whole number of `u64`
+/// values. If the requested length is not a multiple of 8, some bytes will be
+/// discarded.
+///
+/// [`BlockRngCore`]: trait.BlockRngCore.html
+/// [`RngCore`]: ../trait.RngCore.html
+/// [`next_u32`]: ../trait.RngCore.html#tymethod.next_u32
+/// [`next_u64`]: ../trait.RngCore.html#tymethod.next_u64
+/// [`fill_bytes`]: ../trait.RngCore.html#tymethod.fill_bytes
+/// [`try_fill_bytes`]: ../trait.RngCore.html#tymethod.try_fill_bytes
+/// [`BlockRng`]: struct.BlockRng.html
+#[derive(Clone)]
+#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
+pub struct BlockRng64<R: BlockRngCore + ?Sized> {
+ results: R::Results,
+ index: usize,
+ half_used: bool, // true if only half of the previous result is used
+ /// The *core* part of the RNG, implementing the `generate` function.
+ pub core: R,
+}
+
+// Custom Debug implementation that does not expose the contents of `results`.
+impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng64<R> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("BlockRng64")
+ .field("core", &self.core)
+ .field("result_len", &self.results.as_ref().len())
+ .field("index", &self.index)
+ .field("half_used", &self.half_used)
+ .finish()
+ }
+}
+
+impl<R: BlockRngCore> BlockRng64<R> {
+ /// Create a new `BlockRng64` from an existing RNG implementing
+ /// `BlockRngCore`. Results will be generated on first use.
+ pub fn new(core: R) -> BlockRng64<R> {
+ let results_empty = R::Results::default();
+ BlockRng64 {
+ core,
+ index: results_empty.as_ref().len(),
+ half_used: false,
+ results: results_empty,
+ }
+ }
+
+ /// Get the index into the result buffer.
+ ///
+ /// If this is equal to or larger than the size of the result buffer then
+ /// the buffer is "empty" and `generate()` must be called to produce new
+ /// results.
+ pub fn index(&self) -> usize {
+ self.index
+ }
+
+ /// Reset the number of available results.
+ /// This will force a new set of results to be generated on next use.
+ pub fn reset(&mut self) {
+ self.index = self.results.as_ref().len();
+ self.half_used = false;
+ }
+
+ /// Generate a new set of results immediately, setting the index to the
+ /// given value.
+ pub fn generate_and_set(&mut self, index: usize) {
+ assert!(index < self.results.as_ref().len());
+ self.core.generate(&mut self.results);
+ self.index = index;
+ self.half_used = false;
+ }
+}
+
+impl<R: BlockRngCore<Item=u64>> RngCore for BlockRng64<R>
+where <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]>
+{
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
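+ // View the `u64` buffer as twice as many `u32` halves; if the previous
+ // call consumed only half of a `u64`, step back one `u32` position so
+ // that remaining half is used first.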
+ let mut index = self.index * 2 - self.half_used as usize;
+ if index >= self.results.as_ref().len() * 2 {
+ self.core.generate(&mut self.results);
+ self.index = 0;
+ // `self.half_used` is by definition `false`
+ self.half_used = false;
+ index = 0;
+ }
+
+ self.half_used = !self.half_used;
+ self.index += self.half_used as usize;
+
+ // Index as if this is a u32 slice.
+ unsafe {
+ let results =
+ &*(self.results.as_ref() as *const [u64] as *const [u32]);
+ if cfg!(target_endian = "little") {
+ *results.get_unchecked(index)
+ } else {
+ *results.get_unchecked(index ^ 1)
+ }
+ }
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ if self.index >= self.results.as_ref().len() {
+ self.core.generate(&mut self.results);
+ self.index = 0;
+ }
+
+ let value = self.results.as_ref()[self.index];
+ self.index += 1;
+ self.half_used = false;
+ value
+ }
+
+ // As an optimization we try to write directly into the output buffer.
+ // This is only enabled for little-endian platforms where unaligned writes
+ // are known to be safe and fast.
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ let mut filled = 0;
+ self.half_used = false;
+
+ // Continue filling from the current set of results
+ if self.index < self.results.as_ref().len() {
+ let (consumed_u64, filled_u8) =
+ fill_via_u64_chunks(&self.results.as_ref()[self.index..],
+ dest);
+
+ self.index += consumed_u64;
+ filled += filled_u8;
+ }
+
+ let len_remainder =
+ (dest.len() - filled) % (self.results.as_ref().len() * 8);
+ let end_direct = dest.len() - len_remainder;
+
+ while filled < end_direct {
+ let dest_u64: &mut R::Results = unsafe {
+ ::core::mem::transmute(dest[filled..].as_mut_ptr())
+ };
+ self.core.generate(dest_u64);
+ filled += self.results.as_ref().len() * 8;
+ self.index = self.results.as_ref().len();
+ }
+
+ if len_remainder > 0 {
+ self.core.generate(&mut self.results);
+ let (consumed_u64, _) =
+ fill_via_u64_chunks(self.results.as_ref(),
+ &mut dest[filled..]);
+
+ self.index = consumed_u64;
+ }
+ }
+
+ #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ let mut read_len = 0;
+ self.half_used = false;
+ while read_len < dest.len() {
+ if self.index >= self.results.as_ref().len() {
+ self.core.generate(&mut self.results);
+ self.index = 0;
+ }
+
+ let (consumed_u64, filled_u8) =
+ fill_via_u64_chunks(&self.results.as_ref()[self.index..],
+ &mut dest[read_len..]);
+
+ self.index += consumed_u64;
+ read_len += filled_u8;
+ }
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ Ok(self.fill_bytes(dest))
+ }
+}
+
+impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> {
+ type Seed = R::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ Self::new(R::from_seed(seed))
+ }
+
+ fn seed_from_u64(seed: u64) -> Self {
+ Self::new(R::seed_from_u64(seed))
+ }
+
+ fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
+ Ok(Self::new(R::from_rng(rng)?))
+ }
+}
+
+impl<R: BlockRngCore + CryptoRng> CryptoRng for BlockRng<R> {}
diff --git a/rand/rand_core/src/error.rs b/rand/rand_core/src/error.rs
new file mode 100644
index 0000000..5a8459e
--- /dev/null
+++ b/rand/rand_core/src/error.rs
@@ -0,0 +1,177 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Error types
+
+use core::fmt;
+
+#[cfg(feature="std")]
+use std::error::Error as stdError;
+#[cfg(feature="std")]
+use std::io;
+
+/// Error kind which can be matched over.
+#[derive(PartialEq, Eq, Debug, Copy, Clone)]
+pub enum ErrorKind {
+ /// Feature is not available; not recoverable.
+ ///
+ /// This is the most permanent failure type and implies the error cannot be
+ /// resolved simply by retrying (e.g. the feature may not exist in this
+ /// build of the application or on the current platform).
+ Unavailable,
+ /// General failure; there may be a chance of recovery on retry.
+ ///
+ /// This is the catch-all kind for errors from known and unknown sources
+ /// which do not have a more specific kind / handling method.
+ ///
+ /// It is suggested to retry a couple of times or retry later when
+ /// handling; some error sources may be able to resolve themselves,
+ /// although this is not likely.
+ Unexpected,
+ /// A transient failure which likely can be resolved or worked around.
+ ///
+ /// This error kind exists for a few specific cases where it is known that
+ /// the error likely can be resolved internally, but is reported anyway.
+ Transient,
+ /// Not ready yet: recommended to try again a little later.
+ ///
+ /// This error kind implies the generator needs more time or needs some
+ /// other part of the application to do something else first before it is
+ /// ready for use; for example this may be used by external generators
+ /// which require time for initialization.
+ NotReady,
+ #[doc(hidden)]
+ __Nonexhaustive,
+}
+
+impl ErrorKind {
+ /// True if this kind of error may resolve itself on retry.
+ ///
+ /// See also `should_wait()`.
+ pub fn should_retry(self) -> bool {
+ self != ErrorKind::Unavailable
+ }
+
+ /// True if we should retry but wait before retrying
+ ///
+ /// This implies `should_retry()` is true.
+ pub fn should_wait(self) -> bool {
+ self == ErrorKind::NotReady
+ }
+
+ /// A description of this error kind
+ pub fn description(self) -> &'static str {
+ match self {
+ ErrorKind::Unavailable => "permanently unavailable",
+ ErrorKind::Unexpected => "unexpected failure",
+ ErrorKind::Transient => "transient failure",
+ ErrorKind::NotReady => "not ready yet",
+ ErrorKind::__Nonexhaustive => unreachable!(),
+ }
+ }
+}
+
+
+/// Error type of random number generators
+///
+/// This is a relatively simple error type, designed for compatibility with and
+/// without the Rust `std` library. It embeds a "kind" code, a message (static
+/// string only), and an optional chained cause (`std` only). The `kind` and
+/// `msg` fields can be accessed directly; cause can be accessed via
+/// `std::error::Error::cause` or `Error::take_cause`. Construction can only be
+/// done via `Error::new` or `Error::with_cause`.
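+///
+/// # Example
+///
+/// A small usage sketch (an editor's illustration):
+///
+/// ```
+/// use rand_core::{Error, ErrorKind};
+///
+/// let err = Error::new(ErrorKind::Unavailable, "no entropy source");
+/// if !err.kind.should_retry() {
+///     println!("permanent failure: {}", err);
+/// }
+/// ```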
+#[derive(Debug)]
+pub struct Error {
+ /// The error kind
+ pub kind: ErrorKind,
+ /// The error message
+ pub msg: &'static str,
+ #[cfg(feature="std")]
+ cause: Option<Box<stdError + Send + Sync>>,
+}
+
+impl Error {
+ /// Create a new instance, with specified kind and a message.
+ pub fn new(kind: ErrorKind, msg: &'static str) -> Self {
+ #[cfg(feature="std")] {
+ Error { kind, msg, cause: None }
+ }
+ #[cfg(not(feature="std"))] {
+ Error { kind, msg }
+ }
+ }
+
+ /// Create a new instance, with specified kind, message, and a
+ /// chained cause.
+ ///
+ /// Note: `stdError` is an alias for `std::error::Error`.
+ ///
+ /// If not targeting `std` (i.e. `no_std`), this function is replaced by
+ /// another with the same prototype, except that there are no bounds on the
+ /// type `E` (because both `Box` and `stdError` are unavailable), and the
+ /// `cause` is ignored.
+ #[cfg(feature="std")]
+ pub fn with_cause<E>(kind: ErrorKind, msg: &'static str, cause: E) -> Self
+ where E: Into<Box<stdError + Send + Sync>>
+ {
+ Error { kind, msg, cause: Some(cause.into()) }
+ }
+
+ /// Create a new instance, with specified kind, message, and a
+ /// chained cause.
+ ///
+ /// In `no_std` mode the *cause* is ignored.
+ #[cfg(not(feature="std"))]
+ pub fn with_cause<E>(kind: ErrorKind, msg: &'static str, _cause: E) -> Self {
+ Error { kind, msg }
+ }
+
+ /// Take the cause, if any. This allows the embedded cause to be extracted.
+ /// This uses `Option::take`, leaving `self` with no cause.
+ #[cfg(feature="std")]
+ pub fn take_cause(&mut self) -> Option<Box<stdError + Send + Sync>> {
+ self.cause.take()
+ }
+}
+
+impl fmt::Display for Error {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ #[cfg(feature="std")] {
+ if let Some(ref cause) = self.cause {
+ return write!(f, "{} ({}); cause: {}",
+ self.msg, self.kind.description(), cause);
+ }
+ }
+ write!(f, "{} ({})", self.msg, self.kind.description())
+ }
+}
+
+#[cfg(feature="std")]
+impl stdError for Error {
+ fn description(&self) -> &str {
+ self.msg
+ }
+
+ fn cause(&self) -> Option<&stdError> {
+ self.cause.as_ref().map(|e| e.as_ref() as &stdError)
+ }
+}
+
+#[cfg(feature="std")]
+impl From<Error> for io::Error {
+ fn from(error: Error) -> Self {
+ use std::io::ErrorKind::*;
+ match error.kind {
+ ErrorKind::Unavailable => io::Error::new(NotFound, error),
+ ErrorKind::Unexpected |
+ ErrorKind::Transient => io::Error::new(Other, error),
+ ErrorKind::NotReady => io::Error::new(WouldBlock, error),
+ ErrorKind::__Nonexhaustive => unreachable!(),
+ }
+ }
+}
diff --git a/rand/rand_core/src/impls.rs b/rand/rand_core/src/impls.rs
new file mode 100644
index 0000000..57bdd07
--- /dev/null
+++ b/rand/rand_core/src/impls.rs
@@ -0,0 +1,165 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Helper functions for implementing `RngCore` functions.
+//!
+//! For cross-platform reproducibility, these functions all use Little Endian:
+//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
+//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
+//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
+//!
+//! Byte-swapping (like the std `to_le` functions) is only needed to convert
+//! to/from byte sequences, and since its purpose is reproducibility,
+//! non-reproducible sources (e.g. `OsRng`) need not bother with it.
+
+use core::intrinsics::transmute;
+use core::ptr::copy_nonoverlapping;
+use core::slice;
+use core::cmp::min;
+use core::mem::size_of;
+use RngCore;
+
+
+/// Implement `next_u64` via `next_u32`, little-endian order.
+pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
+ // Use LE; we explicitly generate one value before the next.
+ let x = u64::from(rng.next_u32());
+ let y = u64::from(rng.next_u32());
+ (y << 32) | x
+}
+
+/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order.
+///
+/// The fastest way to fill a slice is usually to work as long as possible with
+/// integers. That is why this method mostly uses `next_u64`, and only when
+/// there are four or fewer bytes remaining at the end of the slice it uses
+/// `next_u32` once.
+pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
+ let mut left = dest;
+ while left.len() >= 8 {
+ let (l, r) = {left}.split_at_mut(8);
+ left = r;
+ let chunk: [u8; 8] = unsafe {
+ transmute(rng.next_u64().to_le())
+ };
+ l.copy_from_slice(&chunk);
+ }
+ let n = left.len();
+ if n > 4 {
+ let chunk: [u8; 8] = unsafe {
+ transmute(rng.next_u64().to_le())
+ };
+ left.copy_from_slice(&chunk[..n]);
+ } else if n > 0 {
+ let chunk: [u8; 4] = unsafe {
+ transmute(rng.next_u32().to_le())
+ };
+ left.copy_from_slice(&chunk[..n]);
+ }
+}
+
+macro_rules! impl_uint_from_fill {
+ ($rng:expr, $ty:ty, $N:expr) => ({
+ debug_assert!($N == size_of::<$ty>());
+
+ let mut int: $ty = 0;
+ unsafe {
+ let ptr = &mut int as *mut $ty as *mut u8;
+ let slice = slice::from_raw_parts_mut(ptr, $N);
+ $rng.fill_bytes(slice);
+ }
+ int
+ });
+}
+
+macro_rules! fill_via_chunks {
+ ($src:expr, $dst:expr, $ty:ty, $size:expr) => ({
+ let chunk_size_u8 = min($src.len() * $size, $dst.len());
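+ // Whole or partial words consumed from `$src`, rounding up when the last
+ // word is only partially used.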
+ let chunk_size = (chunk_size_u8 + $size - 1) / $size;
+ if cfg!(target_endian="little") {
+ unsafe {
+ copy_nonoverlapping(
+ $src.as_ptr() as *const u8,
+ $dst.as_mut_ptr(),
+ chunk_size_u8);
+ }
+ } else {
+ for (&n, chunk) in $src.iter().zip($dst.chunks_mut($size)) {
+ let tmp = n.to_le();
+ let src_ptr = &tmp as *const $ty as *const u8;
+ unsafe {
+ copy_nonoverlapping(src_ptr,
+ chunk.as_mut_ptr(),
+ chunk.len());
+ }
+ }
+ }
+
+ (chunk_size, chunk_size_u8)
+ });
+}
+
+/// Implement `fill_bytes` by reading chunks from the output buffer of a
+/// block-based RNG.
+///
+/// The return values are `(consumed_u32, filled_u8)`.
+///
+/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
+/// the length of `dest`.
+/// `consumed_u32` is the number of words consumed from `src`, which is the same
+/// as `filled_u8 / 4` rounded up.
+///
+/// # Example
+/// (from `IsaacRng`)
+///
+/// ```ignore
+/// fn fill_bytes(&mut self, dest: &mut [u8]) {
+/// let mut read_len = 0;
+/// while read_len < dest.len() {
+/// if self.index >= self.rsl.len() {
+/// self.isaac();
+/// }
+///
+/// let (consumed_u32, filled_u8) =
+/// impls::fill_via_u32_chunks(&mut self.rsl[self.index..],
+/// &mut dest[read_len..]);
+///
+/// self.index += consumed_u32;
+/// read_len += filled_u8;
+/// }
+/// }
+/// ```
+pub fn fill_via_u32_chunks(src: &[u32], dest: &mut [u8]) -> (usize, usize) {
+ fill_via_chunks!(src, dest, u32, 4)
+}
+
+/// Implement `fill_bytes` by reading chunks from the output buffer of a
+/// block-based RNG.
+///
+/// The return values are `(consumed_u64, filled_u8)`.
+/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
+/// the length of `dest`.
+/// `consumed_u64` is the number of words consumed from `src`, which is the same
+/// as `filled_u8 / 8` rounded up.
+///
+/// See `fill_via_u32_chunks` for an example.
+pub fn fill_via_u64_chunks(src: &[u64], dest: &mut [u8]) -> (usize, usize) {
+ fill_via_chunks!(src, dest, u64, 8)
+}
+
+/// Implement `next_u32` via `fill_bytes`, little-endian order.
+pub fn next_u32_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u32 {
+ impl_uint_from_fill!(rng, u32, 4)
+}
+
+/// Implement `next_u64` via `fill_bytes`, little-endian order.
+pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
+ impl_uint_from_fill!(rng, u64, 8)
+}
+
+// TODO: implement tests for the above
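+
+// A minimal sketch of such tests (an editor's illustration; `Stepper` is a
+// hypothetical helper type, not part of this crate).
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    struct Stepper(u32);
+
+    impl RngCore for Stepper {
+        fn next_u32(&mut self) -> u32 {
+            self.0 = self.0.wrapping_add(1);
+            self.0
+        }
+        fn next_u64(&mut self) -> u64 {
+            next_u64_via_u32(self)
+        }
+        fn fill_bytes(&mut self, dest: &mut [u8]) {
+            fill_bytes_via_next(self, dest)
+        }
+        fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), ::Error> {
+            Ok(self.fill_bytes(dest))
+        }
+    }
+
+    #[test]
+    fn test_next_u64_via_u32() {
+        // Little-endian combination: x = 1 (first u32), y = 2 (second).
+        let mut rng = Stepper(0);
+        assert_eq!(rng.next_u64(), (2u64 << 32) | 1);
+    }
+
+    #[test]
+    fn test_fill_via_u32_chunks() {
+        let src = [1u32, 2, 3];
+        let mut dst = [0u8; 11];
+        // Three words cover 11 bytes; the last byte of the third word is
+        // dropped, so all 3 words are consumed and 11 bytes are filled.
+        assert_eq!(fill_via_u32_chunks(&src, &mut dst), (3, 11));
+        assert_eq!(&dst[..4], &[1, 0, 0, 0]);
+    }
+}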
diff --git a/rand/rand_core/src/le.rs b/rand/rand_core/src/le.rs
new file mode 100644
index 0000000..266651f
--- /dev/null
+++ b/rand/rand_core/src/le.rs
@@ -0,0 +1,68 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Little-Endian utilities
+//!
+//! Little-Endian order has been chosen for internal usage; this makes some
+//! useful functions available.
+
+use core::ptr;
+
+macro_rules! read_slice {
+ ($src:expr, $dst:expr, $size:expr, $which:ident) => {{
+ assert_eq!($src.len(), $size * $dst.len());
+
+ unsafe {
+ ptr::copy_nonoverlapping(
+ $src.as_ptr(),
+ $dst.as_mut_ptr() as *mut u8,
+ $src.len());
+ }
+ for v in $dst.iter_mut() {
+ *v = v.$which();
+ }
+ }};
+}
+
+/// Reads unsigned 32-bit integers from `src` into `dst`.
+/// Borrowed from the `byteorder` crate.
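+///
+/// # Example
+///
+/// A quick check of the little-endian semantics:
+///
+/// ```
+/// use rand_core::le::read_u32_into;
+///
+/// let mut dst = [0u32; 2];
+/// read_u32_into(&[1, 0, 0, 0, 2, 0, 0, 0], &mut dst);
+/// assert_eq!(dst, [1, 2]);
+/// ```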
+#[inline]
+pub fn read_u32_into(src: &[u8], dst: &mut [u32]) {
+ read_slice!(src, dst, 4, to_le);
+}
+
+/// Reads unsigned 64-bit integers from `src` into `dst`.
+/// Borrowed from the `byteorder` crate.
+#[inline]
+pub fn read_u64_into(src: &[u8], dst: &mut [u64]) {
+ read_slice!(src, dst, 8, to_le);
+}
+
+#[test]
+fn test_read() {
+ let bytes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
+
+ let mut buf = [0u32; 4];
+ read_u32_into(&bytes, &mut buf);
+ assert_eq!(buf[0], 0x04030201);
+ assert_eq!(buf[3], 0x100F0E0D);
+
+ let mut buf = [0u32; 3];
+ read_u32_into(&bytes[1..13], &mut buf); // unaligned
+ assert_eq!(buf[0], 0x05040302);
+ assert_eq!(buf[2], 0x0D0C0B0A);
+
+ let mut buf = [0u64; 2];
+ read_u64_into(&bytes, &mut buf);
+ assert_eq!(buf[0], 0x0807060504030201);
+ assert_eq!(buf[1], 0x100F0E0D0C0B0A09);
+
+ let mut buf = [0u64; 1];
+ read_u64_into(&bytes[7..15], &mut buf); // unaligned
+ assert_eq!(buf[0], 0x0F0E0D0C0B0A0908);
+}
diff --git a/rand/rand_core/src/lib.rs b/rand/rand_core/src/lib.rs
new file mode 100644
index 0000000..a65db93
--- /dev/null
+++ b/rand/rand_core/src/lib.rs
@@ -0,0 +1,486 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2017-2018 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Random number generation traits
+//!
+//! This crate is mainly of interest to crates publishing implementations of
+//! [`RngCore`]. Other users are encouraged to use the [rand] crate instead,
+//! which re-exports the main traits and error types.
+//!
+//! [`RngCore`] is the core trait implemented by algorithmic pseudo-random number
+//! generators and external random-number sources.
+//!
+//! [`SeedableRng`] is an extension trait for construction from fixed seeds and
+//! other random number generators.
+//!
+//! [`Error`] is provided for error-handling. It is safe to use in `no_std`
+//! environments.
+//!
+//! The [`impls`] and [`le`] sub-modules include a few small functions to assist
+//! implementation of [`RngCore`].
+//!
+//! [rand]: https://crates.io/crates/rand
+//! [`RngCore`]: trait.RngCore.html
+//! [`SeedableRng`]: trait.SeedableRng.html
+//! [`Error`]: struct.Error.html
+//! [`impls`]: impls/index.html
+//! [`le`]: le/index.html
+
+#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
+ html_favicon_url = "https://www.rust-lang.org/favicon.ico",
+ html_root_url = "https://rust-random.github.io/rand/")]
+
+#![deny(missing_docs)]
+#![deny(missing_debug_implementations)]
+#![doc(test(attr(allow(unused_variables), deny(warnings))))]
+
+#![cfg_attr(not(feature="std"), no_std)]
+#![cfg_attr(all(feature="alloc", not(feature="std")), feature(alloc))]
+
+#[cfg(feature="std")] extern crate core;
+#[cfg(all(feature = "alloc", not(feature="std")))] extern crate alloc;
+#[cfg(feature="serde1")] extern crate serde;
+#[cfg(feature="serde1")] #[macro_use] extern crate serde_derive;
+
+
+use core::default::Default;
+use core::convert::AsMut;
+use core::ptr::copy_nonoverlapping;
+
+#[cfg(all(feature="alloc", not(feature="std")))] use alloc::boxed::Box;
+
+pub use error::{ErrorKind, Error};
+
+
+mod error;
+pub mod block;
+pub mod impls;
+pub mod le;
+
+
+/// The core of a random number generator.
+///
+/// This trait encapsulates the low-level functionality common to all
+/// generators, and is the "back end", to be implemented by generators.
+/// End users should normally use [`Rng`] from the [rand] crate, which is
+/// automatically implemented for every type implementing `RngCore`.
+///
+/// Three different methods for generating random data are provided since the
+/// optimal implementation of each is dependent on the type of generator. There
+/// is no required relationship between the output of each; e.g. many
+/// implementations of [`fill_bytes`] consume a whole number of `u32` or `u64`
+/// values and drop any remaining unused bytes.
+///
+/// The [`try_fill_bytes`] method is a variant of [`fill_bytes`] allowing error
+/// handling; it is not deemed sufficiently useful to add equivalents for
+/// [`next_u32`] or [`next_u64`] since the latter methods are almost always used
+/// with algorithmic generators (PRNGs), which are normally infallible.
+///
+/// Algorithmic generators implementing [`SeedableRng`] should normally have
+/// *portable, reproducible* output, i.e. fix endianness when converting values
+/// to avoid platform differences, and avoid making any changes which affect
+/// output (except by communicating that the release has breaking changes).
+///
+/// Typically implementors will implement only one of the methods available
+/// in this trait directly, then use the helper functions from the
+/// [`rand_core::impls`] module to implement the other methods.
+///
+/// It is recommended that implementations also implement:
+///
+/// - `Debug` with a custom implementation which *does not* print any internal
+/// state (at least, [`CryptoRng`]s should not risk leaking state through
+/// `Debug`).
+/// - `Serialize` and `Deserialize` (from Serde), preferably making Serde
+/// support optional at the crate level in PRNG libs.
+/// - `Clone`, if possible.
+/// - *never* implement `Copy` (accidental copies may cause repeated values).
+/// - *do not* implement `Default` for pseudorandom generators, but instead
+/// implement [`SeedableRng`], to guide users towards proper seeding.
+/// External / hardware RNGs can choose to implement `Default`.
+/// - `Eq` and `PartialEq` could be implemented, but are probably not useful.
+///
+/// # Example
+///
+/// A simple example, obviously not generating very *random* output:
+///
+/// ```
+/// #![allow(dead_code)]
+/// use rand_core::{RngCore, Error, impls};
+///
+/// struct CountingRng(u64);
+///
+/// impl RngCore for CountingRng {
+/// fn next_u32(&mut self) -> u32 {
+/// self.next_u64() as u32
+/// }
+///
+/// fn next_u64(&mut self) -> u64 {
+/// self.0 += 1;
+/// self.0
+/// }
+///
+/// fn fill_bytes(&mut self, dest: &mut [u8]) {
+/// impls::fill_bytes_via_next(self, dest)
+/// }
+///
+/// fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+/// Ok(self.fill_bytes(dest))
+/// }
+/// }
+/// ```
+///
+/// [rand]: https://crates.io/crates/rand
+/// [`Rng`]: ../rand/trait.Rng.html
+/// [`SeedableRng`]: trait.SeedableRng.html
+/// [`rand_core::impls`]: ../rand_core/impls/index.html
+/// [`try_fill_bytes`]: trait.RngCore.html#tymethod.try_fill_bytes
+/// [`fill_bytes`]: trait.RngCore.html#tymethod.fill_bytes
+/// [`next_u32`]: trait.RngCore.html#tymethod.next_u32
+/// [`next_u64`]: trait.RngCore.html#tymethod.next_u64
+/// [`CryptoRng`]: trait.CryptoRng.html
+pub trait RngCore {
+ /// Return the next random `u32`.
+ ///
+ /// RNGs must implement at least one method from this trait directly. In
+ /// the case this method is not implemented directly, it can be implemented
+ /// using `self.next_u64() as u32` or
+ /// [via `fill_bytes`](../rand_core/impls/fn.next_u32_via_fill.html).
+ fn next_u32(&mut self) -> u32;
+
+ /// Return the next random `u64`.
+ ///
+ /// RNGs must implement at least one method from this trait directly. In
+ /// the case this method is not implemented directly, it can be implemented
+ /// [via `next_u32`](../rand_core/impls/fn.next_u64_via_u32.html) or
+ /// [via `fill_bytes`](../rand_core/impls/fn.next_u64_via_fill.html).
+ fn next_u64(&mut self) -> u64;
+
+ /// Fill `dest` with random data.
+ ///
+ /// RNGs must implement at least one method from this trait directly. In
+ /// the case this method is not implemented directly, it can be implemented
+ /// [via `next_u*`](../rand_core/impls/fn.fill_bytes_via_next.html) or
+ /// via `try_fill_bytes`; if this generator can fail the implementation
+ /// must choose how best to handle errors here (e.g. panic with a
+ /// descriptive message or log a warning and retry a few times).
+ ///
+ /// This method should guarantee that `dest` is entirely filled
+ /// with new data, and may panic if this is impossible
+ /// (e.g. reading past the end of a file that is being used as the
+ /// source of randomness).
+ fn fill_bytes(&mut self, dest: &mut [u8]);
+
+ /// Fill `dest` entirely with random data.
+ ///
+ /// This is the only method which allows an RNG to report errors while
+ /// generating random data thus making this the primary method implemented
+ /// by external (true) RNGs (e.g. `OsRng`) which can fail. It may be used
+ /// directly to generate keys and to seed (infallible) PRNGs.
+ ///
+ /// Other than error handling, this method is identical to [`fill_bytes`];
+ /// thus this may be implemented using `Ok(self.fill_bytes(dest))` or
+ /// `fill_bytes` may be implemented with
+ /// `self.try_fill_bytes(dest).unwrap()` or more specific error handling.
+ ///
+ /// [`fill_bytes`]: trait.RngCore.html#method.fill_bytes
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error>;
+}
+
+/// A marker trait used to indicate that an [`RngCore`] or [`BlockRngCore`]
+/// implementation is supposed to be cryptographically secure.
+///
+/// *Cryptographically secure generators*, also known as *CSPRNGs*, should
+/// satisfy an additional property over other generators: given the first
+/// *k* bits of an algorithm's output
+/// sequence, it should not be possible using polynomial-time algorithms to
+/// predict the next bit with probability significantly greater than 50%.
+///
+/// Some generators may satisfy an additional property, however this is not
+/// required by this trait: if the CSPRNG's state is revealed, it should not be
+/// computationally feasible to reconstruct output prior to this. Some other
+/// generators allow backwards-computation and are considered *reversible*.
+///
+/// Note that this trait is provided for guidance only and cannot guarantee
+/// suitability for cryptographic applications. In general it should only be
+/// implemented for well-reviewed code implementing well-regarded algorithms.
+///
+/// Note also that use of a `CryptoRng` does not protect against other
+/// weaknesses such as seeding from a weak entropy source or leaking state.
+///
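+/// # Example
+///
+/// Implementing the marker is a single line. `MyChaChaRng` below is a
+/// hypothetical, well-reviewed generator (an editor's illustration, not an
+/// item of this crate):
+///
+/// ```ignore
+/// impl CryptoRng for MyChaChaRng {}
+/// ```
+///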
+/// [`RngCore`]: trait.RngCore.html
+/// [`BlockRngCore`]: ../rand_core/block/trait.BlockRngCore.html
+pub trait CryptoRng {}
+
+/// A random number generator that can be explicitly seeded.
+///
+/// This trait encapsulates the low-level functionality common to all
+/// pseudo-random number generators (PRNGs, or algorithmic generators).
+///
+/// The [`rand::FromEntropy`] trait is automatically implemented for every type
+/// implementing `SeedableRng`, providing a convenient `from_entropy()`
+/// constructor.
+///
+/// [`rand::FromEntropy`]: ../rand/trait.FromEntropy.html
+pub trait SeedableRng: Sized {
+ /// Seed type, which is restricted to types mutably dereferenceable as `u8`
+ /// arrays (we recommend `[u8; N]` for some `N`).
+ ///
+ /// It is recommended to seed PRNGs with a seed of at least circa 100 bits,
+ /// which means an array of `[u8; 12]` or greater to avoid picking RNGs with
+ /// partially overlapping periods.
+ ///
+ /// For cryptographic RNGs a seed of 256 bits is recommended: `[u8; 32]`.
+ ///
+ /// # Implementing `SeedableRng` for RNGs with large seeds
+ ///
+ /// Note that the required traits `core::default::Default` and
+ /// `core::convert::AsMut<[u8]>` are not implemented for large arrays
+ /// `[u8; N]` with `N` > 32. To be able to implement the traits required by
+ /// `SeedableRng` for RNGs with such large seeds, the newtype pattern can be
+ /// used:
+ ///
+ /// ```
+ /// use rand_core::SeedableRng;
+ ///
+ /// const N: usize = 64;
+ /// pub struct MyRngSeed(pub [u8; N]);
+ /// pub struct MyRng(MyRngSeed);
+ ///
+ /// impl Default for MyRngSeed {
+ /// fn default() -> MyRngSeed {
+ /// MyRngSeed([0; N])
+ /// }
+ /// }
+ ///
+ /// impl AsMut<[u8]> for MyRngSeed {
+ /// fn as_mut(&mut self) -> &mut [u8] {
+ /// &mut self.0
+ /// }
+ /// }
+ ///
+ /// impl SeedableRng for MyRng {
+ /// type Seed = MyRngSeed;
+ ///
+ /// fn from_seed(seed: MyRngSeed) -> MyRng {
+ /// MyRng(seed)
+ /// }
+ /// }
+ /// ```
+ type Seed: Sized + Default + AsMut<[u8]>;
+
+ /// Create a new PRNG using the given seed.
+ ///
+ /// PRNG implementations are allowed to assume that bits in the seed are
+ /// well distributed. That usually means that the numbers of one and zero
+ /// bits are roughly equal, and that values like 0, 1 and (size - 1) are
+ /// unlikely.
+ ///
+ /// PRNG implementations are recommended to be reproducible. A PRNG seeded
+ /// using this function with a fixed seed should produce the same sequence
+ /// of output in the future and on different architectures (with, for
+ /// example, different endianness).
+ ///
+ /// It is however not required that this function yield the same state as a
+ /// reference implementation of the PRNG given equivalent seed; if necessary
+ /// another constructor replicating behaviour from a reference
+ /// implementation can be added.
+ ///
+ /// PRNG implementations should make sure `from_seed` never panics. In the
+ /// case that some special values (like an all zero seed) are not viable
+ /// seeds it is preferable to map these to alternative constant value(s),
+ /// for example `0xBAD5EEDu32` or `0x0DDB1A5E5BAD5EEDu64` ("odd biases? bad
+ /// seed"). This is assuming only a small number of values must be rejected.
+ fn from_seed(seed: Self::Seed) -> Self;
+
+ /// Create a new PRNG using a `u64` seed.
+ ///
+ /// This is a convenience-wrapper around `from_seed` to allow construction
+ /// of any `SeedableRng` from a simple `u64` value. It is designed such that
+ /// low Hamming Weight numbers like 0 and 1 can be used and should still
+ /// result in good, independent seeds to the PRNG which is returned.
+ ///
+ /// This **is not suitable for cryptography**, as should be clear given that
+ /// the input size is only 64 bits.
+ ///
+ /// Implementations for PRNGs *may* provide their own implementations of
+ /// this function, but the default implementation should be good enough for
+ /// all purposes. *Changing* the implementation of this function should be
+ /// considered a value-breaking change.
+ fn seed_from_u64(mut state: u64) -> Self {
+ // We use PCG32 to generate a u32 sequence, and copy to the seed
+ const MUL: u64 = 6364136223846793005;
+ const INC: u64 = 11634580027462260723;
+
+ let mut seed = Self::Seed::default();
+ for chunk in seed.as_mut().chunks_mut(4) {
+ // We advance the state first (to get away from the input value,
+ // in case it has low Hamming Weight).
+ state = state.wrapping_mul(MUL).wrapping_add(INC);
+
+ // Use PCG output function with to_le to generate x:
+ let xorshifted = (((state >> 18) ^ state) >> 27) as u32;
+ let rot = (state >> 59) as u32;
+ let x = xorshifted.rotate_right(rot).to_le();
+
+ unsafe {
+ let p = &x as *const u32 as *const u8;
+ copy_nonoverlapping(p, chunk.as_mut_ptr(), chunk.len());
+ }
+ }
+
+ Self::from_seed(seed)
+ }
+
+ /// Create a new PRNG seeded from another `Rng`.
+ ///
+ /// This is the recommended way to initialize PRNGs with fresh entropy. The
+ /// [`FromEntropy`] trait provides a convenient `from_entropy` method
+ /// based on `from_rng`.
+ ///
+ /// Usage of this method is not recommended when reproducibility is
+ /// required, since PRNG implementations are not required to fix
+ /// endianness and are allowed to modify their implementations in new
+ /// releases.
+ ///
+ /// It is important to use a good source of randomness to initialize the
+ /// PRNG. A cryptographic PRNG may be rendered insecure when seeded from a
+ /// non-cryptographic PRNG or with insufficient entropy.
+ /// Many non-cryptographic PRNGs will show statistical bias in their first
+ /// results if their seed numbers are small or if there is a simple pattern
+ /// between them.
+ ///
+ /// Prefer to seed from a strong external entropy source like [`OsRng`] or
+ /// from a cryptographic PRNG; if creating a new generator for cryptographic
+ /// uses you *must* seed from a strong source.
+ ///
+ /// Seeding a small PRNG from another small PRNG is possible, but
+ /// something to be careful with. An extreme example of how this can go
+ /// wrong is seeding an Xorshift RNG from another Xorshift RNG, which
+ /// will effectively clone the generator. In general seeding from a
+ /// generator which is hard to predict is probably okay.
+ ///
+ /// PRNG implementations are allowed to assume that a good RNG is provided
+ /// for seeding, and that it is cryptographically secure when appropriate.
+ ///
+ /// [`FromEntropy`]: ../rand/trait.FromEntropy.html
+ /// [`OsRng`]: ../rand/rngs/struct.OsRng.html
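+ ///
+ /// # Example
+ ///
+ /// An illustrative sketch (an editor's addition; `MyRng` stands in for a
+ /// concrete PRNG and `OsRng` for an external entropy source such as the
+ /// one in the rand crate):
+ ///
+ /// ```ignore
+ /// let mut rng = MyRng::from_rng(OsRng::new()?)?;
+ /// ```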
+ fn from_rng<R: RngCore>(mut rng: R) -> Result<Self, Error> {
+ let mut seed = Self::Seed::default();
+ rng.try_fill_bytes(seed.as_mut())?;
+ Ok(Self::from_seed(seed))
+ }
+}
+
+// Implement `RngCore` for references to an `RngCore`.
+// Force inlining all functions, so that it is up to the `RngCore`
+// implementation and the optimizer to decide on inlining.
+impl<'a, R: RngCore + ?Sized> RngCore for &'a mut R {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ (**self).next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ (**self).next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ (**self).fill_bytes(dest)
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ (**self).try_fill_bytes(dest)
+ }
+}
+
+// Implement `RngCore` for boxed references to an `RngCore`.
+// Force inlining all functions, so that it is up to the `RngCore`
+// implementation and the optimizer to decide on inlining.
+#[cfg(feature="alloc")]
+impl<R: RngCore + ?Sized> RngCore for Box<R> {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ (**self).next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ (**self).next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ (**self).fill_bytes(dest)
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ (**self).try_fill_bytes(dest)
+ }
+}
+
+#[cfg(feature="std")]
+impl std::io::Read for RngCore {
+ fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
+ self.try_fill_bytes(buf)?;
+ Ok(buf.len())
+ }
+}
+
+// Implement `CryptoRng` for references to an `CryptoRng`.
+impl<'a, R: CryptoRng + ?Sized> CryptoRng for &'a mut R {}
+
+// Implement `CryptoRng` for boxed references to an `CryptoRng`.
+#[cfg(feature="alloc")]
+impl<R: CryptoRng + ?Sized> CryptoRng for Box<R> {}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_seed_from_u64() {
+ struct SeedableNum(u64);
+ impl SeedableRng for SeedableNum {
+ type Seed = [u8; 8];
+ fn from_seed(seed: Self::Seed) -> Self {
+ let mut x = [0u64; 1];
+ le::read_u64_into(&seed, &mut x);
+ SeedableNum(x[0])
+ }
+ }
+
+ const N: usize = 8;
+ const SEEDS: [u64; N] = [0u64, 1, 2, 3, 4, 8, 16, -1i64 as u64];
+ let mut results = [0u64; N];
+ for (i, seed) in SEEDS.iter().enumerate() {
+ let SeedableNum(x) = SeedableNum::seed_from_u64(*seed);
+ results[i] = x;
+ }
+
+ for (i1, r1) in results.iter().enumerate() {
+ let weight = r1.count_ones();
+ // This is the binomial distribution B(64, 0.5), so chance of
+ // weight < 20 is binocdf(19, 64, 0.5) = 7.8e-4, and same for
+ // weight > 44.
+ assert!(weight >= 20 && weight <= 44);
+
+ for (i2, r2) in results.iter().enumerate() {
+ if i1 == i2 { continue; }
+ let diff_weight = (r1 ^ r2).count_ones();
+ assert!(diff_weight >= 20);
+ }
+ }
+
+ // value-breakage test:
+ assert_eq!(results[0], 5029875928683246316);
+ }
+}