path: root/rand/src
Diffstat (limited to 'rand/src')
-rw-r--r--  rand/src/deprecated.rs  611
-rw-r--r--  rand/src/distributions/bernoulli.rs  165
-rw-r--r--  rand/src/distributions/binomial.rs  177
-rw-r--r--  rand/src/distributions/cauchy.rs  115
-rw-r--r--  rand/src/distributions/dirichlet.rs  137
-rw-r--r--  rand/src/distributions/exponential.rs  86
-rw-r--r--  rand/src/distributions/float.rs  259
-rw-r--r--  rand/src/distributions/gamma.rs  209
-rw-r--r--  rand/src/distributions/integer.rs  161
-rw-r--r--  rand/src/distributions/mod.rs  666
-rw-r--r--  rand/src/distributions/normal.rs  120
-rw-r--r--  rand/src/distributions/other.rs  219
-rw-r--r--  rand/src/distributions/pareto.rs  74
-rw-r--r--  rand/src/distributions/poisson.rs  157
-rw-r--r--  rand/src/distributions/range.rs  241
-rw-r--r--  rand/src/distributions/triangular.rs  86
-rw-r--r--  rand/src/distributions/uniform.rs  1297
-rw-r--r--  rand/src/distributions/unit_circle.rs  102
-rw-r--r--  rand/src/distributions/unit_sphere.rs  100
-rw-r--r--  rand/src/distributions/utils.rs  504
-rw-r--r--  rand/src/distributions/weibull.rs  71
-rw-r--r--  rand/src/distributions/weighted.rs  232
-rw-r--r--  rand/src/distributions/ziggurat_tables.rs  9
-rw-r--r--  rand/src/lib.rs  1604
-rw-r--r--  rand/src/os.rs  617
-rw-r--r--  rand/src/prelude.rs  27
-rw-r--r--  rand/src/prng/chacha.rs  321
-rw-r--r--  rand/src/prng/isaac.rs  328
-rw-r--r--  rand/src/prng/isaac64.rs  340
-rw-r--r--  rand/src/prng/mod.rs  74
-rw-r--r--  rand/src/prng/xorshift.rs  101
-rw-r--r--  rand/src/rand_impls.rs  299
-rw-r--r--  rand/src/read.rs  123
-rw-r--r--  rand/src/reseeding.rs  229
-rw-r--r--  rand/src/rngs/adapter/mod.rs  15
-rw-r--r--  rand/src/rngs/adapter/read.rs  137
-rw-r--r--  rand/src/rngs/adapter/reseeding.rs  370
-rw-r--r--  rand/src/rngs/entropy.rs  297
-rw-r--r--  rand/src/rngs/jitter.rs (renamed from rand/src/jitter.rs)  605
-rw-r--r--  rand/src/rngs/mock.rs  59
-rw-r--r--  rand/src/rngs/mod.rs  217
-rw-r--r--  rand/src/rngs/os.rs  1275
-rw-r--r--  rand/src/rngs/small.rs  105
-rw-r--r--  rand/src/rngs/std.rs  81
-rw-r--r--  rand/src/rngs/thread.rs  135
-rw-r--r--  rand/src/seq.rs  337
-rw-r--r--  rand/src/seq/index.rs  378
-rw-r--r--  rand/src/seq/mod.rs  836
48 files changed, 10126 insertions, 4582 deletions
diff --git a/rand/src/deprecated.rs b/rand/src/deprecated.rs
new file mode 100644
index 0000000..985ae61
--- /dev/null
+++ b/rand/src/deprecated.rs
@@ -0,0 +1,611 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Deprecated re-exports (we can't add deprecation warnings otherwise)
+
+#![allow(deprecated)]
+
+use rngs;
+use {RngCore, CryptoRng, SeedableRng, Error};
+use rand_core::block::BlockRngCore;
+use rand_isaac;
+use rand_chacha;
+use rand_hc;
+
+#[cfg(feature="std")]
+use std::io::Read;
+
+#[derive(Clone, Debug)]
+#[deprecated(since="0.6.0",
+ note="import from rand_isaac crate instead, or use the newer Hc128Rng")]
+pub struct IsaacRng(rand_isaac::IsaacRng);
+
+impl RngCore for IsaacRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl SeedableRng for IsaacRng {
+ type Seed = <rand_isaac::IsaacRng as SeedableRng>::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ IsaacRng(rand_isaac::IsaacRng::from_seed(seed))
+ }
+
+ fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> {
+ rand_isaac::IsaacRng::from_rng(rng).map(IsaacRng)
+ }
+}
+
+impl IsaacRng {
+ pub fn new_from_u64(seed: u64) -> Self {
+ IsaacRng(rand_isaac::IsaacRng::new_from_u64(seed))
+ }
+}
+
+
+#[derive(Clone, Debug)]
+#[deprecated(since="0.6.0",
+ note="import from rand_isaac crate instead, or use newer Hc128Rng")]
+pub struct Isaac64Rng(rand_isaac::Isaac64Rng);
+
+impl RngCore for Isaac64Rng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl SeedableRng for Isaac64Rng {
+ type Seed = <rand_isaac::Isaac64Rng as SeedableRng>::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ Isaac64Rng(rand_isaac::Isaac64Rng::from_seed(seed))
+ }
+
+ fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> {
+ rand_isaac::Isaac64Rng::from_rng(rng).map(Isaac64Rng)
+ }
+}
+
+impl Isaac64Rng {
+ pub fn new_from_u64(seed: u64) -> Self {
+ Isaac64Rng(rand_isaac::Isaac64Rng::new_from_u64(seed))
+ }
+}
+
+
+#[derive(Clone, Debug)]
+#[deprecated(since="0.6.0", note="import from rand_chacha crate instead")]
+pub struct ChaChaRng(rand_chacha::ChaChaRng);
+
+impl RngCore for ChaChaRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl SeedableRng for ChaChaRng {
+ type Seed = <rand_chacha::ChaChaRng as SeedableRng>::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ ChaChaRng(rand_chacha::ChaChaRng::from_seed(seed))
+ }
+
+ fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> {
+ rand_chacha::ChaChaRng::from_rng(rng).map(ChaChaRng)
+ }
+}
+
+impl ChaChaRng {
+ #[cfg(rust_1_26)]
+ pub fn get_word_pos(&self) -> u128 {
+ self.0.get_word_pos()
+ }
+
+ #[cfg(rust_1_26)]
+ pub fn set_word_pos(&mut self, word_offset: u128) {
+ self.0.set_word_pos(word_offset)
+ }
+
+ pub fn set_stream(&mut self, stream: u64) {
+ self.0.set_stream(stream)
+ }
+}
+
+impl CryptoRng for ChaChaRng {}
+
+
+#[derive(Clone, Debug)]
+#[deprecated(since="0.6.0", note="import from rand_hc crate instead")]
+pub struct Hc128Rng(rand_hc::Hc128Rng);
+
+impl RngCore for Hc128Rng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl SeedableRng for Hc128Rng {
+ type Seed = <rand_hc::Hc128Rng as SeedableRng>::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ Hc128Rng(rand_hc::Hc128Rng::from_seed(seed))
+ }
+
+ fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> {
+ rand_hc::Hc128Rng::from_rng(rng).map(Hc128Rng)
+ }
+}
+
+impl CryptoRng for Hc128Rng {}
+
+
+#[derive(Clone, Debug)]
+#[deprecated(since="0.6.0", note="import from rand_xorshift crate instead")]
+pub struct XorShiftRng(::rand_xorshift::XorShiftRng);
+
+impl RngCore for XorShiftRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl SeedableRng for XorShiftRng {
+ type Seed = <::rand_xorshift::XorShiftRng as SeedableRng>::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ XorShiftRng(::rand_xorshift::XorShiftRng::from_seed(seed))
+ }
+
+ fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> {
+ ::rand_xorshift::XorShiftRng::from_rng(rng).map(XorShiftRng)
+ }
+}
+
+
+#[derive(Clone, Debug)]
+#[deprecated(since="0.6.0",
+ note="import with rand::prelude::* or rand::rngs::StdRng instead")]
+pub struct StdRng(rngs::StdRng);
+
+impl RngCore for StdRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl SeedableRng for StdRng {
+ type Seed = <rngs::StdRng as SeedableRng>::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ StdRng(rngs::StdRng::from_seed(seed))
+ }
+
+ fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> {
+ rngs::StdRng::from_rng(rng).map(StdRng)
+ }
+}
+
+impl CryptoRng for StdRng {}
+
+
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+#[derive(Clone, Debug)]
+#[deprecated(since="0.6.0", note="import with rand::rngs::OsRng instead")]
+pub struct OsRng(rngs::OsRng);
+
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+#[cfg(feature="std")]
+impl RngCore for OsRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+#[cfg(feature="std")]
+impl OsRng {
+ pub fn new() -> Result<Self, Error> {
+ rngs::OsRng::new().map(OsRng)
+ }
+}
+
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+#[cfg(feature="std")]
+impl CryptoRng for OsRng {}
+
+
+#[cfg(feature="std")]
+#[derive(Debug)]
+#[deprecated(since="0.6.0", note="import with rand::rngs::EntropyRng instead")]
+pub struct EntropyRng(rngs::EntropyRng);
+
+#[cfg(feature="std")]
+impl RngCore for EntropyRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+#[cfg(feature="std")]
+impl EntropyRng {
+ pub fn new() -> Self {
+ EntropyRng(rngs::EntropyRng::new())
+ }
+}
+
+#[cfg(feature="std")]
+impl Default for EntropyRng {
+ fn default() -> Self {
+ EntropyRng::new()
+ }
+}
+
+#[cfg(feature="std")]
+impl CryptoRng for EntropyRng {}
+
+
+#[derive(Clone, Debug)]
+#[deprecated(since="0.6.0", note="import with rand::rngs::JitterRng instead")]
+pub struct JitterRng(rngs::JitterRng);
+
+impl RngCore for JitterRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl JitterRng {
+ #[cfg(all(feature="std", not(target_arch = "wasm32")))]
+ pub fn new() -> Result<JitterRng, rngs::TimerError> {
+ rngs::JitterRng::new().map(JitterRng)
+ }
+
+ pub fn new_with_timer(timer: fn() -> u64) -> JitterRng {
+ JitterRng(rngs::JitterRng::new_with_timer(timer))
+ }
+
+ pub fn set_rounds(&mut self, rounds: u8) {
+ self.0.set_rounds(rounds)
+ }
+
+ pub fn test_timer(&mut self) -> Result<u8, rngs::TimerError> {
+ self.0.test_timer()
+ }
+
+ #[cfg(feature="std")]
+ pub fn timer_stats(&mut self, var_rounds: bool) -> i64 {
+ self.0.timer_stats(var_rounds)
+ }
+}
+
+impl CryptoRng for JitterRng {}
+
+
+#[cfg(feature="std")]
+#[derive(Clone, Debug)]
+#[deprecated(since="0.6.0",
+ note="import with rand::prelude::* or rand::rngs::ThreadRng instead")]
+pub struct ThreadRng(rngs::ThreadRng);
+
+#[cfg(feature="std")]
+impl RngCore for ThreadRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+#[cfg(feature="std")]
+impl CryptoRng for ThreadRng {}
+
+
+#[cfg(feature="std")]
+#[derive(Debug)]
+#[deprecated(since="0.6.0", note="import with rand::rngs::adapter::ReadRng instead")]
+pub struct ReadRng<R>(rngs::adapter::ReadRng<R>);
+
+#[cfg(feature="std")]
+impl<R: Read> RngCore for ReadRng<R> {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ #[inline(always)]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ #[inline(always)]
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+#[cfg(feature="std")]
+impl<R: Read> ReadRng<R> {
+ pub fn new(r: R) -> ReadRng<R> {
+ ReadRng(rngs::adapter::ReadRng::new(r))
+ }
+}
+
+
+#[derive(Clone, Debug)]
+pub struct ReseedingRng<R, Rsdr>(rngs::adapter::ReseedingRng<R, Rsdr>)
+where R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore;
+
+impl<R, Rsdr: RngCore> RngCore for ReseedingRng<R, Rsdr>
+where R: BlockRngCore<Item = u32> + SeedableRng,
+ <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
+{
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest)
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl<R, Rsdr> ReseedingRng<R, Rsdr>
+where R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore
+{
+ pub fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
+ ReseedingRng(rngs::adapter::ReseedingRng::new(rng, threshold, reseeder))
+ }
+
+ pub fn reseed(&mut self) -> Result<(), Error> {
+ self.0.reseed()
+ }
+}
+
+impl<R, Rsdr> CryptoRng for ReseedingRng<R, Rsdr>
+where R: BlockRngCore + SeedableRng + CryptoRng,
+ Rsdr: RngCore + CryptoRng {}
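The whole of deprecated.rs follows one pattern: each type that moved out to a sub-crate (rand_isaac, rand_chacha, rand_hc, rand_xorshift) or into `rngs` is wrapped in a newtype so a `#[deprecated]` attribute can be attached to the old path, with every `RngCore` method forwarded to the inner value. A minimal standalone sketch of that pattern is below; `RelocatedRng` and `OldRng` are illustrative names, not types from the crate.

```rust
// Sketch of the deprecation-wrapper pattern used throughout deprecated.rs.
// `RelocatedRng` stands in for a type that moved to another crate.
pub struct RelocatedRng;

impl RelocatedRng {
    pub fn next_u32(&mut self) -> u32 { 0 /* placeholder implementation */ }
}

// The old name survives as a thin newtype carrying the deprecation note.
#[deprecated(since = "0.6.0", note = "import from the new crate instead")]
pub struct OldRng(RelocatedRng);

#[allow(deprecated)] // the impl itself has to mention the deprecated type
impl OldRng {
    // Every method is pure delegation to the wrapped value.
    pub fn next_u32(&mut self) -> u32 { self.0.next_u32() }
}
```

Downstream code that still names the old type keeps compiling but gets a warning pointing at the new location.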
diff --git a/rand/src/distributions/bernoulli.rs b/rand/src/distributions/bernoulli.rs
new file mode 100644
index 0000000..f49618c
--- /dev/null
+++ b/rand/src/distributions/bernoulli.rs
@@ -0,0 +1,165 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Bernoulli distribution.
+
+use Rng;
+use distributions::Distribution;
+
+/// The Bernoulli distribution.
+///
+/// This is a special case of the Binomial distribution where `n = 1`.
+///
+/// # Example
+///
+/// ```rust
+/// use rand::distributions::{Bernoulli, Distribution};
+///
+/// let d = Bernoulli::new(0.3);
+/// let v = d.sample(&mut rand::thread_rng());
+/// println!("{} is from a Bernoulli distribution", v);
+/// ```
+///
+/// # Precision
+///
+/// This `Bernoulli` distribution uses 64 bits from the RNG (a `u64`),
+/// so only probabilities that are multiples of 2<sup>-64</sup> can be
+/// represented.
+#[derive(Clone, Copy, Debug)]
+pub struct Bernoulli {
+ /// Probability of success, relative to the maximal integer.
+ p_int: u64,
+}
+
+// To sample from the Bernoulli distribution we use a method that compares a
+// random `u64` value `v < (p * 2^64)`.
+//
+// If `p == 1.0`, the integer `v` to compare against cannot be represented as a
+// `u64`. We manually set it to `u64::MAX` instead (2^64 - 1 instead of 2^64).
+// Note that a value of `p < 1.0` can never result in `u64::MAX`, because an
+// `f64` only has 53 bits of precision, and the next largest value of `p` will
+// result in `2^64 - 2048`.
+//
+// Also there is a 100% theoretical concern: if someone consistently wants to
+// generate `true` using the Bernoulli distribution (i.e. by using a probability
+// of `1.0`), just using `u64::MAX` is not enough. On average it would return
+// false once every 2^64 iterations. Some people apparently care about this
+// case.
+//
+// That is why we special-case `u64::MAX` to always return `true`, without using
+// the RNG, and pay the performance price for all uses that *are* reasonable.
+// Luckily, if `new()` and `sample` are close, the compiler can optimize out the
+// extra check.
+const ALWAYS_TRUE: u64 = ::core::u64::MAX;
+
+// This is just `2.0.powi(64)`, but written this way because it is not available
+// in `no_std` mode.
+const SCALE: f64 = 2.0 * (1u64 << 63) as f64;
+
+impl Bernoulli {
+ /// Construct a new `Bernoulli` with the given probability of success `p`.
+ ///
+ /// # Panics
+ ///
+ /// If `p < 0` or `p > 1`.
+ ///
+ /// # Precision
+ ///
+ /// For `p = 1.0`, the resulting distribution will always generate true.
+ /// For `p = 0.0`, the resulting distribution will always generate false.
+ ///
+ /// This method is accurate for any input `p` in the range `[0, 1]` which is
+ /// a multiple of 2<sup>-64</sup>. (Note that not all multiples of
+ /// 2<sup>-64</sup> in `[0, 1]` can be represented as a `f64`.)
+ #[inline]
+ pub fn new(p: f64) -> Bernoulli {
+ if p < 0.0 || p >= 1.0 {
+ if p == 1.0 { return Bernoulli { p_int: ALWAYS_TRUE } }
+ panic!("Bernoulli::new not called with 0.0 <= p <= 1.0");
+ }
+ Bernoulli { p_int: (p * SCALE) as u64 }
+ }
+
+ /// Construct a new `Bernoulli` with the probability of success of
+    /// `numerator`-in-`denominator`. I.e. `from_ratio(2, 3)` will return
+ /// a `Bernoulli` with a 2-in-3 chance, or about 67%, of returning `true`.
+ ///
+ /// If `numerator == denominator` then the returned `Bernoulli` will always
+ /// return `true`. If `numerator == 0` it will always return `false`.
+ ///
+ /// # Panics
+ ///
+ /// If `denominator == 0` or `numerator > denominator`.
+ ///
+ #[inline]
+ pub fn from_ratio(numerator: u32, denominator: u32) -> Bernoulli {
+ assert!(numerator <= denominator);
+ if numerator == denominator {
+ return Bernoulli { p_int: ::core::u64::MAX }
+ }
+ let p_int = ((numerator as f64 / denominator as f64) * SCALE) as u64;
+ Bernoulli { p_int }
+ }
+}
+
+impl Distribution<bool> for Bernoulli {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> bool {
+ // Make sure to always return true for p = 1.0.
+ if self.p_int == ALWAYS_TRUE { return true; }
+ let v: u64 = rng.gen();
+ v < self.p_int
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use Rng;
+ use distributions::Distribution;
+ use super::Bernoulli;
+
+ #[test]
+ fn test_trivial() {
+ let mut r = ::test::rng(1);
+ let always_false = Bernoulli::new(0.0);
+ let always_true = Bernoulli::new(1.0);
+ for _ in 0..5 {
+ assert_eq!(r.sample::<bool, _>(&always_false), false);
+ assert_eq!(r.sample::<bool, _>(&always_true), true);
+ assert_eq!(Distribution::<bool>::sample(&always_false, &mut r), false);
+ assert_eq!(Distribution::<bool>::sample(&always_true, &mut r), true);
+ }
+ }
+
+ #[test]
+ fn test_average() {
+ const P: f64 = 0.3;
+ const NUM: u32 = 3;
+ const DENOM: u32 = 10;
+ let d1 = Bernoulli::new(P);
+ let d2 = Bernoulli::from_ratio(NUM, DENOM);
+ const N: u32 = 100_000;
+
+ let mut sum1: u32 = 0;
+ let mut sum2: u32 = 0;
+ let mut rng = ::test::rng(2);
+ for _ in 0..N {
+ if d1.sample(&mut rng) {
+ sum1 += 1;
+ }
+ if d2.sample(&mut rng) {
+ sum2 += 1;
+ }
+ }
+ let avg1 = (sum1 as f64) / (N as f64);
+ assert!((avg1 - P).abs() < 5e-3);
+
+ let avg2 = (sum2 as f64) / (N as f64);
+ assert!((avg2 - (NUM as f64)/(DENOM as f64)).abs() < 5e-3);
+ }
+}
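The comment block in the new bernoulli.rs above describes the sampling method: `p` is converted once to `p_int = p * 2^64`, each sample is then a single `u64` comparison, and `u64::MAX` is reserved as a sentinel so that `p == 1.0` always yields `true`. Below is a minimal standalone sketch of that idea, not the crate's code; `p_to_int` and `bernoulli` are illustrative helpers fed with an externally supplied random `u64`.

```rust
// 2^64 written as in the crate, since `2.0.powi(64)` is unavailable in no_std.
const SCALE: f64 = 2.0 * (1u64 << 63) as f64;
// Sentinel meaning "always true"; no p < 1.0 can map to this value.
const ALWAYS_TRUE: u64 = u64::MAX;

fn p_to_int(p: f64) -> u64 {
    assert!(0.0 <= p && p <= 1.0);
    if p == 1.0 { ALWAYS_TRUE } else { (p * SCALE) as u64 }
}

fn bernoulli(p_int: u64, random_u64: u64) -> bool {
    // Special-case the sentinel, then compare against the scaled probability.
    p_int == ALWAYS_TRUE || random_u64 < p_int
}

fn main() {
    let p_int = p_to_int(0.3);
    assert!(!bernoulli(p_int, 1u64 << 63));      // 2^63 corresponds to u = 0.5 >= 0.3
    assert!(bernoulli(p_int, 0));                // any value below p * 2^64 is a success
    assert!(bernoulli(p_to_int(1.0), u64::MAX)); // p = 1.0 is always true
}
```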
diff --git a/rand/src/distributions/binomial.rs b/rand/src/distributions/binomial.rs
new file mode 100644
index 0000000..2df393e
--- /dev/null
+++ b/rand/src/distributions/binomial.rs
@@ -0,0 +1,177 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2016-2017 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The binomial distribution.
+
+use Rng;
+use distributions::{Distribution, Bernoulli, Cauchy};
+use distributions::utils::log_gamma;
+
+/// The binomial distribution `Binomial(n, p)`.
+///
+/// This distribution has density function:
+/// `f(k) = n!/(k! (n-k)!) p^k (1-p)^(n-k)` for `k >= 0`.
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{Binomial, Distribution};
+///
+/// let bin = Binomial::new(20, 0.3);
+/// let v = bin.sample(&mut rand::thread_rng());
+/// println!("{} is from a binomial distribution", v);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Binomial {
+ /// Number of trials.
+ n: u64,
+ /// Probability of success.
+ p: f64,
+}
+
+impl Binomial {
+ /// Construct a new `Binomial` with the given shape parameters `n` (number
+ /// of trials) and `p` (probability of success).
+ ///
+ /// Panics if `p < 0` or `p > 1`.
+ pub fn new(n: u64, p: f64) -> Binomial {
+ assert!(p >= 0.0, "Binomial::new called with p < 0");
+ assert!(p <= 1.0, "Binomial::new called with p > 1");
+ Binomial { n, p }
+ }
+}
+
+impl Distribution<u64> for Binomial {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
+ // Handle these values directly.
+ if self.p == 0.0 {
+ return 0;
+ } else if self.p == 1.0 {
+ return self.n;
+ }
+
+ // For low n, it is faster to sample directly. For both methods,
+        // performance is independent of p. On an Intel Haswell CPU this method
+ // appears to be faster for approx n < 300.
+ if self.n < 300 {
+ let mut result = 0;
+ let d = Bernoulli::new(self.p);
+ for _ in 0 .. self.n {
+ result += rng.sample(d) as u32;
+ }
+ return result as u64;
+ }
+
+ // binomial distribution is symmetrical with respect to p -> 1-p, k -> n-k
+ // switch p so that it is less than 0.5 - this allows for lower expected values
+ // we will just invert the result at the end
+ let p = if self.p <= 0.5 {
+ self.p
+ } else {
+ 1.0 - self.p
+ };
+
+ // prepare some cached values
+ let float_n = self.n as f64;
+ let ln_fact_n = log_gamma(float_n + 1.0);
+ let pc = 1.0 - p;
+ let log_p = p.ln();
+ let log_pc = pc.ln();
+ let expected = self.n as f64 * p;
+ let sq = (expected * (2.0 * pc)).sqrt();
+
+ let mut lresult;
+
+ // we use the Cauchy distribution as the comparison distribution
+ // f(x) ~ 1/(1+x^2)
+ let cauchy = Cauchy::new(0.0, 1.0);
+ loop {
+ let mut comp_dev: f64;
+ loop {
+ // draw from the Cauchy distribution
+ comp_dev = rng.sample(cauchy);
+                // shift the peak of the comparison distribution
+ lresult = expected + sq * comp_dev;
+ // repeat the drawing until we are in the range of possible values
+ if lresult >= 0.0 && lresult < float_n + 1.0 {
+ break;
+ }
+ }
+
+ // the result should be discrete
+ lresult = lresult.floor();
+
+ let log_binomial_dist = ln_fact_n - log_gamma(lresult+1.0) -
+ log_gamma(float_n - lresult + 1.0) + lresult*log_p + (float_n - lresult)*log_pc;
+ // this is the binomial probability divided by the comparison probability
+ // we will generate a uniform random value and if it is larger than this,
+ // we interpret it as a value falling out of the distribution and repeat
+ let comparison_coeff = (log_binomial_dist.exp() * sq) * (1.2 * (1.0 + comp_dev*comp_dev));
+
+ if comparison_coeff >= rng.gen() {
+ break;
+ }
+ }
+
+ // invert the result for p < 0.5
+ if p != self.p {
+ self.n - lresult as u64
+ } else {
+ lresult as u64
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use Rng;
+ use distributions::Distribution;
+ use super::Binomial;
+
+ fn test_binomial_mean_and_variance<R: Rng>(n: u64, p: f64, rng: &mut R) {
+ let binomial = Binomial::new(n, p);
+
+ let expected_mean = n as f64 * p;
+ let expected_variance = n as f64 * p * (1.0 - p);
+
+ let mut results = [0.0; 1000];
+ for i in results.iter_mut() { *i = binomial.sample(rng) as f64; }
+
+ let mean = results.iter().sum::<f64>() / results.len() as f64;
+ assert!((mean as f64 - expected_mean).abs() < expected_mean / 50.0);
+
+ let variance =
+ results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>()
+ / results.len() as f64;
+ assert!((variance - expected_variance).abs() < expected_variance / 10.0);
+ }
+
+ #[test]
+ fn test_binomial() {
+ let mut rng = ::test::rng(351);
+ test_binomial_mean_and_variance(150, 0.1, &mut rng);
+ test_binomial_mean_and_variance(70, 0.6, &mut rng);
+ test_binomial_mean_and_variance(40, 0.5, &mut rng);
+ test_binomial_mean_and_variance(20, 0.7, &mut rng);
+ test_binomial_mean_and_variance(20, 0.5, &mut rng);
+ }
+
+ #[test]
+ fn test_binomial_end_points() {
+ let mut rng = ::test::rng(352);
+ assert_eq!(rng.sample(Binomial::new(20, 0.0)), 0);
+ assert_eq!(rng.sample(Binomial::new(20, 1.0)), 20);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_binomial_invalid_lambda_neg() {
+ Binomial::new(20, -10.0);
+ }
+}
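For `n < 300` the sampler above takes the direct path: a Binomial(n, p) draw is simply the number of successes in `n` Bernoulli(p) trials. A usage-level sketch of that path, assuming the rand 0.6 API introduced in this diff (`Bernoulli::new`, `Distribution::sample`, `thread_rng`):

```rust
use rand::distributions::{Bernoulli, Distribution};

// Binomial(n, p) sampled as a sum of n Bernoulli(p) trials (the small-n path).
fn binomial_direct(n: u64, p: f64) -> u64 {
    let mut rng = rand::thread_rng();
    let d = Bernoulli::new(p);
    (0..n).map(|_| d.sample(&mut rng) as u64).sum()
}

fn main() {
    let k = binomial_direct(20, 0.3);
    println!("{} successes out of 20 trials", k);
}
```

For large `n` the loop above instead draws a candidate from a scaled Cauchy and accepts it with probability proportional to the ratio of the binomial and Cauchy densities.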
diff --git a/rand/src/distributions/cauchy.rs b/rand/src/distributions/cauchy.rs
new file mode 100644
index 0000000..feef015
--- /dev/null
+++ b/rand/src/distributions/cauchy.rs
@@ -0,0 +1,115 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2016-2017 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Cauchy distribution.
+
+use Rng;
+use distributions::Distribution;
+use std::f64::consts::PI;
+
+/// The Cauchy distribution `Cauchy(median, scale)`.
+///
+/// This distribution has a density function:
+/// `f(x) = 1 / (pi * scale * (1 + ((x - median) / scale)^2))`
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{Cauchy, Distribution};
+///
+/// let cau = Cauchy::new(2.0, 5.0);
+/// let v = cau.sample(&mut rand::thread_rng());
+/// println!("{} is from a Cauchy(2, 5) distribution", v);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Cauchy {
+ median: f64,
+ scale: f64
+}
+
+impl Cauchy {
+ /// Construct a new `Cauchy` with the given shape parameters
+ /// `median` the peak location and `scale` the scale factor.
+ /// Panics if `scale <= 0`.
+ pub fn new(median: f64, scale: f64) -> Cauchy {
+ assert!(scale > 0.0, "Cauchy::new called with scale factor <= 0");
+ Cauchy {
+ median,
+ scale
+ }
+ }
+}
+
+impl Distribution<f64> for Cauchy {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ // sample from [0, 1)
+ let x = rng.gen::<f64>();
+ // get standard cauchy random number
+ // note that π/2 is not exactly representable, even if x=0.5 the result is finite
+ let comp_dev = (PI * x).tan();
+ // shift and scale according to parameters
+ let result = self.median + self.scale * comp_dev;
+ result
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use distributions::Distribution;
+ use super::Cauchy;
+
+ fn median(mut numbers: &mut [f64]) -> f64 {
+ sort(&mut numbers);
+ let mid = numbers.len() / 2;
+ numbers[mid]
+ }
+
+ fn sort(numbers: &mut [f64]) {
+ numbers.sort_by(|a, b| a.partial_cmp(b).unwrap());
+ }
+
+ #[test]
+ fn test_cauchy_median() {
+ let cauchy = Cauchy::new(10.0, 5.0);
+ let mut rng = ::test::rng(123);
+ let mut numbers: [f64; 1000] = [0.0; 1000];
+ for i in 0..1000 {
+ numbers[i] = cauchy.sample(&mut rng);
+ }
+ let median = median(&mut numbers);
+ println!("Cauchy median: {}", median);
+ assert!((median - 10.0).abs() < 0.5); // not 100% certain, but probable enough
+ }
+
+ #[test]
+ fn test_cauchy_mean() {
+ let cauchy = Cauchy::new(10.0, 5.0);
+ let mut rng = ::test::rng(123);
+ let mut sum = 0.0;
+ for _ in 0..1000 {
+ sum += cauchy.sample(&mut rng);
+ }
+ let mean = sum / 1000.0;
+ println!("Cauchy mean: {}", mean);
+ // for a Cauchy distribution the mean should not converge
+ assert!((mean - 10.0).abs() > 0.5); // not 100% certain, but probable enough
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_cauchy_invalid_scale_zero() {
+ Cauchy::new(0.0, 0.0);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_cauchy_invalid_scale_neg() {
+ Cauchy::new(0.0, -10.0);
+ }
+}
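The sampler above is inverse-CDF sampling in disguise: the standard Cauchy CDF is `F(x) = atan(x)/π + 1/2`, so `F⁻¹(u) = tan(π·(u − 1/2))`, and because `tan` has period π, `tan(π·u)` for `u` uniform on `[0, 1)` follows the same distribution. A small self-contained sketch (illustrative helper name, plain std only):

```rust
use std::f64::consts::PI;

// Map a uniform u in [0, 1) to a standard Cauchy(0, 1) draw via the inverse CDF.
fn standard_cauchy_from_uniform(u: f64) -> f64 {
    debug_assert!((0.0..1.0).contains(&u));
    (PI * (u - 0.5)).tan()
}

fn main() {
    // u = 0.75 is the 75th percentile, which for Cauchy(0, 1) is tan(π/4) = 1.
    let x = standard_cauchy_from_uniform(0.75);
    assert!((x - 1.0).abs() < 1e-12);
    println!("F⁻¹(0.75) = {}", x);
}
```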
diff --git a/rand/src/distributions/dirichlet.rs b/rand/src/distributions/dirichlet.rs
new file mode 100644
index 0000000..19384b8
--- /dev/null
+++ b/rand/src/distributions/dirichlet.rs
@@ -0,0 +1,137 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Dirichlet distribution.
+
+use Rng;
+use distributions::Distribution;
+use distributions::gamma::Gamma;
+
+/// The Dirichlet distribution `Dirichlet(alpha)`.
+///
+/// The Dirichlet distribution is a family of continuous multivariate
+/// probability distributions parameterized by a vector alpha of positive reals.
+/// It is a multivariate generalization of the beta distribution.
+///
+/// # Example
+///
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::Dirichlet;
+///
+/// let dirichlet = Dirichlet::new(vec![1.0, 2.0, 3.0]);
+/// let samples = dirichlet.sample(&mut rand::thread_rng());
+/// println!("{:?} is from a Dirichlet([1.0, 2.0, 3.0]) distribution", samples);
+/// ```
+
+#[derive(Clone, Debug)]
+pub struct Dirichlet {
+ /// Concentration parameters (alpha)
+ alpha: Vec<f64>,
+}
+
+impl Dirichlet {
+ /// Construct a new `Dirichlet` with the given alpha parameter `alpha`.
+ ///
+ /// # Panics
+ /// - if `alpha.len() < 2`
+ ///
+ #[inline]
+ pub fn new<V: Into<Vec<f64>>>(alpha: V) -> Dirichlet {
+ let a = alpha.into();
+ assert!(a.len() > 1);
+ for i in 0..a.len() {
+ assert!(a[i] > 0.0);
+ }
+
+ Dirichlet { alpha: a }
+ }
+
+ /// Construct a new `Dirichlet` with the given shape parameter `alpha` and `size`.
+ ///
+ /// # Panics
+ /// - if `alpha <= 0.0`
+ /// - if `size < 2`
+ ///
+ #[inline]
+ pub fn new_with_param(alpha: f64, size: usize) -> Dirichlet {
+ assert!(alpha > 0.0);
+ assert!(size > 1);
+ Dirichlet {
+ alpha: vec![alpha; size],
+ }
+ }
+}
+
+impl Distribution<Vec<f64>> for Dirichlet {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Vec<f64> {
+ let n = self.alpha.len();
+ let mut samples = vec![0.0f64; n];
+ let mut sum = 0.0f64;
+
+ for i in 0..n {
+ let g = Gamma::new(self.alpha[i], 1.0);
+ samples[i] = g.sample(rng);
+ sum += samples[i];
+ }
+ let invacc = 1.0 / sum;
+ for i in 0..n {
+ samples[i] *= invacc;
+ }
+ samples
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::Dirichlet;
+ use distributions::Distribution;
+
+ #[test]
+ fn test_dirichlet() {
+ let d = Dirichlet::new(vec![1.0, 2.0, 3.0]);
+ let mut rng = ::test::rng(221);
+ let samples = d.sample(&mut rng);
+ let _: Vec<f64> = samples
+ .into_iter()
+ .map(|x| {
+ assert!(x > 0.0);
+ x
+ })
+ .collect();
+ }
+
+ #[test]
+ fn test_dirichlet_with_param() {
+ let alpha = 0.5f64;
+ let size = 2;
+ let d = Dirichlet::new_with_param(alpha, size);
+ let mut rng = ::test::rng(221);
+ let samples = d.sample(&mut rng);
+ let _: Vec<f64> = samples
+ .into_iter()
+ .map(|x| {
+ assert!(x > 0.0);
+ x
+ })
+ .collect();
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_dirichlet_invalid_length() {
+ Dirichlet::new_with_param(0.5f64, 1);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_dirichlet_invalid_alpha() {
+ Dirichlet::new_with_param(0.0f64, 2);
+ }
+}
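The `sample` impl above draws one `Gamma(alpha_i, 1)` variate per component and divides by their sum, the standard construction of a Dirichlet draw, so the result always lies on the probability simplex. A usage sketch, assuming the rand 0.6 API added in this diff:

```rust
use rand::distributions::{Dirichlet, Distribution};

fn main() {
    let dirichlet = Dirichlet::new(vec![1.0, 2.0, 3.0]);
    let sample = dirichlet.sample(&mut rand::thread_rng());
    // Each component is positive and, by construction, they sum to 1.
    let total: f64 = sample.iter().sum();
    assert!((total - 1.0).abs() < 1e-12);
    println!("{:?}", sample);
}
```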
diff --git a/rand/src/distributions/exponential.rs b/rand/src/distributions/exponential.rs
index c3c924c..a7d0500 100644
--- a/rand/src/distributions/exponential.rs
+++ b/rand/src/distributions/exponential.rs
@@ -1,74 +1,78 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The exponential distribution.
-use {Rng, Rand};
-use distributions::{ziggurat, ziggurat_tables, Sample, IndependentSample};
+use {Rng};
+use distributions::{ziggurat_tables, Distribution};
+use distributions::utils::ziggurat;
-/// A wrapper around an `f64` to generate Exp(1) random numbers.
+/// Samples floating-point numbers according to the exponential distribution,
+/// with rate parameter `λ = 1`. This is equivalent to `Exp::new(1.0)` or
+/// sampling with `-rng.gen::<f64>().ln()`, but faster.
///
/// See `Exp` for the general exponential distribution.
///
-/// Implemented via the ZIGNOR variant[1] of the Ziggurat method. The
-/// exact description in the paper was adjusted to use tables for the
-/// exponential distribution rather than normal.
+/// Implemented via the ZIGNOR variant[^1] of the Ziggurat method. The exact
+/// description in the paper was adjusted to use tables for the exponential
+/// distribution rather than normal.
///
-/// [1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to
-/// Generate Normal Random
-/// Samples*](http://www.doornik.com/research/ziggurat.pdf). Nuffield
-/// College, Oxford
+/// [^1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to
+/// Generate Normal Random Samples*](
+/// https://www.doornik.com/research/ziggurat.pdf).
+/// Nuffield College, Oxford
///
/// # Example
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::Exp1;
///
-/// ```rust
-/// use rand::distributions::exponential::Exp1;
-///
-/// let Exp1(x) = rand::random();
-/// println!("{}", x);
+/// let val: f64 = SmallRng::from_entropy().sample(Exp1);
+/// println!("{}", val);
/// ```
#[derive(Clone, Copy, Debug)]
-pub struct Exp1(pub f64);
+pub struct Exp1;
// This could be done via `-rng.gen::<f64>().ln()` but that is slower.
-impl Rand for Exp1 {
+impl Distribution<f64> for Exp1 {
#[inline]
- fn rand<R:Rng>(rng: &mut R) -> Exp1 {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
#[inline]
fn pdf(x: f64) -> f64 {
(-x).exp()
}
#[inline]
- fn zero_case<R:Rng>(rng: &mut R, _u: f64) -> f64 {
+ fn zero_case<R: Rng + ?Sized>(rng: &mut R, _u: f64) -> f64 {
ziggurat_tables::ZIG_EXP_R - rng.gen::<f64>().ln()
}
- Exp1(ziggurat(rng, false,
- &ziggurat_tables::ZIG_EXP_X,
- &ziggurat_tables::ZIG_EXP_F,
- pdf, zero_case))
+ ziggurat(rng, false,
+ &ziggurat_tables::ZIG_EXP_X,
+ &ziggurat_tables::ZIG_EXP_F,
+ pdf, zero_case)
}
}
/// The exponential distribution `Exp(lambda)`.
///
-/// This distribution has density function: `f(x) = lambda *
-/// exp(-lambda * x)` for `x > 0`.
+/// This distribution has density function: `f(x) = lambda * exp(-lambda * x)`
+/// for `x > 0`.
+///
+/// Note that [`Exp1`](struct.Exp1.html) is an optimised implementation for `lambda = 1`.
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{Exp, IndependentSample};
+/// ```
+/// use rand::distributions::{Exp, Distribution};
///
/// let exp = Exp::new(2.0);
-/// let v = exp.ind_sample(&mut rand::thread_rng());
+/// let v = exp.sample(&mut rand::thread_rng());
/// println!("{} is from a Exp(2) distribution", v);
/// ```
#[derive(Clone, Copy, Debug)]
@@ -87,28 +91,24 @@ impl Exp {
}
}
-impl Sample<f64> for Exp {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl IndependentSample<f64> for Exp {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
- let Exp1(n) = rng.gen::<Exp1>();
+impl Distribution<f64> for Exp {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let n: f64 = rng.sample(Exp1);
n * self.lambda_inverse
}
}
#[cfg(test)]
mod test {
- use distributions::{Sample, IndependentSample};
+ use distributions::Distribution;
use super::Exp;
#[test]
fn test_exp() {
- let mut exp = Exp::new(10.0);
- let mut rng = ::test::rng();
+ let exp = Exp::new(10.0);
+ let mut rng = ::test::rng(221);
for _ in 0..1000 {
assert!(exp.sample(&mut rng) >= 0.0);
- assert!(exp.ind_sample(&mut rng) >= 0.0);
}
}
#[test]
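The `Exp` impl above scales an `Exp1` draw by `1/lambda`; `Exp1` itself uses the ziggurat method as a faster equivalent of the inverse-CDF formula `-ln(U)` mentioned in its comment. A tiny sketch of that underlying relationship (illustrative helper, not the crate's code):

```rust
// Exp(lambda) via the inverse CDF: -ln(U) / lambda for U uniform in (0, 1).
fn exp_inverse_cdf(u: f64, lambda: f64) -> f64 {
    assert!(u > 0.0 && u < 1.0 && lambda > 0.0);
    -u.ln() / lambda
}

fn main() {
    // u = 0.5 lands on the median of Exp(2), which is ln(2)/2 ≈ 0.3466.
    let median = exp_inverse_cdf(0.5, 2.0);
    assert!((median - std::f64::consts::LN_2 / 2.0).abs() < 1e-12);
    println!("median of Exp(2) = {}", median);
}
```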
diff --git a/rand/src/distributions/float.rs b/rand/src/distributions/float.rs
new file mode 100644
index 0000000..ece12f5
--- /dev/null
+++ b/rand/src/distributions/float.rs
@@ -0,0 +1,259 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Basic floating-point number distributions
+
+use core::mem;
+use Rng;
+use distributions::{Distribution, Standard};
+use distributions::utils::FloatSIMDUtils;
+#[cfg(feature="simd_support")]
+use packed_simd::*;
+
+/// A distribution to sample floating point numbers uniformly in the half-open
+/// interval `(0, 1]`, i.e. including 1 but not 0.
+///
+/// All values that can be generated are of the form `n * ε/2`. For `f32`
+/// the 23 most significant random bits of a `u32` are used and for `f64` the
+/// 53 most significant bits of a `u64` are used. The conversion uses the
+/// multiplicative method.
+///
+/// See also: [`Standard`] which samples from `[0, 1)`, [`Open01`]
+/// which samples from `(0, 1)` and [`Uniform`] which samples from arbitrary
+/// ranges.
+///
+/// # Example
+/// ```
+/// use rand::{thread_rng, Rng};
+/// use rand::distributions::OpenClosed01;
+///
+/// let val: f32 = thread_rng().sample(OpenClosed01);
+/// println!("f32 from (0, 1): {}", val);
+/// ```
+///
+/// [`Standard`]: struct.Standard.html
+/// [`Open01`]: struct.Open01.html
+/// [`Uniform`]: uniform/struct.Uniform.html
+#[derive(Clone, Copy, Debug)]
+pub struct OpenClosed01;
+
+/// A distribution to sample floating point numbers uniformly in the open
+/// interval `(0, 1)`, i.e. not including either endpoint.
+///
+/// All values that can be generated are of the form `n * ε + ε/2`. For `f32`
+/// the 22 most significant random bits of an `u32` are used, for `f64` 52 from
+/// an `u64`. The conversion uses a transmute-based method.
+///
+/// See also: [`Standard`] which samples from `[0, 1)`, [`OpenClosed01`]
+/// which samples from `(0, 1]` and [`Uniform`] which samples from arbitrary
+/// ranges.
+///
+/// # Example
+/// ```
+/// use rand::{thread_rng, Rng};
+/// use rand::distributions::Open01;
+///
+/// let val: f32 = thread_rng().sample(Open01);
+/// println!("f32 from (0, 1): {}", val);
+/// ```
+///
+/// [`Standard`]: struct.Standard.html
+/// [`OpenClosed01`]: struct.OpenClosed01.html
+/// [`Uniform`]: uniform/struct.Uniform.html
+#[derive(Clone, Copy, Debug)]
+pub struct Open01;
+
+
+pub(crate) trait IntoFloat {
+ type F;
+
+    /// Helper method to combine the fraction and a constant exponent into a
+ /// float.
+ ///
+ /// Only the least significant bits of `self` may be set, 23 for `f32` and
+ /// 52 for `f64`.
+ /// The resulting value will fall in a range that depends on the exponent.
+ /// As an example the range with exponent 0 will be
+ /// [2<sup>0</sup>..2<sup>1</sup>), which is [1..2).
+ fn into_float_with_exponent(self, exponent: i32) -> Self::F;
+}
+
+macro_rules! float_impls {
+ ($ty:ident, $uty:ident, $f_scalar:ident, $u_scalar:ty,
+ $fraction_bits:expr, $exponent_bias:expr) => {
+ impl IntoFloat for $uty {
+ type F = $ty;
+ #[inline(always)]
+ fn into_float_with_exponent(self, exponent: i32) -> $ty {
+ // The exponent is encoded using an offset-binary representation
+ let exponent_bits: $u_scalar =
+ (($exponent_bias + exponent) as $u_scalar) << $fraction_bits;
+ // TODO: use from_bits when min compiler > 1.25 (see #545)
+ // $ty::from_bits(self | exponent_bits)
+ unsafe{ mem::transmute(self | exponent_bits) }
+ }
+ }
+
+ impl Distribution<$ty> for Standard {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+ // Multiply-based method; 24/53 random bits; [0, 1) interval.
+ // We use the most significant bits because for simple RNGs
+ // those are usually more random.
+ let float_size = mem::size_of::<$f_scalar>() as u32 * 8;
+ let precision = $fraction_bits + 1;
+ let scale = 1.0 / ((1 as $u_scalar << precision) as $f_scalar);
+
+ let value: $uty = rng.gen();
+ let value = value >> (float_size - precision);
+ scale * $ty::cast_from_int(value)
+ }
+ }
+
+ impl Distribution<$ty> for OpenClosed01 {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+ // Multiply-based method; 24/53 random bits; (0, 1] interval.
+ // We use the most significant bits because for simple RNGs
+ // those are usually more random.
+ let float_size = mem::size_of::<$f_scalar>() as u32 * 8;
+ let precision = $fraction_bits + 1;
+ let scale = 1.0 / ((1 as $u_scalar << precision) as $f_scalar);
+
+ let value: $uty = rng.gen();
+ let value = value >> (float_size - precision);
+ // Add 1 to shift up; will not overflow because of right-shift:
+ scale * $ty::cast_from_int(value + 1)
+ }
+ }
+
+ impl Distribution<$ty> for Open01 {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+ // Transmute-based method; 23/52 random bits; (0, 1) interval.
+ // We use the most significant bits because for simple RNGs
+ // those are usually more random.
+ use core::$f_scalar::EPSILON;
+ let float_size = mem::size_of::<$f_scalar>() as u32 * 8;
+
+ let value: $uty = rng.gen();
+ let fraction = value >> (float_size - $fraction_bits);
+ fraction.into_float_with_exponent(0) - (1.0 - EPSILON / 2.0)
+ }
+ }
+ }
+}
+
+float_impls! { f32, u32, f32, u32, 23, 127 }
+float_impls! { f64, u64, f64, u64, 52, 1023 }
+
+#[cfg(feature="simd_support")]
+float_impls! { f32x2, u32x2, f32, u32, 23, 127 }
+#[cfg(feature="simd_support")]
+float_impls! { f32x4, u32x4, f32, u32, 23, 127 }
+#[cfg(feature="simd_support")]
+float_impls! { f32x8, u32x8, f32, u32, 23, 127 }
+#[cfg(feature="simd_support")]
+float_impls! { f32x16, u32x16, f32, u32, 23, 127 }
+
+#[cfg(feature="simd_support")]
+float_impls! { f64x2, u64x2, f64, u64, 52, 1023 }
+#[cfg(feature="simd_support")]
+float_impls! { f64x4, u64x4, f64, u64, 52, 1023 }
+#[cfg(feature="simd_support")]
+float_impls! { f64x8, u64x8, f64, u64, 52, 1023 }
+
+
+#[cfg(test)]
+mod tests {
+ use Rng;
+ use distributions::{Open01, OpenClosed01};
+ use rngs::mock::StepRng;
+ #[cfg(feature="simd_support")]
+ use packed_simd::*;
+
+ const EPSILON32: f32 = ::core::f32::EPSILON;
+ const EPSILON64: f64 = ::core::f64::EPSILON;
+
+ macro_rules! test_f32 {
+ ($fnn:ident, $ty:ident, $ZERO:expr, $EPSILON:expr) => {
+ #[test]
+ fn $fnn() {
+ // Standard
+ let mut zeros = StepRng::new(0, 0);
+ assert_eq!(zeros.gen::<$ty>(), $ZERO);
+ let mut one = StepRng::new(1 << 8 | 1 << (8 + 32), 0);
+ assert_eq!(one.gen::<$ty>(), $EPSILON / 2.0);
+ let mut max = StepRng::new(!0, 0);
+ assert_eq!(max.gen::<$ty>(), 1.0 - $EPSILON / 2.0);
+
+ // OpenClosed01
+ let mut zeros = StepRng::new(0, 0);
+ assert_eq!(zeros.sample::<$ty, _>(OpenClosed01),
+ 0.0 + $EPSILON / 2.0);
+ let mut one = StepRng::new(1 << 8 | 1 << (8 + 32), 0);
+ assert_eq!(one.sample::<$ty, _>(OpenClosed01), $EPSILON);
+ let mut max = StepRng::new(!0, 0);
+ assert_eq!(max.sample::<$ty, _>(OpenClosed01), $ZERO + 1.0);
+
+ // Open01
+ let mut zeros = StepRng::new(0, 0);
+ assert_eq!(zeros.sample::<$ty, _>(Open01), 0.0 + $EPSILON / 2.0);
+ let mut one = StepRng::new(1 << 9 | 1 << (9 + 32), 0);
+ assert_eq!(one.sample::<$ty, _>(Open01), $EPSILON / 2.0 * 3.0);
+ let mut max = StepRng::new(!0, 0);
+ assert_eq!(max.sample::<$ty, _>(Open01), 1.0 - $EPSILON / 2.0);
+ }
+ }
+ }
+ test_f32! { f32_edge_cases, f32, 0.0, EPSILON32 }
+ #[cfg(feature="simd_support")]
+ test_f32! { f32x2_edge_cases, f32x2, f32x2::splat(0.0), f32x2::splat(EPSILON32) }
+ #[cfg(feature="simd_support")]
+ test_f32! { f32x4_edge_cases, f32x4, f32x4::splat(0.0), f32x4::splat(EPSILON32) }
+ #[cfg(feature="simd_support")]
+ test_f32! { f32x8_edge_cases, f32x8, f32x8::splat(0.0), f32x8::splat(EPSILON32) }
+ #[cfg(feature="simd_support")]
+ test_f32! { f32x16_edge_cases, f32x16, f32x16::splat(0.0), f32x16::splat(EPSILON32) }
+
+ macro_rules! test_f64 {
+ ($fnn:ident, $ty:ident, $ZERO:expr, $EPSILON:expr) => {
+ #[test]
+ fn $fnn() {
+ // Standard
+ let mut zeros = StepRng::new(0, 0);
+ assert_eq!(zeros.gen::<$ty>(), $ZERO);
+ let mut one = StepRng::new(1 << 11, 0);
+ assert_eq!(one.gen::<$ty>(), $EPSILON / 2.0);
+ let mut max = StepRng::new(!0, 0);
+ assert_eq!(max.gen::<$ty>(), 1.0 - $EPSILON / 2.0);
+
+ // OpenClosed01
+ let mut zeros = StepRng::new(0, 0);
+ assert_eq!(zeros.sample::<$ty, _>(OpenClosed01),
+ 0.0 + $EPSILON / 2.0);
+ let mut one = StepRng::new(1 << 11, 0);
+ assert_eq!(one.sample::<$ty, _>(OpenClosed01), $EPSILON);
+ let mut max = StepRng::new(!0, 0);
+ assert_eq!(max.sample::<$ty, _>(OpenClosed01), $ZERO + 1.0);
+
+ // Open01
+ let mut zeros = StepRng::new(0, 0);
+ assert_eq!(zeros.sample::<$ty, _>(Open01), 0.0 + $EPSILON / 2.0);
+ let mut one = StepRng::new(1 << 12, 0);
+ assert_eq!(one.sample::<$ty, _>(Open01), $EPSILON / 2.0 * 3.0);
+ let mut max = StepRng::new(!0, 0);
+ assert_eq!(max.sample::<$ty, _>(Open01), 1.0 - $EPSILON / 2.0);
+ }
+ }
+ }
+ test_f64! { f64_edge_cases, f64, 0.0, EPSILON64 }
+ #[cfg(feature="simd_support")]
+ test_f64! { f64x2_edge_cases, f64x2, f64x2::splat(0.0), f64x2::splat(EPSILON64) }
+ #[cfg(feature="simd_support")]
+ test_f64! { f64x4_edge_cases, f64x4, f64x4::splat(0.0), f64x4::splat(EPSILON64) }
+ #[cfg(feature="simd_support")]
+ test_f64! { f64x8_edge_cases, f64x8, f64x8::splat(0.0), f64x8::splat(EPSILON64) }
+}
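The `Standard` impl in the macro above uses the multiply-based conversion: keep the `$fraction_bits + 1` most significant random bits and scale by 2^-precision, so every generated value is a multiple of ε/2 in `[0, 1)`. A standalone sketch of that conversion for `f64` (illustrative helper, plain std only):

```rust
// Multiply-based conversion of a random u64 to a float in [0, 1).
fn u64_to_f64_unit(bits: u64) -> f64 {
    let precision = 52 + 1; // 52 fraction bits + 1 implicit bit = 53 random bits
    let scale = 1.0 / ((1u64 << precision) as f64); // 2^-53
    scale * (bits >> (64 - precision)) as f64
}

fn main() {
    assert_eq!(u64_to_f64_unit(0), 0.0);
    // All-ones input maps to the largest value below 1.0: 1 - 2^-53 = 1 - ε/2.
    assert_eq!(u64_to_f64_unit(!0), 1.0 - f64::EPSILON / 2.0);
}
```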
diff --git a/rand/src/distributions/gamma.rs b/rand/src/distributions/gamma.rs
index 2806495..43ac2bc 100644
--- a/rand/src/distributions/gamma.rs
+++ b/rand/src/distributions/gamma.rs
@@ -1,23 +1,20 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//
-// ignore-lexer-test FIXME #15679
//! The Gamma and derived distributions.
use self::GammaRepr::*;
use self::ChiSquaredRepr::*;
-use {Rng, Open01};
-use super::normal::StandardNormal;
-use super::{IndependentSample, Sample, Exp};
+use Rng;
+use distributions::normal::StandardNormal;
+use distributions::{Distribution, Exp, Open01};
/// The Gamma distribution `Gamma(shape, scale)` distribution.
///
@@ -30,25 +27,25 @@ use super::{IndependentSample, Sample, Exp};
/// where `Γ` is the Gamma function, `k` is the shape and `θ` is the
/// scale and both `k` and `θ` are strictly positive.
///
-/// The algorithm used is that described by Marsaglia & Tsang 2000[1],
+/// The algorithm used is that described by Marsaglia & Tsang 2000[^1],
/// falling back to directly sampling from an Exponential for `shape
-/// == 1`, and using the boosting technique described in [1] for
+/// == 1`, and using the boosting technique described in that paper for
/// `shape < 1`.
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{IndependentSample, Gamma};
+/// ```
+/// use rand::distributions::{Distribution, Gamma};
///
/// let gamma = Gamma::new(2.0, 5.0);
-/// let v = gamma.ind_sample(&mut rand::thread_rng());
+/// let v = gamma.sample(&mut rand::thread_rng());
/// println!("{} is from a Gamma(2, 5) distribution", v);
/// ```
///
-/// [1]: George Marsaglia and Wai Wan Tsang. 2000. "A Simple Method
-/// for Generating Gamma Variables" *ACM Trans. Math. Softw.* 26, 3
-/// (September 2000),
-/// 363-372. DOI:[10.1145/358407.358414](http://doi.acm.org/10.1145/358407.358414)
+/// [^1]: George Marsaglia and Wai Wan Tsang. 2000. "A Simple Method for
+/// Generating Gamma Variables" *ACM Trans. Math. Softw.* 26, 3
+/// (September 2000), 363-372.
+/// DOI:[10.1145/358407.358414](https://doi.acm.org/10.1145/358407.358414)
#[derive(Clone, Copy, Debug)]
pub struct Gamma {
repr: GammaRepr,
@@ -109,7 +106,7 @@ impl Gamma {
} else {
Large(GammaLargeShape::new_raw(shape, scale))
};
- Gamma { repr: repr }
+ Gamma { repr }
}
}
@@ -126,50 +123,40 @@ impl GammaLargeShape {
fn new_raw(shape: f64, scale: f64) -> GammaLargeShape {
let d = shape - 1. / 3.;
GammaLargeShape {
- scale: scale,
+ scale,
c: 1. / (9. * d).sqrt(),
- d: d
+ d
}
}
}
-impl Sample<f64> for Gamma {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl Sample<f64> for GammaSmallShape {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl Sample<f64> for GammaLargeShape {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-
-impl IndependentSample<f64> for Gamma {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
+impl Distribution<f64> for Gamma {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
match self.repr {
- Small(ref g) => g.ind_sample(rng),
- One(ref g) => g.ind_sample(rng),
- Large(ref g) => g.ind_sample(rng),
+ Small(ref g) => g.sample(rng),
+ One(ref g) => g.sample(rng),
+ Large(ref g) => g.sample(rng),
}
}
}
-impl IndependentSample<f64> for GammaSmallShape {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
- let Open01(u) = rng.gen::<Open01<f64>>();
+impl Distribution<f64> for GammaSmallShape {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let u: f64 = rng.sample(Open01);
- self.large_shape.ind_sample(rng) * u.powf(self.inv_shape)
+ self.large_shape.sample(rng) * u.powf(self.inv_shape)
}
}
-impl IndependentSample<f64> for GammaLargeShape {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
+impl Distribution<f64> for GammaLargeShape {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
loop {
- let StandardNormal(x) = rng.gen::<StandardNormal>();
+ let x = rng.sample(StandardNormal);
let v_cbrt = 1.0 + self.c * x;
if v_cbrt <= 0.0 { // a^3 <= 0 iff a <= 0
continue
}
let v = v_cbrt * v_cbrt * v_cbrt;
- let Open01(u) = rng.gen::<Open01<f64>>();
+ let u: f64 = rng.sample(Open01);
let x_sqr = x * x;
if u < 1.0 - 0.0331 * x_sqr * x_sqr ||
@@ -190,11 +177,11 @@ impl IndependentSample<f64> for GammaLargeShape {
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{ChiSquared, IndependentSample};
+/// ```
+/// use rand::distributions::{ChiSquared, Distribution};
///
/// let chi = ChiSquared::new(11.0);
-/// let v = chi.ind_sample(&mut rand::thread_rng());
+/// let v = chi.sample(&mut rand::thread_rng());
/// println!("{} is from a χ²(11) distribution", v)
/// ```
#[derive(Clone, Copy, Debug)]
@@ -221,21 +208,18 @@ impl ChiSquared {
assert!(k > 0.0, "ChiSquared::new called with `k` < 0");
DoFAnythingElse(Gamma::new(0.5 * k, 2.0))
};
- ChiSquared { repr: repr }
+ ChiSquared { repr }
}
}
-impl Sample<f64> for ChiSquared {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl IndependentSample<f64> for ChiSquared {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
+impl Distribution<f64> for ChiSquared {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
match self.repr {
DoFExactlyOne => {
// k == 1 => N(0,1)^2
- let StandardNormal(norm) = rng.gen::<StandardNormal>();
+ let norm = rng.sample(StandardNormal);
norm * norm
}
- DoFAnythingElse(ref g) => g.ind_sample(rng)
+ DoFAnythingElse(ref g) => g.sample(rng)
}
}
}
@@ -248,11 +232,11 @@ impl IndependentSample<f64> for ChiSquared {
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{FisherF, IndependentSample};
+/// ```
+/// use rand::distributions::{FisherF, Distribution};
///
/// let f = FisherF::new(2.0, 32.0);
-/// let v = f.ind_sample(&mut rand::thread_rng());
+/// let v = f.sample(&mut rand::thread_rng());
/// println!("{} is from an F(2, 32) distribution", v)
/// ```
#[derive(Clone, Copy, Debug)]
@@ -278,12 +262,9 @@ impl FisherF {
}
}
}
-impl Sample<f64> for FisherF {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl IndependentSample<f64> for FisherF {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
- self.numer.ind_sample(rng) / self.denom.ind_sample(rng) * self.dof_ratio
+impl Distribution<f64> for FisherF {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ self.numer.sample(rng) / self.denom.sample(rng) * self.dof_ratio
}
}
@@ -292,11 +273,11 @@ impl IndependentSample<f64> for FisherF {
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{StudentT, IndependentSample};
+/// ```
+/// use rand::distributions::{StudentT, Distribution};
///
/// let t = StudentT::new(11.0);
-/// let v = t.ind_sample(&mut rand::thread_rng());
+/// let v = t.sample(&mut rand::thread_rng());
/// println!("{} is from a t(11) distribution", v)
/// ```
#[derive(Clone, Copy, Debug)]
@@ -316,46 +297,79 @@ impl StudentT {
}
}
}
-impl Sample<f64> for StudentT {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
+impl Distribution<f64> for StudentT {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let norm = rng.sample(StandardNormal);
+ norm * (self.dof / self.chi.sample(rng)).sqrt()
+ }
+}
+
+/// The Beta distribution with shape parameters `alpha` and `beta`.
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{Distribution, Beta};
+///
+/// let beta = Beta::new(2.0, 5.0);
+/// let v = beta.sample(&mut rand::thread_rng());
+/// println!("{} is from a Beta(2, 5) distribution", v);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Beta {
+ gamma_a: Gamma,
+ gamma_b: Gamma,
+}
+
+impl Beta {
+ /// Construct an object representing the `Beta(alpha, beta)`
+ /// distribution.
+ ///
+    /// Panics if `alpha <= 0` or `beta <= 0`.
+ pub fn new(alpha: f64, beta: f64) -> Beta {
+ assert!((alpha > 0.) & (beta > 0.));
+ Beta {
+ gamma_a: Gamma::new(alpha, 1.),
+ gamma_b: Gamma::new(beta, 1.),
+ }
+ }
}
-impl IndependentSample<f64> for StudentT {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
- let StandardNormal(norm) = rng.gen::<StandardNormal>();
- norm * (self.dof / self.chi.ind_sample(rng)).sqrt()
+
+impl Distribution<f64> for Beta {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let x = self.gamma_a.sample(rng);
+ let y = self.gamma_b.sample(rng);
+ x / (x + y)
}
}
#[cfg(test)]
mod test {
- use distributions::{Sample, IndependentSample};
- use super::{ChiSquared, StudentT, FisherF};
+ use distributions::Distribution;
+ use super::{Beta, ChiSquared, StudentT, FisherF};
#[test]
fn test_chi_squared_one() {
- let mut chi = ChiSquared::new(1.0);
- let mut rng = ::test::rng();
+ let chi = ChiSquared::new(1.0);
+ let mut rng = ::test::rng(201);
for _ in 0..1000 {
chi.sample(&mut rng);
- chi.ind_sample(&mut rng);
}
}
#[test]
fn test_chi_squared_small() {
- let mut chi = ChiSquared::new(0.5);
- let mut rng = ::test::rng();
+ let chi = ChiSquared::new(0.5);
+ let mut rng = ::test::rng(202);
for _ in 0..1000 {
chi.sample(&mut rng);
- chi.ind_sample(&mut rng);
}
}
#[test]
fn test_chi_squared_large() {
- let mut chi = ChiSquared::new(30.0);
- let mut rng = ::test::rng();
+ let chi = ChiSquared::new(30.0);
+ let mut rng = ::test::rng(203);
for _ in 0..1000 {
chi.sample(&mut rng);
- chi.ind_sample(&mut rng);
}
}
#[test]
@@ -366,21 +380,34 @@ mod test {
#[test]
fn test_f() {
- let mut f = FisherF::new(2.0, 32.0);
- let mut rng = ::test::rng();
+ let f = FisherF::new(2.0, 32.0);
+ let mut rng = ::test::rng(204);
for _ in 0..1000 {
f.sample(&mut rng);
- f.ind_sample(&mut rng);
}
}
#[test]
fn test_t() {
- let mut t = StudentT::new(11.0);
- let mut rng = ::test::rng();
+ let t = StudentT::new(11.0);
+ let mut rng = ::test::rng(205);
for _ in 0..1000 {
t.sample(&mut rng);
- t.ind_sample(&mut rng);
}
}
+
+ #[test]
+ fn test_beta() {
+ let beta = Beta::new(1.0, 2.0);
+ let mut rng = ::test::rng(201);
+ for _ in 0..1000 {
+ beta.sample(&mut rng);
+ }
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_beta_invalid_dof() {
+ Beta::new(0., 0.);
+ }
}
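The new `Beta` type above is built on the classic Gamma-ratio construction: if `X ~ Gamma(alpha, 1)` and `Y ~ Gamma(beta, 1)`, then `X / (X + Y) ~ Beta(alpha, beta)`. A minimal usage sketch against the API introduced by this patch (assuming the crate is built with the default `std` feature, which `thread_rng` requires):

```
use rand::distributions::{Beta, Distribution};

fn main() {
    let beta = Beta::new(2.0, 5.0);
    let mut rng = rand::thread_rng();
    // Internally each draw is x / (x + y) with x ~ Gamma(2, 1) and y ~ Gamma(5, 1),
    // so the result always lies in [0, 1].
    let v: f64 = beta.sample(&mut rng);
    assert!(v >= 0.0 && v <= 1.0);
    println!("Beta(2, 5) sample: {}", v);
}
```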
diff --git a/rand/src/distributions/integer.rs b/rand/src/distributions/integer.rs
new file mode 100644
index 0000000..4e6604d
--- /dev/null
+++ b/rand/src/distributions/integer.rs
@@ -0,0 +1,161 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The implementations of the `Standard` distribution for integer types.
+
+use {Rng};
+use distributions::{Distribution, Standard};
+#[cfg(feature="simd_support")]
+use packed_simd::*;
+#[cfg(all(target_arch = "x86", feature="nightly"))]
+use core::arch::x86::*;
+#[cfg(all(target_arch = "x86_64", feature="nightly"))]
+use core::arch::x86_64::*;
+
+impl Distribution<u8> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u8 {
+ rng.next_u32() as u8
+ }
+}
+
+impl Distribution<u16> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u16 {
+ rng.next_u32() as u16
+ }
+}
+
+impl Distribution<u32> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u32 {
+ rng.next_u32()
+ }
+}
+
+impl Distribution<u64> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
+ rng.next_u64()
+ }
+}
+
+#[cfg(rust_1_26)]
+impl Distribution<u128> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u128 {
+ // Use LE; we explicitly generate one value before the next.
+ let x = rng.next_u64() as u128;
+ let y = rng.next_u64() as u128;
+ (y << 64) | x
+ }
+}
+
+impl Distribution<usize> for Standard {
+ #[inline]
+ #[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
+ rng.next_u32() as usize
+ }
+
+ #[inline]
+ #[cfg(target_pointer_width = "64")]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
+ rng.next_u64() as usize
+ }
+}
+
+macro_rules! impl_int_from_uint {
+ ($ty:ty, $uty:ty) => {
+ impl Distribution<$ty> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+ rng.gen::<$uty>() as $ty
+ }
+ }
+ }
+}
+
+impl_int_from_uint! { i8, u8 }
+impl_int_from_uint! { i16, u16 }
+impl_int_from_uint! { i32, u32 }
+impl_int_from_uint! { i64, u64 }
+#[cfg(rust_1_26)] impl_int_from_uint! { i128, u128 }
+impl_int_from_uint! { isize, usize }
+
+#[cfg(feature="simd_support")]
+macro_rules! simd_impl {
+ ($(($intrinsic:ident, $vec:ty),)+) => {$(
+ impl Distribution<$intrinsic> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $intrinsic {
+ $intrinsic::from_bits(rng.gen::<$vec>())
+ }
+ }
+ )+};
+
+ ($bits:expr,) => {};
+ ($bits:expr, $ty:ty, $($ty_more:ty,)*) => {
+ simd_impl!($bits, $($ty_more,)*);
+
+ impl Distribution<$ty> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+ let mut vec: $ty = Default::default();
+ unsafe {
+ let ptr = &mut vec;
+ let b_ptr = &mut *(ptr as *mut $ty as *mut [u8; $bits/8]);
+ rng.fill_bytes(b_ptr);
+ }
+ vec.to_le()
+ }
+ }
+ };
+}
+
+#[cfg(feature="simd_support")]
+simd_impl!(16, u8x2, i8x2,);
+#[cfg(feature="simd_support")]
+simd_impl!(32, u8x4, i8x4, u16x2, i16x2,);
+#[cfg(feature="simd_support")]
+simd_impl!(64, u8x8, i8x8, u16x4, i16x4, u32x2, i32x2,);
+#[cfg(feature="simd_support")]
+simd_impl!(128, u8x16, i8x16, u16x8, i16x8, u32x4, i32x4, u64x2, i64x2,);
+#[cfg(feature="simd_support")]
+simd_impl!(256, u8x32, i8x32, u16x16, i16x16, u32x8, i32x8, u64x4, i64x4,);
+#[cfg(feature="simd_support")]
+simd_impl!(512, u8x64, i8x64, u16x32, i16x32, u32x16, i32x16, u64x8, i64x8,);
+#[cfg(all(feature="simd_support", feature="nightly", any(target_arch="x86", target_arch="x86_64")))]
+simd_impl!((__m64, u8x8), (__m128i, u8x16), (__m256i, u8x32),);
+
+#[cfg(test)]
+mod tests {
+ use Rng;
+ use distributions::{Standard};
+
+ #[test]
+ fn test_integers() {
+ let mut rng = ::test::rng(806);
+
+ rng.sample::<isize, _>(Standard);
+ rng.sample::<i8, _>(Standard);
+ rng.sample::<i16, _>(Standard);
+ rng.sample::<i32, _>(Standard);
+ rng.sample::<i64, _>(Standard);
+ #[cfg(rust_1_26)]
+ rng.sample::<i128, _>(Standard);
+
+ rng.sample::<usize, _>(Standard);
+ rng.sample::<u8, _>(Standard);
+ rng.sample::<u16, _>(Standard);
+ rng.sample::<u32, _>(Standard);
+ rng.sample::<u64, _>(Standard);
+ #[cfg(rust_1_26)]
+ rng.sample::<u128, _>(Standard);
+ }
+}
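Two details of the integer implementations above are worth spelling out: signed types simply reinterpret a sample of the matching unsigned type, and `u128` is assembled from two `u64` draws, low half first. A standalone sketch (ordinary user code, not the crate's internals):

```
use rand::{thread_rng, RngCore};

fn main() {
    let mut rng = thread_rng();

    // i64 from Standard is just a reinterpreted u64 sample.
    let signed = rng.next_u64() as i64;

    // u128 from Standard combines two u64 draws; the low 64 bits are generated first.
    let lo = rng.next_u64() as u128;
    let hi = rng.next_u64() as u128;
    let wide = (hi << 64) | lo;

    println!("i64: {}, u128: {}", signed, wide);
}
```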
diff --git a/rand/src/distributions/mod.rs b/rand/src/distributions/mod.rs
index 5de8efb..160cd31 100644
--- a/rand/src/distributions/mod.rs
+++ b/rand/src/distributions/mod.rs
@@ -1,94 +1,394 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Sampling from random distributions.
+//! Generating random samples from probability distributions.
//!
-//! This is a generalization of `Rand` to allow parameters to control the
-//! exact properties of the generated values, e.g. the mean and standard
-//! deviation of a normal distribution. The `Sample` trait is the most
-//! general, and allows for generating values that change some state
-//! internally. The `IndependentSample` trait is for generating values
-//! that do not need to record state.
-
-use core::marker;
-
-use {Rng, Rand};
-
-pub use self::range::Range;
-#[cfg(feature="std")]
-pub use self::gamma::{Gamma, ChiSquared, FisherF, StudentT};
-#[cfg(feature="std")]
-pub use self::normal::{Normal, LogNormal};
-#[cfg(feature="std")]
-pub use self::exponential::Exp;
-
-pub mod range;
-#[cfg(feature="std")]
-pub mod gamma;
-#[cfg(feature="std")]
-pub mod normal;
-#[cfg(feature="std")]
-pub mod exponential;
-
-#[cfg(feature="std")]
-mod ziggurat_tables;
-
-/// Types that can be used to create a random instance of `Support`.
-pub trait Sample<Support> {
- /// Generate a random value of `Support`, using `rng` as the
- /// source of randomness.
- fn sample<R: Rng>(&mut self, rng: &mut R) -> Support;
-}
-
-/// `Sample`s that do not require keeping track of state.
+//! This module is the home of the [`Distribution`] trait and several of its
+//! implementations. It is the workhorse behind some of the convenient
+//! functionality of the [`Rng`] trait, including [`gen`], [`gen_range`] and
+//! of course [`sample`].
+//!
+//! Abstractly, a [probability distribution] describes the probability of
+//! occurrence of each value in its sample space.
+//!
+//! More concretely, an implementation of `Distribution<T>` for type `X` is an
+//! algorithm for choosing values from the sample space (a subset of `T`)
+//! according to the distribution `X` represents, using an external source of
+//! randomness (an RNG supplied to the `sample` function).
+//!
+//! A type `X` may implement `Distribution<T>` for multiple types `T`.
+//! Any type implementing [`Distribution`] is stateless (i.e. immutable),
+//! but it may have internal parameters set at construction time (for example,
+//! [`Uniform`] allows specification of its sample space as a range within `T`).
+//!
+//!
+//! # The `Standard` distribution
+//!
+//! The [`Standard`] distribution is important to mention. This is the
+//! distribution used by [`Rng::gen()`] and represents the "default" way to
+//! produce a random value for many different types, including most primitive
+//! types, tuples, arrays, and a few derived types. See the documentation of
+//! [`Standard`] for more details.
+//!
+//! Implementing `Distribution<T>` for [`Standard`] for user types `T` makes it
+//! possible to generate type `T` with [`Rng::gen()`], and by extension also
+//! with the [`random()`] function.
+//!
+//!
+//! # Distribution to sample from a `Uniform` range
+//!
+//! The [`Uniform`] distribution is more flexible than [`Standard`], but also
+//! more specialised: it supports fewer target types, but allows the sample
+//! space to be specified as an arbitrary range within its target type `T`.
+//! Both [`Standard`] and [`Uniform`] are in some sense uniform distributions.
+//!
+//! Values may be sampled from this distribution using [`Rng::gen_range`] or
+//! by creating a distribution object with [`Uniform::new`],
+//! [`Uniform::new_inclusive`] or `From<Range>`. When the range limits are not
+//! known at compile time it is typically faster to reuse an existing
+//! distribution object than to call [`Rng::gen_range`].
+//!
+//! User types `T` may also implement `Distribution<T>` for [`Uniform`],
+//! although this is less straightforward than for [`Standard`] (see the
+//! documentation in the [`uniform` module]). Doing so enables generation of
+//! values of type `T` with [`Rng::gen_range`].
+//!
+//!
+//! # Other distributions
+//!
+//! There are surprisingly many ways to uniformly generate random floats. A
+//! range between 0 and 1 is standard, but the exact bounds (open vs closed)
+//! and accuracy differ. In addition to the [`Standard`] distribution Rand offers
+//! [`Open01`] and [`OpenClosed01`]. See [Floating point implementation] for
+//! more details.
+//!
+//! [`Alphanumeric`] is a simple distribution to sample random letters and
+//! numbers of the `char` type; in contrast [`Standard`] may sample any valid
+//! `char`.
+//!
+//! [`WeightedIndex`] can be used to do weighted sampling from a set of items,
+//! such as from an array.
+//!
+//! # Non-uniform probability distributions
+//!
+//! Rand currently provides the following probability distributions:
+//!
+//! - Related to real-valued quantities that grow linearly
+//! (e.g. errors, offsets):
+//! - [`Normal`] distribution, and [`StandardNormal`] as a primitive
+//! - [`Cauchy`] distribution
+//! - Related to Bernoulli trials (yes/no events, with a given probability):
+//! - [`Binomial`] distribution
+//! - [`Bernoulli`] distribution, similar to [`Rng::gen_bool`].
+//! - Related to positive real-valued quantities that grow exponentially
+//! (e.g. prices, incomes, populations):
+//! - [`LogNormal`] distribution
+//! - Related to the occurrence of independent events at a given rate:
+//! - [`Pareto`] distribution
+//! - [`Poisson`] distribution
+//! - [`Exp`]onential distribution, and [`Exp1`] as a primitive
+//! - [`Weibull`] distribution
+//! - Gamma and derived distributions:
+//! - [`Gamma`] distribution
+//! - [`ChiSquared`] distribution
+//! - [`StudentT`] distribution
+//! - [`FisherF`] distribution
+//!   - [`Beta`] distribution
+//! - Triangular distribution:
+//! - [`Triangular`] distribution
+//! - Multivariate probability distributions
+//! - [`Dirichlet`] distribution
+//! - [`UnitSphereSurface`] distribution
+//! - [`UnitCircle`] distribution
+//!
+//! # Examples
+//!
+//! Sampling from a distribution:
+//!
+//! ```
+//! use rand::{thread_rng, Rng};
+//! use rand::distributions::Exp;
+//!
+//! let exp = Exp::new(2.0);
+//! let v = thread_rng().sample(exp);
+//! println!("{} is from an Exp(2) distribution", v);
+//! ```
+//!
+//! Implementing the [`Standard`] distribution for a user type:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! use rand::Rng;
+//! use rand::distributions::{Distribution, Standard};
+//!
+//! struct MyF32 {
+//! x: f32,
+//! }
+//!
+//! impl Distribution<MyF32> for Standard {
+//! fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> MyF32 {
+//! MyF32 { x: rng.gen() }
+//! }
+//! }
+//! ```
+//!
+//!
+//! [probability distribution]: https://en.wikipedia.org/wiki/Probability_distribution
+//! [`Distribution`]: trait.Distribution.html
+//! [`gen_range`]: ../trait.Rng.html#method.gen_range
+//! [`gen`]: ../trait.Rng.html#method.gen
+//! [`sample`]: ../trait.Rng.html#method.sample
+//! [`new_inclusive`]: struct.Uniform.html#method.new_inclusive
+//! [`random()`]: ../fn.random.html
+//! [`Rng::gen_bool`]: ../trait.Rng.html#method.gen_bool
+//! [`Rng::gen_range`]: ../trait.Rng.html#method.gen_range
+//! [`Rng::gen()`]: ../trait.Rng.html#method.gen
+//! [`Rng`]: ../trait.Rng.html
+//! [`uniform` module]: uniform/index.html
+//! [Floating point implementation]: struct.Standard.html#floating-point-implementation
+// distributions
+//! [`Alphanumeric`]: struct.Alphanumeric.html
+//! [`Bernoulli`]: struct.Bernoulli.html
+//! [`Beta`]: struct.Beta.html
+//! [`Binomial`]: struct.Binomial.html
+//! [`Cauchy`]: struct.Cauchy.html
+//! [`ChiSquared`]: struct.ChiSquared.html
+//! [`Dirichlet`]: struct.Dirichlet.html
+//! [`Exp`]: struct.Exp.html
+//! [`Exp1`]: struct.Exp1.html
+//! [`FisherF`]: struct.FisherF.html
+//! [`Gamma`]: struct.Gamma.html
+//! [`LogNormal`]: struct.LogNormal.html
+//! [`Normal`]: struct.Normal.html
+//! [`Open01`]: struct.Open01.html
+//! [`OpenClosed01`]: struct.OpenClosed01.html
+//! [`Pareto`]: struct.Pareto.html
+//! [`Poisson`]: struct.Poisson.html
+//! [`Standard`]: struct.Standard.html
+//! [`StandardNormal`]: struct.StandardNormal.html
+//! [`StudentT`]: struct.StudentT.html
+//! [`Triangular`]: struct.Triangular.html
+//! [`Uniform`]: struct.Uniform.html
+//! [`Uniform::new`]: struct.Uniform.html#method.new
+//! [`Uniform::new_inclusive`]: struct.Uniform.html#method.new_inclusive
+//! [`UnitSphereSurface`]: struct.UnitSphereSurface.html
+//! [`UnitCircle`]: struct.UnitCircle.html
+//! [`Weibull`]: struct.Weibull.html
+//! [`WeightedIndex`]: struct.WeightedIndex.html
+
+#[cfg(any(rust_1_26, feature="nightly"))]
+use core::iter;
+use Rng;
+
+pub use self::other::Alphanumeric;
+#[doc(inline)] pub use self::uniform::Uniform;
+pub use self::float::{OpenClosed01, Open01};
+pub use self::bernoulli::Bernoulli;
+#[cfg(feature="alloc")] pub use self::weighted::{WeightedIndex, WeightedError};
+#[cfg(feature="std")] pub use self::unit_sphere::UnitSphereSurface;
+#[cfg(feature="std")] pub use self::unit_circle::UnitCircle;
+#[cfg(feature="std")] pub use self::gamma::{Gamma, ChiSquared, FisherF,
+ StudentT, Beta};
+#[cfg(feature="std")] pub use self::normal::{Normal, LogNormal, StandardNormal};
+#[cfg(feature="std")] pub use self::exponential::{Exp, Exp1};
+#[cfg(feature="std")] pub use self::pareto::Pareto;
+#[cfg(feature="std")] pub use self::poisson::Poisson;
+#[cfg(feature="std")] pub use self::binomial::Binomial;
+#[cfg(feature="std")] pub use self::cauchy::Cauchy;
+#[cfg(feature="std")] pub use self::dirichlet::Dirichlet;
+#[cfg(feature="std")] pub use self::triangular::Triangular;
+#[cfg(feature="std")] pub use self::weibull::Weibull;
+
+pub mod uniform;
+mod bernoulli;
+#[cfg(feature="alloc")] mod weighted;
+#[cfg(feature="std")] mod unit_sphere;
+#[cfg(feature="std")] mod unit_circle;
+#[cfg(feature="std")] mod gamma;
+#[cfg(feature="std")] mod normal;
+#[cfg(feature="std")] mod exponential;
+#[cfg(feature="std")] mod pareto;
+#[cfg(feature="std")] mod poisson;
+#[cfg(feature="std")] mod binomial;
+#[cfg(feature="std")] mod cauchy;
+#[cfg(feature="std")] mod dirichlet;
+#[cfg(feature="std")] mod triangular;
+#[cfg(feature="std")] mod weibull;
+
+mod float;
+mod integer;
+mod other;
+mod utils;
+#[cfg(feature="std")] mod ziggurat_tables;
+
+/// Types (distributions) that can be used to create a random instance of `T`.
+///
+/// It is possible to sample from a distribution through both the
+/// `Distribution` and [`Rng`] traits, via `distr.sample(&mut rng)` and
+/// `rng.sample(distr)`. They also both offer the [`sample_iter`] method, which
+/// produces an iterator that samples from the distribution.
+///
+/// All implementations are expected to be immutable; this has the significant
+/// advantage of not needing to consider thread safety, and for most
+/// distributions efficient state-less sampling algorithms are available.
///
-/// Since no state is recorded, each sample is (statistically)
-/// independent of all others, assuming the `Rng` used has this
-/// property.
-// FIXME maybe having this separate is overkill (the only reason is to
-// take &self rather than &mut self)? or maybe this should be the
-// trait called `Sample` and the other should be `DependentSample`.
-pub trait IndependentSample<Support>: Sample<Support> {
- /// Generate a random value.
- fn ind_sample<R: Rng>(&self, &mut R) -> Support;
+/// [`Rng`]: ../trait.Rng.html
+/// [`sample_iter`]: trait.Distribution.html#method.sample_iter
+pub trait Distribution<T> {
+ /// Generate a random value of `T`, using `rng` as the source of randomness.
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T;
+
+ /// Create an iterator that generates random values of `T`, using `rng` as
+ /// the source of randomness.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use rand::thread_rng;
+ /// use rand::distributions::{Distribution, Alphanumeric, Uniform, Standard};
+ ///
+ /// let mut rng = thread_rng();
+ ///
+ /// // Vec of 16 x f32:
+ /// let v: Vec<f32> = Standard.sample_iter(&mut rng).take(16).collect();
+ ///
+ /// // String:
+ /// let s: String = Alphanumeric.sample_iter(&mut rng).take(7).collect();
+ ///
+ /// // Dice-rolling:
+ /// let die_range = Uniform::new_inclusive(1, 6);
+ /// let mut roll_die = die_range.sample_iter(&mut rng);
+ /// while roll_die.next().unwrap() != 6 {
+ /// println!("Not a 6; rolling again!");
+ /// }
+ /// ```
+ fn sample_iter<'a, R>(&'a self, rng: &'a mut R) -> DistIter<'a, Self, R, T>
+ where Self: Sized, R: Rng
+ {
+ DistIter {
+ distr: self,
+ rng: rng,
+ phantom: ::core::marker::PhantomData,
+ }
+ }
}
-/// A wrapper for generating types that implement `Rand` via the
-/// `Sample` & `IndependentSample` traits.
-#[derive(Debug)]
-pub struct RandSample<Sup> {
- _marker: marker::PhantomData<fn() -> Sup>,
+impl<'a, T, D: Distribution<T>> Distribution<T> for &'a D {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T {
+ (*self).sample(rng)
+ }
}
-impl<Sup> Copy for RandSample<Sup> {}
-impl<Sup> Clone for RandSample<Sup> {
- fn clone(&self) -> Self { *self }
-}
-impl<Sup: Rand> Sample<Sup> for RandSample<Sup> {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> Sup { self.ind_sample(rng) }
+/// An iterator that generates random values of `T` with distribution `D`,
+/// using `R` as the source of randomness.
+///
+/// This `struct` is created by the [`sample_iter`] method on [`Distribution`].
+/// See its documentation for more.
+///
+/// [`Distribution`]: trait.Distribution.html
+/// [`sample_iter`]: trait.Distribution.html#method.sample_iter
+#[derive(Debug)]
+pub struct DistIter<'a, D: 'a, R: 'a, T> {
+ distr: &'a D,
+ rng: &'a mut R,
+ phantom: ::core::marker::PhantomData<T>,
}
-impl<Sup: Rand> IndependentSample<Sup> for RandSample<Sup> {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> Sup {
- rng.gen()
+impl<'a, D, R, T> Iterator for DistIter<'a, D, R, T>
+ where D: Distribution<T>, R: Rng + 'a
+{
+ type Item = T;
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<T> {
+ Some(self.distr.sample(self.rng))
}
-}
-impl<Sup> RandSample<Sup> {
- pub fn new() -> RandSample<Sup> {
- RandSample { _marker: marker::PhantomData }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (usize::max_value(), None)
}
}
+#[cfg(rust_1_26)]
+impl<'a, D, R, T> iter::FusedIterator for DistIter<'a, D, R, T>
+ where D: Distribution<T>, R: Rng + 'a {}
+
+#[cfg(feature = "nightly")]
+impl<'a, D, R, T> iter::TrustedLen for DistIter<'a, D, R, T>
+ where D: Distribution<T>, R: Rng + 'a {}
+
+
+/// A generic random value distribution, implemented for many primitive types.
+/// Usually generates values with a numerically uniform distribution, and with a
+/// range appropriate to the type.
+///
+/// ## Built-in Implementations
+///
+/// Assuming the provided `Rng` is well-behaved, these implementations
+/// generate values with the following ranges and distributions:
+///
+/// * Integers (`i32`, `u32`, `isize`, `usize`, etc.): Uniformly distributed
+/// over all values of the type.
+/// * `char`: Uniformly distributed over all Unicode scalar values, i.e. all
+/// code points in the range `0...0x10_FFFF`, except for the range
+/// `0xD800...0xDFFF` (the surrogate code points). This includes
+/// unassigned/reserved code points.
+/// * `bool`: Generates `false` or `true`, each with probability 0.5.
+/// * Floating point types (`f32` and `f64`): Uniformly distributed in the
+/// half-open range `[0, 1)`. See notes below.
+/// * Wrapping integers (`Wrapping<T>`): sampled exactly as the wrapped
+///   integer type `T`.
+///
+/// The following aggregate types also implement the distribution `Standard` as
+/// long as their component types implement it:
+///
+/// * Tuples and arrays: Each element of the tuple or array is generated
+/// independently, using the `Standard` distribution recursively.
+/// * `Option<T>` where `Standard` is implemented for `T`: Returns `None` with
+/// probability 0.5; otherwise generates a random `x: T` and returns `Some(x)`.
+///
+/// # Example
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::Standard;
+///
+/// let val: f32 = SmallRng::from_entropy().sample(Standard);
+/// println!("f32 from [0, 1): {}", val);
+/// ```
+///
+/// # Floating point implementation
+/// The floating point implementations for `Standard` generate a random value in
+/// the half-open interval `[0, 1)`, i.e. including 0 but not 1.
+///
+/// All values that can be generated are of the form `n * ε/2`. For `f32`
+/// the 23 most significant random bits of a `u32` are used and for `f64` the
+/// 53 most significant bits of a `u64` are used. The conversion uses the
+/// multiplicative method: `(rng.gen::<$uty>() >> N) as $ty * (ε/2)`.
+///
+/// See also: [`Open01`] which samples from `(0, 1)`, [`OpenClosed01`] which
+/// samples from `(0, 1]` and `Rng::gen_range(0, 1)` which also samples from
+/// `[0, 1)`. Note that `Open01` and `gen_range` (which uses [`Uniform`]) use
+/// transmute-based methods which yield 1 bit less precision but may perform
+/// faster on some architectures (on modern Intel CPUs all methods have
+/// approximately equal performance).
+///
+/// [`Open01`]: struct.Open01.html
+/// [`OpenClosed01`]: struct.OpenClosed01.html
+/// [`Uniform`]: uniform/struct.Uniform.html
+#[derive(Clone, Copy, Debug)]
+pub struct Standard;
+
+
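The multiplicative float conversion described in the `Standard` documentation above can be written out by hand; a sketch of the same arithmetic for `f64` (user-level code reproducing the idea, not the crate's internal macro):

```
use rand::{thread_rng, RngCore};

fn main() {
    let mut rng = thread_rng();
    // Keep the 53 most significant bits of a u64 and scale by 2^-53 (ε/2),
    // giving values of the form n * ε/2 in the half-open range [0, 1).
    let bits = rng.next_u64() >> 11;
    let x = bits as f64 * (1.0 / (1u64 << 53) as f64);
    assert!(x >= 0.0 && x < 1.0);
    println!("f64 in [0, 1): {}", x);
}
```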
/// A value with a particular weight for use with `WeightedChoice`.
+#[deprecated(since="0.6.0", note="use WeightedIndex instead")]
+#[allow(deprecated)]
#[derive(Copy, Clone, Debug)]
pub struct Weighted<T> {
/// The numerical weight of this item
@@ -99,35 +399,19 @@ pub struct Weighted<T> {
/// A distribution that selects from a finite collection of weighted items.
///
-/// Each item has an associated weight that influences how likely it
-/// is to be chosen: higher weight is more likely.
-///
-/// The `Clone` restriction is a limitation of the `Sample` and
-/// `IndependentSample` traits. Note that `&T` is (cheaply) `Clone` for
-/// all `T`, as is `u32`, so one can store references or indices into
-/// another vector.
-///
-/// # Example
-///
-/// ```rust
-/// use rand::distributions::{Weighted, WeightedChoice, IndependentSample};
+/// Deprecated: use [`WeightedIndex`] instead.
///
-/// let mut items = vec!(Weighted { weight: 2, item: 'a' },
-/// Weighted { weight: 4, item: 'b' },
-/// Weighted { weight: 1, item: 'c' });
-/// let wc = WeightedChoice::new(&mut items);
-/// let mut rng = rand::thread_rng();
-/// for _ in 0..16 {
-/// // on average prints 'a' 4 times, 'b' 8 and 'c' twice.
-/// println!("{}", wc.ind_sample(&mut rng));
-/// }
-/// ```
+/// [`WeightedIndex`]: struct.WeightedIndex.html
+#[deprecated(since="0.6.0", note="use WeightedIndex instead")]
+#[allow(deprecated)]
#[derive(Debug)]
pub struct WeightedChoice<'a, T:'a> {
items: &'a mut [Weighted<T>],
- weight_range: Range<u32>
+ weight_range: Uniform<u32>,
}
+#[deprecated(since="0.6.0", note="use WeightedIndex instead")]
+#[allow(deprecated)]
impl<'a, T: Clone> WeightedChoice<'a, T> {
/// Create a new `WeightedChoice`.
///
@@ -157,26 +441,24 @@ impl<'a, T: Clone> WeightedChoice<'a, T> {
assert!(running_total != 0, "WeightedChoice::new called with a total weight of 0");
WeightedChoice {
- items: items,
+ items,
// we're likely to be generating numbers in this range
// relatively often, so might as well cache it
- weight_range: Range::new(0, running_total)
+ weight_range: Uniform::new(0, running_total)
}
}
}
-impl<'a, T: Clone> Sample<T> for WeightedChoice<'a, T> {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> T { self.ind_sample(rng) }
-}
-
-impl<'a, T: Clone> IndependentSample<T> for WeightedChoice<'a, T> {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> T {
+#[deprecated(since="0.6.0", note="use WeightedIndex instead")]
+#[allow(deprecated)]
+impl<'a, T: Clone> Distribution<T> for WeightedChoice<'a, T> {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T {
// we want to find the first element that has cumulative
        // weight > sample_weight, which we do by binary search since the
// cumulative weights of self.items are sorted.
// choose a weight in [0, total_weight)
- let sample_weight = self.weight_range.ind_sample(rng);
+ let sample_weight = self.weight_range.sample(rng);
// short circuit when it's the first item
if sample_weight < self.items[0].weight {
@@ -208,163 +490,78 @@ impl<'a, T: Clone> IndependentSample<T> for WeightedChoice<'a, T> {
}
modifier /= 2;
}
- return self.items[idx + 1].item.clone();
- }
-}
-
-/// Sample a random number using the Ziggurat method (specifically the
-/// ZIGNOR variant from Doornik 2005). Most of the arguments are
-/// directly from the paper:
-///
-/// * `rng`: source of randomness
-/// * `symmetric`: whether this is a symmetric distribution, or one-sided with P(x < 0) = 0.
-/// * `X`: the $x_i$ abscissae.
-/// * `F`: precomputed values of the PDF at the $x_i$, (i.e. $f(x_i)$)
-/// * `F_DIFF`: precomputed values of $f(x_i) - f(x_{i+1})$
-/// * `pdf`: the probability density function
-/// * `zero_case`: manual sampling from the tail when we chose the
-/// bottom box (i.e. i == 0)
-
-// the perf improvement (25-50%) is definitely worth the extra code
-// size from force-inlining.
-#[cfg(feature="std")]
-#[inline(always)]
-fn ziggurat<R: Rng, P, Z>(
- rng: &mut R,
- symmetric: bool,
- x_tab: ziggurat_tables::ZigTable,
- f_tab: ziggurat_tables::ZigTable,
- mut pdf: P,
- mut zero_case: Z)
- -> f64 where P: FnMut(f64) -> f64, Z: FnMut(&mut R, f64) -> f64 {
- const SCALE: f64 = (1u64 << 53) as f64;
- loop {
- // reimplement the f64 generation as an optimisation suggested
- // by the Doornik paper: we have a lot of precision-space
- // (i.e. there are 11 bits of the 64 of a u64 to use after
- // creating a f64), so we might as well reuse some to save
- // generating a whole extra random number. (Seems to be 15%
- // faster.)
- //
- // This unfortunately misses out on the benefits of direct
- // floating point generation if an RNG like dSMFT is
- // used. (That is, such RNGs create floats directly, highly
- // efficiently and overload next_f32/f64, so by not calling it
- // this may be slower than it would be otherwise.)
- // FIXME: investigate/optimise for the above.
- let bits: u64 = rng.gen();
- let i = (bits & 0xff) as usize;
- let f = (bits >> 11) as f64 / SCALE;
-
- // u is either U(-1, 1) or U(0, 1) depending on if this is a
- // symmetric distribution or not.
- let u = if symmetric {2.0 * f - 1.0} else {f};
- let x = u * x_tab[i];
-
- let test_x = if symmetric { x.abs() } else {x};
-
- // algebraically equivalent to |u| < x_tab[i+1]/x_tab[i] (or u < x_tab[i+1]/x_tab[i])
- if test_x < x_tab[i + 1] {
- return x;
- }
- if i == 0 {
- return zero_case(rng, u);
- }
- // algebraically equivalent to f1 + DRanU()*(f0 - f1) < 1
- if f_tab[i + 1] + (f_tab[i] - f_tab[i + 1]) * rng.gen::<f64>() < pdf(x) {
- return x;
- }
+ self.items[idx + 1].item.clone()
}
}
#[cfg(test)]
mod tests {
+ use rngs::mock::StepRng;
+ #[allow(deprecated)]
+ use super::{WeightedChoice, Weighted, Distribution};
- use {Rng, Rand};
- use super::{RandSample, WeightedChoice, Weighted, Sample, IndependentSample};
-
- #[derive(PartialEq, Debug)]
- struct ConstRand(usize);
- impl Rand for ConstRand {
- fn rand<R: Rng>(_: &mut R) -> ConstRand {
- ConstRand(0)
- }
- }
-
- // 0, 1, 2, 3, ...
- struct CountingRng { i: u32 }
- impl Rng for CountingRng {
- fn next_u32(&mut self) -> u32 {
- self.i += 1;
- self.i - 1
- }
- fn next_u64(&mut self) -> u64 {
- self.next_u32() as u64
- }
- }
-
- #[test]
- fn test_rand_sample() {
- let mut rand_sample = RandSample::<ConstRand>::new();
-
- assert_eq!(rand_sample.sample(&mut ::test::rng()), ConstRand(0));
- assert_eq!(rand_sample.ind_sample(&mut ::test::rng()), ConstRand(0));
- }
#[test]
+ #[allow(deprecated)]
fn test_weighted_choice() {
// this makes assumptions about the internal implementation of
- // WeightedChoice, specifically: it doesn't reorder the items,
- // it doesn't do weird things to the RNG (so 0 maps to 0, 1 to
- // 1, internally; modulo a modulo operation).
+ // WeightedChoice. It may fail when the implementation in
+ // `distributions::uniform::UniformInt` changes.
macro_rules! t {
($items:expr, $expected:expr) => {{
let mut items = $items;
+ let mut total_weight = 0;
+ for item in &items { total_weight += item.weight; }
+
let wc = WeightedChoice::new(&mut items);
let expected = $expected;
- let mut rng = CountingRng { i: 0 };
+ // Use extremely large steps between the random numbers, because
+ // we test with small ranges and `UniformInt` is designed to prefer
+ // the most significant bits.
+ let mut rng = StepRng::new(0, !0 / (total_weight as u64));
for &val in expected.iter() {
- assert_eq!(wc.ind_sample(&mut rng), val)
+ assert_eq!(wc.sample(&mut rng), val)
}
}}
}
- t!(vec!(Weighted { weight: 1, item: 10}), [10]);
+ t!([Weighted { weight: 1, item: 10}], [10]);
// skip some
- t!(vec!(Weighted { weight: 0, item: 20},
- Weighted { weight: 2, item: 21},
- Weighted { weight: 0, item: 22},
- Weighted { weight: 1, item: 23}),
- [21,21, 23]);
+ t!([Weighted { weight: 0, item: 20},
+ Weighted { weight: 2, item: 21},
+ Weighted { weight: 0, item: 22},
+ Weighted { weight: 1, item: 23}],
+ [21, 21, 23]);
// different weights
- t!(vec!(Weighted { weight: 4, item: 30},
- Weighted { weight: 3, item: 31}),
- [30,30,30,30, 31,31,31]);
+ t!([Weighted { weight: 4, item: 30},
+ Weighted { weight: 3, item: 31}],
+ [30, 31, 30, 31, 30, 31, 30]);
// check that we're binary searching
// correctly with some vectors of odd
// length.
- t!(vec!(Weighted { weight: 1, item: 40},
- Weighted { weight: 1, item: 41},
- Weighted { weight: 1, item: 42},
- Weighted { weight: 1, item: 43},
- Weighted { weight: 1, item: 44}),
+ t!([Weighted { weight: 1, item: 40},
+ Weighted { weight: 1, item: 41},
+ Weighted { weight: 1, item: 42},
+ Weighted { weight: 1, item: 43},
+ Weighted { weight: 1, item: 44}],
[40, 41, 42, 43, 44]);
- t!(vec!(Weighted { weight: 1, item: 50},
- Weighted { weight: 1, item: 51},
- Weighted { weight: 1, item: 52},
- Weighted { weight: 1, item: 53},
- Weighted { weight: 1, item: 54},
- Weighted { weight: 1, item: 55},
- Weighted { weight: 1, item: 56}),
- [50, 51, 52, 53, 54, 55, 56]);
+ t!([Weighted { weight: 1, item: 50},
+ Weighted { weight: 1, item: 51},
+ Weighted { weight: 1, item: 52},
+ Weighted { weight: 1, item: 53},
+ Weighted { weight: 1, item: 54},
+ Weighted { weight: 1, item: 55},
+ Weighted { weight: 1, item: 56}],
+ [50, 54, 51, 55, 52, 56, 53]);
}
#[test]
+ #[allow(deprecated)]
fn test_weighted_clone_initialization() {
let initial : Weighted<u32> = Weighted {weight: 1, item: 1};
let clone = initial.clone();
@@ -373,6 +570,7 @@ mod tests {
}
#[test] #[should_panic]
+ #[allow(deprecated)]
fn test_weighted_clone_change_weight() {
let initial : Weighted<u32> = Weighted {weight: 1, item: 1};
let mut clone = initial.clone();
@@ -381,6 +579,7 @@ mod tests {
}
#[test] #[should_panic]
+ #[allow(deprecated)]
fn test_weighted_clone_change_item() {
let initial : Weighted<u32> = Weighted {weight: 1, item: 1};
let mut clone = initial.clone();
@@ -390,20 +589,33 @@ mod tests {
}
#[test] #[should_panic]
+ #[allow(deprecated)]
fn test_weighted_choice_no_items() {
WeightedChoice::<isize>::new(&mut []);
}
#[test] #[should_panic]
+ #[allow(deprecated)]
fn test_weighted_choice_zero_weight() {
WeightedChoice::new(&mut [Weighted { weight: 0, item: 0},
Weighted { weight: 0, item: 1}]);
}
#[test] #[should_panic]
+ #[allow(deprecated)]
fn test_weighted_choice_weight_overflows() {
- let x = ::std::u32::MAX / 2; // x + x + 2 is the overflow
+ let x = ::core::u32::MAX / 2; // x + x + 2 is the overflow
WeightedChoice::new(&mut [Weighted { weight: x, item: 0 },
Weighted { weight: 1, item: 1 },
Weighted { weight: x, item: 2 },
Weighted { weight: 1, item: 3 }]);
}
+
+ #[cfg(feature="std")]
+ #[test]
+ fn test_distributions_iter() {
+ use distributions::Normal;
+ let mut rng = ::test::rng(210);
+ let distr = Normal::new(10.0, 10.0);
+ let results: Vec<_> = distr.sample_iter(&mut rng).take(100).collect();
+ println!("{:?}", results);
+ }
}
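Since `WeightedChoice` is deprecated by this change, the replacement is `WeightedIndex`: sample an index with probability proportional to its weight, then index into the item slice. A sketch of the intended migration (assuming the `WeightedIndex` API exported above; the exact trait bounds on the weight type may differ between 0.6.x releases):

```
use rand::prelude::*;
use rand::distributions::WeightedIndex;

fn main() {
    let items = ['a', 'b', 'c'];
    let weights = [2u32, 4, 1];
    let dist = WeightedIndex::new(weights.iter().cloned()).unwrap();
    let mut rng = thread_rng();
    for _ in 0..16 {
        // On average prints 'b' about twice as often as 'a', and 'c' rarely.
        println!("{}", items[dist.sample(&mut rng)]);
    }
}
```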
diff --git a/rand/src/distributions/normal.rs b/rand/src/distributions/normal.rs
index 280613d..b8d632e 100644
--- a/rand/src/distributions/normal.rs
+++ b/rand/src/distributions/normal.rs
@@ -1,49 +1,50 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The normal and derived distributions.
-use {Rng, Rand, Open01};
-use distributions::{ziggurat, ziggurat_tables, Sample, IndependentSample};
+use Rng;
+use distributions::{ziggurat_tables, Distribution, Open01};
+use distributions::utils::ziggurat;
-/// A wrapper around an `f64` to generate N(0, 1) random numbers
-/// (a.k.a. a standard normal, or Gaussian).
+/// Samples floating-point numbers according to the normal distribution
+/// `N(0, 1)` (a.k.a. a standard normal, or Gaussian). This is equivalent to
+/// `Normal::new(0.0, 1.0)` but faster.
///
/// See `Normal` for the general normal distribution.
///
-/// Implemented via the ZIGNOR variant[1] of the Ziggurat method.
+/// Implemented via the ZIGNOR variant[^1] of the Ziggurat method.
///
-/// [1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to
-/// Generate Normal Random
-/// Samples*](http://www.doornik.com/research/ziggurat.pdf). Nuffield
-/// College, Oxford
+/// [^1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to
+/// Generate Normal Random Samples*](
+/// https://www.doornik.com/research/ziggurat.pdf).
+/// Nuffield College, Oxford
///
/// # Example
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::StandardNormal;
///
-/// ```rust
-/// use rand::distributions::normal::StandardNormal;
-///
-/// let StandardNormal(x) = rand::random();
-/// println!("{}", x);
+/// let val: f64 = SmallRng::from_entropy().sample(StandardNormal);
+/// println!("{}", val);
/// ```
#[derive(Clone, Copy, Debug)]
-pub struct StandardNormal(pub f64);
+pub struct StandardNormal;
-impl Rand for StandardNormal {
- fn rand<R:Rng>(rng: &mut R) -> StandardNormal {
+impl Distribution<f64> for StandardNormal {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
#[inline]
fn pdf(x: f64) -> f64 {
(-x*x/2.0).exp()
}
#[inline]
- fn zero_case<R:Rng>(rng: &mut R, u: f64) -> f64 {
+ fn zero_case<R: Rng + ?Sized>(rng: &mut R, u: f64) -> f64 {
// compute a random number in the tail by hand
// strange initial conditions, because the loop is not
@@ -54,8 +55,8 @@ impl Rand for StandardNormal {
let mut y = 0.0f64;
while -2.0 * y < x * x {
- let Open01(x_) = rng.gen::<Open01<f64>>();
- let Open01(y_) = rng.gen::<Open01<f64>>();
+ let x_: f64 = rng.sample(Open01);
+ let y_: f64 = rng.sample(Open01);
x = x_.ln() / ziggurat_tables::ZIG_NORM_R;
y = y_.ln();
@@ -64,30 +65,33 @@ impl Rand for StandardNormal {
if u < 0.0 { x - ziggurat_tables::ZIG_NORM_R } else { ziggurat_tables::ZIG_NORM_R - x }
}
- StandardNormal(ziggurat(
- rng,
- true, // this is symmetric
- &ziggurat_tables::ZIG_NORM_X,
- &ziggurat_tables::ZIG_NORM_F,
- pdf, zero_case))
+ ziggurat(rng, true, // this is symmetric
+ &ziggurat_tables::ZIG_NORM_X,
+ &ziggurat_tables::ZIG_NORM_F,
+ pdf, zero_case)
}
}
/// The normal distribution `N(mean, std_dev**2)`.
///
-/// This uses the ZIGNOR variant of the Ziggurat method, see
-/// `StandardNormal` for more details.
+/// This uses the ZIGNOR variant of the Ziggurat method, see [`StandardNormal`]
+/// for more details.
+///
+/// Note that [`StandardNormal`] is an optimised implementation for mean 0, and
+/// standard deviation 1.
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{Normal, IndependentSample};
+/// ```
+/// use rand::distributions::{Normal, Distribution};
///
/// // mean 2, standard deviation 3
/// let normal = Normal::new(2.0, 3.0);
-/// let v = normal.ind_sample(&mut rand::thread_rng());
+/// let v = normal.sample(&mut rand::thread_rng());
/// println!("{} is from a N(2, 9) distribution", v)
/// ```
+///
+/// [`StandardNormal`]: struct.StandardNormal.html
#[derive(Clone, Copy, Debug)]
pub struct Normal {
mean: f64,
@@ -105,17 +109,14 @@ impl Normal {
pub fn new(mean: f64, std_dev: f64) -> Normal {
assert!(std_dev >= 0.0, "Normal::new called with `std_dev` < 0");
Normal {
- mean: mean,
- std_dev: std_dev
+ mean,
+ std_dev
}
}
}
-impl Sample<f64> for Normal {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl IndependentSample<f64> for Normal {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
- let StandardNormal(n) = rng.gen::<StandardNormal>();
+impl Distribution<f64> for Normal {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let n = rng.sample(StandardNormal);
self.mean + self.std_dev * n
}
}
@@ -123,17 +124,17 @@ impl IndependentSample<f64> for Normal {
/// The log-normal distribution `ln N(mean, std_dev**2)`.
///
-/// If `X` is log-normal distributed, then `ln(X)` is `N(mean,
-/// std_dev**2)` distributed.
+/// If `X` is log-normal distributed, then `ln(X)` is `N(mean, std_dev**2)`
+/// distributed.
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{LogNormal, IndependentSample};
+/// ```
+/// use rand::distributions::{LogNormal, Distribution};
///
/// // mean 2, standard deviation 3
/// let log_normal = LogNormal::new(2.0, 3.0);
-/// let v = log_normal.ind_sample(&mut rand::thread_rng());
+/// let v = log_normal.sample(&mut rand::thread_rng());
/// println!("{} is from an ln N(2, 9) distribution", v)
/// ```
#[derive(Clone, Copy, Debug)]
@@ -154,27 +155,23 @@ impl LogNormal {
LogNormal { norm: Normal::new(mean, std_dev) }
}
}
-impl Sample<f64> for LogNormal {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl IndependentSample<f64> for LogNormal {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
- self.norm.ind_sample(rng).exp()
+impl Distribution<f64> for LogNormal {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ self.norm.sample(rng).exp()
}
}
#[cfg(test)]
mod tests {
- use distributions::{Sample, IndependentSample};
+ use distributions::Distribution;
use super::{Normal, LogNormal};
#[test]
fn test_normal() {
- let mut norm = Normal::new(10.0, 10.0);
- let mut rng = ::test::rng();
+ let norm = Normal::new(10.0, 10.0);
+ let mut rng = ::test::rng(210);
for _ in 0..1000 {
norm.sample(&mut rng);
- norm.ind_sample(&mut rng);
}
}
#[test]
@@ -186,11 +183,10 @@ mod tests {
#[test]
fn test_log_normal() {
- let mut lnorm = LogNormal::new(10.0, 10.0);
- let mut rng = ::test::rng();
+ let lnorm = LogNormal::new(10.0, 10.0);
+ let mut rng = ::test::rng(211);
for _ in 0..1000 {
lnorm.sample(&mut rng);
- lnorm.ind_sample(&mut rng);
}
}
#[test]
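As the `Normal` implementation above shows, the general distribution is just an affine transform of the `StandardNormal` primitive. A small sketch comparing the two paths:

```
use rand::{thread_rng, Rng};
use rand::distributions::{Distribution, Normal, StandardNormal};

fn main() {
    let mut rng = thread_rng();

    // By hand: mean + std_dev * z, with z ~ N(0, 1) from the primitive.
    let z: f64 = rng.sample(StandardNormal);
    let by_hand = 2.0 + 3.0 * z;

    // Equivalent, via the parameterised distribution.
    let normal = Normal::new(2.0, 3.0);
    let v = normal.sample(&mut rng);

    println!("by hand: {}, via Normal: {}", by_hand, v);
}
```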
diff --git a/rand/src/distributions/other.rs b/rand/src/distributions/other.rs
new file mode 100644
index 0000000..2295f79
--- /dev/null
+++ b/rand/src/distributions/other.rs
@@ -0,0 +1,219 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The implementations of the `Standard` distribution for other built-in types.
+
+use core::char;
+use core::num::Wrapping;
+
+use {Rng};
+use distributions::{Distribution, Standard, Uniform};
+
+// ----- Sampling distributions -----
+
+/// Sample a `char`, uniformly distributed over ASCII letters and numbers:
+/// a-z, A-Z and 0-9.
+///
+/// # Example
+///
+/// ```
+/// use std::iter;
+/// use rand::{Rng, thread_rng};
+/// use rand::distributions::Alphanumeric;
+///
+/// let mut rng = thread_rng();
+/// let chars: String = iter::repeat(())
+/// .map(|()| rng.sample(Alphanumeric))
+/// .take(7)
+/// .collect();
+/// println!("Random chars: {}", chars);
+/// ```
+#[derive(Debug)]
+pub struct Alphanumeric;
+
+
+// ----- Implementations of distributions -----
+
+impl Distribution<char> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> char {
+ // A valid `char` is either in the interval `[0, 0xD800)` or
+ // `(0xDFFF, 0x11_0000)`. All `char`s must therefore be in
+ // `[0, 0x11_0000)` but not in the "gap" `[0xD800, 0xDFFF]` which is
+ // reserved for surrogates. This is the size of that gap.
+ const GAP_SIZE: u32 = 0xDFFF - 0xD800 + 1;
+
+ // Uniform::new(0, 0x11_0000 - GAP_SIZE) can also be used but it
+ // seemed slower.
+ let range = Uniform::new(GAP_SIZE, 0x11_0000);
+
+ let mut n = range.sample(rng);
+ if n <= 0xDFFF {
+ n -= GAP_SIZE;
+ }
+ unsafe { char::from_u32_unchecked(n) }
+ }
+}
+
+impl Distribution<char> for Alphanumeric {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> char {
+ const RANGE: u32 = 26 + 26 + 10;
+ const GEN_ASCII_STR_CHARSET: &[u8] =
+ b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
+ abcdefghijklmnopqrstuvwxyz\
+ 0123456789";
+ // We can pick from 62 characters. This is so close to a power of 2, 64,
+ // that we can do better than `Uniform`. Use a simple bitshift and
+ // rejection sampling. We do not use a bitmask, because for small RNGs
+ // the most significant bits are usually of higher quality.
+ loop {
+ let var = rng.next_u32() >> (32 - 6);
+ if var < RANGE {
+ return GEN_ASCII_STR_CHARSET[var as usize] as char
+ }
+ }
+ }
+}
+
+impl Distribution<bool> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> bool {
+        // We can compare against an arbitrary bit of a u32 to get a bool.
+        // Because the least significant bits of a lower quality RNG can have
+        // simple patterns, we compare against the most significant bit. This
+        // is most easily done using a sign test.
+ (rng.next_u32() as i32) < 0
+ }
+}
+
+macro_rules! tuple_impl {
+ // use variables to indicate the arity of the tuple
+ ($($tyvar:ident),* ) => {
+ // the trailing commas are for the 1 tuple
+ impl< $( $tyvar ),* >
+ Distribution<( $( $tyvar ),* , )>
+ for Standard
+ where $( Standard: Distribution<$tyvar> ),*
+ {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> ( $( $tyvar ),* , ) {
+ (
+ // use the $tyvar's to get the appropriate number of
+ // repeats (they're not actually needed)
+ $(
+ _rng.gen::<$tyvar>()
+ ),*
+ ,
+ )
+ }
+ }
+ }
+}
+
+impl Distribution<()> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, _: &mut R) -> () { () }
+}
+tuple_impl!{A}
+tuple_impl!{A, B}
+tuple_impl!{A, B, C}
+tuple_impl!{A, B, C, D}
+tuple_impl!{A, B, C, D, E}
+tuple_impl!{A, B, C, D, E, F}
+tuple_impl!{A, B, C, D, E, F, G}
+tuple_impl!{A, B, C, D, E, F, G, H}
+tuple_impl!{A, B, C, D, E, F, G, H, I}
+tuple_impl!{A, B, C, D, E, F, G, H, I, J}
+tuple_impl!{A, B, C, D, E, F, G, H, I, J, K}
+tuple_impl!{A, B, C, D, E, F, G, H, I, J, K, L}
+
+macro_rules! array_impl {
+ // recursive, given at least one type parameter:
+ {$n:expr, $t:ident, $($ts:ident,)*} => {
+ array_impl!{($n - 1), $($ts,)*}
+
+ impl<T> Distribution<[T; $n]> for Standard where Standard: Distribution<T> {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> [T; $n] {
+ [_rng.gen::<$t>(), $(_rng.gen::<$ts>()),*]
+ }
+ }
+ };
+ // empty case:
+ {$n:expr,} => {
+ impl<T> Distribution<[T; $n]> for Standard {
+ fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> [T; $n] { [] }
+ }
+ };
+}
+
+array_impl!{32, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T,}
+
+impl<T> Distribution<Option<T>> for Standard where Standard: Distribution<T> {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Option<T> {
+ // UFCS is needed here: https://github.com/rust-lang/rust/issues/24066
+ if rng.gen::<bool>() {
+ Some(rng.gen())
+ } else {
+ None
+ }
+ }
+}
+
+impl<T> Distribution<Wrapping<T>> for Standard where Standard: Distribution<T> {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Wrapping<T> {
+ Wrapping(rng.gen())
+ }
+}
+
+
+#[cfg(test)]
+mod tests {
+ use {Rng, RngCore, Standard};
+ use distributions::Alphanumeric;
+ #[cfg(all(not(feature="std"), feature="alloc"))] use alloc::string::String;
+
+ #[test]
+ fn test_misc() {
+ let rng: &mut RngCore = &mut ::test::rng(820);
+
+ rng.sample::<char, _>(Standard);
+ rng.sample::<bool, _>(Standard);
+ }
+
+ #[cfg(feature="alloc")]
+ #[test]
+ fn test_chars() {
+ use core::iter;
+ let mut rng = ::test::rng(805);
+
+ // Test by generating a relatively large number of chars, so we also
+ // take the rejection sampling path.
+ let word: String = iter::repeat(())
+ .map(|()| rng.gen::<char>()).take(1000).collect();
+ assert!(word.len() != 0);
+ }
+
+ #[test]
+ fn test_alphanumeric() {
+ let mut rng = ::test::rng(806);
+
+ // Test by generating a relatively large number of chars, so we also
+ // take the rejection sampling path.
+ let mut incorrect = false;
+ for _ in 0..100 {
+ let c = rng.sample(Alphanumeric);
+ incorrect |= !((c >= '0' && c <= '9') ||
+ (c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') );
+ }
+ assert!(incorrect == false);
+ }
+}
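The `Alphanumeric` implementation above relies on simple rejection sampling: take the six most significant bits of a `u32` (a value in `0..64`) and retry until it falls below 62, so each of the 62 characters is equally likely. A standalone sketch of that step (the `alnum_index` helper is for illustration only):

```
use rand::{thread_rng, RngCore};

// Returns a uniformly distributed index in 0..62, mirroring the loop above.
fn alnum_index<R: RngCore + ?Sized>(rng: &mut R) -> usize {
    loop {
        let v = rng.next_u32() >> (32 - 6); // keep the 6 high bits: 0..64
        if v < 62 {
            return v as usize;
        }
    }
}

fn main() {
    const CHARSET: &[u8] =
        b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
    let mut rng = thread_rng();
    let c = CHARSET[alnum_index(&mut rng)] as char;
    println!("random alphanumeric: {}", c);
}
```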
diff --git a/rand/src/distributions/pareto.rs b/rand/src/distributions/pareto.rs
new file mode 100644
index 0000000..744a157
--- /dev/null
+++ b/rand/src/distributions/pareto.rs
@@ -0,0 +1,74 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Pareto distribution.
+
+use Rng;
+use distributions::{Distribution, OpenClosed01};
+
+/// Samples floating-point numbers according to the Pareto distribution
+///
+/// # Example
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::Pareto;
+///
+/// let val: f64 = SmallRng::from_entropy().sample(Pareto::new(1., 2.));
+/// println!("{}", val);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Pareto {
+ scale: f64,
+ inv_neg_shape: f64,
+}
+
+impl Pareto {
+ /// Construct a new Pareto distribution with given `scale` and `shape`.
+ ///
+ /// In the literature, `scale` is commonly written as x<sub>m</sub> or k and
+ /// `shape` is often written as α.
+ ///
+ /// # Panics
+ ///
+ /// `scale` and `shape` have to be non-zero and positive.
+ pub fn new(scale: f64, shape: f64) -> Pareto {
+ assert!((scale > 0.) & (shape > 0.));
+ Pareto { scale, inv_neg_shape: -1.0 / shape }
+ }
+}
+
+impl Distribution<f64> for Pareto {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let u: f64 = rng.sample(OpenClosed01);
+ self.scale * u.powf(self.inv_neg_shape)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use distributions::Distribution;
+ use super::Pareto;
+
+ #[test]
+ #[should_panic]
+ fn invalid() {
+ Pareto::new(0., 0.);
+ }
+
+ #[test]
+ fn sample() {
+ let scale = 1.0;
+ let shape = 2.0;
+ let d = Pareto::new(scale, shape);
+ let mut rng = ::test::rng(1);
+ for _ in 0..1000 {
+ let r = d.sample(&mut rng);
+ assert!(r >= scale);
+ }
+ }
+}
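The `Pareto` sampler above is a one-line inverse transform: with `u` uniform on `(0, 1]`, `scale * u^(-1/shape)` is Pareto distributed, and drawing from `(0, 1]` rather than `[0, 1)` avoids raising zero to a negative power. The same step written out directly:

```
use rand::{thread_rng, Rng};
use rand::distributions::OpenClosed01;

fn main() {
    let (scale, shape) = (1.0_f64, 2.0_f64);
    let mut rng = thread_rng();

    let u: f64 = rng.sample(OpenClosed01); // u in (0, 1], never exactly zero
    let v = scale * u.powf(-1.0 / shape);
    assert!(v >= scale);
    println!("Pareto({}, {}) sample: {}", scale, shape, v);
}
```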
diff --git a/rand/src/distributions/poisson.rs b/rand/src/distributions/poisson.rs
new file mode 100644
index 0000000..1244caa
--- /dev/null
+++ b/rand/src/distributions/poisson.rs
@@ -0,0 +1,157 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2016-2017 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Poisson distribution.
+
+use Rng;
+use distributions::{Distribution, Cauchy};
+use distributions::utils::log_gamma;
+
+/// The Poisson distribution `Poisson(lambda)`.
+///
+/// This distribution has a density function:
+/// `f(k) = lambda^k * exp(-lambda) / k!` for `k >= 0`.
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{Poisson, Distribution};
+///
+/// let poi = Poisson::new(2.0);
+/// let v = poi.sample(&mut rand::thread_rng());
+/// println!("{} is from a Poisson(2) distribution", v);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Poisson {
+ lambda: f64,
+ // precalculated values
+ exp_lambda: f64,
+ log_lambda: f64,
+ sqrt_2lambda: f64,
+ magic_val: f64,
+}
+
+impl Poisson {
+ /// Construct a new `Poisson` with the given shape parameter
+ /// `lambda`. Panics if `lambda <= 0`.
+ pub fn new(lambda: f64) -> Poisson {
+ assert!(lambda > 0.0, "Poisson::new called with lambda <= 0");
+ let log_lambda = lambda.ln();
+ Poisson {
+ lambda,
+ exp_lambda: (-lambda).exp(),
+ log_lambda,
+ sqrt_2lambda: (2.0 * lambda).sqrt(),
+ magic_val: lambda * log_lambda - log_gamma(1.0 + lambda),
+ }
+ }
+}
+
+impl Distribution<u64> for Poisson {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
+ // using the algorithm from Numerical Recipes in C
+
+ // for low expected values use the Knuth method
+ if self.lambda < 12.0 {
+ let mut result = 0;
+ let mut p = 1.0;
+ while p > self.exp_lambda {
+ p *= rng.gen::<f64>();
+ result += 1;
+ }
+ result - 1
+ }
+ // high expected values - rejection method
+ else {
+ let mut int_result: u64;
+
+ // we use the Cauchy distribution as the comparison distribution
+ // f(x) ~ 1/(1+x^2)
+ let cauchy = Cauchy::new(0.0, 1.0);
+
+ loop {
+ let mut result;
+ let mut comp_dev;
+
+ loop {
+ // draw from the Cauchy distribution
+ comp_dev = rng.sample(cauchy);
+                    // shift the peak of the comparison distribution
+ result = self.sqrt_2lambda * comp_dev + self.lambda;
+ // repeat the drawing until we are in the range of possible values
+ if result >= 0.0 {
+ break;
+ }
+ }
+ // now the result is a random variable greater than 0 with Cauchy distribution
+ // the result should be an integer value
+ result = result.floor();
+ int_result = result as u64;
+
+ // this is the ratio of the Poisson distribution to the comparison distribution
+ // the magic value scales the distribution function to a range of approximately 0-1
+ // since it is not exact, we multiply the ratio by 0.9 to avoid ratios greater than 1
+ // this doesn't change the resulting distribution, only increases the rate of failed drawings
+ let check = 0.9 * (1.0 + comp_dev * comp_dev)
+ * (result * self.log_lambda - log_gamma(1.0 + result) - self.magic_val).exp();
+
+ // check with uniform random value - if below the threshold, we are within the target distribution
+ if rng.gen::<f64>() <= check {
+ break;
+ }
+ }
+ int_result
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use distributions::Distribution;
+ use super::Poisson;
+
+ #[test]
+ fn test_poisson_10() {
+ let poisson = Poisson::new(10.0);
+ let mut rng = ::test::rng(123);
+ let mut sum = 0;
+ for _ in 0..1000 {
+ sum += poisson.sample(&mut rng);
+ }
+ let avg = (sum as f64) / 1000.0;
+ println!("Poisson average: {}", avg);
+ assert!((avg - 10.0).abs() < 0.5); // not 100% certain, but probable enough
+ }
+
+ #[test]
+ fn test_poisson_15() {
+ // Take the 'high expected values' path
+ let poisson = Poisson::new(15.0);
+ let mut rng = ::test::rng(123);
+ let mut sum = 0;
+ for _ in 0..1000 {
+ sum += poisson.sample(&mut rng);
+ }
+ let avg = (sum as f64) / 1000.0;
+ println!("Poisson average: {}", avg);
+ assert!((avg - 15.0).abs() < 0.5); // not 100% certain, but probable enough
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_poisson_invalid_lambda_zero() {
+ Poisson::new(0.0);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_poisson_invalid_lambda_neg() {
+ Poisson::new(-10.0);
+ }
+}
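For small `lambda` the sampler above uses Knuth's method: multiply uniform draws until the running product drops below `exp(-lambda)`; the number of draws, minus one, is Poisson(lambda) distributed. A standalone sketch of that branch (the `poisson_knuth` helper is hypothetical, for illustration only):

```
use rand::{thread_rng, Rng};

// Knuth's method, valid for small positive lambda.
fn poisson_knuth<R: Rng + ?Sized>(rng: &mut R, lambda: f64) -> u64 {
    let limit = (-lambda).exp();
    let mut count = 0;
    let mut p = 1.0;
    // Each iteration multiplies in one uniform draw from [0, 1).
    while p > limit {
        p *= rng.gen::<f64>();
        count += 1;
    }
    count - 1
}

fn main() {
    let mut rng = thread_rng();
    let samples: Vec<u64> = (0..5).map(|_| poisson_knuth(&mut rng, 2.0)).collect();
    println!("Poisson(2) samples: {:?}", samples);
}
```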
diff --git a/rand/src/distributions/range.rs b/rand/src/distributions/range.rs
deleted file mode 100644
index 935a00a..0000000
--- a/rand/src/distributions/range.rs
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Generating numbers between two others.
-
-// this is surprisingly complicated to be both generic & correct
-
-use core::num::Wrapping as w;
-
-use Rng;
-use distributions::{Sample, IndependentSample};
-
-/// Sample values uniformly between two bounds.
-///
-/// This gives a uniform distribution (assuming the RNG used to sample
-/// it is itself uniform & the `SampleRange` implementation for the
-/// given type is correct), even for edge cases like `low = 0u8`,
-/// `high = 170u8`, for which a naive modulo operation would return
-/// numbers less than 85 with double the probability to those greater
-/// than 85.
-///
-/// Types should attempt to sample in `[low, high)`, i.e., not
-/// including `high`, but this may be very difficult. All the
-/// primitive integer types satisfy this property, and the float types
-/// normally satisfy it, but rounding may mean `high` can occur.
-///
-/// # Example
-///
-/// ```rust
-/// use rand::distributions::{IndependentSample, Range};
-///
-/// fn main() {
-/// let between = Range::new(10, 10000);
-/// let mut rng = rand::thread_rng();
-/// let mut sum = 0;
-/// for _ in 0..1000 {
-/// sum += between.ind_sample(&mut rng);
-/// }
-/// println!("{}", sum);
-/// }
-/// ```
-#[derive(Clone, Copy, Debug)]
-pub struct Range<X> {
- low: X,
- range: X,
- accept_zone: X
-}
-
-impl<X: SampleRange + PartialOrd> Range<X> {
- /// Create a new `Range` instance that samples uniformly from
- /// `[low, high)`. Panics if `low >= high`.
- pub fn new(low: X, high: X) -> Range<X> {
- assert!(low < high, "Range::new called with `low >= high`");
- SampleRange::construct_range(low, high)
- }
-}
-
-impl<Sup: SampleRange> Sample<Sup> for Range<Sup> {
- #[inline]
- fn sample<R: Rng>(&mut self, rng: &mut R) -> Sup { self.ind_sample(rng) }
-}
-impl<Sup: SampleRange> IndependentSample<Sup> for Range<Sup> {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> Sup {
- SampleRange::sample_range(self, rng)
- }
-}
-
-/// The helper trait for types that have a sensible way to sample
-/// uniformly between two values. This should not be used directly,
-/// and is only to facilitate `Range`.
-pub trait SampleRange : Sized {
- /// Construct the `Range` object that `sample_range`
- /// requires. This should not ever be called directly, only via
- /// `Range::new`, which will check that `low < high`, so this
- /// function doesn't have to repeat the check.
- fn construct_range(low: Self, high: Self) -> Range<Self>;
-
- /// Sample a value from the given `Range` with the given `Rng` as
- /// a source of randomness.
- fn sample_range<R: Rng>(r: &Range<Self>, rng: &mut R) -> Self;
-}
-
-macro_rules! integer_impl {
- ($ty:ty, $unsigned:ident) => {
- impl SampleRange for $ty {
- // we play free and fast with unsigned vs signed here
- // (when $ty is signed), but that's fine, since the
- // contract of this macro is for $ty and $unsigned to be
- // "bit-equal", so casting between them is a no-op & a
- // bijection.
-
- #[inline]
- fn construct_range(low: $ty, high: $ty) -> Range<$ty> {
- let range = (w(high as $unsigned) - w(low as $unsigned)).0;
- let unsigned_max: $unsigned = ::core::$unsigned::MAX;
-
- // this is the largest number that fits into $unsigned
- // that `range` divides evenly, so, if we've sampled
- // `n` uniformly from this region, then `n % range` is
- // uniform in [0, range)
- let zone = unsigned_max - unsigned_max % range;
-
- Range {
- low: low,
- range: range as $ty,
- accept_zone: zone as $ty
- }
- }
-
- #[inline]
- fn sample_range<R: Rng>(r: &Range<$ty>, rng: &mut R) -> $ty {
- loop {
- // rejection sample
- let v = rng.gen::<$unsigned>();
- // until we find something that fits into the
- // region which r.range evenly divides (this will
- // be uniformly distributed)
- if v < r.accept_zone as $unsigned {
- // and return it, with some adjustments
- return (w(r.low) + w((v % r.range as $unsigned) as $ty)).0;
- }
- }
- }
- }
- }
-}
-
-integer_impl! { i8, u8 }
-integer_impl! { i16, u16 }
-integer_impl! { i32, u32 }
-integer_impl! { i64, u64 }
-#[cfg(feature = "i128_support")]
-integer_impl! { i128, u128 }
-integer_impl! { isize, usize }
-integer_impl! { u8, u8 }
-integer_impl! { u16, u16 }
-integer_impl! { u32, u32 }
-integer_impl! { u64, u64 }
-#[cfg(feature = "i128_support")]
-integer_impl! { u128, u128 }
-integer_impl! { usize, usize }
-
-macro_rules! float_impl {
- ($ty:ty) => {
- impl SampleRange for $ty {
- fn construct_range(low: $ty, high: $ty) -> Range<$ty> {
- Range {
- low: low,
- range: high - low,
- accept_zone: 0.0 // unused
- }
- }
- fn sample_range<R: Rng>(r: &Range<$ty>, rng: &mut R) -> $ty {
- r.low + r.range * rng.gen::<$ty>()
- }
- }
- }
-}
-
-float_impl! { f32 }
-float_impl! { f64 }
-
-#[cfg(test)]
-mod tests {
- use distributions::{Sample, IndependentSample};
- use super::Range as Range;
-
- #[should_panic]
- #[test]
- fn test_range_bad_limits_equal() {
- Range::new(10, 10);
- }
- #[should_panic]
- #[test]
- fn test_range_bad_limits_flipped() {
- Range::new(10, 5);
- }
-
- #[test]
- fn test_integers() {
- let mut rng = ::test::rng();
- macro_rules! t {
- ($($ty:ident),*) => {{
- $(
- let v: &[($ty, $ty)] = &[(0, 10),
- (10, 127),
- (::core::$ty::MIN, ::core::$ty::MAX)];
- for &(low, high) in v.iter() {
- let mut sampler: Range<$ty> = Range::new(low, high);
- for _ in 0..1000 {
- let v = sampler.sample(&mut rng);
- assert!(low <= v && v < high);
- let v = sampler.ind_sample(&mut rng);
- assert!(low <= v && v < high);
- }
- }
- )*
- }}
- }
- #[cfg(not(feature = "i128_support"))]
- t!(i8, i16, i32, i64, isize,
- u8, u16, u32, u64, usize);
- #[cfg(feature = "i128_support")]
- t!(i8, i16, i32, i64, i128, isize,
- u8, u16, u32, u64, u128, usize);
- }
-
- #[test]
- fn test_floats() {
- let mut rng = ::test::rng();
- macro_rules! t {
- ($($ty:ty),*) => {{
- $(
- let v: &[($ty, $ty)] = &[(0.0, 100.0),
- (-1e35, -1e25),
- (1e-35, 1e-25),
- (-1e35, 1e35)];
- for &(low, high) in v.iter() {
- let mut sampler: Range<$ty> = Range::new(low, high);
- for _ in 0..1000 {
- let v = sampler.sample(&mut rng);
- assert!(low <= v && v < high);
- let v = sampler.ind_sample(&mut rng);
- assert!(low <= v && v < high);
- }
- }
- )*
- }}
- }
-
- t!(f32, f64)
- }
-
-}
diff --git a/rand/src/distributions/triangular.rs b/rand/src/distributions/triangular.rs
new file mode 100644
index 0000000..a6eef5c
--- /dev/null
+++ b/rand/src/distributions/triangular.rs
@@ -0,0 +1,86 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//! The triangular distribution.
+
+use Rng;
+use distributions::{Distribution, Standard};
+
+/// The triangular distribution.
+///
+/// # Example
+///
+/// ```rust
+/// use rand::distributions::{Triangular, Distribution};
+///
+/// let d = Triangular::new(0., 5., 2.5);
+/// let v = d.sample(&mut rand::thread_rng());
+/// println!("{} is from a triangular distribution", v);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Triangular {
+ min: f64,
+ max: f64,
+ mode: f64,
+}
+
+impl Triangular {
+ /// Construct a new `Triangular` with minimum `min`, maximum `max` and mode
+ /// `mode`.
+ ///
+ /// # Panics
+ ///
+    /// If `max < mode`, `mode < min` or `max == min`.
+ ///
+ #[inline]
+ pub fn new(min: f64, max: f64, mode: f64) -> Triangular {
+ assert!(max >= mode);
+ assert!(mode >= min);
+ assert!(max != min);
+ Triangular { min, max, mode }
+ }
+}
+
+impl Distribution<f64> for Triangular {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let f: f64 = rng.sample(Standard);
+ let diff_mode_min = self.mode - self.min;
+ let diff_max_min = self.max - self.min;
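+        // Inverse-CDF sampling: `f * diff_max_min < diff_mode_min` is
+        // equivalent to `f < F(mode)`, where `F` is the triangular CDF, so the
+        // left branch inverts the rising part of the density and the right
+        // branch inverts the falling part.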
+ if f * diff_max_min < diff_mode_min {
+ self.min + (f * diff_max_min * diff_mode_min).sqrt()
+ } else {
+ self.max - ((1. - f) * diff_max_min * (self.max - self.mode)).sqrt()
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use distributions::Distribution;
+ use super::Triangular;
+
+ #[test]
+ fn test_new() {
+ for &(min, max, mode) in &[
+ (-1., 1., 0.), (1., 2., 1.), (5., 25., 25.), (1e-5, 1e5, 1e-3),
+ (0., 1., 0.9), (-4., -0.5, -2.), (-13.039, 8.41, 1.17),
+ ] {
+ println!("{} {} {}", min, max, mode);
+ let _ = Triangular::new(min, max, mode);
+ }
+ }
+
+ #[test]
+ fn test_sample() {
+ let norm = Triangular::new(0., 1., 0.5);
+ let mut rng = ::test::rng(1);
+ for _ in 0..1000 {
+ norm.sample(&mut rng);
+ }
+ }
+}
diff --git a/rand/src/distributions/uniform.rs b/rand/src/distributions/uniform.rs
new file mode 100644
index 0000000..5fb89e3
--- /dev/null
+++ b/rand/src/distributions/uniform.rs
@@ -0,0 +1,1297 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2017 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A distribution uniformly sampling numbers within a given range.
+//!
+//! [`Uniform`] is the standard distribution to sample uniformly from a range;
+//! e.g. `Uniform::new_inclusive(1, 6)` can sample integers from 1 to 6, like a
+//! standard die. [`Rng::gen_range`] supports any type supported by
+//! [`Uniform`].
+//!
+//! This distribution is provided with support for several primitive types
+//! (all integer and floating-point types) as well as `std::time::Duration`,
+//! and supports extension to user-defined types via a type-specific *back-end*
+//! implementation.
+//!
+//! The types [`UniformInt`], [`UniformFloat`] and [`UniformDuration`] are the
+//! back-ends supporting sampling from primitive integer and floating-point
+//! ranges as well as from `std::time::Duration`; these types do not normally
+//! need to be used directly (unless implementing a derived back-end).
+//!
+//! # Example usage
+//!
+//! ```
+//! use rand::{Rng, thread_rng};
+//! use rand::distributions::Uniform;
+//!
+//! let mut rng = thread_rng();
+//! let side = Uniform::new(-10.0, 10.0);
+//!
+//! // sample between 1 and 10 points
+//! for _ in 0..rng.gen_range(1, 11) {
+//!     // sample a point from the square with sides -10 to 10 in two dimensions
+//! let (x, y) = (rng.sample(side), rng.sample(side));
+//! println!("Point: {}, {}", x, y);
+//! }
+//! ```
+//!
+//! # Extending `Uniform` to support a custom type
+//!
+//! To extend [`Uniform`] to support your own types, write a back-end which
+//! implements the [`UniformSampler`] trait, then implement the [`SampleUniform`]
+//! helper trait to "register" your back-end. See the `MyF32` example below.
+//!
+//! At a minimum, the back-end needs to store any parameters needed for sampling
+//! (e.g. the target range) and implement `new`, `new_inclusive` and `sample`.
+//! Those methods should include an assert to check the range is valid (i.e.
+//! `low < high`). The example below merely wraps another back-end.
+//!
+//! The `new`, `new_inclusive` and `sample_single` functions use arguments of
+//! type `SampleBorrow<X>` to support passing in values by reference or by
+//! value. In the implementation of these functions, you can either use the
+//! reference returned by [`SampleBorrow::borrow`], or copy or clone the value,
+//! whichever is appropriate for your type.
+//!
+//! ```
+//! use rand::prelude::*;
+//! use rand::distributions::uniform::{Uniform, SampleUniform,
+//! UniformSampler, UniformFloat, SampleBorrow};
+//!
+//! struct MyF32(f32);
+//!
+//! #[derive(Clone, Copy, Debug)]
+//! struct UniformMyF32 {
+//! inner: UniformFloat<f32>,
+//! }
+//!
+//! impl UniformSampler for UniformMyF32 {
+//! type X = MyF32;
+//! fn new<B1, B2>(low: B1, high: B2) -> Self
+//! where B1: SampleBorrow<Self::X> + Sized,
+//! B2: SampleBorrow<Self::X> + Sized
+//! {
+//! UniformMyF32 {
+//! inner: UniformFloat::<f32>::new(low.borrow().0, high.borrow().0),
+//! }
+//! }
+//! fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
+//! where B1: SampleBorrow<Self::X> + Sized,
+//! B2: SampleBorrow<Self::X> + Sized
+//! {
+//! UniformSampler::new(low, high)
+//! }
+//! fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+//! MyF32(self.inner.sample(rng))
+//! }
+//! }
+//!
+//! impl SampleUniform for MyF32 {
+//! type Sampler = UniformMyF32;
+//! }
+//!
+//! let (low, high) = (MyF32(17.0f32), MyF32(22.0f32));
+//! let uniform = Uniform::new(low, high);
+//! let x = uniform.sample(&mut thread_rng());
+//! ```
+//!
+//! [`Uniform`]: struct.Uniform.html
+//! [`Rng::gen_range`]: ../../trait.Rng.html#method.gen_range
+//! [`SampleUniform`]: trait.SampleUniform.html
+//! [`UniformSampler`]: trait.UniformSampler.html
+//! [`UniformInt`]: struct.UniformInt.html
+//! [`UniformFloat`]: struct.UniformFloat.html
+//! [`UniformDuration`]: struct.UniformDuration.html
+//! [`SampleBorrow::borrow`]: trait.SampleBorrow.html#method.borrow
+
+#[cfg(feature = "std")]
+use std::time::Duration;
+#[cfg(all(not(feature = "std"), rust_1_25))]
+use core::time::Duration;
+
+use Rng;
+use distributions::Distribution;
+use distributions::float::IntoFloat;
+use distributions::utils::{WideningMultiply, FloatSIMDUtils, FloatAsSIMD, BoolAsSIMD};
+
+#[cfg(not(feature = "std"))]
+#[allow(unused_imports)] // rustc doesn't detect that this is actually used
+use distributions::utils::Float;
+
+
+#[cfg(feature="simd_support")]
+use packed_simd::*;
+
+/// Sample values uniformly between two bounds.
+///
+/// [`Uniform::new`] and [`Uniform::new_inclusive`] construct a uniform
+/// distribution sampling from the given range; these functions may do extra
+/// work up front to make sampling of multiple values faster.
+///
+/// When sampling from a constant range, many calculations can happen at
+/// compile-time and all methods should be fast; for floating-point ranges and
+/// the full range of integer types this should have comparable performance to
+/// the `Standard` distribution.
+///
+/// Steps are taken to avoid bias which might be present in naive
+/// implementations; for example `rng.gen::<u8>() % 170` samples from the range
+/// `[0, 169]` but is twice as likely to select numbers less than 85 as other
+/// values. Further, the implementations here give more weight to the high bits
+/// generated by the RNG than to the low bits, since with some RNGs the low bits
+/// are of lower quality than the high bits.
+///
+/// Implementations must sample in the `[low, high)` range for
+/// `Uniform::new(low, high)`, i.e., excluding `high`. In particular, care must
+/// be taken to ensure that rounding never results in values `< low` or `>= high`.
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{Distribution, Uniform};
+///
+/// fn main() {
+/// let between = Uniform::from(10..10000);
+/// let mut rng = rand::thread_rng();
+/// let mut sum = 0;
+/// for _ in 0..1000 {
+/// sum += between.sample(&mut rng);
+/// }
+/// println!("{}", sum);
+/// }
+/// ```
+///
+/// [`Uniform::new`]: struct.Uniform.html#method.new
+/// [`Uniform::new_inclusive`]: struct.Uniform.html#method.new_inclusive
+/// [`new`]: struct.Uniform.html#method.new
+/// [`new_inclusive`]: struct.Uniform.html#method.new_inclusive
+#[derive(Clone, Copy, Debug)]
+pub struct Uniform<X: SampleUniform> {
+ inner: X::Sampler,
+}
+
+impl<X: SampleUniform> Uniform<X> {
+    /// Create a new `Uniform` instance which samples uniformly from the
+    /// half-open range `[low, high)` (excluding `high`). Panics if `low >= high`.
+ pub fn new<B1, B2>(low: B1, high: B2) -> Uniform<X>
+ where B1: SampleBorrow<X> + Sized,
+ B2: SampleBorrow<X> + Sized
+ {
+ Uniform { inner: X::Sampler::new(low, high) }
+ }
+
+ /// Create a new `Uniform` instance which samples uniformly from the closed
+ /// range `[low, high]` (inclusive). Panics if `low > high`.
+ pub fn new_inclusive<B1, B2>(low: B1, high: B2) -> Uniform<X>
+ where B1: SampleBorrow<X> + Sized,
+ B2: SampleBorrow<X> + Sized
+ {
+ Uniform { inner: X::Sampler::new_inclusive(low, high) }
+ }
+}
+
+impl<X: SampleUniform> Distribution<X> for Uniform<X> {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> X {
+ self.inner.sample(rng)
+ }
+}
+
+/// Helper trait for creating objects using the correct implementation of
+/// [`UniformSampler`] for the sampling type.
+///
+/// See the [module documentation] on how to implement [`Uniform`] range
+/// sampling for a custom type.
+///
+/// [`UniformSampler`]: trait.UniformSampler.html
+/// [module documentation]: index.html
+/// [`Uniform`]: struct.Uniform.html
+pub trait SampleUniform: Sized {
+ /// The `UniformSampler` implementation supporting type `X`.
+ type Sampler: UniformSampler<X = Self>;
+}
+
+/// Helper trait handling actual uniform sampling.
+///
+/// See the [module documentation] on how to implement [`Uniform`] range
+/// sampling for a custom type.
+///
+/// Implementation of [`sample_single`] is optional, and is only useful when
+/// the implementation can be faster than `Self::new(low, high).sample(rng)`.
+///
+/// [module documentation]: index.html
+/// [`Uniform`]: struct.Uniform.html
+/// [`sample_single`]: trait.UniformSampler.html#method.sample_single
+pub trait UniformSampler: Sized {
+ /// The type sampled by this implementation.
+ type X;
+
+ /// Construct self, with inclusive lower bound and exclusive upper bound
+ /// `[low, high)`.
+ ///
+ /// Usually users should not call this directly but instead use
+ /// `Uniform::new`, which asserts that `low < high` before calling this.
+ fn new<B1, B2>(low: B1, high: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized;
+
+ /// Construct self, with inclusive bounds `[low, high]`.
+ ///
+ /// Usually users should not call this directly but instead use
+ /// `Uniform::new_inclusive`, which asserts that `low <= high` before
+ /// calling this.
+ fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized;
+
+ /// Sample a value.
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X;
+
+ /// Sample a single value uniformly from a range with inclusive lower bound
+ /// and exclusive upper bound `[low, high)`.
+ ///
+ /// Usually users should not call this directly but instead use
+ /// `Uniform::sample_single`, which asserts that `low < high` before calling
+ /// this.
+ ///
+ /// Via this method, implementations can provide a method optimized for
+ /// sampling only a single value from the specified range. The default
+ /// implementation simply calls `UniformSampler::new` then `sample` on the
+ /// result.
+ fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R)
+ -> Self::X
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let uniform: Self = UniformSampler::new(low, high);
+ uniform.sample(rng)
+ }
+}
+
+impl<X: SampleUniform> From<::core::ops::Range<X>> for Uniform<X> {
+ fn from(r: ::core::ops::Range<X>) -> Uniform<X> {
+ Uniform::new(r.start, r.end)
+ }
+}
+
+#[cfg(rust_1_27)]
+impl<X: SampleUniform> From<::core::ops::RangeInclusive<X>> for Uniform<X> {
+ fn from(r: ::core::ops::RangeInclusive<X>) -> Uniform<X> {
+ Uniform::new_inclusive(r.start(), r.end())
+ }
+}
+
+/// Helper trait similar to [`Borrow`] but implemented
+/// only for SampleUniform and references to SampleUniform in
+/// order to resolve ambiguity issues.
+///
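+/// This is what allows range bounds to be given either by value or by
+/// reference; a minimal sketch (the values here are only illustrative):
+///
+/// ```
+/// use rand::distributions::Uniform;
+///
+/// let low = 1u32;
+/// let by_value = Uniform::new(low, 10u32);
+/// let by_reference = Uniform::new(&low, &10u32);
+/// # let _ = (by_value, by_reference);
+/// ```
+///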
+/// [`Borrow`]: https://doc.rust-lang.org/std/borrow/trait.Borrow.html
+pub trait SampleBorrow<Borrowed> {
+ /// Immutably borrows from an owned value. See [`Borrow::borrow`]
+ ///
+ /// [`Borrow::borrow`]: https://doc.rust-lang.org/std/borrow/trait.Borrow.html#tymethod.borrow
+ fn borrow(&self) -> &Borrowed;
+}
+impl<Borrowed> SampleBorrow<Borrowed> for Borrowed where Borrowed: SampleUniform {
+ #[inline(always)]
+ fn borrow(&self) -> &Borrowed { self }
+}
+impl<'a, Borrowed> SampleBorrow<Borrowed> for &'a Borrowed where Borrowed: SampleUniform {
+ #[inline(always)]
+ fn borrow(&self) -> &Borrowed { *self }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// What follows are all back-ends.
+
+
+/// The back-end implementing [`UniformSampler`] for integer types.
+///
+/// Unless you are implementing [`UniformSampler`] for your own type, this type
+/// should not be used directly, use [`Uniform`] instead.
+///
+/// # Implementation notes
+///
+/// For a closed range, the number of possible numbers we should generate is
+/// `range = (high - low + 1)`. It is not possible to end up with a uniform
+/// distribution if we map *all* the random integers that can be generated to
+/// this range. We have to map integers from a `zone` that is a multiple of the
+/// range. The rest of the integers, that cause a bias, are rejected.
+///
+/// The problem with `range` is that to cover the full range of the type, it has
+/// to store `unsigned_max + 1`, which can't be represented. But if the range
+/// covers the full range of the type, no modulus is needed. A range of size 0
+/// can't exist, so we use that to represent this special case. Wrapping
+/// arithmetic even makes representing `unsigned_max + 1` as 0 simple.
+///
+/// We don't calculate `zone` directly, but first calculate the number of
+/// integers to reject. Conceptually this is
+/// `ints_to_reject = (unsigned_max + 1) % range;`
+/// but since `unsigned_max + 1` does not fit in the type, we instead compute
+/// `ints_to_reject = (unsigned_max - range + 1) % range;`
+///
+/// The smallest integer type PRNGs generate is `u32`. That is why for small integer
+/// sizes (`i8`/`u8` and `i16`/`u16`) there is an optimization: don't pick the
+/// largest zone that can fit in the small type, but pick the largest zone that
+/// can fit in an `u32`. `ints_to_reject` is always less than half the size of
+/// the small integer. This means the first bit of `zone` is always 1, and so
+/// are all the other preceding bits of a larger integer. The easiest way to
+/// grow the `zone` for the larger type is to simply sign extend it.
+///
+/// An alternative to using a modulus is widening multiply: After a widening
+/// multiply by `range`, the result is in the high word. Then comparing the low
+/// word against `zone` makes sure our distribution is uniform.
+///
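+/// As a rough illustration of the arithmetic above (the concrete numbers are
+/// only an example, not part of the API), the zone for a range of size 100
+/// sampled from a `u32` can be computed as:
+///
+/// ```
+/// let range: u32 = 100;
+/// let unsigned_max = u32::max_value();
+/// // values above `zone` would bias the result and are rejected
+/// let ints_to_reject = (unsigned_max - range + 1) % range;
+/// let zone = unsigned_max - ints_to_reject;
+/// // `zone + 1` is the largest multiple of `range` representable in the type
+/// assert_eq!((zone + 1) % range, 0);
+/// ```
+///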
+/// [`UniformSampler`]: trait.UniformSampler.html
+/// [`Uniform`]: struct.Uniform.html
+#[derive(Clone, Copy, Debug)]
+pub struct UniformInt<X> {
+ low: X,
+ range: X,
+ zone: X,
+}
+
+macro_rules! uniform_int_impl {
+ ($ty:ty, $signed:ty, $unsigned:ident,
+ $i_large:ident, $u_large:ident) => {
+ impl SampleUniform for $ty {
+ type Sampler = UniformInt<$ty>;
+ }
+
+ impl UniformSampler for UniformInt<$ty> {
+ // We play free and fast with unsigned vs signed here
+ // (when $ty is signed), but that's fine, since the
+ // contract of this macro is for $ty and $unsigned to be
+ // "bit-equal", so casting between them is a no-op.
+
+ type X = $ty;
+
+ #[inline] // if the range is constant, this helps LLVM to do the
+ // calculations at compile-time.
+ fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low < high, "Uniform::new called with `low >= high`");
+ UniformSampler::new_inclusive(low, high - 1)
+ }
+
+ #[inline] // if the range is constant, this helps LLVM to do the
+ // calculations at compile-time.
+ fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low <= high,
+ "Uniform::new_inclusive called with `low > high`");
+ let unsigned_max = ::core::$unsigned::MAX;
+
+ let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned;
+ let ints_to_reject =
+ if range > 0 {
+ (unsigned_max - range + 1) % range
+ } else {
+ 0
+ };
+ let zone = unsigned_max - ints_to_reject;
+
+ UniformInt {
+ low: low,
+ // These are really $unsigned values, but store as $ty:
+ range: range as $ty,
+ zone: zone as $ty
+ }
+ }
+
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ let range = self.range as $unsigned as $u_large;
+ if range > 0 {
+ // Grow `zone` to fit a type of at least 32 bits, by
+ // sign-extending it (the first bit is always 1, so are all
+ // the preceding bits of the larger type).
+ // For types that already have the right size, all the
+ // casting is a no-op.
+ let zone = self.zone as $signed as $i_large as $u_large;
+ loop {
+ let v: $u_large = rng.gen();
+ let (hi, lo) = v.wmul(range);
+ if lo <= zone {
+ return self.low.wrapping_add(hi as $ty);
+ }
+ }
+ } else {
+ // Sample from the entire integer range.
+ rng.gen()
+ }
+ }
+
+ fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R)
+ -> Self::X
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low < high,
+ "Uniform::sample_single called with low >= high");
+ let range = high.wrapping_sub(low) as $unsigned as $u_large;
+ let zone =
+ if ::core::$unsigned::MAX <= ::core::u16::MAX as $unsigned {
+ // Using a modulus is faster than the approximation for
+ // i8 and i16. I suppose we trade the cost of one
+ // modulus for near-perfect branch prediction.
+ let unsigned_max: $u_large = ::core::$u_large::MAX;
+ let ints_to_reject = (unsigned_max - range + 1) % range;
+ unsigned_max - ints_to_reject
+ } else {
+ // conservative but fast approximation
+ range << range.leading_zeros()
+ };
+
+ loop {
+ let v: $u_large = rng.gen();
+ let (hi, lo) = v.wmul(range);
+ if lo <= zone {
+ return low.wrapping_add(hi as $ty);
+ }
+ }
+ }
+ }
+ }
+}
+
+uniform_int_impl! { i8, i8, u8, i32, u32 }
+uniform_int_impl! { i16, i16, u16, i32, u32 }
+uniform_int_impl! { i32, i32, u32, i32, u32 }
+uniform_int_impl! { i64, i64, u64, i64, u64 }
+#[cfg(rust_1_26)]
+uniform_int_impl! { i128, i128, u128, u128, u128 }
+uniform_int_impl! { isize, isize, usize, isize, usize }
+uniform_int_impl! { u8, i8, u8, i32, u32 }
+uniform_int_impl! { u16, i16, u16, i32, u32 }
+uniform_int_impl! { u32, i32, u32, i32, u32 }
+uniform_int_impl! { u64, i64, u64, i64, u64 }
+uniform_int_impl! { usize, isize, usize, isize, usize }
+#[cfg(rust_1_26)]
+uniform_int_impl! { u128, u128, u128, i128, u128 }
+
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+macro_rules! uniform_simd_int_impl {
+ ($ty:ident, $unsigned:ident, $u_scalar:ident) => {
+ // The "pick the largest zone that can fit in an `u32`" optimization
+ // is less useful here. Multiple lanes complicate things, we don't
+ // know the PRNG's minimal output size, and casting to a larger vector
+ // is generally a bad idea for SIMD performance. The user can still
+ // implement it manually.
+
+ // TODO: look into `Uniform::<u32x4>::new(0u32, 100)` functionality
+ // perhaps `impl SampleUniform for $u_scalar`?
+ impl SampleUniform for $ty {
+ type Sampler = UniformInt<$ty>;
+ }
+
+ impl UniformSampler for UniformInt<$ty> {
+ type X = $ty;
+
+ #[inline] // if the range is constant, this helps LLVM to do the
+ // calculations at compile-time.
+ fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low.lt(high).all(), "Uniform::new called with `low >= high`");
+ UniformSampler::new_inclusive(low, high - 1)
+ }
+
+ #[inline] // if the range is constant, this helps LLVM to do the
+ // calculations at compile-time.
+ fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low.le(high).all(),
+ "Uniform::new_inclusive called with `low > high`");
+ let unsigned_max = ::core::$u_scalar::MAX;
+
+ // NOTE: these may need to be replaced with explicitly
+ // wrapping operations if `packed_simd` changes
+ let range: $unsigned = ((high - low) + 1).cast();
+ // `% 0` will panic at runtime.
+ let not_full_range = range.gt($unsigned::splat(0));
+ // replacing 0 with `unsigned_max` allows a faster `select`
+ // with bitwise OR
+ let modulo = not_full_range.select(range, $unsigned::splat(unsigned_max));
+ // wrapping addition
+ let ints_to_reject = (unsigned_max - range + 1) % modulo;
+ // When `range` is 0, `lo` of `v.wmul(range)` will always be
+ // zero which means only one sample is needed.
+ let zone = unsigned_max - ints_to_reject;
+
+ UniformInt {
+ low: low,
+ // These are really $unsigned values, but store as $ty:
+ range: range.cast(),
+ zone: zone.cast(),
+ }
+ }
+
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ let range: $unsigned = self.range.cast();
+ let zone: $unsigned = self.zone.cast();
+
+ // This might seem very slow, generating a whole new
+ // SIMD vector for every sample rejection. For most uses
+ // though, the chance of rejection is small and provides good
+ // general performance. With multiple lanes, that chance is
+ // multiplied. To mitigate this, we replace only the lanes of
+ // the vector which fail, iteratively reducing the chance of
+ // rejection. The replacement method does however add a little
+ // overhead. Benchmarking or calculating probabilities might
+ // reveal contexts where this replacement method is slower.
+ let mut v: $unsigned = rng.gen();
+ loop {
+ let (hi, lo) = v.wmul(range);
+ let mask = lo.le(zone);
+ if mask.all() {
+ let hi: $ty = hi.cast();
+ // wrapping addition
+ let result = self.low + hi;
+ // `select` here compiles to a blend operation
+ // When `range.eq(0).none()` the compare and blend
+ // operations are avoided.
+ let v: $ty = v.cast();
+ return range.gt($unsigned::splat(0)).select(result, v);
+ }
+ // Replace only the failing lanes
+ v = mask.select(v, rng.gen());
+ }
+ }
+ }
+ };
+
+ // bulk implementation
+ ($(($unsigned:ident, $signed:ident),)+ $u_scalar:ident) => {
+ $(
+ uniform_simd_int_impl!($unsigned, $unsigned, $u_scalar);
+ uniform_simd_int_impl!($signed, $unsigned, $u_scalar);
+ )+
+ };
+}
+
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+uniform_simd_int_impl! {
+ (u64x2, i64x2),
+ (u64x4, i64x4),
+ (u64x8, i64x8),
+ u64
+}
+
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+uniform_simd_int_impl! {
+ (u32x2, i32x2),
+ (u32x4, i32x4),
+ (u32x8, i32x8),
+ (u32x16, i32x16),
+ u32
+}
+
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+uniform_simd_int_impl! {
+ (u16x2, i16x2),
+ (u16x4, i16x4),
+ (u16x8, i16x8),
+ (u16x16, i16x16),
+ (u16x32, i16x32),
+ u16
+}
+
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+uniform_simd_int_impl! {
+ (u8x2, i8x2),
+ (u8x4, i8x4),
+ (u8x8, i8x8),
+ (u8x16, i8x16),
+ (u8x32, i8x32),
+ (u8x64, i8x64),
+ u8
+}
+
+
+/// The back-end implementing [`UniformSampler`] for floating-point types.
+///
+/// Unless you are implementing [`UniformSampler`] for your own type, this type
+/// should not be used directly, use [`Uniform`] instead.
+///
+/// # Implementation notes
+///
+/// Instead of generating a float in the `[0, 1)` range using [`Standard`], the
+/// `UniformFloat` implementation converts the raw output of the PRNG directly.
+/// This way one or two steps can be optimized out.
+///
+/// The floats are first converted to a value in the `[1, 2)` interval using a
+/// transmute-based method, and then mapped to the expected range with a
+/// multiply and addition. Values produced this way have the equivalent of 22
+/// bits of random digits for an `f32`, and 52 for an `f64`.
+///
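+/// As a rough sketch of this conversion for `f64` (using `f64::from_bits`
+/// here purely for illustration; the real implementation goes through the
+/// `IntoFloat` helper):
+///
+/// ```
+/// let x: u64 = 0x1234_5678_9abc_def0; // stand-in for raw RNG output
+/// // keep 52 random mantissa bits and set the exponent so the value is in [1, 2)
+/// let bits = (x >> (64 - 52)) | (1023u64 << 52);
+/// let value1_2 = f64::from_bits(bits);
+/// assert!(1.0 <= value1_2 && value1_2 < 2.0);
+/// // mapping to [low, high) is then a multiply and an addition
+/// ```
+///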
+/// [`UniformSampler`]: trait.UniformSampler.html
+/// [`new`]: trait.UniformSampler.html#tymethod.new
+/// [`new_inclusive`]: trait.UniformSampler.html#tymethod.new_inclusive
+/// [`Uniform`]: struct.Uniform.html
+/// [`Standard`]: ../struct.Standard.html
+#[derive(Clone, Copy, Debug)]
+pub struct UniformFloat<X> {
+ low: X,
+ scale: X,
+}
+
+macro_rules! uniform_float_impl {
+ ($ty:ty, $uty:ident, $f_scalar:ident, $u_scalar:ident, $bits_to_discard:expr) => {
+ impl SampleUniform for $ty {
+ type Sampler = UniformFloat<$ty>;
+ }
+
+ impl UniformSampler for UniformFloat<$ty> {
+ type X = $ty;
+
+ fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low.all_lt(high),
+ "Uniform::new called with `low >= high`");
+ assert!(low.all_finite() && high.all_finite(),
+ "Uniform::new called with non-finite boundaries");
+ let max_rand = <$ty>::splat((::core::$u_scalar::MAX >> $bits_to_discard)
+ .into_float_with_exponent(0) - 1.0);
+
+ let mut scale = high - low;
+
+ loop {
+ let mask = (scale * max_rand + low).ge_mask(high);
+ if mask.none() {
+ break;
+ }
+ scale = scale.decrease_masked(mask);
+ }
+
+ debug_assert!(<$ty>::splat(0.0).all_le(scale));
+
+ UniformFloat { low, scale }
+ }
+
+ fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low.all_le(high),
+ "Uniform::new_inclusive called with `low > high`");
+ assert!(low.all_finite() && high.all_finite(),
+ "Uniform::new_inclusive called with non-finite boundaries");
+ let max_rand = <$ty>::splat((::core::$u_scalar::MAX >> $bits_to_discard)
+ .into_float_with_exponent(0) - 1.0);
+
+ let mut scale = (high - low) / max_rand;
+
+ loop {
+ let mask = (scale * max_rand + low).gt_mask(high);
+ if mask.none() {
+ break;
+ }
+ scale = scale.decrease_masked(mask);
+ }
+
+ debug_assert!(<$ty>::splat(0.0).all_le(scale));
+
+ UniformFloat { low, scale }
+ }
+
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ // Generate a value in the range [1, 2)
+ let value1_2 = (rng.gen::<$uty>() >> $bits_to_discard)
+ .into_float_with_exponent(0);
+
+ // Get a value in the range [0, 1) in order to avoid
+ // overflowing into infinity when multiplying with scale
+ let value0_1 = value1_2 - 1.0;
+
+ // We don't use `f64::mul_add`, because it is not available with
+ // `no_std`. Furthermore, it is slower for some targets (but
+ // faster for others). However, the order of multiplication and
+ // addition is important, because on some platforms (e.g. ARM)
+ // it will be optimized to a single (non-FMA) instruction.
+ value0_1 * self.scale + self.low
+ }
+
+ #[inline]
+ fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R)
+ -> Self::X
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low.all_lt(high),
+ "Uniform::sample_single called with low >= high");
+ let mut scale = high - low;
+
+ loop {
+ // Generate a value in the range [1, 2)
+ let value1_2 = (rng.gen::<$uty>() >> $bits_to_discard)
+ .into_float_with_exponent(0);
+
+ // Get a value in the range [0, 1) in order to avoid
+ // overflowing into infinity when multiplying with scale
+ let value0_1 = value1_2 - 1.0;
+
+ // Doing multiply before addition allows some architectures
+ // to use a single instruction.
+ let res = value0_1 * scale + low;
+
+ debug_assert!(low.all_le(res) || !scale.all_finite());
+ if res.all_lt(high) {
+ return res;
+ }
+
+ // This handles a number of edge cases.
+ // * `low` or `high` is NaN. In this case `scale` and
+ // `res` are going to end up as NaN.
+ // * `low` is negative infinity and `high` is finite.
+ // `scale` is going to be infinite and `res` will be
+ // NaN.
+ // * `high` is positive infinity and `low` is finite.
+ // `scale` is going to be infinite and `res` will
+ // be infinite or NaN (if value0_1 is 0).
+ // * `low` is negative infinity and `high` is positive
+ // infinity. `scale` will be infinite and `res` will
+ // be NaN.
+ // * `low` and `high` are finite, but `high - low`
+ // overflows to infinite. `scale` will be infinite
+ // and `res` will be infinite or NaN (if value0_1 is 0).
+ // So if `high` or `low` are non-finite, we are guaranteed
+ // to fail the `res < high` check above and end up here.
+ //
+ // While we technically should check for non-finite `low`
+ // and `high` before entering the loop, by doing the checks
+ // here instead, we allow the common case to avoid these
+ // checks. But we are still guaranteed that if `low` or
+ // `high` are non-finite we'll end up here and can do the
+ // appropriate checks.
+ //
+ // Likewise `high - low` overflowing to infinity is also
+ // rare, so handle it here after the common case.
+ let mask = !scale.finite_mask();
+ if mask.any() {
+ assert!(low.all_finite() && high.all_finite(),
+ "Uniform::sample_single called with non-finite boundaries");
+ scale = scale.decrease_masked(mask);
+ }
+ }
+ }
+ }
+ }
+}
+
+uniform_float_impl! { f32, u32, f32, u32, 32 - 23 }
+uniform_float_impl! { f64, u64, f64, u64, 64 - 52 }
+
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f32x2, u32x2, f32, u32, 32 - 23 }
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f32x4, u32x4, f32, u32, 32 - 23 }
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f32x8, u32x8, f32, u32, 32 - 23 }
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f32x16, u32x16, f32, u32, 32 - 23 }
+
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f64x2, u64x2, f64, u64, 64 - 52 }
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f64x4, u64x4, f64, u64, 64 - 52 }
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f64x8, u64x8, f64, u64, 64 - 52 }
+
+
+
+/// The back-end implementing [`UniformSampler`] for `Duration`.
+///
+/// Unless you are implementing [`UniformSampler`] for your own types, this type
+/// should not be used directly, use [`Uniform`] instead.
+///
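+/// A minimal usage sketch via [`Uniform`] (assuming the `std` feature is
+/// enabled, so that `std::time::Duration` is available):
+///
+/// ```
+/// use std::time::Duration;
+/// use rand::distributions::{Distribution, Uniform};
+///
+/// let between = Uniform::new(Duration::new(0, 0), Duration::new(5, 0));
+/// let d = between.sample(&mut rand::thread_rng());
+/// assert!(d < Duration::new(5, 0));
+/// ```
+///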
+/// [`UniformSampler`]: trait.UniformSampler.html
+/// [`Uniform`]: struct.Uniform.html
+#[cfg(any(feature = "std", rust_1_25))]
+#[derive(Clone, Copy, Debug)]
+pub struct UniformDuration {
+ mode: UniformDurationMode,
+ offset: u32,
+}
+
+#[cfg(any(feature = "std", rust_1_25))]
+#[derive(Debug, Copy, Clone)]
+enum UniformDurationMode {
+ Small {
+ secs: u64,
+ nanos: Uniform<u32>,
+ },
+ Medium {
+ nanos: Uniform<u64>,
+ },
+ Large {
+ max_secs: u64,
+ max_nanos: u32,
+ secs: Uniform<u64>,
+ }
+}
+
+#[cfg(any(feature = "std", rust_1_25))]
+impl SampleUniform for Duration {
+ type Sampler = UniformDuration;
+}
+
+#[cfg(any(feature = "std", rust_1_25))]
+impl UniformSampler for UniformDuration {
+ type X = Duration;
+
+ #[inline]
+ fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low < high, "Uniform::new called with `low >= high`");
+ UniformDuration::new_inclusive(low, high - Duration::new(0, 1))
+ }
+
+ #[inline]
+ fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low <= high, "Uniform::new_inclusive called with `low > high`");
+
+ let low_s = low.as_secs();
+ let low_n = low.subsec_nanos();
+ let mut high_s = high.as_secs();
+ let mut high_n = high.subsec_nanos();
+
+ if high_n < low_n {
+ high_s = high_s - 1;
+ high_n = high_n + 1_000_000_000;
+ }
+
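+        // Pick a sampling strategy: if both bounds fall in the same second we
+        // only need to sample the nanosecond part; if the upper bound expressed
+        // in nanoseconds fits in a `u64` we can sample a single integer;
+        // otherwise sample seconds and nanoseconds separately, rejecting
+        // combinations past the upper bound.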
+ let mode = if low_s == high_s {
+ UniformDurationMode::Small {
+ secs: low_s,
+ nanos: Uniform::new_inclusive(low_n, high_n),
+ }
+ } else {
+ let max = high_s
+ .checked_mul(1_000_000_000)
+ .and_then(|n| n.checked_add(high_n as u64));
+
+ if let Some(higher_bound) = max {
+ let lower_bound = low_s * 1_000_000_000 + low_n as u64;
+ UniformDurationMode::Medium {
+ nanos: Uniform::new_inclusive(lower_bound, higher_bound),
+ }
+ } else {
+ // An offset is applied to simplify generation of nanoseconds
+ let max_nanos = high_n - low_n;
+ UniformDurationMode::Large {
+ max_secs: high_s,
+ max_nanos,
+ secs: Uniform::new_inclusive(low_s, high_s),
+ }
+ }
+ };
+ UniformDuration {
+ mode,
+ offset: low_n,
+ }
+ }
+
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Duration {
+ match self.mode {
+ UniformDurationMode::Small { secs, nanos } => {
+ let n = nanos.sample(rng);
+ Duration::new(secs, n)
+ }
+ UniformDurationMode::Medium { nanos } => {
+ let nanos = nanos.sample(rng);
+ Duration::new(nanos / 1_000_000_000, (nanos % 1_000_000_000) as u32)
+ }
+ UniformDurationMode::Large { max_secs, max_nanos, secs } => {
+ // constant folding means this is at least as fast as `gen_range`
+ let nano_range = Uniform::new(0, 1_000_000_000);
+ loop {
+ let s = secs.sample(rng);
+ let n = nano_range.sample(rng);
+ if !(s == max_secs && n > max_nanos) {
+ let sum = n + self.offset;
+ break Duration::new(s, sum);
+ }
+ }
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use Rng;
+ use rngs::mock::StepRng;
+ use distributions::uniform::Uniform;
+ use distributions::utils::FloatAsSIMD;
+ #[cfg(feature="simd_support")] use packed_simd::*;
+
+ #[should_panic]
+ #[test]
+ fn test_uniform_bad_limits_equal_int() {
+ Uniform::new(10, 10);
+ }
+
+ #[test]
+ fn test_uniform_good_limits_equal_int() {
+ let mut rng = ::test::rng(804);
+ let dist = Uniform::new_inclusive(10, 10);
+ for _ in 0..20 {
+ assert_eq!(rng.sample(dist), 10);
+ }
+ }
+
+ #[should_panic]
+ #[test]
+ fn test_uniform_bad_limits_flipped_int() {
+ Uniform::new(10, 5);
+ }
+
+ #[test]
+ fn test_integers() {
+ use core::{i8, i16, i32, i64, isize};
+ use core::{u8, u16, u32, u64, usize};
+ #[cfg(rust_1_26)]
+ use core::{i128, u128};
+
+ let mut rng = ::test::rng(251);
+ macro_rules! t {
+ ($ty:ident, $v:expr, $le:expr, $lt:expr) => {{
+ for &(low, high) in $v.iter() {
+ let my_uniform = Uniform::new(low, high);
+ for _ in 0..1000 {
+ let v: $ty = rng.sample(my_uniform);
+ assert!($le(low, v) && $lt(v, high));
+ }
+
+ let my_uniform = Uniform::new_inclusive(low, high);
+ for _ in 0..1000 {
+ let v: $ty = rng.sample(my_uniform);
+ assert!($le(low, v) && $le(v, high));
+ }
+
+ let my_uniform = Uniform::new(&low, high);
+ for _ in 0..1000 {
+ let v: $ty = rng.sample(my_uniform);
+ assert!($le(low, v) && $lt(v, high));
+ }
+
+ let my_uniform = Uniform::new_inclusive(&low, &high);
+ for _ in 0..1000 {
+ let v: $ty = rng.sample(my_uniform);
+ assert!($le(low, v) && $le(v, high));
+ }
+
+ for _ in 0..1000 {
+ let v: $ty = rng.gen_range(low, high);
+ assert!($le(low, v) && $lt(v, high));
+ }
+ }
+ }};
+
+ // scalar bulk
+ ($($ty:ident),*) => {{
+ $(t!(
+ $ty,
+ [(0, 10), (10, 127), ($ty::MIN, $ty::MAX)],
+ |x, y| x <= y,
+ |x, y| x < y
+ );)*
+ }};
+
+ // simd bulk
+ ($($ty:ident),* => $scalar:ident) => {{
+ $(t!(
+ $ty,
+ [
+ ($ty::splat(0), $ty::splat(10)),
+ ($ty::splat(10), $ty::splat(127)),
+ ($ty::splat($scalar::MIN), $ty::splat($scalar::MAX)),
+ ],
+ |x: $ty, y| x.le(y).all(),
+ |x: $ty, y| x.lt(y).all()
+ );)*
+ }};
+ }
+ t!(i8, i16, i32, i64, isize,
+ u8, u16, u32, u64, usize);
+ #[cfg(rust_1_26)]
+ t!(i128, u128);
+
+ #[cfg(all(feature = "simd_support", feature = "nightly"))]
+ {
+ t!(u8x2, u8x4, u8x8, u8x16, u8x32, u8x64 => u8);
+ t!(i8x2, i8x4, i8x8, i8x16, i8x32, i8x64 => i8);
+ t!(u16x2, u16x4, u16x8, u16x16, u16x32 => u16);
+ t!(i16x2, i16x4, i16x8, i16x16, i16x32 => i16);
+ t!(u32x2, u32x4, u32x8, u32x16 => u32);
+ t!(i32x2, i32x4, i32x8, i32x16 => i32);
+ t!(u64x2, u64x4, u64x8 => u64);
+ t!(i64x2, i64x4, i64x8 => i64);
+ }
+ }
+
+ #[test]
+ fn test_floats() {
+ let mut rng = ::test::rng(252);
+ let mut zero_rng = StepRng::new(0, 0);
+ let mut max_rng = StepRng::new(0xffff_ffff_ffff_ffff, 0);
+ macro_rules! t {
+ ($ty:ty, $f_scalar:ident, $bits_shifted:expr) => {{
+                let v: &[($f_scalar, $f_scalar)] =
+ &[(0.0, 100.0),
+ (-1e35, -1e25),
+ (1e-35, 1e-25),
+ (-1e35, 1e35),
+ (<$f_scalar>::from_bits(0), <$f_scalar>::from_bits(3)),
+ (-<$f_scalar>::from_bits(10), -<$f_scalar>::from_bits(1)),
+ (-<$f_scalar>::from_bits(5), 0.0),
+ (-<$f_scalar>::from_bits(7), -0.0),
+ (10.0, ::core::$f_scalar::MAX),
+ (-100.0, ::core::$f_scalar::MAX),
+ (-::core::$f_scalar::MAX / 5.0, ::core::$f_scalar::MAX),
+ (-::core::$f_scalar::MAX, ::core::$f_scalar::MAX / 5.0),
+ (-::core::$f_scalar::MAX * 0.8, ::core::$f_scalar::MAX * 0.7),
+ (-::core::$f_scalar::MAX, ::core::$f_scalar::MAX),
+ ];
+ for &(low_scalar, high_scalar) in v.iter() {
+ for lane in 0..<$ty>::lanes() {
+ let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
+ let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
+ let my_uniform = Uniform::new(low, high);
+ let my_incl_uniform = Uniform::new_inclusive(low, high);
+ for _ in 0..100 {
+ let v = rng.sample(my_uniform).extract(lane);
+ assert!(low_scalar <= v && v < high_scalar);
+ let v = rng.sample(my_incl_uniform).extract(lane);
+ assert!(low_scalar <= v && v <= high_scalar);
+ let v = rng.gen_range(low, high).extract(lane);
+ assert!(low_scalar <= v && v < high_scalar);
+ }
+
+ assert_eq!(rng.sample(Uniform::new_inclusive(low, low)).extract(lane), low_scalar);
+
+ assert_eq!(zero_rng.sample(my_uniform).extract(lane), low_scalar);
+ assert_eq!(zero_rng.sample(my_incl_uniform).extract(lane), low_scalar);
+ assert_eq!(zero_rng.gen_range(low, high).extract(lane), low_scalar);
+ assert!(max_rng.sample(my_uniform).extract(lane) < high_scalar);
+ assert!(max_rng.sample(my_incl_uniform).extract(lane) <= high_scalar);
+
+ // Don't run this test for really tiny differences between high and low
+ // since for those rounding might result in selecting high for a very
+ // long time.
+ if (high_scalar - low_scalar) > 0.0001 {
+ let mut lowering_max_rng =
+ StepRng::new(0xffff_ffff_ffff_ffff,
+ (-1i64 << $bits_shifted) as u64);
+ assert!(lowering_max_rng.gen_range(low, high).extract(lane) < high_scalar);
+ }
+ }
+ }
+
+ assert_eq!(rng.sample(Uniform::new_inclusive(::core::$f_scalar::MAX,
+ ::core::$f_scalar::MAX)),
+ ::core::$f_scalar::MAX);
+ assert_eq!(rng.sample(Uniform::new_inclusive(-::core::$f_scalar::MAX,
+ -::core::$f_scalar::MAX)),
+ -::core::$f_scalar::MAX);
+ }}
+ }
+
+ t!(f32, f32, 32 - 23);
+ t!(f64, f64, 64 - 52);
+ #[cfg(feature="simd_support")]
+ {
+ t!(f32x2, f32, 32 - 23);
+ t!(f32x4, f32, 32 - 23);
+ t!(f32x8, f32, 32 - 23);
+ t!(f32x16, f32, 32 - 23);
+ t!(f64x2, f64, 64 - 52);
+ t!(f64x4, f64, 64 - 52);
+ t!(f64x8, f64, 64 - 52);
+ }
+ }
+
+ #[test]
+ #[cfg(all(feature="std",
+ not(target_arch = "wasm32"),
+ not(target_arch = "asmjs")))]
+ fn test_float_assertions() {
+ use std::panic::catch_unwind;
+ use super::SampleUniform;
+ fn range<T: SampleUniform>(low: T, high: T) {
+ let mut rng = ::test::rng(253);
+ rng.gen_range(low, high);
+ }
+
+ macro_rules! t {
+ ($ty:ident, $f_scalar:ident) => {{
+ let v: &[($f_scalar, $f_scalar)] =
+ &[(::std::$f_scalar::NAN, 0.0),
+ (1.0, ::std::$f_scalar::NAN),
+ (::std::$f_scalar::NAN, ::std::$f_scalar::NAN),
+ (1.0, 0.5),
+ (::std::$f_scalar::MAX, -::std::$f_scalar::MAX),
+ (::std::$f_scalar::INFINITY, ::std::$f_scalar::INFINITY),
+ (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::NEG_INFINITY),
+ (::std::$f_scalar::NEG_INFINITY, 5.0),
+ (5.0, ::std::$f_scalar::INFINITY),
+ (::std::$f_scalar::NAN, ::std::$f_scalar::INFINITY),
+ (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::NAN),
+ (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::INFINITY),
+ ];
+ for &(low_scalar, high_scalar) in v.iter() {
+ for lane in 0..<$ty>::lanes() {
+ let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
+ let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
+ assert!(catch_unwind(|| range(low, high)).is_err());
+ assert!(catch_unwind(|| Uniform::new(low, high)).is_err());
+ assert!(catch_unwind(|| Uniform::new_inclusive(low, high)).is_err());
+ assert!(catch_unwind(|| range(low, low)).is_err());
+ assert!(catch_unwind(|| Uniform::new(low, low)).is_err());
+ }
+ }
+ }}
+ }
+
+ t!(f32, f32);
+ t!(f64, f64);
+ #[cfg(feature="simd_support")]
+ {
+ t!(f32x2, f32);
+ t!(f32x4, f32);
+ t!(f32x8, f32);
+ t!(f32x16, f32);
+ t!(f64x2, f64);
+ t!(f64x4, f64);
+ t!(f64x8, f64);
+ }
+ }
+
+
+ #[test]
+ #[cfg(any(feature = "std", rust_1_25))]
+ fn test_durations() {
+ #[cfg(feature = "std")]
+ use std::time::Duration;
+ #[cfg(all(not(feature = "std"), rust_1_25))]
+ use core::time::Duration;
+
+ let mut rng = ::test::rng(253);
+
+ let v = &[(Duration::new(10, 50000), Duration::new(100, 1234)),
+ (Duration::new(0, 100), Duration::new(1, 50)),
+ (Duration::new(0, 0), Duration::new(u64::max_value(), 999_999_999))];
+ for &(low, high) in v.iter() {
+ let my_uniform = Uniform::new(low, high);
+ for _ in 0..1000 {
+ let v = rng.sample(my_uniform);
+ assert!(low <= v && v < high);
+ }
+ }
+ }
+
+ #[test]
+ fn test_custom_uniform() {
+ use distributions::uniform::{UniformSampler, UniformFloat, SampleUniform, SampleBorrow};
+ #[derive(Clone, Copy, PartialEq, PartialOrd)]
+ struct MyF32 {
+ x: f32,
+ }
+ #[derive(Clone, Copy, Debug)]
+ struct UniformMyF32 {
+ inner: UniformFloat<f32>,
+ }
+ impl UniformSampler for UniformMyF32 {
+ type X = MyF32;
+ fn new<B1, B2>(low: B1, high: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ UniformMyF32 {
+ inner: UniformFloat::<f32>::new(low.borrow().x, high.borrow().x),
+ }
+ }
+ fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ UniformSampler::new(low, high)
+ }
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ MyF32 { x: self.inner.sample(rng) }
+ }
+ }
+ impl SampleUniform for MyF32 {
+ type Sampler = UniformMyF32;
+ }
+
+ let (low, high) = (MyF32{ x: 17.0f32 }, MyF32{ x: 22.0f32 });
+ let uniform = Uniform::new(low, high);
+ let mut rng = ::test::rng(804);
+ for _ in 0..100 {
+ let x: MyF32 = rng.sample(uniform);
+ assert!(low <= x && x < high);
+ }
+ }
+
+ #[test]
+ fn test_uniform_from_std_range() {
+ let r = Uniform::from(2u32..7);
+ assert_eq!(r.inner.low, 2);
+ assert_eq!(r.inner.range, 5);
+ let r = Uniform::from(2.0f64..7.0);
+ assert_eq!(r.inner.low, 2.0);
+ assert_eq!(r.inner.scale, 5.0);
+ }
+
+ #[cfg(rust_1_27)]
+ #[test]
+ fn test_uniform_from_std_range_inclusive() {
+ let r = Uniform::from(2u32..=6);
+ assert_eq!(r.inner.low, 2);
+ assert_eq!(r.inner.range, 5);
+ let r = Uniform::from(2.0f64..=7.0);
+ assert_eq!(r.inner.low, 2.0);
+ assert!(r.inner.scale > 5.0);
+ assert!(r.inner.scale < 5.0 + 1e-14);
+ }
+}
diff --git a/rand/src/distributions/unit_circle.rs b/rand/src/distributions/unit_circle.rs
new file mode 100644
index 0000000..abb36dc
--- /dev/null
+++ b/rand/src/distributions/unit_circle.rs
@@ -0,0 +1,102 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use Rng;
+use distributions::{Distribution, Uniform};
+
+/// Samples uniformly from the edge of the unit circle in two dimensions.
+///
+/// Implemented via a method by von Neumann[^1].
+///
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{UnitCircle, Distribution};
+///
+/// let circle = UnitCircle::new();
+/// let v = circle.sample(&mut rand::thread_rng());
+/// println!("{:?} is from the unit circle.", v)
+/// ```
+///
+/// [^1]: von Neumann, J. (1951) [*Various Techniques Used in Connection with
+/// Random Digits.*](https://mcnp.lanl.gov/pdf_files/nbs_vonneumann.pdf)
+/// NBS Appl. Math. Ser., No. 12. Washington, DC: U.S. Government Printing
+/// Office, pp. 36-38.
+#[derive(Clone, Copy, Debug)]
+pub struct UnitCircle {
+ uniform: Uniform<f64>,
+}
+
+impl UnitCircle {
+ /// Construct a new `UnitCircle` distribution.
+ #[inline]
+ pub fn new() -> UnitCircle {
+ UnitCircle { uniform: Uniform::new(-1., 1.) }
+ }
+}
+
+impl Distribution<[f64; 2]> for UnitCircle {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> [f64; 2] {
+ let mut x1;
+ let mut x2;
+ let mut sum;
+ loop {
+ x1 = self.uniform.sample(rng);
+ x2 = self.uniform.sample(rng);
+ sum = x1*x1 + x2*x2;
+ if sum < 1. {
+ break;
+ }
+ }
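+        // (x1, x2) is now uniform in the unit disc. Squaring it as a complex
+        // number, (x1 + i*x2)^2 = (x1^2 - x2^2) + i*(2*x1*x2), and dividing by
+        // its squared norm `sum` yields a point on the unit circle whose angle
+        // is uniformly distributed.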
+ let diff = x1*x1 - x2*x2;
+ [diff / sum, 2.*x1*x2 / sum]
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use distributions::Distribution;
+ use super::UnitCircle;
+
+ /// Assert that two numbers are almost equal to each other.
+ ///
+ /// On panic, this macro will print the values of the expressions with their
+ /// debug representations.
+ macro_rules! assert_almost_eq {
+ ($a:expr, $b:expr, $prec:expr) => (
+ let diff = ($a - $b).abs();
+ if diff > $prec {
+ panic!(format!(
+ "assertion failed: `abs(left - right) = {:.1e} < {:e}`, \
+ (left: `{}`, right: `{}`)",
+ diff, $prec, $a, $b));
+ }
+ );
+ }
+
+ #[test]
+ fn norm() {
+ let mut rng = ::test::rng(1);
+ let dist = UnitCircle::new();
+ for _ in 0..1000 {
+ let x = dist.sample(&mut rng);
+ assert_almost_eq!(x[0]*x[0] + x[1]*x[1], 1., 1e-15);
+ }
+ }
+
+ #[test]
+ fn value_stability() {
+ let mut rng = ::test::rng(2);
+ let dist = UnitCircle::new();
+ assert_eq!(dist.sample(&mut rng), [-0.8032118336637037, 0.5956935036263119]);
+ assert_eq!(dist.sample(&mut rng), [-0.4742919588505423, -0.880367615130018]);
+ assert_eq!(dist.sample(&mut rng), [0.9297328981467168, 0.368234623716601]);
+ }
+}
diff --git a/rand/src/distributions/unit_sphere.rs b/rand/src/distributions/unit_sphere.rs
new file mode 100644
index 0000000..61cbda5
--- /dev/null
+++ b/rand/src/distributions/unit_sphere.rs
@@ -0,0 +1,100 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use Rng;
+use distributions::{Distribution, Uniform};
+
+/// Samples uniformly from the surface of the unit sphere in three dimensions.
+///
+/// Implemented via a method by Marsaglia[^1].
+///
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{UnitSphereSurface, Distribution};
+///
+/// let sphere = UnitSphereSurface::new();
+/// let v = sphere.sample(&mut rand::thread_rng());
+/// println!("{:?} is from the unit sphere surface.", v)
+/// ```
+///
+/// [^1]: Marsaglia, George (1972). [*Choosing a Point from the Surface of a
+/// Sphere.*](https://doi.org/10.1214/aoms/1177692644)
+/// Ann. Math. Statist. 43, no. 2, 645--646.
+#[derive(Clone, Copy, Debug)]
+pub struct UnitSphereSurface {
+ uniform: Uniform<f64>,
+}
+
+impl UnitSphereSurface {
+ /// Construct a new `UnitSphereSurface` distribution.
+ #[inline]
+ pub fn new() -> UnitSphereSurface {
+ UnitSphereSurface { uniform: Uniform::new(-1., 1.) }
+ }
+}
+
+impl Distribution<[f64; 3]> for UnitSphereSurface {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> [f64; 3] {
+ loop {
+ let (x1, x2) = (self.uniform.sample(rng), self.uniform.sample(rng));
+ let sum = x1*x1 + x2*x2;
+ if sum >= 1. {
+ continue;
+ }
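+            // Marsaglia's map: with (x1, x2) uniform in the unit disc, the
+            // point (2*x1*sqrt(1 - sum), 2*x2*sqrt(1 - sum), 1 - 2*sum) is
+            // uniformly distributed over the surface of the unit sphere.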
+ let factor = 2. * (1.0_f64 - sum).sqrt();
+ return [x1 * factor, x2 * factor, 1. - 2.*sum];
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use distributions::Distribution;
+ use super::UnitSphereSurface;
+
+ /// Assert that two numbers are almost equal to each other.
+ ///
+ /// On panic, this macro will print the values of the expressions with their
+ /// debug representations.
+ macro_rules! assert_almost_eq {
+ ($a:expr, $b:expr, $prec:expr) => (
+ let diff = ($a - $b).abs();
+ if diff > $prec {
+ panic!(format!(
+ "assertion failed: `abs(left - right) = {:.1e} < {:e}`, \
+ (left: `{}`, right: `{}`)",
+ diff, $prec, $a, $b));
+ }
+ );
+ }
+
+ #[test]
+ fn norm() {
+ let mut rng = ::test::rng(1);
+ let dist = UnitSphereSurface::new();
+ for _ in 0..1000 {
+ let x = dist.sample(&mut rng);
+ assert_almost_eq!(x[0]*x[0] + x[1]*x[1] + x[2]*x[2], 1., 1e-15);
+ }
+ }
+
+ #[test]
+ fn value_stability() {
+ let mut rng = ::test::rng(2);
+ let dist = UnitSphereSurface::new();
+ assert_eq!(dist.sample(&mut rng),
+ [-0.24950027180862533, -0.7552572587896719, 0.6060825747478084]);
+ assert_eq!(dist.sample(&mut rng),
+ [0.47604534507233487, -0.797200864987207, -0.3712837328763685]);
+ assert_eq!(dist.sample(&mut rng),
+ [0.9795722330927367, 0.18692349236651176, 0.07414747571708524]);
+ }
+}
diff --git a/rand/src/distributions/utils.rs b/rand/src/distributions/utils.rs
new file mode 100644
index 0000000..a2112fd
--- /dev/null
+++ b/rand/src/distributions/utils.rs
@@ -0,0 +1,504 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Math helper functions
+
+#[cfg(feature="simd_support")]
+use packed_simd::*;
+#[cfg(feature="std")]
+use distributions::ziggurat_tables;
+#[cfg(feature="std")]
+use Rng;
+
+
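+/// A multiplication returning the full double-width product split into a
+/// `(high, low)` pair of halves.
+///
+/// A small self-contained sketch of the idea using plain integer arithmetic
+/// (this does not use the trait itself):
+///
+/// ```
+/// let (a, b) = (200u8, 100u8);
+/// let wide = (a as u16) * (b as u16);              // 20000
+/// let (hi, lo) = ((wide >> 8) as u8, wide as u8);  // split into halves
+/// assert_eq!((hi as u16) * 256 + lo as u16, 20000);
+/// ```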
+pub trait WideningMultiply<RHS = Self> {
+ type Output;
+
+ fn wmul(self, x: RHS) -> Self::Output;
+}
+
+macro_rules! wmul_impl {
+ ($ty:ty, $wide:ty, $shift:expr) => {
+ impl WideningMultiply for $ty {
+ type Output = ($ty, $ty);
+
+ #[inline(always)]
+ fn wmul(self, x: $ty) -> Self::Output {
+ let tmp = (self as $wide) * (x as $wide);
+ ((tmp >> $shift) as $ty, tmp as $ty)
+ }
+ }
+ };
+
+ // simd bulk implementation
+ ($(($ty:ident, $wide:ident),)+, $shift:expr) => {
+ $(
+ impl WideningMultiply for $ty {
+ type Output = ($ty, $ty);
+
+ #[inline(always)]
+ fn wmul(self, x: $ty) -> Self::Output {
+ // For supported vectors, this should compile to a couple
+ // supported multiply & swizzle instructions (no actual
+ // casting).
+ // TODO: optimize
+ let y: $wide = self.cast();
+ let x: $wide = x.cast();
+ let tmp = y * x;
+ let hi: $ty = (tmp >> $shift).cast();
+ let lo: $ty = tmp.cast();
+ (hi, lo)
+ }
+ }
+ )+
+ };
+}
+wmul_impl! { u8, u16, 8 }
+wmul_impl! { u16, u32, 16 }
+wmul_impl! { u32, u64, 32 }
+#[cfg(rust_1_26)]
+wmul_impl! { u64, u128, 64 }
+
+// This code is a translation of the __mulddi3 function in LLVM's
+// compiler-rt. It is an optimised variant of the common method
+// `(a + b) * (c + d) = ac + ad + bc + bd`.
+//
+// For some reason LLVM can optimise the C version very well, but
+// keeps shuffling registers in this Rust translation.
+macro_rules! wmul_impl_large {
+ ($ty:ty, $half:expr) => {
+ impl WideningMultiply for $ty {
+ type Output = ($ty, $ty);
+
+ #[inline(always)]
+ fn wmul(self, b: $ty) -> Self::Output {
+ const LOWER_MASK: $ty = !0 >> $half;
+ let mut low = (self & LOWER_MASK).wrapping_mul(b & LOWER_MASK);
+ let mut t = low >> $half;
+ low &= LOWER_MASK;
+ t += (self >> $half).wrapping_mul(b & LOWER_MASK);
+ low += (t & LOWER_MASK) << $half;
+ let mut high = t >> $half;
+ t = low >> $half;
+ low &= LOWER_MASK;
+ t += (b >> $half).wrapping_mul(self & LOWER_MASK);
+ low += (t & LOWER_MASK) << $half;
+ high += t >> $half;
+ high += (self >> $half).wrapping_mul(b >> $half);
+
+ (high, low)
+ }
+ }
+ };
+
+ // simd bulk implementation
+ (($($ty:ty,)+) $scalar:ty, $half:expr) => {
+ $(
+ impl WideningMultiply for $ty {
+ type Output = ($ty, $ty);
+
+ #[inline(always)]
+ fn wmul(self, b: $ty) -> Self::Output {
+ // needs wrapping multiplication
+ const LOWER_MASK: $scalar = !0 >> $half;
+ let mut low = (self & LOWER_MASK) * (b & LOWER_MASK);
+ let mut t = low >> $half;
+ low &= LOWER_MASK;
+ t += (self >> $half) * (b & LOWER_MASK);
+ low += (t & LOWER_MASK) << $half;
+ let mut high = t >> $half;
+ t = low >> $half;
+ low &= LOWER_MASK;
+ t += (b >> $half) * (self & LOWER_MASK);
+ low += (t & LOWER_MASK) << $half;
+ high += t >> $half;
+ high += (self >> $half) * (b >> $half);
+
+ (high, low)
+ }
+ }
+ )+
+ };
+}
+#[cfg(not(rust_1_26))]
+wmul_impl_large! { u64, 32 }
+#[cfg(rust_1_26)]
+wmul_impl_large! { u128, 64 }
+
+macro_rules! wmul_impl_usize {
+ ($ty:ty) => {
+ impl WideningMultiply for usize {
+ type Output = (usize, usize);
+
+ #[inline(always)]
+ fn wmul(self, x: usize) -> Self::Output {
+ let (high, low) = (self as $ty).wmul(x as $ty);
+ (high as usize, low as usize)
+ }
+ }
+ }
+}
+#[cfg(target_pointer_width = "32")]
+wmul_impl_usize! { u32 }
+#[cfg(target_pointer_width = "64")]
+wmul_impl_usize! { u64 }
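
A sketch for illustration only (not part of the patch): the half-word decomposition that `wmul_impl_large!` relies on, written out for `u64` as a hypothetical `wmul64` helper and checked against the native 128-bit product.

// Illustrative sketch. With a = a_hi*2^32 + a_lo and b = b_hi*2^32 + b_lo, the
// full product is hh*2^64 + (lh + hl)*2^32 + ll, folded into a (high, low) pair.
fn wmul64(a: u64, b: u64) -> (u64, u64) {
    let (a_hi, a_lo) = (a >> 32, a & 0xFFFF_FFFF);
    let (b_hi, b_lo) = (b >> 32, b & 0xFFFF_FFFF);

    let ll = a_lo * b_lo;
    let lh = a_lo * b_hi;
    let hl = a_hi * b_lo;
    let hh = a_hi * b_hi;

    // Sum the two middle products; a carry here is worth 2^96 in the result.
    let (mid, carry1) = lh.overflowing_add(hl);
    // Fold the low half of `mid` into the low word; a carry here is worth 2^64.
    let (low, carry2) = ll.overflowing_add(mid << 32);
    let high = hh + (mid >> 32) + ((carry1 as u64) << 32) + carry2 as u64;
    (high, low)
}

fn main() {
    let (a, b) = (0xDEAD_BEEF_CAFE_F00D_u64, 0x0123_4567_89AB_CDEF_u64);
    let (hi, lo) = wmul64(a, b);
    assert_eq!(((hi as u128) << 64) | lo as u128, (a as u128) * (b as u128));
}
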
+
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+mod simd_wmul {
+ #[cfg(target_arch = "x86")]
+ use core::arch::x86::*;
+ #[cfg(target_arch = "x86_64")]
+ use core::arch::x86_64::*;
+ use super::*;
+
+ wmul_impl! {
+ (u8x2, u16x2),
+ (u8x4, u16x4),
+ (u8x8, u16x8),
+ (u8x16, u16x16),
+ (u8x32, u16x32),,
+ 8
+ }
+
+ wmul_impl! { (u16x2, u32x2),, 16 }
+ #[cfg(not(target_feature = "sse2"))]
+ wmul_impl! { (u16x4, u32x4),, 16 }
+ #[cfg(not(target_feature = "sse4.2"))]
+ wmul_impl! { (u16x8, u32x8),, 16 }
+ #[cfg(not(target_feature = "avx2"))]
+ wmul_impl! { (u16x16, u32x16),, 16 }
+
+ // 16-bit lane widths allow use of the x86 `mulhi` instructions, which
+ // means `wmul` can be implemented with only two instructions.
+ #[allow(unused_macros)]
+ macro_rules! wmul_impl_16 {
+ ($ty:ident, $intrinsic:ident, $mulhi:ident, $mullo:ident) => {
+ impl WideningMultiply for $ty {
+ type Output = ($ty, $ty);
+
+ #[inline(always)]
+ fn wmul(self, x: $ty) -> Self::Output {
+ let b = $intrinsic::from_bits(x);
+ let a = $intrinsic::from_bits(self);
+ let hi = $ty::from_bits(unsafe { $mulhi(a, b) });
+ let lo = $ty::from_bits(unsafe { $mullo(a, b) });
+ (hi, lo)
+ }
+ }
+ };
+ }
+
+ #[cfg(target_feature = "sse2")]
+ wmul_impl_16! { u16x4, __m64, _mm_mulhi_pu16, _mm_mullo_pi16 }
+ #[cfg(target_feature = "sse4.2")]
+ wmul_impl_16! { u16x8, __m128i, _mm_mulhi_epu16, _mm_mullo_epi16 }
+ #[cfg(target_feature = "avx2")]
+ wmul_impl_16! { u16x16, __m256i, _mm256_mulhi_epu16, _mm256_mullo_epi16 }
+ // FIXME: there are no `__m512i` types in stdsimd yet, so `wmul::<u16x32>`
+ // cannot use the same implementation.
+
+ wmul_impl! {
+ (u32x2, u64x2),
+ (u32x4, u64x4),
+ (u32x8, u64x8),,
+ 32
+ }
+
+ // TODO: optimize, this seems to seriously slow things down
+ wmul_impl_large! { (u8x64,) u8, 4 }
+ wmul_impl_large! { (u16x32,) u16, 8 }
+ wmul_impl_large! { (u32x16,) u32, 16 }
+ wmul_impl_large! { (u64x2, u64x4, u64x8,) u64, 32 }
+}
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+pub use self::simd_wmul::*;
+
+
+/// Helper trait when dealing with scalar and SIMD floating point types.
+pub(crate) trait FloatSIMDUtils {
+ // `PartialOrd` for vectors compares lexicographically. We want to compare all
+ // the individual SIMD lanes instead, and get the combined result over all
+ // lanes. This is possible using something like `a.lt(b).all()`, but we
+ // implement it as a trait so we can write the same code for `f32` and `f64`.
+ // Only the comparison functions we need are implemented.
+ fn all_lt(self, other: Self) -> bool;
+ fn all_le(self, other: Self) -> bool;
+ fn all_finite(self) -> bool;
+
+ type Mask;
+ fn finite_mask(self) -> Self::Mask;
+ fn gt_mask(self, other: Self) -> Self::Mask;
+ fn ge_mask(self, other: Self) -> Self::Mask;
+
+ // Decrease all lanes where the mask is `true` to the next lower value
+ // representable by the floating-point type. At least one of the lanes
+ // must be set.
+ fn decrease_masked(self, mask: Self::Mask) -> Self;
+
+ // Convert from int value. Conversion is done while retaining the numerical
+ // value, not by retaining the binary representation.
+ type UInt;
+ fn cast_from_int(i: Self::UInt) -> Self;
+}
+
+/// Implement functions available in std builds but missing from core primitives
+#[cfg(not(std))]
+pub(crate) trait Float : Sized {
+ type Bits;
+
+ fn is_nan(self) -> bool;
+ fn is_infinite(self) -> bool;
+ fn is_finite(self) -> bool;
+ fn to_bits(self) -> Self::Bits;
+ fn from_bits(v: Self::Bits) -> Self;
+}
+
+/// Implement functions on f32/f64 to give them APIs similar to SIMD types
+pub(crate) trait FloatAsSIMD : Sized {
+ #[inline(always)]
+ fn lanes() -> usize { 1 }
+ #[inline(always)]
+ fn splat(scalar: Self) -> Self { scalar }
+ #[inline(always)]
+ fn extract(self, index: usize) -> Self { debug_assert_eq!(index, 0); self }
+ #[inline(always)]
+ fn replace(self, index: usize, new_value: Self) -> Self { debug_assert_eq!(index, 0); new_value }
+}
+
+pub(crate) trait BoolAsSIMD : Sized {
+ fn any(self) -> bool;
+ fn all(self) -> bool;
+ fn none(self) -> bool;
+}
+
+impl BoolAsSIMD for bool {
+ #[inline(always)]
+ fn any(self) -> bool { self }
+ #[inline(always)]
+ fn all(self) -> bool { self }
+ #[inline(always)]
+ fn none(self) -> bool { !self }
+}
+
+macro_rules! scalar_float_impl {
+ ($ty:ident, $uty:ident) => {
+ #[cfg(not(std))]
+ impl Float for $ty {
+ type Bits = $uty;
+
+ #[inline]
+ fn is_nan(self) -> bool {
+ self != self
+ }
+
+ #[inline]
+ fn is_infinite(self) -> bool {
+ self == ::core::$ty::INFINITY || self == ::core::$ty::NEG_INFINITY
+ }
+
+ #[inline]
+ fn is_finite(self) -> bool {
+ !(self.is_nan() || self.is_infinite())
+ }
+
+ #[inline]
+ fn to_bits(self) -> Self::Bits {
+ unsafe { ::core::mem::transmute(self) }
+ }
+
+ #[inline]
+ fn from_bits(v: Self::Bits) -> Self {
+ // It turns out the safety issues with sNaN were overblown! Hooray!
+ unsafe { ::core::mem::transmute(v) }
+ }
+ }
+
+ impl FloatSIMDUtils for $ty {
+ type Mask = bool;
+ #[inline(always)]
+ fn all_lt(self, other: Self) -> bool { self < other }
+ #[inline(always)]
+ fn all_le(self, other: Self) -> bool { self <= other }
+ #[inline(always)]
+ fn all_finite(self) -> bool { self.is_finite() }
+ #[inline(always)]
+ fn finite_mask(self) -> Self::Mask { self.is_finite() }
+ #[inline(always)]
+ fn gt_mask(self, other: Self) -> Self::Mask { self > other }
+ #[inline(always)]
+ fn ge_mask(self, other: Self) -> Self::Mask { self >= other }
+ #[inline(always)]
+ fn decrease_masked(self, mask: Self::Mask) -> Self {
+ debug_assert!(mask, "At least one lane must be set");
+ <$ty>::from_bits(self.to_bits() - 1)
+ }
+ type UInt = $uty;
+ fn cast_from_int(i: Self::UInt) -> Self { i as $ty }
+ }
+
+ impl FloatAsSIMD for $ty {}
+ }
+}
+
+scalar_float_impl!(f32, u32);
+scalar_float_impl!(f64, u64);
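
A note for illustration only (not part of the patch): the `self != self` test above is the standard IEEE 754 NaN check, since NaN is the only floating-point value that does not compare equal to itself.

// Illustrative sketch of the NaN self-comparison trick.
fn main() {
    let nan = ::std::f64::NAN;
    assert!(nan != nan);        // NaN never equals anything, including itself
    assert!(!(1.0_f64 != 1.0)); // ordinary values compare equal as expected
}
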
+
+
+#[cfg(feature="simd_support")]
+macro_rules! simd_impl {
+ ($ty:ident, $f_scalar:ident, $mty:ident, $uty:ident) => {
+ impl FloatSIMDUtils for $ty {
+ type Mask = $mty;
+ #[inline(always)]
+ fn all_lt(self, other: Self) -> bool { self.lt(other).all() }
+ #[inline(always)]
+ fn all_le(self, other: Self) -> bool { self.le(other).all() }
+ #[inline(always)]
+ fn all_finite(self) -> bool { self.finite_mask().all() }
+ #[inline(always)]
+ fn finite_mask(self) -> Self::Mask {
+ // This can possibly be done faster by checking bit patterns
+ let neg_inf = $ty::splat(::core::$f_scalar::NEG_INFINITY);
+ let pos_inf = $ty::splat(::core::$f_scalar::INFINITY);
+ self.gt(neg_inf) & self.lt(pos_inf)
+ }
+ #[inline(always)]
+ fn gt_mask(self, other: Self) -> Self::Mask { self.gt(other) }
+ #[inline(always)]
+ fn ge_mask(self, other: Self) -> Self::Mask { self.ge(other) }
+ #[inline(always)]
+ fn decrease_masked(self, mask: Self::Mask) -> Self {
+ // Casting a mask into ints will produce all bits set for
+ // true, and 0 for false. Adding that to the binary
+ // representation of a float means subtracting one from
+ // the binary representation, resulting in the next lower
+ // value representable by $ty. This works even when the
+ // current value is infinity.
+ debug_assert!(mask.any(), "At least one lane must be set");
+ <$ty>::from_bits(<$uty>::from_bits(self) + <$uty>::from_bits(mask))
+ }
+ type UInt = $uty;
+ fn cast_from_int(i: Self::UInt) -> Self { i.cast() }
+ }
+ }
+}
+
+#[cfg(feature="simd_support")] simd_impl! { f32x2, f32, m32x2, u32x2 }
+#[cfg(feature="simd_support")] simd_impl! { f32x4, f32, m32x4, u32x4 }
+#[cfg(feature="simd_support")] simd_impl! { f32x8, f32, m32x8, u32x8 }
+#[cfg(feature="simd_support")] simd_impl! { f32x16, f32, m32x16, u32x16 }
+#[cfg(feature="simd_support")] simd_impl! { f64x2, f64, m64x2, u64x2 }
+#[cfg(feature="simd_support")] simd_impl! { f64x4, f64, m64x4, u64x4 }
+#[cfg(feature="simd_support")] simd_impl! { f64x8, f64, m64x8, u64x8 }
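
A sketch for illustration only (not part of the patch): the scalar analogue of `decrease_masked`, using a hypothetical `next_down` helper. Subtracting one from the IEEE 754 bit pattern of a positive, non-zero value steps to the next smaller representable value, which is what the masked SIMD version does lane-wise by adding the all-ones mask.

// Illustrative sketch of the "subtract one from the bits" step.
fn next_down(x: f64) -> f64 {
    f64::from_bits(x.to_bits() - 1)
}

fn main() {
    // The value just below 1.0 differs from it by half of machine epsilon.
    assert_eq!(next_down(1.0), 1.0 - ::std::f64::EPSILON / 2.0);
    // As noted in the comment above, this also steps infinity down to f64::MAX.
    assert_eq!(next_down(::std::f64::INFINITY), ::std::f64::MAX);
}
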
+
+/// Calculates ln(gamma(x)) (natural logarithm of the gamma
+/// function) using the Lanczos approximation.
+///
+/// The approximation expresses the gamma function as:
+/// `gamma(z+1) = sqrt(2*pi)*(z+g+0.5)^(z+0.5)*exp(-z-g-0.5)*Ag(z)`
+/// `g` is an arbitrary constant; we use the approximation with `g=5`.
+///
+/// Noting that `gamma(z+1) = z*gamma(z)` and applying `ln` to both sides:
+/// `ln(gamma(z)) = (z+0.5)*ln(z+g+0.5)-(z+g+0.5) + ln(sqrt(2*pi)*Ag(z)/z)`
+///
+/// `Ag(z)` is an infinite series with coefficients that can be calculated
+/// ahead of time - we use just the first 6 terms, which is good enough
+/// for most purposes.
+#[cfg(feature="std")]
+pub fn log_gamma(x: f64) -> f64 {
+ // precalculated 6 coefficients for the first 6 terms of the series
+ let coefficients: [f64; 6] = [
+ 76.18009172947146,
+ -86.50532032941677,
+ 24.01409824083091,
+ -1.231739572450155,
+ 0.1208650973866179e-2,
+ -0.5395239384953e-5,
+ ];
+
+ // (x+0.5)*ln(x+g+0.5)-(x+g+0.5)
+ let tmp = x + 5.5;
+ let log = (x + 0.5) * tmp.ln() - tmp;
+
+ // the first few terms of the series for Ag(x)
+ let mut a = 1.000000000190015;
+ let mut denom = x;
+ for coeff in &coefficients {
+ denom += 1.0;
+ a += coeff / denom;
+ }
+
+ // get everything together
+ // a is Ag(x)
+ // 2.5066... is sqrt(2pi)
+ log + (2.5066282746310005 * a / x).ln()
+}
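
A check for illustration only (not part of the patch): since `gamma(n) = (n - 1)!` for positive integers, `log_gamma(5.0)` should reproduce `ln(24)`; the six-term series is accurate to roughly ten significant digits. The snippet assumes it lives in this module, as `log_gamma` is crate-internal.

// Illustrative sanity check of the Lanczos approximation.
#[cfg(all(test, feature = "std"))]
#[test]
fn log_gamma_matches_factorial() {
    let approx = log_gamma(5.0);
    let exact = 24.0_f64.ln();
    assert!((approx - exact).abs() < 1e-8);
}
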
+
+/// Sample a random number using the Ziggurat method (specifically the
+/// ZIGNOR variant from Doornik 2005). Most of the arguments are
+/// directly from the paper:
+///
+/// * `rng`: source of randomness
+/// * `symmetric`: whether this is a symmetric distribution, or one-sided with P(x < 0) = 0.
+/// * `X`: the $x_i$ abscissae.
+/// * `F`: precomputed values of the PDF at the $x_i$, (i.e. $f(x_i)$)
+/// * `F_DIFF`: precomputed values of $f(x_i) - f(x_{i+1})$
+/// * `pdf`: the probability density function
+/// * `zero_case`: manual sampling from the tail when we chose the
+/// bottom box (i.e. i == 0)
+
+// the perf improvement (25-50%) is definitely worth the extra code
+// size from force-inlining.
+#[cfg(feature="std")]
+#[inline(always)]
+pub fn ziggurat<R: Rng + ?Sized, P, Z>(
+ rng: &mut R,
+ symmetric: bool,
+ x_tab: ziggurat_tables::ZigTable,
+ f_tab: ziggurat_tables::ZigTable,
+ mut pdf: P,
+ mut zero_case: Z)
+ -> f64 where P: FnMut(f64) -> f64, Z: FnMut(&mut R, f64) -> f64 {
+ use distributions::float::IntoFloat;
+ loop {
+ // As an optimisation we re-implement the conversion to a f64.
+ // From the remaining 12 most significant bits we use 8 to construct `i`.
+ // This saves us generating a whole extra random number, while the added
+ // precision of using 64 bits for f64 does not buy us much.
+ let bits = rng.next_u64();
+ let i = bits as usize & 0xff;
+
+ let u = if symmetric {
+ // Convert to a value in the range [2,4) and subtract to get [-1,1)
+ // We can't convert to an open range directly; that would require
+ // subtracting `3.0 - EPSILON`, which is not representable.
+ // It is possible with an extra step, but an open range does not
+ // seem necessary for the ziggurat algorithm anyway.
+ (bits >> 12).into_float_with_exponent(1) - 3.0
+ } else {
+ // Convert to a value in the range [1,2) and subtract to get (0,1)
+ (bits >> 12).into_float_with_exponent(0)
+ - (1.0 - ::core::f64::EPSILON / 2.0)
+ };
+ let x = u * x_tab[i];
+
+ let test_x = if symmetric { x.abs() } else {x};
+
+ // algebraically equivalent to |u| < x_tab[i+1]/x_tab[i] (or u < x_tab[i+1]/x_tab[i])
+ if test_x < x_tab[i + 1] {
+ return x;
+ }
+ if i == 0 {
+ return zero_case(rng, u);
+ }
+ // algebraically equivalent to f1 + DRanU()*(f0 - f1) < 1
+ if f_tab[i + 1] + (f_tab[i] - f_tab[i + 1]) * rng.gen::<f64>() < pdf(x) {
+ return x;
+ }
+ }
+}
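
A sketch for illustration only (not part of the patch): the exponent/mantissa trick that the loop above borrows from the crate-internal `IntoFloat` helper. Placing 52 random bits in the mantissa under a fixed biased exponent yields a uniform value in [1,2) (exponent 0) or [2,4) (exponent 1); the subtraction then shifts it onto the interval the ziggurat needs.

// Illustrative sketch; the real helper assumes the caller already shifted away
// the top 12 bits, so the mask here only makes the sketch self-contained.
fn into_float_with_exponent(bits: u64, exponent: i32) -> f64 {
    let exponent_bits = ((1023 + exponent) as u64) << 52;
    f64::from_bits((bits & 0x000F_FFFF_FFFF_FFFF) | exponent_bits)
}

fn main() {
    let bits = 0x1234_5678_9ABC_DEF0_u64 >> 12; // keep 52 bits, as in the loop above
    let u = into_float_with_exponent(bits, 1) - 3.0;
    assert!(u >= -1.0 && u < 1.0); // the symmetric case maps onto [-1, 1)
}
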
diff --git a/rand/src/distributions/weibull.rs b/rand/src/distributions/weibull.rs
new file mode 100644
index 0000000..5fbe10a
--- /dev/null
+++ b/rand/src/distributions/weibull.rs
@@ -0,0 +1,71 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Weibull distribution.
+
+use Rng;
+use distributions::{Distribution, OpenClosed01};
+
+/// Samples floating-point numbers according to the Weibull distribution.
+///
+/// # Example
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::Weibull;
+///
+/// let val: f64 = SmallRng::from_entropy().sample(Weibull::new(1., 10.));
+/// println!("{}", val);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Weibull {
+ inv_shape: f64,
+ scale: f64,
+}
+
+impl Weibull {
+ /// Construct a new `Weibull` distribution with given `scale` and `shape`.
+ ///
+ /// # Panics
+ ///
+ /// `scale` and `shape` must be strictly positive.
+ pub fn new(scale: f64, shape: f64) -> Weibull {
+ assert!((scale > 0.) & (shape > 0.));
+ Weibull { inv_shape: 1./shape, scale }
+ }
+}
+
+impl Distribution<f64> for Weibull {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let x: f64 = rng.sample(OpenClosed01);
+ self.scale * (-x.ln()).powf(self.inv_shape)
+ }
+}
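
A sketch for illustration only (not part of the patch): `sample` is the inverse-CDF (inverse transform) method. The Weibull CDF is `F(t) = 1 - exp(-(t/scale)^shape)`; solving `F(t) = 1 - x` for `x` in (0, 1] gives `t = scale * (-ln x)^(1/shape)`, which is the expression above. The hypothetical helper below round-trips one value through the CDF.

// Illustrative sketch of the inverse-CDF transform.
fn weibull_inverse_cdf(x: f64, scale: f64, shape: f64) -> f64 {
    scale * (-x.ln()).powf(1.0 / shape)
}

fn main() {
    let (scale, shape) = (1.0, 2.0);
    let t = weibull_inverse_cdf(0.5, scale, shape);
    // Applying the CDF to t recovers 1 - x = 0.5.
    let cdf = 1.0 - (-(t / scale).powf(shape)).exp();
    assert!((cdf - 0.5).abs() < 1e-12);
}
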
+
+#[cfg(test)]
+mod tests {
+ use distributions::Distribution;
+ use super::Weibull;
+
+ #[test]
+ #[should_panic]
+ fn invalid() {
+ Weibull::new(0., 0.);
+ }
+
+ #[test]
+ fn sample() {
+ let scale = 1.0;
+ let shape = 2.0;
+ let d = Weibull::new(scale, shape);
+ let mut rng = ::test::rng(1);
+ for _ in 0..1000 {
+ let r = d.sample(&mut rng);
+ assert!(r >= 0.);
+ }
+ }
+}
diff --git a/rand/src/distributions/weighted.rs b/rand/src/distributions/weighted.rs
new file mode 100644
index 0000000..01c8fe6
--- /dev/null
+++ b/rand/src/distributions/weighted.rs
@@ -0,0 +1,232 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use Rng;
+use distributions::Distribution;
+use distributions::uniform::{UniformSampler, SampleUniform, SampleBorrow};
+use ::core::cmp::PartialOrd;
+use core::fmt;
+
+// Note that this whole module is only imported if feature="alloc" is enabled.
+#[cfg(not(feature="std"))] use alloc::vec::Vec;
+
+/// A distribution using weighted sampling to pick a discretely selected
+/// item.
+///
+/// Sampling a `WeightedIndex` distribution returns the index of a randomly
+/// selected element from the iterator used when the `WeightedIndex` was
+/// created. The chance of a given element being picked is proportional to the
+/// value of the element. The weights can use any type `X` for which an
+/// implementation of [`Uniform<X>`] exists.
+///
+/// # Performance
+///
+/// A `WeightedIndex<X>` contains a `Vec<X>` and a [`Uniform<X>`] and so its
+/// size is the sum of the size of those objects, possibly plus some alignment.
+///
+/// Creating a `WeightedIndex<X>` will allocate enough space to hold `N - 1`
+/// weights of type `X`, where `N` is the number of weights. However, since
+/// `Vec` doesn't guarantee a particular growth strategy, additional memory
+/// might be allocated but not used. Since the `WeightedIndex` object also
+/// contains a [`Uniform<X>`], this might cause additional allocations, though
+/// for primitive types, [`Uniform<X>`] doesn't allocate any memory.
+///
+/// Time complexity of sampling from `WeightedIndex` is `O(log N)` where
+/// `N` is the number of weights.
+///
+/// Sampling from `WeightedIndex` will result in a single call to
+/// [`Uniform<X>::sample`], which typically will request a single value from
+/// the underlying [`RngCore`], though the exact number depends on the
+/// implementation of [`Uniform<X>::sample`].
+///
+/// # Example
+///
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::WeightedIndex;
+///
+/// let choices = ['a', 'b', 'c'];
+/// let weights = [2, 1, 1];
+/// let dist = WeightedIndex::new(&weights).unwrap();
+/// let mut rng = thread_rng();
+/// for _ in 0..100 {
+/// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c'
+/// println!("{}", choices[dist.sample(&mut rng)]);
+/// }
+///
+/// let items = [('a', 0), ('b', 3), ('c', 7)];
+/// let dist2 = WeightedIndex::new(items.iter().map(|item| item.1)).unwrap();
+/// for _ in 0..100 {
+/// // 0% chance to print 'a', 30% chance to print 'b', 70% chance to print 'c'
+/// println!("{}", items[dist2.sample(&mut rng)].0);
+/// }
+/// ```
+///
+/// [`Uniform<X>`]: struct.Uniform.html
+/// [`Uniform<X>::sample`]: struct.Uniform.html#method.sample
+/// [`RngCore`]: ../trait.RngCore.html
+#[derive(Debug, Clone)]
+pub struct WeightedIndex<X: SampleUniform + PartialOrd> {
+ cumulative_weights: Vec<X>,
+ weight_distribution: X::Sampler,
+}
+
+impl<X: SampleUniform + PartialOrd> WeightedIndex<X> {
+ /// Creates a new `WeightedIndex` [`Distribution`] using the values
+ /// in `weights`. The weights can use any type `X` for which an
+ /// implementation of [`Uniform<X>`] exists.
+ ///
+ /// Returns an error if the iterator is empty, if any weight is `< 0`, or
+ /// if the sum of all weights is zero.
+ ///
+ /// [`Distribution`]: trait.Distribution.html
+ /// [`Uniform<X>`]: struct.Uniform.html
+ pub fn new<I>(weights: I) -> Result<WeightedIndex<X>, WeightedError>
+ where I: IntoIterator,
+ I::Item: SampleBorrow<X>,
+ X: for<'a> ::core::ops::AddAssign<&'a X> +
+ Clone +
+ Default {
+ let mut iter = weights.into_iter();
+ let mut total_weight: X = iter.next()
+ .ok_or(WeightedError::NoItem)?
+ .borrow()
+ .clone();
+
+ let zero = <X as Default>::default();
+ if total_weight < zero {
+ return Err(WeightedError::NegativeWeight);
+ }
+
+ let mut weights = Vec::<X>::with_capacity(iter.size_hint().0);
+ for w in iter {
+ if *w.borrow() < zero {
+ return Err(WeightedError::NegativeWeight);
+ }
+ weights.push(total_weight.clone());
+ total_weight += w.borrow();
+ }
+
+ if total_weight == zero {
+ return Err(WeightedError::AllWeightsZero);
+ }
+ let distr = X::Sampler::new(zero, total_weight);
+
+ Ok(WeightedIndex { cumulative_weights: weights, weight_distribution: distr })
+ }
+}
+
+impl<X> Distribution<usize> for WeightedIndex<X> where
+ X: SampleUniform + PartialOrd {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
+ use ::core::cmp::Ordering;
+ let chosen_weight = self.weight_distribution.sample(rng);
+ // Find the first item which has a weight *higher* than the chosen weight.
+ self.cumulative_weights.binary_search_by(
+ |w| if *w <= chosen_weight { Ordering::Less } else { Ordering::Greater }).unwrap_err()
+ }
+}
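
A sketch for illustration only (not part of the patch): the cumulative-weight layout built by `new` and the binary search performed by `sample`, written with plain integers and a hypothetical `pick` helper standing in for the `Uniform` draw.

// Illustrative sketch. `chosen` plays the role of a uniform draw in [0, total).
fn pick(cumulative: &[u32], total: u32, chosen: u32) -> usize {
    assert!(chosen < total);
    // Index of the first stored cumulative weight strictly greater than `chosen`;
    // the comparator never returns Equal, so the Err branch is always taken.
    cumulative
        .binary_search_by(|w| {
            if *w <= chosen { ::std::cmp::Ordering::Less } else { ::std::cmp::Ordering::Greater }
        })
        .unwrap_err()
}

fn main() {
    // Weights [2, 1, 1] are stored as the N - 1 partial sums [2, 3]; the final
    // total (4) only parameterises the uniform range and is not stored.
    let cumulative = [2u32, 3];
    assert_eq!(pick(&cumulative, 4, 0), 0); // draws 0 and 1 select index 0
    assert_eq!(pick(&cumulative, 4, 2), 1); // draw 2 selects index 1
    assert_eq!(pick(&cumulative, 4, 3), 2); // draw 3 selects index 2
}
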
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_weightedindex() {
+ let mut r = ::test::rng(700);
+ const N_REPS: u32 = 5000;
+ let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7];
+ let total_weight = weights.iter().sum::<u32>() as f32;
+
+ let verify = |result: [i32; 14]| {
+ for (i, count) in result.iter().enumerate() {
+ let exp = (weights[i] * N_REPS) as f32 / total_weight;
+ let mut err = (*count as f32 - exp).abs();
+ if err != 0.0 {
+ err /= exp;
+ }
+ assert!(err <= 0.25);
+ }
+ };
+
+ // WeightedIndex from vec
+ let mut chosen = [0i32; 14];
+ let distr = WeightedIndex::new(weights.to_vec()).unwrap();
+ for _ in 0..N_REPS {
+ chosen[distr.sample(&mut r)] += 1;
+ }
+ verify(chosen);
+
+ // WeightedIndex from slice
+ chosen = [0i32; 14];
+ let distr = WeightedIndex::new(&weights[..]).unwrap();
+ for _ in 0..N_REPS {
+ chosen[distr.sample(&mut r)] += 1;
+ }
+ verify(chosen);
+
+ // WeightedIndex from iterator
+ chosen = [0i32; 14];
+ let distr = WeightedIndex::new(weights.iter()).unwrap();
+ for _ in 0..N_REPS {
+ chosen[distr.sample(&mut r)] += 1;
+ }
+ verify(chosen);
+
+ for _ in 0..5 {
+ assert_eq!(WeightedIndex::new(&[0, 1]).unwrap().sample(&mut r), 1);
+ assert_eq!(WeightedIndex::new(&[1, 0]).unwrap().sample(&mut r), 0);
+ assert_eq!(WeightedIndex::new(&[0, 0, 0, 0, 10, 0]).unwrap().sample(&mut r), 4);
+ }
+
+ assert_eq!(WeightedIndex::new(&[10][0..0]).unwrap_err(), WeightedError::NoItem);
+ assert_eq!(WeightedIndex::new(&[0]).unwrap_err(), WeightedError::AllWeightsZero);
+ assert_eq!(WeightedIndex::new(&[10, 20, -1, 30]).unwrap_err(), WeightedError::NegativeWeight);
+ assert_eq!(WeightedIndex::new(&[-10, 20, 1, 30]).unwrap_err(), WeightedError::NegativeWeight);
+ assert_eq!(WeightedIndex::new(&[-10]).unwrap_err(), WeightedError::NegativeWeight);
+ }
+}
+
+/// Error type returned from `WeightedIndex::new`.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum WeightedError {
+ /// The provided iterator contained no items.
+ NoItem,
+
+ /// A weight lower than zero was used.
+ NegativeWeight,
+
+ /// All items in the provided iterator had a weight of zero.
+ AllWeightsZero,
+}
+
+impl WeightedError {
+ fn msg(&self) -> &str {
+ match *self {
+ WeightedError::NoItem => "No items found",
+ WeightedError::NegativeWeight => "Item has negative weight",
+ WeightedError::AllWeightsZero => "All items had weight zero",
+ }
+ }
+}
+
+#[cfg(feature="std")]
+impl ::std::error::Error for WeightedError {
+ fn description(&self) -> &str {
+ self.msg()
+ }
+ fn cause(&self) -> Option<&::std::error::Error> {
+ None
+ }
+}
+
+impl fmt::Display for WeightedError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}", self.msg())
+ }
+}
diff --git a/rand/src/distributions/ziggurat_tables.rs b/rand/src/distributions/ziggurat_tables.rs
index b6de4bf..ca1ce30 100644
--- a/rand/src/distributions/ziggurat_tables.rs
+++ b/rand/src/distributions/ziggurat_tables.rs
@@ -1,10 +1,9 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
diff --git a/rand/src/lib.rs b/rand/src/lib.rs
index 7b22dd4..d364bd1 100644
--- a/rand/src/lib.rs
+++ b/rand/src/lib.rs
@@ -1,921 +1,673 @@
-// Copyright 2013-2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Utilities for random number generation
//!
-//! The key functions are `random()` and `Rng::gen()`. These are polymorphic and
-//! so can be used to generate any type that implements `Rand`. Type inference
-//! means that often a simple call to `rand::random()` or `rng.gen()` will
-//! suffice, but sometimes an annotation is required, e.g.
-//! `rand::random::<f64>()`.
+//! Rand provides utilities to generate random numbers, to convert them to
+//! useful types and distributions, and some randomness-related algorithms.
//!
-//! See the `distributions` submodule for sampling random numbers from
-//! distributions like normal and exponential.
+//! # Quick Start
+//!
+//! To get you started quickly, the easiest and highest-level way to get
+//! a random value is to use [`random()`]; alternatively you can use
+//! [`thread_rng()`]. The [`Rng`] trait provides a useful API on all RNGs, while
+//! the [`distributions` module] and [`seq` module] provide further
+//! functionality on top of RNGs.
//!
-//! # Usage
-//!
-//! This crate is [on crates.io](https://crates.io/crates/rand) and can be
-//! used by adding `rand` to the dependencies in your project's `Cargo.toml`.
-//!
-//! ```toml
-//! [dependencies]
-//! rand = "0.4"
-//! ```
-//!
-//! and this to your crate root:
-//!
-//! ```rust
-//! extern crate rand;
//! ```
-//!
-//! # Thread-local RNG
-//!
-//! There is built-in support for a RNG associated with each thread stored
-//! in thread-local storage. This RNG can be accessed via `thread_rng`, or
-//! used implicitly via `random`. This RNG is normally randomly seeded
-//! from an operating-system source of randomness, e.g. `/dev/urandom` on
-//! Unix systems, and will automatically reseed itself from this source
-//! after generating 32 KiB of random data.
-//!
-//! # Cryptographic security
-//!
-//! An application that requires an entropy source for cryptographic purposes
-//! must use `OsRng`, which reads randomness from the source that the operating
-//! system provides (e.g. `/dev/urandom` on Unixes or `CryptGenRandom()` on
-//! Windows).
-//! The other random number generators provided by this module are not suitable
-//! for such purposes.
-//!
-//! *Note*: many Unix systems provide `/dev/random` as well as `/dev/urandom`.
-//! This module uses `/dev/urandom` for the following reasons:
-//!
-//! - On Linux, `/dev/random` may block if entropy pool is empty;
-//! `/dev/urandom` will not block. This does not mean that `/dev/random`
-//! provides better output than `/dev/urandom`; the kernel internally runs a
-//! cryptographically secure pseudorandom number generator (CSPRNG) based on
-//! entropy pool for random number generation, so the "quality" of
-//! `/dev/random` is not better than `/dev/urandom` in most cases. However,
-//! this means that `/dev/urandom` can yield somewhat predictable randomness
-//! if the entropy pool is very small, such as immediately after first
-//! booting. Linux 3.17 added the `getrandom(2)` system call which solves
-//! the issue: it blocks if entropy pool is not initialized yet, but it does
-//! not block once initialized. `OsRng` tries to use `getrandom(2)` if
-//! available, and use `/dev/urandom` fallback if not. If an application
-//! does not have `getrandom` and likely to be run soon after first booting,
-//! or on a system with very few entropy sources, one should consider using
-//! `/dev/random` via `ReadRng`.
-//! - On some systems (e.g. FreeBSD, OpenBSD and Mac OS X) there is no
-//! difference between the two sources. (Also note that, on some systems
-//! e.g. FreeBSD, both `/dev/random` and `/dev/urandom` may block once if
-//! the CSPRNG has not seeded yet.)
-//!
-//! # Examples
-//!
-//! ```rust
-//! use rand::Rng;
-//!
-//! let mut rng = rand::thread_rng();
-//! if rng.gen() { // random bool
-//! println!("i32: {}, u32: {}", rng.gen::<i32>(), rng.gen::<u32>())
+//! use rand::prelude::*;
+//!
+//! if rand::random() { // generates a boolean
+//! // Try printing a random unicode code point (probably a bad idea)!
+//! println!("char: {}", rand::random::<char>());
//! }
-//! ```
-//!
-//! ```rust
-//! let tuple = rand::random::<(f64, char)>();
-//! println!("{:?}", tuple)
-//! ```
-//!
-//! ## Monte Carlo estimation of π
-//!
-//! For this example, imagine we have a square with sides of length 2 and a unit
-//! circle, both centered at the origin. Since the area of a unit circle is π,
-//! we have:
-//!
-//! ```text
-//! (area of unit circle) / (area of square) = π / 4
-//! ```
//!
-//! So if we sample many points randomly from the square, roughly π / 4 of them
-//! should be inside the circle.
-//!
-//! We can use the above fact to estimate the value of π: pick many points in
-//! the square at random, calculate the fraction that fall within the circle,
-//! and multiply this fraction by 4.
-//!
-//! ```
-//! use rand::distributions::{IndependentSample, Range};
-//!
-//! fn main() {
-//! let between = Range::new(-1f64, 1.);
-//! let mut rng = rand::thread_rng();
-//!
-//! let total = 1_000_000;
-//! let mut in_circle = 0;
-//!
-//! for _ in 0..total {
-//! let a = between.ind_sample(&mut rng);
-//! let b = between.ind_sample(&mut rng);
-//! if a*a + b*b <= 1. {
-//! in_circle += 1;
-//! }
-//! }
-//!
-//! // prints something close to 3.14159...
-//! println!("{}", 4. * (in_circle as f64) / (total as f64));
-//! }
-//! ```
-//!
-//! ## Monty Hall Problem
-//!
-//! This is a simulation of the [Monty Hall Problem][]:
-//!
-//! > Suppose you're on a game show, and you're given the choice of three doors:
-//! > Behind one door is a car; behind the others, goats. You pick a door, say
-//! > No. 1, and the host, who knows what's behind the doors, opens another
-//! > door, say No. 3, which has a goat. He then says to you, "Do you want to
-//! > pick door No. 2?" Is it to your advantage to switch your choice?
-//!
-//! The rather unintuitive answer is that you will have a 2/3 chance of winning
-//! if you switch and a 1/3 chance of winning if you don't, so it's better to
-//! switch.
-//!
-//! This program will simulate the game show and with large enough simulation
-//! steps it will indeed confirm that it is better to switch.
-//!
-//! [Monty Hall Problem]: http://en.wikipedia.org/wiki/Monty_Hall_problem
+//! let mut rng = rand::thread_rng();
+//! let y: f64 = rng.gen(); // generates a float between 0 and 1
//!
+//! let mut nums: Vec<i32> = (1..100).collect();
+//! nums.shuffle(&mut rng);
//! ```
-//! use rand::Rng;
-//! use rand::distributions::{IndependentSample, Range};
-//!
-//! struct SimulationResult {
-//! win: bool,
-//! switch: bool,
-//! }
-//!
-//! // Run a single simulation of the Monty Hall problem.
-//! fn simulate<R: Rng>(random_door: &Range<u32>, rng: &mut R)
-//! -> SimulationResult {
-//! let car = random_door.ind_sample(rng);
-//!
-//! // This is our initial choice
-//! let mut choice = random_door.ind_sample(rng);
-//!
-//! // The game host opens a door
-//! let open = game_host_open(car, choice, rng);
//!
-//! // Shall we switch?
-//! let switch = rng.gen();
-//! if switch {
-//! choice = switch_door(choice, open);
-//! }
+//! # The Book
+//!
+//! For the user guide and further documentation, please read
+//! [The Rust Rand Book](https://rust-random.github.io/book).
//!
-//! SimulationResult { win: choice == car, switch: switch }
-//! }
-//!
-//! // Returns the door the game host opens given our choice and knowledge of
-//! // where the car is. The game host will never open the door with the car.
-//! fn game_host_open<R: Rng>(car: u32, choice: u32, rng: &mut R) -> u32 {
-//! let choices = free_doors(&[car, choice]);
-//! rand::seq::sample_slice(rng, &choices, 1)[0]
-//! }
-//!
-//! // Returns the door we switch to, given our current choice and
-//! // the open door. There will only be one valid door.
-//! fn switch_door(choice: u32, open: u32) -> u32 {
-//! free_doors(&[choice, open])[0]
-//! }
-//!
-//! fn free_doors(blocked: &[u32]) -> Vec<u32> {
-//! (0..3).filter(|x| !blocked.contains(x)).collect()
-//! }
-//!
-//! fn main() {
-//! // The estimation will be more accurate with more simulations
-//! let num_simulations = 10000;
-//!
-//! let mut rng = rand::thread_rng();
-//! let random_door = Range::new(0, 3);
-//!
-//! let (mut switch_wins, mut switch_losses) = (0, 0);
-//! let (mut keep_wins, mut keep_losses) = (0, 0);
-//!
-//! println!("Running {} simulations...", num_simulations);
-//! for _ in 0..num_simulations {
-//! let result = simulate(&random_door, &mut rng);
-//!
-//! match (result.win, result.switch) {
-//! (true, true) => switch_wins += 1,
-//! (true, false) => keep_wins += 1,
-//! (false, true) => switch_losses += 1,
-//! (false, false) => keep_losses += 1,
-//! }
-//! }
-//!
-//! let total_switches = switch_wins + switch_losses;
-//! let total_keeps = keep_wins + keep_losses;
-//!
-//! println!("Switched door {} times with {} wins and {} losses",
-//! total_switches, switch_wins, switch_losses);
-//!
-//! println!("Kept our choice {} times with {} wins and {} losses",
-//! total_keeps, keep_wins, keep_losses);
-//!
-//! // With a large number of simulations, the values should converge to
-//! // 0.667 and 0.333 respectively.
-//! println!("Estimated chance to win if we switch: {}",
-//! switch_wins as f32 / total_switches as f32);
-//! println!("Estimated chance to win if we don't: {}",
-//! keep_wins as f32 / total_keeps as f32);
-//! }
-//! ```
+//! [`distributions` module]: distributions/index.html
+//! [`random()`]: fn.random.html
+//! [`Rng`]: trait.Rng.html
+//! [`seq` module]: seq/index.html
+//! [`thread_rng()`]: fn.thread_rng.html
+
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
- html_root_url = "https://docs.rs/rand/0.4")]
+ html_root_url = "https://rust-random.github.io/rand/")]
+#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
+#![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![cfg_attr(not(feature="std"), no_std)]
#![cfg_attr(all(feature="alloc", not(feature="std")), feature(alloc))]
-#![cfg_attr(feature = "i128_support", feature(i128_type, i128))]
+#![cfg_attr(all(feature="simd_support", feature="nightly"), feature(stdsimd))]
+#![cfg_attr(feature = "stdweb", recursion_limit="128")]
-#[cfg(feature="std")] extern crate std as core;
-#[cfg(all(feature = "alloc", not(feature="std")))] extern crate alloc;
+#[cfg(feature = "std")] extern crate core;
+#[cfg(all(feature = "alloc", not(feature="std")))] #[macro_use] extern crate alloc;
-use core::marker;
-use core::mem;
-#[cfg(feature="std")] use std::cell::RefCell;
-#[cfg(feature="std")] use std::io;
-#[cfg(feature="std")] use std::rc::Rc;
+#[cfg(feature="simd_support")] extern crate packed_simd;
-// external rngs
-pub use jitter::JitterRng;
-#[cfg(feature="std")] pub use os::OsRng;
+#[cfg(all(target_arch="wasm32", not(target_os="emscripten"), feature="stdweb"))]
+#[macro_use]
+extern crate stdweb;
-// pseudo rngs
-pub use isaac::{IsaacRng, Isaac64Rng};
-pub use chacha::ChaChaRng;
-pub use prng::XorShiftRng;
+#[cfg(all(target_arch = "wasm32", feature = "wasm-bindgen"))]
+extern crate wasm_bindgen;
-// local use declarations
-#[cfg(target_pointer_width = "32")]
-use prng::IsaacRng as IsaacWordRng;
-#[cfg(target_pointer_width = "64")]
-use prng::Isaac64Rng as IsaacWordRng;
+extern crate rand_core;
+extern crate rand_isaac; // only for deprecations
+extern crate rand_chacha; // only for deprecations
+extern crate rand_hc;
+extern crate rand_pcg;
+extern crate rand_xorshift;
-use distributions::{Range, IndependentSample};
-use distributions::range::SampleRange;
+#[cfg(feature = "log")] #[macro_use] extern crate log;
+#[allow(unused)]
+#[cfg(not(feature = "log"))] macro_rules! trace { ($($x:tt)*) => () }
+#[allow(unused)]
+#[cfg(not(feature = "log"))] macro_rules! debug { ($($x:tt)*) => () }
+#[allow(unused)]
+#[cfg(not(feature = "log"))] macro_rules! info { ($($x:tt)*) => () }
+#[allow(unused)]
+#[cfg(not(feature = "log"))] macro_rules! warn { ($($x:tt)*) => () }
+#[allow(unused)]
+#[cfg(not(feature = "log"))] macro_rules! error { ($($x:tt)*) => () }
-// public modules
-pub mod distributions;
-pub mod jitter;
-#[cfg(feature="std")] pub mod os;
-#[cfg(feature="std")] pub mod read;
-pub mod reseeding;
-#[cfg(any(feature="std", feature = "alloc"))] pub mod seq;
-// These tiny modules are here to avoid API breakage, probably only temporarily
+// Re-exports from rand_core
+pub use rand_core::{RngCore, CryptoRng, SeedableRng};
+pub use rand_core::{ErrorKind, Error};
+
+// Public exports
+#[cfg(feature="std")] pub use rngs::thread::thread_rng;
+
+// Public modules
+pub mod distributions;
+pub mod prelude;
+#[deprecated(since="0.6.0")]
+pub mod prng;
+pub mod rngs;
+pub mod seq;
+
+////////////////////////////////////////////////////////////////////////////////
+// Compatibility re-exports. Documentation is hidden; will be removed eventually.
+
+#[doc(hidden)] mod deprecated;
+
+#[allow(deprecated)]
+#[doc(hidden)] pub use deprecated::ReseedingRng;
+
+#[allow(deprecated)]
+#[cfg(feature="std")] #[doc(hidden)] pub use deprecated::EntropyRng;
+
+#[allow(deprecated)]
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+#[doc(hidden)]
+pub use deprecated::OsRng;
+
+#[allow(deprecated)]
+#[doc(hidden)] pub use deprecated::{ChaChaRng, IsaacRng, Isaac64Rng, XorShiftRng};
+#[allow(deprecated)]
+#[doc(hidden)] pub use deprecated::StdRng;
+
+
+#[allow(deprecated)]
+#[doc(hidden)]
+pub mod jitter {
+ pub use deprecated::JitterRng;
+ pub use rngs::TimerError;
+}
+#[allow(deprecated)]
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+#[doc(hidden)]
+pub mod os {
+ pub use deprecated::OsRng;
+}
+#[allow(deprecated)]
+#[doc(hidden)]
pub mod chacha {
- //! The ChaCha random number generator.
- pub use prng::ChaChaRng;
+ pub use deprecated::ChaChaRng;
}
+#[allow(deprecated)]
+#[doc(hidden)]
pub mod isaac {
- //! The ISAAC random number generator.
- pub use prng::{IsaacRng, Isaac64Rng};
+ pub use deprecated::{IsaacRng, Isaac64Rng};
}
+#[allow(deprecated)]
+#[cfg(feature="std")]
+#[doc(hidden)]
+pub mod read {
+ pub use deprecated::ReadRng;
+}
+
+#[allow(deprecated)]
+#[cfg(feature="std")] #[doc(hidden)] pub use deprecated::ThreadRng;
-// private modules
-mod rand_impls;
-mod prng;
+////////////////////////////////////////////////////////////////////////////////
-/// A type that can be randomly generated using an `Rng`.
+use core::{mem, slice};
+use distributions::{Distribution, Standard};
+use distributions::uniform::{SampleUniform, UniformSampler, SampleBorrow};
+
+/// An automatically-implemented extension trait on [`RngCore`] providing high-level
+/// generic methods for sampling values and other convenience methods.
///
-/// ## Built-in Implementations
+/// This is the primary trait to use when generating random values.
///
-/// This crate implements `Rand` for various primitive types. Assuming the
-/// provided `Rng` is well-behaved, these implementations generate values with
-/// the following ranges and distributions:
+/// # Generic usage
///
-/// * Integers (`i32`, `u32`, `isize`, `usize`, etc.): Uniformly distributed
-/// over all values of the type.
-/// * `char`: Uniformly distributed over all Unicode scalar values, i.e. all
-/// code points in the range `0...0x10_FFFF`, except for the range
-/// `0xD800...0xDFFF` (the surrogate code points). This includes
-/// unassigned/reserved code points.
-/// * `bool`: Generates `false` or `true`, each with probability 0.5.
-/// * Floating point types (`f32` and `f64`): Uniformly distributed in the
-/// half-open range `[0, 1)`. (The [`Open01`], [`Closed01`], [`Exp1`], and
-/// [`StandardNormal`] wrapper types produce floating point numbers with
-/// alternative ranges or distributions.)
+/// The basic pattern is `fn foo<R: Rng + ?Sized>(rng: &mut R)`. Some
+/// things are worth noting here:
///
-/// [`Open01`]: struct.Open01.html
-/// [`Closed01`]: struct.Closed01.html
-/// [`Exp1`]: distributions/exponential/struct.Exp1.html
-/// [`StandardNormal`]: distributions/normal/struct.StandardNormal.html
+/// - Since `Rng: RngCore` and every `RngCore` implements `Rng`, it makes no
+/// difference whether we use `R: Rng` or `R: RngCore`.
+/// - The `+ ?Sized` un-bounding allows functions to be called directly on
+/// type-erased references; i.e. `foo(r)` where `r: &mut RngCore`. Without
+/// this it would be necessary to write `foo(&mut r)`.
///
-/// The following aggregate types also implement `Rand` as long as their
-/// component types implement it:
+/// An alternative pattern is possible: `fn foo<R: Rng>(rng: R)`. This has some
+/// trade-offs. It allows the argument to be consumed directly without a `&mut`
+/// (which is how `from_rng(thread_rng())` works); also it still works directly
+/// on references (including type-erased references). Unfortunately within the
+/// function `foo` it is not known whether `rng` is a reference type or not,
+/// hence many uses of `rng` require an extra reference, either explicitly
+/// (`distr.sample(&mut rng)`) or implicitly (`rng.gen()`); one may hope the
+/// optimiser can remove redundant references later.
///
-/// * Tuples and arrays: Each element of the tuple or array is generated
-/// independently, using its own `Rand` implementation.
-/// * `Option<T>`: Returns `None` with probability 0.5; otherwise generates a
-/// random `T` and returns `Some(T)`.
-pub trait Rand : Sized {
- /// Generates a random instance of this type using the specified source of
- /// randomness.
- fn rand<R: Rng>(rng: &mut R) -> Self;
-}
-
-/// A random number generator.
-pub trait Rng {
- /// Return the next random u32.
- ///
- /// This rarely needs to be called directly, prefer `r.gen()` to
- /// `r.next_u32()`.
- // FIXME #rust-lang/rfcs#628: Should be implemented in terms of next_u64
- fn next_u32(&mut self) -> u32;
-
- /// Return the next random u64.
- ///
- /// By default this is implemented in terms of `next_u32`. An
- /// implementation of this trait must provide at least one of
- /// these two methods. Similarly to `next_u32`, this rarely needs
- /// to be called directly, prefer `r.gen()` to `r.next_u64()`.
- fn next_u64(&mut self) -> u64 {
- ((self.next_u32() as u64) << 32) | (self.next_u32() as u64)
- }
-
- /// Return the next random f32 selected from the half-open
- /// interval `[0, 1)`.
- ///
- /// This uses a technique described by Saito and Matsumoto at
- /// MCQMC'08. Given that the IEEE floating point numbers are
- /// uniformly distributed over [1,2), we generate a number in
- /// this range and then offset it onto the range [0,1). Our
- /// choice of bits (masking v. shifting) is arbitrary and
- /// should be immaterial for high quality generators. For low
- /// quality generators (ex. LCG), prefer bitshifting due to
- /// correlation between sequential low order bits.
- ///
- /// See:
- /// A PRNG specialized in double precision floating point numbers using
- /// an affine transition
- ///
- /// * <http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/ARTICLES/dSFMT.pdf>
- /// * <http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/dSFMT-slide-e.pdf>
+/// Example:
+///
+/// ```
+/// # use rand::thread_rng;
+/// use rand::Rng;
+///
+/// fn foo<R: Rng + ?Sized>(rng: &mut R) -> f32 {
+/// rng.gen()
+/// }
+///
+/// # let v = foo(&mut thread_rng());
+/// ```
+///
+/// [`RngCore`]: trait.RngCore.html
+pub trait Rng: RngCore {
+ /// Return a random value supporting the [`Standard`] distribution.
///
- /// By default this is implemented in terms of `next_u32`, but a
- /// random number generator which can generate numbers satisfying
- /// the requirements directly can overload this for performance.
- /// It is required that the return value lies in `[0, 1)`.
+ /// [`Standard`]: distributions/struct.Standard.html
///
- /// See `Closed01` for the closed interval `[0,1]`, and
- /// `Open01` for the open interval `(0,1)`.
- fn next_f32(&mut self) -> f32 {
- const UPPER_MASK: u32 = 0x3F800000;
- const LOWER_MASK: u32 = 0x7FFFFF;
- let tmp = UPPER_MASK | (self.next_u32() & LOWER_MASK);
- let result: f32 = unsafe { mem::transmute(tmp) };
- result - 1.0
- }
-
- /// Return the next random f64 selected from the half-open
- /// interval `[0, 1)`.
+ /// # Example
///
- /// By default this is implemented in terms of `next_u64`, but a
- /// random number generator which can generate numbers satisfying
- /// the requirements directly can overload this for performance.
- /// It is required that the return value lies in `[0, 1)`.
+ /// ```
+ /// use rand::{thread_rng, Rng};
///
- /// See `Closed01` for the closed interval `[0,1]`, and
- /// `Open01` for the open interval `(0,1)`.
- fn next_f64(&mut self) -> f64 {
- const UPPER_MASK: u64 = 0x3FF0000000000000;
- const LOWER_MASK: u64 = 0xFFFFFFFFFFFFF;
- let tmp = UPPER_MASK | (self.next_u64() & LOWER_MASK);
- let result: f64 = unsafe { mem::transmute(tmp) };
- result - 1.0
+ /// let mut rng = thread_rng();
+ /// let x: u32 = rng.gen();
+ /// println!("{}", x);
+ /// println!("{:?}", rng.gen::<(f64, bool)>());
+ /// ```
+ #[inline]
+ fn gen<T>(&mut self) -> T where Standard: Distribution<T> {
+ Standard.sample(self)
}
- /// Fill `dest` with random data.
+ /// Generate a random value in the range [`low`, `high`), i.e. inclusive of
+ /// `low` and exclusive of `high`.
///
- /// This has a default implementation in terms of `next_u64` and
- /// `next_u32`, but should be overridden by implementations that
- /// offer a more efficient solution than just calling those
- /// methods repeatedly.
+ /// This function is optimised for the case that only a single sample is
+ /// made from the given range. See also the [`Uniform`] distribution
+ /// type which may be faster if sampling from the same range repeatedly.
///
- /// This method does *not* have a requirement to bear any fixed
- /// relationship to the other methods, for example, it does *not*
- /// have to result in the same output as progressively filling
- /// `dest` with `self.gen::<u8>()`, and any such behaviour should
- /// not be relied upon.
+ /// # Panics
///
- /// This method should guarantee that `dest` is entirely filled
- /// with new data, and may panic if this is impossible
- /// (e.g. reading past the end of a file that is being used as the
- /// source of randomness).
+ /// Panics if `low >= high`.
///
/// # Example
///
- /// ```rust
+ /// ```
/// use rand::{thread_rng, Rng};
///
- /// let mut v = [0u8; 13579];
- /// thread_rng().fill_bytes(&mut v);
- /// println!("{:?}", &v[..]);
+ /// let mut rng = thread_rng();
+ /// let n: u32 = rng.gen_range(0, 10);
+ /// println!("{}", n);
+ /// let m: f64 = rng.gen_range(-40.0f64, 1.3e5f64);
+ /// println!("{}", m);
/// ```
- fn fill_bytes(&mut self, dest: &mut [u8]) {
- // this could, in theory, be done by transmuting dest to a
- // [u64], but this is (1) likely to be undefined behaviour for
- // LLVM, (2) has to be very careful about alignment concerns,
- // (3) adds more `unsafe` that needs to be checked, (4)
- // probably doesn't give much performance gain if
- // optimisations are on.
- let mut count = 0;
- let mut num = 0;
- for byte in dest.iter_mut() {
- if count == 0 {
- // we could micro-optimise here by generating a u32 if
- // we only need a few more bytes to fill the vector
- // (i.e. at most 4).
- num = self.next_u64();
- count = 8;
- }
-
- *byte = (num & 0xff) as u8;
- num >>= 8;
- count -= 1;
- }
+ ///
+ /// [`Uniform`]: distributions/uniform/struct.Uniform.html
+ fn gen_range<T: SampleUniform, B1, B2>(&mut self, low: B1, high: B2) -> T
+ where B1: SampleBorrow<T> + Sized,
+ B2: SampleBorrow<T> + Sized {
+ T::Sampler::sample_single(low, high, self)
}
- /// Return a random value of a `Rand` type.
+ /// Sample a new value, using the given distribution.
///
- /// # Example
+ /// ### Example
///
- /// ```rust
+ /// ```
/// use rand::{thread_rng, Rng};
+ /// use rand::distributions::Uniform;
///
/// let mut rng = thread_rng();
- /// let x: u32 = rng.gen();
- /// println!("{}", x);
- /// println!("{:?}", rng.gen::<(f64, bool)>());
+ /// let x = rng.sample(Uniform::new(10u32, 15));
+ /// // Type annotation requires two types, the type and distribution; the
+ /// // distribution can be inferred.
+ /// let y = rng.sample::<u16, _>(Uniform::new(10, 15));
/// ```
- #[inline(always)]
- fn gen<T: Rand>(&mut self) -> T where Self: Sized {
- Rand::rand(self)
+ fn sample<T, D: Distribution<T>>(&mut self, distr: D) -> T {
+ distr.sample(self)
}
- /// Return an iterator that will yield an infinite number of randomly
- /// generated items.
+ /// Create an iterator that generates values using the given distribution.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
+ /// use rand::distributions::{Alphanumeric, Uniform, Standard};
///
/// let mut rng = thread_rng();
- /// let x = rng.gen_iter::<u32>().take(10).collect::<Vec<u32>>();
- /// println!("{:?}", x);
- /// println!("{:?}", rng.gen_iter::<(f64, bool)>().take(5)
- /// .collect::<Vec<(f64, bool)>>());
- /// ```
- fn gen_iter<'a, T: Rand>(&'a mut self) -> Generator<'a, T, Self> where Self: Sized {
- Generator { rng: self, _marker: marker::PhantomData }
- }
-
- /// Generate a random value in the range [`low`, `high`).
- ///
- /// This is a convenience wrapper around
- /// `distributions::Range`. If this function will be called
- /// repeatedly with the same arguments, one should use `Range`, as
- /// that will amortize the computations that allow for perfect
- /// uniformity, as they only happen on initialization.
- ///
- /// # Panics
///
- /// Panics if `low >= high`.
+ /// // Vec of 16 x f32:
+ /// let v: Vec<f32> = thread_rng().sample_iter(&Standard).take(16).collect();
///
- /// # Example
+ /// // String:
+ /// let s: String = rng.sample_iter(&Alphanumeric).take(7).collect();
///
- /// ```rust
- /// use rand::{thread_rng, Rng};
+ /// // Combined values
+ /// println!("{:?}", thread_rng().sample_iter(&Standard).take(5)
+ /// .collect::<Vec<(f64, bool)>>());
///
- /// let mut rng = thread_rng();
- /// let n: u32 = rng.gen_range(0, 10);
- /// println!("{}", n);
- /// let m: f64 = rng.gen_range(-40.0f64, 1.3e5f64);
- /// println!("{}", m);
+ /// // Dice-rolling:
+ /// let die_range = Uniform::new_inclusive(1, 6);
+ /// let mut roll_die = rng.sample_iter(&die_range);
+ /// while roll_die.next().unwrap() != 6 {
+ /// println!("Not a 6; rolling again!");
+ /// }
/// ```
- fn gen_range<T: PartialOrd + SampleRange>(&mut self, low: T, high: T) -> T where Self: Sized {
- assert!(low < high, "Rng.gen_range called with low >= high");
- Range::new(low, high).ind_sample(self)
+ fn sample_iter<'a, T, D: Distribution<T>>(&'a mut self, distr: &'a D)
+ -> distributions::DistIter<'a, D, Self, T> where Self: Sized
+ {
+ distr.sample_iter(self)
}
- /// Return a bool with a 1 in n chance of true
+ /// Fill `dest` entirely with random bytes (uniform value distribution),
+ /// where `dest` is any type supporting [`AsByteSliceMut`], namely slices
+ /// and arrays over primitive integer types (`i8`, `i16`, `u32`, etc.).
+ ///
+ /// On big-endian platforms this performs byte-swapping to ensure
+ /// portability of results from reproducible generators.
+ ///
+ /// This uses [`fill_bytes`] internally which may handle some RNG errors
+ /// implicitly (e.g. waiting if the OS generator is not ready), but panics
+ /// on other errors. See also [`try_fill`] which returns errors.
///
/// # Example
///
- /// ```rust
+ /// ```
/// use rand::{thread_rng, Rng};
///
- /// let mut rng = thread_rng();
- /// println!("{}", rng.gen_weighted_bool(3));
+ /// let mut arr = [0i8; 20];
+ /// thread_rng().fill(&mut arr[..]);
/// ```
- fn gen_weighted_bool(&mut self, n: u32) -> bool where Self: Sized {
- n <= 1 || self.gen_range(0, n) == 0
+ ///
+ /// [`fill_bytes`]: trait.RngCore.html#method.fill_bytes
+ /// [`try_fill`]: trait.Rng.html#method.try_fill
+ /// [`AsByteSliceMut`]: trait.AsByteSliceMut.html
+ fn fill<T: AsByteSliceMut + ?Sized>(&mut self, dest: &mut T) {
+ self.fill_bytes(dest.as_byte_slice_mut());
+ dest.to_le();
}
- /// Return an iterator of random characters from the set A-Z,a-z,0-9.
+ /// Fill `dest` entirely with random bytes (uniform value distribution),
+ /// where `dest` is any type supporting [`AsByteSliceMut`], namely slices
+ /// and arrays over primitive integer types (`i8`, `i16`, `u32`, etc.).
+ ///
+ /// On big-endian platforms this performs byte-swapping to ensure
+ /// portability of results from reproducible generators.
+ ///
+ /// This uses [`try_fill_bytes`] internally and forwards all RNG errors. In
+ /// some cases errors may be resolvable; see [`ErrorKind`] and
+ /// documentation for the RNG in use. If you do not plan to handle these
+ /// errors you may prefer to use [`fill`].
///
/// # Example
///
- /// ```rust
+ /// ```
+ /// # use rand::Error;
/// use rand::{thread_rng, Rng};
///
- /// let s: String = thread_rng().gen_ascii_chars().take(10).collect();
- /// println!("{}", s);
+ /// # fn try_inner() -> Result<(), Error> {
+ /// let mut arr = [0u64; 4];
+ /// thread_rng().try_fill(&mut arr[..])?;
+ /// # Ok(())
+ /// # }
+ ///
+ /// # try_inner().unwrap()
/// ```
- fn gen_ascii_chars<'a>(&'a mut self) -> AsciiGenerator<'a, Self> where Self: Sized {
- AsciiGenerator { rng: self }
+ ///
+ /// [`ErrorKind`]: enum.ErrorKind.html
+ /// [`try_fill_bytes`]: trait.RngCore.html#method.try_fill_bytes
+ /// [`fill`]: trait.Rng.html#method.fill
+ /// [`AsByteSliceMut`]: trait.AsByteSliceMut.html
+ fn try_fill<T: AsByteSliceMut + ?Sized>(&mut self, dest: &mut T) -> Result<(), Error> {
+ self.try_fill_bytes(dest.as_byte_slice_mut())?;
+ dest.to_le();
+ Ok(())
}
- /// Return a random element from `values`.
+ /// Return a bool with a probability `p` of being true.
///
- /// Return `None` if `values` is empty.
+ /// See also the [`Bernoulli`] distribution, which may be faster if
+ /// sampling from the same probability repeatedly.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
///
- /// let choices = [1, 2, 4, 8, 16, 32];
/// let mut rng = thread_rng();
- /// println!("{:?}", rng.choose(&choices));
- /// assert_eq!(rng.choose(&choices[..0]), None);
+ /// println!("{}", rng.gen_bool(1.0 / 3.0));
/// ```
- fn choose<'a, T>(&mut self, values: &'a [T]) -> Option<&'a T> where Self: Sized {
- if values.is_empty() {
- None
- } else {
- Some(&values[self.gen_range(0, values.len())])
- }
- }
-
- /// Return a mutable pointer to a random element from `values`.
///
- /// Return `None` if `values` is empty.
- fn choose_mut<'a, T>(&mut self, values: &'a mut [T]) -> Option<&'a mut T> where Self: Sized {
- if values.is_empty() {
- None
- } else {
- let len = values.len();
- Some(&mut values[self.gen_range(0, len)])
- }
+ /// # Panics
+ ///
+ /// If `p < 0` or `p > 1`.
+ ///
+ /// [`Bernoulli`]: distributions/bernoulli/struct.Bernoulli.html
+ #[inline]
+ fn gen_bool(&mut self, p: f64) -> bool {
+ let d = distributions::Bernoulli::new(p);
+ self.sample(d)
}
- /// Shuffle a mutable slice in place.
+ /// Return a bool with a probability of `numerator/denominator` of being
+    /// true. That is, `gen_ratio(2, 3)` has a 2 in 3 (about 67%) chance of
+ /// returning true. If `numerator == denominator`, then the returned value
+ /// is guaranteed to be `true`. If `numerator == 0`, then the returned
+ /// value is guaranteed to be `false`.
+ ///
+ /// See also the [`Bernoulli`] distribution, which may be faster if
+ /// sampling from the same `numerator` and `denominator` repeatedly.
///
- /// This applies Durstenfeld's algorithm for the [Fisher–Yates shuffle](https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#The_modern_algorithm)
- /// which produces an unbiased permutation.
+ /// # Panics
+ ///
+ /// If `denominator == 0` or `numerator > denominator`.
///
/// # Example
///
- /// ```rust
+ /// ```
/// use rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
- /// let mut y = [1, 2, 3];
- /// rng.shuffle(&mut y);
- /// println!("{:?}", y);
- /// rng.shuffle(&mut y);
- /// println!("{:?}", y);
+ /// println!("{}", rng.gen_ratio(2, 3));
/// ```
- fn shuffle<T>(&mut self, values: &mut [T]) where Self: Sized {
- let mut i = values.len();
- while i >= 2 {
- // invariant: elements with index >= i have been locked in place.
- i -= 1;
- // lock element i in place.
- values.swap(i, self.gen_range(0, i + 1));
- }
- }
-}
-
-impl<'a, R: ?Sized> Rng for &'a mut R where R: Rng {
- fn next_u32(&mut self) -> u32 {
- (**self).next_u32()
- }
-
- fn next_u64(&mut self) -> u64 {
- (**self).next_u64()
+ ///
+ /// [`Bernoulli`]: distributions/bernoulli/struct.Bernoulli.html
+ #[inline]
+ fn gen_ratio(&mut self, numerator: u32, denominator: u32) -> bool {
+ let d = distributions::Bernoulli::from_ratio(numerator, denominator);
+ self.sample(d)
}
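
The documentation above recommends `Bernoulli` when the same ratio is sampled repeatedly. A minimal sketch of that pattern, assuming the rand 0.6 `Bernoulli::from_ratio` API that `gen_ratio` itself calls in this patch (the sketch is illustrative and not part of the patch):

```rust
// Repeated sampling with a fixed ratio via `Bernoulli`, as suggested by the
// `gen_ratio` docs: the distribution is constructed once and reused.
use rand::distributions::{Bernoulli, Distribution};
use rand::thread_rng;

fn main() {
    let d = Bernoulli::from_ratio(2, 3); // same ratio as `rng.gen_ratio(2, 3)`
    let mut rng = thread_rng();
    let hits = (0..1_000).filter(|_| d.sample(&mut rng)).count();
    println!("roughly two thirds of 1000 draws were true: {}", hits);
}
```
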
- fn next_f32(&mut self) -> f32 {
- (**self).next_f32()
+ /// Return a random element from `values`.
+ ///
+ /// Deprecated: use [`SliceRandom::choose`] instead.
+ ///
+ /// [`SliceRandom::choose`]: seq/trait.SliceRandom.html#method.choose
+ #[deprecated(since="0.6.0", note="use SliceRandom::choose instead")]
+ fn choose<'a, T>(&mut self, values: &'a [T]) -> Option<&'a T> {
+ use seq::SliceRandom;
+ values.choose(self)
}
- fn next_f64(&mut self) -> f64 {
- (**self).next_f64()
+ /// Return a mutable pointer to a random element from `values`.
+ ///
+ /// Deprecated: use [`SliceRandom::choose_mut`] instead.
+ ///
+ /// [`SliceRandom::choose_mut`]: seq/trait.SliceRandom.html#method.choose_mut
+ #[deprecated(since="0.6.0", note="use SliceRandom::choose_mut instead")]
+ fn choose_mut<'a, T>(&mut self, values: &'a mut [T]) -> Option<&'a mut T> {
+ use seq::SliceRandom;
+ values.choose_mut(self)
}
- fn fill_bytes(&mut self, dest: &mut [u8]) {
- (**self).fill_bytes(dest)
+ /// Shuffle a mutable slice in place.
+ ///
+ /// Deprecated: use [`SliceRandom::shuffle`] instead.
+ ///
+ /// [`SliceRandom::shuffle`]: seq/trait.SliceRandom.html#method.shuffle
+ #[deprecated(since="0.6.0", note="use SliceRandom::shuffle instead")]
+ fn shuffle<T>(&mut self, values: &mut [T]) {
+ use seq::SliceRandom;
+ values.shuffle(self)
}
}
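
The deprecated `choose`, `choose_mut` and `shuffle` methods above point to `seq::SliceRandom`. A short sketch of the replacement calls, assuming the rand 0.6 `SliceRandom` trait (not part of the patch):

```rust
// The slice-based replacements for the deprecated Rng methods above.
use rand::seq::SliceRandom;
use rand::thread_rng;

fn main() {
    let mut rng = thread_rng();
    let choices = [1, 2, 4, 8, 16, 32];
    println!("{:?}", choices.choose(&mut rng)); // Some(&element); None for an empty slice
    let mut y = [1, 2, 3];
    y.shuffle(&mut rng); // Fisher-Yates shuffle in place
    println!("{:?}", y);
}
```
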
-#[cfg(feature="std")]
-impl<R: ?Sized> Rng for Box<R> where R: Rng {
- fn next_u32(&mut self) -> u32 {
- (**self).next_u32()
- }
-
- fn next_u64(&mut self) -> u64 {
- (**self).next_u64()
- }
-
- fn next_f32(&mut self) -> f32 {
- (**self).next_f32()
- }
-
- fn next_f64(&mut self) -> f64 {
- (**self).next_f64()
- }
-
- fn fill_bytes(&mut self, dest: &mut [u8]) {
- (**self).fill_bytes(dest)
- }
-}
+impl<R: RngCore + ?Sized> Rng for R {}
-/// Iterator which will generate a stream of random items.
+/// Trait for casting types to byte slices
///
-/// This iterator is created via the [`gen_iter`] method on [`Rng`].
+/// This is used by the [`fill`] and [`try_fill`] methods.
///
-/// [`gen_iter`]: trait.Rng.html#method.gen_iter
-/// [`Rng`]: trait.Rng.html
-#[derive(Debug)]
-pub struct Generator<'a, T, R:'a> {
- rng: &'a mut R,
- _marker: marker::PhantomData<fn() -> T>,
-}
-
-impl<'a, T: Rand, R: Rng> Iterator for Generator<'a, T, R> {
- type Item = T;
+/// [`fill`]: trait.Rng.html#method.fill
+/// [`try_fill`]: trait.Rng.html#method.try_fill
+pub trait AsByteSliceMut {
+ /// Return a mutable reference to self as a byte slice
+ fn as_byte_slice_mut(&mut self) -> &mut [u8];
- fn next(&mut self) -> Option<T> {
- Some(self.rng.gen())
- }
-}
-
-/// Iterator which will continuously generate random ascii characters.
-///
-/// This iterator is created via the [`gen_ascii_chars`] method on [`Rng`].
-///
-/// [`gen_ascii_chars`]: trait.Rng.html#method.gen_ascii_chars
-/// [`Rng`]: trait.Rng.html
-#[derive(Debug)]
-pub struct AsciiGenerator<'a, R:'a> {
- rng: &'a mut R,
+ /// Call `to_le` on each element (i.e. byte-swap on Big Endian platforms).
+ fn to_le(&mut self);
}
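
A small sketch of how `fill` ties `as_byte_slice_mut` and `to_le` together, using the deterministic `StepRng` this patch adds under `rngs::mock`; the expected values follow from its little-endian `fill_bytes` behaviour, which `test_fill` below also exercises:

```rust
// `fill` writes raw bytes, then `to_le` byte-swaps on big-endian targets,
// so reproducible generators yield the same integers on every platform.
use rand::Rng;
use rand::rngs::mock::StepRng;

fn main() {
    let mut rng = StepRng::new(0x0102_0304_0506_0708, 0);
    let mut words = [0u16; 4];
    rng.fill(&mut words[..]);
    // Identical on little- and big-endian hosts:
    assert_eq!(words, [0x0708, 0x0506, 0x0304, 0x0102]);
}
```
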
-impl<'a, R: Rng> Iterator for AsciiGenerator<'a, R> {
- type Item = char;
-
- fn next(&mut self) -> Option<char> {
- const GEN_ASCII_STR_CHARSET: &'static [u8] =
- b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
- abcdefghijklmnopqrstuvwxyz\
- 0123456789";
- Some(*self.rng.choose(GEN_ASCII_STR_CHARSET).unwrap() as char)
+impl AsByteSliceMut for [u8] {
+ fn as_byte_slice_mut(&mut self) -> &mut [u8] {
+ self
}
-}
-/// A random number generator that can be explicitly seeded to produce
-/// the same stream of randomness multiple times.
-pub trait SeedableRng<Seed>: Rng {
- /// Reseed an RNG with the given seed.
- ///
- /// # Example
- ///
- /// ```rust
- /// use rand::{Rng, SeedableRng, StdRng};
- ///
- /// let seed: &[_] = &[1, 2, 3, 4];
- /// let mut rng: StdRng = SeedableRng::from_seed(seed);
- /// println!("{}", rng.gen::<f64>());
- /// rng.reseed(&[5, 6, 7, 8]);
- /// println!("{}", rng.gen::<f64>());
- /// ```
- fn reseed(&mut self, Seed);
-
- /// Create a new RNG with the given seed.
- ///
- /// # Example
- ///
- /// ```rust
- /// use rand::{Rng, SeedableRng, StdRng};
- ///
- /// let seed: &[_] = &[1, 2, 3, 4];
- /// let mut rng: StdRng = SeedableRng::from_seed(seed);
- /// println!("{}", rng.gen::<f64>());
- /// ```
- fn from_seed(seed: Seed) -> Self;
-}
-
-/// A wrapper for generating floating point numbers uniformly in the
-/// open interval `(0,1)` (not including either endpoint).
-///
-/// Use `Closed01` for the closed interval `[0,1]`, and the default
-/// `Rand` implementation for `f32` and `f64` for the half-open
-/// `[0,1)`.
-///
-/// # Example
-/// ```rust
-/// use rand::{random, Open01};
-///
-/// let Open01(val) = random::<Open01<f32>>();
-/// println!("f32 from (0,1): {}", val);
-/// ```
-#[derive(Debug)]
-pub struct Open01<F>(pub F);
-
-/// A wrapper for generating floating point numbers uniformly in the
-/// closed interval `[0,1]` (including both endpoints).
-///
-/// Use `Open01` for the closed interval `(0,1)`, and the default
-/// `Rand` implementation of `f32` and `f64` for the half-open
-/// `[0,1)`.
-///
-/// # Example
-///
-/// ```rust
-/// use rand::{random, Closed01};
-///
-/// let Closed01(val) = random::<Closed01<f32>>();
-/// println!("f32 from [0,1]: {}", val);
-/// ```
-#[derive(Debug)]
-pub struct Closed01<F>(pub F);
-
-/// The standard RNG. This is designed to be efficient on the current
-/// platform.
-#[derive(Copy, Clone, Debug)]
-pub struct StdRng {
- rng: IsaacWordRng,
+ fn to_le(&mut self) {}
}
-impl StdRng {
- /// Create a randomly seeded instance of `StdRng`.
- ///
- /// This is a very expensive operation as it has to read
- /// randomness from the operating system and use this in an
- /// expensive seeding operation. If one is only generating a small
- /// number of random numbers, or doesn't need the utmost speed for
- /// generating each number, `thread_rng` and/or `random` may be more
- /// appropriate.
- ///
- /// Reading the randomness from the OS may fail, and any error is
- /// propagated via the `io::Result` return value.
- #[cfg(feature="std")]
- pub fn new() -> io::Result<StdRng> {
- match OsRng::new() {
- Ok(mut r) => Ok(StdRng { rng: r.gen() }),
- Err(e1) => {
- match JitterRng::new() {
- Ok(mut r) => Ok(StdRng { rng: r.gen() }),
- Err(_) => {
- Err(e1)
+macro_rules! impl_as_byte_slice {
+ ($t:ty) => {
+ impl AsByteSliceMut for [$t] {
+ fn as_byte_slice_mut(&mut self) -> &mut [u8] {
+ if self.len() == 0 {
+ unsafe {
+ // must not use null pointer
+ slice::from_raw_parts_mut(0x1 as *mut u8, 0)
+ }
+ } else {
+ unsafe {
+ slice::from_raw_parts_mut(&mut self[0]
+ as *mut $t
+ as *mut u8,
+ self.len() * mem::size_of::<$t>()
+ )
}
}
}
- }
- }
-}
-impl Rng for StdRng {
- #[inline]
- fn next_u32(&mut self) -> u32 {
- self.rng.next_u32()
- }
-
- #[inline]
- fn next_u64(&mut self) -> u64 {
- self.rng.next_u64()
- }
-}
-
-impl<'a> SeedableRng<&'a [usize]> for StdRng {
- fn reseed(&mut self, seed: &'a [usize]) {
- // the internal RNG can just be seeded from the above
- // randomness.
- self.rng.reseed(unsafe {mem::transmute(seed)})
- }
-
- fn from_seed(seed: &'a [usize]) -> StdRng {
- StdRng { rng: SeedableRng::from_seed(unsafe {mem::transmute(seed)}) }
+ fn to_le(&mut self) {
+ for x in self {
+ *x = x.to_le();
+ }
+ }
+ }
}
}
-/// Create a weak random number generator with a default algorithm and seed.
-///
-/// It returns the fastest `Rng` algorithm currently available in Rust without
-/// consideration for cryptography or security. If you require a specifically
-/// seeded `Rng` for consistency over time you should pick one algorithm and
-/// create the `Rng` yourself.
-///
-/// This will seed the generator with randomness from thread_rng.
-#[cfg(feature="std")]
-pub fn weak_rng() -> XorShiftRng {
- thread_rng().gen()
-}
+impl_as_byte_slice!(u16);
+impl_as_byte_slice!(u32);
+impl_as_byte_slice!(u64);
+#[cfg(rust_1_26)] impl_as_byte_slice!(u128);
+impl_as_byte_slice!(usize);
+impl_as_byte_slice!(i8);
+impl_as_byte_slice!(i16);
+impl_as_byte_slice!(i32);
+impl_as_byte_slice!(i64);
+#[cfg(rust_1_26)] impl_as_byte_slice!(i128);
+impl_as_byte_slice!(isize);
+
+macro_rules! impl_as_byte_slice_arrays {
+ ($n:expr,) => {};
+ ($n:expr, $N:ident, $($NN:ident,)*) => {
+ impl_as_byte_slice_arrays!($n - 1, $($NN,)*);
+
+ impl<T> AsByteSliceMut for [T; $n] where [T]: AsByteSliceMut {
+ fn as_byte_slice_mut(&mut self) -> &mut [u8] {
+ self[..].as_byte_slice_mut()
+ }
-/// Controls how the thread-local RNG is reseeded.
-#[cfg(feature="std")]
-#[derive(Debug)]
-struct ThreadRngReseeder;
+ fn to_le(&mut self) {
+ self[..].to_le()
+ }
+ }
+ };
+ (!div $n:expr,) => {};
+ (!div $n:expr, $N:ident, $($NN:ident,)*) => {
+ impl_as_byte_slice_arrays!(!div $n / 2, $($NN,)*);
+
+ impl<T> AsByteSliceMut for [T; $n] where [T]: AsByteSliceMut {
+ fn as_byte_slice_mut(&mut self) -> &mut [u8] {
+ self[..].as_byte_slice_mut()
+ }
-#[cfg(feature="std")]
-impl reseeding::Reseeder<StdRng> for ThreadRngReseeder {
- fn reseed(&mut self, rng: &mut StdRng) {
- match StdRng::new() {
- Ok(r) => *rng = r,
- Err(e) => panic!("No entropy available: {}", e),
+ fn to_le(&mut self) {
+ self[..].to_le()
+ }
}
- }
+ };
}
-#[cfg(feature="std")]
-const THREAD_RNG_RESEED_THRESHOLD: u64 = 32_768;
-#[cfg(feature="std")]
-type ThreadRngInner = reseeding::ReseedingRng<StdRng, ThreadRngReseeder>;
+impl_as_byte_slice_arrays!(32, N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,);
+impl_as_byte_slice_arrays!(!div 4096, N,N,N,N,N,N,N,);
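
The two invocations above generate `AsByteSliceMut` impls for arrays up to length 32 and for power-of-two lengths up to 4096, so `fill` can take such arrays directly. A hedged sketch (not part of the patch):

```rust
// With the array impls generated above, `fill` accepts fixed-size arrays
// (lengths up to 32, and powers of two up to 4096) without slicing.
use rand::{thread_rng, Rng};

fn main() {
    let mut small = [0u32; 24];   // length within the 0..=32 range
    let mut large = [0u8; 1024];  // power-of-two length up to 4096
    thread_rng().fill(&mut small);
    thread_rng().fill(&mut large);
    println!("{:?} {:?}", &small[..4], &large[..4]);
}
```
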
-/// The thread-local RNG.
-#[cfg(feature="std")]
-#[derive(Clone, Debug)]
-pub struct ThreadRng {
- rng: Rc<RefCell<ThreadRngInner>>,
-}
-/// Retrieve the lazily-initialized thread-local random number
-/// generator, seeded by the system. Intended to be used in method
-/// chaining style, e.g. `thread_rng().gen::<i32>()`.
+/// A convenience extension to [`SeedableRng`] allowing construction from fresh
+/// entropy. This trait is automatically implemented for any PRNG implementing
+/// [`SeedableRng`] and is not intended to be implemented by users.
+///
+/// This is equivalent to using `SeedableRng::from_rng(EntropyRng::new())` then
+/// unwrapping the result.
+///
+/// Since this is convenient and secure, it is the recommended way to create
+/// PRNGs, though two alternatives may be considered:
+///
+/// * Deterministic creation using [`SeedableRng::from_seed`] with a fixed seed
+/// * Seeding from `thread_rng`: `SeedableRng::from_rng(thread_rng())?`;
+/// this will usually be faster and should also be secure, but requires
+/// trusting one extra component.
///
-/// After generating a certain amount of randomness, the RNG will reseed itself
-/// from the operating system or, if the operating system RNG returns an error,
-/// a seed based on the current system time.
+/// ## Example
///
-/// The internal RNG used is platform and architecture dependent, even
-/// if the operating system random number generator is rigged to give
-/// the same sequence always. If absolute consistency is required,
-/// explicitly select an RNG, e.g. `IsaacRng` or `Isaac64Rng`.
+/// ```
+/// use rand::{Rng, FromEntropy};
+/// use rand::rngs::StdRng;
+///
+/// let mut rng = StdRng::from_entropy();
+/// println!("Random die roll: {}", rng.gen_range(1, 7));
+/// ```
+///
+/// [`EntropyRng`]: rngs/struct.EntropyRng.html
+/// [`SeedableRng`]: trait.SeedableRng.html
+/// [`SeedableRng::from_seed`]: trait.SeedableRng.html#tymethod.from_seed
#[cfg(feature="std")]
-pub fn thread_rng() -> ThreadRng {
- // used to make space in TLS for a random number generator
- thread_local!(static THREAD_RNG_KEY: Rc<RefCell<ThreadRngInner>> = {
- let r = match StdRng::new() {
- Ok(r) => r,
- Err(e) => panic!("No entropy available: {}", e),
- };
- let rng = reseeding::ReseedingRng::new(r,
- THREAD_RNG_RESEED_THRESHOLD,
- ThreadRngReseeder);
- Rc::new(RefCell::new(rng))
- });
-
- ThreadRng { rng: THREAD_RNG_KEY.with(|t| t.clone()) }
+pub trait FromEntropy: SeedableRng {
+ /// Creates a new instance, automatically seeded with fresh entropy.
+ ///
+ /// Normally this will use `OsRng`, but if that fails `JitterRng` will be
+ /// used instead. Both should be suitable for cryptography. It is possible
+    /// that both entropy sources will fail, though this is unlikely; failures
+    /// would almost certainly be due to platform limitations or build issues,
+    /// i.e. most applications targeting PC/mobile platforms should not need
+    /// to worry about this failing.
+ ///
+ /// # Panics
+ ///
+ /// If all entropy sources fail this will panic. If you need to handle
+ /// errors, use the following code, equivalent aside from error handling:
+ ///
+ /// ```
+ /// # use rand::Error;
+ /// use rand::prelude::*;
+ /// use rand::rngs::EntropyRng;
+ ///
+ /// # fn try_inner() -> Result<(), Error> {
+ /// // This uses StdRng, but is valid for any R: SeedableRng
+ /// let mut rng = StdRng::from_rng(EntropyRng::new())?;
+ ///
+ /// println!("random number: {}", rng.gen_range(1, 10));
+ /// # Ok(())
+ /// # }
+ ///
+ /// # try_inner().unwrap()
+ /// ```
+ fn from_entropy() -> Self;
}
#[cfg(feature="std")]
-impl Rng for ThreadRng {
- fn next_u32(&mut self) -> u32 {
- self.rng.borrow_mut().next_u32()
- }
-
- fn next_u64(&mut self) -> u64 {
- self.rng.borrow_mut().next_u64()
- }
-
- #[inline]
- fn fill_bytes(&mut self, bytes: &mut [u8]) {
- self.rng.borrow_mut().fill_bytes(bytes)
+impl<R: SeedableRng> FromEntropy for R {
+ fn from_entropy() -> R {
+ R::from_rng(rngs::EntropyRng::new()).unwrap_or_else(|err|
+ panic!("FromEntropy::from_entropy() failed: {}", err))
}
}
+
/// Generates a random value using the thread-local random number generator.
///
-/// `random()` can generate various types of random things, and so may require
-/// type hinting to generate the specific type you want.
-///
-/// This function uses the thread local random number generator. This means
-/// that if you're calling `random()` in a loop, caching the generator can
-/// increase performance. An example is shown below.
+/// This is simply a shortcut for `thread_rng().gen()`. See [`thread_rng`] for
+/// documentation of the entropy source and [`Standard`] for documentation of
+/// distributions and type-specific generation.
///
/// # Examples
///
@@ -931,7 +683,8 @@ impl Rng for ThreadRng {
/// }
/// ```
///
-/// Caching the thread local random number generator:
+/// If you're calling `random()` in a loop, caching the generator as in the
+/// following example can increase performance.
///
/// ```
/// use rand::Rng;
@@ -950,93 +703,109 @@ impl Rng for ThreadRng {
/// *x = rng.gen();
/// }
/// ```
+///
+/// [`thread_rng`]: fn.thread_rng.html
+/// [`Standard`]: distributions/struct.Standard.html
#[cfg(feature="std")]
#[inline]
-pub fn random<T: Rand>() -> T {
+pub fn random<T>() -> T where Standard: Distribution<T> {
thread_rng().gen()
}
-/// DEPRECATED: use `seq::sample_iter` instead.
-///
-/// Randomly sample up to `amount` elements from a finite iterator.
-/// The order of elements in the sample is not random.
-///
-/// # Example
-///
-/// ```rust
-/// use rand::{thread_rng, sample};
-///
-/// let mut rng = thread_rng();
-/// let sample = sample(&mut rng, 1..100, 5);
-/// println!("{:?}", sample);
-/// ```
-#[cfg(feature="std")]
-#[inline(always)]
-#[deprecated(since="0.4.0", note="renamed to seq::sample_iter")]
-pub fn sample<T, I, R>(rng: &mut R, iterable: I, amount: usize) -> Vec<T>
- where I: IntoIterator<Item=T>,
- R: Rng,
-{
- // the legacy sample didn't care whether amount was met
- seq::sample_iter(rng, iterable, amount)
- .unwrap_or_else(|e| e)
+// Due to rustwasm/wasm-bindgen#201 this can't be defined in the inner os
+// modules, so hack around it for now and place it at the root.
+#[cfg(all(feature = "wasm-bindgen", target_arch = "wasm32"))]
+#[doc(hidden)]
+#[allow(missing_debug_implementations)]
+pub mod __wbg_shims {
+
+ // `extern { type Foo; }` isn't supported on 1.22 syntactically, so use a
+ // macro to work around that.
+ macro_rules! rust_122_compat {
+ ($($t:tt)*) => ($($t)*)
+ }
+
+ rust_122_compat! {
+ extern crate wasm_bindgen;
+
+ pub use wasm_bindgen::prelude::*;
+
+ #[wasm_bindgen]
+ extern "C" {
+ pub type Function;
+ #[wasm_bindgen(constructor)]
+ pub fn new(s: &str) -> Function;
+ #[wasm_bindgen(method)]
+ pub fn call(this: &Function, self_: &JsValue) -> JsValue;
+
+ pub type This;
+ #[wasm_bindgen(method, getter, structural, js_name = self)]
+ pub fn self_(me: &This) -> JsValue;
+ #[wasm_bindgen(method, getter, structural)]
+ pub fn crypto(me: &This) -> JsValue;
+
+ #[derive(Clone, Debug)]
+ pub type BrowserCrypto;
+
+            // TODO: these `structural` annotations ideally wouldn't be here, so
+            //       as to avoid a JS shim, but for now with feature detection
+            //       they're unavoidable.
+ #[wasm_bindgen(method, js_name = getRandomValues, structural, getter)]
+ pub fn get_random_values_fn(me: &BrowserCrypto) -> JsValue;
+ #[wasm_bindgen(method, js_name = getRandomValues, structural)]
+ pub fn get_random_values(me: &BrowserCrypto, buf: &mut [u8]);
+
+ #[wasm_bindgen(js_name = require)]
+ pub fn node_require(s: &str) -> NodeCrypto;
+
+ #[derive(Clone, Debug)]
+ pub type NodeCrypto;
+
+ #[wasm_bindgen(method, js_name = randomFillSync, structural)]
+ pub fn random_fill_sync(me: &NodeCrypto, buf: &mut [u8]);
+ }
+ }
}
#[cfg(test)]
mod test {
- use super::{Rng, thread_rng, random, SeedableRng, StdRng, weak_rng};
- use std::iter::repeat;
+ use rngs::mock::StepRng;
+ use rngs::StdRng;
+ use super::*;
+ #[cfg(all(not(feature="std"), feature="alloc"))] use alloc::boxed::Box;
- pub struct MyRng<R> { inner: R }
+ pub struct TestRng<R> { inner: R }
- impl<R: Rng> Rng for MyRng<R> {
+ impl<R: RngCore> RngCore for TestRng<R> {
fn next_u32(&mut self) -> u32 {
- fn next<T: Rng>(t: &mut T) -> u32 {
- t.next_u32()
- }
- next(&mut self.inner)
+ self.inner.next_u32()
+ }
+ fn next_u64(&mut self) -> u64 {
+ self.inner.next_u64()
+ }
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.inner.fill_bytes(dest)
+ }
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.inner.try_fill_bytes(dest)
}
}
- pub fn rng() -> MyRng<::ThreadRng> {
- MyRng { inner: ::thread_rng() }
- }
-
- struct ConstRng { i: u64 }
- impl Rng for ConstRng {
- fn next_u32(&mut self) -> u32 { self.i as u32 }
- fn next_u64(&mut self) -> u64 { self.i }
-
- // no fill_bytes on purpose
- }
-
- pub fn iter_eq<I, J>(i: I, j: J) -> bool
- where I: IntoIterator,
- J: IntoIterator<Item=I::Item>,
- I::Item: Eq
- {
- // make sure the iterators have equal length
- let mut i = i.into_iter();
- let mut j = j.into_iter();
- loop {
- match (i.next(), j.next()) {
- (Some(ref ei), Some(ref ej)) if ei == ej => { }
- (None, None) => return true,
- _ => return false,
- }
- }
+ pub fn rng(seed: u64) -> TestRng<StdRng> {
+ TestRng { inner: StdRng::seed_from_u64(seed) }
}
#[test]
fn test_fill_bytes_default() {
- let mut r = ConstRng { i: 0x11_22_33_44_55_66_77_88 };
+ let mut r = StepRng::new(0x11_22_33_44_55_66_77_88, 0);
// check every remainder mod 8, both in small and big vectors.
let lengths = [0, 1, 2, 3, 4, 5, 6, 7,
80, 81, 82, 83, 84, 85, 86, 87];
for &n in lengths.iter() {
- let mut v = repeat(0u8).take(n).collect::<Vec<_>>();
- r.fill_bytes(&mut v);
+ let mut buffer = [0u8; 87];
+ let v = &mut buffer[0..n];
+ r.fill_bytes(v);
// use this to get nicer error messages.
for (i, &byte) in v.iter().enumerate() {
@@ -1048,127 +817,100 @@ mod test {
}
#[test]
- fn test_gen_range() {
- let mut r = thread_rng();
- for _ in 0..1000 {
- let a = r.gen_range(-3, 42);
- assert!(a >= -3 && a < 42);
- assert_eq!(r.gen_range(0, 1), 0);
- assert_eq!(r.gen_range(-12, -11), -12);
- }
+ fn test_fill() {
+ let x = 9041086907909331047; // a random u64
+ let mut rng = StepRng::new(x, 0);
+
+ // Convert to byte sequence and back to u64; byte-swap twice if BE.
+ let mut array = [0u64; 2];
+ rng.fill(&mut array[..]);
+ assert_eq!(array, [x, x]);
+ assert_eq!(rng.next_u64(), x);
+ // Convert to bytes then u32 in LE order
+ let mut array = [0u32; 2];
+ rng.fill(&mut array[..]);
+ assert_eq!(array, [x as u32, (x >> 32) as u32]);
+ assert_eq!(rng.next_u32(), x as u32);
+ }
+
+ #[test]
+ fn test_fill_empty() {
+ let mut array = [0u32; 0];
+ let mut rng = StepRng::new(0, 1);
+ rng.fill(&mut array);
+ rng.fill(&mut array[..]);
+ }
+
+ #[test]
+ fn test_gen_range() {
+ let mut r = rng(101);
for _ in 0..1000 {
- let a = r.gen_range(10, 42);
- assert!(a >= 10 && a < 42);
- assert_eq!(r.gen_range(0, 1), 0);
+ let a = r.gen_range(-4711, 17);
+ assert!(a >= -4711 && a < 17);
+ let a = r.gen_range(-3i8, 42);
+ assert!(a >= -3i8 && a < 42i8);
+ let a = r.gen_range(&10u16, 99);
+ assert!(a >= 10u16 && a < 99u16);
+ let a = r.gen_range(-100i32, &2000);
+ assert!(a >= -100i32 && a < 2000i32);
+ let a = r.gen_range(&12u32, &24u32);
+ assert!(a >= 12u32 && a < 24u32);
+
+ assert_eq!(r.gen_range(0u32, 1), 0u32);
+ assert_eq!(r.gen_range(-12i64, -11), -12i64);
assert_eq!(r.gen_range(3_000_000, 3_000_001), 3_000_000);
}
-
}
#[test]
#[should_panic]
fn test_gen_range_panic_int() {
- let mut r = thread_rng();
+ let mut r = rng(102);
r.gen_range(5, -2);
}
#[test]
#[should_panic]
fn test_gen_range_panic_usize() {
- let mut r = thread_rng();
+ let mut r = rng(103);
r.gen_range(5, 2);
}
#[test]
- fn test_gen_weighted_bool() {
- let mut r = thread_rng();
- assert_eq!(r.gen_weighted_bool(0), true);
- assert_eq!(r.gen_weighted_bool(1), true);
- }
-
- #[test]
- fn test_gen_ascii_str() {
- let mut r = thread_rng();
- assert_eq!(r.gen_ascii_chars().take(0).count(), 0);
- assert_eq!(r.gen_ascii_chars().take(10).count(), 10);
- assert_eq!(r.gen_ascii_chars().take(16).count(), 16);
- }
-
- #[test]
- fn test_gen_vec() {
- let mut r = thread_rng();
- assert_eq!(r.gen_iter::<u8>().take(0).count(), 0);
- assert_eq!(r.gen_iter::<u8>().take(10).count(), 10);
- assert_eq!(r.gen_iter::<f64>().take(16).count(), 16);
- }
-
- #[test]
- fn test_choose() {
- let mut r = thread_rng();
- assert_eq!(r.choose(&[1, 1, 1]).map(|&x|x), Some(1));
-
- let v: &[isize] = &[];
- assert_eq!(r.choose(v), None);
- }
-
- #[test]
- fn test_shuffle() {
- let mut r = thread_rng();
- let empty: &mut [isize] = &mut [];
- r.shuffle(empty);
- let mut one = [1];
- r.shuffle(&mut one);
- let b: &[_] = &[1];
- assert_eq!(one, b);
-
- let mut two = [1, 2];
- r.shuffle(&mut two);
- assert!(two == [1, 2] || two == [2, 1]);
-
- let mut x = [1, 1, 1];
- r.shuffle(&mut x);
- let b: &[_] = &[1, 1, 1];
- assert_eq!(x, b);
+ fn test_gen_bool() {
+ let mut r = rng(105);
+ for _ in 0..5 {
+ assert_eq!(r.gen_bool(0.0), false);
+ assert_eq!(r.gen_bool(1.0), true);
+ }
}
#[test]
- fn test_thread_rng() {
- let mut r = thread_rng();
+ fn test_rng_trait_object() {
+ use distributions::{Distribution, Standard};
+ let mut rng = rng(109);
+ let mut r = &mut rng as &mut RngCore;
+ r.next_u32();
r.gen::<i32>();
- let mut v = [1, 1, 1];
- r.shuffle(&mut v);
- let b: &[_] = &[1, 1, 1];
- assert_eq!(v, b);
assert_eq!(r.gen_range(0, 1), 0);
+ let _c: u8 = Standard.sample(&mut r);
}
#[test]
- fn test_rng_trait_object() {
- let mut rng = thread_rng();
- {
- let mut r = &mut rng as &mut Rng;
- r.next_u32();
- (&mut r).gen::<i32>();
- let mut v = [1, 1, 1];
- (&mut r).shuffle(&mut v);
- let b: &[_] = &[1, 1, 1];
- assert_eq!(v, b);
- assert_eq!((&mut r).gen_range(0, 1), 0);
- }
- {
- let mut r = Box::new(rng) as Box<Rng>;
- r.next_u32();
- r.gen::<i32>();
- let mut v = [1, 1, 1];
- r.shuffle(&mut v);
- let b: &[_] = &[1, 1, 1];
- assert_eq!(v, b);
- assert_eq!(r.gen_range(0, 1), 0);
- }
+ #[cfg(feature="alloc")]
+ fn test_rng_boxed_trait() {
+ use distributions::{Distribution, Standard};
+ let rng = rng(110);
+ let mut r = Box::new(rng) as Box<RngCore>;
+ r.next_u32();
+ r.gen::<i32>();
+ assert_eq!(r.gen_range(0, 1), 0);
+ let _c: u8 = Standard.sample(&mut r);
}
#[test]
+ #[cfg(feature="std")]
fn test_random() {
// not sure how to test this aside from just getting some values
let _n : usize = random();
@@ -1183,32 +925,20 @@ mod test {
}
#[test]
- fn test_std_rng_seeded() {
- let s = thread_rng().gen_iter::<usize>().take(256).collect::<Vec<usize>>();
- let mut ra: StdRng = SeedableRng::from_seed(&s[..]);
- let mut rb: StdRng = SeedableRng::from_seed(&s[..]);
- assert!(iter_eq(ra.gen_ascii_chars().take(100),
- rb.gen_ascii_chars().take(100)));
- }
-
- #[test]
- fn test_std_rng_reseed() {
- let s = thread_rng().gen_iter::<usize>().take(256).collect::<Vec<usize>>();
- let mut r: StdRng = SeedableRng::from_seed(&s[..]);
- let string1 = r.gen_ascii_chars().take(100).collect::<String>();
-
- r.reseed(&s);
-
- let string2 = r.gen_ascii_chars().take(100).collect::<String>();
- assert_eq!(string1, string2);
- }
-
- #[test]
- fn test_weak_rng() {
- let s = weak_rng().gen_iter::<usize>().take(256).collect::<Vec<usize>>();
- let mut ra: StdRng = SeedableRng::from_seed(&s[..]);
- let mut rb: StdRng = SeedableRng::from_seed(&s[..]);
- assert!(iter_eq(ra.gen_ascii_chars().take(100),
- rb.gen_ascii_chars().take(100)));
+ fn test_gen_ratio_average() {
+ const NUM: u32 = 3;
+ const DENOM: u32 = 10;
+ const N: u32 = 100_000;
+
+ let mut sum: u32 = 0;
+ let mut rng = rng(111);
+ for _ in 0..N {
+ if rng.gen_ratio(NUM, DENOM) {
+ sum += 1;
+ }
+ }
+ // Have Binomial(N, NUM/DENOM) distribution
+ let expected = (NUM * N) / DENOM; // exact integer
+ assert!(((sum - expected) as i32).abs() < 500);
}
}
diff --git a/rand/src/os.rs b/rand/src/os.rs
deleted file mode 100644
index 10022fb..0000000
--- a/rand/src/os.rs
+++ /dev/null
@@ -1,617 +0,0 @@
-// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Interfaces to the operating system provided random number
-//! generators.
-
-use std::{io, mem, fmt};
-use Rng;
-
-/// A random number generator that retrieves randomness straight from
-/// the operating system. Platform sources:
-///
-/// - Unix-like systems (Linux, Android, Mac OSX): read directly from
-/// `/dev/urandom`, or from `getrandom(2)` system call if available.
-/// - OpenBSD: calls `getentropy(2)`
-/// - FreeBSD: uses the `kern.arandom` `sysctl(2)` mib
-/// - Windows: calls `RtlGenRandom`, exported from `advapi32.dll` as
-/// `SystemFunction036`.
-/// - iOS: calls SecRandomCopyBytes as /dev/(u)random is sandboxed.
-/// - PNaCl: calls into the `nacl-irt-random-0.1` IRT interface.
-///
-/// This usually does not block. On some systems (e.g. FreeBSD, OpenBSD,
-/// Mac OS X, and modern Linux) this may block very early in the init
-/// process, if the CSPRNG has not been seeded yet.[1]
-///
-/// [1] See <https://www.python.org/dev/peps/pep-0524/> for a more
-/// in-depth discussion.
-pub struct OsRng(imp::OsRng);
-
-impl OsRng {
- /// Create a new `OsRng`.
- pub fn new() -> io::Result<OsRng> {
- imp::OsRng::new().map(OsRng)
- }
-}
-
-impl Rng for OsRng {
- fn next_u32(&mut self) -> u32 { self.0.next_u32() }
- fn next_u64(&mut self) -> u64 { self.0.next_u64() }
- fn fill_bytes(&mut self, v: &mut [u8]) { self.0.fill_bytes(v) }
-}
-
-impl fmt::Debug for OsRng {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "OsRng {{}}")
- }
-}
-
-fn next_u32(fill_buf: &mut FnMut(&mut [u8])) -> u32 {
- let mut buf: [u8; 4] = [0; 4];
- fill_buf(&mut buf);
- unsafe { mem::transmute::<[u8; 4], u32>(buf) }
-}
-
-fn next_u64(fill_buf: &mut FnMut(&mut [u8])) -> u64 {
- let mut buf: [u8; 8] = [0; 8];
- fill_buf(&mut buf);
- unsafe { mem::transmute::<[u8; 8], u64>(buf) }
-}
-
-#[cfg(all(unix, not(target_os = "ios"),
- not(target_os = "nacl"),
- not(target_os = "freebsd"),
- not(target_os = "fuchsia"),
- not(target_os = "openbsd"),
- not(target_os = "redox")))]
-mod imp {
- extern crate libc;
-
- use super::{next_u32, next_u64};
- use self::OsRngInner::*;
-
- use std::io;
- use std::fs::File;
- use Rng;
- use read::ReadRng;
-
- #[cfg(all(target_os = "linux",
- any(target_arch = "x86_64",
- target_arch = "x86",
- target_arch = "arm",
- target_arch = "aarch64",
- target_arch = "powerpc")))]
- fn getrandom(buf: &mut [u8]) -> libc::c_long {
- extern "C" {
- fn syscall(number: libc::c_long, ...) -> libc::c_long;
- }
-
- #[cfg(target_arch = "x86_64")]
- const NR_GETRANDOM: libc::c_long = 318;
- #[cfg(target_arch = "x86")]
- const NR_GETRANDOM: libc::c_long = 355;
- #[cfg(target_arch = "arm")]
- const NR_GETRANDOM: libc::c_long = 384;
- #[cfg(target_arch = "aarch64")]
- const NR_GETRANDOM: libc::c_long = 278;
- #[cfg(target_arch = "powerpc")]
- const NR_GETRANDOM: libc::c_long = 359;
-
- unsafe {
- syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), 0)
- }
- }
-
- #[cfg(not(all(target_os = "linux",
- any(target_arch = "x86_64",
- target_arch = "x86",
- target_arch = "arm",
- target_arch = "aarch64",
- target_arch = "powerpc"))))]
- fn getrandom(_buf: &mut [u8]) -> libc::c_long { -1 }
-
- fn getrandom_fill_bytes(v: &mut [u8]) {
- let mut read = 0;
- let len = v.len();
- while read < len {
- let result = getrandom(&mut v[read..]);
- if result == -1 {
- let err = io::Error::last_os_error();
- if err.kind() == io::ErrorKind::Interrupted {
- continue
- } else {
- panic!("unexpected getrandom error: {}", err);
- }
- } else {
- read += result as usize;
- }
- }
- }
-
- #[cfg(all(target_os = "linux",
- any(target_arch = "x86_64",
- target_arch = "x86",
- target_arch = "arm",
- target_arch = "aarch64",
- target_arch = "powerpc")))]
- fn is_getrandom_available() -> bool {
- use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
- use std::sync::{Once, ONCE_INIT};
-
- static CHECKER: Once = ONCE_INIT;
- static AVAILABLE: AtomicBool = ATOMIC_BOOL_INIT;
-
- CHECKER.call_once(|| {
- let mut buf: [u8; 0] = [];
- let result = getrandom(&mut buf);
- let available = if result == -1 {
- let err = io::Error::last_os_error().raw_os_error();
- err != Some(libc::ENOSYS)
- } else {
- true
- };
- AVAILABLE.store(available, Ordering::Relaxed);
- });
-
- AVAILABLE.load(Ordering::Relaxed)
- }
-
- #[cfg(not(all(target_os = "linux",
- any(target_arch = "x86_64",
- target_arch = "x86",
- target_arch = "arm",
- target_arch = "aarch64",
- target_arch = "powerpc"))))]
- fn is_getrandom_available() -> bool { false }
-
- pub struct OsRng {
- inner: OsRngInner,
- }
-
- enum OsRngInner {
- OsGetrandomRng,
- OsReadRng(ReadRng<File>),
- }
-
- impl OsRng {
- pub fn new() -> io::Result<OsRng> {
- if is_getrandom_available() {
- return Ok(OsRng { inner: OsGetrandomRng });
- }
-
- let reader = try!(File::open("/dev/urandom"));
- let reader_rng = ReadRng::new(reader);
-
- Ok(OsRng { inner: OsReadRng(reader_rng) })
- }
- }
-
- impl Rng for OsRng {
- fn next_u32(&mut self) -> u32 {
- match self.inner {
- OsGetrandomRng => next_u32(&mut getrandom_fill_bytes),
- OsReadRng(ref mut rng) => rng.next_u32(),
- }
- }
- fn next_u64(&mut self) -> u64 {
- match self.inner {
- OsGetrandomRng => next_u64(&mut getrandom_fill_bytes),
- OsReadRng(ref mut rng) => rng.next_u64(),
- }
- }
- fn fill_bytes(&mut self, v: &mut [u8]) {
- match self.inner {
- OsGetrandomRng => getrandom_fill_bytes(v),
- OsReadRng(ref mut rng) => rng.fill_bytes(v)
- }
- }
- }
-}
-
-#[cfg(target_os = "ios")]
-mod imp {
- extern crate libc;
-
- use super::{next_u32, next_u64};
-
- use std::io;
- use Rng;
- use self::libc::{c_int, size_t};
-
- #[derive(Debug)]
- pub struct OsRng;
-
- enum SecRandom {}
-
- #[allow(non_upper_case_globals)]
- const kSecRandomDefault: *const SecRandom = 0 as *const SecRandom;
-
- #[link(name = "Security", kind = "framework")]
- extern {
- fn SecRandomCopyBytes(rnd: *const SecRandom,
- count: size_t, bytes: *mut u8) -> c_int;
- }
-
- impl OsRng {
- pub fn new() -> io::Result<OsRng> {
- Ok(OsRng)
- }
- }
-
- impl Rng for OsRng {
- fn next_u32(&mut self) -> u32 {
- next_u32(&mut |v| self.fill_bytes(v))
- }
- fn next_u64(&mut self) -> u64 {
- next_u64(&mut |v| self.fill_bytes(v))
- }
- fn fill_bytes(&mut self, v: &mut [u8]) {
- let ret = unsafe {
- SecRandomCopyBytes(kSecRandomDefault, v.len() as size_t, v.as_mut_ptr())
- };
- if ret == -1 {
- panic!("couldn't generate random bytes: {}", io::Error::last_os_error());
- }
- }
- }
-}
-
-#[cfg(target_os = "freebsd")]
-mod imp {
- extern crate libc;
-
- use std::{io, ptr};
- use Rng;
-
- use super::{next_u32, next_u64};
-
- #[derive(Debug)]
- pub struct OsRng;
-
- impl OsRng {
- pub fn new() -> io::Result<OsRng> {
- Ok(OsRng)
- }
- }
-
- impl Rng for OsRng {
- fn next_u32(&mut self) -> u32 {
- next_u32(&mut |v| self.fill_bytes(v))
- }
- fn next_u64(&mut self) -> u64 {
- next_u64(&mut |v| self.fill_bytes(v))
- }
- fn fill_bytes(&mut self, v: &mut [u8]) {
- let mib = [libc::CTL_KERN, libc::KERN_ARND];
- // kern.arandom permits a maximum buffer size of 256 bytes
- for s in v.chunks_mut(256) {
- let mut s_len = s.len();
- let ret = unsafe {
- libc::sysctl(mib.as_ptr(), mib.len() as libc::c_uint,
- s.as_mut_ptr() as *mut _, &mut s_len,
- ptr::null(), 0)
- };
- if ret == -1 || s_len != s.len() {
- panic!("kern.arandom sysctl failed! (returned {}, s.len() {}, oldlenp {})",
- ret, s.len(), s_len);
- }
- }
- }
- }
-}
-
-#[cfg(target_os = "openbsd")]
-mod imp {
- extern crate libc;
-
- use std::io;
- use Rng;
-
- use super::{next_u32, next_u64};
-
- #[derive(Debug)]
- pub struct OsRng;
-
- impl OsRng {
- pub fn new() -> io::Result<OsRng> {
- Ok(OsRng)
- }
- }
-
- impl Rng for OsRng {
- fn next_u32(&mut self) -> u32 {
- next_u32(&mut |v| self.fill_bytes(v))
- }
- fn next_u64(&mut self) -> u64 {
- next_u64(&mut |v| self.fill_bytes(v))
- }
- fn fill_bytes(&mut self, v: &mut [u8]) {
- // getentropy(2) permits a maximum buffer size of 256 bytes
- for s in v.chunks_mut(256) {
- let ret = unsafe {
- libc::getentropy(s.as_mut_ptr() as *mut libc::c_void, s.len())
- };
- if ret == -1 {
- let err = io::Error::last_os_error();
- panic!("getentropy failed: {}", err);
- }
- }
- }
- }
-}
-
-#[cfg(target_os = "redox")]
-mod imp {
- use std::io;
- use std::fs::File;
- use Rng;
- use read::ReadRng;
-
- #[derive(Debug)]
- pub struct OsRng {
- inner: ReadRng<File>,
- }
-
- impl OsRng {
- pub fn new() -> io::Result<OsRng> {
- let reader = try!(File::open("rand:"));
- let reader_rng = ReadRng::new(reader);
-
- Ok(OsRng { inner: reader_rng })
- }
- }
-
- impl Rng for OsRng {
- fn next_u32(&mut self) -> u32 {
- self.inner.next_u32()
- }
- fn next_u64(&mut self) -> u64 {
- self.inner.next_u64()
- }
- fn fill_bytes(&mut self, v: &mut [u8]) {
- self.inner.fill_bytes(v)
- }
- }
-}
-
-#[cfg(target_os = "fuchsia")]
-mod imp {
- extern crate fuchsia_zircon;
-
- use std::io;
- use Rng;
-
- use super::{next_u32, next_u64};
-
- #[derive(Debug)]
- pub struct OsRng;
-
- impl OsRng {
- pub fn new() -> io::Result<OsRng> {
- Ok(OsRng)
- }
- }
-
- impl Rng for OsRng {
- fn next_u32(&mut self) -> u32 {
- next_u32(&mut |v| self.fill_bytes(v))
- }
- fn next_u64(&mut self) -> u64 {
- next_u64(&mut |v| self.fill_bytes(v))
- }
- fn fill_bytes(&mut self, v: &mut [u8]) {
- for s in v.chunks_mut(fuchsia_zircon::sys::ZX_CPRNG_DRAW_MAX_LEN) {
- let mut filled = 0;
- while filled < s.len() {
- match fuchsia_zircon::cprng_draw(&mut s[filled..]) {
- Ok(actual) => filled += actual,
- Err(e) => panic!("cprng_draw failed: {:?}", e),
- };
- }
- }
- }
- }
-}
-
-#[cfg(windows)]
-mod imp {
- extern crate winapi;
-
- use std::io;
- use Rng;
-
- use super::{next_u32, next_u64};
-
- use self::winapi::shared::minwindef::ULONG;
- use self::winapi::um::ntsecapi::RtlGenRandom;
- use self::winapi::um::winnt::PVOID;
-
- #[derive(Debug)]
- pub struct OsRng;
-
- impl OsRng {
- pub fn new() -> io::Result<OsRng> {
- Ok(OsRng)
- }
- }
-
- impl Rng for OsRng {
- fn next_u32(&mut self) -> u32 {
- next_u32(&mut |v| self.fill_bytes(v))
- }
- fn next_u64(&mut self) -> u64 {
- next_u64(&mut |v| self.fill_bytes(v))
- }
- fn fill_bytes(&mut self, v: &mut [u8]) {
- // RtlGenRandom takes an ULONG (u32) for the length so we need to
- // split up the buffer.
- for slice in v.chunks_mut(<ULONG>::max_value() as usize) {
- let ret = unsafe {
- RtlGenRandom(slice.as_mut_ptr() as PVOID, slice.len() as ULONG)
- };
- if ret == 0 {
- panic!("couldn't generate random bytes: {}",
- io::Error::last_os_error());
- }
- }
- }
- }
-}
-
-#[cfg(target_os = "nacl")]
-mod imp {
- extern crate libc;
-
- use std::io;
- use std::mem;
- use Rng;
-
- use super::{next_u32, next_u64};
-
- #[derive(Debug)]
- pub struct OsRng(extern fn(dest: *mut libc::c_void,
- bytes: libc::size_t,
- read: *mut libc::size_t) -> libc::c_int);
-
- extern {
- fn nacl_interface_query(name: *const libc::c_char,
- table: *mut libc::c_void,
- table_size: libc::size_t) -> libc::size_t;
- }
-
- const INTERFACE: &'static [u8] = b"nacl-irt-random-0.1\0";
-
- #[repr(C)]
- struct NaClIRTRandom {
- get_random_bytes: Option<extern fn(dest: *mut libc::c_void,
- bytes: libc::size_t,
- read: *mut libc::size_t) -> libc::c_int>,
- }
-
- impl OsRng {
- pub fn new() -> io::Result<OsRng> {
- let mut iface = NaClIRTRandom {
- get_random_bytes: None,
- };
- let result = unsafe {
- nacl_interface_query(INTERFACE.as_ptr() as *const _,
- mem::transmute(&mut iface),
- mem::size_of::<NaClIRTRandom>() as libc::size_t)
- };
- if result != 0 {
- assert!(iface.get_random_bytes.is_some());
- let result = OsRng(iface.get_random_bytes.take().unwrap());
- Ok(result)
- } else {
- let error = io::ErrorKind::NotFound;
- let error = io::Error::new(error, "IRT random interface missing");
- Err(error)
- }
- }
- }
-
- impl Rng for OsRng {
- fn next_u32(&mut self) -> u32 {
- next_u32(&mut |v| self.fill_bytes(v))
- }
- fn next_u64(&mut self) -> u64 {
- next_u64(&mut |v| self.fill_bytes(v))
- }
- fn fill_bytes(&mut self, v: &mut [u8]) {
- let mut read = 0;
- loop {
- let mut r: libc::size_t = 0;
- let len = v.len();
- let error = (self.0)(v[read..].as_mut_ptr() as *mut _,
- (len - read) as libc::size_t,
- &mut r as *mut _);
- assert!(error == 0, "`get_random_bytes` failed!");
- read += r as usize;
-
- if read >= v.len() { break; }
- }
- }
- }
-}
-
-#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
-mod imp {
- use std::io;
- use Rng;
-
- #[derive(Debug)]
- pub struct OsRng;
-
- impl OsRng {
- pub fn new() -> io::Result<OsRng> {
- Err(io::Error::new(io::ErrorKind::Other, "Not supported"))
- }
- }
-
- impl Rng for OsRng {
- fn next_u32(&mut self) -> u32 {
- panic!("Not supported")
- }
- }
-}
-
-#[cfg(test)]
-mod test {
- use std::sync::mpsc::channel;
- use Rng;
- use OsRng;
- use std::thread;
-
- #[test]
- fn test_os_rng() {
- let mut r = OsRng::new().unwrap();
-
- r.next_u32();
- r.next_u64();
-
- let mut v = [0u8; 1000];
- r.fill_bytes(&mut v);
- }
-
- #[test]
- fn test_os_rng_tasks() {
-
- let mut txs = vec!();
- for _ in 0..20 {
- let (tx, rx) = channel();
- txs.push(tx);
-
- thread::spawn(move|| {
- // wait until all the tasks are ready to go.
- rx.recv().unwrap();
-
- // deschedule to attempt to interleave things as much
- // as possible (XXX: is this a good test?)
- let mut r = OsRng::new().unwrap();
- thread::yield_now();
- let mut v = [0u8; 1000];
-
- for _ in 0..100 {
- r.next_u32();
- thread::yield_now();
- r.next_u64();
- thread::yield_now();
- r.fill_bytes(&mut v);
- thread::yield_now();
- }
- });
- }
-
- // start all the tasks
- for tx in txs.iter() {
- tx.send(()).unwrap();
- }
- }
-}
diff --git a/rand/src/prelude.rs b/rand/src/prelude.rs
new file mode 100644
index 0000000..5d8a0e9
--- /dev/null
+++ b/rand/src/prelude.rs
@@ -0,0 +1,27 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Convenience re-export of common members
+//!
+//! Like the standard library's prelude, this module simplifies importing of
+//! common items. Unlike the standard prelude, the contents of this module must
+//! be imported manually:
+//!
+//! ```
+//! use rand::prelude::*;
+//! # let _ = StdRng::from_entropy();
+//! # let mut r = SmallRng::from_rng(thread_rng()).unwrap();
+//! # let _: f32 = r.gen();
+//! ```
+
+#[doc(no_inline)] pub use distributions::Distribution;
+#[doc(no_inline)] pub use rngs::{SmallRng, StdRng};
+#[doc(no_inline)] #[cfg(feature="std")] pub use rngs::ThreadRng;
+#[doc(no_inline)] pub use {Rng, RngCore, CryptoRng, SeedableRng};
+#[doc(no_inline)] #[cfg(feature="std")] pub use {FromEntropy, random, thread_rng};
+#[doc(no_inline)] pub use seq::{SliceRandom, IteratorRandom};
diff --git a/rand/src/prng/chacha.rs b/rand/src/prng/chacha.rs
deleted file mode 100644
index a73e8e7..0000000
--- a/rand/src/prng/chacha.rs
+++ /dev/null
@@ -1,321 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The ChaCha random number generator.
-
-use core::num::Wrapping as w;
-use {Rng, SeedableRng, Rand};
-
-#[allow(bad_style)]
-type w32 = w<u32>;
-
-const KEY_WORDS : usize = 8; // 8 words for the 256-bit key
-const STATE_WORDS : usize = 16;
-const CHACHA_ROUNDS: u32 = 20; // Cryptographically secure from 8 upwards as of this writing
-
-/// A random number generator that uses the ChaCha20 algorithm [1].
-///
-/// The ChaCha algorithm is widely accepted as suitable for
-/// cryptographic purposes, but this implementation has not been
-/// verified as such. Prefer a generator like `OsRng` that defers to
-/// the operating system for cases that need high security.
-///
-/// [1]: D. J. Bernstein, [*ChaCha, a variant of
-/// Salsa20*](http://cr.yp.to/chacha.html)
-#[derive(Copy, Clone, Debug)]
-pub struct ChaChaRng {
- buffer: [w32; STATE_WORDS], // Internal buffer of output
- state: [w32; STATE_WORDS], // Initial state
- index: usize, // Index into state
-}
-
-static EMPTY: ChaChaRng = ChaChaRng {
- buffer: [w(0); STATE_WORDS],
- state: [w(0); STATE_WORDS],
- index: STATE_WORDS
-};
-
-
-macro_rules! quarter_round{
- ($a: expr, $b: expr, $c: expr, $d: expr) => {{
- $a = $a + $b; $d = $d ^ $a; $d = w($d.0.rotate_left(16));
- $c = $c + $d; $b = $b ^ $c; $b = w($b.0.rotate_left(12));
- $a = $a + $b; $d = $d ^ $a; $d = w($d.0.rotate_left( 8));
- $c = $c + $d; $b = $b ^ $c; $b = w($b.0.rotate_left( 7));
- }}
-}
-
-macro_rules! double_round{
- ($x: expr) => {{
- // Column round
- quarter_round!($x[ 0], $x[ 4], $x[ 8], $x[12]);
- quarter_round!($x[ 1], $x[ 5], $x[ 9], $x[13]);
- quarter_round!($x[ 2], $x[ 6], $x[10], $x[14]);
- quarter_round!($x[ 3], $x[ 7], $x[11], $x[15]);
- // Diagonal round
- quarter_round!($x[ 0], $x[ 5], $x[10], $x[15]);
- quarter_round!($x[ 1], $x[ 6], $x[11], $x[12]);
- quarter_round!($x[ 2], $x[ 7], $x[ 8], $x[13]);
- quarter_round!($x[ 3], $x[ 4], $x[ 9], $x[14]);
- }}
-}
-
-#[inline]
-fn core(output: &mut [w32; STATE_WORDS], input: &[w32; STATE_WORDS]) {
- *output = *input;
-
- for _ in 0..CHACHA_ROUNDS / 2 {
- double_round!(output);
- }
-
- for i in 0..STATE_WORDS {
- output[i] = output[i] + input[i];
- }
-}
-
-impl ChaChaRng {
-
-    /// Create a ChaCha random number generator using the default
- /// fixed key of 8 zero words.
- ///
- /// # Examples
- ///
- /// ```rust
- /// use rand::{Rng, ChaChaRng};
- ///
- /// let mut ra = ChaChaRng::new_unseeded();
- /// println!("{:?}", ra.next_u32());
- /// println!("{:?}", ra.next_u32());
- /// ```
- ///
-    /// Since this is equivalent to an RNG with a fixed seed, repeated executions
- /// of an unseeded RNG will produce the same result. This code sample will
- /// consistently produce:
- ///
- /// - 2917185654
- /// - 2419978656
- pub fn new_unseeded() -> ChaChaRng {
- let mut rng = EMPTY;
- rng.init(&[0; KEY_WORDS]);
- rng
- }
-
- /// Sets the internal 128-bit ChaCha counter to
- /// a user-provided value. This permits jumping
- /// arbitrarily ahead (or backwards) in the pseudorandom stream.
- ///
- /// Since the nonce words are used to extend the counter to 128 bits,
- /// users wishing to obtain the conventional ChaCha pseudorandom stream
- /// associated with a particular nonce can call this function with
- /// arguments `0, desired_nonce`.
- ///
- /// # Examples
- ///
- /// ```rust
- /// use rand::{Rng, ChaChaRng};
- ///
- /// let mut ra = ChaChaRng::new_unseeded();
- /// ra.set_counter(0u64, 1234567890u64);
- /// println!("{:?}", ra.next_u32());
- /// println!("{:?}", ra.next_u32());
- /// ```
- pub fn set_counter(&mut self, counter_low: u64, counter_high: u64) {
- self.state[12] = w((counter_low >> 0) as u32);
- self.state[13] = w((counter_low >> 32) as u32);
- self.state[14] = w((counter_high >> 0) as u32);
- self.state[15] = w((counter_high >> 32) as u32);
- self.index = STATE_WORDS; // force recomputation
- }
-
- /// Initializes `self.state` with the appropriate key and constants
- ///
- /// We deviate slightly from the ChaCha specification regarding
- /// the nonce, which is used to extend the counter to 128 bits.
- /// This is provably as strong as the original cipher, though,
- /// since any distinguishing attack on our variant also works
- /// against ChaCha with a chosen-nonce. See the XSalsa20 [1]
- /// security proof for a more involved example of this.
- ///
- /// The modified word layout is:
- /// ```text
- /// constant constant constant constant
- /// key key key key
- /// key key key key
- /// counter counter counter counter
- /// ```
- /// [1]: Daniel J. Bernstein. [*Extending the Salsa20
- /// nonce.*](http://cr.yp.to/papers.html#xsalsa)
- fn init(&mut self, key: &[u32; KEY_WORDS]) {
- self.state[0] = w(0x61707865);
- self.state[1] = w(0x3320646E);
- self.state[2] = w(0x79622D32);
- self.state[3] = w(0x6B206574);
-
- for i in 0..KEY_WORDS {
- self.state[4+i] = w(key[i]);
- }
-
- self.state[12] = w(0);
- self.state[13] = w(0);
- self.state[14] = w(0);
- self.state[15] = w(0);
-
- self.index = STATE_WORDS;
- }
-
- /// Refill the internal output buffer (`self.buffer`)
- fn update(&mut self) {
- core(&mut self.buffer, &self.state);
- self.index = 0;
- // update 128-bit counter
- self.state[12] = self.state[12] + w(1);
- if self.state[12] != w(0) { return };
- self.state[13] = self.state[13] + w(1);
- if self.state[13] != w(0) { return };
- self.state[14] = self.state[14] + w(1);
- if self.state[14] != w(0) { return };
- self.state[15] = self.state[15] + w(1);
- }
-}
-
-impl Rng for ChaChaRng {
- #[inline]
- fn next_u32(&mut self) -> u32 {
- if self.index == STATE_WORDS {
- self.update();
- }
-
- let value = self.buffer[self.index % STATE_WORDS];
- self.index += 1;
- value.0
- }
-}
-
-impl<'a> SeedableRng<&'a [u32]> for ChaChaRng {
-
- fn reseed(&mut self, seed: &'a [u32]) {
- // reset state
- self.init(&[0u32; KEY_WORDS]);
- // set key in place
- let key = &mut self.state[4 .. 4+KEY_WORDS];
- for (k, s) in key.iter_mut().zip(seed.iter()) {
- *k = w(*s);
- }
- }
-
- /// Create a ChaCha generator from a seed,
- /// obtained from a variable-length u32 array.
- /// Only up to 8 words are used; if less than 8
- /// words are used, the remaining are set to zero.
- fn from_seed(seed: &'a [u32]) -> ChaChaRng {
- let mut rng = EMPTY;
- rng.reseed(seed);
- rng
- }
-}
-
-impl Rand for ChaChaRng {
- fn rand<R: Rng>(other: &mut R) -> ChaChaRng {
- let mut key : [u32; KEY_WORDS] = [0; KEY_WORDS];
- for word in key.iter_mut() {
- *word = other.gen();
- }
- SeedableRng::from_seed(&key[..])
- }
-}
-
-
-#[cfg(test)]
-mod test {
- use {Rng, SeedableRng};
- use super::ChaChaRng;
-
- #[test]
- fn test_rng_rand_seeded() {
- let s = ::test::rng().gen_iter::<u32>().take(8).collect::<Vec<u32>>();
- let mut ra: ChaChaRng = SeedableRng::from_seed(&s[..]);
- let mut rb: ChaChaRng = SeedableRng::from_seed(&s[..]);
- assert!(::test::iter_eq(ra.gen_ascii_chars().take(100),
- rb.gen_ascii_chars().take(100)));
- }
-
- #[test]
- fn test_rng_seeded() {
- let seed : &[_] = &[0,1,2,3,4,5,6,7];
- let mut ra: ChaChaRng = SeedableRng::from_seed(seed);
- let mut rb: ChaChaRng = SeedableRng::from_seed(seed);
- assert!(::test::iter_eq(ra.gen_ascii_chars().take(100),
- rb.gen_ascii_chars().take(100)));
- }
-
- #[test]
- fn test_rng_reseed() {
- let s = ::test::rng().gen_iter::<u32>().take(8).collect::<Vec<u32>>();
- let mut r: ChaChaRng = SeedableRng::from_seed(&s[..]);
- let string1: String = r.gen_ascii_chars().take(100).collect();
-
- r.reseed(&s);
-
- let string2: String = r.gen_ascii_chars().take(100).collect();
- assert_eq!(string1, string2);
- }
-
- #[test]
- fn test_rng_true_values() {
- // Test vectors 1 and 2 from
- // http://tools.ietf.org/html/draft-nir-cfrg-chacha20-poly1305-04
- let seed : &[_] = &[0u32; 8];
- let mut ra: ChaChaRng = SeedableRng::from_seed(seed);
-
- let v = (0..16).map(|_| ra.next_u32()).collect::<Vec<_>>();
- assert_eq!(v,
- vec!(0xade0b876, 0x903df1a0, 0xe56a5d40, 0x28bd8653,
- 0xb819d2bd, 0x1aed8da0, 0xccef36a8, 0xc70d778b,
- 0x7c5941da, 0x8d485751, 0x3fe02477, 0x374ad8b8,
- 0xf4b8436a, 0x1ca11815, 0x69b687c3, 0x8665eeb2));
-
- let v = (0..16).map(|_| ra.next_u32()).collect::<Vec<_>>();
- assert_eq!(v,
- vec!(0xbee7079f, 0x7a385155, 0x7c97ba98, 0x0d082d73,
- 0xa0290fcb, 0x6965e348, 0x3e53c612, 0xed7aee32,
- 0x7621b729, 0x434ee69c, 0xb03371d5, 0xd539d874,
- 0x281fed31, 0x45fb0a51, 0x1f0ae1ac, 0x6f4d794b));
-
-
- let seed : &[_] = &[0,1,2,3,4,5,6,7];
- let mut ra: ChaChaRng = SeedableRng::from_seed(seed);
-
- // Store the 17*i-th 32-bit word,
- // i.e., the i-th word of the i-th 16-word block
- let mut v : Vec<u32> = Vec::new();
- for _ in 0..16 {
- v.push(ra.next_u32());
- for _ in 0..16 {
- ra.next_u32();
- }
- }
-
- assert_eq!(v,
- vec!(0xf225c81a, 0x6ab1be57, 0x04d42951, 0x70858036,
- 0x49884684, 0x64efec72, 0x4be2d186, 0x3615b384,
- 0x11cfa18e, 0xd3c50049, 0x75c775f6, 0x434c6530,
- 0x2c5bad8f, 0x898881dc, 0x5f1c86d9, 0xc1f8e7f4));
- }
-
- #[test]
- fn test_rng_clone() {
- let seed : &[_] = &[0u32; 8];
- let mut rng: ChaChaRng = SeedableRng::from_seed(seed);
- let mut clone = rng.clone();
- for _ in 0..16 {
- assert_eq!(rng.next_u64(), clone.next_u64());
- }
- }
-}
diff --git a/rand/src/prng/isaac.rs b/rand/src/prng/isaac.rs
deleted file mode 100644
index cf5eb67..0000000
--- a/rand/src/prng/isaac.rs
+++ /dev/null
@@ -1,328 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The ISAAC random number generator.
-
-#![allow(non_camel_case_types)]
-
-use core::slice;
-use core::iter::repeat;
-use core::num::Wrapping as w;
-use core::fmt;
-
-use {Rng, SeedableRng, Rand};
-
-#[allow(bad_style)]
-type w32 = w<u32>;
-
-const RAND_SIZE_LEN: usize = 8;
-const RAND_SIZE: u32 = 1 << RAND_SIZE_LEN;
-const RAND_SIZE_USIZE: usize = 1 << RAND_SIZE_LEN;
-
-/// A random number generator that uses the ISAAC algorithm[1].
-///
-/// The ISAAC algorithm is generally accepted as suitable for
-/// cryptographic purposes, but this implementation has not been
-/// verified as such. Prefer a generator like `OsRng` that defers to
-/// the operating system for cases that need high security.
-///
-/// [1]: Bob Jenkins, [*ISAAC: A fast cryptographic random number
-/// generator*](http://www.burtleburtle.net/bob/rand/isaacafa.html)
-#[derive(Copy)]
-pub struct IsaacRng {
- cnt: u32,
- rsl: [w32; RAND_SIZE_USIZE],
- mem: [w32; RAND_SIZE_USIZE],
- a: w32,
- b: w32,
- c: w32,
-}
-
-static EMPTY: IsaacRng = IsaacRng {
- cnt: 0,
- rsl: [w(0); RAND_SIZE_USIZE],
- mem: [w(0); RAND_SIZE_USIZE],
- a: w(0), b: w(0), c: w(0),
-};
-
-impl IsaacRng {
-
- /// Create an ISAAC random number generator using the default
- /// fixed seed.
- pub fn new_unseeded() -> IsaacRng {
- let mut rng = EMPTY;
- rng.init(false);
- rng
- }
-
- /// Initialises `self`. If `use_rsl` is true, then use the current value
- /// of `rsl` as a seed, otherwise construct one algorithmically (not
- /// randomly).
- fn init(&mut self, use_rsl: bool) {
- let mut a = w(0x9e3779b9);
- let mut b = a;
- let mut c = a;
- let mut d = a;
- let mut e = a;
- let mut f = a;
- let mut g = a;
- let mut h = a;
-
- macro_rules! mix {
- () => {{
- a=a^(b<<11); d=d+a; b=b+c;
- b=b^(c>>2); e=e+b; c=c+d;
- c=c^(d<<8); f=f+c; d=d+e;
- d=d^(e>>16); g=g+d; e=e+f;
- e=e^(f<<10); h=h+e; f=f+g;
- f=f^(g>>4); a=a+f; g=g+h;
- g=g^(h<<8); b=b+g; h=h+a;
- h=h^(a>>9); c=c+h; a=a+b;
- }}
- }
-
- for _ in 0..4 {
- mix!();
- }
-
- if use_rsl {
- macro_rules! memloop {
- ($arr:expr) => {{
- for i in (0..RAND_SIZE_USIZE/8).map(|i| i * 8) {
- a=a+$arr[i ]; b=b+$arr[i+1];
- c=c+$arr[i+2]; d=d+$arr[i+3];
- e=e+$arr[i+4]; f=f+$arr[i+5];
- g=g+$arr[i+6]; h=h+$arr[i+7];
- mix!();
- self.mem[i ]=a; self.mem[i+1]=b;
- self.mem[i+2]=c; self.mem[i+3]=d;
- self.mem[i+4]=e; self.mem[i+5]=f;
- self.mem[i+6]=g; self.mem[i+7]=h;
- }
- }}
- }
-
- memloop!(self.rsl);
- memloop!(self.mem);
- } else {
- for i in (0..RAND_SIZE_USIZE/8).map(|i| i * 8) {
- mix!();
- self.mem[i ]=a; self.mem[i+1]=b;
- self.mem[i+2]=c; self.mem[i+3]=d;
- self.mem[i+4]=e; self.mem[i+5]=f;
- self.mem[i+6]=g; self.mem[i+7]=h;
- }
- }
-
- self.isaac();
- }
-
- /// Refills the output buffer (`self.rsl`)
- #[inline]
- fn isaac(&mut self) {
- self.c = self.c + w(1);
- // abbreviations
- let mut a = self.a;
- let mut b = self.b + self.c;
-
- const MIDPOINT: usize = RAND_SIZE_USIZE / 2;
-
- macro_rules! ind {
- ($x:expr) => ( self.mem[($x >> 2usize).0 as usize & (RAND_SIZE_USIZE - 1)] )
- }
-
- let r = [(0, MIDPOINT), (MIDPOINT, 0)];
- for &(mr_offset, m2_offset) in r.iter() {
-
- macro_rules! rngstepp {
- ($j:expr, $shift:expr) => {{
- let base = $j;
- let mix = a << $shift;
-
- let x = self.mem[base + mr_offset];
- a = (a ^ mix) + self.mem[base + m2_offset];
- let y = ind!(x) + a + b;
- self.mem[base + mr_offset] = y;
-
- b = ind!(y >> RAND_SIZE_LEN) + x;
- self.rsl[base + mr_offset] = b;
- }}
- }
-
- macro_rules! rngstepn {
- ($j:expr, $shift:expr) => {{
- let base = $j;
- let mix = a >> $shift;
-
- let x = self.mem[base + mr_offset];
- a = (a ^ mix) + self.mem[base + m2_offset];
- let y = ind!(x) + a + b;
- self.mem[base + mr_offset] = y;
-
- b = ind!(y >> RAND_SIZE_LEN) + x;
- self.rsl[base + mr_offset] = b;
- }}
- }
-
- for i in (0..MIDPOINT/4).map(|i| i * 4) {
- rngstepp!(i + 0, 13);
- rngstepn!(i + 1, 6);
- rngstepp!(i + 2, 2);
- rngstepn!(i + 3, 16);
- }
- }
-
- self.a = a;
- self.b = b;
- self.cnt = RAND_SIZE;
- }
-}
-
-// Cannot be derived because [u32; 256] does not implement Clone
-impl Clone for IsaacRng {
- fn clone(&self) -> IsaacRng {
- *self
- }
-}
-
-impl Rng for IsaacRng {
- #[inline]
- fn next_u32(&mut self) -> u32 {
- if self.cnt == 0 {
- // make some more numbers
- self.isaac();
- }
- self.cnt -= 1;
-
- // self.cnt is at most RAND_SIZE, but that is before the
- // subtraction above. We want to index without bounds
- // checking, but this could lead to incorrect code if someone
- // misrefactors, so we check, sometimes.
- //
- // (Changes here should be reflected in Isaac64Rng.next_u64.)
- debug_assert!(self.cnt < RAND_SIZE);
-
- // (the % is cheaply telling the optimiser that we're always
- // in bounds, without unsafe. NB. this is a power of two, so
- // it optimises to a bitwise mask).
- self.rsl[(self.cnt % RAND_SIZE) as usize].0
- }
-}
-
-impl<'a> SeedableRng<&'a [u32]> for IsaacRng {
- fn reseed(&mut self, seed: &'a [u32]) {
- // make the seed into [seed[0], seed[1], ..., seed[seed.len()
- // - 1], 0, 0, ...], to fill rng.rsl.
- let seed_iter = seed.iter().map(|&x| x).chain(repeat(0u32));
-
- for (rsl_elem, seed_elem) in self.rsl.iter_mut().zip(seed_iter) {
- *rsl_elem = w(seed_elem);
- }
- self.cnt = 0;
- self.a = w(0);
- self.b = w(0);
- self.c = w(0);
-
- self.init(true);
- }
-
- /// Create an ISAAC random number generator with a seed. This can
- /// be any length, although the maximum number of elements used is
- /// 256 and any more will be silently ignored. A generator
- /// constructed with a given seed will generate the same sequence
- /// of values as all other generators constructed with that seed.
- fn from_seed(seed: &'a [u32]) -> IsaacRng {
- let mut rng = EMPTY;
- rng.reseed(seed);
- rng
- }
-}
-
-impl Rand for IsaacRng {
- fn rand<R: Rng>(other: &mut R) -> IsaacRng {
- let mut ret = EMPTY;
- unsafe {
- let ptr = ret.rsl.as_mut_ptr() as *mut u8;
-
- let slice = slice::from_raw_parts_mut(ptr, RAND_SIZE_USIZE * 4);
- other.fill_bytes(slice);
- }
- ret.cnt = 0;
- ret.a = w(0);
- ret.b = w(0);
- ret.c = w(0);
-
- ret.init(true);
- return ret;
- }
-}
-
-impl fmt::Debug for IsaacRng {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "IsaacRng {{}}")
- }
-}
-
-#[cfg(test)]
-mod test {
- use {Rng, SeedableRng};
- use super::IsaacRng;
-
- #[test]
- fn test_rng_32_rand_seeded() {
- let s = ::test::rng().gen_iter::<u32>().take(256).collect::<Vec<u32>>();
- let mut ra: IsaacRng = SeedableRng::from_seed(&s[..]);
- let mut rb: IsaacRng = SeedableRng::from_seed(&s[..]);
- assert!(::test::iter_eq(ra.gen_ascii_chars().take(100),
- rb.gen_ascii_chars().take(100)));
- }
-
- #[test]
- fn test_rng_32_seeded() {
- let seed: &[_] = &[1, 23, 456, 7890, 12345];
- let mut ra: IsaacRng = SeedableRng::from_seed(seed);
- let mut rb: IsaacRng = SeedableRng::from_seed(seed);
- assert!(::test::iter_eq(ra.gen_ascii_chars().take(100),
- rb.gen_ascii_chars().take(100)));
- }
-
- #[test]
- fn test_rng_32_reseed() {
- let s = ::test::rng().gen_iter::<u32>().take(256).collect::<Vec<u32>>();
- let mut r: IsaacRng = SeedableRng::from_seed(&s[..]);
- let string1: String = r.gen_ascii_chars().take(100).collect();
-
- r.reseed(&s[..]);
-
- let string2: String = r.gen_ascii_chars().take(100).collect();
- assert_eq!(string1, string2);
- }
-
- #[test]
- fn test_rng_32_true_values() {
- let seed: &[_] = &[1, 23, 456, 7890, 12345];
- let mut ra: IsaacRng = SeedableRng::from_seed(seed);
- // Regression test that isaac is actually using the above vector
- let v = (0..10).map(|_| ra.next_u32()).collect::<Vec<_>>();
- assert_eq!(v,
- vec!(2558573138, 873787463, 263499565, 2103644246, 3595684709,
- 4203127393, 264982119, 2765226902, 2737944514, 3900253796));
-
- let seed: &[_] = &[12345, 67890, 54321, 9876];
- let mut rb: IsaacRng = SeedableRng::from_seed(seed);
- // skip forward to the 10000th number
- for _ in 0..10000 { rb.next_u32(); }
-
- let v = (0..10).map(|_| rb.next_u32()).collect::<Vec<_>>();
- assert_eq!(v,
- vec!(3676831399, 3183332890, 2834741178, 3854698763, 2717568474,
- 1576568959, 3507990155, 179069555, 141456972, 2478885421));
- }
-}
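A note on the `reseed` implementation above: the seed slice is extended with zeros to fill the 256-word `rsl` buffer, and any words past 256 are ignored, as the `from_seed` doc states. A minimal standalone sketch of just that padding step (plain `u32` instead of `Wrapping<u32>`; names are illustrative):

// Sketch of the seed handling in `reseed` above: copy the caller's words,
// pad with zeros up to the 256-word buffer size, ignore anything beyond it.
fn pad_seed(seed: &[u32]) -> Vec<u32> {
    const RAND_SIZE: usize = 256;
    seed.iter().cloned()
        .chain(std::iter::repeat(0u32))
        .take(RAND_SIZE)
        .collect()
}

fn main() {
    let padded = pad_seed(&[1, 23, 456, 7890, 12345]);
    assert_eq!(padded.len(), 256);
    assert_eq!(&padded[..5], &[1u32, 23, 456, 7890, 12345]);
    assert!(padded[5..].iter().all(|&x| x == 0));
}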
diff --git a/rand/src/prng/isaac64.rs b/rand/src/prng/isaac64.rs
deleted file mode 100644
index b98e3fe..0000000
--- a/rand/src/prng/isaac64.rs
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The ISAAC-64 random number generator.
-
-use core::slice;
-use core::iter::repeat;
-use core::num::Wrapping as w;
-use core::fmt;
-
-use {Rng, SeedableRng, Rand};
-
-#[allow(bad_style)]
-type w64 = w<u64>;
-
-const RAND_SIZE_64_LEN: usize = 8;
-const RAND_SIZE_64: usize = 1 << RAND_SIZE_64_LEN;
-
-/// A random number generator that uses ISAAC-64[1], the 64-bit
-/// variant of the ISAAC algorithm.
-///
-/// The ISAAC algorithm is generally accepted as suitable for
-/// cryptographic purposes, but this implementation has not been
-/// verified as such. Prefer a generator like `OsRng` that defers to
-/// the operating system for cases that need high security.
-///
-/// [1]: Bob Jenkins, [*ISAAC: A fast cryptographic random number
-/// generator*](http://www.burtleburtle.net/bob/rand/isaacafa.html)
-#[derive(Copy)]
-pub struct Isaac64Rng {
- cnt: usize,
- rsl: [w64; RAND_SIZE_64],
- mem: [w64; RAND_SIZE_64],
- a: w64,
- b: w64,
- c: w64,
-}
-
-static EMPTY_64: Isaac64Rng = Isaac64Rng {
- cnt: 0,
- rsl: [w(0); RAND_SIZE_64],
- mem: [w(0); RAND_SIZE_64],
- a: w(0), b: w(0), c: w(0),
-};
-
-impl Isaac64Rng {
- /// Create a 64-bit ISAAC random number generator using the
- /// default fixed seed.
- pub fn new_unseeded() -> Isaac64Rng {
- let mut rng = EMPTY_64;
- rng.init(false);
- rng
- }
-
- /// Initialises `self`. If `use_rsl` is true, then use the current value
- /// of `rsl` as a seed, otherwise construct one algorithmically (not
- /// randomly).
- fn init(&mut self, use_rsl: bool) {
- macro_rules! init {
- ($var:ident) => (
- let mut $var = w(0x9e3779b97f4a7c13);
- )
- }
- init!(a); init!(b); init!(c); init!(d);
- init!(e); init!(f); init!(g); init!(h);
-
- macro_rules! mix {
- () => {{
- a=a-e; f=f^(h>>9); h=h+a;
- b=b-f; g=g^(a<<9); a=a+b;
- c=c-g; h=h^(b>>23); b=b+c;
- d=d-h; a=a^(c<<15); c=c+d;
- e=e-a; b=b^(d>>14); d=d+e;
- f=f-b; c=c^(e<<20); e=e+f;
- g=g-c; d=d^(f>>17); f=f+g;
- h=h-d; e=e^(g<<14); g=g+h;
- }}
- }
-
- for _ in 0..4 {
- mix!();
- }
-
- if use_rsl {
- macro_rules! memloop {
- ($arr:expr) => {{
- for i in (0..RAND_SIZE_64 / 8).map(|i| i * 8) {
- a=a+$arr[i ]; b=b+$arr[i+1];
- c=c+$arr[i+2]; d=d+$arr[i+3];
- e=e+$arr[i+4]; f=f+$arr[i+5];
- g=g+$arr[i+6]; h=h+$arr[i+7];
- mix!();
- self.mem[i ]=a; self.mem[i+1]=b;
- self.mem[i+2]=c; self.mem[i+3]=d;
- self.mem[i+4]=e; self.mem[i+5]=f;
- self.mem[i+6]=g; self.mem[i+7]=h;
- }
- }}
- }
-
- memloop!(self.rsl);
- memloop!(self.mem);
- } else {
- for i in (0..RAND_SIZE_64 / 8).map(|i| i * 8) {
- mix!();
- self.mem[i ]=a; self.mem[i+1]=b;
- self.mem[i+2]=c; self.mem[i+3]=d;
- self.mem[i+4]=e; self.mem[i+5]=f;
- self.mem[i+6]=g; self.mem[i+7]=h;
- }
- }
-
- self.isaac64();
- }
-
- /// Refills the output buffer (`self.rsl`)
- fn isaac64(&mut self) {
- self.c = self.c + w(1);
- // abbreviations
- let mut a = self.a;
- let mut b = self.b + self.c;
- const MIDPOINT: usize = RAND_SIZE_64 / 2;
- const MP_VEC: [(usize, usize); 2] = [(0,MIDPOINT), (MIDPOINT, 0)];
- macro_rules! ind {
- ($x:expr) => {
- *self.mem.get_unchecked((($x >> 3usize).0 as usize) & (RAND_SIZE_64 - 1))
- }
- }
-
- for &(mr_offset, m2_offset) in MP_VEC.iter() {
- for base in (0..MIDPOINT / 4).map(|i| i * 4) {
-
- macro_rules! rngstepp {
- ($j:expr, $shift:expr) => {{
- let base = base + $j;
- let mix = a ^ (a << $shift);
- let mix = if $j == 0 {!mix} else {mix};
-
- unsafe {
- let x = *self.mem.get_unchecked(base + mr_offset);
- a = mix + *self.mem.get_unchecked(base + m2_offset);
- let y = ind!(x) + a + b;
- *self.mem.get_unchecked_mut(base + mr_offset) = y;
-
- b = ind!(y >> RAND_SIZE_64_LEN) + x;
- *self.rsl.get_unchecked_mut(base + mr_offset) = b;
- }
- }}
- }
-
- macro_rules! rngstepn {
- ($j:expr, $shift:expr) => {{
- let base = base + $j;
- let mix = a ^ (a >> $shift);
- let mix = if $j == 0 {!mix} else {mix};
-
- unsafe {
- let x = *self.mem.get_unchecked(base + mr_offset);
- a = mix + *self.mem.get_unchecked(base + m2_offset);
- let y = ind!(x) + a + b;
- *self.mem.get_unchecked_mut(base + mr_offset) = y;
-
- b = ind!(y >> RAND_SIZE_64_LEN) + x;
- *self.rsl.get_unchecked_mut(base + mr_offset) = b;
- }
- }}
- }
-
- rngstepp!(0, 21);
- rngstepn!(1, 5);
- rngstepp!(2, 12);
- rngstepn!(3, 33);
- }
- }
-
- self.a = a;
- self.b = b;
- self.cnt = RAND_SIZE_64;
- }
-}
-
-// Cannot be derived because [u64; 256] does not implement Clone
-impl Clone for Isaac64Rng {
- fn clone(&self) -> Isaac64Rng {
- *self
- }
-}
-
-impl Rng for Isaac64Rng {
- #[inline]
- fn next_u32(&mut self) -> u32 {
- self.next_u64() as u32
- }
-
- #[inline]
- fn next_u64(&mut self) -> u64 {
- if self.cnt == 0 {
- // make some more numbers
- self.isaac64();
- }
- self.cnt -= 1;
-
- // See corresponding location in IsaacRng.next_u32 for
- // explanation.
- debug_assert!(self.cnt < RAND_SIZE_64);
- self.rsl[(self.cnt % RAND_SIZE_64) as usize].0
- }
-}
-
-impl<'a> SeedableRng<&'a [u64]> for Isaac64Rng {
- fn reseed(&mut self, seed: &'a [u64]) {
- // make the seed into [seed[0], seed[1], ..., seed[seed.len()
- // - 1], 0, 0, ...], to fill rng.rsl.
- let seed_iter = seed.iter().map(|&x| x).chain(repeat(0u64));
-
- for (rsl_elem, seed_elem) in self.rsl.iter_mut().zip(seed_iter) {
- *rsl_elem = w(seed_elem);
- }
- self.cnt = 0;
- self.a = w(0);
- self.b = w(0);
- self.c = w(0);
-
- self.init(true);
- }
-
- /// Create an ISAAC random number generator with a seed. This can
- /// be any length, although the maximum number of elements used is
- /// 256 and any more will be silently ignored. A generator
- /// constructed with a given seed will generate the same sequence
- /// of values as all other generators constructed with that seed.
- fn from_seed(seed: &'a [u64]) -> Isaac64Rng {
- let mut rng = EMPTY_64;
- rng.reseed(seed);
- rng
- }
-}
-
-impl Rand for Isaac64Rng {
- fn rand<R: Rng>(other: &mut R) -> Isaac64Rng {
- let mut ret = EMPTY_64;
- unsafe {
- let ptr = ret.rsl.as_mut_ptr() as *mut u8;
-
- let slice = slice::from_raw_parts_mut(ptr, RAND_SIZE_64 * 8);
- other.fill_bytes(slice);
- }
- ret.cnt = 0;
- ret.a = w(0);
- ret.b = w(0);
- ret.c = w(0);
-
- ret.init(true);
- return ret;
- }
-}
-
-impl fmt::Debug for Isaac64Rng {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "Isaac64Rng {{}}")
- }
-}
-
-#[cfg(test)]
-mod test {
- use {Rng, SeedableRng};
- use super::Isaac64Rng;
-
- #[test]
- fn test_rng_64_rand_seeded() {
- let s = ::test::rng().gen_iter::<u64>().take(256).collect::<Vec<u64>>();
- let mut ra: Isaac64Rng = SeedableRng::from_seed(&s[..]);
- let mut rb: Isaac64Rng = SeedableRng::from_seed(&s[..]);
- assert!(::test::iter_eq(ra.gen_ascii_chars().take(100),
- rb.gen_ascii_chars().take(100)));
- }
-
- #[test]
- fn test_rng_64_seeded() {
- let seed: &[_] = &[1, 23, 456, 7890, 12345];
- let mut ra: Isaac64Rng = SeedableRng::from_seed(seed);
- let mut rb: Isaac64Rng = SeedableRng::from_seed(seed);
- assert!(::test::iter_eq(ra.gen_ascii_chars().take(100),
- rb.gen_ascii_chars().take(100)));
- }
-
- #[test]
- fn test_rng_64_reseed() {
- let s = ::test::rng().gen_iter::<u64>().take(256).collect::<Vec<u64>>();
- let mut r: Isaac64Rng = SeedableRng::from_seed(&s[..]);
- let string1: String = r.gen_ascii_chars().take(100).collect();
-
- r.reseed(&s[..]);
-
- let string2: String = r.gen_ascii_chars().take(100).collect();
- assert_eq!(string1, string2);
- }
-
- #[test]
- fn test_rng_64_true_values() {
- let seed: &[_] = &[1, 23, 456, 7890, 12345];
- let mut ra: Isaac64Rng = SeedableRng::from_seed(seed);
- // Regression test that isaac is actually using the above vector
- let v = (0..10).map(|_| ra.next_u64()).collect::<Vec<_>>();
- assert_eq!(v,
- vec!(547121783600835980, 14377643087320773276, 17351601304698403469,
- 1238879483818134882, 11952566807690396487, 13970131091560099343,
- 4469761996653280935, 15552757044682284409, 6860251611068737823,
- 13722198873481261842));
-
- let seed: &[_] = &[12345, 67890, 54321, 9876];
- let mut rb: Isaac64Rng = SeedableRng::from_seed(seed);
- // skip forward to the 10000th number
- for _ in 0..10000 { rb.next_u64(); }
-
- let v = (0..10).map(|_| rb.next_u64()).collect::<Vec<_>>();
- assert_eq!(v,
- vec!(18143823860592706164, 8491801882678285927, 2699425367717515619,
- 17196852593171130876, 2606123525235546165, 15790932315217671084,
- 596345674630742204, 9947027391921273664, 11788097613744130851,
- 10391409374914919106));
- }
-
- #[test]
- fn test_rng_clone() {
- let seed: &[_] = &[1, 23, 456, 7890, 12345];
- let mut rng: Isaac64Rng = SeedableRng::from_seed(seed);
- let mut clone = rng.clone();
- for _ in 0..16 {
- assert_eq!(rng.next_u64(), clone.next_u64());
- }
- }
-}
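The `ind!` macro above picks a table entry from `mem` by shifting off the three low (byte-offset) bits of a 64-bit word and masking with `RAND_SIZE_64 - 1`; the mask is equivalent to a modulo because the table size is a power of two. A small standalone check of that indexing trick (the sample value is arbitrary):

// Index derivation used by `ind!` above: drop the byte-within-word bits,
// then mask into the 256-entry table. Masking with (size - 1) equals a
// modulo by size whenever size is a power of two.
fn main() {
    const RAND_SIZE_64: u64 = 256;
    let x: u64 = 0xdead_beef_cafe_f00d; // arbitrary sample value
    let index = ((x >> 3) & (RAND_SIZE_64 - 1)) as usize;
    assert!(index < RAND_SIZE_64 as usize);
    assert_eq!(index as u64, (x >> 3) % RAND_SIZE_64);
}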
diff --git a/rand/src/prng/mod.rs b/rand/src/prng/mod.rs
index ed3e018..3c0d27b 100644
--- a/rand/src/prng/mod.rs
+++ b/rand/src/prng/mod.rs
@@ -1,51 +1,37 @@
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Pseudo random number generators are algorithms to produce *apparently
-//! random* numbers deterministically, and usually fairly quickly.
+//! Pseudo-random number generators.
+//!
+//! This module is deprecated:
//!
-//! So long as the algorithm is computationally secure, is initialised with
-//! sufficient entropy (i.e. unknown by an attacker), and its internal state is
-//! also protected (unknown to an attacker), the output will also be
-//! *computationally secure*. Computationally Secure Pseudo Random Number
-//! Generators (CSPRNGs) are thus suitable sources of random numbers for
-//! cryptography. There are a couple of gotchas here, however. First, the seed
-//! used for initialisation must be unknown. Usually this should be provided by
-//! the operating system and should usually be secure, however this may not
-//! always be the case (especially soon after startup). Second, user-space
-//! memory may be vulnerable, for example when written to swap space, and after
-//! forking a child process should reinitialise any user-space PRNGs. For this
-//! reason it may be preferable to source random numbers directly from the OS
-//! for cryptographic applications.
-//!
-//! PRNGs are also widely used for non-cryptographic uses: randomised
-//! algorithms, simulations, games. In these applications it is usually not
-//! important for numbers to be cryptographically *unguessable*, but even
-//! distribution and independence from other samples (from the point of view
-//! of someone unaware of the algorithm used, at least) may still be important.
-//! Good PRNGs should satisfy these properties, but do not take them for
-//! granted; Wikipedia's article on
-//! [Pseudorandom number generators](https://en.wikipedia.org/wiki/Pseudorandom_number_generator)
-//! provides some background on this topic.
-//!
-//! Care should be taken when seeding (initialising) PRNGs. Some PRNGs have
-//! short periods for some seeds. If one PRNG is seeded from another using the
-//! same algorithm, it is possible that both will yield the same sequence of
-//! values (with some lag).
-
-mod chacha;
-mod isaac;
-mod isaac64;
-mod xorshift;
+//! - documentation has moved to
+//! [The Book](https://rust-random.github.io/book/guide-rngs.html),
+//! - PRNGs have moved to other `rand_*` crates.
-pub use self::chacha::ChaChaRng;
-pub use self::isaac::IsaacRng;
-pub use self::isaac64::Isaac64Rng;
-pub use self::xorshift::XorShiftRng;
+// Deprecations (to be removed in 0.7)
+#[doc(hidden)] #[allow(deprecated)]
+pub use deprecated::XorShiftRng;
+#[doc(hidden)] pub mod isaac {
+ // Note: we miss `IsaacCore` here but probably unimportant.
+ #[allow(deprecated)] pub use deprecated::IsaacRng;
+}
+#[doc(hidden)] pub mod isaac64 {
+ #[allow(deprecated)] pub use deprecated::Isaac64Rng;
+}
+#[doc(hidden)] #[allow(deprecated)] pub use deprecated::{IsaacRng, Isaac64Rng};
+#[doc(hidden)] pub mod chacha {
+ // Note: we miss `ChaChaCore` here but probably unimportant.
+ #[allow(deprecated)] pub use deprecated::ChaChaRng;
+}
+#[doc(hidden)] #[allow(deprecated)] pub use deprecated::ChaChaRng;
+#[doc(hidden)] pub mod hc128 {
+ // Note: we miss `Hc128Core` here but probably unimportant.
+ #[allow(deprecated)] pub use deprecated::Hc128Rng;
+}
+#[doc(hidden)] #[allow(deprecated)] pub use deprecated::Hc128Rng;
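The rewritten module above keeps the old `rand::prng` paths alive only as hidden, deprecated re-exports, so downstream code keeps compiling with a warning. A minimal sketch of that re-export pattern; `DemoRng` and the module names are made up for illustration:

// The type is defined (and deprecated) in one private module...
mod deprecated {
    #[deprecated(since = "0.6.0", note = "import from the replacement crate instead")]
    pub struct DemoRng;
}

// ...while the old public path is kept as a hidden re-export.
#[doc(hidden)]
pub mod old_path {
    #[allow(deprecated)]
    pub use super::deprecated::DemoRng;
}

#[allow(deprecated)]
fn main() {
    let _rng = old_path::DemoRng; // still compiles, but is marked deprecated
}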
diff --git a/rand/src/prng/xorshift.rs b/rand/src/prng/xorshift.rs
deleted file mode 100644
index dd367e9..0000000
--- a/rand/src/prng/xorshift.rs
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Xorshift generators
-
-use core::num::Wrapping as w;
-use {Rng, SeedableRng, Rand};
-
-/// An Xorshift[1] random number
-/// generator.
-///
-/// The Xorshift algorithm is not suitable for cryptographic purposes
-/// but is very fast. If you do not know for sure that it fits your
-/// requirements, use a more secure one such as `IsaacRng` or `OsRng`.
-///
-/// [1]: Marsaglia, George (July 2003). ["Xorshift
-/// RNGs"](http://www.jstatsoft.org/v08/i14/paper). *Journal of
-/// Statistical Software*. Vol. 8 (Issue 14).
-#[allow(missing_copy_implementations)]
-#[derive(Clone, Debug)]
-pub struct XorShiftRng {
- x: w<u32>,
- y: w<u32>,
- z: w<u32>,
- w: w<u32>,
-}
-
-impl XorShiftRng {
- /// Creates a new XorShiftRng instance which is not seeded.
- ///
- /// The initial values of this RNG are constants, so all generators created
- /// by this function will yield the same stream of random numbers. It is
- /// highly recommended that this is created through `SeedableRng` instead of
- /// this function
- pub fn new_unseeded() -> XorShiftRng {
- XorShiftRng {
- x: w(0x193a6754),
- y: w(0xa8a7d469),
- z: w(0x97830e05),
- w: w(0x113ba7bb),
- }
- }
-}
-
-impl Rng for XorShiftRng {
- #[inline]
- fn next_u32(&mut self) -> u32 {
- let x = self.x;
- let t = x ^ (x << 11);
- self.x = self.y;
- self.y = self.z;
- self.z = self.w;
- let w_ = self.w;
- self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8));
- self.w.0
- }
-}
-
-impl SeedableRng<[u32; 4]> for XorShiftRng {
- /// Reseed an XorShiftRng. This will panic if `seed` is entirely 0.
- fn reseed(&mut self, seed: [u32; 4]) {
- assert!(!seed.iter().all(|&x| x == 0),
- "XorShiftRng.reseed called with an all zero seed.");
-
- self.x = w(seed[0]);
- self.y = w(seed[1]);
- self.z = w(seed[2]);
- self.w = w(seed[3]);
- }
-
- /// Create a new XorShiftRng. This will panic if `seed` is entirely 0.
- fn from_seed(seed: [u32; 4]) -> XorShiftRng {
- assert!(!seed.iter().all(|&x| x == 0),
- "XorShiftRng::from_seed called with an all zero seed.");
-
- XorShiftRng {
- x: w(seed[0]),
- y: w(seed[1]),
- z: w(seed[2]),
- w: w(seed[3]),
- }
- }
-}
-
-impl Rand for XorShiftRng {
- fn rand<R: Rng>(rng: &mut R) -> XorShiftRng {
- let mut tuple: (u32, u32, u32, u32) = rng.gen();
- while tuple == (0, 0, 0, 0) {
- tuple = rng.gen();
- }
- let (x, y, z, w_) = tuple;
- XorShiftRng { x: w(x), y: w(y), z: w(z), w: w(w_) }
- }
-}
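The all-zero-seed panic in `reseed`/`from_seed` above exists because the xorshift update maps the zero state to itself, so a zero-seeded generator would output nothing but zeros. A standalone check of that property using the update from `next_u32` and the fixed-seed constants from `new_unseeded` (plain `u32`s; only shifts and xors are involved, so no wrapping arithmetic is needed):

// One step of the xorshift update from `next_u32` above.
fn xorshift_step(state: &mut (u32, u32, u32, u32)) -> u32 {
    let (x, y, z, w) = *state;
    let t = x ^ (x << 11);
    let new_w = w ^ (w >> 19) ^ (t ^ (t >> 8));
    *state = (y, z, w, new_w);
    new_w
}

fn main() {
    // The zero state is a fixed point: every output stays 0.
    let mut zero_state = (0u32, 0, 0, 0);
    for _ in 0..8 {
        assert_eq!(xorshift_step(&mut zero_state), 0);
    }

    // Any non-zero seed escapes this, e.g. the constants from `new_unseeded`.
    let mut seeded = (0x193a6754u32, 0xa8a7d469, 0x97830e05, 0x113ba7bb);
    println!("first output: {:#010x}", xorshift_step(&mut seeded));
}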
diff --git a/rand/src/rand_impls.rs b/rand/src/rand_impls.rs
deleted file mode 100644
index a865bb6..0000000
--- a/rand/src/rand_impls.rs
+++ /dev/null
@@ -1,299 +0,0 @@
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The implementations of `Rand` for the built-in types.
-
-use core::{char, mem};
-
-use {Rand,Rng};
-
-impl Rand for isize {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> isize {
- if mem::size_of::<isize>() == 4 {
- rng.gen::<i32>() as isize
- } else {
- rng.gen::<i64>() as isize
- }
- }
-}
-
-impl Rand for i8 {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> i8 {
- rng.next_u32() as i8
- }
-}
-
-impl Rand for i16 {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> i16 {
- rng.next_u32() as i16
- }
-}
-
-impl Rand for i32 {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> i32 {
- rng.next_u32() as i32
- }
-}
-
-impl Rand for i64 {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> i64 {
- rng.next_u64() as i64
- }
-}
-
-#[cfg(feature = "i128_support")]
-impl Rand for i128 {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> i128 {
- rng.gen::<u128>() as i128
- }
-}
-
-impl Rand for usize {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> usize {
- if mem::size_of::<usize>() == 4 {
- rng.gen::<u32>() as usize
- } else {
- rng.gen::<u64>() as usize
- }
- }
-}
-
-impl Rand for u8 {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> u8 {
- rng.next_u32() as u8
- }
-}
-
-impl Rand for u16 {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> u16 {
- rng.next_u32() as u16
- }
-}
-
-impl Rand for u32 {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> u32 {
- rng.next_u32()
- }
-}
-
-impl Rand for u64 {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> u64 {
- rng.next_u64()
- }
-}
-
-#[cfg(feature = "i128_support")]
-impl Rand for u128 {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> u128 {
- ((rng.next_u64() as u128) << 64) | (rng.next_u64() as u128)
- }
-}
-
-
-macro_rules! float_impls {
- ($mod_name:ident, $ty:ty, $mantissa_bits:expr, $method_name:ident) => {
- mod $mod_name {
- use {Rand, Rng, Open01, Closed01};
-
- const SCALE: $ty = (1u64 << $mantissa_bits) as $ty;
-
- impl Rand for $ty {
- /// Generate a floating point number in the half-open
- /// interval `[0,1)`.
- ///
- /// See `Closed01` for the closed interval `[0,1]`,
- /// and `Open01` for the open interval `(0,1)`.
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> $ty {
- rng.$method_name()
- }
- }
- impl Rand for Open01<$ty> {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> Open01<$ty> {
- // add a small amount (specifically 2 bits below
- // the precision of f64/f32 at 1.0), so that small
- // numbers are larger than 0, but large numbers
- // aren't pushed to/above 1.
- Open01(rng.$method_name() + 0.25 / SCALE)
- }
- }
- impl Rand for Closed01<$ty> {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> Closed01<$ty> {
- // rescale so that 1.0 - epsilon becomes 1.0
- // precisely.
- Closed01(rng.$method_name() * SCALE / (SCALE - 1.0))
- }
- }
- }
- }
-}
-float_impls! { f64_rand_impls, f64, 53, next_f64 }
-float_impls! { f32_rand_impls, f32, 24, next_f32 }
-
-impl Rand for char {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> char {
- // a char is 21 bits
- const CHAR_MASK: u32 = 0x001f_ffff;
- loop {
- // Rejection sampling. About 0.2% of numbers with at most
- // 21-bits are invalid codepoints (surrogates), so this
- // will succeed first go almost every time.
- match char::from_u32(rng.next_u32() & CHAR_MASK) {
- Some(c) => return c,
- None => {}
- }
- }
- }
-}
-
-impl Rand for bool {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> bool {
- rng.gen::<u8>() & 1 == 1
- }
-}
-
-macro_rules! tuple_impl {
- // use variables to indicate the arity of the tuple
- ($($tyvar:ident),* ) => {
- // the trailing commas are for the 1 tuple
- impl<
- $( $tyvar : Rand ),*
- > Rand for ( $( $tyvar ),* , ) {
-
- #[inline]
- fn rand<R: Rng>(_rng: &mut R) -> ( $( $tyvar ),* , ) {
- (
- // use the $tyvar's to get the appropriate number of
- // repeats (they're not actually needed)
- $(
- _rng.gen::<$tyvar>()
- ),*
- ,
- )
- }
- }
- }
-}
-
-impl Rand for () {
- #[inline]
- fn rand<R: Rng>(_: &mut R) -> () { () }
-}
-tuple_impl!{A}
-tuple_impl!{A, B}
-tuple_impl!{A, B, C}
-tuple_impl!{A, B, C, D}
-tuple_impl!{A, B, C, D, E}
-tuple_impl!{A, B, C, D, E, F}
-tuple_impl!{A, B, C, D, E, F, G}
-tuple_impl!{A, B, C, D, E, F, G, H}
-tuple_impl!{A, B, C, D, E, F, G, H, I}
-tuple_impl!{A, B, C, D, E, F, G, H, I, J}
-tuple_impl!{A, B, C, D, E, F, G, H, I, J, K}
-tuple_impl!{A, B, C, D, E, F, G, H, I, J, K, L}
-
-macro_rules! array_impl {
- {$n:expr, $t:ident, $($ts:ident,)*} => {
- array_impl!{($n - 1), $($ts,)*}
-
- impl<T> Rand for [T; $n] where T: Rand {
- #[inline]
- fn rand<R: Rng>(_rng: &mut R) -> [T; $n] {
- [_rng.gen::<$t>(), $(_rng.gen::<$ts>()),*]
- }
- }
- };
- {$n:expr,} => {
- impl<T> Rand for [T; $n] {
- fn rand<R: Rng>(_rng: &mut R) -> [T; $n] { [] }
- }
- };
-}
-
-array_impl!{32, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T,}
-
-impl<T:Rand> Rand for Option<T> {
- #[inline]
- fn rand<R: Rng>(rng: &mut R) -> Option<T> {
- if rng.gen() {
- Some(rng.gen())
- } else {
- None
- }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use {Rng, thread_rng, Open01, Closed01};
-
- struct ConstantRng(u64);
- impl Rng for ConstantRng {
- fn next_u32(&mut self) -> u32 {
- let ConstantRng(v) = *self;
- v as u32
- }
- fn next_u64(&mut self) -> u64 {
- let ConstantRng(v) = *self;
- v
- }
- }
-
- #[test]
- fn floating_point_edge_cases() {
- // the test for exact equality is correct here.
- assert!(ConstantRng(0xffff_ffff).gen::<f32>() != 1.0);
- assert!(ConstantRng(0xffff_ffff_ffff_ffff).gen::<f64>() != 1.0);
- }
-
- #[test]
- fn rand_open() {
- // this is unlikely to catch an incorrect implementation that
- // generates exactly 0 or 1, but it keeps it sane.
- let mut rng = thread_rng();
- for _ in 0..1_000 {
- // strict inequalities
- let Open01(f) = rng.gen::<Open01<f64>>();
- assert!(0.0 < f && f < 1.0);
-
- let Open01(f) = rng.gen::<Open01<f32>>();
- assert!(0.0 < f && f < 1.0);
- }
- }
-
- #[test]
- fn rand_closed() {
- let mut rng = thread_rng();
- for _ in 0..1_000 {
- // strict inequalities
- let Closed01(f) = rng.gen::<Closed01<f64>>();
- assert!(0.0 <= f && f <= 1.0);
-
- let Closed01(f) = rng.gen::<Closed01<f32>>();
- assert!(0.0 <= f && f <= 1.0);
- }
- }
-}
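A worked check of the `Open01`/`Closed01` adjustments in the `float_impls!` macro above, for `f32` with 24 mantissa bits. It assumes the base method returns `k / SCALE` for integers `0 <= k < SCALE`, so the largest half-open value is `(SCALE - 1) / SCALE`; the asserts only verify the rescaling arithmetic described in the comments:

fn main() {
    const SCALE: f32 = (1u64 << 24) as f32;

    // Largest value of the half-open [0, 1) range.
    let max_half_open = (SCALE - 1.0) / SCALE;
    assert!(max_half_open < 1.0);

    // Closed01: "rescale so that 1.0 - epsilon becomes 1.0 precisely".
    assert_eq!(max_half_open * SCALE / (SCALE - 1.0), 1.0);

    // Open01: adding 0.25 / SCALE keeps 0 strictly positive without
    // pushing the largest value to or above 1.
    assert!(0.25 / SCALE > 0.0);
    assert!(max_half_open + 0.25 / SCALE < 1.0);
}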
diff --git a/rand/src/read.rs b/rand/src/read.rs
deleted file mode 100644
index c7351b7..0000000
--- a/rand/src/read.rs
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A wrapper around any Read to treat it as an RNG.
-
-use std::io::{self, Read};
-use std::mem;
-use Rng;
-
-/// An RNG that reads random bytes straight from a `Read`. This will
-/// work best with an infinite reader, but this is not required.
-///
-/// # Panics
-///
-/// It will panic if there is insufficient data to fulfill a request.
-///
-/// # Example
-///
-/// ```rust
-/// use rand::{read, Rng};
-///
-/// let data = vec![1, 2, 3, 4, 5, 6, 7, 8];
-/// let mut rng = read::ReadRng::new(&data[..]);
-/// println!("{:x}", rng.gen::<u32>());
-/// ```
-#[derive(Debug)]
-pub struct ReadRng<R> {
- reader: R
-}
-
-impl<R: Read> ReadRng<R> {
- /// Create a new `ReadRng` from a `Read`.
- pub fn new(r: R) -> ReadRng<R> {
- ReadRng {
- reader: r
- }
- }
-}
-
-impl<R: Read> Rng for ReadRng<R> {
- fn next_u32(&mut self) -> u32 {
- // This is designed for speed: reading a LE integer on a LE
- // platform just involves blitting the bytes into the memory
- // of the u32, similarly for BE on BE; avoiding byteswapping.
- let mut buf = [0; 4];
- fill(&mut self.reader, &mut buf).unwrap();
- unsafe { *(buf.as_ptr() as *const u32) }
- }
- fn next_u64(&mut self) -> u64 {
- // see above for explanation.
- let mut buf = [0; 8];
- fill(&mut self.reader, &mut buf).unwrap();
- unsafe { *(buf.as_ptr() as *const u64) }
- }
- fn fill_bytes(&mut self, v: &mut [u8]) {
- if v.len() == 0 { return }
- fill(&mut self.reader, v).unwrap();
- }
-}
-
-fn fill(r: &mut Read, mut buf: &mut [u8]) -> io::Result<()> {
- while buf.len() > 0 {
- match try!(r.read(buf)) {
- 0 => return Err(io::Error::new(io::ErrorKind::Other,
- "end of file reached")),
- n => buf = &mut mem::replace(&mut buf, &mut [])[n..],
- }
- }
- Ok(())
-}
-
-#[cfg(test)]
-mod test {
- use super::ReadRng;
- use Rng;
-
- #[test]
- fn test_reader_rng_u64() {
- // transmute from the target to avoid endianness concerns.
- let v = vec![0u8, 0, 0, 0, 0, 0, 0, 1,
- 0 , 0, 0, 0, 0, 0, 0, 2,
- 0, 0, 0, 0, 0, 0, 0, 3];
- let mut rng = ReadRng::new(&v[..]);
-
- assert_eq!(rng.next_u64(), 1_u64.to_be());
- assert_eq!(rng.next_u64(), 2_u64.to_be());
- assert_eq!(rng.next_u64(), 3_u64.to_be());
- }
- #[test]
- fn test_reader_rng_u32() {
- let v = vec![0u8, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3];
- let mut rng = ReadRng::new(&v[..]);
-
- assert_eq!(rng.next_u32(), 1_u32.to_be());
- assert_eq!(rng.next_u32(), 2_u32.to_be());
- assert_eq!(rng.next_u32(), 3_u32.to_be());
- }
- #[test]
- fn test_reader_rng_fill_bytes() {
- let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
- let mut w = [0u8; 8];
-
- let mut rng = ReadRng::new(&v[..]);
- rng.fill_bytes(&mut w);
-
- assert!(v == w);
- }
-
- #[test]
- #[should_panic]
- fn test_reader_rng_insufficient_bytes() {
- let mut rng = ReadRng::new(&[][..]);
- let mut v = [0u8; 3];
- rng.fill_bytes(&mut v);
- }
-}
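The old `next_u32`/`next_u64` above read integers in native byte order by casting the buffer pointer, which is why the tests compare against `1_u64.to_be()` and friends: feeding big-endian-encoded bytes through a native-endian read yields `value.to_be()` on any platform. A small safe equivalent of that read:

fn main() {
    // Big-endian encoding of 1u32, as used in the test vectors above.
    let buf = [0u8, 0, 0, 1];
    // Native-endian read, the safe counterpart of the old pointer cast.
    let native = u32::from_ne_bytes(buf);
    assert_eq!(native, 1u32.to_be());
}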
diff --git a/rand/src/reseeding.rs b/rand/src/reseeding.rs
deleted file mode 100644
index 1f24e20..0000000
--- a/rand/src/reseeding.rs
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A wrapper around another RNG that reseeds it after it
-//! generates a certain number of random bytes.
-
-use core::default::Default;
-
-use {Rng, SeedableRng};
-
-/// How many bytes of entropy the underlying RNG is allowed to generate
-/// before it is reseeded
-const DEFAULT_GENERATION_THRESHOLD: u64 = 32 * 1024;
-
-/// A wrapper around any RNG which reseeds the underlying RNG after it
-/// has generated a certain number of random bytes.
-#[derive(Debug)]
-pub struct ReseedingRng<R, Rsdr> {
- rng: R,
- generation_threshold: u64,
- bytes_generated: u64,
- /// Controls the behaviour when reseeding the RNG.
- pub reseeder: Rsdr,
-}
-
-impl<R: Rng, Rsdr: Reseeder<R>> ReseedingRng<R, Rsdr> {
- /// Create a new `ReseedingRng` with the given parameters.
- ///
- /// # Arguments
- ///
- /// * `rng`: the random number generator to use.
- /// * `generation_threshold`: the number of bytes of entropy at which to reseed the RNG.
- /// * `reseeder`: the reseeding object to use.
- pub fn new(rng: R, generation_threshold: u64, reseeder: Rsdr) -> ReseedingRng<R,Rsdr> {
- ReseedingRng {
- rng: rng,
- generation_threshold: generation_threshold,
- bytes_generated: 0,
- reseeder: reseeder
- }
- }
-
- /// Reseed the internal RNG if the number of bytes that have been
- /// generated exceed the threshold.
- pub fn reseed_if_necessary(&mut self) {
- if self.bytes_generated >= self.generation_threshold {
- self.reseeder.reseed(&mut self.rng);
- self.bytes_generated = 0;
- }
- }
-}
-
-
-impl<R: Rng, Rsdr: Reseeder<R>> Rng for ReseedingRng<R, Rsdr> {
- fn next_u32(&mut self) -> u32 {
- self.reseed_if_necessary();
- self.bytes_generated += 4;
- self.rng.next_u32()
- }
-
- fn next_u64(&mut self) -> u64 {
- self.reseed_if_necessary();
- self.bytes_generated += 8;
- self.rng.next_u64()
- }
-
- fn fill_bytes(&mut self, dest: &mut [u8]) {
- self.reseed_if_necessary();
- self.bytes_generated += dest.len() as u64;
- self.rng.fill_bytes(dest)
- }
-}
-
-impl<S, R: SeedableRng<S>, Rsdr: Reseeder<R> + Default>
- SeedableRng<(Rsdr, S)> for ReseedingRng<R, Rsdr> {
- fn reseed(&mut self, (rsdr, seed): (Rsdr, S)) {
- self.rng.reseed(seed);
- self.reseeder = rsdr;
- self.bytes_generated = 0;
- }
-
- /// Create a new `ReseedingRng` from the given reseeder and
- /// seed. This uses a default value for `generation_threshold`.
- fn from_seed((rsdr, seed): (Rsdr, S)) -> ReseedingRng<R, Rsdr> {
- ReseedingRng {
- rng: SeedableRng::from_seed(seed),
- generation_threshold: DEFAULT_GENERATION_THRESHOLD,
- bytes_generated: 0,
- reseeder: rsdr
- }
- }
-}
-
-/// Something that can be used to reseed an RNG via `ReseedingRng`.
-///
-/// # Example
-///
-/// ```rust
-/// use rand::{Rng, SeedableRng, StdRng};
-/// use rand::reseeding::{Reseeder, ReseedingRng};
-///
-/// struct TickTockReseeder { tick: bool }
-/// impl Reseeder<StdRng> for TickTockReseeder {
-/// fn reseed(&mut self, rng: &mut StdRng) {
-/// let val = if self.tick {0} else {1};
-/// rng.reseed(&[val]);
-/// self.tick = !self.tick;
-/// }
-/// }
-/// fn main() {
-/// let rsdr = TickTockReseeder { tick: true };
-///
-/// let inner = StdRng::new().unwrap();
-/// let mut rng = ReseedingRng::new(inner, 10, rsdr);
-///
-/// // this will repeat, because it gets reseeded very regularly.
-/// let s: String = rng.gen_ascii_chars().take(100).collect();
-/// println!("{}", s);
-/// }
-///
-/// ```
-pub trait Reseeder<R> {
- /// Reseed the given RNG.
- fn reseed(&mut self, rng: &mut R);
-}
-
-/// Reseed an RNG using a `Default` instance. This reseeds by
-/// replacing the RNG with the result of a `Default::default` call.
-#[derive(Clone, Copy, Debug)]
-pub struct ReseedWithDefault;
-
-impl<R: Rng + Default> Reseeder<R> for ReseedWithDefault {
- fn reseed(&mut self, rng: &mut R) {
- *rng = Default::default();
- }
-}
-impl Default for ReseedWithDefault {
- fn default() -> ReseedWithDefault { ReseedWithDefault }
-}
-
-#[cfg(test)]
-mod test {
- use std::default::Default;
- use std::iter::repeat;
- use super::{ReseedingRng, ReseedWithDefault};
- use {SeedableRng, Rng};
-
- struct Counter {
- i: u32
- }
-
- impl Rng for Counter {
- fn next_u32(&mut self) -> u32 {
- self.i += 1;
- // very random
- self.i - 1
- }
- }
- impl Default for Counter {
- fn default() -> Counter {
- Counter { i: 0 }
- }
- }
- impl SeedableRng<u32> for Counter {
- fn reseed(&mut self, seed: u32) {
- self.i = seed;
- }
- fn from_seed(seed: u32) -> Counter {
- Counter { i: seed }
- }
- }
- type MyRng = ReseedingRng<Counter, ReseedWithDefault>;
-
- #[test]
- fn test_reseeding() {
- let mut rs = ReseedingRng::new(Counter {i:0}, 400, ReseedWithDefault);
-
- let mut i = 0;
- for _ in 0..1000 {
- assert_eq!(rs.next_u32(), i % 100);
- i += 1;
- }
- }
-
- #[test]
- fn test_rng_seeded() {
- let mut ra: MyRng = SeedableRng::from_seed((ReseedWithDefault, 2));
- let mut rb: MyRng = SeedableRng::from_seed((ReseedWithDefault, 2));
- assert!(::test::iter_eq(ra.gen_ascii_chars().take(100),
- rb.gen_ascii_chars().take(100)));
- }
-
- #[test]
- fn test_rng_reseed() {
- let mut r: MyRng = SeedableRng::from_seed((ReseedWithDefault, 3));
- let string1: String = r.gen_ascii_chars().take(100).collect();
-
- r.reseed((ReseedWithDefault, 3));
-
- let string2: String = r.gen_ascii_chars().take(100).collect();
- assert_eq!(string1, string2);
- }
-
- const FILL_BYTES_V_LEN: usize = 13579;
- #[test]
- fn test_rng_fill_bytes() {
- let mut v = repeat(0u8).take(FILL_BYTES_V_LEN).collect::<Vec<_>>();
- ::test::rng().fill_bytes(&mut v);
-
- // Sanity test: if we've gotten here, `fill_bytes` has not infinitely
- // recursed.
- assert_eq!(v.len(), FILL_BYTES_V_LEN);
-
- // To test that `fill_bytes` actually did something, check that the
- // average of `v` is not 0.
- let mut sum = 0.0;
- for &x in v.iter() {
- sum += x as f64;
- }
- assert!(sum / v.len() as f64 != 0.0);
- }
-}
diff --git a/rand/src/rngs/adapter/mod.rs b/rand/src/rngs/adapter/mod.rs
new file mode 100644
index 0000000..60b832e
--- /dev/null
+++ b/rand/src/rngs/adapter/mod.rs
@@ -0,0 +1,15 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Wrappers / adapters forming RNGs
+
+#[cfg(feature="std")] #[doc(hidden)] pub mod read;
+mod reseeding;
+
+#[cfg(feature="std")] pub use self::read::ReadRng;
+pub use self::reseeding::ReseedingRng;
diff --git a/rand/src/rngs/adapter/read.rs b/rand/src/rngs/adapter/read.rs
new file mode 100644
index 0000000..30b6de6
--- /dev/null
+++ b/rand/src/rngs/adapter/read.rs
@@ -0,0 +1,137 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A wrapper around any Read to treat it as an RNG.
+
+use std::io::Read;
+
+use rand_core::{RngCore, Error, ErrorKind, impls};
+
+
+/// An RNG that reads random bytes straight from any type supporting
+/// `std::io::Read`, for example files.
+///
+/// This will work best with an infinite reader, but that is not required.
+///
+/// This can be used with `/dev/urandom` on Unix but it is recommended to use
+/// [`OsRng`] instead.
+///
+/// # Panics
+///
+/// `ReadRng` uses `std::io::Read::read_exact`, which retries on interrupts. All other
+/// errors from the underlying reader, including when it does not have enough
+/// data, will only be reported through [`try_fill_bytes`]. The other
+/// [`RngCore`] methods will panic in case of an error.
+///
+/// # Example
+///
+/// ```
+/// use rand::Rng;
+/// use rand::rngs::adapter::ReadRng;
+///
+/// let data = vec![1, 2, 3, 4, 5, 6, 7, 8];
+/// let mut rng = ReadRng::new(&data[..]);
+/// println!("{:x}", rng.gen::<u32>());
+/// ```
+///
+/// [`OsRng`]: ../struct.OsRng.html
+/// [`RngCore`]: ../../trait.RngCore.html
+/// [`try_fill_bytes`]: ../../trait.RngCore.html#tymethod.try_fill_bytes
+#[derive(Debug)]
+pub struct ReadRng<R> {
+ reader: R
+}
+
+impl<R: Read> ReadRng<R> {
+ /// Create a new `ReadRng` from a `Read`.
+ pub fn new(r: R) -> ReadRng<R> {
+ ReadRng {
+ reader: r
+ }
+ }
+}
+
+impl<R: Read> RngCore for ReadRng<R> {
+ fn next_u32(&mut self) -> u32 {
+ impls::next_u32_via_fill(self)
+ }
+
+ fn next_u64(&mut self) -> u64 {
+ impls::next_u64_via_fill(self)
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.try_fill_bytes(dest).unwrap_or_else(|err|
+ panic!("reading random bytes from Read implementation failed; error: {}", err));
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ if dest.len() == 0 { return Ok(()); }
+        // Use `std::io::Read::read_exact`, which retries on `ErrorKind::Interrupted`.
+ self.reader.read_exact(dest).map_err(|err| {
+ match err.kind() {
+ ::std::io::ErrorKind::UnexpectedEof => Error::with_cause(
+ ErrorKind::Unavailable,
+ "not enough bytes available, reached end of source", err),
+ _ => Error::with_cause(ErrorKind::Unavailable,
+ "error reading from Read source", err)
+ }
+ })
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::ReadRng;
+ use {RngCore, ErrorKind};
+
+ #[test]
+ fn test_reader_rng_u64() {
+ // transmute from the target to avoid endianness concerns.
+ let v = vec![0u8, 0, 0, 0, 0, 0, 0, 1,
+ 0 , 0, 0, 0, 0, 0, 0, 2,
+ 0, 0, 0, 0, 0, 0, 0, 3];
+ let mut rng = ReadRng::new(&v[..]);
+
+ assert_eq!(rng.next_u64(), 1_u64.to_be());
+ assert_eq!(rng.next_u64(), 2_u64.to_be());
+ assert_eq!(rng.next_u64(), 3_u64.to_be());
+ }
+
+ #[test]
+ fn test_reader_rng_u32() {
+ let v = vec![0u8, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3];
+ let mut rng = ReadRng::new(&v[..]);
+
+ assert_eq!(rng.next_u32(), 1_u32.to_be());
+ assert_eq!(rng.next_u32(), 2_u32.to_be());
+ assert_eq!(rng.next_u32(), 3_u32.to_be());
+ }
+
+ #[test]
+ fn test_reader_rng_fill_bytes() {
+ let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
+ let mut w = [0u8; 8];
+
+ let mut rng = ReadRng::new(&v[..]);
+ rng.fill_bytes(&mut w);
+
+ assert!(v == w);
+ }
+
+ #[test]
+ fn test_reader_rng_insufficient_bytes() {
+ let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
+ let mut w = [0u8; 9];
+
+ let mut rng = ReadRng::new(&v[..]);
+
+ assert!(rng.try_fill_bytes(&mut w).err().unwrap().kind == ErrorKind::Unavailable);
+ }
+}
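The new `try_fill_bytes` above reads with `Read::read_exact` and turns an `UnexpectedEof` into a reportable "out of bytes" error instead of panicking (only `fill_bytes` panics). A std-only sketch of that logic, with `ReadErr` as a stand-in for rand's `Error` type:

use std::io::{self, Read};

#[derive(Debug)]
enum ReadErr {
    OutOfBytes,
    Other(io::Error),
}

// Mirror of `try_fill_bytes` above: empty requests succeed, short sources
// are reported as an error rather than causing a panic.
fn try_fill<R: Read>(reader: &mut R, dest: &mut [u8]) -> Result<(), ReadErr> {
    if dest.is_empty() { return Ok(()); }
    reader.read_exact(dest).map_err(|err| match err.kind() {
        io::ErrorKind::UnexpectedEof => ReadErr::OutOfBytes,
        _ => ReadErr::Other(err),
    })
}

fn main() {
    let data = [1u8, 2, 3, 4, 5, 6, 7, 8];

    let mut buf = [0u8; 8];
    try_fill(&mut &data[..], &mut buf).unwrap();
    assert_eq!(buf, data);

    // Asking for more bytes than the source holds is an error, not a panic.
    let mut too_big = [0u8; 9];
    assert!(matches!(try_fill(&mut &data[..], &mut too_big), Err(ReadErr::OutOfBytes)));
}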
diff --git a/rand/src/rngs/adapter/reseeding.rs b/rand/src/rngs/adapter/reseeding.rs
new file mode 100644
index 0000000..016afab
--- /dev/null
+++ b/rand/src/rngs/adapter/reseeding.rs
@@ -0,0 +1,370 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A wrapper around another PRNG that reseeds it after it
+//! generates a certain number of random bytes.
+
+use core::mem::size_of;
+
+use rand_core::{RngCore, CryptoRng, SeedableRng, Error, ErrorKind};
+use rand_core::block::{BlockRngCore, BlockRng};
+
+/// A wrapper around any PRNG that implements [`BlockRngCore`], that adds the
+/// ability to reseed it.
+///
+/// `ReseedingRng` reseeds the underlying PRNG in the following cases:
+///
+/// - On a manual call to [`reseed()`].
+/// - After `clone()`, the clone will be reseeded on first use.
+/// - After a process is forked, the RNG in the child process is reseeded within
+/// the next few generated values, depending on the block size of the
+/// underlying PRNG. For [`ChaChaCore`] and [`Hc128Core`] this is a maximum of
+/// 15 `u32` values before reseeding.
+/// - After the PRNG has generated a configurable number of random bytes.
+///
+/// # When should reseeding after a fixed number of generated bytes be used?
+///
+/// Reseeding after a fixed number of generated bytes is never strictly
+/// *necessary*. Cryptographic PRNGs don't have a limited number of bytes they
+/// can output, or at least not a limit reachable in any practical way. There is
+/// no such thing as 'running out of entropy'.
+///
+/// Occasionally reseeding can be seen as some form of 'security in depth'. Even
+/// if in the future a cryptographic weakness is found in the CSPRNG being used,
+/// or a flaw in the implementation, occasionally reseeding should make
+/// exploiting it much more difficult or even impossible.
+///
+/// Use [`ReseedingRng::new`] with a `threshold` of `0` to disable reseeding
+/// after a fixed number of generated bytes.
+///
+/// # Error handling
+///
+/// Although unlikely, reseeding the wrapped PRNG can fail. `ReseedingRng` will
+/// never panic but try to handle the error intelligently through some
+/// combination of retrying and delaying reseeding until later.
+/// If handling the source error fails, `ReseedingRng` will continue generating
+/// data from the wrapped PRNG without reseeding.
+///
+/// Manually calling [`reseed()`] will not have this retry or delay logic, but
+/// reports the error.
+///
+/// # Example
+///
+/// ```
+/// # extern crate rand;
+/// # extern crate rand_chacha;
+/// # fn main() {
+/// use rand::prelude::*;
+/// use rand_chacha::ChaChaCore; // Internal part of ChaChaRng that
+/// // implements BlockRngCore
+/// use rand::rngs::OsRng;
+/// use rand::rngs::adapter::ReseedingRng;
+///
+/// let prng = ChaChaCore::from_entropy();
+// FIXME: it is better to use EntropyRng as reseeder, but that doesn't implement
+// clone yet.
+/// let reseeder = OsRng::new().unwrap();
+/// let mut reseeding_rng = ReseedingRng::new(prng, 0, reseeder);
+///
+/// println!("{}", reseeding_rng.gen::<u64>());
+///
+/// let mut cloned_rng = reseeding_rng.clone();
+/// assert!(reseeding_rng.gen::<u64>() != cloned_rng.gen::<u64>());
+/// # }
+/// ```
+///
+/// [`ChaChaCore`]: ../../../rand_chacha/struct.ChaChaCore.html
+/// [`Hc128Core`]: ../../../rand_hc/struct.Hc128Core.html
+/// [`BlockRngCore`]: ../../../rand_core/block/trait.BlockRngCore.html
+/// [`ReseedingRng::new`]: struct.ReseedingRng.html#method.new
+/// [`reseed()`]: struct.ReseedingRng.html#method.reseed
+#[derive(Debug)]
+pub struct ReseedingRng<R, Rsdr>(BlockRng<ReseedingCore<R, Rsdr>>)
+where R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore;
+
+impl<R, Rsdr> ReseedingRng<R, Rsdr>
+where R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore
+{
+ /// Create a new `ReseedingRng` from an existing PRNG, combined with a RNG
+ /// to use as reseeder.
+ ///
+ /// `threshold` sets the number of generated bytes after which to reseed the
+ /// PRNG. Set it to zero to never reseed based on the number of generated
+ /// values.
+ pub fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
+ ReseedingRng(BlockRng::new(ReseedingCore::new(rng, threshold, reseeder)))
+ }
+
+ /// Reseed the internal PRNG.
+ pub fn reseed(&mut self) -> Result<(), Error> {
+ self.0.core.reseed()
+ }
+}
+
+// TODO: this should be implemented for any type where the inner type
+// implements RngCore, but we can't specify that because ReseedingCore is private
+impl<R, Rsdr: RngCore> RngCore for ReseedingRng<R, Rsdr>
+where R: BlockRngCore<Item = u32> + SeedableRng,
+ <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
+{
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest)
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl<R, Rsdr> Clone for ReseedingRng<R, Rsdr>
+where R: BlockRngCore + SeedableRng + Clone,
+ Rsdr: RngCore + Clone
+{
+ fn clone(&self) -> ReseedingRng<R, Rsdr> {
+ // Recreating `BlockRng` seems easier than cloning it and resetting
+ // the index.
+ ReseedingRng(BlockRng::new(self.0.core.clone()))
+ }
+}
+
+impl<R, Rsdr> CryptoRng for ReseedingRng<R, Rsdr>
+where R: BlockRngCore + SeedableRng + CryptoRng,
+ Rsdr: RngCore + CryptoRng {}
+
+#[derive(Debug)]
+struct ReseedingCore<R, Rsdr> {
+ inner: R,
+ reseeder: Rsdr,
+ threshold: i64,
+ bytes_until_reseed: i64,
+ fork_counter: usize,
+}
+
+impl<R, Rsdr> BlockRngCore for ReseedingCore<R, Rsdr>
+where R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore
+{
+ type Item = <R as BlockRngCore>::Item;
+ type Results = <R as BlockRngCore>::Results;
+
+ fn generate(&mut self, results: &mut Self::Results) {
+ let global_fork_counter = fork::get_fork_counter();
+ if self.bytes_until_reseed <= 0 ||
+ self.is_forked(global_fork_counter) {
+            // We get better performance not by calling `reseed` here and
+            // continuing with the rest of the function, but by directly
+            // returning from a non-inlined function.
+ return self.reseed_and_generate(results, global_fork_counter);
+ }
+ let num_bytes = results.as_ref().len() * size_of::<Self::Item>();
+ self.bytes_until_reseed -= num_bytes as i64;
+ self.inner.generate(results);
+ }
+}
+
+impl<R, Rsdr> ReseedingCore<R, Rsdr>
+where R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore
+{
+ /// Create a new `ReseedingCore`.
+ fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
+ use ::core::i64::MAX;
+ fork::register_fork_handler();
+
+ // Because generating more values than `i64::MAX` takes centuries on
+ // current hardware, we just clamp to that value.
+        // We also map a threshold of 0, which indicates no limit, to that
+        // value.
+ let threshold =
+ if threshold == 0 { MAX }
+ else if threshold <= MAX as u64 { threshold as i64 }
+ else { MAX };
+
+ ReseedingCore {
+ inner: rng,
+ reseeder,
+ threshold: threshold as i64,
+ bytes_until_reseed: threshold as i64,
+ fork_counter: 0,
+ }
+ }
+
+ /// Reseed the internal PRNG.
+ fn reseed(&mut self) -> Result<(), Error> {
+ R::from_rng(&mut self.reseeder).map(|result| {
+ self.bytes_until_reseed = self.threshold;
+ self.inner = result
+ })
+ }
+
+ fn is_forked(&self, global_fork_counter: usize) -> bool {
+ // In theory, on 32-bit platforms, it is possible for
+ // `global_fork_counter` to wrap around after ~4e9 forks.
+ //
+ // This check will detect a fork in the normal case where
+ // `fork_counter < global_fork_counter`, and also when the difference
+ // between both is greater than `isize::MAX` (wrapped around).
+ //
+ // It will still fail to detect a fork if there have been more than
+ // `isize::MAX` forks, without any reseed in between. Seems unlikely
+ // enough.
+ (self.fork_counter.wrapping_sub(global_fork_counter) as isize) < 0
+ }
+
+ #[inline(never)]
+ fn reseed_and_generate(&mut self,
+ results: &mut <Self as BlockRngCore>::Results,
+ global_fork_counter: usize)
+ {
+ if self.is_forked(global_fork_counter) {
+ info!("Fork detected, reseeding RNG");
+ } else {
+ trace!("Reseeding RNG (periodic reseed)");
+ }
+
+ let num_bytes =
+ results.as_ref().len() * size_of::<<R as BlockRngCore>::Item>();
+
+ let threshold = if let Err(e) = self.reseed() {
+ let delay = match e.kind {
+ ErrorKind::Transient => num_bytes as i64,
+ kind @ _ if kind.should_retry() => self.threshold >> 8,
+ _ => self.threshold,
+ };
+ warn!("Reseeding RNG delayed reseeding by {} bytes due to \
+ error from source: {}", delay, e);
+ delay
+ } else {
+ self.fork_counter = global_fork_counter;
+ self.threshold
+ };
+
+ self.bytes_until_reseed = threshold - num_bytes as i64;
+ self.inner.generate(results);
+ }
+}
+
+impl<R, Rsdr> Clone for ReseedingCore<R, Rsdr>
+where R: BlockRngCore + SeedableRng + Clone,
+ Rsdr: RngCore + Clone
+{
+ fn clone(&self) -> ReseedingCore<R, Rsdr> {
+ ReseedingCore {
+ inner: self.inner.clone(),
+ reseeder: self.reseeder.clone(),
+ threshold: self.threshold,
+ bytes_until_reseed: 0, // reseed clone on first use
+ fork_counter: self.fork_counter,
+ }
+ }
+}
+
+impl<R, Rsdr> CryptoRng for ReseedingCore<R, Rsdr>
+where R: BlockRngCore + SeedableRng + CryptoRng,
+ Rsdr: RngCore + CryptoRng {}
+
+
+#[cfg(all(feature="std", unix, not(target_os="emscripten")))]
+mod fork {
+ extern crate libc;
+
+ use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+ use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT};
+
+ // Fork protection
+ //
+ // We implement fork protection on Unix using `pthread_atfork`.
+ // When the process is forked, we increment `RESEEDING_RNG_FORK_COUNTER`.
+ // Every `ReseedingRng` stores the last known value of the static in
+ // `fork_counter`. If the cached `fork_counter` is less than
+ // `RESEEDING_RNG_FORK_COUNTER`, it is time to reseed this RNG.
+ //
+ // If reseeding fails, we don't deal with this by setting a delay, but just
+ // don't update `fork_counter`, so a reseed is attempted as soon as
+ // possible.
+
+ static RESEEDING_RNG_FORK_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
+
+ pub fn get_fork_counter() -> usize {
+ RESEEDING_RNG_FORK_COUNTER.load(Ordering::Relaxed)
+ }
+
+ static FORK_HANDLER_REGISTERED: AtomicBool = ATOMIC_BOOL_INIT;
+
+ extern fn fork_handler() {
+ // Note: fetch_add is defined to wrap on overflow
+ // (which is what we want).
+ RESEEDING_RNG_FORK_COUNTER.fetch_add(1, Ordering::Relaxed);
+ }
+
+ pub fn register_fork_handler() {
+ if FORK_HANDLER_REGISTERED.load(Ordering::Relaxed) == false {
+ unsafe { libc::pthread_atfork(None, None, Some(fork_handler)) };
+ FORK_HANDLER_REGISTERED.store(true, Ordering::Relaxed);
+ }
+ }
+}
+
+#[cfg(not(all(feature="std", unix, not(target_os="emscripten"))))]
+mod fork {
+ pub fn get_fork_counter() -> usize { 0 }
+ pub fn register_fork_handler() {}
+}
+
+
+#[cfg(test)]
+mod test {
+ use {Rng, SeedableRng};
+ use rand_chacha::ChaChaCore;
+ use rngs::mock::StepRng;
+ use super::ReseedingRng;
+
+ #[test]
+ fn test_reseeding() {
+ let mut zero = StepRng::new(0, 0);
+ let rng = ChaChaCore::from_rng(&mut zero).unwrap();
+ let mut reseeding = ReseedingRng::new(rng, 32*4, zero);
+
+        // Currently we only support arrays up to length 32.
+        // TODO: cannot generate seq via Rng::gen because it uses different alg
+        let mut buf = [0u32; 32]; // Needs to be a multiple of the RNG's result
+                                  // size to test exactly.
+ reseeding.fill(&mut buf);
+ let seq = buf;
+ for _ in 0..10 {
+ reseeding.fill(&mut buf);
+ assert_eq!(buf, seq);
+ }
+ }
+
+ #[test]
+ fn test_clone_reseeding() {
+ let mut zero = StepRng::new(0, 0);
+ let rng = ChaChaCore::from_rng(&mut zero).unwrap();
+ let mut rng1 = ReseedingRng::new(rng, 32*4, zero);
+
+ let first: u32 = rng1.gen();
+ for _ in 0..10 { let _ = rng1.gen::<u32>(); }
+
+ let mut rng2 = rng1.clone();
+ assert_eq!(first, rng2.gen::<u32>());
+ }
+}
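Two details of `ReseedingCore` above are easy to miss: `new` maps a `threshold` of 0 ("never reseed on byte count") to `i64::MAX`, and `is_forked` uses a wrapping subtraction so a fork is still detected after the global counter wraps around. A std-only sketch of both, with free functions standing in for the methods:

// Threshold clamping from `ReseedingCore::new`: 0 means "no byte-count based
// reseeding" and is mapped to i64::MAX, as are values that do not fit in i64.
fn clamp_threshold(threshold: u64) -> i64 {
    if threshold == 0 {
        i64::MAX
    } else if threshold <= i64::MAX as u64 {
        threshold as i64
    } else {
        i64::MAX
    }
}

// Fork detection from `is_forked`: true when the cached counter lags the
// global one, even if the global counter has wrapped around.
fn is_forked(cached: usize, global: usize) -> bool {
    (cached.wrapping_sub(global) as isize) < 0
}

fn main() {
    assert_eq!(clamp_threshold(0), i64::MAX);
    assert_eq!(clamp_threshold(32 * 1024), 32 * 1024);
    assert_eq!(clamp_threshold(u64::MAX), i64::MAX);

    assert!(!is_forked(5, 5));         // no fork since the last reseed
    assert!(is_forked(5, 6));          // one fork happened
    assert!(is_forked(usize::MAX, 0)); // global counter wrapped around
}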
diff --git a/rand/src/rngs/entropy.rs b/rand/src/rngs/entropy.rs
new file mode 100644
index 0000000..8736324
--- /dev/null
+++ b/rand/src/rngs/entropy.rs
@@ -0,0 +1,297 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Entropy generator, or wrapper around external generators
+
+use rand_core::{RngCore, CryptoRng, Error, ErrorKind, impls};
+#[allow(unused)]
+use rngs;
+
+/// An interface returning random data from external source(s), provided
+/// specifically for securely seeding algorithmic generators (PRNGs).
+///
+/// Where possible, `EntropyRng` retrieves random data from the operating
+/// system's interface for random numbers ([`OsRng`]); if that fails it will
+/// fall back to the [`JitterRng`] entropy collector. In the latter case it will
+/// still try to use [`OsRng`] on the next usage.
+///
+/// If no secure source of entropy is available `EntropyRng` will panic on use;
+/// i.e. it should never output predictable data.
+///
+/// This is either a little slow ([`OsRng`] requires a system call) or extremely
+/// slow ([`JitterRng`] must use significant CPU time to generate sufficient
+/// jitter); for better performance it is common to seed a local PRNG from
+/// external entropy then primarily use the local PRNG ([`thread_rng`] is
+/// provided as a convenient, local, automatically-seeded CSPRNG).
+///
+/// # Panics
+///
+/// On most systems, like Windows, Linux, macOS and *BSD on common hardware, it
+/// is highly unlikely for both [`OsRng`] and [`JitterRng`] to fail. But on
+/// combinations like WebAssembly without Emscripten or stdweb both sources are
+/// unavailable. If both sources fail, only [`try_fill_bytes`] is able to
+/// report the error, and then only the error from `OsRng`. The other
+/// [`RngCore`] methods will panic in case of an error.
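+///
+/// # Example
+///
+/// An illustrative sketch (not part of the upstream documentation): seeding a
+/// `StdRng` from `EntropyRng` via `SeedableRng::from_rng`.
+///
+/// ```
+/// use rand::rngs::{EntropyRng, StdRng};
+/// use rand::{SeedableRng, Error};
+///
+/// # fn try_main() -> Result<(), Error> {
+/// let mut entropy = EntropyRng::new();
+/// // `from_rng` pulls fresh seed material through `EntropyRng` on demand.
+/// let _prng = StdRng::from_rng(&mut entropy)?;
+/// # Ok(())
+/// # }
+/// # try_main().unwrap();
+/// ```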
+///
+/// [`OsRng`]: struct.OsRng.html
+/// [`JitterRng`]: jitter/struct.JitterRng.html
+/// [`thread_rng`]: ../fn.thread_rng.html
+/// [`RngCore`]: ../trait.RngCore.html
+/// [`try_fill_bytes`]: ../trait.RngCore.html#method.tymethod.try_fill_bytes
+#[derive(Debug)]
+pub struct EntropyRng {
+ source: Source,
+}
+
+#[derive(Debug)]
+enum Source {
+ Os(Os),
+ Custom(Custom),
+ Jitter(Jitter),
+ None,
+}
+
+impl EntropyRng {
+ /// Create a new `EntropyRng`.
+ ///
+    /// This method does no system calls or other initialization routines;
+    /// those are done on first use. This keeps `new` infallible and makes
+    /// `try_fill_bytes` the only place where errors are reported.
+ pub fn new() -> Self {
+ EntropyRng { source: Source::None }
+ }
+}
+
+impl Default for EntropyRng {
+ fn default() -> Self {
+ EntropyRng::new()
+ }
+}
+
+impl RngCore for EntropyRng {
+ fn next_u32(&mut self) -> u32 {
+ impls::next_u32_via_fill(self)
+ }
+
+ fn next_u64(&mut self) -> u64 {
+ impls::next_u64_via_fill(self)
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.try_fill_bytes(dest).unwrap_or_else(|err|
+ panic!("all entropy sources failed; first error: {}", err))
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let mut reported_error = None;
+
+ if let Source::Os(ref mut os_rng) = self.source {
+ match os_rng.fill(dest) {
+ Ok(()) => return Ok(()),
+ Err(err) => {
+ warn!("EntropyRng: OsRng failed \
+ [trying other entropy sources]: {}", err);
+ reported_error = Some(err);
+ },
+ }
+ } else if Os::is_supported() {
+ match Os::new_and_fill(dest) {
+ Ok(os_rng) => {
+ debug!("EntropyRng: using OsRng");
+ self.source = Source::Os(os_rng);
+ return Ok(());
+ },
+ Err(err) => { reported_error = reported_error.or(Some(err)) },
+ }
+ }
+
+ if let Source::Custom(ref mut rng) = self.source {
+ match rng.fill(dest) {
+ Ok(()) => return Ok(()),
+ Err(err) => {
+ warn!("EntropyRng: custom entropy source failed \
+ [trying other entropy sources]: {}", err);
+ reported_error = Some(err);
+ },
+ }
+ } else if Custom::is_supported() {
+ match Custom::new_and_fill(dest) {
+ Ok(custom) => {
+ debug!("EntropyRng: using custom entropy source");
+ self.source = Source::Custom(custom);
+ return Ok(());
+ },
+ Err(err) => { reported_error = reported_error.or(Some(err)) },
+ }
+ }
+
+ if let Source::Jitter(ref mut jitter_rng) = self.source {
+ match jitter_rng.fill(dest) {
+ Ok(()) => return Ok(()),
+ Err(err) => {
+ warn!("EntropyRng: JitterRng failed: {}", err);
+ reported_error = Some(err);
+ },
+ }
+ } else if Jitter::is_supported() {
+ match Jitter::new_and_fill(dest) {
+ Ok(jitter_rng) => {
+ debug!("EntropyRng: using JitterRng");
+ self.source = Source::Jitter(jitter_rng);
+ return Ok(());
+ },
+ Err(err) => { reported_error = reported_error.or(Some(err)) },
+ }
+ }
+
+ if let Some(err) = reported_error {
+ Err(Error::with_cause(ErrorKind::Unavailable,
+ "All entropy sources failed",
+ err))
+ } else {
+ Err(Error::new(ErrorKind::Unavailable,
+ "No entropy sources available"))
+ }
+ }
+}
+
+impl CryptoRng for EntropyRng {}
+
+
+
+trait EntropySource {
+ fn new_and_fill(dest: &mut [u8]) -> Result<Self, Error>
+ where Self: Sized;
+
+ fn fill(&mut self, dest: &mut [u8]) -> Result<(), Error>;
+
+ fn is_supported() -> bool { true }
+}
+
+#[allow(unused)]
+#[derive(Clone, Debug)]
+struct NoSource;
+
+#[allow(unused)]
+impl EntropySource for NoSource {
+ fn new_and_fill(dest: &mut [u8]) -> Result<Self, Error> {
+ Err(Error::new(ErrorKind::Unavailable, "Source not supported"))
+ }
+
+ fn fill(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ unreachable!()
+ }
+
+ fn is_supported() -> bool { false }
+}
+
+
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+#[derive(Clone, Debug)]
+pub struct Os(rngs::OsRng);
+
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+impl EntropySource for Os {
+ fn new_and_fill(dest: &mut [u8]) -> Result<Self, Error> {
+ let mut rng = rngs::OsRng::new()?;
+ rng.try_fill_bytes(dest)?;
+ Ok(Os(rng))
+ }
+
+ fn fill(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+#[cfg(not(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+))))]
+type Os = NoSource;
+
+
+type Custom = NoSource;
+
+
+#[cfg(not(target_arch = "wasm32"))]
+#[derive(Clone, Debug)]
+pub struct Jitter(rngs::JitterRng);
+
+#[cfg(not(target_arch = "wasm32"))]
+impl EntropySource for Jitter {
+ fn new_and_fill(dest: &mut [u8]) -> Result<Self, Error> {
+ let mut rng = rngs::JitterRng::new()?;
+ rng.try_fill_bytes(dest)?;
+ Ok(Jitter(rng))
+ }
+
+ fn fill(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+#[cfg(target_arch = "wasm32")]
+type Jitter = NoSource;
+
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_entropy() {
+ let mut rng = EntropyRng::new();
+ let n = (rng.next_u32() ^ rng.next_u32()).count_ones();
+ assert!(n >= 2); // p(failure) approx 1e-7
+ }
+}
diff --git a/rand/src/jitter.rs b/rand/src/rngs/jitter.rs
index 3693481..3e93477 100644
--- a/rand/src/jitter.rs
+++ b/rand/src/rngs/jitter.rs
@@ -1,10 +1,8 @@
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
@@ -16,10 +14,15 @@
//! Non-physical true random number generator based on timing jitter.
-use Rng;
+// Note: the C implementation of `Jitterentropy` relies on being compiled
+// without optimizations. This implementation goes through lengths to make the
+// compiler not optimize out code which does influence timing jitter, but is
+// technically dead code.
+
+use rand_core::{RngCore, CryptoRng, Error, ErrorKind, impls};
use core::{fmt, mem, ptr};
-#[cfg(feature="std")]
+#[cfg(all(feature="std", not(target_arch = "wasm32")))]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
const MEMORY_BLOCKS: usize = 64;
@@ -31,8 +34,8 @@ const MEMORY_SIZE: usize = MEMORY_BLOCKS * MEMORY_BLOCKSIZE;
///
/// This is a true random number generator, as opposed to pseudo-random
/// generators. Random numbers generated by `JitterRng` can be seen as fresh
-/// entropy. A consequence is that is orders of magnitude slower than `OsRng`
-/// and PRNGs (about 10^3 .. 10^6 slower).
+/// entropy. A consequence is that it is orders of magnitude slower than [`OsRng`]
+/// and PRNGs (about 10<sup>3</sup>..10<sup>6</sup> slower).
///
/// There are very few situations where using this RNG is appropriate. Only very
/// few applications require true entropy. A normal PRNG can be statistically
@@ -40,29 +43,155 @@ const MEMORY_SIZE: usize = MEMORY_BLOCKS * MEMORY_BLOCKSIZE;
/// predict.
///
/// Use of `JitterRng` is recommended for initializing cryptographic PRNGs when
-/// `OsRng` is not available.
+/// [`OsRng`] is not available.
+///
+/// `JitterRng` can be used without the standard library, but not conveniently:
+/// you must provide a high-precision timer yourself and carefully follow the
+/// instructions of [`new_with_timer`].
///
/// This implementation is based on
/// [Jitterentropy](http://www.chronox.de/jent.html) version 2.1.0.
-//
-// Note: the C implementation relies on being compiled without optimizations.
-// This implementation goes through lengths to make the compiler not optimise
-// out what is technically dead code, but that does influence timing jitter.
+///
+/// Note: There is no accurate timer available on Wasm platforms, to help
+/// prevent fingerprinting or timing side-channel attacks. Therefore
+/// [`JitterRng::new()`] is not available on Wasm.
+///
+/// # Quality testing
+///
+/// [`JitterRng::new()`] has built-in, but limited, quality testing; however,
+/// before using `JitterRng` on untested hardware, or after changes that could
+/// affect how the code is optimized (such as a new LLVM version), it is
+/// recommended to run the much more stringent
+/// [NIST SP 800-90B Entropy Estimation Suite](
+/// https://github.com/usnistgov/SP800-90B_EntropyAssessment).
+///
+/// Use the following code using [`timer_stats`] to collect the data:
+///
+/// ```no_run
+/// use rand::rngs::JitterRng;
+/// #
+/// # use std::error::Error;
+/// # use std::fs::File;
+/// # use std::io::Write;
+/// #
+/// # fn try_main() -> Result<(), Box<Error>> {
+/// let mut rng = JitterRng::new()?;
+///
+/// // 1_000_000 results are required for the
+/// // NIST SP 800-90B Entropy Estimation Suite
+/// const ROUNDS: usize = 1_000_000;
+/// let mut deltas_variable: Vec<u8> = Vec::with_capacity(ROUNDS);
+/// let mut deltas_minimal: Vec<u8> = Vec::with_capacity(ROUNDS);
+///
+/// for _ in 0..ROUNDS {
+/// deltas_variable.push(rng.timer_stats(true) as u8);
+/// deltas_minimal.push(rng.timer_stats(false) as u8);
+/// }
+///
+/// // Write out after the statistics collection loop, to not disturb the
+/// // test results.
+/// File::create("jitter_rng_var.bin")?.write(&deltas_variable)?;
+/// File::create("jitter_rng_min.bin")?.write(&deltas_minimal)?;
+/// #
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// # try_main().unwrap();
+/// # }
+/// ```
+///
+/// This will produce two files: `jitter_rng_var.bin` and `jitter_rng_min.bin`.
+/// Run the Entropy Estimation Suite in three configurations, as outlined below.
+/// Every run has two steps. One step to produce an estimation, another to
+/// validate the estimation.
+///
+/// 1. Estimate the expected amount of entropy that is at least available with
+/// each round of the entropy collector. This number should be greater than
+/// the amount estimated with `64 / test_timer()`.
+/// ```sh
+/// python noniid_main.py -v jitter_rng_var.bin 8
+/// restart.py -v jitter_rng_var.bin 8 <min-entropy>
+/// ```
+/// 2. Estimate the expected amount of entropy that is available in the last 4
+///    bits of the timer delta after running noise sources. Note that a value of
+/// `3.70` is the minimum estimated entropy for true randomness.
+/// ```sh
+/// python noniid_main.py -v -u 4 jitter_rng_var.bin 4
+/// restart.py -v -u 4 jitter_rng_var.bin 4 <min-entropy>
+/// ```
+/// 3. Estimate the expected amount of entropy that is available to the entropy
+///    collector if both noise sources only run their minimal number of times.
+/// This measures the absolute worst-case, and gives a lower bound for the
+/// available entropy.
+/// ```sh
+/// python noniid_main.py -v -u 4 jitter_rng_min.bin 4
+/// restart.py -v -u 4 jitter_rng_min.bin 4 <min-entropy>
+/// ```
+///
+/// [`OsRng`]: struct.OsRng.html
+/// [`JitterRng::new()`]: struct.JitterRng.html#method.new
+/// [`new_with_timer`]: struct.JitterRng.html#method.new_with_timer
+/// [`timer_stats`]: struct.JitterRng.html#method.timer_stats
pub struct JitterRng {
data: u64, // Actual random number
// Number of rounds to run the entropy collector per 64 bits
- rounds: u32,
- // Timer and previous time stamp, used by `measure_jitter`
+ rounds: u8,
+ // Timer used by `measure_jitter`
timer: fn() -> u64,
+ // Memory for the Memory Access noise source
+ mem_prev_index: u16,
+ // Make `next_u32` not waste 32 bits
+ data_half_used: bool,
+}
+
+// Note: `JitterRng` maintains a small 64-bit entropy pool. With every
+// `generate` 64 new bits should be integrated into the pool. If a round of
+// `generate` were to collect less than the expected 64 bits, then the returned
+// value, and the new state of the entropy pool, would be in some way related to
+// the initial state. It is therefore better if the initial state of the entropy
+// pool is different on each call to `generate`. This has a few implications:
+// - `generate` should be called once before using `JitterRng` to produce the
+// first usable value (this is done by default in `new`);
+// - We do not zero the entropy pool after generating a result. The reference
+// implementation also does not support zeroing, but recommends generating a
+// new value without using it if you want to protect a previously generated
+// 'secret' value from someone inspecting the memory;
+// - Implementing `Clone` seems acceptable, as it would not cause the systematic
+// bias a constant might cause. Only instead of one value that could be
+// potentially related to the same initial state, there are now two.
+
+// Entropy collector state.
+// These values are not necessary to preserve across runs.
+struct EcState {
+ // Previous time stamp to determine the timer delta
prev_time: u64,
// Deltas used for the stuck test
- last_delta: i64,
- last_delta2: i64,
+ last_delta: i32,
+ last_delta2: i32,
// Memory for the Memory Access noise source
- mem_prev_index: usize,
mem: [u8; MEMORY_SIZE],
- // Make `next_u32` not waste 32 bits
- data_remaining: Option<u32>,
+}
+
+impl EcState {
+ // Stuck test by checking the:
+ // - 1st derivation of the jitter measurement (time delta)
+ // - 2nd derivation of the jitter measurement (delta of time deltas)
+ // - 3rd derivation of the jitter measurement (delta of delta of time
+ // deltas)
+ //
+ // All values must always be non-zero.
+ // This test is a heuristic to see whether the last measurement holds
+ // entropy.
+ fn stuck(&mut self, current_delta: i32) -> bool {
+ let delta2 = self.last_delta - current_delta;
+ let delta3 = delta2 - self.last_delta2;
+
+ self.last_delta = current_delta;
+ self.last_delta2 = delta2;
+
+ current_delta == 0 || delta2 == 0 || delta3 == 0
+ }
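+
+    // Illustrative example (not in the original source): if two consecutive
+    // time deltas are identical (say both 120), then `delta2 == 0` and the
+    // measurement is rejected as stuck.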
}
// Custom Debug implementation that does not expose the internal state
@@ -72,7 +201,23 @@ impl fmt::Debug for JitterRng {
}
}
-/// An error that can occur when `test_timer` fails.
+impl Clone for JitterRng {
+ fn clone(&self) -> JitterRng {
+ JitterRng {
+ data: self.data,
+ rounds: self.rounds,
+ timer: self.timer,
+ mem_prev_index: self.mem_prev_index,
+ // The 32 bits that may still be unused from the previous round are
+ // for the original to use, not for the clone.
+ data_half_used: false,
+ }
+ }
+}
+
+/// An error that can occur when [`JitterRng::test_timer`] fails.
+///
+/// [`JitterRng::test_timer`]: struct.JitterRng.html#method.test_timer
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TimerError {
/// No timer available.
@@ -115,29 +260,43 @@ impl ::std::error::Error for TimerError {
}
}
+impl From<TimerError> for Error {
+ fn from(err: TimerError) -> Error {
+ // Timer check is already quite permissive of failures so we don't
+ // expect false-positive failures, i.e. any error is irrecoverable.
+ Error::with_cause(ErrorKind::Unavailable,
+ "timer jitter failed basic quality tests", err)
+ }
+}
+
// Initialise to zero; must be positive
-#[cfg(feature="std")]
+#[cfg(all(feature="std", not(target_arch = "wasm32")))]
static JITTER_ROUNDS: AtomicUsize = ATOMIC_USIZE_INIT;
impl JitterRng {
- /// Create a new `JitterRng`.
- /// Makes use of `std::time` for a timer.
+ /// Create a new `JitterRng`. Makes use of `std::time` for a timer, or a
+ /// platform-specific function with higher accuracy if necessary and
+ /// available.
///
/// During initialization CPU execution timing jitter is measured a few
/// hundred times. If this does not pass basic quality tests, an error is
/// returned. The test result is cached to make subsequent calls faster.
- #[cfg(feature="std")]
+ #[cfg(all(feature="std", not(target_arch = "wasm32")))]
pub fn new() -> Result<JitterRng, TimerError> {
- let mut ec = JitterRng::new_with_timer(platform::get_nstime);
- let mut rounds = JITTER_ROUNDS.load(Ordering::Relaxed) as u32;
+ let mut state = JitterRng::new_with_timer(platform::get_nstime);
+ let mut rounds = JITTER_ROUNDS.load(Ordering::Relaxed) as u8;
if rounds == 0 {
// No result yet: run test.
// This allows the timer test to run multiple times; we don't care.
- rounds = ec.test_timer()?;
+ rounds = state.test_timer()?;
JITTER_ROUNDS.store(rounds as usize, Ordering::Relaxed);
+ info!("JitterRng: using {} rounds per u64 output", rounds);
}
- ec.set_rounds(rounds);
- Ok(ec)
+ state.set_rounds(rounds);
+
+ // Fill `data` with a non-zero value.
+ state.gen_entropy();
+ Ok(state)
}
/// Create a new `JitterRng`.
@@ -147,44 +306,65 @@ impl JitterRng {
/// The timer must have nanosecond precision.
///
/// This method is more low-level than `new()`. It is the responsibility of
- /// the caller to run `test_timer` before using any numbers generated with
- /// `JitterRng`, and optionally call `set_rounds()`.
+ /// the caller to run [`test_timer`] before using any numbers generated with
+    /// `JitterRng`, and optionally call [`set_rounds`]. It is also important
+    /// to generate and discard at least one `u64` to initialize the entropy
+    /// collection pool before relying on the results.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use rand::{Rng, Error};
+ /// use rand::rngs::JitterRng;
+ ///
+ /// # fn try_inner() -> Result<(), Error> {
+ /// fn get_nstime() -> u64 {
+ /// use std::time::{SystemTime, UNIX_EPOCH};
+ ///
+ /// let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
+ /// // The correct way to calculate the current time is
+ /// // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64`
+ /// // But this is faster, and the difference in terms of entropy is
+ /// // negligible (log2(10^9) == 29.9).
+ /// dur.as_secs() << 30 | dur.subsec_nanos() as u64
+ /// }
+ ///
+ /// let mut rng = JitterRng::new_with_timer(get_nstime);
+ /// let rounds = rng.test_timer()?;
+ /// rng.set_rounds(rounds); // optional
+ /// let _ = rng.gen::<u64>();
+ ///
+ /// // Ready for use
+ /// let v: u64 = rng.gen();
+ /// # Ok(())
+ /// # }
+ ///
+ /// # let _ = try_inner();
+ /// ```
+ ///
+ /// [`test_timer`]: struct.JitterRng.html#method.test_timer
+ /// [`set_rounds`]: struct.JitterRng.html#method.set_rounds
pub fn new_with_timer(timer: fn() -> u64) -> JitterRng {
- let mut ec = JitterRng {
+ JitterRng {
data: 0,
rounds: 64,
- timer: timer,
- prev_time: 0,
- last_delta: 0,
- last_delta2: 0,
+ timer,
mem_prev_index: 0,
- mem: [0; MEMORY_SIZE],
- data_remaining: None,
- };
-
- // Fill `data`, `prev_time`, `last_delta` and `last_delta2` with
- // non-zero values.
- ec.prev_time = timer();
- ec.gen_entropy();
-
- // Do a single read from `self.mem` to make sure the Memory Access noise
- // source is not optimised out.
- // Note: this read is important, it effects optimisations for the entire
- // module!
- black_box(ec.mem[0]);
-
- ec
+ data_half_used: false,
+ }
}
/// Configures how many rounds are used to generate each 64-bit value.
/// This must be greater than zero, and has a big impact on performance
/// and output quality.
///
- /// `new_with_timer` conservatively uses 64 rounds, but often less rounds
+    /// [`new_with_timer`] conservatively uses 64 rounds, but often fewer rounds
/// can be used. The `test_timer()` function returns the minimum number of
/// rounds required for full strength (platform dependent), so one may use
/// `rng.set_rounds(rng.test_timer()?);` or cache the value.
- pub fn set_rounds(&mut self, rounds: u32) {
+ ///
+ /// [`new_with_timer`]: struct.JitterRng.html#method.new_with_timer
+ pub fn set_rounds(&mut self, rounds: u8) {
assert!(rounds > 0);
self.rounds = rounds;
}
@@ -212,7 +392,7 @@ impl JitterRng {
let mask = (1 << n_bits) - 1;
for _ in 0..folds {
rounds ^= time & mask;
- time = time >> n_bits;
+ time >>= n_bits;
}
rounds as u32
@@ -233,7 +413,7 @@ impl JitterRng {
fn lfsr(mut data: u64, time: u64) -> u64{
for i in 1..65 {
let mut tmp = time << (64 - i);
- tmp = tmp >> (64 - 1);
+ tmp >>= 64 - 1;
// Fibonacci LSFR with polynomial of
// x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is
@@ -288,11 +468,11 @@ impl JitterRng {
// range of wait states. However, to reliably access either L3 or memory,
// the `self.mem` memory must be quite large which is usually not desirable.
#[inline(never)]
- fn memaccess(&mut self, var_rounds: bool) {
+ fn memaccess(&mut self, mem: &mut [u8; MEMORY_SIZE], var_rounds: bool) {
let mut acc_loop_cnt = 128;
if var_rounds { acc_loop_cnt += self.random_loop_cnt(4) };
- let mut index = self.mem_prev_index;
+ let mut index = self.mem_prev_index as usize;
for _ in 0..acc_loop_cnt {
// Addition of memblocksize - 1 to index with wrap around logic to
// ensure that every memory location is hit evenly.
@@ -302,42 +482,21 @@ impl JitterRng {
// memory access: just add 1 to one byte
// memory access implies read from and write to memory location
- let tmp = self.mem[index];
- self.mem[index] = tmp.wrapping_add(1);
+ mem[index] = mem[index].wrapping_add(1);
}
- self.mem_prev_index = index;
- }
-
-
- // Stuck test by checking the:
- // - 1st derivation of the jitter measurement (time delta)
- // - 2nd derivation of the jitter measurement (delta of time deltas)
- // - 3rd derivation of the jitter measurement (delta of delta of time
- // deltas)
- //
- // All values must always be non-zero.
- // This test is a heuristic to see whether the last measurement holds
- // entropy.
- fn stuck(&mut self, current_delta: i64) -> bool {
- let delta2 = self.last_delta - current_delta;
- let delta3 = delta2 - self.last_delta2;
-
- self.last_delta = current_delta;
- self.last_delta2 = delta2;
-
- current_delta == 0 || delta2 == 0 || delta3 == 0
+ self.mem_prev_index = index as u16;
}
// This is the heart of the entropy generation: calculate time deltas and
// use the CPU jitter in the time deltas. The jitter is injected into the
// entropy pool.
//
- // Ensure that `self.prev_time` is primed before using the output of this
+ // Ensure that `ec.prev_time` is primed before using the output of this
// function. This can be done by calling this function and not using its
// result.
- fn measure_jitter(&mut self) -> Option<()> {
+ fn measure_jitter(&mut self, ec: &mut EcState) -> Option<()> {
// Invoke one noise source before time measurement to add variations
- self.memaccess(true);
+ self.memaccess(&mut ec.mem, true);
// Get time stamp and calculate time delta to previous
// invocation to measure the timing variations
@@ -345,15 +504,15 @@ impl JitterRng {
// Note: wrapping_sub combined with a cast to `i64` generates a correct
// delta, even in the unlikely case this is a timer that is not strictly
// monotonic.
- let current_delta = time.wrapping_sub(self.prev_time) as i64;
- self.prev_time = time;
+ let current_delta = time.wrapping_sub(ec.prev_time) as i64 as i32;
+ ec.prev_time = time;
// Call the next noise source which also injects the data
self.lfsr_time(current_delta as u64, true);
// Check whether we have a stuck measurement (i.e. does the last
// measurement holds entropy?).
- if self.stuck(current_delta) { return None };
+ if ec.stuck(current_delta) { return None };
// Rotate the data buffer by a prime number (any odd number would
// do) to ensure that every bit position of the input time stamp
@@ -415,35 +574,47 @@ impl JitterRng {
}
fn gen_entropy(&mut self) -> u64 {
- // Prime `self.prev_time`, and run the noice sources to make sure the
+ trace!("JitterRng: collecting entropy");
+
+        // Prime `ec.prev_time`, and run the noise sources to make sure the
// first loop round collects the expected entropy.
- let _ = self.measure_jitter();
+ let mut ec = EcState {
+ prev_time: (self.timer)(),
+ last_delta: 0,
+ last_delta2: 0,
+ mem: [0; MEMORY_SIZE],
+ };
+ let _ = self.measure_jitter(&mut ec);
for _ in 0..self.rounds {
// If a stuck measurement is received, repeat measurement
// Note: we do not guard against an infinite loop, that would mean
// the timer suddenly became broken.
- while self.measure_jitter().is_none() {}
+ while self.measure_jitter(&mut ec).is_none() {}
}
+ // Do a single read from `self.mem` to make sure the Memory Access noise
+ // source is not optimised out.
+ black_box(ec.mem[0]);
+
self.stir_pool();
self.data
}
-
+
/// Basic quality tests on the timer, by measuring CPU timing jitter a few
/// hundred times.
///
     /// If successful, this will return the estimated number of rounds necessary
- /// to collect 64 bits of entropy. Otherwise a `TimerError` with the cause
+ /// to collect 64 bits of entropy. Otherwise a [`TimerError`] with the cause
/// of the failure will be returned.
- pub fn test_timer(&mut self) -> Result<u32, TimerError> {
+ ///
+ /// [`TimerError`]: enum.TimerError.html
+ pub fn test_timer(&mut self) -> Result<u8, TimerError> {
+ debug!("JitterRng: testing timer ...");
// We could add a check for system capabilities such as `clock_getres`
// or check for `CONFIG_X86_TSC`, but it does not make much sense as the
// following sanity checks verify that we have a high-resolution timer.
- #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
- return Err(TimerError::NoTimer);
-
let mut delta_sum = 0;
let mut old_delta = 0;
@@ -451,6 +622,13 @@ impl JitterRng {
let mut count_mod = 0;
let mut count_stuck = 0;
+ let mut ec = EcState {
+ prev_time: (self.timer)(),
+ last_delta: 0,
+ last_delta2: 0,
+ mem: [0; MEMORY_SIZE],
+ };
+
// TESTLOOPCOUNT needs some loops to identify edge systems.
// 100 is definitely too little.
const TESTLOOPCOUNT: u64 = 300;
@@ -459,7 +637,7 @@ impl JitterRng {
for i in 0..(CLEARCACHE + TESTLOOPCOUNT) {
// Measure time delta of core entropy collection logic
let time = (self.timer)();
- self.memaccess(true);
+ self.memaccess(&mut ec.mem, true);
self.lfsr_time(time, true);
let time2 = (self.timer)();
@@ -467,7 +645,7 @@ impl JitterRng {
if time == 0 || time2 == 0 {
return Err(TimerError::NoTimer);
}
- let delta = time2.wrapping_sub(time) as i64;
+ let delta = time2.wrapping_sub(time) as i64 as i32;
// Test whether timer is fine grained enough to provide delta even
// when called shortly after each other -- this implies that we also
@@ -483,7 +661,7 @@ impl JitterRng {
// measurements.
if i < CLEARCACHE { continue; }
- if self.stuck(delta) { count_stuck += 1; }
+ if ec.stuck(delta) { count_stuck += 1; }
// Test whether we have an increasing timer.
if !(time2 > time) { time_backwards += 1; }
@@ -499,6 +677,10 @@ impl JitterRng {
old_delta = delta;
}
+ // Do a single read from `self.mem` to make sure the Memory Access noise
+ // source is not optimised out.
+ black_box(ec.mem[0]);
+
// We allow the time to run backwards for up to three times.
// This can happen if the clock is being adjusted by NTP operations.
// If such an operation just happens to interfere with our test, it
@@ -539,140 +721,69 @@ impl JitterRng {
// available bits of entropy per round here for two reasons:
// 1. Simple estimates of the available bits (like Shannon entropy) are
// too optimistic.
- // 2) Unless we want to waste a lot of time during intialization, there
- // only a small number of samples are available.
+        // 2. Unless we want to waste a lot of time during initialization, only
+        //    a small number of samples are available.
//
// Therefore we use a very simple and conservative estimate:
// `let bits_of_entropy = log2(delta_average) / 2`.
//
// The number of rounds `measure_jitter` should run to collect 64 bits
// of entropy is `64 / bits_of_entropy`.
- //
- // To have smaller rounding errors, intermediate values are multiplied
- // by `FACTOR`. To compensate for `log2` and division rounding down,
- // add 1.
let delta_average = delta_sum / TESTLOOPCOUNT;
- // println!("delta_average: {}", delta_average);
-
- const FACTOR: u32 = 3;
- fn log2(x: u64) -> u32 { 64 - x.leading_zeros() }
- // pow(δ, FACTOR) must be representable; if you have overflow reduce FACTOR
- Ok(64 * 2 * FACTOR / (log2(delta_average.pow(FACTOR)) + 1))
+ if delta_average >= 16 {
+ let log2 = 64 - delta_average.leading_zeros();
+ // Do something similar to roundup(64/(log2/2)):
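+            // Illustrative example (not from the original source): an average
+            // delta of 64 ns gives log2 = 7, so this evaluates to
+            // (128 + 7 - 1) / 7 = 19 rounds per 64-bit output.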
+ Ok( ((64u32 * 2 + log2 - 1) / log2) as u8)
+ } else {
+ // For values < 16 the rounding error becomes too large, use a
+ // lookup table.
+ // Values 0 and 1 are invalid, and filtered out by the
+ // `delta_sum < TESTLOOPCOUNT` test above.
+ let log2_lookup = [0, 0, 128, 81, 64, 56, 50, 46,
+ 43, 41, 39, 38, 36, 35, 34, 33];
+ Ok(log2_lookup[delta_average as usize])
+ }
}
/// Statistical test: return the timer delta of one normal run of the
- /// `JitterEntropy` entropy collector.
+ /// `JitterRng` entropy collector.
///
/// Setting `var_rounds` to `true` will execute the memory access and the
     /// CPU jitter noise sources a variable amount of times (just like a real
- /// `JitterEntropy` round).
+ /// `JitterRng` round).
///
     /// Setting `var_rounds` to `false` will execute the noise sources the
/// minimal number of times. This can be used to measure the minimum amount
- /// of entropy one round of entropy collector can collect in the worst case.
- ///
- /// # Example
- ///
- /// Use `timer_stats` to run the [NIST SP 800-90B Entropy Estimation Suite]
- /// (https://github.com/usnistgov/SP800-90B_EntropyAssessment).
- ///
- /// This is the recommended way to test the quality of `JitterRng`. It
- /// should be run before using the RNG on untested hardware, after changes
- /// that could effect how the code is optimised, and after major compiler
- /// compiler changes, like a new LLVM version.
- ///
- /// First generate two files `jitter_rng_var.bin` and `jitter_rng_var.min`.
- ///
- /// Execute `python noniid_main.py -v jitter_rng_var.bin 8`, and validate it
- /// with `restart.py -v jitter_rng_var.bin 8 <min-entropy>`.
- /// This number is the expected amount of entropy that is at least available
- /// for each round of the entropy collector. This number should be greater
- /// than the amount estimated with `64 / test_timer()`.
- ///
- /// Execute `python noniid_main.py -v -u 4 jitter_rng_var.bin 4`, and
- /// validate it with `restart.py -v -u 4 jitter_rng_var.bin 4 <min-entropy>`.
- /// This number is the expected amount of entropy that is available in the
- /// last 4 bits of the timer delta after running noice sources. Note that
- /// a value of 3.70 is the minimum estimated entropy for true randomness.
- ///
- /// Execute `python noniid_main.py -v -u 4 jitter_rng_var.bin 4`, and
- /// validate it with `restart.py -v -u 4 jitter_rng_var.bin 4 <min-entropy>`.
- /// This number is the expected amount of entropy that is available to the
- /// entropy collecter if both noice sources only run their minimal number of
- /// times. This measures the absolute worst-case, and gives a lower bound
- /// for the available entropy.
- ///
- /// ```rust,no_run
- /// use rand::JitterRng;
- ///
- /// # use std::error::Error;
- /// # use std::fs::File;
- /// # use std::io::Write;
- /// #
- /// # fn try_main() -> Result<(), Box<Error>> {
- /// fn get_nstime() -> u64 {
- /// use std::time::{SystemTime, UNIX_EPOCH};
- ///
- /// let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
- /// // The correct way to calculate the current time is
- /// // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64`
- /// // But this is faster, and the difference in terms of entropy is
- /// // negligible (log2(10^9) == 29.9).
- /// dur.as_secs() << 30 | dur.subsec_nanos() as u64
- /// }
- ///
- /// // Do not initialize with `JitterRng::new`, but with `new_with_timer`.
- /// // 'new' always runst `test_timer`, and can therefore fail to
- /// // initialize. We want to be able to get the statistics even when the
- /// // timer test fails.
- /// let mut rng = JitterRng::new_with_timer(get_nstime);
- ///
- /// // 1_000_000 results are required for the NIST SP 800-90B Entropy
- /// // Estimation Suite
- /// // FIXME: this number is smaller here, otherwise the Doc-test is too slow
- /// const ROUNDS: usize = 10_000;
- /// let mut deltas_variable: Vec<u8> = Vec::with_capacity(ROUNDS);
- /// let mut deltas_minimal: Vec<u8> = Vec::with_capacity(ROUNDS);
+ /// of entropy one round of the entropy collector can collect in the worst
+ /// case.
///
- /// for _ in 0..ROUNDS {
- /// deltas_variable.push(rng.timer_stats(true) as u8);
- /// deltas_minimal.push(rng.timer_stats(false) as u8);
- /// }
- ///
- /// // Write out after the statistics collection loop, to not disturb the
- /// // test results.
- /// File::create("jitter_rng_var.bin")?.write(&deltas_variable)?;
- /// File::create("jitter_rng_min.bin")?.write(&deltas_minimal)?;
- /// #
- /// # Ok(())
- /// # }
- /// #
- /// # fn main() {
- /// # try_main().unwrap();
- /// # }
- /// ```
- #[cfg(feature="std")]
+ /// See [Quality testing](struct.JitterRng.html#quality-testing) on how to
+ /// use `timer_stats` to test the quality of `JitterRng`.
pub fn timer_stats(&mut self, var_rounds: bool) -> i64 {
- let time = platform::get_nstime();
- self.memaccess(var_rounds);
+ let mut mem = [0; MEMORY_SIZE];
+
+ let time = (self.timer)();
+ self.memaccess(&mut mem, var_rounds);
self.lfsr_time(time, var_rounds);
- let time2 = platform::get_nstime();
+ let time2 = (self.timer)();
time2.wrapping_sub(time) as i64
}
}
#[cfg(feature="std")]
mod platform {
- #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "windows", all(target_arch = "wasm32", not(target_os = "emscripten")))))]
+ #[cfg(not(any(target_os = "macos", target_os = "ios",
+ target_os = "windows",
+ target_arch = "wasm32")))]
pub fn get_nstime() -> u64 {
use std::time::{SystemTime, UNIX_EPOCH};
let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
// The correct way to calculate the current time is
// `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64`
- // But this is faster, and the difference in terms of entropy is negligible
- // (log2(10^9) == 29.9).
+ // But this is faster, and the difference in terms of entropy is
+ // negligible (log2(10^9) == 29.9).
dur.as_secs() << 30 | dur.subsec_nanos() as u64
}
@@ -680,11 +791,11 @@ mod platform {
pub fn get_nstime() -> u64 {
extern crate libc;
// On Mac OS and iOS std::time::SystemTime only has 1000ns resolution.
- // We use `mach_absolute_time` instead. This provides a CPU dependent unit,
- // to get real nanoseconds the result should by multiplied by numer/denom
- // from `mach_timebase_info`.
- // But we are not interested in the exact nanoseconds, just entropy. So we
- // use the raw result.
+ // We use `mach_absolute_time` instead. This provides a CPU dependent
+        // unit; to get real nanoseconds the result should be multiplied by
+ // numer/denom from `mach_timebase_info`.
+ // But we are not interested in the exact nanoseconds, just entropy. So
+ // we use the raw result.
unsafe { libc::mach_absolute_time() }
}
@@ -697,11 +808,6 @@ mod platform {
*t.QuadPart() as u64
}
}
-
- #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
- pub fn get_nstime() -> u64 {
- unreachable!()
- }
}
// A function that is opaque to the optimizer to assist in avoiding dead-code
@@ -714,41 +820,66 @@ fn black_box<T>(dummy: T) -> T {
}
}
-impl Rng for JitterRng {
+impl RngCore for JitterRng {
fn next_u32(&mut self) -> u32 {
// We want to use both parts of the generated entropy
- if let Some(high) = self.data_remaining.take() {
- high
+ if self.data_half_used {
+ self.data_half_used = false;
+ (self.data >> 32) as u32
} else {
- let data = self.next_u64();
- self.data_remaining = Some((data >> 32) as u32);
- data as u32
+ self.data = self.next_u64();
+ self.data_half_used = true;
+ self.data as u32
}
}
fn next_u64(&mut self) -> u64 {
+ self.data_half_used = false;
self.gen_entropy()
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
- let mut left = dest;
- while left.len() >= 8 {
- let (l, r) = {left}.split_at_mut(8);
- left = r;
- let chunk: [u8; 8] = unsafe {
- mem::transmute(self.next_u64().to_le())
- };
- l.copy_from_slice(&chunk);
- }
- let n = left.len();
- if n > 0 {
- let chunk: [u8; 8] = unsafe {
- mem::transmute(self.next_u64().to_le())
- };
- left.copy_from_slice(&chunk[..n]);
- }
+ // Fill using `next_u32`. This is faster for filling small slices (four
+ // bytes or less), while the overhead is negligible.
+ //
+ // This is done especially for wrappers that implement `next_u32`
+ // themselves via `fill_bytes`.
+ impls::fill_bytes_via_next(self, dest)
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ Ok(self.fill_bytes(dest))
}
}
-// There are no tests included because (1) this is an "external" RNG, so output
-// is not reproducible and (2) `test_timer` *will* fail on some platforms.
+impl CryptoRng for JitterRng {}
+
+#[cfg(test)]
+mod test_jitter_init {
+ use super::JitterRng;
+
+ #[cfg(all(feature="std", not(target_arch = "wasm32")))]
+ #[test]
+ fn test_jitter_init() {
+ use RngCore;
+        // Because this is a debug build, measurements here are not representative
+ // of the final release build.
+ // Don't fail this test if initializing `JitterRng` fails because of a
+ // bad timer (the timer from the standard library may not have enough
+ // accuracy on all platforms).
+ match JitterRng::new() {
+ Ok(ref mut rng) => {
+ // false positives are possible, but extremely unlikely
+ assert!(rng.next_u32() | rng.next_u32() != 0);
+ },
+ Err(_) => {},
+ }
+ }
+
+ #[test]
+ fn test_jitter_bad_timer() {
+ fn bad_timer() -> u64 { 0 }
+ let mut rng = JitterRng::new_with_timer(bad_timer);
+ assert!(rng.test_timer().is_err());
+ }
+}
diff --git a/rand/src/rngs/mock.rs b/rand/src/rngs/mock.rs
new file mode 100644
index 0000000..3c9a994
--- /dev/null
+++ b/rand/src/rngs/mock.rs
@@ -0,0 +1,59 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Mock random number generator
+
+use rand_core::{RngCore, Error, impls};
+
+/// A simple implementation of `RngCore` for testing purposes.
+///
+/// This generates an arithmetic sequence (i.e. adds a constant each step)
+/// over a `u64` number, using wrapping arithmetic. If the increment is 0
+/// the generator yields a constant.
+///
+/// ```
+/// use rand::Rng;
+/// use rand::rngs::mock::StepRng;
+///
+/// let mut my_rng = StepRng::new(2, 1);
+/// let sample: [u64; 3] = my_rng.gen();
+/// assert_eq!(sample, [2, 3, 4]);
+/// ```
+#[derive(Debug, Clone)]
+pub struct StepRng {
+ v: u64,
+ a: u64,
+}
+
+impl StepRng {
+ /// Create a `StepRng`, yielding an arithmetic sequence starting with
+ /// `initial` and incremented by `increment` each time.
+ pub fn new(initial: u64, increment: u64) -> Self {
+ StepRng { v: initial, a: increment }
+ }
+}
+
+impl RngCore for StepRng {
+ fn next_u32(&mut self) -> u32 {
+ self.next_u64() as u32
+ }
+
+ fn next_u64(&mut self) -> u64 {
+ let result = self.v;
+ self.v = self.v.wrapping_add(self.a);
+ result
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ impls::fill_bytes_via_next(self, dest);
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ Ok(self.fill_bytes(dest))
+ }
+}
diff --git a/rand/src/rngs/mod.rs b/rand/src/rngs/mod.rs
new file mode 100644
index 0000000..70c4506
--- /dev/null
+++ b/rand/src/rngs/mod.rs
@@ -0,0 +1,217 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Random number generators and adapters for common usage:
+//!
+//! - [`ThreadRng`], a fast, secure, auto-seeded thread-local generator
+//! - [`StdRng`] and [`SmallRng`], algorithms to cover typical usage
+//! - [`EntropyRng`], [`OsRng`] and [`JitterRng`] as entropy sources
+//! - [`mock::StepRng`] as a simple counter for tests
+//! - [`adapter::ReadRng`] to read from a file/stream
+//! - [`adapter::ReseedingRng`] to reseed a PRNG on clone / process fork etc.
+//!
+//! # Background — Random number generators (RNGs)
+//!
+//! Computers are inherently deterministic, so to get *random* numbers one
+//! either has to use a hardware generator or collect bits of *entropy* from
+//! various sources (e.g. event timestamps, or jitter). This is a relatively
+//! slow and complicated operation.
+//!
+//! Generally the operating system will collect some entropy, remove bias, and
+//! use that to seed its own PRNG; [`OsRng`] provides an interface to this.
+//! [`JitterRng`] is an entropy collector included with Rand that measures
+//! jitter in the CPU execution time, and jitter in memory access time.
+//! [`EntropyRng`] is a wrapper that uses the best entropy source that is
+//! available.
+//!
+//! ## Pseudo-random number generators
+//!
+//! What is commonly used instead of "true" random number generators are
+//! *pseudo-random number generators* (PRNGs), deterministic algorithms that
+//! produce an infinite stream of pseudo-random numbers from a small random
+//! seed. PRNGs are faster, and have better provable properties. The numbers
+//! produced can be statistically of very high quality and can be impossible to
+//! predict. (They can also have obvious correlations and be trivial to predict;
+//! quality varies.)
+//!
+//! There are two different types of PRNGs: those developed for simulations
+//! and statistics, and those developed for use in cryptography; the latter are
+//! called Cryptographically Secure PRNGs (CSPRNG or CPRNG). Both types can
+//! have good statistical quality but the latter also have to be impossible to
+//! predict, even after seeing many previous output values. Rand provides a good
+//! default algorithm from each class:
+//!
+//! - [`SmallRng`] is a PRNG chosen for low memory usage, high performance and
+//! good statistical quality.
+//! - [`StdRng`] is a CSPRNG chosen for good performance and trust of security
+//! (based on reviews, maturity and usage). The current algorithm is HC-128,
+//! which is one of the recommendations by ECRYPT's eSTREAM project.
+//!
+//! The above PRNGs do not cover all use-cases; more algorithms can be found in
+//! the [`prng` module], as well as in several other crates. For example, you
+//! may want a CSPRNG with significantly lower memory usage than [`StdRng`]
+//! while being less concerned about performance, in which case [`ChaChaRng`]
+//! is a good choice.
+//!
+//! One complexity is that the internal state of a PRNG must change with every
+//! generated number. For APIs this generally means a mutable reference to the
+//! state of the PRNG has to be passed around.
+//!
+//! A solution is [`ThreadRng`]. This is a thread-local implementation of
+//! [`StdRng`] with automatic seeding on first use. It is the best choice if you
+//! "just" want a convenient, secure, fast random number source. Use via the
+//! [`thread_rng`] function, which gets a reference to the current thread's
+//! local instance.
+//!
+//! ## Seeding
+//!
+//! As mentioned above, PRNGs require a random seed in order to produce random
+//! output. This is especially important for CSPRNGs, which are still
+//! deterministic algorithms, thus can only be secure if their seed value is
+//! also secure. To seed a PRNG, use one of:
+//!
+//! - [`FromEntropy::from_entropy`]; this is the most convenient way to seed
+//! with fresh, secure random data.
+//! - [`SeedableRng::from_rng`]; this allows seeding from another PRNG or
+//! from an entropy source such as [`EntropyRng`].
+//! - [`SeedableRng::from_seed`]; this is mostly useful if you wish to be able
+//! to reproduce the output sequence by using a fixed seed. (Don't use
+//! [`StdRng`] or [`SmallRng`] in this case since different algorithms may be
+//! used by future versions of Rand; use an algorithm from the
+//! [`prng` module].)
+//!
+//! ## Conclusion
+//!
+//! - [`thread_rng`] is what you often want to use.
+//! - If you want more control, flexibility, or better performance, use
+//! [`StdRng`], [`SmallRng`] or an algorithm from the [`prng` module].
+//! - Use [`FromEntropy::from_entropy`] to seed new PRNGs.
+//! - If you need reproducibility, use [`SeedableRng::from_seed`] combined with
+//! a named PRNG.
+//!
+//! More information and notes on cryptographic security can be found
+//! in the [`prng` module].
+//!
+//! ## Examples
+//!
+//! Examples of seeding PRNGs:
+//!
+//! ```
+//! use rand::prelude::*;
+//! # use rand::Error;
+//!
+//! // StdRng seeded securely by the OS or local entropy collector:
+//! let mut rng = StdRng::from_entropy();
+//! # let v: u32 = rng.gen();
+//!
+//! // SmallRng seeded from thread_rng:
+//! # fn try_inner() -> Result<(), Error> {
+//! let mut rng = SmallRng::from_rng(thread_rng())?;
+//! # let v: u32 = rng.gen();
+//! # Ok(())
+//! # }
+//! # try_inner().unwrap();
+//!
+//! // SmallRng seeded by a constant, for deterministic results:
+//! let seed = [1,2,3,4, 5,6,7,8, 9,10,11,12, 13,14,15,16]; // byte array
+//! let mut rng = SmallRng::from_seed(seed);
+//! # let v: u32 = rng.gen();
+//! ```
+//!
+//!
+//! # Implementing custom RNGs
+//!
+//! If you want to implement a custom RNG, see the [`rand_core`] crate. The RNG
+//! will have to implement the [`RngCore`] trait, which the [`Rng`] trait is
+//! built on top of.
+//!
+//! If the RNG needs seeding, also implement the [`SeedableRng`] trait.
+//!
+//! [`CryptoRng`] is a marker trait that cryptographically secure PRNGs can
+//! implement.
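+//!
+//! As an illustrative sketch (not part of the upstream docs), a trivial
+//! counter-based RNG could implement [`RngCore`] like this (never use such a
+//! generator where randomness matters):
+//!
+//! ```
+//! use rand::{RngCore, Error};
+//!
+//! struct CountingRng(u64);
+//!
+//! impl RngCore for CountingRng {
+//!     fn next_u32(&mut self) -> u32 {
+//!         self.next_u64() as u32
+//!     }
+//!     fn next_u64(&mut self) -> u64 {
+//!         self.0 = self.0.wrapping_add(1);
+//!         self.0
+//!     }
+//!     fn fill_bytes(&mut self, dest: &mut [u8]) {
+//!         // Wasteful but simple: one `next_u64` call per output byte.
+//!         for byte in dest.iter_mut() {
+//!             *byte = self.next_u64() as u8;
+//!         }
+//!     }
+//!     fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+//!         Ok(self.fill_bytes(dest))
+//!     }
+//! }
+//!
+//! let mut rng = CountingRng(0);
+//! assert_eq!(rng.next_u64(), 1);
+//! ```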
+//!
+//!
+// This module:
+//! [`ThreadRng`]: struct.ThreadRng.html
+//! [`StdRng`]: struct.StdRng.html
+//! [`SmallRng`]: struct.SmallRng.html
+//! [`EntropyRng`]: struct.EntropyRng.html
+//! [`OsRng`]: struct.OsRng.html
+//! [`JitterRng`]: struct.JitterRng.html
+// Other traits and functions:
+//! [`rand_core`]: https://crates.io/crates/rand_core
+//! [`prng` module]: ../prng/index.html
+//! [`CryptoRng`]: ../trait.CryptoRng.html
+//! [`FromEntropy`]: ../trait.FromEntropy.html
+//! [`FromEntropy::from_entropy`]: ../trait.FromEntropy.html#tymethod.from_entropy
+//! [`RngCore`]: ../trait.RngCore.html
+//! [`Rng`]: ../trait.Rng.html
+//! [`SeedableRng`]: ../trait.SeedableRng.html
+//! [`SeedableRng::from_rng`]: ../trait.SeedableRng.html#tymethod.from_rng
+//! [`SeedableRng::from_seed`]: ../trait.SeedableRng.html#tymethod.from_seed
+//! [`thread_rng`]: ../fn.thread_rng.html
+//! [`mock::StepRng`]: mock/struct.StepRng.html
+//! [`adapter::ReadRng`]: adapter/struct.ReadRng.html
+//! [`adapter::ReseedingRng`]: adapter/struct.ReseedingRng.html
+//! [`ChaChaRng`]: ../../rand_chacha/struct.ChaChaRng.html
+
+pub mod adapter;
+
+#[cfg(feature="std")] mod entropy;
+mod jitter;
+pub mod mock; // Public so we don't export `StepRng` directly, making it a bit
+ // more clear it is intended for testing.
+mod small;
+mod std;
+#[cfg(feature="std")] pub(crate) mod thread;
+
+
+pub use self::jitter::{JitterRng, TimerError};
+#[cfg(feature="std")] pub use self::entropy::EntropyRng;
+
+pub use self::small::SmallRng;
+pub use self::std::StdRng;
+#[cfg(feature="std")] pub use self::thread::ThreadRng;
+
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+mod os;
+
+#[cfg(all(feature="std",
+ any(target_os = "linux", target_os = "android",
+ target_os = "netbsd",
+ target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "cloudabi",
+ target_os = "macos", target_os = "ios",
+ target_os = "freebsd",
+ target_os = "openbsd", target_os = "bitrig",
+ target_os = "redox",
+ target_os = "fuchsia",
+ windows,
+ all(target_arch = "wasm32", feature = "stdweb"),
+ all(target_arch = "wasm32", feature = "wasm-bindgen"),
+)))]
+pub use self::os::OsRng;
diff --git a/rand/src/rngs/os.rs b/rand/src/rngs/os.rs
new file mode 100644
index 0000000..e609c50
--- /dev/null
+++ b/rand/src/rngs/os.rs
@@ -0,0 +1,1275 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013-2015 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Interface to the random number generator of the operating system.
+
+use std::fmt;
+use rand_core::{CryptoRng, RngCore, Error, impls};
+
+/// A random number generator that retrieves randomness straight from the
+/// operating system.
+///
+/// This is the preferred external source of entropy for most applications.
+/// Commonly it is used to initialize a user-space RNG, which can then be used
+/// to generate random values with much less overhead than `OsRng`.
+///
+/// You may prefer to use [`EntropyRng`] instead of `OsRng`. It is unlikely, but
+/// not entirely theoretical, for `OsRng` to fail. In such cases [`EntropyRng`]
+/// falls back on a good alternative entropy source.
+///
+/// `OsRng::new()` is guaranteed to be very cheap (after the first successful
+/// call), and will never consume more than one file handle per process.
+///
+/// # Platform sources
+///
+/// | OS | interface
+/// |------------------|---------------------------------------------------------
+/// | Linux, Android | [`getrandom`][1] system call if available, otherwise [`/dev/urandom`][2] after reading from `/dev/random` once
+/// | Windows | [`RtlGenRandom`][3]
+/// | macOS, iOS | [`SecRandomCopyBytes`][4]
+/// | FreeBSD | [`kern.arandom`][5]
+/// | OpenBSD, Bitrig | [`getentropy`][6]
+/// | NetBSD | [`/dev/urandom`][7] after reading from `/dev/random` once
+/// | Dragonfly BSD | [`/dev/random`][8]
+/// | Solaris, illumos | [`getrandom`][9] system call if available, otherwise [`/dev/random`][10]
+/// | Fuchsia OS | [`cprng_draw`][11]
+/// | Redox | [`rand:`][12]
+/// | CloudABI | [`random_get`][13]
+/// | Haiku | `/dev/random` (identical to `/dev/urandom`)
+/// | Web browsers     | [`Crypto.getRandomValues`][14] (see [Support for WebAssembly and asm.js][16])
+/// | Node.js          | [`crypto.randomBytes`][15] (see [Support for WebAssembly and asm.js][16])
+///
+/// Rand doesn't have a blanket implementation for all Unix-like operating
+/// systems that reads from `/dev/urandom`. This ensures all supported operating
+/// systems are using the recommended interface and respect maximum buffer
+/// sizes.
+///
+/// ## Support for WebAssembly and asm.js
+///
+/// The three Emscripten targets `asmjs-unknown-emscripten`,
+/// `wasm32-unknown-emscripten` and `wasm32-experimental-emscripten` use
+/// Emscripten's emulation of `/dev/random` on web browsers and Node.js.
+///
+/// The bare Wasm target `wasm32-unknown-unknown` tries to call the javascript
+/// methods directly, using either `stdweb` in combination with `cargo-web` or
+/// `wasm-bindgen` depending on what features are activated for this crate.
+///
+/// ## Early boot
+///
+/// It is possible that early in the boot process the OS hasn't had enough time
+/// yet to collect entropy to securely seed its RNG, especially on virtual
+/// machines.
+///
+/// Some operating systems always block the thread until the RNG is securely
+/// seeded. This can take anywhere from a few seconds to more than a minute.
+/// Others make a best effort to use a seed from before the shutdown and don't
+/// document much.
+///
+/// A few (Linux, NetBSD and Solaris) offer a choice between blocking and
+/// getting an error. With `try_fill_bytes` we choose to get the error
+/// ([`ErrorKind::NotReady`]), while the other methods use a blocking interface.
+///
+/// On Linux (when the `getrandom` system call is not available) and on NetBSD
+/// reading from `/dev/urandom` never blocks, even when the OS hasn't collected
+/// enough entropy yet. As a countermeasure we try to do a single read from
+/// `/dev/random` until we know the OS RNG is initialized (and store this in a
+/// global static).
+///
+/// # Panics
+///
+/// `OsRng` is extremely unlikely to fail if `OsRng::new()`, and one read from
+/// it, were successful. But in case it does fail, only [`try_fill_bytes`] is
+/// able to report the cause. Depending on the error the other [`RngCore`]
+/// methods will retry several times, and panic in case the error remains.
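+///
+/// # Example
+///
+/// An illustrative sketch (not from the upstream documentation): filling a key
+/// buffer directly from the OS RNG.
+///
+/// ```no_run
+/// use rand::rngs::OsRng;
+/// use rand::RngCore;
+///
+/// let mut os_rng = OsRng::new().unwrap();
+/// let mut key = [0u8; 16];
+/// os_rng.fill_bytes(&mut key);
+/// ```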
+///
+/// [`EntropyRng`]: struct.EntropyRng.html
+/// [`RngCore`]: ../trait.RngCore.html
+/// [`try_fill_bytes`]: ../trait.RngCore.html#method.tymethod.try_fill_bytes
+/// [`ErrorKind::NotReady`]: ../enum.ErrorKind.html#variant.NotReady
+///
+/// [1]: http://man7.org/linux/man-pages/man2/getrandom.2.html
+/// [2]: http://man7.org/linux/man-pages/man4/urandom.4.html
+/// [3]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx
+/// [4]: https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc
+/// [5]: https://www.freebsd.org/cgi/man.cgi?query=random&sektion=4
+/// [6]: https://man.openbsd.org/getentropy.2
+/// [7]: http://netbsd.gw.com/cgi-bin/man-cgi?random+4+NetBSD-current
+/// [8]: https://leaf.dragonflybsd.org/cgi/web-man?command=random&section=4
+/// [9]: https://docs.oracle.com/cd/E88353_01/html/E37841/getrandom-2.html
+/// [10]: https://docs.oracle.com/cd/E86824_01/html/E54777/random-7d.html
+/// [11]: https://fuchsia.googlesource.com/zircon/+/HEAD/docs/syscalls/cprng_draw.md
+/// [12]: https://github.com/redox-os/randd/blob/master/src/main.rs
+/// [13]: https://github.com/NuxiNL/cloudabi/blob/v0.20/cloudabi.txt#L1826
+/// [14]: https://www.w3.org/TR/WebCryptoAPI/#Crypto-method-getRandomValues
+/// [15]: https://nodejs.org/api/crypto.html#crypto_crypto_randombytes_size_callback
+/// [16]: #support-for-webassembly-and-asmjs
+
+
+#[derive(Clone)]
+pub struct OsRng(imp::OsRng);
+
+impl fmt::Debug for OsRng {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+impl OsRng {
+ /// Create a new `OsRng`.
+ pub fn new() -> Result<OsRng, Error> {
+ imp::OsRng::new().map(OsRng)
+ }
+}
+
+impl CryptoRng for OsRng {}
+
+impl RngCore for OsRng {
+ fn next_u32(&mut self) -> u32 {
+ impls::next_u32_via_fill(self)
+ }
+
+ fn next_u64(&mut self) -> u64 {
+ impls::next_u64_via_fill(self)
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ use std::{time, thread};
+
+ // We cannot return Err(..), so we try to handle before panicking.
+ const MAX_RETRY_PERIOD: u32 = 10; // max 10s
+ const WAIT_DUR_MS: u32 = 100; // retry every 100ms
+ let wait_dur = time::Duration::from_millis(WAIT_DUR_MS as u64);
+ const RETRY_LIMIT: u32 = (MAX_RETRY_PERIOD * 1000) / WAIT_DUR_MS;
+ const TRANSIENT_RETRIES: u32 = 8;
+ let mut err_count = 0;
+ let mut error_logged = false;
+
+ // Maybe block until the OS RNG is initialized
+ let mut read = 0;
+ if let Ok(n) = self.0.test_initialized(dest, true) { read = n };
+ let dest = &mut dest[read..];
+
+ loop {
+ if let Err(e) = self.try_fill_bytes(dest) {
+ if err_count >= RETRY_LIMIT {
+ error!("OsRng failed too many times; last error: {}", e);
+ panic!("OsRng failed too many times; last error: {}", e);
+ }
+
+ if e.kind.should_wait() {
+ if !error_logged {
+ warn!("OsRng failed; waiting up to {}s and retrying. Error: {}",
+ MAX_RETRY_PERIOD, e);
+ error_logged = true;
+ }
+ err_count += 1;
+ thread::sleep(wait_dur);
+ continue;
+ } else if e.kind.should_retry() {
+ if !error_logged {
+ warn!("OsRng failed; retrying up to {} times. Error: {}",
+ TRANSIENT_RETRIES, e);
+ error_logged = true;
+ }
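+                    // Count one transient failure as roughly
+                    // RETRY_LIMIT / TRANSIENT_RETRIES attempts, so that after
+                    // about TRANSIENT_RETRIES such failures we reach
+                    // RETRY_LIMIT and give up.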
+ err_count += (RETRY_LIMIT + TRANSIENT_RETRIES - 1)
+ / TRANSIENT_RETRIES; // round up
+ continue;
+ } else {
+ error!("OsRng failed: {}", e);
+ panic!("OsRng fatal error: {}", e);
+ }
+ }
+
+ break;
+ }
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ // Some systems do not support reading 0 random bytes.
+ // (And why waste a system call?)
+ if dest.len() == 0 { return Ok(()); }
+
+ let read = self.0.test_initialized(dest, false)?;
+ let dest = &mut dest[read..];
+
+ let max = self.0.max_chunk_size();
+ if dest.len() <= max {
+ trace!("OsRng: reading {} bytes via {}",
+ dest.len(), self.0.method_str());
+ } else {
+            trace!("OsRng: reading {} bytes via {} in {} chunks of {} bytes",
+                   dest.len(), self.0.method_str(),
+                   (dest.len() + max - 1) / max, max);
+ }
+ for slice in dest.chunks_mut(max) {
+ self.0.fill_chunk(slice)?;
+ }
+ Ok(())
+ }
+}
+
+trait OsRngImpl where Self: Sized {
+ // Create a new `OsRng` platform interface.
+ fn new() -> Result<Self, Error>;
+
+ // Fill a chunk with random bytes.
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error>;
+
+ // Test whether the OS RNG is initialized. This method may not be possible
+ // to support cheaply (or at all) on all operating systems.
+ //
+    // If `blocking` is set, this will cause the OS to block execution until
+    // its RNG is initialized.
+    //
+    // Random values that are read during this test are stored in `dest`; the
+    // number of bytes read is returned.
+ fn test_initialized(&mut self, _dest: &mut [u8], _blocking: bool)
+ -> Result<usize, Error> { Ok(0) }
+
+ // Maximum chunk size supported.
+ fn max_chunk_size(&self) -> usize { ::core::usize::MAX }
+
+ // Name of the OS interface (used for logging).
+ fn method_str(&self) -> &'static str;
+}
+
+
+
+
+// Helper functions to read from a random device such as `/dev/urandom`.
+//
+// All instances use a single internal file handle, to prevent possible
+// exhaustion of file descriptors.
+#[cfg(any(target_os = "linux", target_os = "android",
+ target_os = "netbsd", target_os = "dragonfly",
+ target_os = "solaris", target_os = "redox",
+ target_os = "haiku", target_os = "emscripten"))]
+mod random_device {
+ use {Error, ErrorKind};
+ use std::fs::File;
+ use std::io;
+ use std::io::Read;
+ use std::sync::{Once, Mutex, ONCE_INIT};
+
+ // TODO: remove outer Option when `Mutex::new(None)` is a constant expression
+ static mut READ_RNG_FILE: Option<Mutex<Option<File>>> = None;
+ static READ_RNG_ONCE: Once = ONCE_INIT;
+
+ #[allow(unused)]
+ pub fn open<F>(path: &'static str, open_fn: F) -> Result<(), Error>
+ where F: Fn(&'static str) -> Result<File, io::Error>
+ {
+ READ_RNG_ONCE.call_once(|| {
+ unsafe { READ_RNG_FILE = Some(Mutex::new(None)) }
+ });
+
+ // We try opening the file outside the `call_once` fn because we cannot
+ // clone the error, thus we must retry on failure.
+
+ let mutex = unsafe { READ_RNG_FILE.as_ref().unwrap() };
+ let mut guard = mutex.lock().unwrap();
+ if (*guard).is_none() {
+ info!("OsRng: opening random device {}", path);
+ let file = open_fn(path).map_err(map_err)?;
+ *guard = Some(file);
+ };
+ Ok(())
+ }
+
+ pub fn read(dest: &mut [u8]) -> Result<(), Error> {
+        // We expect this function only to be used after `random_device::open`
+        // was successful. Therefore we can assume that our memory was set with
+        // a valid object.
+ let mutex = unsafe { READ_RNG_FILE.as_ref().unwrap() };
+ let mut guard = mutex.lock().unwrap();
+ let file = (*guard).as_mut().unwrap();
+
+        // Use `Read::read_exact`, which retries on `ErrorKind::Interrupted`.
+ file.read_exact(dest).map_err(|err| {
+ Error::with_cause(ErrorKind::Unavailable,
+ "error reading random device", err)
+ })
+
+ }
+
+ pub fn map_err(err: io::Error) -> Error {
+ match err.kind() {
+ io::ErrorKind::Interrupted =>
+ Error::new(ErrorKind::Transient, "interrupted"),
+ io::ErrorKind::WouldBlock =>
+ Error::with_cause(ErrorKind::NotReady,
+ "OS RNG not yet seeded", err),
+ _ => Error::with_cause(ErrorKind::Unavailable,
+ "error while opening random device", err)
+ }
+ }
+}
+
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+mod imp {
+ extern crate libc;
+
+ use {Error, ErrorKind};
+ use super::random_device;
+ use super::OsRngImpl;
+
+ use std::io;
+ use std::io::Read;
+ use std::fs::{File, OpenOptions};
+ use std::os::unix::fs::OpenOptionsExt;
+ use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
+ use std::sync::{Once, ONCE_INIT};
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng {
+ method: OsRngMethod,
+ initialized: bool,
+ }
+
+ #[derive(Clone, Debug)]
+ enum OsRngMethod {
+ GetRandom,
+ RandomDevice,
+ }
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ if is_getrandom_available() {
+ return Ok(OsRng { method: OsRngMethod::GetRandom,
+ initialized: false });
+ }
+ random_device::open("/dev/urandom", &|p| File::open(p))?;
+ Ok(OsRng { method: OsRngMethod::RandomDevice, initialized: false })
+ }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ match self.method {
+ OsRngMethod::GetRandom => getrandom_try_fill(dest, false),
+ OsRngMethod::RandomDevice => random_device::read(dest),
+ }
+ }
+
+ fn test_initialized(&mut self, dest: &mut [u8], blocking: bool)
+ -> Result<usize, Error>
+ {
+ static OS_RNG_INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
+ if !self.initialized {
+ self.initialized = OS_RNG_INITIALIZED.load(Ordering::Relaxed);
+ }
+ if self.initialized { return Ok(0); }
+
+ let result = match self.method {
+ OsRngMethod::GetRandom => {
+ getrandom_try_fill(dest, blocking)?;
+ Ok(dest.len())
+ }
+ OsRngMethod::RandomDevice => {
+ info!("OsRng: testing random device /dev/random");
+ let mut file = OpenOptions::new()
+ .read(true)
+ .custom_flags(if blocking { 0 } else { libc::O_NONBLOCK })
+ .open("/dev/random")
+ .map_err(random_device::map_err)?;
+ file.read(&mut dest[..1]).map_err(random_device::map_err)?;
+ Ok(1)
+ }
+ };
+ OS_RNG_INITIALIZED.store(true, Ordering::Relaxed);
+ self.initialized = true;
+ result
+ }
+
+ fn method_str(&self) -> &'static str {
+ match self.method {
+ OsRngMethod::GetRandom => "getrandom",
+ OsRngMethod::RandomDevice => "/dev/urandom",
+ }
+ }
+ }
+
+ #[cfg(target_arch = "x86_64")]
+ const NR_GETRANDOM: libc::c_long = 318;
+ #[cfg(target_arch = "x86")]
+ const NR_GETRANDOM: libc::c_long = 355;
+ #[cfg(target_arch = "arm")]
+ const NR_GETRANDOM: libc::c_long = 384;
+ #[cfg(target_arch = "aarch64")]
+ const NR_GETRANDOM: libc::c_long = 278;
+ #[cfg(target_arch = "s390x")]
+ const NR_GETRANDOM: libc::c_long = 349;
+ #[cfg(target_arch = "powerpc")]
+ const NR_GETRANDOM: libc::c_long = 359;
+ #[cfg(target_arch = "powerpc64")]
+ const NR_GETRANDOM: libc::c_long = 359;
+ #[cfg(target_arch = "mips")] // old ABI
+ const NR_GETRANDOM: libc::c_long = 4353;
+ #[cfg(target_arch = "mips64")]
+ const NR_GETRANDOM: libc::c_long = 5313;
+ #[cfg(target_arch = "sparc")]
+ const NR_GETRANDOM: libc::c_long = 347;
+ #[cfg(target_arch = "sparc64")]
+ const NR_GETRANDOM: libc::c_long = 347;
+ #[cfg(not(any(target_arch = "x86_64", target_arch = "x86",
+ target_arch = "arm", target_arch = "aarch64",
+ target_arch = "s390x", target_arch = "powerpc",
+ target_arch = "powerpc64", target_arch = "mips",
+ target_arch = "mips64", target_arch = "sparc",
+ target_arch = "sparc64")))]
+ const NR_GETRANDOM: libc::c_long = 0;
+
+ fn getrandom(buf: &mut [u8], blocking: bool) -> libc::c_long {
+ const GRND_NONBLOCK: libc::c_uint = 0x0001;
+
+ if NR_GETRANDOM == 0 { return -1 };
+
+ unsafe {
+ libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(),
+ if blocking { 0 } else { GRND_NONBLOCK })
+ }
+ }
+
+ fn getrandom_try_fill(dest: &mut [u8], blocking: bool) -> Result<(), Error> {
+ let mut read = 0;
+ while read < dest.len() {
+ let result = getrandom(&mut dest[read..], blocking);
+ if result == -1 {
+ let err = io::Error::last_os_error();
+ let kind = err.kind();
+ if kind == io::ErrorKind::Interrupted {
+ continue;
+ } else if kind == io::ErrorKind::WouldBlock {
+ return Err(Error::with_cause(
+ ErrorKind::NotReady,
+ "getrandom not ready",
+ err,
+ ));
+ } else {
+ return Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "unexpected getrandom error",
+ err,
+ ));
+ }
+ } else {
+ read += result as usize;
+ }
+ }
+ Ok(())
+ }
+
+ fn is_getrandom_available() -> bool {
+ static CHECKER: Once = ONCE_INIT;
+ static AVAILABLE: AtomicBool = ATOMIC_BOOL_INIT;
+
+ if NR_GETRANDOM == 0 { return false };
+
+ CHECKER.call_once(|| {
+ debug!("OsRng: testing getrandom");
+ let mut buf: [u8; 0] = [];
+ let result = getrandom(&mut buf, false);
+ let available = if result == -1 {
+ let err = io::Error::last_os_error().raw_os_error();
+ err != Some(libc::ENOSYS)
+ } else {
+ true
+ };
+ AVAILABLE.store(available, Ordering::Relaxed);
+ info!("OsRng: using {}", if available { "getrandom" } else { "/dev/urandom" });
+ });
+
+ AVAILABLE.load(Ordering::Relaxed)
+ }
+}
+
+
+#[cfg(target_os = "netbsd")]
+mod imp {
+ use Error;
+ use super::random_device;
+ use super::OsRngImpl;
+
+ use std::fs::File;
+ use std::io::Read;
+ use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng { initialized: bool }
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ random_device::open("/dev/urandom", &|p| File::open(p))?;
+ Ok(OsRng { initialized: false })
+ }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ random_device::read(dest)
+ }
+
+ // Read a single byte from `/dev/random` to determine if the OS RNG is
+ // already seeded. NetBSD always blocks if not yet ready.
+ fn test_initialized(&mut self, dest: &mut [u8], _blocking: bool)
+ -> Result<usize, Error>
+ {
+ static OS_RNG_INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
+ if !self.initialized {
+ self.initialized = OS_RNG_INITIALIZED.load(Ordering::Relaxed);
+ }
+ if self.initialized { return Ok(0); }
+
+ info!("OsRng: testing random device /dev/random");
+ let mut file =
+ File::open("/dev/random").map_err(random_device::map_err)?;
+ file.read(&mut dest[..1]).map_err(random_device::map_err)?;
+
+ OS_RNG_INITIALIZED.store(true, Ordering::Relaxed);
+ self.initialized = true;
+ Ok(1)
+ }
+
+ fn method_str(&self) -> &'static str { "/dev/urandom" }
+ }
+}
+
+
+#[cfg(any(target_os = "dragonfly",
+ target_os = "haiku",
+ target_os = "emscripten"))]
+mod imp {
+ use Error;
+ use super::random_device;
+ use super::OsRngImpl;
+ use std::fs::File;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng();
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ random_device::open("/dev/random", &|p| File::open(p))?;
+ Ok(OsRng())
+ }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ random_device::read(dest)
+ }
+
+ #[cfg(target_os = "emscripten")]
+ fn max_chunk_size(&self) -> usize {
+            // `Crypto.getRandomValues` documents `dest` should be at most 65536
+            // bytes. `crypto.randomBytes` documents: "To minimize threadpool
+            // task length variation, partition large randomBytes requests when
+            // doing so as part of fulfilling a client request."
+ 65536
+ }
+
+ fn method_str(&self) -> &'static str { "/dev/random" }
+ }
+}
+
+
+// Read from `/dev/random`, with chunks of limited size (1040 bytes).
+// `/dev/random` uses the Hash_DRBG with SHA512 algorithm from NIST SP 800-90A.
+// `/dev/urandom` uses the FIPS 186-2 algorithm, which is considered less
+// secure. We choose to read from `/dev/random`.
+//
+// Since Solaris 11.3 the `getrandom` syscall is available. To make sure we can
+// compile on both Solaris and on OpenSolaris derivatives that do not have the
+// function, we do a direct syscall instead of calling a library function.
+//
+// We have no way to differentiate between Solaris, illumos, SmartOS, etc.
+#[cfg(target_os = "solaris")]
+mod imp {
+ extern crate libc;
+
+ use {Error, ErrorKind};
+ use super::random_device;
+ use super::OsRngImpl;
+
+ use std::io;
+ use std::io::Read;
+ use std::fs::{File, OpenOptions};
+ use std::os::unix::fs::OpenOptionsExt;
+ use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng {
+ method: OsRngMethod,
+ initialized: bool,
+ }
+
+ #[derive(Clone, Debug)]
+ enum OsRngMethod {
+ GetRandom,
+ RandomDevice,
+ }
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ if is_getrandom_available() {
+ return Ok(OsRng { method: OsRngMethod::GetRandom,
+ initialized: false });
+ }
+ let open = |p| OpenOptions::new()
+ .read(true)
+ .custom_flags(libc::O_NONBLOCK)
+ .open(p);
+ random_device::open("/dev/random", &open)?;
+ Ok(OsRng { method: OsRngMethod::RandomDevice, initialized: false })
+ }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ match self.method {
+ OsRngMethod::GetRandom => getrandom_try_fill(dest, false),
+ OsRngMethod::RandomDevice => random_device::read(dest),
+ }
+ }
+
+ fn test_initialized(&mut self, dest: &mut [u8], blocking: bool)
+ -> Result<usize, Error>
+ {
+ static OS_RNG_INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
+ if !self.initialized {
+ self.initialized = OS_RNG_INITIALIZED.load(Ordering::Relaxed);
+ }
+ if self.initialized { return Ok(0); }
+
+ let chunk_len = ::core::cmp::min(1024, dest.len());
+ let dest = &mut dest[..chunk_len];
+
+ match self.method {
+ OsRngMethod::GetRandom => getrandom_try_fill(dest, blocking)?,
+ OsRngMethod::RandomDevice => {
+ if blocking {
+ info!("OsRng: testing random device /dev/random");
+                        // We already have a non-blocking handle, but now need a
+                        // blocking one. Not much choice except opening it twice.
+ let mut file = File::open("/dev/random")
+ .map_err(random_device::map_err)?;
+ file.read(dest).map_err(random_device::map_err)?;
+ } else {
+ self.fill_chunk(dest)?;
+ }
+ }
+ };
+ OS_RNG_INITIALIZED.store(true, Ordering::Relaxed);
+ self.initialized = true;
+ Ok(chunk_len)
+ }
+
+ fn max_chunk_size(&self) -> usize {
+ // The documentation says 1024 is the maximum for getrandom, but
+ // 1040 for /dev/random.
+ 1024
+ }
+
+ fn method_str(&self) -> &'static str {
+ match self.method {
+ OsRngMethod::GetRandom => "getrandom",
+ OsRngMethod::RandomDevice => "/dev/random",
+ }
+ }
+ }
+
+ fn getrandom(buf: &mut [u8], blocking: bool) -> libc::c_long {
+ extern "C" {
+ fn syscall(number: libc::c_long, ...) -> libc::c_long;
+ }
+
+ const SYS_GETRANDOM: libc::c_long = 143;
+ const GRND_NONBLOCK: libc::c_uint = 0x0001;
+ const GRND_RANDOM: libc::c_uint = 0x0002;
+
+ unsafe {
+ syscall(SYS_GETRANDOM, buf.as_mut_ptr(), buf.len(),
+ if blocking { 0 } else { GRND_NONBLOCK } | GRND_RANDOM)
+ }
+ }
+
+ fn getrandom_try_fill(dest: &mut [u8], blocking: bool) -> Result<(), Error> {
+ let result = getrandom(dest, blocking);
+ if result == -1 || result == 0 {
+ let err = io::Error::last_os_error();
+ let kind = err.kind();
+ if kind == io::ErrorKind::WouldBlock {
+ return Err(Error::with_cause(
+ ErrorKind::NotReady,
+ "getrandom not ready",
+ err,
+ ));
+ } else {
+ return Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "unexpected getrandom error",
+ err,
+ ));
+ }
+ } else if result != dest.len() as i64 {
+ return Err(Error::new(ErrorKind::Unavailable,
+ "unexpected getrandom error"));
+ }
+ Ok(())
+ }
+
+ fn is_getrandom_available() -> bool {
+ use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
+ use std::sync::{Once, ONCE_INIT};
+
+ static CHECKER: Once = ONCE_INIT;
+ static AVAILABLE: AtomicBool = ATOMIC_BOOL_INIT;
+
+ CHECKER.call_once(|| {
+ debug!("OsRng: testing getrandom");
+ let mut buf: [u8; 0] = [];
+ let result = getrandom(&mut buf, false);
+ let available = if result == -1 {
+ let err = io::Error::last_os_error().raw_os_error();
+ err != Some(libc::ENOSYS)
+ } else {
+ true
+ };
+ AVAILABLE.store(available, Ordering::Relaxed);
+ info!("OsRng: using {}", if available { "getrandom" } else { "/dev/random" });
+ });
+
+ AVAILABLE.load(Ordering::Relaxed)
+ }
+}
+
+
+#[cfg(target_os = "cloudabi")]
+mod imp {
+ extern crate cloudabi;
+
+ use std::io;
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng;
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> { Ok(OsRng) }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let errno = unsafe { cloudabi::random_get(dest) };
+ if errno == cloudabi::errno::SUCCESS {
+ Ok(())
+ } else {
+ // Cloudlibc provides its own `strerror` implementation so we
+ // can use `from_raw_os_error` here.
+ Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "random_get() system call failed",
+ io::Error::from_raw_os_error(errno as i32),
+ ))
+ }
+ }
+
+ fn method_str(&self) -> &'static str { "cloudabi::random_get" }
+ }
+}
+
+
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+mod imp {
+ extern crate libc;
+
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ use std::io;
+ use self::libc::{c_int, size_t};
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng;
+
+ enum SecRandom {}
+
+ #[allow(non_upper_case_globals)]
+ const kSecRandomDefault: *const SecRandom = 0 as *const SecRandom;
+
+ #[link(name = "Security", kind = "framework")]
+ extern {
+ fn SecRandomCopyBytes(rnd: *const SecRandom,
+ count: size_t, bytes: *mut u8) -> c_int;
+ }
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> { Ok(OsRng) }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let ret = unsafe {
+ SecRandomCopyBytes(kSecRandomDefault,
+ dest.len() as size_t,
+ dest.as_mut_ptr())
+ };
+ if ret == -1 {
+ Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "couldn't generate random bytes",
+ io::Error::last_os_error()))
+ } else {
+ Ok(())
+ }
+ }
+
+ fn method_str(&self) -> &'static str { "SecRandomCopyBytes" }
+ }
+}
+
+
+#[cfg(target_os = "freebsd")]
+mod imp {
+ extern crate libc;
+
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ use std::ptr;
+ use std::io;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng;
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> { Ok(OsRng) }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let mib = [libc::CTL_KERN, libc::KERN_ARND];
+ let mut len = dest.len();
+ let ret = unsafe {
+ libc::sysctl(mib.as_ptr(), mib.len() as libc::c_uint,
+ dest.as_mut_ptr() as *mut _, &mut len,
+ ptr::null(), 0)
+ };
+ if ret == -1 || len != dest.len() {
+ return Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "kern.arandom sysctl failed",
+ io::Error::last_os_error()));
+ }
+ Ok(())
+ }
+
+ fn max_chunk_size(&self) -> usize { 256 }
+
+ fn method_str(&self) -> &'static str { "kern.arandom" }
+ }
+}
+
+
+#[cfg(any(target_os = "openbsd", target_os = "bitrig"))]
+mod imp {
+ extern crate libc;
+
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ use std::io;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng;
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> { Ok(OsRng) }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let ret = unsafe {
+ libc::getentropy(dest.as_mut_ptr() as *mut libc::c_void, dest.len())
+ };
+ if ret == -1 {
+ return Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "getentropy failed",
+ io::Error::last_os_error()));
+ }
+ Ok(())
+ }
+
+ fn max_chunk_size(&self) -> usize { 256 }
+
+ fn method_str(&self) -> &'static str { "getentropy" }
+ }
+}
+
+
+#[cfg(target_os = "redox")]
+mod imp {
+ use Error;
+ use super::random_device;
+ use super::OsRngImpl;
+ use std::fs::File;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng();
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ random_device::open("rand:", &|p| File::open(p))?;
+ Ok(OsRng())
+ }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ random_device::read(dest)
+ }
+
+ fn method_str(&self) -> &'static str { "'rand:'" }
+ }
+}
+
+
+#[cfg(target_os = "fuchsia")]
+mod imp {
+ extern crate fuchsia_zircon;
+
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng;
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> { Ok(OsRng) }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let mut read = 0;
+ while read < dest.len() {
+ match fuchsia_zircon::cprng_draw(&mut dest[read..]) {
+ Ok(actual) => read += actual,
+ Err(e) => {
+ return Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "cprng_draw failed",
+ e.into_io_error()));
+ }
+ };
+ }
+ Ok(())
+ }
+
+ fn max_chunk_size(&self) -> usize {
+ fuchsia_zircon::sys::ZX_CPRNG_DRAW_MAX_LEN
+ }
+
+ fn method_str(&self) -> &'static str { "cprng_draw" }
+ }
+}
+
+
+#[cfg(windows)]
+mod imp {
+ extern crate winapi;
+
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ use std::io;
+
+ use self::winapi::shared::minwindef::ULONG;
+ use self::winapi::um::ntsecapi::RtlGenRandom;
+ use self::winapi::um::winnt::PVOID;
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng;
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> { Ok(OsRng) }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ let ret = unsafe {
+ RtlGenRandom(dest.as_mut_ptr() as PVOID, dest.len() as ULONG)
+ };
+ if ret == 0 {
+ return Err(Error::with_cause(
+ ErrorKind::Unavailable,
+ "couldn't generate random bytes",
+ io::Error::last_os_error()));
+ }
+ Ok(())
+ }
+
+ fn max_chunk_size(&self) -> usize { <ULONG>::max_value() as usize }
+
+ fn method_str(&self) -> &'static str { "RtlGenRandom" }
+ }
+}
+
+
+#[cfg(all(target_arch = "wasm32",
+ not(target_os = "emscripten"),
+ feature = "stdweb"))]
+mod imp {
+ use std::mem;
+ use stdweb::unstable::TryInto;
+ use stdweb::web::error::Error as WebError;
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ #[derive(Clone, Debug)]
+ enum OsRngMethod {
+ Browser,
+ Node
+ }
+
+ #[derive(Clone, Debug)]
+ pub struct OsRng(OsRngMethod);
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ let result = js! {
+ try {
+ if (
+ typeof self === "object" &&
+ typeof self.crypto === "object" &&
+ typeof self.crypto.getRandomValues === "function"
+ ) {
+ return { success: true, ty: 1 };
+ }
+
+ if (typeof require("crypto").randomBytes === "function") {
+ return { success: true, ty: 2 };
+ }
+
+ return { success: false, error: new Error("not supported") };
+ } catch(err) {
+ return { success: false, error: err };
+ }
+ };
+
+ if js!{ return @{ result.as_ref() }.success } == true {
+ let ty = js!{ return @{ result }.ty };
+
+ if ty == 1 { Ok(OsRng(OsRngMethod::Browser)) }
+ else if ty == 2 { Ok(OsRng(OsRngMethod::Node)) }
+ else { unreachable!() }
+ } else {
+ let err: WebError = js!{ return @{ result }.error }.try_into().unwrap();
+ Err(Error::with_cause(ErrorKind::Unavailable, "WASM Error", err))
+ }
+ }
+
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ assert_eq!(mem::size_of::<usize>(), 4);
+
+ let len = dest.len() as u32;
+ let ptr = dest.as_mut_ptr() as i32;
+
+ let result = match self.0 {
+ OsRngMethod::Browser => js! {
+ try {
+ let array = new Uint8Array(@{ len });
+ self.crypto.getRandomValues(array);
+ HEAPU8.set(array, @{ ptr });
+
+ return { success: true };
+ } catch(err) {
+ return { success: false, error: err };
+ }
+ },
+ OsRngMethod::Node => js! {
+ try {
+ let bytes = require("crypto").randomBytes(@{ len });
+ HEAPU8.set(new Uint8Array(bytes), @{ ptr });
+
+ return { success: true };
+ } catch(err) {
+ return { success: false, error: err };
+ }
+ }
+ };
+
+ if js!{ return @{ result.as_ref() }.success } == true {
+ Ok(())
+ } else {
+ let err: WebError = js!{ return @{ result }.error }.try_into().unwrap();
+ Err(Error::with_cause(ErrorKind::Unexpected, "WASM Error", err))
+ }
+ }
+
+ fn max_chunk_size(&self) -> usize { 65536 }
+
+ fn method_str(&self) -> &'static str {
+ match self.0 {
+ OsRngMethod::Browser => "Crypto.getRandomValues",
+ OsRngMethod::Node => "crypto.randomBytes",
+ }
+ }
+ }
+}
+
+#[cfg(all(target_arch = "wasm32",
+ not(target_os = "emscripten"),
+ not(feature = "stdweb"),
+ feature = "wasm-bindgen"))]
+mod imp {
+ use __wbg_shims::*;
+
+ use {Error, ErrorKind};
+ use super::OsRngImpl;
+
+ #[derive(Clone, Debug)]
+ pub enum OsRng {
+ Node(NodeCrypto),
+ Browser(BrowserCrypto),
+ }
+
+ impl OsRngImpl for OsRng {
+ fn new() -> Result<OsRng, Error> {
+ // First up we need to detect if we're running in node.js or a
+ // browser. To do this we get ahold of the `this` object (in a bit
+ // of a roundabout fashion).
+ //
+ // Once we have `this` we look at its `self` property, which is
+ // only defined on the web (either a main window or web worker).
+ let this = Function::new("return this").call(&JsValue::undefined());
+ assert!(this != JsValue::undefined());
+ let this = This::from(this);
+ let is_browser = this.self_() != JsValue::undefined();
+
+ if !is_browser {
+ return Ok(OsRng::Node(node_require("crypto")))
+ }
+
+ // If `self` is defined then we're in a browser somehow (main window
+ // or web worker). Here we want to try to use
+ // `crypto.getRandomValues`, but if `crypto` isn't defined we assume
+ // we're in an older web browser and the OS RNG isn't available.
+ let crypto = this.crypto();
+ if crypto.is_undefined() {
+ let msg = "self.crypto is undefined";
+ return Err(Error::new(ErrorKind::Unavailable, msg))
+ }
+
+ // Test if `crypto.getRandomValues` is undefined as well
+ let crypto: BrowserCrypto = crypto.into();
+ if crypto.get_random_values_fn().is_undefined() {
+ let msg = "crypto.getRandomValues is undefined";
+ return Err(Error::new(ErrorKind::Unavailable, msg))
+ }
+
+ // Ok! `self.crypto.getRandomValues` is a defined value, so let's
+ // assume we can do browser crypto.
+ Ok(OsRng::Browser(crypto))
+ }
+
+ fn fill_chunk(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ match *self {
+ OsRng::Node(ref n) => n.random_fill_sync(dest),
+ OsRng::Browser(ref n) => n.get_random_values(dest),
+ }
+ Ok(())
+ }
+
+ fn max_chunk_size(&self) -> usize {
+ match *self {
+ OsRng::Node(_) => usize::max_value(),
+ OsRng::Browser(_) => {
+ // see https://developer.mozilla.org/en-US/docs/Web/API/Crypto/getRandomValues
+ //
+ // where it says:
+ //
+ // > A QuotaExceededError DOMException is thrown if the
+ // > requested length is greater than 65536 bytes.
+ 65536
+ }
+ }
+ }
+
+ fn method_str(&self) -> &'static str {
+ match *self {
+ OsRng::Node(_) => "crypto.randomFillSync",
+ OsRng::Browser(_) => "crypto.getRandomValues",
+ }
+ }
+ }
+}
+
+
+#[cfg(test)]
+mod test {
+ use RngCore;
+ use super::OsRng;
+
+ #[test]
+ fn test_os_rng() {
+ let mut r = OsRng::new().unwrap();
+
+ r.next_u32();
+ r.next_u64();
+
+ let mut v1 = [0u8; 1000];
+ r.fill_bytes(&mut v1);
+
+ let mut v2 = [0u8; 1000];
+ r.fill_bytes(&mut v2);
+
+ let mut n_diff_bits = 0;
+ for i in 0..v1.len() {
+ n_diff_bits += (v1[i] ^ v2[i]).count_ones();
+ }
+
+ // Check at least 1 bit per byte differs. p(failure) < 1e-1000 with random input.
+ assert!(n_diff_bits >= v1.len() as u32);
+ }
+
+ #[test]
+ fn test_os_rng_empty() {
+ let mut r = OsRng::new().unwrap();
+
+ let mut empty = [0u8; 0];
+ r.fill_bytes(&mut empty);
+ }
+
+ #[test]
+ fn test_os_rng_huge() {
+ let mut r = OsRng::new().unwrap();
+
+ let mut huge = [0u8; 100_000];
+ r.fill_bytes(&mut huge);
+ }
+
+ #[cfg(not(any(target_arch = "wasm32", target_arch = "asmjs")))]
+ #[test]
+ fn test_os_rng_tasks() {
+ use std::sync::mpsc::channel;
+ use std::thread;
+
+ let mut txs = vec!();
+ for _ in 0..20 {
+ let (tx, rx) = channel();
+ txs.push(tx);
+
+ thread::spawn(move|| {
+ // wait until all the tasks are ready to go.
+ rx.recv().unwrap();
+
+ // deschedule to attempt to interleave things as much
+ // as possible (XXX: is this a good test?)
+ let mut r = OsRng::new().unwrap();
+ thread::yield_now();
+ let mut v = [0u8; 1000];
+
+ for _ in 0..100 {
+ r.next_u32();
+ thread::yield_now();
+ r.next_u64();
+ thread::yield_now();
+ r.fill_bytes(&mut v);
+ thread::yield_now();
+ }
+ });
+ }
+
+ // start all the tasks
+ for tx in txs.iter() {
+ tx.send(()).unwrap();
+ }
+ }
+}
diff --git a/rand/src/rngs/small.rs b/rand/src/rngs/small.rs
new file mode 100644
index 0000000..e74a83e
--- /dev/null
+++ b/rand/src/rngs/small.rs
@@ -0,0 +1,105 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A small fast RNG
+
+use {RngCore, SeedableRng, Error};
+
+#[cfg(all(rust_1_26, target_pointer_width = "64"))]
+type Rng = ::rand_pcg::Pcg64Mcg;
+#[cfg(not(all(rust_1_26, target_pointer_width = "64")))]
+type Rng = ::rand_pcg::Pcg32;
+
+/// An RNG recommended when small state, cheap initialization and good
+/// performance are required. The PRNG algorithm in `SmallRng` is chosen to be
+/// efficient on the current platform, **without consideration for cryptography
+/// or security**. The size of its state is much smaller than for [`StdRng`].
+///
+/// Reproducibility of output from this generator is, however, not required;
+/// future library versions may use a different internal generator with
+/// different output. Further, this generator may not be portable and can
+/// produce different output depending on the architecture. If you require
+/// reproducible output, use a named RNG. Refer to the documentation on the
+/// [`prng` module](../prng/index.html).
+///
+/// The current algorithm is [`Pcg64Mcg`] on 64-bit platforms with Rust version
+/// 1.26 and later, or [`Pcg32`] otherwise.
+///
+/// # Examples
+///
+/// Initializing `SmallRng` with a random seed can be done using [`FromEntropy`]:
+///
+/// ```
+/// # use rand::Rng;
+/// use rand::FromEntropy;
+/// use rand::rngs::SmallRng;
+///
+/// // Create small, cheap to initialize and fast RNG with a random seed.
+/// // The randomness is supplied by the operating system.
+/// let mut small_rng = SmallRng::from_entropy();
+/// # let v: u32 = small_rng.gen();
+/// ```
+///
+/// When initializing a lot of `SmallRng`s, using [`thread_rng`] can be more
+/// efficient:
+///
+/// ```
+/// use std::iter;
+/// use rand::{SeedableRng, thread_rng};
+/// use rand::rngs::SmallRng;
+///
+/// // Create a big, expensive to initialize and slower, but unpredictable RNG.
+/// // This is cached and done only once per thread.
+/// let mut thread_rng = thread_rng();
+/// // Create small, cheap to initialize and fast RNGs with random seeds.
+/// // One can generally assume this won't fail.
+/// let rngs: Vec<SmallRng> = iter::repeat(())
+/// .map(|()| SmallRng::from_rng(&mut thread_rng).unwrap())
+/// .take(10)
+/// .collect();
+/// ```
+///
+/// [`FromEntropy`]: ../trait.FromEntropy.html
+/// [`StdRng`]: struct.StdRng.html
+/// [`thread_rng`]: ../fn.thread_rng.html
+/// [`Pcg64Mcg`]: ../../rand_pcg/type.Pcg64Mcg.html
+/// [`Pcg32`]: ../../rand_pcg/type.Pcg32.html
+#[derive(Clone, Debug)]
+pub struct SmallRng(Rng);
+
+impl RngCore for SmallRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl SeedableRng for SmallRng {
+ type Seed = <Rng as SeedableRng>::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ SmallRng(Rng::from_seed(seed))
+ }
+
+ fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> {
+ Rng::from_rng(rng).map(SmallRng)
+ }
+}
diff --git a/rand/src/rngs/std.rs b/rand/src/rngs/std.rs
new file mode 100644
index 0000000..ce1658b
--- /dev/null
+++ b/rand/src/rngs/std.rs
@@ -0,0 +1,81 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The standard RNG
+
+use {RngCore, CryptoRng, Error, SeedableRng};
+use rand_hc::Hc128Rng;
+
+/// The standard RNG. The PRNG algorithm in `StdRng` is chosen to be efficient
+/// on the current platform, to be statistically strong and unpredictable
+/// (meaning a cryptographically secure PRNG).
+///
+/// The current algorithm used on all platforms is [HC-128].
+///
+/// Reproducibility of output from this generator is, however, not required;
+/// future library versions may use a different internal generator with
+/// different output. Further, this generator may not be portable and can
+/// produce different output depending on the architecture. If you require
+/// reproducible output, use a named RNG, for example [`ChaChaRng`].
+///
+/// [HC-128]: ../../rand_hc/struct.Hc128Rng.html
+/// [`ChaChaRng`]: ../../rand_chacha/struct.ChaChaRng.html
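+///
+/// # Examples
+///
+/// A small sketch of typical use (illustrative; it assumes the `FromEntropy`
+/// trait exported at the crate root of this version for seeding from the OS):
+///
+/// ```
+/// use rand::{FromEntropy, RngCore};
+/// use rand::rngs::StdRng;
+///
+/// let mut rng = StdRng::from_entropy();
+/// let _x: u64 = rng.next_u64();
+/// ```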
+#[derive(Clone, Debug)]
+pub struct StdRng(Hc128Rng);
+
+impl RngCore for StdRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest);
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl SeedableRng for StdRng {
+ type Seed = <Hc128Rng as SeedableRng>::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ StdRng(Hc128Rng::from_seed(seed))
+ }
+
+ fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> {
+ Hc128Rng::from_rng(rng).map(StdRng)
+ }
+}
+
+impl CryptoRng for StdRng {}
+
+
+#[cfg(test)]
+mod test {
+ use {RngCore, SeedableRng};
+ use rngs::StdRng;
+
+ #[test]
+ fn test_stdrng_construction() {
+ let seed = [1,0,0,0, 23,0,0,0, 200,1,0,0, 210,30,0,0,
+ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0];
+ let mut rng1 = StdRng::from_seed(seed);
+ assert_eq!(rng1.next_u64(), 15759097995037006553);
+
+ let mut rng2 = StdRng::from_rng(rng1).unwrap();
+ assert_eq!(rng2.next_u64(), 6766915756997287454);
+ }
+}
diff --git a/rand/src/rngs/thread.rs b/rand/src/rngs/thread.rs
new file mode 100644
index 0000000..ff772e3
--- /dev/null
+++ b/rand/src/rngs/thread.rs
@@ -0,0 +1,135 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Thread-local random number generator
+
+use std::cell::UnsafeCell;
+
+use {RngCore, CryptoRng, SeedableRng, Error};
+use rngs::adapter::ReseedingRng;
+use rngs::EntropyRng;
+use rand_hc::Hc128Core;
+
+// Rationale for using `UnsafeCell` in `ThreadRng`:
+//
+// Previously we used a `RefCell`, with an overhead of ~15%. There will only
+// ever be one mutable reference to the interior of the `UnsafeCell`, because
+// we only have such a reference inside `next_u32`, `next_u64`, etc. Within a
+// single thread (which is the definition of `ThreadRng`), there will only ever
+// be one of these methods active at a time.
+//
+// A possible scenario where there could be multiple mutable references is if
+// `ThreadRng` is used inside `next_u32` and co. But the implementation is
+// completely under our control. We just have to ensure none of them use
+// `ThreadRng` internally, which is nonsensical anyway. We should also never run
+// `ThreadRng` in destructors of its implementation, which is also nonsensical.
+//
+// The additional `Rc` is not strictly necessary, and could be removed. For now
+// it ensures `ThreadRng` stays `!Send` and `!Sync`, and implements `Clone`.
+
+
+// Number of generated bytes after which to reseed `ThreadRng`.
+//
+// The time it takes to reseed HC-128 is roughly equivalent to generating 7 KiB.
+// We pick a threshold here that is large enough to not reduce the average
+// performance too much, but also small enough to not make reseeding something
+// that basically never happens.
+const THREAD_RNG_RESEED_THRESHOLD: u64 = 32*1024*1024; // 32 MiB
+
+/// The type returned by [`thread_rng`], essentially just a reference to the
+/// PRNG in thread-local memory.
+///
+/// `ThreadRng` uses [`ReseedingRng`] wrapping the same PRNG as [`StdRng`],
+/// which is reseeded after generating 32 MiB of random data. A single instance
+/// is cached per thread and the returned `ThreadRng` is a reference to this
+/// instance — hence `ThreadRng` is neither `Send` nor `Sync` but is safe to use
+/// within a single thread. This RNG is seeded and reseeded via [`EntropyRng`]
+/// as required.
+///
+/// Note that the reseeding is done as an extra precaution against entropy
+/// leaks and is in theory unnecessary — to predict `ThreadRng`'s output, an
+/// attacker would have to either determine most of the RNG's seed or internal
+/// state, or crack the algorithm used.
+///
+/// Like [`StdRng`], `ThreadRng` is a cryptographically secure PRNG. The current
+/// algorithm used is [HC-128], which is an array-based PRNG that trades memory
+/// usage for better performance. This makes it similar to ISAAC, the algorithm
+/// used in `ThreadRng` before rand 0.5.
+///
+/// Cloning this handle just produces a new reference to the same thread-local
+/// generator.
+///
+/// [`thread_rng`]: ../fn.thread_rng.html
+/// [`ReseedingRng`]: adapter/struct.ReseedingRng.html
+/// [`StdRng`]: struct.StdRng.html
+/// [`EntropyRng`]: struct.EntropyRng.html
+/// [HC-128]: ../../rand_hc/struct.Hc128Rng.html
+#[derive(Clone, Debug)]
+pub struct ThreadRng {
+ // use of raw pointer implies type is neither Send nor Sync
+ rng: *mut ReseedingRng<Hc128Core, EntropyRng>,
+}
+
+thread_local!(
+ static THREAD_RNG_KEY: UnsafeCell<ReseedingRng<Hc128Core, EntropyRng>> = {
+ let mut entropy_source = EntropyRng::new();
+ let r = Hc128Core::from_rng(&mut entropy_source).unwrap_or_else(|err|
+ panic!("could not initialize thread_rng: {}", err));
+ let rng = ReseedingRng::new(r,
+ THREAD_RNG_RESEED_THRESHOLD,
+ entropy_source);
+ UnsafeCell::new(rng)
+ }
+);
+
+/// Retrieve the lazily-initialized thread-local random number
+/// generator, seeded by the system. Intended to be used in method
+/// chaining style, e.g. `thread_rng().gen::<i32>()`, or cached locally, e.g.
+/// `let mut rng = thread_rng();`.
+///
+/// For more information see [`ThreadRng`].
+///
+/// [`ThreadRng`]: rngs/struct.ThreadRng.html
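+///
+/// # Example
+///
+/// A brief sketch of both styles mentioned above (illustrative; `gen` and
+/// `gen_range` are provided by the `Rng` trait):
+///
+/// ```
+/// use rand::Rng;
+///
+/// // Cache the thread-local generator in a local variable...
+/// let mut rng = rand::thread_rng();
+/// let die = rng.gen_range(1, 7);
+/// assert!(die >= 1 && die < 7);
+///
+/// // ...or use it in method-chaining style:
+/// let _coin: bool = rand::thread_rng().gen();
+/// ```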
+pub fn thread_rng() -> ThreadRng {
+ ThreadRng { rng: THREAD_RNG_KEY.with(|t| t.get()) }
+}
+
+impl RngCore for ThreadRng {
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ unsafe { (*self.rng).next_u32() }
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ unsafe { (*self.rng).next_u64() }
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ unsafe { (*self.rng).fill_bytes(dest) }
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ unsafe { (*self.rng).try_fill_bytes(dest) }
+ }
+}
+
+impl CryptoRng for ThreadRng {}
+
+
+#[cfg(test)]
+mod test {
+ #[test]
+ #[cfg(not(feature="stdweb"))]
+ fn test_thread_rng() {
+ use Rng;
+ let mut r = ::thread_rng();
+ r.gen::<i32>();
+ assert_eq!(r.gen_range(0, 1), 0);
+ }
+}
diff --git a/rand/src/seq.rs b/rand/src/seq.rs
deleted file mode 100644
index a7889fe..0000000
--- a/rand/src/seq.rs
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Functions for randomly accessing and sampling sequences.
-
-use super::Rng;
-
-// This crate is only enabled when either std or alloc is available.
-// BTreeMap is not as fast in tests, but better than nothing.
-#[cfg(feature="std")] use std::collections::HashMap;
-#[cfg(not(feature="std"))] use alloc::btree_map::BTreeMap;
-
-#[cfg(not(feature="std"))] use alloc::Vec;
-
-/// Randomly sample `amount` elements from a finite iterator.
-///
-/// The following can be returned:
-/// - `Ok`: `Vec` of `amount` non-repeating randomly sampled elements. The order is not random.
-/// - `Err`: `Vec` of all the elements from `iterable` in sequential order. This happens when the
-/// length of `iterable` was less than `amount`. This is considered an error since exactly
-/// `amount` elements is typically expected.
-///
-/// This implementation uses `O(len(iterable))` time and `O(amount)` memory.
-///
-/// # Example
-///
-/// ```rust
-/// use rand::{thread_rng, seq};
-///
-/// let mut rng = thread_rng();
-/// let sample = seq::sample_iter(&mut rng, 1..100, 5).unwrap();
-/// println!("{:?}", sample);
-/// ```
-pub fn sample_iter<T, I, R>(rng: &mut R, iterable: I, amount: usize) -> Result<Vec<T>, Vec<T>>
- where I: IntoIterator<Item=T>,
- R: Rng,
-{
- let mut iter = iterable.into_iter();
- let mut reservoir = Vec::with_capacity(amount);
- reservoir.extend(iter.by_ref().take(amount));
-
- // Continue unless the iterator was exhausted
- //
- // note: this prevents iterators that "restart" from causing problems.
- // If the iterator stops once, then so do we.
- if reservoir.len() == amount {
- for (i, elem) in iter.enumerate() {
- let k = rng.gen_range(0, i + 1 + amount);
- if let Some(spot) = reservoir.get_mut(k) {
- *spot = elem;
- }
- }
- Ok(reservoir)
- } else {
- // Don't hang onto extra memory. There is a corner case where
- // `amount` was much less than `len(iterable)`.
- reservoir.shrink_to_fit();
- Err(reservoir)
- }
-}
-
-/// Randomly sample exactly `amount` values from `slice`.
-///
-/// The values are non-repeating and in random order.
-///
-/// This implementation uses `O(amount)` time and memory.
-///
-/// Panics if `amount > slice.len()`
-///
-/// # Example
-///
-/// ```rust
-/// use rand::{thread_rng, seq};
-///
-/// let mut rng = thread_rng();
-/// let values = vec![5, 6, 1, 3, 4, 6, 7];
-/// println!("{:?}", seq::sample_slice(&mut rng, &values, 3));
-/// ```
-pub fn sample_slice<R, T>(rng: &mut R, slice: &[T], amount: usize) -> Vec<T>
- where R: Rng,
- T: Clone
-{
- let indices = sample_indices(rng, slice.len(), amount);
-
- let mut out = Vec::with_capacity(amount);
- out.extend(indices.iter().map(|i| slice[*i].clone()));
- out
-}
-
-/// Randomly sample exactly `amount` references from `slice`.
-///
-/// The references are non-repeating and in random order.
-///
-/// This implementation uses `O(amount)` time and memory.
-///
-/// Panics if `amount > slice.len()`
-///
-/// # Example
-///
-/// ```rust
-/// use rand::{thread_rng, seq};
-///
-/// let mut rng = thread_rng();
-/// let values = vec![5, 6, 1, 3, 4, 6, 7];
-/// println!("{:?}", seq::sample_slice_ref(&mut rng, &values, 3));
-/// ```
-pub fn sample_slice_ref<'a, R, T>(rng: &mut R, slice: &'a [T], amount: usize) -> Vec<&'a T>
- where R: Rng
-{
- let indices = sample_indices(rng, slice.len(), amount);
-
- let mut out = Vec::with_capacity(amount);
- out.extend(indices.iter().map(|i| &slice[*i]));
- out
-}
-
-/// Randomly sample exactly `amount` indices from `0..length`.
-///
-/// The values are non-repeating and in random order.
-///
-/// This implementation uses `O(amount)` time and memory.
-///
-/// This method is used internally by the slice sampling methods, but it can sometimes be useful to
-/// have the indices themselves so this is provided as an alternative.
-///
-/// Panics if `amount > length`
-pub fn sample_indices<R>(rng: &mut R, length: usize, amount: usize) -> Vec<usize>
- where R: Rng,
-{
- if amount > length {
- panic!("`amount` must be less than or equal to `slice.len()`");
- }
-
- // We are going to have to allocate at least `amount` for the output no matter what. However,
- // if we use the `cached` version we will have to allocate `amount` as a HashMap as well since
- // it inserts an element for every loop.
- //
- // Therefore, if `amount >= length / 2` then inplace will be both faster and use less memory.
- // In fact, benchmarks show the inplace version is faster for length up to about 20 times
- // faster than amount.
- //
- // TODO: there is probably even more fine-tuning that can be done here since
- // `HashMap::with_capacity(amount)` probably allocates more than `amount` in practice,
- // and a trade off could probably be made between memory/cpu, since hashmap operations
- // are slower than array index swapping.
- if amount >= length / 20 {
- sample_indices_inplace(rng, length, amount)
- } else {
- sample_indices_cache(rng, length, amount)
- }
-}
-
-/// Sample an amount of indices using an inplace partial fisher yates method.
-///
-/// This allocates the entire `length` of indices and randomizes only the first `amount`.
-/// It then truncates to `amount` and returns.
-///
-/// This is better than using a HashMap "cache" when `amount >= length / 2` since it does not
-/// require allocating an extra cache and is much faster.
-fn sample_indices_inplace<R>(rng: &mut R, length: usize, amount: usize) -> Vec<usize>
- where R: Rng,
-{
- debug_assert!(amount <= length);
- let mut indices: Vec<usize> = Vec::with_capacity(length);
- indices.extend(0..length);
- for i in 0..amount {
- let j: usize = rng.gen_range(i, length);
- let tmp = indices[i];
- indices[i] = indices[j];
- indices[j] = tmp;
- }
- indices.truncate(amount);
- debug_assert_eq!(indices.len(), amount);
- indices
-}
-
-
-/// This method performs a partial fisher-yates on a range of indices using a HashMap
-/// as a cache to record potential collisions.
-///
-/// The cache avoids allocating the entire `length` of values. This is especially useful when
-/// `amount <<< length`, i.e. select 3 non-repeating from 1_000_000
-fn sample_indices_cache<R>(
- rng: &mut R,
- length: usize,
- amount: usize,
-) -> Vec<usize>
- where R: Rng,
-{
- debug_assert!(amount <= length);
- #[cfg(feature="std")] let mut cache = HashMap::with_capacity(amount);
- #[cfg(not(feature="std"))] let mut cache = BTreeMap::new();
- let mut out = Vec::with_capacity(amount);
- for i in 0..amount {
- let j: usize = rng.gen_range(i, length);
-
- // equiv: let tmp = slice[i];
- let tmp = match cache.get(&i) {
- Some(e) => *e,
- None => i,
- };
-
- // equiv: slice[i] = slice[j];
- let x = match cache.get(&j) {
- Some(x) => *x,
- None => j,
- };
-
- // equiv: slice[j] = tmp;
- cache.insert(j, tmp);
-
- // note that in the inplace version, slice[i] is automatically "returned" value
- out.push(x);
- }
- debug_assert_eq!(out.len(), amount);
- out
-}
-
-#[cfg(test)]
-mod test {
- use super::*;
- use {thread_rng, XorShiftRng, SeedableRng};
-
- #[test]
- fn test_sample_iter() {
- let min_val = 1;
- let max_val = 100;
-
- let mut r = thread_rng();
- let vals = (min_val..max_val).collect::<Vec<i32>>();
- let small_sample = sample_iter(&mut r, vals.iter(), 5).unwrap();
- let large_sample = sample_iter(&mut r, vals.iter(), vals.len() + 5).unwrap_err();
-
- assert_eq!(small_sample.len(), 5);
- assert_eq!(large_sample.len(), vals.len());
- // no randomization happens when amount >= len
- assert_eq!(large_sample, vals.iter().collect::<Vec<_>>());
-
- assert!(small_sample.iter().all(|e| {
- **e >= min_val && **e <= max_val
- }));
- }
- #[test]
- fn test_sample_slice_boundaries() {
- let empty: &[u8] = &[];
-
- let mut r = thread_rng();
-
- // sample 0 items
- assert_eq!(sample_slice(&mut r, empty, 0), vec![]);
- assert_eq!(sample_slice(&mut r, &[42, 2, 42], 0), vec![]);
-
- // sample 1 item
- assert_eq!(sample_slice(&mut r, &[42], 1), vec![42]);
- let v = sample_slice(&mut r, &[1, 42], 1)[0];
- assert!(v == 1 || v == 42);
-
- // sample "all" the items
- let v = sample_slice(&mut r, &[42, 133], 2);
- assert!(v == vec![42, 133] || v == vec![133, 42]);
-
- assert_eq!(sample_indices_inplace(&mut r, 0, 0), vec![]);
- assert_eq!(sample_indices_inplace(&mut r, 1, 0), vec![]);
- assert_eq!(sample_indices_inplace(&mut r, 1, 1), vec![0]);
-
- assert_eq!(sample_indices_cache(&mut r, 0, 0), vec![]);
- assert_eq!(sample_indices_cache(&mut r, 1, 0), vec![]);
- assert_eq!(sample_indices_cache(&mut r, 1, 1), vec![0]);
-
- // Make sure lucky 777's aren't lucky
- let slice = &[42, 777];
- let mut num_42 = 0;
- let total = 1000;
- for _ in 0..total {
- let v = sample_slice(&mut r, slice, 1);
- assert_eq!(v.len(), 1);
- let v = v[0];
- assert!(v == 42 || v == 777);
- if v == 42 {
- num_42 += 1;
- }
- }
- let ratio_42 = num_42 as f64 / 1000 as f64;
- assert!(0.4 <= ratio_42 || ratio_42 <= 0.6, "{}", ratio_42);
- }
-
- #[test]
- fn test_sample_slice() {
- let xor_rng = XorShiftRng::from_seed;
-
- let max_range = 100;
- let mut r = thread_rng();
-
- for length in 1usize..max_range {
- let amount = r.gen_range(0, length);
- let seed: [u32; 4] = [
- r.next_u32(), r.next_u32(), r.next_u32(), r.next_u32()
- ];
-
- println!("Selecting indices: len={}, amount={}, seed={:?}", length, amount, seed);
-
- // assert that the two index methods give exactly the same result
- let inplace = sample_indices_inplace(
- &mut xor_rng(seed), length, amount);
- let cache = sample_indices_cache(
- &mut xor_rng(seed), length, amount);
- assert_eq!(inplace, cache);
-
- // assert the basics work
- let regular = sample_indices(
- &mut xor_rng(seed), length, amount);
- assert_eq!(regular.len(), amount);
- assert!(regular.iter().all(|e| *e < length));
- assert_eq!(regular, inplace);
-
- // also test that sampling the slice works
- let vec: Vec<usize> = (0..length).collect();
- {
- let result = sample_slice(&mut xor_rng(seed), &vec, amount);
- assert_eq!(result, regular);
- }
-
- {
- let result = sample_slice_ref(&mut xor_rng(seed), &vec, amount);
- let expected = regular.iter().map(|v| v).collect::<Vec<_>>();
- assert_eq!(result, expected);
- }
- }
- }
-}
diff --git a/rand/src/seq/index.rs b/rand/src/seq/index.rs
new file mode 100644
index 0000000..3d4df3a
--- /dev/null
+++ b/rand/src/seq/index.rs
@@ -0,0 +1,378 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Index sampling
+
+#[cfg(feature="alloc")] use core::slice;
+
+#[cfg(feature="std")] use std::vec;
+#[cfg(all(feature="alloc", not(feature="std")))] use alloc::vec::{self, Vec};
+// BTreeSet is not as fast in tests, but better than nothing.
+#[cfg(feature="std")] use std::collections::{HashSet};
+#[cfg(all(feature="alloc", not(feature="std")))] use alloc::collections::BTreeSet;
+
+#[cfg(feature="alloc")] use distributions::{Distribution, Uniform};
+use Rng;
+
+/// A vector of indices.
+///
+/// Multiple internal representations are possible.
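+///
+/// A small illustrative sketch of the accessors defined below:
+///
+/// ```
+/// use rand::seq::index::IndexVec;
+///
+/// let iv = IndexVec::from(vec![1u32, 3, 5]);
+/// assert_eq!(iv.len(), 3);
+/// assert_eq!(iv.index(1), 3);
+/// assert_eq!(iv.into_vec(), vec![1, 3, 5]);
+/// ```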
+#[derive(Clone, Debug)]
+pub enum IndexVec {
+ #[doc(hidden)] U32(Vec<u32>),
+ #[doc(hidden)] USize(Vec<usize>),
+}
+
+impl IndexVec {
+ /// Returns the number of indices
+ pub fn len(&self) -> usize {
+ match self {
+ &IndexVec::U32(ref v) => v.len(),
+ &IndexVec::USize(ref v) => v.len(),
+ }
+ }
+
+ /// Return the value at the given `index`.
+ ///
+ /// (Note: we cannot implement `std::ops::Index` because of lifetime
+ /// restrictions.)
+ pub fn index(&self, index: usize) -> usize {
+ match self {
+ &IndexVec::U32(ref v) => v[index] as usize,
+ &IndexVec::USize(ref v) => v[index],
+ }
+ }
+
+ /// Return result as a `Vec<usize>`. Conversion may or may not be trivial.
+ pub fn into_vec(self) -> Vec<usize> {
+ match self {
+ IndexVec::U32(v) => v.into_iter().map(|i| i as usize).collect(),
+ IndexVec::USize(v) => v,
+ }
+ }
+
+ /// Iterate over the indices as a sequence of `usize` values
+ pub fn iter<'a>(&'a self) -> IndexVecIter<'a> {
+ match self {
+ &IndexVec::U32(ref v) => IndexVecIter::U32(v.iter()),
+ &IndexVec::USize(ref v) => IndexVecIter::USize(v.iter()),
+ }
+ }
+
+ /// Convert into an iterator over the indices as a sequence of `usize` values
+ pub fn into_iter(self) -> IndexVecIntoIter {
+ match self {
+ IndexVec::U32(v) => IndexVecIntoIter::U32(v.into_iter()),
+ IndexVec::USize(v) => IndexVecIntoIter::USize(v.into_iter()),
+ }
+ }
+}
+
+impl PartialEq for IndexVec {
+ fn eq(&self, other: &IndexVec) -> bool {
+ use self::IndexVec::*;
+ match (self, other) {
+ (&U32(ref v1), &U32(ref v2)) => v1 == v2,
+ (&USize(ref v1), &USize(ref v2)) => v1 == v2,
+ (&U32(ref v1), &USize(ref v2)) => (v1.len() == v2.len())
+ && (v1.iter().zip(v2.iter()).all(|(x, y)| *x as usize == *y)),
+ (&USize(ref v1), &U32(ref v2)) => (v1.len() == v2.len())
+ && (v1.iter().zip(v2.iter()).all(|(x, y)| *x == *y as usize)),
+ }
+ }
+}
+
+impl From<Vec<u32>> for IndexVec {
+ fn from(v: Vec<u32>) -> Self {
+ IndexVec::U32(v)
+ }
+}
+
+impl From<Vec<usize>> for IndexVec {
+ fn from(v: Vec<usize>) -> Self {
+ IndexVec::USize(v)
+ }
+}
+
+/// Return type of `IndexVec::iter`.
+#[derive(Debug)]
+pub enum IndexVecIter<'a> {
+ #[doc(hidden)] U32(slice::Iter<'a, u32>),
+ #[doc(hidden)] USize(slice::Iter<'a, usize>),
+}
+
+impl<'a> Iterator for IndexVecIter<'a> {
+ type Item = usize;
+ fn next(&mut self) -> Option<usize> {
+ use self::IndexVecIter::*;
+ match self {
+ &mut U32(ref mut iter) => iter.next().map(|i| *i as usize),
+ &mut USize(ref mut iter) => iter.next().cloned(),
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ match self {
+ &IndexVecIter::U32(ref v) => v.size_hint(),
+ &IndexVecIter::USize(ref v) => v.size_hint(),
+ }
+ }
+}
+
+impl<'a> ExactSizeIterator for IndexVecIter<'a> {}
+
+/// Return type of `IndexVec::into_iter`.
+#[derive(Clone, Debug)]
+pub enum IndexVecIntoIter {
+ #[doc(hidden)] U32(vec::IntoIter<u32>),
+ #[doc(hidden)] USize(vec::IntoIter<usize>),
+}
+
+impl Iterator for IndexVecIntoIter {
+ type Item = usize;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ use self::IndexVecIntoIter::*;
+ match self {
+ &mut U32(ref mut v) => v.next().map(|i| i as usize),
+ &mut USize(ref mut v) => v.next(),
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ use self::IndexVecIntoIter::*;
+ match self {
+ &U32(ref v) => v.size_hint(),
+ &USize(ref v) => v.size_hint(),
+ }
+ }
+}
+
+impl ExactSizeIterator for IndexVecIntoIter {}
+
+
+/// Randomly sample exactly `amount` distinct indices from `0..length`, and
+/// return them in random order (fully shuffled).
+///
+/// This method is used internally by the slice sampling methods, but it can
+/// sometimes be useful to have the indices themselves so this is provided as
+/// an alternative.
+///
+/// The implementation used is not specified; we automatically select the
+/// fastest available algorithm for the `length` and `amount` parameters
+/// (based on detailed profiling on an Intel Haswell CPU). Roughly speaking,
+/// complexity is `O(amount)`, except that when `amount` is small, performance
+/// is closer to `O(amount^2)`, and when `length` is close to `amount` then
+/// `O(length)`.
+///
+/// Note that performance is significantly better over `u32` indices than over
+/// `u64` indices. Because of this we hide the underlying type behind an
+/// abstraction, `IndexVec`.
+///
+/// If an allocation-free `no_std` function is required, it is suggested
+/// to adapt the internal `sample_floyd` implementation.
+///
+/// Panics if `amount > length`.
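+///
+/// # Example
+///
+/// An illustrative sketch: pick 3 distinct indices out of `0..10`.
+///
+/// ```
+/// use rand::seq::index::sample;
+///
+/// let mut rng = rand::thread_rng();
+/// let chosen = sample(&mut rng, 10, 3);
+/// assert_eq!(chosen.len(), 3);
+/// for i in chosen.iter() {
+///     assert!(i < 10);
+/// }
+/// ```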
+pub fn sample<R>(rng: &mut R, length: usize, amount: usize) -> IndexVec
+ where R: Rng + ?Sized,
+{
+ if amount > length {
+ panic!("`amount` of samples must be less than or equal to `length`");
+ }
+ if length > (::core::u32::MAX as usize) {
+ // We never want to use inplace here, but could use floyd's alg
+ // Lazy version: always use the cache alg.
+ return sample_rejection(rng, length, amount);
+ }
+ let amount = amount as u32;
+ let length = length as u32;
+
+ // Choice of algorithm here depends on both length and amount. See:
+ // https://github.com/rust-random/rand/pull/479
+ // We do some calculations with f32. Accuracy is not very important.
+
+ if amount < 163 {
+ const C: [[f32; 2]; 2] = [[1.6, 8.0/45.0], [10.0, 70.0/9.0]];
+ let j = if length < 500_000 { 0 } else { 1 };
+ let amount_fp = amount as f32;
+ let m4 = C[0][j] * amount_fp;
+ // Short-cut: when amount < 12, floyd's is always faster
+ if amount > 11 && (length as f32) < (C[1][j] + m4) * amount_fp {
+ sample_inplace(rng, length, amount)
+ } else {
+ sample_floyd(rng, length, amount)
+ }
+ } else {
+ const C: [f32; 2] = [270.0, 330.0/9.0];
+ let j = if length < 500_000 { 0 } else { 1 };
+ if (length as f32) < C[j] * (amount as f32) {
+ sample_inplace(rng, length, amount)
+ } else {
+ // note: could have a specific u32 impl, but I'm lazy and
+ // generics don't have usable conversions
+ sample_rejection(rng, length as usize, amount as usize)
+ }
+ }
+}
+
+/// Randomly sample exactly `amount` indices from `0..length`, using Floyd's
+/// combination algorithm.
+///
+/// The output values are fully shuffled. (Producing a fully shuffled result adds
+/// under 50% overhead compared with leaving the output unshuffled.)
+///
+/// This implementation uses `O(amount)` memory and `O(amount^2)` time.
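+///
+/// A hypothetical trace of the fully-shuffled variant for `length = 5`,
+/// `amount = 3`: `j` runs over `2, 3, 4` and each step draws
+/// `t = gen_range(0, j + 1)`:
+///
+/// ```text
+/// j = 2, t = 1  -> [1]
+/// j = 3, t = 1  -> [3, 1]     (collision: insert j at t's position)
+/// j = 4, t = 0  -> [3, 1, 0]
+/// ```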
+fn sample_floyd<R>(rng: &mut R, length: u32, amount: u32) -> IndexVec
+ where R: Rng + ?Sized,
+{
+ // For small amount we use Floyd's fully-shuffled variant. For larger
+ // amounts this is slow due to Vec::insert performance, so we shuffle
+ // afterwards. Benchmarks show little overhead from extra logic.
+ let floyd_shuffle = amount < 50;
+
+ debug_assert!(amount <= length);
+ let mut indices = Vec::with_capacity(amount as usize);
+ for j in length - amount .. length {
+ let t = rng.gen_range(0, j + 1);
+ if floyd_shuffle {
+ if let Some(pos) = indices.iter().position(|&x| x == t) {
+ indices.insert(pos, j);
+ continue;
+ }
+ } else {
+ if indices.contains(&t) {
+ indices.push(j);
+ continue;
+ }
+ }
+ indices.push(t);
+ }
+ if !floyd_shuffle {
+ // Reimplement SliceRandom::shuffle with smaller indices
+ for i in (1..amount).rev() {
+ // invariant: elements with index > i have been locked in place.
+ indices.swap(i as usize, rng.gen_range(0, i + 1) as usize);
+ }
+ }
+ IndexVec::from(indices)
+}
+
+/// Randomly sample exactly `amount` indices from `0..length`, using an
+/// in-place partial Fisher-Yates method.
+///
+/// This allocates the entire `length` of indices and randomizes only the first `amount`.
+/// It then truncates to `amount` and returns.
+///
+/// This method is not appropriate for large `length`, since it allocates that
+/// many indices up front; because of this we only implement it for `u32` indices
+/// (which also improves performance in all cases).
+///
+/// Set-up is `O(length)` time and memory and shuffling is `O(amount)` time.
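+///
+/// A hypothetical trace for `length = 5`, `amount = 2`:
+///
+/// ```text
+/// start:                     [0, 1, 2, 3, 4]
+/// i = 0, j = 3 -> swap(0, 3) [3, 1, 2, 0, 4]
+/// i = 1, j = 1 -> swap(1, 1) [3, 1, 2, 0, 4]
+/// truncate(2)                [3, 1]
+/// ```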
+fn sample_inplace<R>(rng: &mut R, length: u32, amount: u32) -> IndexVec
+ where R: Rng + ?Sized,
+{
+ debug_assert!(amount <= length);
+ let mut indices: Vec<u32> = Vec::with_capacity(length as usize);
+ indices.extend(0..length);
+ for i in 0..amount {
+ let j: u32 = rng.gen_range(i, length);
+ indices.swap(i as usize, j as usize);
+ }
+ indices.truncate(amount as usize);
+ debug_assert_eq!(indices.len(), amount as usize);
+ IndexVec::from(indices)
+}
+
+/// Randomly sample exactly `amount` indices from `0..length`, using rejection
+/// sampling.
+///
+/// Since `amount` is much smaller than `length`, there is a low chance of a random sample in
+/// `0..length` being a duplicate. We test for duplicates and resample where
+/// necessary. The algorithm is `O(amount)` time and memory.
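+///
+/// For example, with `amount = 10` and `length = 1_000_000`, the expected number
+/// of rejected draws is roughly `(0 + 1 + ... + 9) / length`, i.e. about
+/// `0.000045`, so almost every draw is accepted on the first try.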
+fn sample_rejection<R>(rng: &mut R, length: usize, amount: usize) -> IndexVec
+ where R: Rng + ?Sized,
+{
+ debug_assert!(amount < length);
+ #[cfg(feature="std")] let mut cache = HashSet::with_capacity(amount);
+ #[cfg(not(feature="std"))] let mut cache = BTreeSet::new();
+ let distr = Uniform::new(0, length);
+ let mut indices = Vec::with_capacity(amount);
+ for _ in 0..amount {
+ let mut pos = distr.sample(rng);
+ while !cache.insert(pos) {
+ pos = distr.sample(rng);
+ }
+ indices.push(pos);
+ }
+
+ debug_assert_eq!(indices.len(), amount);
+ IndexVec::from(indices)
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_sample_boundaries() {
+ let mut r = ::test::rng(404);
+
+ assert_eq!(sample_inplace(&mut r, 0, 0).len(), 0);
+ assert_eq!(sample_inplace(&mut r, 1, 0).len(), 0);
+ assert_eq!(sample_inplace(&mut r, 1, 1).into_vec(), vec![0]);
+
+ assert_eq!(sample_rejection(&mut r, 1, 0).len(), 0);
+
+ assert_eq!(sample_floyd(&mut r, 0, 0).len(), 0);
+ assert_eq!(sample_floyd(&mut r, 1, 0).len(), 0);
+ assert_eq!(sample_floyd(&mut r, 1, 1).into_vec(), vec![0]);
+
+ // These algorithms should be fast with big numbers. Test average.
+ let sum: usize = sample_rejection(&mut r, 1 << 25, 10)
+ .into_iter().sum();
+ assert!(1 << 25 < sum && sum < (1 << 25) * 25);
+
+ let sum: usize = sample_floyd(&mut r, 1 << 25, 10)
+ .into_iter().sum();
+ assert!(1 << 25 < sum && sum < (1 << 25) * 25);
+ }
+
+ #[test]
+ fn test_sample_alg() {
+ let seed_rng = ::test::rng;
+
+ // We can't test which algorithm is used directly, but Floyd's alg
+ // should produce different results from the others. (Also, `inplace`
+ // and `cached` currently use different sizes thus produce different results.)
+
+ // A small length and relatively large amount should use inplace
+ let (length, amount): (usize, usize) = (100, 50);
+ let v1 = sample(&mut seed_rng(420), length, amount);
+ let v2 = sample_inplace(&mut seed_rng(420), length as u32, amount as u32);
+ assert!(v1.iter().all(|e| e < length));
+ assert_eq!(v1, v2);
+
+ // Test Floyd's alg does produce different results
+ let v3 = sample_floyd(&mut seed_rng(420), length as u32, amount as u32);
+ assert!(v1 != v3);
+
+ // A large length and small amount should use Floyd
+ let (length, amount): (usize, usize) = (1<<20, 50);
+ let v1 = sample(&mut seed_rng(421), length, amount);
+ let v2 = sample_floyd(&mut seed_rng(421), length as u32, amount as u32);
+ assert!(v1.iter().all(|e| e < length));
+ assert_eq!(v1, v2);
+
+ // A large length and larger amount should use cache
+ let (length, amount): (usize, usize) = (1<<20, 600);
+ let v1 = sample(&mut seed_rng(422), length, amount);
+ let v2 = sample_rejection(&mut seed_rng(422), length, amount);
+ assert!(v1.iter().all(|e| e < length));
+ assert_eq!(v1, v2);
+ }
+}
diff --git a/rand/src/seq/mod.rs b/rand/src/seq/mod.rs
new file mode 100644
index 0000000..9959602
--- /dev/null
+++ b/rand/src/seq/mod.rs
@@ -0,0 +1,836 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Functions for randomly accessing and sampling sequences.
+//!
+//! This module provides the `SliceRandom` and `IteratorRandom` extension traits
+//! (random element selection, sampling and shuffling for slices and iterators
+//! respectively), and, with the `alloc` feature, the `index` submodule for
+//! sampling indices directly.
+
+
+#[cfg(feature="alloc")] pub mod index;
+
+#[cfg(feature="alloc")] use core::ops::Index;
+
+#[cfg(all(feature="alloc", not(feature="std")))] use alloc::vec::Vec;
+
+use Rng;
+#[cfg(feature="alloc")] use distributions::WeightedError;
+#[cfg(feature="alloc")] use distributions::uniform::{SampleUniform, SampleBorrow};
+
+/// Extension trait on slices, providing random mutation and sampling methods.
+///
+/// An implementation is provided for slices. This may also be implementable for
+/// other types.
+pub trait SliceRandom {
+ /// The element type.
+ type Item;
+
+ /// Returns a reference to one random element of the slice, or `None` if the
+ /// slice is empty.
+ ///
+ /// Depending on the implementation, complexity is expected to be `O(1)`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use rand::thread_rng;
+ /// use rand::seq::SliceRandom;
+ ///
+ /// let choices = [1, 2, 4, 8, 16, 32];
+ /// let mut rng = thread_rng();
+ /// println!("{:?}", choices.choose(&mut rng));
+ /// assert_eq!(choices[..0].choose(&mut rng), None);
+ /// ```
+ fn choose<R>(&self, rng: &mut R) -> Option<&Self::Item>
+ where R: Rng + ?Sized;
+
+ /// Returns a mutable reference to one random element of the slice, or
+ /// `None` if the slice is empty.
+ ///
+ /// Depending on the implementation, complexity is expected to be `O(1)`.
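+ ///
+ /// # Example
+ ///
+ /// A minimal sketch (assumes the default `std` feature, for `thread_rng`):
+ ///
+ /// ```
+ /// use rand::seq::SliceRandom;
+ ///
+ /// let mut rng = rand::thread_rng();
+ /// let mut scores = [0, 0, 0];
+ /// if let Some(score) = scores.choose_mut(&mut rng) {
+ ///     // bump one randomly chosen entry
+ ///     *score += 1;
+ /// }
+ /// assert_eq!(scores.iter().sum::<i32>(), 1);
+ /// ```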
+ fn choose_mut<R>(&mut self, rng: &mut R) -> Option<&mut Self::Item>
+ where R: Rng + ?Sized;
+
+ /// Produces an iterator that chooses `amount` elements from the slice at
+ /// random without repeating any, and returns them in random order.
+ ///
+ /// In case this API is not sufficiently flexible, use `index::sample` then
+ /// apply the indices to the slice.
+ ///
+ /// Complexity is expected to be the same as `index::sample`.
+ ///
+ /// # Example
+ /// ```
+ /// use rand::seq::SliceRandom;
+ ///
+ /// let mut rng = &mut rand::thread_rng();
+ /// let sample = "Hello, audience!".as_bytes();
+ ///
+ /// // collect the results into a vector:
+ /// let v: Vec<u8> = sample.choose_multiple(&mut rng, 3).cloned().collect();
+ ///
+ /// // store in a buffer:
+ /// let mut buf = [0u8; 5];
+ /// for (b, slot) in sample.choose_multiple(&mut rng, buf.len()).zip(buf.iter_mut()) {
+ /// *slot = *b;
+ /// }
+ /// ```
+ #[cfg(feature = "alloc")]
+ fn choose_multiple<R>(&self, rng: &mut R, amount: usize) -> SliceChooseIter<Self, Self::Item>
+ where R: Rng + ?Sized;
+
+ /// Similar to [`choose`], where the likelihood of each outcome may be
+ /// specified. The specified function `weight` maps items `x` to a relative
+ /// likelihood `weight(x)`. The probability of each item being selected is
+ /// therefore `weight(x) / s`, where `s` is the sum of all `weight(x)`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use rand::prelude::*;
+ ///
+ /// let choices = [('a', 2), ('b', 1), ('c', 1)];
+ /// let mut rng = thread_rng();
+ /// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c'
+ /// println!("{:?}", choices.choose_weighted(&mut rng, |item| item.1).unwrap().0);
+ /// ```
+ /// [`choose`]: trait.SliceRandom.html#method.choose
+ #[cfg(feature = "alloc")]
+ fn choose_weighted<R, F, B, X>(&self, rng: &mut R, weight: F) -> Result<&Self::Item, WeightedError>
+ where R: Rng + ?Sized,
+ F: Fn(&Self::Item) -> B,
+ B: SampleBorrow<X>,
+ X: SampleUniform +
+ for<'a> ::core::ops::AddAssign<&'a X> +
+ ::core::cmp::PartialOrd<X> +
+ Clone +
+ Default;
+
+ /// Similar to [`choose_mut`], where the likelihood of each outcome may be
+ /// specified. The specified function `weight` maps items `x` to a relative
+ /// likelihood `weight(x)`. The probability of each item being selected is
+ /// therefore `weight(x) / s`, where `s` is the sum of all `weight(x)`.
+ ///
+ /// See also [`choose_weighted`].
+ ///
+ /// [`choose_mut`]: trait.SliceRandom.html#method.choose_mut
+ /// [`choose_weighted`]: trait.SliceRandom.html#method.choose_weighted
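+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, mirroring the `choose_weighted` example above:
+ ///
+ /// ```
+ /// use rand::prelude::*;
+ ///
+ /// let mut counts = [('a', 0), ('b', 0)];
+ /// let mut rng = thread_rng();
+ /// // 'a' is twice as likely to be incremented as 'b'
+ /// counts.choose_weighted_mut(&mut rng, |item| if item.0 == 'a' { 2 } else { 1 })
+ ///     .unwrap().1 += 1;
+ /// assert_eq!(counts[0].1 + counts[1].1, 1);
+ /// ```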
+ #[cfg(feature = "alloc")]
+ fn choose_weighted_mut<R, F, B, X>(&mut self, rng: &mut R, weight: F) -> Result<&mut Self::Item, WeightedError>
+ where R: Rng + ?Sized,
+ F: Fn(&Self::Item) -> B,
+ B: SampleBorrow<X>,
+ X: SampleUniform +
+ for<'a> ::core::ops::AddAssign<&'a X> +
+ ::core::cmp::PartialOrd<X> +
+ Clone +
+ Default;
+
+ /// Shuffle a mutable slice in place.
+ ///
+ /// Depending on the implementation, complexity is expected to be `O(n)`,
+ /// where `n` is the length of the slice.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use rand::thread_rng;
+ /// use rand::seq::SliceRandom;
+ ///
+ /// let mut rng = thread_rng();
+ /// let mut y = [1, 2, 3, 4, 5];
+ /// println!("Unshuffled: {:?}", y);
+ /// y.shuffle(&mut rng);
+ /// println!("Shuffled: {:?}", y);
+ /// ```
+ fn shuffle<R>(&mut self, rng: &mut R) where R: Rng + ?Sized;
+
+ /// Shuffle a slice in place, but exit early.
+ ///
+ /// Returns two mutable slices from the source slice. The first contains
+ /// `amount` elements randomly permuted. The second has the remaining
+ /// elements that are not fully shuffled.
+ ///
+ /// This is an efficient method to select `amount` elements at random from
+ /// the slice, provided the slice may be mutated.
+ ///
+ /// If you only need to choose elements randomly and `amount > self.len()/2`
+ /// then you may improve performance by taking
+ /// `amount = self.len() - amount` and using only the second slice.
+ ///
+ /// If `amount` is greater than the number of elements in the slice, this
+ /// will perform a full shuffle.
+ ///
+ /// Complexity is expected to be `O(m)` where `m = amount`.
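+ ///
+ /// # Example
+ ///
+ /// A minimal sketch (assumes the default `std` feature, for `thread_rng`):
+ ///
+ /// ```
+ /// use rand::seq::SliceRandom;
+ ///
+ /// let mut rng = rand::thread_rng();
+ /// let mut deck = [1, 2, 3, 4, 5];
+ /// // randomly draw a 2-element "hand"; `rest` holds the other 3 elements
+ /// let (hand, rest) = deck.partial_shuffle(&mut rng, 2);
+ /// assert_eq!(hand.len(), 2);
+ /// assert_eq!(rest.len(), 3);
+ /// ```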
+ fn partial_shuffle<R>(&mut self, rng: &mut R, amount: usize)
+ -> (&mut [Self::Item], &mut [Self::Item]) where R: Rng + ?Sized;
+}
+
+/// Extension trait on iterators, providing random sampling methods.
+pub trait IteratorRandom: Iterator + Sized {
+ /// Choose one element at random from the iterator. If you have a slice,
+ /// it's significantly faster to call the [`choose`] or [`choose_mut`]
+ /// functions using the slice instead.
+ ///
+ /// Returns `None` if and only if the iterator is empty.
+ ///
+ /// Complexity is `O(n)`, where `n` is the length of the iterator.
+ /// This likely consumes multiple random numbers, but the exact number
+ /// is unspecified.
+ ///
+ /// [`choose`]: trait.SliceRandom.html#method.choose
+ /// [`choose_mut`]: trait.SliceRandom.html#method.choose_mut
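+ ///
+ /// # Example
+ ///
+ /// A minimal sketch (assumes the default `std` feature, for `thread_rng`):
+ ///
+ /// ```
+ /// use rand::seq::IteratorRandom;
+ ///
+ /// let mut rng = rand::thread_rng();
+ /// // pick one value out of the iterator 0..100
+ /// let picked = (0..100).choose(&mut rng);
+ /// assert!(picked.is_some());
+ /// ```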
+ fn choose<R>(mut self, rng: &mut R) -> Option<Self::Item>
+ where R: Rng + ?Sized
+ {
+ let (mut lower, mut upper) = self.size_hint();
+ let mut consumed = 0;
+ let mut result = None;
+
+ if upper == Some(lower) {
+ return if lower == 0 { None } else { self.nth(rng.gen_range(0, lower)) };
+ }
+
+ // Continue until the iterator is exhausted
+ loop {
+ if lower > 1 {
+ let ix = rng.gen_range(0, lower + consumed);
+ let skip;
+ if ix < lower {
+ result = self.nth(ix);
+ skip = lower - (ix + 1);
+ } else {
+ skip = lower;
+ }
+ if upper == Some(lower) {
+ return result;
+ }
+ consumed += lower;
+ if skip > 0 {
+ self.nth(skip - 1);
+ }
+ } else {
+ let elem = self.next();
+ if elem.is_none() {
+ return result;
+ }
+ consumed += 1;
+ let denom = consumed as f64; // accurate to 2^53 elements
+ if rng.gen_bool(1.0 / denom) {
+ result = elem;
+ }
+ }
+
+ let hint = self.size_hint();
+ lower = hint.0;
+ upper = hint.1;
+ }
+ }
+
+ /// Collects `amount` values at random from the iterator into a supplied
+ /// buffer.
+ ///
+ /// Although the elements are selected randomly, the order of elements in
+ /// the buffer is neither stable nor fully random. If random ordering is
+ /// desired, shuffle the result.
+ ///
+ /// Returns the number of elements added to the buffer. This equals `amount`
+ /// unless the iterator contains insufficient elements, in which case this
+ /// equals the number of elements available.
+ ///
+ /// Complexity is `O(n)` where `n` is the length of the iterator.
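+ ///
+ /// # Example
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// use rand::seq::IteratorRandom;
+ ///
+ /// let mut rng = rand::thread_rng();
+ /// let mut buf = [0i32; 4];
+ /// // the iterator has more than 4 elements, so the whole buffer is filled
+ /// let filled = (0..100).choose_multiple_fill(&mut rng, &mut buf);
+ /// assert_eq!(filled, 4);
+ /// ```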
+ fn choose_multiple_fill<R>(mut self, rng: &mut R, buf: &mut [Self::Item])
+ -> usize where R: Rng + ?Sized
+ {
+ let amount = buf.len();
+ let mut len = 0;
+ while len < amount {
+ if let Some(elem) = self.next() {
+ buf[len] = elem;
+ len += 1;
+ } else {
+ // Iterator exhausted; stop early
+ return len;
+ }
+ }
+
+ // Continue, since the iterator was not exhausted
+ for (i, elem) in self.enumerate() {
+ let k = rng.gen_range(0, i + 1 + amount);
+ if let Some(slot) = buf.get_mut(k) {
+ *slot = elem;
+ }
+ }
+ len
+ }
+
+ /// Collects `amount` values at random from the iterator into a vector.
+ ///
+ /// This is equivalent to `choose_multiple_fill` except for the result type.
+ ///
+ /// Although the elements are selected randomly, the order of elements in
+ /// the buffer is neither stable nor fully random. If random ordering is
+ /// desired, shuffle the result.
+ ///
+ /// The length of the returned vector equals `amount` unless the iterator
+ /// contains insufficient elements, in which case it equals the number of
+ /// elements available.
+ ///
+ /// Complexity is `O(n)` where `n` is the length of the iterator.
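+ ///
+ /// # Example
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// use rand::seq::IteratorRandom;
+ ///
+ /// let mut rng = rand::thread_rng();
+ /// let sample = (1..100).choose_multiple(&mut rng, 8);
+ /// // plenty of elements were available, so exactly 8 are returned
+ /// assert_eq!(sample.len(), 8);
+ /// ```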
+ #[cfg(feature = "alloc")]
+ fn choose_multiple<R>(mut self, rng: &mut R, amount: usize) -> Vec<Self::Item>
+ where R: Rng + ?Sized
+ {
+ let mut reservoir = Vec::with_capacity(amount);
+ reservoir.extend(self.by_ref().take(amount));
+
+ // Continue unless the iterator was exhausted
+ //
+ // note: this prevents iterators that "restart" from causing problems.
+ // If the iterator stops once, then so do we.
+ if reservoir.len() == amount {
+ for (i, elem) in self.enumerate() {
+ let k = rng.gen_range(0, i + 1 + amount);
+ if let Some(slot) = reservoir.get_mut(k) {
+ *slot = elem;
+ }
+ }
+ } else {
+ // Don't hang onto extra memory. There is a corner case where
+ // `amount` was much less than `self.len()`.
+ reservoir.shrink_to_fit();
+ }
+ reservoir
+ }
+}
+
+
+impl<T> SliceRandom for [T] {
+ type Item = T;
+
+ fn choose<R>(&self, rng: &mut R) -> Option<&Self::Item>
+ where R: Rng + ?Sized
+ {
+ if self.is_empty() {
+ None
+ } else {
+ Some(&self[rng.gen_range(0, self.len())])
+ }
+ }
+
+ fn choose_mut<R>(&mut self, rng: &mut R) -> Option<&mut Self::Item>
+ where R: Rng + ?Sized
+ {
+ if self.is_empty() {
+ None
+ } else {
+ let len = self.len();
+ Some(&mut self[rng.gen_range(0, len)])
+ }
+ }
+
+ #[cfg(feature = "alloc")]
+ fn choose_multiple<R>(&self, rng: &mut R, amount: usize)
+ -> SliceChooseIter<Self, Self::Item>
+ where R: Rng + ?Sized
+ {
+ let amount = ::core::cmp::min(amount, self.len());
+ SliceChooseIter {
+ slice: self,
+ _phantom: Default::default(),
+ indices: index::sample(rng, self.len(), amount).into_iter(),
+ }
+ }
+
+ #[cfg(feature = "alloc")]
+ fn choose_weighted<R, F, B, X>(&self, rng: &mut R, weight: F) -> Result<&Self::Item, WeightedError>
+ where R: Rng + ?Sized,
+ F: Fn(&Self::Item) -> B,
+ B: SampleBorrow<X>,
+ X: SampleUniform +
+ for<'a> ::core::ops::AddAssign<&'a X> +
+ ::core::cmp::PartialOrd<X> +
+ Clone +
+ Default {
+ use distributions::{Distribution, WeightedIndex};
+ let distr = WeightedIndex::new(self.iter().map(weight))?;
+ Ok(&self[distr.sample(rng)])
+ }
+
+ #[cfg(feature = "alloc")]
+ fn choose_weighted_mut<R, F, B, X>(&mut self, rng: &mut R, weight: F) -> Result<&mut Self::Item, WeightedError>
+ where R: Rng + ?Sized,
+ F: Fn(&Self::Item) -> B,
+ B: SampleBorrow<X>,
+ X: SampleUniform +
+ for<'a> ::core::ops::AddAssign<&'a X> +
+ ::core::cmp::PartialOrd<X> +
+ Clone +
+ Default {
+ use distributions::{Distribution, WeightedIndex};
+ let distr = WeightedIndex::new(self.iter().map(weight))?;
+ Ok(&mut self[distr.sample(rng)])
+ }
+
+ fn shuffle<R>(&mut self, rng: &mut R) where R: Rng + ?Sized
+ {
+ for i in (1..self.len()).rev() {
+ // invariant: elements with index > i have been locked in place.
+ self.swap(i, rng.gen_range(0, i + 1));
+ }
+ }
+
+ fn partial_shuffle<R>(&mut self, rng: &mut R, amount: usize)
+ -> (&mut [Self::Item], &mut [Self::Item]) where R: Rng + ?Sized
+ {
+ // This applies Durstenfeld's algorithm for the
+ // [Fisher–Yates shuffle](https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#The_modern_algorithm)
+ // for an unbiased permutation, but exits early after choosing `amount`
+ // elements.
+
+ let len = self.len();
+ let end = if amount >= len { 0 } else { len - amount };
+
+ for i in (end..len).rev() {
+ // invariant: elements with index > i have been locked in place.
+ self.swap(i, rng.gen_range(0, i + 1));
+ }
+ let r = self.split_at_mut(end);
+ (r.1, r.0)
+ }
+}
+
+impl<I> IteratorRandom for I where I: Iterator + Sized {}
+
+
+/// Iterator over multiple choices, as returned by [`SliceRandom::choose_multiple`](
+/// trait.SliceRandom.html#method.choose_multiple).
+#[cfg(feature = "alloc")]
+#[derive(Debug)]
+pub struct SliceChooseIter<'a, S: ?Sized + 'a, T: 'a> {
+ slice: &'a S,
+ _phantom: ::core::marker::PhantomData<T>,
+ indices: index::IndexVecIntoIter,
+}
+
+#[cfg(feature = "alloc")]
+impl<'a, S: Index<usize, Output = T> + ?Sized + 'a, T: 'a> Iterator for SliceChooseIter<'a, S, T> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // TODO: investigate using SliceIndex::get_unchecked when stable
+ self.indices.next().map(|i| &self.slice[i as usize])
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.indices.len(), Some(self.indices.len()))
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl<'a, S: Index<usize, Output = T> + ?Sized + 'a, T: 'a> ExactSizeIterator
+ for SliceChooseIter<'a, S, T>
+{
+ fn len(&self) -> usize {
+ self.indices.len()
+ }
+}
+
+
+/// Randomly sample `amount` elements from a finite iterator.
+///
+/// Deprecated: use [`IteratorRandom::choose_multiple`] instead.
+///
+/// [`IteratorRandom::choose_multiple`]: trait.IteratorRandom.html#method.choose_multiple
+#[cfg(feature = "alloc")]
+#[deprecated(since="0.6.0", note="use IteratorRandom::choose_multiple instead")]
+pub fn sample_iter<T, I, R>(rng: &mut R, iterable: I, amount: usize) -> Result<Vec<T>, Vec<T>>
+ where I: IntoIterator<Item=T>,
+ R: Rng + ?Sized,
+{
+ use seq::IteratorRandom;
+ let iter = iterable.into_iter();
+ let result = iter.choose_multiple(rng, amount);
+ if result.len() == amount {
+ Ok(result)
+ } else {
+ Err(result)
+ }
+}
+
+/// Randomly sample exactly `amount` values from `slice`.
+///
+/// The values are non-repeating and in random order.
+///
+/// This implementation uses `O(amount)` time and memory.
+///
+/// Panics if `amount > slice.len()`
+///
+/// Deprecated: use [`SliceRandom::choose_multiple`] instead.
+///
+/// [`SliceRandom::choose_multiple`]: trait.SliceRandom.html#method.choose_multiple
+#[cfg(feature = "alloc")]
+#[deprecated(since="0.6.0", note="use SliceRandom::choose_multiple instead")]
+pub fn sample_slice<R, T>(rng: &mut R, slice: &[T], amount: usize) -> Vec<T>
+ where R: Rng + ?Sized,
+ T: Clone
+{
+ let indices = index::sample(rng, slice.len(), amount).into_iter();
+
+ let mut out = Vec::with_capacity(amount);
+ out.extend(indices.map(|i| slice[i].clone()));
+ out
+}
+
+/// Randomly sample exactly `amount` references from `slice`.
+///
+/// The references are non-repeating and in random order.
+///
+/// This implementation uses `O(amount)` time and memory.
+///
+/// Panics if `amount > slice.len()`
+///
+/// Deprecated: use [`SliceRandom::choose_multiple`] instead.
+///
+/// [`SliceRandom::choose_multiple`]: trait.SliceRandom.html#method.choose_multiple
+#[cfg(feature = "alloc")]
+#[deprecated(since="0.6.0", note="use SliceRandom::choose_multiple instead")]
+pub fn sample_slice_ref<'a, R, T>(rng: &mut R, slice: &'a [T], amount: usize) -> Vec<&'a T>
+ where R: Rng + ?Sized
+{
+ let indices = index::sample(rng, slice.len(), amount).into_iter();
+
+ let mut out = Vec::with_capacity(amount);
+ out.extend(indices.map(|i| &slice[i]));
+ out
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ #[cfg(feature = "alloc")] use {Rng, SeedableRng};
+ #[cfg(feature = "alloc")] use rngs::SmallRng;
+ #[cfg(all(feature="alloc", not(feature="std")))]
+ use alloc::vec::Vec;
+
+ #[test]
+ fn test_slice_choose() {
+ let mut r = ::test::rng(107);
+ let chars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n'];
+ let mut chosen = [0i32; 14];
+ for _ in 0..1000 {
+ let picked = *chars.choose(&mut r).unwrap();
+ chosen[(picked as usize) - ('a' as usize)] += 1;
+ }
+ for count in chosen.iter() {
+ let err = *count - (1000 / (chars.len() as i32));
+ assert!(-20 <= err && err <= 20);
+ }
+
+ chosen.iter_mut().for_each(|x| *x = 0);
+ for _ in 0..1000 {
+ *chosen.choose_mut(&mut r).unwrap() += 1;
+ }
+ for count in chosen.iter() {
+ let err = *count - (1000 / (chosen.len() as i32));
+ assert!(-20 <= err && err <= 20);
+ }
+
+ let mut v: [isize; 0] = [];
+ assert_eq!(v.choose(&mut r), None);
+ assert_eq!(v.choose_mut(&mut r), None);
+ }
+
+ #[derive(Clone)]
+ struct UnhintedIterator<I: Iterator + Clone> {
+ iter: I,
+ }
+ impl<I: Iterator + Clone> Iterator for UnhintedIterator<I> {
+ type Item = I::Item;
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter.next()
+ }
+ }
+
+ #[derive(Clone)]
+ struct ChunkHintedIterator<I: ExactSizeIterator + Iterator + Clone> {
+ iter: I,
+ chunk_remaining: usize,
+ chunk_size: usize,
+ hint_total_size: bool,
+ }
+ impl<I: ExactSizeIterator + Iterator + Clone> Iterator for ChunkHintedIterator<I> {
+ type Item = I::Item;
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.chunk_remaining == 0 {
+ self.chunk_remaining = ::core::cmp::min(self.chunk_size,
+ self.iter.len());
+ }
+ self.chunk_remaining = self.chunk_remaining.saturating_sub(1);
+
+ self.iter.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.chunk_remaining,
+ if self.hint_total_size { Some(self.iter.len()) } else { None })
+ }
+ }
+
+ #[derive(Clone)]
+ struct WindowHintedIterator<I: ExactSizeIterator + Iterator + Clone> {
+ iter: I,
+ window_size: usize,
+ hint_total_size: bool,
+ }
+ impl<I: ExactSizeIterator + Iterator + Clone> Iterator for WindowHintedIterator<I> {
+ type Item = I::Item;
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (::core::cmp::min(self.iter.len(), self.window_size),
+ if self.hint_total_size { Some(self.iter.len()) } else { None })
+ }
+ }
+
+ #[test]
+ fn test_iterator_choose() {
+ let r = &mut ::test::rng(109);
+ fn test_iter<R: Rng + ?Sized, Iter: Iterator<Item=usize> + Clone>(r: &mut R, iter: Iter) {
+ let mut chosen = [0i32; 9];
+ for _ in 0..1000 {
+ let picked = iter.clone().choose(r).unwrap();
+ chosen[picked] += 1;
+ }
+ for count in chosen.iter() {
+ // Samples should follow Binomial(1000, 1/9)
+ // Octave: binopdf(x, 1000, 1/9) gives the prob of *count == x
+ // Note: have seen 153, which is unlikely but not impossible.
+ assert!(72 < *count && *count < 154, "count not close to 1000/9: {}", count);
+ }
+ }
+
+ test_iter(r, 0..9);
+ test_iter(r, [0, 1, 2, 3, 4, 5, 6, 7, 8].iter().cloned());
+ #[cfg(feature = "alloc")]
+ test_iter(r, (0..9).collect::<Vec<_>>().into_iter());
+ test_iter(r, UnhintedIterator { iter: 0..9 });
+ test_iter(r, ChunkHintedIterator { iter: 0..9, chunk_size: 4, chunk_remaining: 4, hint_total_size: false });
+ test_iter(r, ChunkHintedIterator { iter: 0..9, chunk_size: 4, chunk_remaining: 4, hint_total_size: true });
+ test_iter(r, WindowHintedIterator { iter: 0..9, window_size: 2, hint_total_size: false });
+ test_iter(r, WindowHintedIterator { iter: 0..9, window_size: 2, hint_total_size: true });
+
+ assert_eq!((0..0).choose(r), None);
+ assert_eq!(UnhintedIterator{ iter: 0..0 }.choose(r), None);
+ }
+
+ #[test]
+ fn test_shuffle() {
+ let mut r = ::test::rng(108);
+ let empty: &mut [isize] = &mut [];
+ empty.shuffle(&mut r);
+ let mut one = [1];
+ one.shuffle(&mut r);
+ let b: &[_] = &[1];
+ assert_eq!(one, b);
+
+ let mut two = [1, 2];
+ two.shuffle(&mut r);
+ assert!(two == [1, 2] || two == [2, 1]);
+
+ fn move_last(slice: &mut [usize], pos: usize) {
+ // use slice[pos..].rotate_left(1); once we can use that
+ let last_val = slice[pos];
+ for i in pos..slice.len() - 1 {
+ slice[i] = slice[i + 1];
+ }
+ *slice.last_mut().unwrap() = last_val;
+ }
+ let mut counts = [0i32; 24];
+ for _ in 0..10000 {
+ let mut arr: [usize; 4] = [0, 1, 2, 3];
+ arr.shuffle(&mut r);
+ let mut permutation = 0usize;
+ let mut pos_value = counts.len();
+ for i in 0..4 {
+ pos_value /= 4 - i;
+ let pos = arr.iter().position(|&x| x == i).unwrap();
+ assert!(pos < (4 - i));
+ permutation += pos * pos_value;
+ move_last(&mut arr, pos);
+ assert_eq!(arr[3], i);
+ }
+ for i in 0..4 {
+ assert_eq!(arr[i], i);
+ }
+ counts[permutation] += 1;
+ }
+ for count in counts.iter() {
+ let err = *count - 10000i32 / 24;
+ assert!(-50 <= err && err <= 50);
+ }
+ }
+
+ #[test]
+ fn test_partial_shuffle() {
+ let mut r = ::test::rng(118);
+
+ let mut empty: [u32; 0] = [];
+ let res = empty.partial_shuffle(&mut r, 10);
+ assert_eq!((res.0.len(), res.1.len()), (0, 0));
+
+ let mut v = [1, 2, 3, 4, 5];
+ let res = v.partial_shuffle(&mut r, 2);
+ assert_eq!((res.0.len(), res.1.len()), (2, 3));
+ assert!(res.0[0] != res.0[1]);
+ // First elements are only modified if selected, so at least one isn't modified:
+ assert!(res.1[0] == 1 || res.1[1] == 2 || res.1[2] == 3);
+ }
+
+ #[test]
+ #[cfg(feature = "alloc")]
+ fn test_sample_iter() {
+ let min_val = 1;
+ let max_val = 100;
+
+ let mut r = ::test::rng(401);
+ let vals = (min_val..max_val).collect::<Vec<i32>>();
+ let small_sample = vals.iter().choose_multiple(&mut r, 5);
+ let large_sample = vals.iter().choose_multiple(&mut r, vals.len() + 5);
+
+ assert_eq!(small_sample.len(), 5);
+ assert_eq!(large_sample.len(), vals.len());
+ // no randomization happens when amount >= len
+ assert_eq!(large_sample, vals.iter().collect::<Vec<_>>());
+
+ assert!(small_sample.iter().all(|e| {
+ **e >= min_val && **e <= max_val
+ }));
+ }
+
+ #[test]
+ #[cfg(feature = "alloc")]
+ #[allow(deprecated)]
+ fn test_sample_slice_boundaries() {
+ let empty: &[u8] = &[];
+
+ let mut r = ::test::rng(402);
+
+ // sample 0 items
+ assert_eq!(&sample_slice(&mut r, empty, 0)[..], [0u8; 0]);
+ assert_eq!(&sample_slice(&mut r, &[42, 2, 42], 0)[..], [0u8; 0]);
+
+ // sample 1 item
+ assert_eq!(&sample_slice(&mut r, &[42], 1)[..], [42]);
+ let v = sample_slice(&mut r, &[1, 42], 1)[0];
+ assert!(v == 1 || v == 42);
+
+ // sample "all" the items
+ let v = sample_slice(&mut r, &[42, 133], 2);
+ assert!(&v[..] == [42, 133] || v[..] == [133, 42]);
+
+ // Make sure lucky 777's aren't lucky
+ let slice = &[42, 777];
+ let mut num_42 = 0;
+ let total = 1000;
+ for _ in 0..total {
+ let v = sample_slice(&mut r, slice, 1);
+ assert_eq!(v.len(), 1);
+ let v = v[0];
+ assert!(v == 42 || v == 777);
+ if v == 42 {
+ num_42 += 1;
+ }
+ }
+ let ratio_42 = num_42 as f64 / total as f64;
+ assert!(0.4 <= ratio_42 && ratio_42 <= 0.6, "{}", ratio_42);
+ }
+
+ #[test]
+ #[cfg(feature = "alloc")]
+ #[allow(deprecated)]
+ fn test_sample_slice() {
+ let seeded_rng = SmallRng::from_seed;
+
+ let mut r = ::test::rng(403);
+
+ for n in 1..20 {
+ let length = 5*n - 4; // 1, 6, ...
+ let amount = r.gen_range(0, length);
+ let mut seed = [0u8; 16];
+ r.fill(&mut seed);
+
+ // assert the basics work
+ let regular = index::sample(&mut seeded_rng(seed), length, amount);
+ assert_eq!(regular.len(), amount);
+ assert!(regular.iter().all(|e| e < length));
+
+ // also test that sampling the slice works
+ let vec: Vec<u32> = (0..(length as u32)).collect();
+ let result = sample_slice(&mut seeded_rng(seed), &vec, amount);
+ assert_eq!(result, regular.iter().map(|i| i as u32).collect::<Vec<_>>());
+
+ let result = sample_slice_ref(&mut seeded_rng(seed), &vec, amount);
+ assert!(result.iter().zip(regular.iter()).all(|(i,j)| **i == j as u32));
+ }
+ }
+
+ #[test]
+ #[cfg(feature = "alloc")]
+ fn test_weighted() {
+ let mut r = ::test::rng(406);
+ const N_REPS: u32 = 3000;
+ let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7];
+ let total_weight = weights.iter().sum::<u32>() as f32;
+
+ let verify = |result: [i32; 14]| {
+ for (i, count) in result.iter().enumerate() {
+ let exp = (weights[i] * N_REPS) as f32 / total_weight;
+ let mut err = (*count as f32 - exp).abs();
+ if err != 0.0 {
+ err /= exp;
+ }
+ assert!(err <= 0.25);
+ }
+ };
+
+ // choose_weighted
+ fn get_weight<T>(item: &(u32, T)) -> u32 {
+ item.0
+ }
+ let mut chosen = [0i32; 14];
+ let mut items = [(0u32, 0usize); 14]; // (weight, index)
+ for (i, item) in items.iter_mut().enumerate() {
+ *item = (weights[i], i);
+ }
+ for _ in 0..N_REPS {
+ let item = items.choose_weighted(&mut r, get_weight).unwrap();
+ chosen[item.1] += 1;
+ }
+ verify(chosen);
+
+ // choose_weighted_mut
+ let mut items = [(0u32, 0i32); 14]; // (weight, count)
+ for (i, item) in items.iter_mut().enumerate() {
+ *item = (weights[i], 0);
+ }
+ for _ in 0..N_REPS {
+ items.choose_weighted_mut(&mut r, get_weight).unwrap().1 += 1;
+ }
+ for (ch, item) in chosen.iter_mut().zip(items.iter()) {
+ *ch = item.1;
+ }
+ verify(chosen);
+
+ // Check error cases
+ let empty_slice = &mut [10][0..0];
+ assert_eq!(empty_slice.choose_weighted(&mut r, |_| 1), Err(WeightedError::NoItem));
+ assert_eq!(empty_slice.choose_weighted_mut(&mut r, |_| 1), Err(WeightedError::NoItem));
+ assert_eq!(['x'].choose_weighted_mut(&mut r, |_| 0), Err(WeightedError::AllWeightsZero));
+ assert_eq!([0, -1].choose_weighted_mut(&mut r, |x| *x), Err(WeightedError::NegativeWeight));
+ assert_eq!([-1, 0].choose_weighted_mut(&mut r, |x| *x), Err(WeightedError::NegativeWeight));
+ }
+}