Diffstat (limited to 'rand/src')
37 files changed, 2118 insertions, 3579 deletions
diff --git a/rand/src/deprecated.rs b/rand/src/deprecated.rs deleted file mode 100644 index 88eb09f..0000000 --- a/rand/src/deprecated.rs +++ /dev/null @@ -1,544 +0,0 @@ -// Copyright 2018 Developers of the Rand project. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Deprecated re-exports (we can't add deprecation warnings otherwise) - -#![allow(deprecated)] - -use rngs; -use {RngCore, CryptoRng, SeedableRng, Error}; -use rand_core::block::BlockRngCore; -use rand_isaac; -use rand_chacha; -use rand_hc; - -#[cfg(feature="std")] -use std::io::Read; - -#[derive(Clone, Debug)] -#[deprecated(since="0.6.0", - note="import from rand_isaac crate instead, or use the newer Hc128Rng")] -pub struct IsaacRng(rand_isaac::IsaacRng); - -impl RngCore for IsaacRng { - #[inline(always)] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline(always)] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - #[inline(always)] - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - #[inline(always)] - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -impl SeedableRng for IsaacRng { - type Seed = <rand_isaac::IsaacRng as SeedableRng>::Seed; - - fn from_seed(seed: Self::Seed) -> Self { - IsaacRng(rand_isaac::IsaacRng::from_seed(seed)) - } - - fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> { - rand_isaac::IsaacRng::from_rng(rng).map(IsaacRng) - } -} - -impl IsaacRng { - pub fn new_from_u64(seed: u64) -> Self { - IsaacRng(rand_isaac::IsaacRng::new_from_u64(seed)) - } -} - - -#[derive(Clone, Debug)] -#[deprecated(since="0.6.0", - note="import from rand_isaac crate instead, or use newer Hc128Rng")] -pub struct Isaac64Rng(rand_isaac::Isaac64Rng); - -impl RngCore for Isaac64Rng { - #[inline(always)] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline(always)] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - #[inline(always)] - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - #[inline(always)] - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -impl SeedableRng for Isaac64Rng { - type Seed = <rand_isaac::Isaac64Rng as SeedableRng>::Seed; - - fn from_seed(seed: Self::Seed) -> Self { - Isaac64Rng(rand_isaac::Isaac64Rng::from_seed(seed)) - } - - fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> { - rand_isaac::Isaac64Rng::from_rng(rng).map(Isaac64Rng) - } -} - -impl Isaac64Rng { - pub fn new_from_u64(seed: u64) -> Self { - Isaac64Rng(rand_isaac::Isaac64Rng::new_from_u64(seed)) - } -} - - -#[derive(Clone, Debug)] -#[deprecated(since="0.6.0", note="import from rand_chacha crate instead")] -pub struct ChaChaRng(rand_chacha::ChaChaRng); - -impl RngCore for ChaChaRng { - #[inline(always)] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline(always)] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - #[inline(always)] - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - #[inline(always)] - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -impl SeedableRng for ChaChaRng { - type Seed = <rand_chacha::ChaChaRng as 
SeedableRng>::Seed; - - fn from_seed(seed: Self::Seed) -> Self { - ChaChaRng(rand_chacha::ChaChaRng::from_seed(seed)) - } - - fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> { - rand_chacha::ChaChaRng::from_rng(rng).map(ChaChaRng) - } -} - -impl ChaChaRng { - #[cfg(all(rustc_1_26, not(target_os = "emscripten")))] - pub fn get_word_pos(&self) -> u128 { - self.0.get_word_pos() - } - - #[cfg(all(rustc_1_26, not(target_os = "emscripten")))] - pub fn set_word_pos(&mut self, word_offset: u128) { - self.0.set_word_pos(word_offset) - } - - pub fn set_stream(&mut self, stream: u64) { - self.0.set_stream(stream) - } -} - -impl CryptoRng for ChaChaRng {} - - -#[derive(Clone, Debug)] -#[deprecated(since="0.6.0", note="import from rand_hc crate instead")] -pub struct Hc128Rng(rand_hc::Hc128Rng); - -impl RngCore for Hc128Rng { - #[inline(always)] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline(always)] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - #[inline(always)] - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - #[inline(always)] - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -impl SeedableRng for Hc128Rng { - type Seed = <rand_hc::Hc128Rng as SeedableRng>::Seed; - - fn from_seed(seed: Self::Seed) -> Self { - Hc128Rng(rand_hc::Hc128Rng::from_seed(seed)) - } - - fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> { - rand_hc::Hc128Rng::from_rng(rng).map(Hc128Rng) - } -} - -impl CryptoRng for Hc128Rng {} - - -#[derive(Clone, Debug)] -#[deprecated(since="0.6.0", note="import from rand_xorshift crate instead")] -pub struct XorShiftRng(::rand_xorshift::XorShiftRng); - -impl RngCore for XorShiftRng { - #[inline(always)] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline(always)] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - #[inline(always)] - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - #[inline(always)] - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -impl SeedableRng for XorShiftRng { - type Seed = <::rand_xorshift::XorShiftRng as SeedableRng>::Seed; - - fn from_seed(seed: Self::Seed) -> Self { - XorShiftRng(::rand_xorshift::XorShiftRng::from_seed(seed)) - } - - fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> { - ::rand_xorshift::XorShiftRng::from_rng(rng).map(XorShiftRng) - } -} - - -#[derive(Clone, Debug)] -#[deprecated(since="0.6.0", - note="import with rand::prelude::* or rand::rngs::StdRng instead")] -pub struct StdRng(rngs::StdRng); - -impl RngCore for StdRng { - #[inline(always)] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline(always)] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - #[inline(always)] - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - #[inline(always)] - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -impl SeedableRng for StdRng { - type Seed = <rngs::StdRng as SeedableRng>::Seed; - - fn from_seed(seed: Self::Seed) -> Self { - StdRng(rngs::StdRng::from_seed(seed)) - } - - fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> { - rngs::StdRng::from_rng(rng).map(StdRng) - } -} - -impl CryptoRng for StdRng {} - - -#[cfg(feature="rand_os")] -#[derive(Clone, Debug)] -#[deprecated(since="0.6.0", note="import with rand::rngs::OsRng instead")] -pub struct OsRng(rngs::OsRng); - 
-#[cfg(feature="rand_os")] -impl RngCore for OsRng { - #[inline(always)] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline(always)] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - #[inline(always)] - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - #[inline(always)] - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -#[cfg(feature="rand_os")] -impl OsRng { - pub fn new() -> Result<Self, Error> { - rngs::OsRng::new().map(OsRng) - } -} - -#[cfg(feature="rand_os")] -impl CryptoRng for OsRng {} - - -#[cfg(feature="std")] -#[derive(Debug)] -#[deprecated(since="0.6.0", note="import with rand::rngs::EntropyRng instead")] -pub struct EntropyRng(rngs::EntropyRng); - -#[cfg(feature="std")] -impl RngCore for EntropyRng { - #[inline(always)] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline(always)] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - #[inline(always)] - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - #[inline(always)] - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -#[cfg(feature="std")] -impl EntropyRng { - pub fn new() -> Self { - EntropyRng(rngs::EntropyRng::new()) - } -} - -#[cfg(feature="std")] -impl Default for EntropyRng { - fn default() -> Self { - EntropyRng::new() - } -} - -#[cfg(feature="std")] -impl CryptoRng for EntropyRng {} - - -#[derive(Clone, Debug)] -#[deprecated(since="0.6.0", note="import with rand::rngs::JitterRng instead")] -pub struct JitterRng(rngs::JitterRng); - -impl RngCore for JitterRng { - #[inline(always)] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline(always)] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - #[inline(always)] - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - #[inline(always)] - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -impl JitterRng { - #[cfg(all(feature="std", not(target_arch = "wasm32")))] - pub fn new() -> Result<JitterRng, rngs::TimerError> { - rngs::JitterRng::new().map(JitterRng) - } - - pub fn new_with_timer(timer: fn() -> u64) -> JitterRng { - JitterRng(rngs::JitterRng::new_with_timer(timer)) - } - - pub fn set_rounds(&mut self, rounds: u8) { - self.0.set_rounds(rounds) - } - - pub fn test_timer(&mut self) -> Result<u8, rngs::TimerError> { - self.0.test_timer() - } - - #[cfg(feature="std")] - pub fn timer_stats(&mut self, var_rounds: bool) -> i64 { - self.0.timer_stats(var_rounds) - } -} - -impl CryptoRng for JitterRng {} - - -#[cfg(feature="std")] -#[derive(Clone, Debug)] -#[deprecated(since="0.6.0", - note="import with rand::prelude::* or rand::rngs::ThreadRng instead")] -pub struct ThreadRng(rngs::ThreadRng); - -#[cfg(feature="std")] -impl RngCore for ThreadRng { - #[inline(always)] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline(always)] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - #[inline(always)] - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - #[inline(always)] - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -#[cfg(feature="std")] -impl CryptoRng for ThreadRng {} - - -#[cfg(feature="std")] -#[derive(Debug)] -#[deprecated(since="0.6.0", note="import with rand::rngs::adapter::ReadRng instead")] -pub struct 
ReadRng<R>(rngs::adapter::ReadRng<R>); - -#[cfg(feature="std")] -impl<R: Read> RngCore for ReadRng<R> { - #[inline(always)] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline(always)] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - #[inline(always)] - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - #[inline(always)] - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -#[cfg(feature="std")] -impl<R: Read> ReadRng<R> { - pub fn new(r: R) -> ReadRng<R> { - ReadRng(rngs::adapter::ReadRng::new(r)) - } -} - - -#[derive(Clone, Debug)] -pub struct ReseedingRng<R, Rsdr>(rngs::adapter::ReseedingRng<R, Rsdr>) -where R: BlockRngCore + SeedableRng, - Rsdr: RngCore; - -impl<R, Rsdr: RngCore> RngCore for ReseedingRng<R, Rsdr> -where R: BlockRngCore<Item = u32> + SeedableRng, - <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]> -{ - #[inline(always)] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline(always)] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest) - } - - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -impl<R, Rsdr> ReseedingRng<R, Rsdr> -where R: BlockRngCore + SeedableRng, - Rsdr: RngCore -{ - pub fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self { - ReseedingRng(rngs::adapter::ReseedingRng::new(rng, threshold, reseeder)) - } - - pub fn reseed(&mut self) -> Result<(), Error> { - self.0.reseed() - } -} - -impl<R, Rsdr> CryptoRng for ReseedingRng<R, Rsdr> -where R: BlockRngCore + SeedableRng + CryptoRng, - Rsdr: RngCore + CryptoRng {} diff --git a/rand/src/distributions/bernoulli.rs b/rand/src/distributions/bernoulli.rs index f49618c..eadd056 100644 --- a/rand/src/distributions/bernoulli.rs +++ b/rand/src/distributions/bernoulli.rs @@ -8,8 +8,8 @@ //! The Bernoulli distribution. -use Rng; -use distributions::Distribution; +use crate::Rng; +use crate::distributions::Distribution; /// The Bernoulli distribution. /// @@ -20,7 +20,7 @@ use distributions::Distribution; /// ```rust /// use rand::distributions::{Bernoulli, Distribution}; /// -/// let d = Bernoulli::new(0.3); +/// let d = Bernoulli::new(0.3).unwrap(); /// let v = d.sample(&mut rand::thread_rng()); /// println!("{} is from a Bernoulli distribution", v); /// ``` @@ -61,13 +61,16 @@ const ALWAYS_TRUE: u64 = ::core::u64::MAX; // in `no_std` mode. const SCALE: f64 = 2.0 * (1u64 << 63) as f64; +/// Error type returned from `Bernoulli::new`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum BernoulliError { + /// `p < 0` or `p > 1`. + InvalidProbability, +} + impl Bernoulli { /// Construct a new `Bernoulli` with the given probability of success `p`. /// - /// # Panics - /// - /// If `p < 0` or `p > 1`. - /// /// # Precision /// /// For `p = 1.0`, the resulting distribution will always generate true. @@ -77,12 +80,12 @@ impl Bernoulli { /// a multiple of 2<sup>-64</sup>. (Note that not all multiples of /// 2<sup>-64</sup> in `[0, 1]` can be represented as a `f64`.) 
#[inline] - pub fn new(p: f64) -> Bernoulli { + pub fn new(p: f64) -> Result<Bernoulli, BernoulliError> { if p < 0.0 || p >= 1.0 { - if p == 1.0 { return Bernoulli { p_int: ALWAYS_TRUE } } - panic!("Bernoulli::new not called with 0.0 <= p <= 1.0"); + if p == 1.0 { return Ok(Bernoulli { p_int: ALWAYS_TRUE }) } + return Err(BernoulliError::InvalidProbability); } - Bernoulli { p_int: (p * SCALE) as u64 } + Ok(Bernoulli { p_int: (p * SCALE) as u64 }) } /// Construct a new `Bernoulli` with the probability of success of @@ -91,19 +94,16 @@ impl Bernoulli { /// /// If `numerator == denominator` then the returned `Bernoulli` will always /// return `true`. If `numerator == 0` it will always return `false`. - /// - /// # Panics - /// - /// If `denominator == 0` or `numerator > denominator`. - /// #[inline] - pub fn from_ratio(numerator: u32, denominator: u32) -> Bernoulli { - assert!(numerator <= denominator); + pub fn from_ratio(numerator: u32, denominator: u32) -> Result<Bernoulli, BernoulliError> { + if numerator > denominator { + return Err(BernoulliError::InvalidProbability); + } if numerator == denominator { - return Bernoulli { p_int: ::core::u64::MAX } + return Ok(Bernoulli { p_int: ALWAYS_TRUE }) } - let p_int = ((numerator as f64 / denominator as f64) * SCALE) as u64; - Bernoulli { p_int } + let p_int = ((f64::from(numerator) / f64::from(denominator)) * SCALE) as u64; + Ok(Bernoulli { p_int }) } } @@ -119,15 +119,15 @@ impl Distribution<bool> for Bernoulli { #[cfg(test)] mod test { - use Rng; - use distributions::Distribution; + use crate::Rng; + use crate::distributions::Distribution; use super::Bernoulli; #[test] fn test_trivial() { - let mut r = ::test::rng(1); - let always_false = Bernoulli::new(0.0); - let always_true = Bernoulli::new(1.0); + let mut r = crate::test::rng(1); + let always_false = Bernoulli::new(0.0).unwrap(); + let always_true = Bernoulli::new(1.0).unwrap(); for _ in 0..5 { assert_eq!(r.sample::<bool, _>(&always_false), false); assert_eq!(r.sample::<bool, _>(&always_true), true); @@ -137,17 +137,18 @@ mod test { } #[test] + #[cfg(not(miri))] // Miri is too slow fn test_average() { const P: f64 = 0.3; const NUM: u32 = 3; const DENOM: u32 = 10; - let d1 = Bernoulli::new(P); - let d2 = Bernoulli::from_ratio(NUM, DENOM); + let d1 = Bernoulli::new(P).unwrap(); + let d2 = Bernoulli::from_ratio(NUM, DENOM).unwrap(); const N: u32 = 100_000; let mut sum1: u32 = 0; let mut sum2: u32 = 0; - let mut rng = ::test::rng(2); + let mut rng = crate::test::rng(2); for _ in 0..N { if d1.sample(&mut rng) { sum1 += 1; diff --git a/rand/src/distributions/binomial.rs b/rand/src/distributions/binomial.rs index 2df393e..8fc290a 100644 --- a/rand/src/distributions/binomial.rs +++ b/rand/src/distributions/binomial.rs @@ -8,25 +8,17 @@ // except according to those terms. //! The binomial distribution. +#![allow(deprecated)] +#![allow(clippy::all)] -use Rng; -use distributions::{Distribution, Bernoulli, Cauchy}; -use distributions::utils::log_gamma; +use crate::Rng; +use crate::distributions::{Distribution, Uniform}; /// The binomial distribution `Binomial(n, p)`. /// /// This distribution has density function: /// `f(k) = n!/(k! (n-k)!) p^k (1-p)^(n-k)` for `k >= 0`. 
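The bernoulli.rs hunks above replace the panicking `Bernoulli::new`/`from_ratio` constructors with fallible ones returning `Result<Bernoulli, BernoulliError>`. A minimal caller-side sketch of that new API, assuming the rand 0.7 exports added later in this diff (illustrative only, not part of the commit):

    use rand::distributions::{Bernoulli, BernoulliError, Distribution};

    // Returns Err(BernoulliError::InvalidProbability) when p is outside [0, 1].
    fn flip(p: f64) -> Result<bool, BernoulliError> {
        let coin = Bernoulli::new(p)?;
        Ok(coin.sample(&mut rand::thread_rng()))
    }

    fn main() {
        assert!(flip(1.5).is_err()); // out-of-range probability is now an error, not a panic
        let _heads = flip(0.3).unwrap();
    }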
-/// -/// # Example -/// -/// ``` -/// use rand::distributions::{Binomial, Distribution}; -/// -/// let bin = Binomial::new(20, 0.3); -/// let v = bin.sample(&mut rand::thread_rng()); -/// println!("{} is from a binomial distribution", v); -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct Binomial { /// Number of trials. @@ -47,6 +39,13 @@ impl Binomial { } } +/// Convert a `f64` to an `i64`, panicing on overflow. +// In the future (Rust 1.34), this might be replaced with `TryFrom`. +fn f64_to_i64(x: f64) -> i64 { + assert!(x < (::std::i64::MAX as f64)); + x as i64 +} + impl Distribution<u64> for Binomial { fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 { // Handle these values directly. @@ -55,83 +54,217 @@ impl Distribution<u64> for Binomial { } else if self.p == 1.0 { return self.n; } - - // For low n, it is faster to sample directly. For both methods, - // performance is independent of p. On Intel Haswell CPU this method - // appears to be faster for approx n < 300. - if self.n < 300 { - let mut result = 0; - let d = Bernoulli::new(self.p); - for _ in 0 .. self.n { - result += rng.sample(d) as u32; - } - return result as u64; - } - - // binomial distribution is symmetrical with respect to p -> 1-p, k -> n-k - // switch p so that it is less than 0.5 - this allows for lower expected values - // we will just invert the result at the end + + // The binomial distribution is symmetrical with respect to p -> 1-p, + // k -> n-k switch p so that it is less than 0.5 - this allows for lower + // expected values we will just invert the result at the end let p = if self.p <= 0.5 { self.p } else { 1.0 - self.p }; - // prepare some cached values - let float_n = self.n as f64; - let ln_fact_n = log_gamma(float_n + 1.0); - let pc = 1.0 - p; - let log_p = p.ln(); - let log_pc = pc.ln(); - let expected = self.n as f64 * p; - let sq = (expected * (2.0 * pc)).sqrt(); - - let mut lresult; - - // we use the Cauchy distribution as the comparison distribution - // f(x) ~ 1/(1+x^2) - let cauchy = Cauchy::new(0.0, 1.0); - loop { - let mut comp_dev: f64; + let result; + let q = 1. - p; + + // For small n * min(p, 1 - p), the BINV algorithm based on the inverse + // transformation of the binomial distribution is efficient. Otherwise, + // the BTPE algorithm is used. + // + // Voratas Kachitvichyanukul and Bruce W. Schmeiser. 1988. Binomial + // random variate generation. Commun. ACM 31, 2 (February 1988), + // 216-222. http://dx.doi.org/10.1145/42372.42381 + + // Threshold for prefering the BINV algorithm. The paper suggests 10, + // Ranlib uses 30, and GSL uses 14. + const BINV_THRESHOLD: f64 = 10.; + + if (self.n as f64) * p < BINV_THRESHOLD && + self.n <= (::std::i32::MAX as u64) { + // Use the BINV algorithm. + let s = p / q; + let a = ((self.n + 1) as f64) * s; + let mut r = q.powi(self.n as i32); + let mut u: f64 = rng.gen(); + let mut x = 0; + while u > r as f64 { + u -= r; + x += 1; + r *= a / (x as f64) - s; + } + result = x; + } else { + // Use the BTPE algorithm. + + // Threshold for using the squeeze algorithm. This can be freely + // chosen based on performance. Ranlib and GSL use 20. + const SQUEEZE_THRESHOLD: i64 = 20; + + // Step 0: Calculate constants as functions of `n` and `p`. 
+ let n = self.n as f64; + let np = n * p; + let npq = np * q; + let f_m = np + p; + let m = f64_to_i64(f_m); + // radius of triangle region, since height=1 also area of region + let p1 = (2.195 * npq.sqrt() - 4.6 * q).floor() + 0.5; + // tip of triangle + let x_m = (m as f64) + 0.5; + // left edge of triangle + let x_l = x_m - p1; + // right edge of triangle + let x_r = x_m + p1; + let c = 0.134 + 20.5 / (15.3 + (m as f64)); + // p1 + area of parallelogram region + let p2 = p1 * (1. + 2. * c); + + fn lambda(a: f64) -> f64 { + a * (1. + 0.5 * a) + } + + let lambda_l = lambda((f_m - x_l) / (f_m - x_l * p)); + let lambda_r = lambda((x_r - f_m) / (x_r * q)); + // p1 + area of left tail + let p3 = p2 + c / lambda_l; + // p1 + area of right tail + let p4 = p3 + c / lambda_r; + + // return value + let mut y: i64; + + let gen_u = Uniform::new(0., p4); + let gen_v = Uniform::new(0., 1.); + loop { - // draw from the Cauchy distribution - comp_dev = rng.sample(cauchy); - // shift the peak of the comparison ditribution - lresult = expected + sq * comp_dev; - // repeat the drawing until we are in the range of possible values - if lresult >= 0.0 && lresult < float_n + 1.0 { + // Step 1: Generate `u` for selecting the region. If region 1 is + // selected, generate a triangularly distributed variate. + let u = gen_u.sample(rng); + let mut v = gen_v.sample(rng); + if !(u > p1) { + y = f64_to_i64(x_m - p1 * v + u); break; } - } - // the result should be discrete - lresult = lresult.floor(); + if !(u > p2) { + // Step 2: Region 2, parallelograms. Check if region 2 is + // used. If so, generate `y`. + let x = x_l + (u - p1) / c; + v = v * c + 1.0 - (x - x_m).abs() / p1; + if v > 1. { + continue; + } else { + y = f64_to_i64(x); + } + } else if !(u > p3) { + // Step 3: Region 3, left exponential tail. + y = f64_to_i64(x_l + v.ln() / lambda_l); + if y < 0 { + continue; + } else { + v *= (u - p2) * lambda_l; + } + } else { + // Step 4: Region 4, right exponential tail. + y = f64_to_i64(x_r - v.ln() / lambda_r); + if y > 0 && (y as u64) > self.n { + continue; + } else { + v *= (u - p3) * lambda_r; + } + } + + // Step 5: Acceptance/rejection comparison. + + // Step 5.0: Test for appropriate method of evaluating f(y). + let k = (y - m).abs(); + if !(k > SQUEEZE_THRESHOLD && (k as f64) < 0.5 * npq - 1.) { + // Step 5.1: Evaluate f(y) via the recursive relationship. Start the + // search from the mode. + let s = p / q; + let a = s * (n + 1.); + let mut f = 1.0; + if m < y { + let mut i = m; + loop { + i += 1; + f *= a / (i as f64) - s; + if i == y { + break; + } + } + } else if m > y { + let mut i = y; + loop { + i += 1; + f /= a / (i as f64) - s; + if i == m { + break; + } + } + } + if v > f { + continue; + } else { + break; + } + } - let log_binomial_dist = ln_fact_n - log_gamma(lresult+1.0) - - log_gamma(float_n - lresult + 1.0) + lresult*log_p + (float_n - lresult)*log_pc; - // this is the binomial probability divided by the comparison probability - // we will generate a uniform random value and if it is larger than this, - // we interpret it as a value falling out of the distribution and repeat - let comparison_coeff = (log_binomial_dist.exp() * sq) * (1.2 * (1.0 + comp_dev*comp_dev)); + // Step 5.2: Squeezing. Check the value of ln(v) againts upper and + // lower bound of ln(f(y)). + let k = k as f64; + let rho = (k / npq) * ((k * (k / 3. + 0.625) + 1./6.) 
/ npq + 0.5); + let t = -0.5 * k*k / npq; + let alpha = v.ln(); + if alpha < t - rho { + break; + } + if alpha > t + rho { + continue; + } + + // Step 5.3: Final acceptance/rejection test. + let x1 = (y + 1) as f64; + let f1 = (m + 1) as f64; + let z = (f64_to_i64(n) + 1 - m) as f64; + let w = (f64_to_i64(n) - y + 1) as f64; + + fn stirling(a: f64) -> f64 { + let a2 = a * a; + (13860. - (462. - (132. - (99. - 140. / a2) / a2) / a2) / a2) / a / 166320. + } + + if alpha > x_m * (f1 / x1).ln() + + (n - (m as f64) + 0.5) * (z / w).ln() + + ((y - m) as f64) * (w * p / (x1 * q)).ln() + // We use the signs from the GSL implementation, which are + // different than the ones in the reference. According to + // the GSL authors, the new signs were verified to be + // correct by one of the original designers of the + // algorithm. + + stirling(f1) + stirling(z) - stirling(x1) - stirling(w) + { + continue; + } - if comparison_coeff >= rng.gen() { break; } + assert!(y >= 0); + result = y as u64; } - // invert the result for p < 0.5 + // Invert the result for p < 0.5. if p != self.p { - self.n - lresult as u64 + self.n - result } else { - lresult as u64 + result } } } #[cfg(test)] mod test { - use Rng; - use distributions::Distribution; + use crate::Rng; + use crate::distributions::Distribution; use super::Binomial; fn test_binomial_mean_and_variance<R: Rng>(n: u64, p: f64, rng: &mut R) { @@ -144,17 +277,20 @@ mod test { for i in results.iter_mut() { *i = binomial.sample(rng) as f64; } let mean = results.iter().sum::<f64>() / results.len() as f64; - assert!((mean as f64 - expected_mean).abs() < expected_mean / 50.0); + assert!((mean as f64 - expected_mean).abs() < expected_mean / 50.0, + "mean: {}, expected_mean: {}", mean, expected_mean); let variance = results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>() / results.len() as f64; - assert!((variance - expected_variance).abs() < expected_variance / 10.0); + assert!((variance - expected_variance).abs() < expected_variance / 10.0, + "variance: {}, expected_variance: {}", variance, expected_variance); } #[test] + #[cfg(not(miri))] // Miri is too slow fn test_binomial() { - let mut rng = ::test::rng(351); + let mut rng = crate::test::rng(351); test_binomial_mean_and_variance(150, 0.1, &mut rng); test_binomial_mean_and_variance(70, 0.6, &mut rng); test_binomial_mean_and_variance(40, 0.5, &mut rng); @@ -164,7 +300,7 @@ mod test { #[test] fn test_binomial_end_points() { - let mut rng = ::test::rng(352); + let mut rng = crate::test::rng(352); assert_eq!(rng.sample(Binomial::new(20, 0.0)), 0); assert_eq!(rng.sample(Binomial::new(20, 1.0)), 20); } diff --git a/rand/src/distributions/cauchy.rs b/rand/src/distributions/cauchy.rs index feef015..0a5d149 100644 --- a/rand/src/distributions/cauchy.rs +++ b/rand/src/distributions/cauchy.rs @@ -8,25 +8,18 @@ // except according to those terms. //! The Cauchy distribution. +#![allow(deprecated)] +#![allow(clippy::all)] -use Rng; -use distributions::Distribution; +use crate::Rng; +use crate::distributions::Distribution; use std::f64::consts::PI; /// The Cauchy distribution `Cauchy(median, scale)`. 
/// /// This distribution has a density function: /// `f(x) = 1 / (pi * scale * (1 + ((x - median) / scale)^2))` -/// -/// # Example -/// -/// ``` -/// use rand::distributions::{Cauchy, Distribution}; -/// -/// let cau = Cauchy::new(2.0, 5.0); -/// let v = cau.sample(&mut rand::thread_rng()); -/// println!("{} is from a Cauchy(2, 5) distribution", v); -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct Cauchy { median: f64, @@ -61,7 +54,7 @@ impl Distribution<f64> for Cauchy { #[cfg(test)] mod test { - use distributions::Distribution; + use crate::distributions::Distribution; use super::Cauchy; fn median(mut numbers: &mut [f64]) -> f64 { @@ -75,30 +68,25 @@ mod test { } #[test] - fn test_cauchy_median() { + #[cfg(not(miri))] // Miri doesn't support transcendental functions + fn test_cauchy_averages() { + // NOTE: given that the variance and mean are undefined, + // this test does not have any rigorous statistical meaning. let cauchy = Cauchy::new(10.0, 5.0); - let mut rng = ::test::rng(123); + let mut rng = crate::test::rng(123); let mut numbers: [f64; 1000] = [0.0; 1000]; + let mut sum = 0.0; for i in 0..1000 { numbers[i] = cauchy.sample(&mut rng); + sum += numbers[i]; } let median = median(&mut numbers); println!("Cauchy median: {}", median); - assert!((median - 10.0).abs() < 0.5); // not 100% certain, but probable enough - } - - #[test] - fn test_cauchy_mean() { - let cauchy = Cauchy::new(10.0, 5.0); - let mut rng = ::test::rng(123); - let mut sum = 0.0; - for _ in 0..1000 { - sum += cauchy.sample(&mut rng); - } + assert!((median - 10.0).abs() < 0.4); // not 100% certain, but probable enough let mean = sum / 1000.0; println!("Cauchy mean: {}", mean); // for a Cauchy distribution the mean should not converge - assert!((mean - 10.0).abs() > 0.5); // not 100% certain, but probable enough + assert!((mean - 10.0).abs() > 0.4); // not 100% certain, but probable enough } #[test] diff --git a/rand/src/distributions/dirichlet.rs b/rand/src/distributions/dirichlet.rs index 19384b8..1ce01fd 100644 --- a/rand/src/distributions/dirichlet.rs +++ b/rand/src/distributions/dirichlet.rs @@ -8,28 +8,19 @@ // except according to those terms. //! The dirichlet distribution. +#![allow(deprecated)] +#![allow(clippy::all)] -use Rng; -use distributions::Distribution; -use distributions::gamma::Gamma; +use crate::Rng; +use crate::distributions::Distribution; +use crate::distributions::gamma::Gamma; /// The dirichelet distribution `Dirichlet(alpha)`. /// /// The Dirichlet distribution is a family of continuous multivariate /// probability distributions parameterized by a vector alpha of positive reals. /// It is a multivariate generalization of the beta distribution. 
-/// -/// # Example -/// -/// ``` -/// use rand::prelude::*; -/// use rand::distributions::Dirichlet; -/// -/// let dirichlet = Dirichlet::new(vec![1.0, 2.0, 3.0]); -/// let samples = dirichlet.sample(&mut rand::thread_rng()); -/// println!("{:?} is from a Dirichlet([1.0, 2.0, 3.0]) distribution", samples); -/// ``` - +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Debug)] pub struct Dirichlet { /// Concentration parameters (alpha) @@ -91,12 +82,12 @@ impl Distribution<Vec<f64>> for Dirichlet { #[cfg(test)] mod test { use super::Dirichlet; - use distributions::Distribution; + use crate::distributions::Distribution; #[test] fn test_dirichlet() { let d = Dirichlet::new(vec![1.0, 2.0, 3.0]); - let mut rng = ::test::rng(221); + let mut rng = crate::test::rng(221); let samples = d.sample(&mut rng); let _: Vec<f64> = samples .into_iter() @@ -112,7 +103,7 @@ mod test { let alpha = 0.5f64; let size = 2; let d = Dirichlet::new_with_param(alpha, size); - let mut rng = ::test::rng(221); + let mut rng = crate::test::rng(221); let samples = d.sample(&mut rng); let _: Vec<f64> = samples .into_iter() diff --git a/rand/src/distributions/exponential.rs b/rand/src/distributions/exponential.rs index a7d0500..0278248 100644 --- a/rand/src/distributions/exponential.rs +++ b/rand/src/distributions/exponential.rs @@ -8,10 +8,11 @@ // except according to those terms. //! The exponential distribution. +#![allow(deprecated)] -use {Rng}; -use distributions::{ziggurat_tables, Distribution}; -use distributions::utils::ziggurat; +use crate::{Rng}; +use crate::distributions::{ziggurat_tables, Distribution}; +use crate::distributions::utils::ziggurat; /// Samples floating-point numbers according to the exponential distribution, /// with rate parameter `Ξ» = 1`. This is equivalent to `Exp::new(1.0)` or @@ -27,15 +28,7 @@ use distributions::utils::ziggurat; /// Generate Normal Random Samples*]( /// https://www.doornik.com/research/ziggurat.pdf). /// Nuffield College, Oxford -/// -/// # Example -/// ``` -/// use rand::prelude::*; -/// use rand::distributions::Exp1; -/// -/// let val: f64 = SmallRng::from_entropy().sample(Exp1); -/// println!("{}", val); -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct Exp1; @@ -64,17 +57,8 @@ impl Distribution<f64> for Exp1 { /// This distribution has density function: `f(x) = lambda * exp(-lambda * x)` /// for `x > 0`. /// -/// Note that [`Exp1`](struct.Exp1.html) is an optimised implementation for `lambda = 1`. -/// -/// # Example -/// -/// ``` -/// use rand::distributions::{Exp, Distribution}; -/// -/// let exp = Exp::new(2.0); -/// let v = exp.sample(&mut rand::thread_rng()); -/// println!("{} is from a Exp(2) distribution", v); -/// ``` +/// Note that [`Exp1`](crate::distributions::Exp1) is an optimised implementation for `lambda = 1`. +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct Exp { /// `lambda` stored as `1/lambda`, since this is what we scale by. 
@@ -100,13 +84,13 @@ impl Distribution<f64> for Exp { #[cfg(test)] mod test { - use distributions::Distribution; + use crate::distributions::Distribution; use super::Exp; #[test] fn test_exp() { let exp = Exp::new(10.0); - let mut rng = ::test::rng(221); + let mut rng = crate::test::rng(221); for _ in 0..1000 { assert!(exp.sample(&mut rng) >= 0.0); } diff --git a/rand/src/distributions/float.rs b/rand/src/distributions/float.rs index ece12f5..bda523a 100644 --- a/rand/src/distributions/float.rs +++ b/rand/src/distributions/float.rs @@ -9,9 +9,9 @@ //! Basic floating-point number distributions use core::mem; -use Rng; -use distributions::{Distribution, Standard}; -use distributions::utils::FloatSIMDUtils; +use crate::Rng; +use crate::distributions::{Distribution, Standard}; +use crate::distributions::utils::FloatSIMDUtils; #[cfg(feature="simd_support")] use packed_simd::*; @@ -36,9 +36,9 @@ use packed_simd::*; /// println!("f32 from (0, 1): {}", val); /// ``` /// -/// [`Standard`]: struct.Standard.html -/// [`Open01`]: struct.Open01.html -/// [`Uniform`]: uniform/struct.Uniform.html +/// [`Standard`]: crate::distributions::Standard +/// [`Open01`]: crate::distributions::Open01 +/// [`Uniform`]: crate::distributions::uniform::Uniform #[derive(Clone, Copy, Debug)] pub struct OpenClosed01; @@ -62,14 +62,16 @@ pub struct OpenClosed01; /// println!("f32 from (0, 1): {}", val); /// ``` /// -/// [`Standard`]: struct.Standard.html -/// [`OpenClosed01`]: struct.OpenClosed01.html -/// [`Uniform`]: uniform/struct.Uniform.html +/// [`Standard`]: crate::distributions::Standard +/// [`OpenClosed01`]: crate::distributions::OpenClosed01 +/// [`Uniform`]: crate::distributions::uniform::Uniform #[derive(Clone, Copy, Debug)] pub struct Open01; -pub(crate) trait IntoFloat { +// This trait is needed by both this lib and rand_distr hence is a hidden export +#[doc(hidden)] +pub trait IntoFloat { type F; /// Helper method to combine the fraction and a contant exponent into a @@ -93,9 +95,7 @@ macro_rules! float_impls { // The exponent is encoded using an offset-binary representation let exponent_bits: $u_scalar = (($exponent_bias + exponent) as $u_scalar) << $fraction_bits; - // TODO: use from_bits when min compiler > 1.25 (see #545) - // $ty::from_bits(self | exponent_bits) - unsafe{ mem::transmute(self | exponent_bits) } + $ty::from_bits(self | exponent_bits) } } @@ -168,9 +168,9 @@ float_impls! { f64x8, u64x8, f64, u64, 52, 1023 } #[cfg(test)] mod tests { - use Rng; - use distributions::{Open01, OpenClosed01}; - use rngs::mock::StepRng; + use crate::Rng; + use crate::distributions::{Open01, OpenClosed01}; + use crate::rngs::mock::StepRng; #[cfg(feature="simd_support")] use packed_simd::*; diff --git a/rand/src/distributions/gamma.rs b/rand/src/distributions/gamma.rs index 43ac2bc..b5a97f5 100644 --- a/rand/src/distributions/gamma.rs +++ b/rand/src/distributions/gamma.rs @@ -8,13 +8,14 @@ // except according to those terms. //! The Gamma and derived distributions. +#![allow(deprecated)] use self::GammaRepr::*; use self::ChiSquaredRepr::*; -use Rng; -use distributions::normal::StandardNormal; -use distributions::{Distribution, Exp, Open01}; +use crate::Rng; +use crate::distributions::normal::StandardNormal; +use crate::distributions::{Distribution, Exp, Open01}; /// The Gamma distribution `Gamma(shape, scale)` distribution. /// @@ -32,20 +33,11 @@ use distributions::{Distribution, Exp, Open01}; /// == 1`, and using the boosting technique described in that paper for /// `shape < 1`. 
/// -/// # Example -/// -/// ``` -/// use rand::distributions::{Distribution, Gamma}; -/// -/// let gamma = Gamma::new(2.0, 5.0); -/// let v = gamma.sample(&mut rand::thread_rng()); -/// println!("{} is from a Gamma(2, 5) distribution", v); -/// ``` -/// /// [^1]: George Marsaglia and Wai Wan Tsang. 2000. "A Simple Method for /// Generating Gamma Variables" *ACM Trans. Math. Softw.* 26, 3 /// (September 2000), 363-372. /// DOI:[10.1145/358407.358414](https://doi.acm.org/10.1145/358407.358414) +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct Gamma { repr: GammaRepr, @@ -174,16 +166,7 @@ impl Distribution<f64> for GammaLargeShape { /// of `k` independent standard normal random variables. For other /// `k`, this uses the equivalent characterisation /// `ΟΒ²(k) = Gamma(k/2, 2)`. -/// -/// # Example -/// -/// ``` -/// use rand::distributions::{ChiSquared, Distribution}; -/// -/// let chi = ChiSquared::new(11.0); -/// let v = chi.sample(&mut rand::thread_rng()); -/// println!("{} is from a ΟΒ²(11) distribution", v) -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct ChiSquared { repr: ChiSquaredRepr, @@ -229,16 +212,7 @@ impl Distribution<f64> for ChiSquared { /// This distribution is equivalent to the ratio of two normalised /// chi-squared distributions, that is, `F(m,n) = (ΟΒ²(m)/m) / /// (ΟΒ²(n)/n)`. -/// -/// # Example -/// -/// ``` -/// use rand::distributions::{FisherF, Distribution}; -/// -/// let f = FisherF::new(2.0, 32.0); -/// let v = f.sample(&mut rand::thread_rng()); -/// println!("{} is from an F(2, 32) distribution", v) -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct FisherF { numer: ChiSquared, @@ -270,16 +244,7 @@ impl Distribution<f64> for FisherF { /// The Student t distribution, `t(nu)`, where `nu` is the degrees of /// freedom. -/// -/// # Example -/// -/// ``` -/// use rand::distributions::{StudentT, Distribution}; -/// -/// let t = StudentT::new(11.0); -/// let v = t.sample(&mut rand::thread_rng()); -/// println!("{} is from a t(11) distribution", v) -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct StudentT { chi: ChiSquared, @@ -305,16 +270,7 @@ impl Distribution<f64> for StudentT { } /// The Beta distribution with shape parameters `alpha` and `beta`. 
-/// -/// # Example -/// -/// ``` -/// use rand::distributions::{Distribution, Beta}; -/// -/// let beta = Beta::new(2.0, 5.0); -/// let v = beta.sample(&mut rand::thread_rng()); -/// println!("{} is from a Beta(2, 5) distribution", v); -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct Beta { gamma_a: Gamma, @@ -345,30 +301,32 @@ impl Distribution<f64> for Beta { #[cfg(test)] mod test { - use distributions::Distribution; + use crate::distributions::Distribution; use super::{Beta, ChiSquared, StudentT, FisherF}; + const N: u32 = 100; + #[test] fn test_chi_squared_one() { let chi = ChiSquared::new(1.0); - let mut rng = ::test::rng(201); - for _ in 0..1000 { + let mut rng = crate::test::rng(201); + for _ in 0..N { chi.sample(&mut rng); } } #[test] fn test_chi_squared_small() { let chi = ChiSquared::new(0.5); - let mut rng = ::test::rng(202); - for _ in 0..1000 { + let mut rng = crate::test::rng(202); + for _ in 0..N { chi.sample(&mut rng); } } #[test] fn test_chi_squared_large() { let chi = ChiSquared::new(30.0); - let mut rng = ::test::rng(203); - for _ in 0..1000 { + let mut rng = crate::test::rng(203); + for _ in 0..N { chi.sample(&mut rng); } } @@ -381,8 +339,8 @@ mod test { #[test] fn test_f() { let f = FisherF::new(2.0, 32.0); - let mut rng = ::test::rng(204); - for _ in 0..1000 { + let mut rng = crate::test::rng(204); + for _ in 0..N { f.sample(&mut rng); } } @@ -390,8 +348,8 @@ mod test { #[test] fn test_t() { let t = StudentT::new(11.0); - let mut rng = ::test::rng(205); - for _ in 0..1000 { + let mut rng = crate::test::rng(205); + for _ in 0..N { t.sample(&mut rng); } } @@ -399,8 +357,8 @@ mod test { #[test] fn test_beta() { let beta = Beta::new(1.0, 2.0); - let mut rng = ::test::rng(201); - for _ in 0..1000 { + let mut rng = crate::test::rng(201); + for _ in 0..N { beta.sample(&mut rng); } } diff --git a/rand/src/distributions/integer.rs b/rand/src/distributions/integer.rs index 7e408db..5238339 100644 --- a/rand/src/distributions/integer.rs +++ b/rand/src/distributions/integer.rs @@ -8,8 +8,10 @@ //! The implementations of the `Standard` distribution for integer types. -use {Rng}; -use distributions::{Distribution, Standard}; +use crate::{Rng}; +use crate::distributions::{Distribution, Standard}; +use core::num::{NonZeroU8, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroUsize}; +#[cfg(not(target_os = "emscripten"))] use core::num::NonZeroU128; #[cfg(feature="simd_support")] use packed_simd::*; #[cfg(all(target_arch = "x86", feature="nightly"))] @@ -45,13 +47,13 @@ impl Distribution<u64> for Standard { } } -#[cfg(all(rustc_1_26, not(target_os = "emscripten")))] +#[cfg(not(target_os = "emscripten"))] impl Distribution<u128> for Standard { #[inline] fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u128 { // Use LE; we explicitly generate one value before the next. - let x = rng.next_u64() as u128; - let y = rng.next_u64() as u128; + let x = u128::from(rng.next_u64()); + let y = u128::from(rng.next_u64()); (y << 64) | x } } @@ -85,9 +87,30 @@ impl_int_from_uint! { i8, u8 } impl_int_from_uint! { i16, u16 } impl_int_from_uint! { i32, u32 } impl_int_from_uint! { i64, u64 } -#[cfg(all(rustc_1_26, not(target_os = "emscripten")))] impl_int_from_uint! { i128, u128 } +#[cfg(not(target_os = "emscripten"))] impl_int_from_uint! { i128, u128 } impl_int_from_uint! { isize, usize } +macro_rules! 
impl_nzint { + ($ty:ty, $new:path) => { + impl Distribution<$ty> for Standard { + fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty { + loop { + if let Some(nz) = $new(rng.gen()) { + break nz; + } + } + } + } + } +} + +impl_nzint!(NonZeroU8, NonZeroU8::new); +impl_nzint!(NonZeroU16, NonZeroU16::new); +impl_nzint!(NonZeroU32, NonZeroU32::new); +impl_nzint!(NonZeroU64, NonZeroU64::new); +#[cfg(not(target_os = "emscripten"))] impl_nzint!(NonZeroU128, NonZeroU128::new); +impl_nzint!(NonZeroUsize, NonZeroUsize::new); + #[cfg(feature="simd_support")] macro_rules! simd_impl { ($(($intrinsic:ident, $vec:ty),)+) => {$( @@ -135,19 +158,19 @@ simd_impl!((__m64, u8x8), (__m128i, u8x16), (__m256i, u8x32),); #[cfg(test)] mod tests { - use Rng; - use distributions::{Standard}; + use crate::Rng; + use crate::distributions::{Standard}; #[test] fn test_integers() { - let mut rng = ::test::rng(806); + let mut rng = crate::test::rng(806); rng.sample::<isize, _>(Standard); rng.sample::<i8, _>(Standard); rng.sample::<i16, _>(Standard); rng.sample::<i32, _>(Standard); rng.sample::<i64, _>(Standard); - #[cfg(all(rustc_1_26, not(target_os = "emscripten")))] + #[cfg(not(target_os = "emscripten"))] rng.sample::<i128, _>(Standard); rng.sample::<usize, _>(Standard); @@ -155,7 +178,7 @@ mod tests { rng.sample::<u16, _>(Standard); rng.sample::<u32, _>(Standard); rng.sample::<u64, _>(Standard); - #[cfg(all(rustc_1_26, not(target_os = "emscripten")))] + #[cfg(not(target_os = "emscripten"))] rng.sample::<u128, _>(Standard); } } diff --git a/rand/src/distributions/mod.rs b/rand/src/distributions/mod.rs index 5e879cb..02ece6f 100644 --- a/rand/src/distributions/mod.rs +++ b/rand/src/distributions/mod.rs @@ -7,12 +7,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Generating random samples from probability distributions. +//! Generating random samples from probability distributions //! //! This module is the home of the [`Distribution`] trait and several of its //! implementations. It is the workhorse behind some of the convenient -//! functionality of the [`Rng`] trait, including [`gen`], [`gen_range`] and -//! of course [`sample`]. +//! functionality of the [`Rng`] trait, e.g. [`Rng::gen`], [`Rng::gen_range`] and +//! of course [`Rng::sample`]. //! //! Abstractly, a [probability distribution] describes the probability of //! occurance of each value in its sample space. @@ -40,8 +40,14 @@ //! possible to generate type `T` with [`Rng::gen()`], and by extension also //! with the [`random()`] function. //! +//! ## Random characters +//! +//! [`Alphanumeric`] is a simple distribution to sample random letters and +//! numbers of the `char` type; in contrast [`Standard`] may sample any valid +//! `char`. +//! //! -//! # Distribution to sample from a `Uniform` range +//! # Uniform numeric ranges //! //! The [`Uniform`] distribution is more flexible than [`Standard`], but also //! more specialised: it supports fewer target types, but allows the sample @@ -56,158 +62,84 @@ //! //! User types `T` may also implement `Distribution<T>` for [`Uniform`], //! although this is less straightforward than for [`Standard`] (see the -//! documentation in the [`uniform` module]. Doing so enables generation of +//! documentation in the [`uniform`] module. Doing so enables generation of //! values of type `T` with [`Rng::gen_range`]. //! -//! -//! # Other distributions +//! ## Open and half-open ranges //! //! There are surprisingly many ways to uniformly generate random floats. 
A //! range between 0 and 1 is standard, but the exact bounds (open vs closed) //! and accuracy differ. In addition to the [`Standard`] distribution Rand offers -//! [`Open01`] and [`OpenClosed01`]. See [Floating point implementation] for -//! more details. -//! -//! [`Alphanumeric`] is a simple distribution to sample random letters and -//! numbers of the `char` type; in contrast [`Standard`] may sample any valid -//! `char`. -//! -//! [`WeightedIndex`] can be used to do weighted sampling from a set of items, -//! such as from an array. -//! -//! # Non-uniform probability distributions -//! -//! Rand currently provides the following probability distributions: -//! -//! - Related to real-valued quantities that grow linearly -//! (e.g. errors, offsets): -//! - [`Normal`] distribution, and [`StandardNormal`] as a primitive -//! - [`Cauchy`] distribution -//! - Related to Bernoulli trials (yes/no events, with a given probability): -//! - [`Binomial`] distribution -//! - [`Bernoulli`] distribution, similar to [`Rng::gen_bool`]. -//! - Related to positive real-valued quantities that grow exponentially -//! (e.g. prices, incomes, populations): -//! - [`LogNormal`] distribution -//! - Related to the occurrence of independent events at a given rate: -//! - [`Pareto`] distribution -//! - [`Poisson`] distribution -//! - [`Exp`]onential distribution, and [`Exp1`] as a primitive -//! - [`Weibull`] distribution -//! - Gamma and derived distributions: -//! - [`Gamma`] distribution -//! - [`ChiSquared`] distribution -//! - [`StudentT`] distribution -//! - [`FisherF`] distribution -//! - Triangular distribution: -//! - [`Beta`] distribution -//! - [`Triangular`] distribution -//! - Multivariate probability distributions -//! - [`Dirichlet`] distribution -//! - [`UnitSphereSurface`] distribution -//! - [`UnitCircle`] distribution +//! [`Open01`] and [`OpenClosed01`]. See "Floating point implementation" section of +//! [`Standard`] documentation for more details. //! -//! # Examples +//! # Non-uniform sampling //! -//! Sampling from a distribution: +//! Sampling a simple true/false outcome with a given probability has a name: +//! the [`Bernoulli`] distribution (this is used by [`Rng::gen_bool`]). //! -//! ``` -//! use rand::{thread_rng, Rng}; -//! use rand::distributions::Exp; +//! For weighted sampling from a sequence of discrete values, use the +//! [`weighted`] module. //! -//! let exp = Exp::new(2.0); -//! let v = thread_rng().sample(exp); -//! println!("{} is from an Exp(2) distribution", v); -//! ``` -//! -//! Implementing the [`Standard`] distribution for a user type: -//! -//! ``` -//! # #![allow(dead_code)] -//! use rand::Rng; -//! use rand::distributions::{Distribution, Standard}; -//! -//! struct MyF32 { -//! x: f32, -//! } -//! -//! impl Distribution<MyF32> for Standard { -//! fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> MyF32 { -//! MyF32 { x: rng.gen() } -//! } -//! } -//! ``` +//! This crate no longer includes other non-uniform distributions; instead +//! it is recommended that you use either [`rand_distr`] or [`statrs`]. //! //! //! [probability distribution]: https://en.wikipedia.org/wiki/Probability_distribution -//! [`Distribution`]: trait.Distribution.html -//! [`gen_range`]: ../trait.Rng.html#method.gen_range -//! [`gen`]: ../trait.Rng.html#method.gen -//! [`sample`]: ../trait.Rng.html#method.sample -//! [`new_inclusive`]: struct.Uniform.html#method.new_inclusive -//! [`random()`]: ../fn.random.html -//! [`Rng::gen_bool`]: ../trait.Rng.html#method.gen_bool -//! 
[`Rng::gen_range`]: ../trait.Rng.html#method.gen_range -//! [`Rng::gen()`]: ../trait.Rng.html#method.gen -//! [`Rng`]: ../trait.Rng.html -//! [`uniform` module]: uniform/index.html -//! [Floating point implementation]: struct.Standard.html#floating-point-implementation -// distributions -//! [`Alphanumeric`]: struct.Alphanumeric.html -//! [`Bernoulli`]: struct.Bernoulli.html -//! [`Beta`]: struct.Beta.html -//! [`Binomial`]: struct.Binomial.html -//! [`Cauchy`]: struct.Cauchy.html -//! [`ChiSquared`]: struct.ChiSquared.html -//! [`Dirichlet`]: struct.Dirichlet.html -//! [`Exp`]: struct.Exp.html -//! [`Exp1`]: struct.Exp1.html -//! [`FisherF`]: struct.FisherF.html -//! [`Gamma`]: struct.Gamma.html -//! [`LogNormal`]: struct.LogNormal.html -//! [`Normal`]: struct.Normal.html -//! [`Open01`]: struct.Open01.html -//! [`OpenClosed01`]: struct.OpenClosed01.html -//! [`Pareto`]: struct.Pareto.html -//! [`Poisson`]: struct.Poisson.html -//! [`Standard`]: struct.Standard.html -//! [`StandardNormal`]: struct.StandardNormal.html -//! [`StudentT`]: struct.StudentT.html -//! [`Triangular`]: struct.Triangular.html -//! [`Uniform`]: struct.Uniform.html -//! [`Uniform::new`]: struct.Uniform.html#method.new -//! [`Uniform::new_inclusive`]: struct.Uniform.html#method.new_inclusive -//! [`UnitSphereSurface`]: struct.UnitSphereSurface.html -//! [`UnitCircle`]: struct.UnitCircle.html -//! [`Weibull`]: struct.Weibull.html -//! [`WeightedIndex`]: struct.WeightedIndex.html +//! [`rand_distr`]: https://crates.io/crates/rand_distr +//! [`statrs`]: https://crates.io/crates/statrs + +//! [`Alphanumeric`]: distributions::Alphanumeric +//! [`Bernoulli`]: distributions::Bernoulli +//! [`Open01`]: distributions::Open01 +//! [`OpenClosed01`]: distributions::OpenClosed01 +//! [`Standard`]: distributions::Standard +//! [`Uniform`]: distributions::Uniform +//! [`Uniform::new`]: distributions::Uniform::new +//! [`Uniform::new_inclusive`]: distributions::Uniform::new_inclusive +//! [`weighted`]: distributions::weighted +//! [`rand_distr`]: https://crates.io/crates/rand_distr +//! 
[`statrs`]: https://crates.io/crates/statrs -#[cfg(any(rustc_1_26, features="nightly"))] use core::iter; -use Rng; +use crate::Rng; pub use self::other::Alphanumeric; #[doc(inline)] pub use self::uniform::Uniform; pub use self::float::{OpenClosed01, Open01}; -pub use self::bernoulli::Bernoulli; +pub use self::bernoulli::{Bernoulli, BernoulliError}; #[cfg(feature="alloc")] pub use self::weighted::{WeightedIndex, WeightedError}; + +// The following are all deprecated after being moved to rand_distr +#[allow(deprecated)] #[cfg(feature="std")] pub use self::unit_sphere::UnitSphereSurface; +#[allow(deprecated)] #[cfg(feature="std")] pub use self::unit_circle::UnitCircle; +#[allow(deprecated)] #[cfg(feature="std")] pub use self::gamma::{Gamma, ChiSquared, FisherF, StudentT, Beta}; +#[allow(deprecated)] #[cfg(feature="std")] pub use self::normal::{Normal, LogNormal, StandardNormal}; +#[allow(deprecated)] #[cfg(feature="std")] pub use self::exponential::{Exp, Exp1}; +#[allow(deprecated)] #[cfg(feature="std")] pub use self::pareto::Pareto; +#[allow(deprecated)] #[cfg(feature="std")] pub use self::poisson::Poisson; +#[allow(deprecated)] #[cfg(feature="std")] pub use self::binomial::Binomial; +#[allow(deprecated)] #[cfg(feature="std")] pub use self::cauchy::Cauchy; +#[allow(deprecated)] #[cfg(feature="std")] pub use self::dirichlet::Dirichlet; +#[allow(deprecated)] #[cfg(feature="std")] pub use self::triangular::Triangular; +#[allow(deprecated)] #[cfg(feature="std")] pub use self::weibull::Weibull; pub mod uniform; mod bernoulli; -#[cfg(feature="alloc")] mod weighted; +#[cfg(feature="alloc")] pub mod weighted; #[cfg(feature="std")] mod unit_sphere; #[cfg(feature="std")] mod unit_circle; #[cfg(feature="std")] mod gamma; @@ -222,6 +154,9 @@ mod bernoulli; #[cfg(feature="std")] mod weibull; mod float; +#[doc(hidden)] pub mod hidden_export { + pub use super::float::IntoFloat; // used by rand_distr +} mod integer; mod other; mod utils; @@ -238,8 +173,7 @@ mod utils; /// advantage of not needing to consider thread safety, and for most /// distributions efficient state-less sampling algorithms are available. /// -/// [`Rng`]: ../trait.Rng.html -/// [`sample_iter`]: trait.Distribution.html#method.sample_iter +/// [`sample_iter`]: Distribution::method.sample_iter pub trait Distribution<T> { /// Generate a random value of `T`, using `rng` as the source of randomness. fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T; @@ -247,33 +181,39 @@ pub trait Distribution<T> { /// Create an iterator that generates random values of `T`, using `rng` as /// the source of randomness. /// + /// Note that this function takes `self` by value. This works since + /// `Distribution<T>` is impl'd for `&D` where `D: Distribution<T>`, + /// however borrowing is not automatic hence `distr.sample_iter(...)` may + /// need to be replaced with `(&distr).sample_iter(...)` to borrow or + /// `(&*distr).sample_iter(...)` to reborrow an existing reference. 
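The doc note above explains that `sample_iter` now takes both the distribution and the RNG by value. A short sketch of the borrow pattern it describes, assuming the rand 0.7 API (illustrative only, not part of the commit):

    use rand::distributions::{Distribution, Uniform};

    fn main() {
        let die = Uniform::new_inclusive(1u8, 6);
        let mut rng = rand::thread_rng();
        // `sample_iter` consumes its arguments; borrowing keeps them reusable,
        // since `Distribution` is implemented for `&D` and `Rng` for `&mut R`.
        let rolls: Vec<u8> = (&die).sample_iter(&mut rng).take(10).collect();
        assert_eq!(rolls.len(), 10);
        let extra = die.sample(&mut rng); // both values are still usable here
        assert!(extra >= 1 && extra <= 6);
    }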
+ /// /// # Example /// /// ``` /// use rand::thread_rng; /// use rand::distributions::{Distribution, Alphanumeric, Uniform, Standard}; /// - /// let mut rng = thread_rng(); + /// let rng = thread_rng(); /// /// // Vec of 16 x f32: - /// let v: Vec<f32> = Standard.sample_iter(&mut rng).take(16).collect(); + /// let v: Vec<f32> = Standard.sample_iter(rng).take(16).collect(); /// /// // String: - /// let s: String = Alphanumeric.sample_iter(&mut rng).take(7).collect(); + /// let s: String = Alphanumeric.sample_iter(rng).take(7).collect(); /// /// // Dice-rolling: /// let die_range = Uniform::new_inclusive(1, 6); - /// let mut roll_die = die_range.sample_iter(&mut rng); + /// let mut roll_die = die_range.sample_iter(rng); /// while roll_die.next().unwrap() != 6 { /// println!("Not a 6; rolling again!"); /// } /// ``` - fn sample_iter<'a, R>(&'a self, rng: &'a mut R) -> DistIter<'a, Self, R, T> - where Self: Sized, R: Rng + fn sample_iter<R>(self, rng: R) -> DistIter<Self, R, T> + where R: Rng, Self: Sized { DistIter { distr: self, - rng: rng, + rng, phantom: ::core::marker::PhantomData, } } @@ -292,23 +232,25 @@ impl<'a, T, D: Distribution<T>> Distribution<T> for &'a D { /// This `struct` is created by the [`sample_iter`] method on [`Distribution`]. /// See its documentation for more. /// -/// [`Distribution`]: trait.Distribution.html -/// [`sample_iter`]: trait.Distribution.html#method.sample_iter +/// [`sample_iter`]: Distribution::sample_iter #[derive(Debug)] -pub struct DistIter<'a, D: 'a, R: 'a, T> { - distr: &'a D, - rng: &'a mut R, +pub struct DistIter<D, R, T> { + distr: D, + rng: R, phantom: ::core::marker::PhantomData<T>, } -impl<'a, D, R, T> Iterator for DistIter<'a, D, R, T> - where D: Distribution<T>, R: Rng + 'a +impl<D, R, T> Iterator for DistIter<D, R, T> + where D: Distribution<T>, R: Rng { type Item = T; #[inline(always)] fn next(&mut self) -> Option<T> { - Some(self.distr.sample(self.rng)) + // Here, self.rng may be a reference, but we must take &mut anyway. + // Even if sample could take an R: Rng by value, we would need to do this + // since Rng is not copyable and we cannot enforce that this is "reborrowable". + Some(self.distr.sample(&mut self.rng)) } fn size_hint(&self) -> (usize, Option<usize>) { @@ -316,20 +258,19 @@ impl<'a, D, R, T> Iterator for DistIter<'a, D, R, T> } } -#[cfg(rustc_1_26)] -impl<'a, D, R, T> iter::FusedIterator for DistIter<'a, D, R, T> - where D: Distribution<T>, R: Rng + 'a {} +impl<D, R, T> iter::FusedIterator for DistIter<D, R, T> + where D: Distribution<T>, R: Rng {} #[cfg(features = "nightly")] -impl<'a, D, R, T> iter::TrustedLen for DistIter<'a, D, R, T> - where D: Distribution<T>, R: Rng + 'a {} +impl<D, R, T> iter::TrustedLen for DistIter<D, R, T> + where D: Distribution<T>, R: Rng {} /// A generic random value distribution, implemented for many primitive types. /// Usually generates values with a numerically uniform distribution, and with a /// range appropriate to the type. /// -/// ## Built-in Implementations +/// ## Provided implementations /// /// Assuming the provided `Rng` is well-behaved, these implementations /// generate values with the following ranges and distributions: @@ -346,20 +287,42 @@ impl<'a, D, R, T> iter::TrustedLen for DistIter<'a, D, R, T> /// * Wrapping integers (`Wrapping<T>`), besides the type identical to their /// normal integer variants. 
/// -/// The following aggregate types also implement the distribution `Standard` as -/// long as their component types implement it: +/// The `Standard` distribution also supports generation of the following +/// compound types where all component types are supported: /// -/// * Tuples and arrays: Each element of the tuple or array is generated -/// independently, using the `Standard` distribution recursively. -/// * `Option<T>` where `Standard` is implemented for `T`: Returns `None` with -/// probability 0.5; otherwise generates a random `x: T` and returns `Some(x)`. +/// * Tuples (up to 12 elements): each element is generated sequentially. +/// * Arrays (up to 32 elements): each element is generated sequentially; +/// see also [`Rng::fill`] which supports arbitrary array length for integer +/// types and tends to be faster for `u32` and smaller types. +/// * `Option<T>` first generates a `bool`, and if true generates and returns +/// `Some(value)` where `value: T`, otherwise returning `None`. /// -/// # Example +/// ## Custom implementations +/// +/// The [`Standard`] distribution may be implemented for user types as follows: +/// +/// ``` +/// # #![allow(dead_code)] +/// use rand::Rng; +/// use rand::distributions::{Distribution, Standard}; +/// +/// struct MyF32 { +/// x: f32, +/// } +/// +/// impl Distribution<MyF32> for Standard { +/// fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> MyF32 { +/// MyF32 { x: rng.gen() } +/// } +/// } +/// ``` +/// +/// ## Example usage /// ``` /// use rand::prelude::*; /// use rand::distributions::Standard; /// -/// let val: f32 = SmallRng::from_entropy().sample(Standard); +/// let val: f32 = StdRng::from_entropy().sample(Standard); /// println!("f32 from [0, 1): {}", val); /// ``` /// @@ -379,243 +342,40 @@ impl<'a, D, R, T> iter::TrustedLen for DistIter<'a, D, R, T> /// faster on some architectures (on modern Intel CPUs all methods have /// approximately equal performance). /// -/// [`Open01`]: struct.Open01.html -/// [`OpenClosed01`]: struct.OpenClosed01.html -/// [`Uniform`]: uniform/struct.Uniform.html +/// [`Uniform`]: uniform::Uniform #[derive(Clone, Copy, Debug)] pub struct Standard; -/// A value with a particular weight for use with `WeightedChoice`. -#[deprecated(since="0.6.0", note="use WeightedIndex instead")] -#[allow(deprecated)] -#[derive(Copy, Clone, Debug)] -pub struct Weighted<T> { - /// The numerical weight of this item - pub weight: u32, - /// The actual item which is being weighted - pub item: T, -} - -/// A distribution that selects from a finite collection of weighted items. -/// -/// Deprecated: use [`WeightedIndex`] instead. -/// -/// [`WeightedIndex`]: struct.WeightedIndex.html -#[deprecated(since="0.6.0", note="use WeightedIndex instead")] -#[allow(deprecated)] -#[derive(Debug)] -pub struct WeightedChoice<'a, T:'a> { - items: &'a mut [Weighted<T>], - weight_range: Uniform<u32>, -} - -#[deprecated(since="0.6.0", note="use WeightedIndex instead")] -#[allow(deprecated)] -impl<'a, T: Clone> WeightedChoice<'a, T> { - /// Create a new `WeightedChoice`. - /// - /// Panics if: - /// - /// - `items` is empty - /// - the total weight is 0 - /// - the total weight is larger than a `u32` can contain. 
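An illustrative sketch of the replacement suggested by the deprecation notes above, using `WeightedIndex` (re-exported under the `alloc` feature) in place of the removed `WeightedChoice`; `weighted_pick` is a hypothetical helper name.

```
use rand::prelude::*;
use rand::distributions::WeightedIndex;

fn weighted_pick() {
    let items = ["a", "b", "c"];
    let weights = [2u32, 1, 1];
    let dist = WeightedIndex::new(&weights).unwrap();
    let mut rng = thread_rng();
    // `sample` returns an index into `items`; "a" comes back about half the time.
    println!("{}", items[dist.sample(&mut rng)]);
}
```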
- pub fn new(items: &'a mut [Weighted<T>]) -> WeightedChoice<'a, T> { - // strictly speaking, this is subsumed by the total weight == 0 case - assert!(!items.is_empty(), "WeightedChoice::new called with no items"); - - let mut running_total: u32 = 0; - - // we convert the list from individual weights to cumulative - // weights so we can binary search. This *could* drop elements - // with weight == 0 as an optimisation. - for item in items.iter_mut() { - running_total = match running_total.checked_add(item.weight) { - Some(n) => n, - None => panic!("WeightedChoice::new called with a total weight \ - larger than a u32 can contain") - }; - - item.weight = running_total; - } - assert!(running_total != 0, "WeightedChoice::new called with a total weight of 0"); - - WeightedChoice { - items, - // we're likely to be generating numbers in this range - // relatively often, so might as well cache it - weight_range: Uniform::new(0, running_total) - } - } -} - -#[deprecated(since="0.6.0", note="use WeightedIndex instead")] -#[allow(deprecated)] -impl<'a, T: Clone> Distribution<T> for WeightedChoice<'a, T> { - fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T { - // we want to find the first element that has cumulative - // weight > sample_weight, which we do by binary since the - // cumulative weights of self.items are sorted. - - // choose a weight in [0, total_weight) - let sample_weight = self.weight_range.sample(rng); - - // short circuit when it's the first item - if sample_weight < self.items[0].weight { - return self.items[0].item.clone(); - } - - let mut idx = 0; - let mut modifier = self.items.len(); - - // now we know that every possibility has an element to the - // left, so we can just search for the last element that has - // cumulative weight <= sample_weight, then the next one will - // be "it". (Note that this greatest element will never be the - // last element of the vector, since sample_weight is chosen - // in [0, total_weight) and the cumulative weight of the last - // one is exactly the total weight.) - while modifier > 1 { - let i = idx + modifier / 2; - if self.items[i].weight <= sample_weight { - // we're small, so look to the right, but allow this - // exact element still. - idx = i; - // we need the `/ 2` to round up otherwise we'll drop - // the trailing elements when `modifier` is odd. - modifier += 1; - } else { - // otherwise we're too big, so go left. (i.e. do - // nothing) - } - modifier /= 2; - } - self.items[idx + 1].item.clone() - } -} - -#[cfg(test)] +#[cfg(all(test, feature = "std"))] mod tests { - use rngs::mock::StepRng; - #[allow(deprecated)] - use super::{WeightedChoice, Weighted, Distribution}; + use crate::Rng; + use super::{Distribution, Uniform}; #[test] - #[allow(deprecated)] - fn test_weighted_choice() { - // this makes assumptions about the internal implementation of - // WeightedChoice. It may fail when the implementation in - // `distributions::uniform::UniformInt` changes. - - macro_rules! t { - ($items:expr, $expected:expr) => {{ - let mut items = $items; - let mut total_weight = 0; - for item in &items { total_weight += item.weight; } - - let wc = WeightedChoice::new(&mut items); - let expected = $expected; - - // Use extremely large steps between the random numbers, because - // we test with small ranges and `UniformInt` is designed to prefer - // the most significant bits. 
- let mut rng = StepRng::new(0, !0 / (total_weight as u64)); - - for &val in expected.iter() { - assert_eq!(wc.sample(&mut rng), val) - } - }} - } - - t!([Weighted { weight: 1, item: 10}], [10]); - - // skip some - t!([Weighted { weight: 0, item: 20}, - Weighted { weight: 2, item: 21}, - Weighted { weight: 0, item: 22}, - Weighted { weight: 1, item: 23}], - [21, 21, 23]); - - // different weights - t!([Weighted { weight: 4, item: 30}, - Weighted { weight: 3, item: 31}], - [30, 31, 30, 31, 30, 31, 30]); - - // check that we're binary searching - // correctly with some vectors of odd - // length. - t!([Weighted { weight: 1, item: 40}, - Weighted { weight: 1, item: 41}, - Weighted { weight: 1, item: 42}, - Weighted { weight: 1, item: 43}, - Weighted { weight: 1, item: 44}], - [40, 41, 42, 43, 44]); - t!([Weighted { weight: 1, item: 50}, - Weighted { weight: 1, item: 51}, - Weighted { weight: 1, item: 52}, - Weighted { weight: 1, item: 53}, - Weighted { weight: 1, item: 54}, - Weighted { weight: 1, item: 55}, - Weighted { weight: 1, item: 56}], - [50, 54, 51, 55, 52, 56, 53]); - } - - #[test] - #[allow(deprecated)] - fn test_weighted_clone_initialization() { - let initial : Weighted<u32> = Weighted {weight: 1, item: 1}; - let clone = initial.clone(); - assert_eq!(initial.weight, clone.weight); - assert_eq!(initial.item, clone.item); - } - - #[test] #[should_panic] - #[allow(deprecated)] - fn test_weighted_clone_change_weight() { - let initial : Weighted<u32> = Weighted {weight: 1, item: 1}; - let mut clone = initial.clone(); - clone.weight = 5; - assert_eq!(initial.weight, clone.weight); - } - - #[test] #[should_panic] - #[allow(deprecated)] - fn test_weighted_clone_change_item() { - let initial : Weighted<u32> = Weighted {weight: 1, item: 1}; - let mut clone = initial.clone(); - clone.item = 5; - assert_eq!(initial.item, clone.item); - - } - - #[test] #[should_panic] - #[allow(deprecated)] - fn test_weighted_choice_no_items() { - WeightedChoice::<isize>::new(&mut []); - } - #[test] #[should_panic] - #[allow(deprecated)] - fn test_weighted_choice_zero_weight() { - WeightedChoice::new(&mut [Weighted { weight: 0, item: 0}, - Weighted { weight: 0, item: 1}]); - } - #[test] #[should_panic] - #[allow(deprecated)] - fn test_weighted_choice_weight_overflows() { - let x = ::core::u32::MAX / 2; // x + x + 2 is the overflow - WeightedChoice::new(&mut [Weighted { weight: x, item: 0 }, - Weighted { weight: 1, item: 1 }, - Weighted { weight: x, item: 2 }, - Weighted { weight: 1, item: 3 }]); - } - - #[cfg(feature="std")] - #[test] fn test_distributions_iter() { - use distributions::Normal; - let mut rng = ::test::rng(210); - let distr = Normal::new(10.0, 10.0); - let results: Vec<_> = distr.sample_iter(&mut rng).take(100).collect(); + use crate::distributions::Open01; + let mut rng = crate::test::rng(210); + let distr = Open01; + let results: Vec<f32> = distr.sample_iter(&mut rng).take(100).collect(); println!("{:?}", results); } + + #[test] + fn test_make_an_iter() { + fn ten_dice_rolls_other_than_five<'a, R: Rng>(rng: &'a mut R) -> impl Iterator<Item = i32> + 'a { + Uniform::new_inclusive(1, 6) + .sample_iter(rng) + .filter(|x| *x != 5) + .take(10) + } + + let mut rng = crate::test::rng(211); + let mut count = 0; + for val in ten_dice_rolls_other_than_five(&mut rng) { + assert!(val >= 1 && val <= 6 && val != 5); + count += 1; + } + assert_eq!(count, 10); + } } diff --git a/rand/src/distributions/normal.rs b/rand/src/distributions/normal.rs index b8d632e..7808baf 100644 --- 
a/rand/src/distributions/normal.rs +++ b/rand/src/distributions/normal.rs @@ -8,10 +8,11 @@ // except according to those terms. //! The normal and derived distributions. +#![allow(deprecated)] -use Rng; -use distributions::{ziggurat_tables, Distribution, Open01}; -use distributions::utils::ziggurat; +use crate::Rng; +use crate::distributions::{ziggurat_tables, Distribution, Open01}; +use crate::distributions::utils::ziggurat; /// Samples floating-point numbers according to the normal distribution /// `N(0, 1)` (a.k.a. a standard normal, or Gaussian). This is equivalent to @@ -25,15 +26,7 @@ use distributions::utils::ziggurat; /// Generate Normal Random Samples*]( /// https://www.doornik.com/research/ziggurat.pdf). /// Nuffield College, Oxford -/// -/// # Example -/// ``` -/// use rand::prelude::*; -/// use rand::distributions::StandardNormal; -/// -/// let val: f64 = SmallRng::from_entropy().sample(StandardNormal); -/// println!("{}", val); -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct StandardNormal; @@ -80,18 +73,8 @@ impl Distribution<f64> for StandardNormal { /// Note that [`StandardNormal`] is an optimised implementation for mean 0, and /// standard deviation 1. /// -/// # Example -/// -/// ``` -/// use rand::distributions::{Normal, Distribution}; -/// -/// // mean 2, standard deviation 3 -/// let normal = Normal::new(2.0, 3.0); -/// let v = normal.sample(&mut rand::thread_rng()); -/// println!("{} is from a N(2, 9) distribution", v) -/// ``` -/// -/// [`StandardNormal`]: struct.StandardNormal.html +/// [`StandardNormal`]: crate::distributions::StandardNormal +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct Normal { mean: f64, @@ -126,17 +109,7 @@ impl Distribution<f64> for Normal { /// /// If `X` is log-normal distributed, then `ln(X)` is `N(mean, std_dev**2)` /// distributed. -/// -/// # Example -/// -/// ``` -/// use rand::distributions::{LogNormal, Distribution}; -/// -/// // mean 2, standard deviation 3 -/// let log_normal = LogNormal::new(2.0, 3.0); -/// let v = log_normal.sample(&mut rand::thread_rng()); -/// println!("{} is from an ln N(2, 9) distribution", v) -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct LogNormal { norm: Normal @@ -163,13 +136,13 @@ impl Distribution<f64> for LogNormal { #[cfg(test)] mod tests { - use distributions::Distribution; + use crate::distributions::Distribution; use super::{Normal, LogNormal}; #[test] fn test_normal() { let norm = Normal::new(10.0, 10.0); - let mut rng = ::test::rng(210); + let mut rng = crate::test::rng(210); for _ in 0..1000 { norm.sample(&mut rng); } @@ -184,7 +157,7 @@ mod tests { #[test] fn test_log_normal() { let lnorm = LogNormal::new(10.0, 10.0); - let mut rng = ::test::rng(211); + let mut rng = crate::test::rng(211); for _ in 0..1000 { lnorm.sample(&mut rng); } diff --git a/rand/src/distributions/other.rs b/rand/src/distributions/other.rs index 2295f79..6ec0473 100644 --- a/rand/src/distributions/other.rs +++ b/rand/src/distributions/other.rs @@ -11,8 +11,8 @@ use core::char; use core::num::Wrapping; -use {Rng}; -use distributions::{Distribution, Standard, Uniform}; +use crate::Rng; +use crate::distributions::{Distribution, Standard, Uniform}; // ----- Sampling distributions ----- @@ -116,6 +116,7 @@ macro_rules! 
tuple_impl { } impl Distribution<()> for Standard { + #[allow(clippy::unused_unit)] #[inline] fn sample<R: Rng + ?Sized>(&self, _: &mut R) -> () { () } } @@ -176,13 +177,13 @@ impl<T> Distribution<Wrapping<T>> for Standard where Standard: Distribution<T> { #[cfg(test)] mod tests { - use {Rng, RngCore, Standard}; - use distributions::Alphanumeric; + use crate::{Rng, RngCore, Standard}; + use crate::distributions::Alphanumeric; #[cfg(all(not(feature="std"), feature="alloc"))] use alloc::string::String; #[test] fn test_misc() { - let rng: &mut RngCore = &mut ::test::rng(820); + let rng: &mut dyn RngCore = &mut crate::test::rng(820); rng.sample::<char, _>(Standard); rng.sample::<bool, _>(Standard); @@ -192,7 +193,7 @@ mod tests { #[test] fn test_chars() { use core::iter; - let mut rng = ::test::rng(805); + let mut rng = crate::test::rng(805); // Test by generating a relatively large number of chars, so we also // take the rejection sampling path. @@ -203,7 +204,7 @@ mod tests { #[test] fn test_alphanumeric() { - let mut rng = ::test::rng(806); + let mut rng = crate::test::rng(806); // Test by generating a relatively large number of chars, so we also // take the rejection sampling path. diff --git a/rand/src/distributions/pareto.rs b/rand/src/distributions/pareto.rs index 744a157..edc9122 100644 --- a/rand/src/distributions/pareto.rs +++ b/rand/src/distributions/pareto.rs @@ -7,20 +7,13 @@ // except according to those terms. //! The Pareto distribution. +#![allow(deprecated)] -use Rng; -use distributions::{Distribution, OpenClosed01}; +use crate::Rng; +use crate::distributions::{Distribution, OpenClosed01}; /// Samples floating-point numbers according to the Pareto distribution -/// -/// # Example -/// ``` -/// use rand::prelude::*; -/// use rand::distributions::Pareto; -/// -/// let val: f64 = SmallRng::from_entropy().sample(Pareto::new(1., 2.)); -/// println!("{}", val); -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct Pareto { scale: f64, @@ -51,7 +44,7 @@ impl Distribution<f64> for Pareto { #[cfg(test)] mod tests { - use distributions::Distribution; + use crate::distributions::Distribution; use super::Pareto; #[test] @@ -65,7 +58,7 @@ mod tests { let scale = 1.0; let shape = 2.0; let d = Pareto::new(scale, shape); - let mut rng = ::test::rng(1); + let mut rng = crate::test::rng(1); for _ in 0..1000 { let r = d.sample(&mut rng); assert!(r >= scale); diff --git a/rand/src/distributions/poisson.rs b/rand/src/distributions/poisson.rs index 1244caa..9fd6e99 100644 --- a/rand/src/distributions/poisson.rs +++ b/rand/src/distributions/poisson.rs @@ -8,25 +8,17 @@ // except according to those terms. //! The Poisson distribution. +#![allow(deprecated)] -use Rng; -use distributions::{Distribution, Cauchy}; -use distributions::utils::log_gamma; +use crate::Rng; +use crate::distributions::{Distribution, Cauchy}; +use crate::distributions::utils::log_gamma; /// The Poisson distribution `Poisson(lambda)`. /// /// This distribution has a density function: /// `f(k) = lambda^k * exp(-lambda) / k!` for `k >= 0`. 
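As an aside, a small numeric check of the density stated above, `f(k) = lambda^k * exp(-lambda) / k!`; `poisson_pmf` is a hypothetical helper, not crate code.

```
fn poisson_pmf(lambda: f64, k: u64) -> f64 {
    // The empty product for k = 0 gives k! = 1.
    let k_factorial: f64 = (1..=k).map(|i| i as f64).product();
    lambda.powi(k as i32) * (-lambda).exp() / k_factorial
}

fn main() {
    // For lambda = 2: P(X = 3) = 8 * exp(-2) / 6, roughly 0.180.
    println!("{:.3}", poisson_pmf(2.0, 3));
}
```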
-/// -/// # Example -/// -/// ``` -/// use rand::distributions::{Poisson, Distribution}; -/// -/// let poi = Poisson::new(2.0); -/// let v = poi.sample(&mut rand::thread_rng()); -/// println!("{} is from a Poisson(2) distribution", v); -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct Poisson { lambda: f64, @@ -113,13 +105,14 @@ impl Distribution<u64> for Poisson { #[cfg(test)] mod test { - use distributions::Distribution; + use crate::distributions::Distribution; use super::Poisson; #[test] + #[cfg(not(miri))] // Miri is too slow fn test_poisson_10() { let poisson = Poisson::new(10.0); - let mut rng = ::test::rng(123); + let mut rng = crate::test::rng(123); let mut sum = 0; for _ in 0..1000 { sum += poisson.sample(&mut rng); @@ -130,10 +123,11 @@ mod test { } #[test] + #[cfg(not(miri))] // Miri doesn't support transcendental functions fn test_poisson_15() { // Take the 'high expected values' path let poisson = Poisson::new(15.0); - let mut rng = ::test::rng(123); + let mut rng = crate::test::rng(123); let mut sum = 0; for _ in 0..1000 { sum += poisson.sample(&mut rng); diff --git a/rand/src/distributions/triangular.rs b/rand/src/distributions/triangular.rs index a6eef5c..3e8f8b0 100644 --- a/rand/src/distributions/triangular.rs +++ b/rand/src/distributions/triangular.rs @@ -5,22 +5,15 @@ // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. + //! The triangular distribution. +#![allow(deprecated)] -use Rng; -use distributions::{Distribution, Standard}; +use crate::Rng; +use crate::distributions::{Distribution, Standard}; /// The triangular distribution. -/// -/// # Example -/// -/// ```rust -/// use rand::distributions::{Triangular, Distribution}; -/// -/// let d = Triangular::new(0., 5., 2.5); -/// let v = d.sample(&mut rand::thread_rng()); -/// println!("{} is from a triangular distribution", v); -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct Triangular { min: f64, @@ -61,7 +54,7 @@ impl Distribution<f64> for Triangular { #[cfg(test)] mod test { - use distributions::Distribution; + use crate::distributions::Distribution; use super::Triangular; #[test] @@ -78,7 +71,7 @@ mod test { #[test] fn test_sample() { let norm = Triangular::new(0., 1., 0.5); - let mut rng = ::test::rng(1); + let mut rng = crate::test::rng(1); for _ in 0..1000 { norm.sample(&mut rng); } diff --git a/rand/src/distributions/uniform.rs b/rand/src/distributions/uniform.rs index ceed77d..8c90f4e 100644 --- a/rand/src/distributions/uniform.rs +++ b/rand/src/distributions/uniform.rs @@ -15,13 +15,13 @@ //! [`Uniform`]. //! //! This distribution is provided with support for several primitive types -//! (all integer and floating-point types) as well as `std::time::Duration`, +//! (all integer and floating-point types) as well as [`std::time::Duration`], //! and supports extension to user-defined types via a type-specific *back-end* //! implementation. //! //! The types [`UniformInt`], [`UniformFloat`] and [`UniformDuration`] are the //! back-ends supporting sampling from primitive integer and floating-point -//! ranges as well as from `std::time::Duration`; these types do not normally +//! ranges as well as from [`std::time::Duration`]; these types do not normally //! need to be used directly (unless implementing a derived back-end). //! //! # Example usage @@ -100,28 +100,26 @@ //! 
let x = uniform.sample(&mut thread_rng()); //! ``` //! -//! [`Uniform`]: struct.Uniform.html -//! [`Rng::gen_range`]: ../../trait.Rng.html#method.gen_range -//! [`SampleUniform`]: trait.SampleUniform.html -//! [`UniformSampler`]: trait.UniformSampler.html -//! [`UniformInt`]: struct.UniformInt.html -//! [`UniformFloat`]: struct.UniformFloat.html -//! [`UniformDuration`]: struct.UniformDuration.html -//! [`SampleBorrow::borrow`]: trait.SampleBorrow.html#method.borrow +//! [`SampleUniform`]: crate::distributions::uniform::SampleUniform +//! [`UniformSampler`]: crate::distributions::uniform::UniformSampler +//! [`UniformInt`]: crate::distributions::uniform::UniformInt +//! [`UniformFloat`]: crate::distributions::uniform::UniformFloat +//! [`UniformDuration`]: crate::distributions::uniform::UniformDuration +//! [`SampleBorrow::borrow`]: crate::distributions::uniform::SampleBorrow::borrow #[cfg(feature = "std")] use std::time::Duration; -#[cfg(all(not(feature = "std"), rustc_1_25))] +#[cfg(not(feature = "std"))] use core::time::Duration; -use Rng; -use distributions::Distribution; -use distributions::float::IntoFloat; -use distributions::utils::{WideningMultiply, FloatSIMDUtils, FloatAsSIMD, BoolAsSIMD}; +use crate::Rng; +use crate::distributions::Distribution; +use crate::distributions::float::IntoFloat; +use crate::distributions::utils::{WideningMultiply, FloatSIMDUtils, FloatAsSIMD, BoolAsSIMD}; #[cfg(not(feature = "std"))] #[allow(unused_imports)] // rustc doesn't detect that this is actually used -use distributions::utils::Float; +use crate::distributions::utils::Float; #[cfg(feature="simd_support")] @@ -165,10 +163,8 @@ use packed_simd::*; /// } /// ``` /// -/// [`Uniform::new`]: struct.Uniform.html#method.new -/// [`Uniform::new_inclusive`]: struct.Uniform.html#method.new_inclusive -/// [`new`]: struct.Uniform.html#method.new -/// [`new_inclusive`]: struct.Uniform.html#method.new_inclusive +/// [`new`]: Uniform::new +/// [`new_inclusive`]: Uniform::new_inclusive #[derive(Clone, Copy, Debug)] pub struct Uniform<X: SampleUniform> { inner: X::Sampler, @@ -206,9 +202,7 @@ impl<X: SampleUniform> Distribution<X> for Uniform<X> { /// See the [module documentation] on how to implement [`Uniform`] range /// sampling for a custom type. /// -/// [`UniformSampler`]: trait.UniformSampler.html -/// [module documentation]: index.html -/// [`Uniform`]: struct.Uniform.html +/// [module documentation]: crate::distributions::uniform pub trait SampleUniform: Sized { /// The `UniformSampler` implementation supporting type `X`. type Sampler: UniformSampler<X = Self>; @@ -222,9 +216,8 @@ pub trait SampleUniform: Sized { /// Implementation of [`sample_single`] is optional, and is only useful when /// the implementation can be faster than `Self::new(low, high).sample(rng)`. /// -/// [module documentation]: index.html -/// [`Uniform`]: struct.Uniform.html -/// [`sample_single`]: trait.UniformSampler.html#method.sample_single +/// [module documentation]: crate::distributions::uniform +/// [`sample_single`]: UniformSampler::sample_single pub trait UniformSampler: Sized { /// The type sampled by this implementation. type X; @@ -253,14 +246,11 @@ pub trait UniformSampler: Sized { /// Sample a single value uniformly from a range with inclusive lower bound /// and exclusive upper bound `[low, high)`. /// - /// Usually users should not call this directly but instead use - /// `Uniform::sample_single`, which asserts that `low < high` before calling - /// this. 
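An illustrative contrast, assuming the rand 0.7 API in this hunk, between building a `Uniform` once for repeated sampling and `Rng::gen_range`, which routes one-off requests through `sample_single`; `dice` is a hypothetical helper name.

```
use rand::distributions::{Distribution, Uniform};
use rand::Rng;

fn dice(rolls: usize) -> (Vec<u8>, u8) {
    let mut rng = rand::thread_rng();
    // Set up the range once, then sample it many times.
    let die = Uniform::new_inclusive(1u8, 6);
    let many: Vec<u8> = (0..rolls).map(|_| die.sample(&mut rng)).collect();
    // One-off value; `gen_range` takes an exclusive upper bound here.
    let one = rng.gen_range(1u8, 7);
    (many, one)
}
```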
- /// - /// Via this method, implementations can provide a method optimized for - /// sampling only a single value from the specified range. The default - /// implementation simply calls `UniformSampler::new` then `sample` on the - /// result. + /// By default this is implemented using + /// `UniformSampler::new(low, high).sample(rng)`. However, for some types + /// more optimal implementations for single usage may be provided via this + /// method (which is the case for integers and floats). + /// Results may not be identical. fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X where B1: SampleBorrow<Self::X> + Sized, @@ -277,7 +267,6 @@ impl<X: SampleUniform> From<::core::ops::Range<X>> for Uniform<X> { } } -#[cfg(rustc_1_27)] impl<X: SampleUniform> From<::core::ops::RangeInclusive<X>> for Uniform<X> { fn from(r: ::core::ops::RangeInclusive<X>) -> Uniform<X> { Uniform::new_inclusive(r.start(), r.end()) @@ -288,11 +277,11 @@ impl<X: SampleUniform> From<::core::ops::RangeInclusive<X>> for Uniform<X> { /// only for SampleUniform and references to SampleUniform in /// order to resolve ambiguity issues. /// -/// [`Borrow`]: https://doc.rust-lang.org/std/borrow/trait.Borrow.html +/// [`Borrow`]: std::borrow::Borrow pub trait SampleBorrow<Borrowed> { /// Immutably borrows from an owned value. See [`Borrow::borrow`] /// - /// [`Borrow::borrow`]: https://doc.rust-lang.org/std/borrow/trait.Borrow.html#tymethod.borrow + /// [`Borrow::borrow`]: std::borrow::Borrow::borrow fn borrow(&self) -> &Borrowed; } impl<Borrowed> SampleBorrow<Borrowed> for Borrowed where Borrowed: SampleUniform { @@ -316,48 +305,42 @@ impl<'a, Borrowed> SampleBorrow<Borrowed> for &'a Borrowed where Borrowed: Sampl /// /// # Implementation notes /// +/// For simplicity, we use the same generic struct `UniformInt<X>` for all +/// integer types `X`. This gives us only one field type, `X`; to store unsigned +/// values of this size, we take use the fact that these conversions are no-ops. +/// /// For a closed range, the number of possible numbers we should generate is -/// `range = (high - low + 1)`. It is not possible to end up with a uniform -/// distribution if we map *all* the random integers that can be generated to -/// this range. We have to map integers from a `zone` that is a multiple of the -/// range. The rest of the integers, that cause a bias, are rejected. +/// `range = (high - low + 1)`. To avoid bias, we must ensure that the size of +/// our sample space, `zone`, is a multiple of `range`; other values must be +/// rejected (by replacing with a new random sample). /// -/// The problem with `range` is that to cover the full range of the type, it has -/// to store `unsigned_max + 1`, which can't be represented. But if the range -/// covers the full range of the type, no modulus is needed. A range of size 0 -/// can't exist, so we use that to represent this special case. Wrapping -/// arithmetic even makes representing `unsigned_max + 1` as 0 simple. +/// As a special case, we use `range = 0` to represent the full range of the +/// result type (i.e. for `new_inclusive($ty::MIN, $ty::MAX)`). /// -/// We don't calculate `zone` directly, but first calculate the number of -/// integers to reject. To handle `unsigned_max + 1` not fitting in the type, -/// we use: -/// `ints_to_reject = (unsigned_max + 1) % range;` -/// `ints_to_reject = (unsigned_max - range + 1) % range;` +/// The optimum `zone` is the largest product of `range` which fits in our +/// (unsigned) target type. 
We calculate this by calculating how many numbers we +/// must reject: `reject = (MAX + 1) % range = (MAX - range + 1) % range`. Any (large) +/// product of `range` will suffice, thus in `sample_single` we multiply by a +/// power of 2 via bit-shifting (faster but may cause more rejections). /// -/// The smallest integer PRNGs generate is `u32`. That is why for small integer -/// sizes (`i8`/`u8` and `i16`/`u16`) there is an optimization: don't pick the -/// largest zone that can fit in the small type, but pick the largest zone that -/// can fit in an `u32`. `ints_to_reject` is always less than half the size of -/// the small integer. This means the first bit of `zone` is always 1, and so -/// are all the other preceding bits of a larger integer. The easiest way to -/// grow the `zone` for the larger type is to simply sign extend it. +/// The smallest integer PRNGs generate is `u32`. For 8- and 16-bit outputs we +/// use `u32` for our `zone` and samples (because it's not slower and because +/// it reduces the chance of having to reject a sample). In this case we cannot +/// store `zone` in the target type since it is too large, however we know +/// `ints_to_reject < range <= $unsigned::MAX`. /// /// An alternative to using a modulus is widening multiply: After a widening /// multiply by `range`, the result is in the high word. Then comparing the low /// word against `zone` makes sure our distribution is uniform. -/// -/// [`UniformSampler`]: trait.UniformSampler.html -/// [`Uniform`]: struct.Uniform.html #[derive(Clone, Copy, Debug)] pub struct UniformInt<X> { low: X, range: X, - zone: X, + z: X, // either ints_to_reject or zone depending on implementation } macro_rules! uniform_int_impl { - ($ty:ty, $signed:ty, $unsigned:ident, - $i_large:ident, $u_large:ident) => { + ($ty:ty, $unsigned:ident, $u_large:ident) => { impl SampleUniform for $ty { type Sampler = UniformInt<$ty>; } @@ -392,34 +375,30 @@ macro_rules! uniform_int_impl { let high = *high_b.borrow(); assert!(low <= high, "Uniform::new_inclusive called with `low > high`"); - let unsigned_max = ::core::$unsigned::MAX; + let unsigned_max = ::core::$u_large::MAX; let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned; let ints_to_reject = if range > 0 { + let range = $u_large::from(range); (unsigned_max - range + 1) % range } else { 0 }; - let zone = unsigned_max - ints_to_reject; UniformInt { low: low, // These are really $unsigned values, but store as $ty: range: range as $ty, - zone: zone as $ty + z: ints_to_reject as $unsigned as $ty } } fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X { let range = self.range as $unsigned as $u_large; if range > 0 { - // Grow `zone` to fit a type of at least 32 bits, by - // sign-extending it (the first bit is always 1, so are all - // the preceding bits of the larger type). - // For types that already have the right size, all the - // casting is a no-op. - let zone = self.zone as $signed as $i_large as $u_large; + let unsigned_max = ::core::$u_large::MAX; + let zone = unsigned_max - (self.z as $unsigned as $u_large); loop { let v: $u_large = rng.gen(); let (hi, lo) = v.wmul(range); @@ -441,7 +420,7 @@ macro_rules! uniform_int_impl { let low = *low_b.borrow(); let high = *high_b.borrow(); assert!(low < high, - "Uniform::sample_single called with low >= high"); + "UniformSampler::sample_single: low >= high"); let range = high.wrapping_sub(low) as $unsigned as $u_large; let zone = if ::core::$unsigned::MAX <= ::core::u16::MAX as $unsigned { @@ -469,20 +448,20 @@ macro_rules! 
uniform_int_impl { } } -uniform_int_impl! { i8, i8, u8, i32, u32 } -uniform_int_impl! { i16, i16, u16, i32, u32 } -uniform_int_impl! { i32, i32, u32, i32, u32 } -uniform_int_impl! { i64, i64, u64, i64, u64 } -#[cfg(all(rustc_1_26, not(target_os = "emscripten")))] -uniform_int_impl! { i128, i128, u128, u128, u128 } -uniform_int_impl! { isize, isize, usize, isize, usize } -uniform_int_impl! { u8, i8, u8, i32, u32 } -uniform_int_impl! { u16, i16, u16, i32, u32 } -uniform_int_impl! { u32, i32, u32, i32, u32 } -uniform_int_impl! { u64, i64, u64, i64, u64 } -uniform_int_impl! { usize, isize, usize, isize, usize } -#[cfg(all(rustc_1_26, not(target_os = "emscripten")))] -uniform_int_impl! { u128, u128, u128, i128, u128 } +uniform_int_impl! { i8, u8, u32 } +uniform_int_impl! { i16, u16, u32 } +uniform_int_impl! { i32, u32, u32 } +uniform_int_impl! { i64, u64, u64 } +#[cfg(not(target_os = "emscripten"))] +uniform_int_impl! { i128, u128, u128 } +uniform_int_impl! { isize, usize, usize } +uniform_int_impl! { u8, u8, u32 } +uniform_int_impl! { u16, u16, u32 } +uniform_int_impl! { u32, u32, u32 } +uniform_int_impl! { u64, u64, u64 } +uniform_int_impl! { usize, usize, usize } +#[cfg(not(target_os = "emscripten"))] +uniform_int_impl! { u128, u128, u128 } #[cfg(all(feature = "simd_support", feature = "nightly"))] macro_rules! uniform_simd_int_impl { @@ -544,13 +523,13 @@ macro_rules! uniform_simd_int_impl { low: low, // These are really $unsigned values, but store as $ty: range: range.cast(), - zone: zone.cast(), + z: zone.cast(), } } fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X { let range: $unsigned = self.range.cast(); - let zone: $unsigned = self.zone.cast(); + let zone: $unsigned = self.z.cast(); // This might seem very slow, generating a whole new // SIMD vector for every sample rejection. For most uses @@ -646,11 +625,9 @@ uniform_simd_int_impl! { /// multiply and addition. Values produced this way have what equals 22 bits of /// random digits for an `f32`, and 52 for an `f64`. /// -/// [`UniformSampler`]: trait.UniformSampler.html -/// [`new`]: trait.UniformSampler.html#tymethod.new -/// [`new_inclusive`]: trait.UniformSampler.html#tymethod.new_inclusive -/// [`Uniform`]: struct.Uniform.html -/// [`Standard`]: ../struct.Standard.html +/// [`new`]: UniformSampler::new +/// [`new_inclusive`]: UniformSampler::new_inclusive +/// [`Standard`]: crate::distributions::Standard #[derive(Clone, Copy, Debug)] pub struct UniformFloat<X> { low: X, @@ -748,7 +725,7 @@ macro_rules! uniform_float_impl { let low = *low_b.borrow(); let high = *high_b.borrow(); assert!(low.all_lt(high), - "Uniform::sample_single called with low >= high"); + "UniformSampler::sample_single: low >= high"); let mut scale = high - low; loop { @@ -799,7 +776,7 @@ macro_rules! uniform_float_impl { let mask = !scale.finite_mask(); if mask.any() { assert!(low.all_finite() && high.all_finite(), - "Uniform::sample_single called with non-finite boundaries"); + "Uniform::sample_single: low and high must be finite"); scale = scale.decrease_masked(mask); } } @@ -833,17 +810,12 @@ uniform_float_impl! { f64x8, u64x8, f64, u64, 64 - 52 } /// /// Unless you are implementing [`UniformSampler`] for your own types, this type /// should not be used directly, use [`Uniform`] instead. 
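A standalone sketch of the rejection technique described in the `UniformInt` implementation notes above (widening multiply plus a `zone` check), written for `u32` only; `widening_mul` and `sample_in_range` are hypothetical names, not the crate's own code.

```
// Widening multiply: returns the (high, low) words of a * b.
fn widening_mul(a: u32, b: u32) -> (u32, u32) {
    let t = (a as u64) * (b as u64);
    ((t >> 32) as u32, t as u32)
}

// Uniform sample in [low, high), assuming low < high; `next_u32` stands in
// for an RNG's output stream.
fn sample_in_range(mut next_u32: impl FnMut() -> u32, low: u32, high: u32) -> u32 {
    let range = high - low;
    // reject = (MAX + 1) % range, computed without overflow as in the notes above.
    let ints_to_reject = (u32::max_value() - range + 1) % range;
    let zone = u32::max_value() - ints_to_reject;
    loop {
        let (hi, lo) = widening_mul(next_u32(), range);
        if lo <= zone {
            // The high word is uniform in [0, range) once biased samples
            // (low word above `zone`) have been rejected.
            return low + hi;
        }
    }
}
```

For instance, `sample_in_range(|| rng.next_u32(), 1, 7)` with any `RngCore` yields unbiased die rolls.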
-/// -/// [`UniformSampler`]: trait.UniformSampler.html -/// [`Uniform`]: struct.Uniform.html -#[cfg(any(feature = "std", rustc_1_25))] #[derive(Clone, Copy, Debug)] pub struct UniformDuration { mode: UniformDurationMode, offset: u32, } -#[cfg(any(feature = "std", rustc_1_25))] #[derive(Debug, Copy, Clone)] enum UniformDurationMode { Small { @@ -860,12 +832,10 @@ enum UniformDurationMode { } } -#[cfg(any(feature = "std", rustc_1_25))] impl SampleUniform for Duration { type Sampler = UniformDuration; } -#[cfg(any(feature = "std", rustc_1_25))] impl UniformSampler for UniformDuration { type X = Duration; @@ -895,8 +865,8 @@ impl UniformSampler for UniformDuration { let mut high_n = high.subsec_nanos(); if high_n < low_n { - high_s = high_s - 1; - high_n = high_n + 1_000_000_000; + high_s -= 1; + high_n += 1_000_000_000; } let mode = if low_s == high_s { @@ -907,10 +877,10 @@ impl UniformSampler for UniformDuration { } else { let max = high_s .checked_mul(1_000_000_000) - .and_then(|n| n.checked_add(high_n as u64)); + .and_then(|n| n.checked_add(u64::from(high_n))); if let Some(higher_bound) = max { - let lower_bound = low_s * 1_000_000_000 + low_n as u64; + let lower_bound = low_s * 1_000_000_000 + u64::from(low_n); UniformDurationMode::Medium { nanos: Uniform::new_inclusive(lower_bound, higher_bound), } @@ -959,10 +929,10 @@ impl UniformSampler for UniformDuration { #[cfg(test)] mod tests { - use Rng; - use rngs::mock::StepRng; - use distributions::uniform::Uniform; - use distributions::utils::FloatAsSIMD; + use crate::Rng; + use crate::rngs::mock::StepRng; + use crate::distributions::uniform::Uniform; + use crate::distributions::utils::FloatAsSIMD; #[cfg(feature="simd_support")] use packed_simd::*; #[should_panic] @@ -973,7 +943,7 @@ mod tests { #[test] fn test_uniform_good_limits_equal_int() { - let mut rng = ::test::rng(804); + let mut rng = crate::test::rng(804); let dist = Uniform::new_inclusive(10, 10); for _ in 0..20 { assert_eq!(rng.sample(dist), 10); @@ -987,13 +957,14 @@ mod tests { } #[test] + #[cfg(not(miri))] // Miri is too slow fn test_integers() { use core::{i8, i16, i32, i64, isize}; use core::{u8, u16, u32, u64, usize}; - #[cfg(all(rustc_1_26, not(target_os = "emscripten")))] + #[cfg(not(target_os = "emscripten"))] use core::{i128, u128}; - let mut rng = ::test::rng(251); + let mut rng = crate::test::rng(251); macro_rules! t { ($ty:ident, $v:expr, $le:expr, $lt:expr) => {{ for &(low, high) in $v.iter() { @@ -1054,7 +1025,7 @@ mod tests { } t!(i8, i16, i32, i64, isize, u8, u16, u32, u64, usize); - #[cfg(all(rustc_1_26, not(target_os = "emscripten")))] + #[cfg(not(target_os = "emscripten"))] t!(i128, u128); #[cfg(all(feature = "simd_support", feature = "nightly"))] @@ -1071,8 +1042,9 @@ mod tests { } #[test] + #[cfg(not(miri))] // Miri is too slow fn test_floats() { - let mut rng = ::test::rng(252); + let mut rng = crate::test::rng(252); let mut zero_rng = StepRng::new(0, 0); let mut max_rng = StepRng::new(0xffff_ffff_ffff_ffff, 0); macro_rules! 
t { @@ -1155,11 +1127,12 @@ mod tests { #[cfg(all(feature="std", not(target_arch = "wasm32"), not(target_arch = "asmjs")))] + #[cfg(not(miri))] // Miri does not support catching panics fn test_float_assertions() { use std::panic::catch_unwind; use super::SampleUniform; fn range<T: SampleUniform>(low: T, high: T) { - let mut rng = ::test::rng(253); + let mut rng = crate::test::rng(253); rng.gen_range(low, high); } @@ -1209,14 +1182,14 @@ mod tests { #[test] - #[cfg(any(feature = "std", rustc_1_25))] + #[cfg(not(miri))] // Miri is too slow fn test_durations() { #[cfg(feature = "std")] use std::time::Duration; - #[cfg(all(not(feature = "std"), rustc_1_25))] + #[cfg(not(feature = "std"))] use core::time::Duration; - let mut rng = ::test::rng(253); + let mut rng = crate::test::rng(253); let v = &[(Duration::new(10, 50000), Duration::new(100, 1234)), (Duration::new(0, 100), Duration::new(1, 50)), @@ -1232,7 +1205,7 @@ mod tests { #[test] fn test_custom_uniform() { - use distributions::uniform::{UniformSampler, UniformFloat, SampleUniform, SampleBorrow}; + use crate::distributions::uniform::{UniformSampler, UniformFloat, SampleUniform, SampleBorrow}; #[derive(Clone, Copy, PartialEq, PartialOrd)] struct MyF32 { x: f32, @@ -1267,7 +1240,7 @@ mod tests { let (low, high) = (MyF32{ x: 17.0f32 }, MyF32{ x: 22.0f32 }); let uniform = Uniform::new(low, high); - let mut rng = ::test::rng(804); + let mut rng = crate::test::rng(804); for _ in 0..100 { let x: MyF32 = rng.sample(uniform); assert!(low <= x && x < high); @@ -1284,7 +1257,6 @@ mod tests { assert_eq!(r.inner.scale, 5.0); } - #[cfg(rustc_1_27)] #[test] fn test_uniform_from_std_range_inclusive() { let r = Uniform::from(2u32..=6); diff --git a/rand/src/distributions/unit_circle.rs b/rand/src/distributions/unit_circle.rs index 01ab76a..56e75b6 100644 --- a/rand/src/distributions/unit_circle.rs +++ b/rand/src/distributions/unit_circle.rs @@ -6,28 +6,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use Rng; -use distributions::{Distribution, Uniform}; +#![allow(deprecated)] +#![allow(clippy::all)] + +use crate::Rng; +use crate::distributions::{Distribution, Uniform}; /// Samples uniformly from the edge of the unit circle in two dimensions. /// /// Implemented via a method by von Neumann[^1]. /// -/// -/// # Example -/// -/// ``` -/// use rand::distributions::{UnitCircle, Distribution}; -/// -/// let circle = UnitCircle::new(); -/// let v = circle.sample(&mut rand::thread_rng()); -/// println!("{:?} is from the unit circle.", v) -/// ``` -/// /// [^1]: von Neumann, J. (1951) [*Various Techniques Used in Connection with /// Random Digits.*](https://mcnp.lanl.gov/pdf_files/nbs_vonneumann.pdf) /// NBS Appl. Math. Ser., No. 12. Washington, DC: U.S. Government Printing /// Office, pp. 36-38. +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct UnitCircle; @@ -61,7 +54,7 @@ impl Distribution<[f64; 2]> for UnitCircle { #[cfg(test)] mod tests { - use distributions::Distribution; + use crate::distributions::Distribution; use super::UnitCircle; /// Assert that two numbers are almost equal to each other. 
@@ -82,7 +75,7 @@ mod tests { #[test] fn norm() { - let mut rng = ::test::rng(1); + let mut rng = crate::test::rng(1); let dist = UnitCircle::new(); for _ in 0..1000 { let x = dist.sample(&mut rng); @@ -92,10 +85,17 @@ mod tests { #[test] fn value_stability() { - let mut rng = ::test::rng(2); - let dist = UnitCircle::new(); - assert_eq!(dist.sample(&mut rng), [-0.8032118336637037, 0.5956935036263119]); - assert_eq!(dist.sample(&mut rng), [-0.4742919588505423, -0.880367615130018]); - assert_eq!(dist.sample(&mut rng), [0.9297328981467168, 0.368234623716601]); + let mut rng = crate::test::rng(2); + let expected = [ + [-0.9965658683520504, -0.08280380447614634], + [-0.9790853270389644, -0.20345004884984505], + [-0.8449189758898707, 0.5348943112253227], + ]; + let samples = [ + UnitCircle.sample(&mut rng), + UnitCircle.sample(&mut rng), + UnitCircle.sample(&mut rng), + ]; + assert_eq!(samples, expected); } } diff --git a/rand/src/distributions/unit_sphere.rs b/rand/src/distributions/unit_sphere.rs index 37de88b..188f48c 100644 --- a/rand/src/distributions/unit_sphere.rs +++ b/rand/src/distributions/unit_sphere.rs @@ -6,27 +6,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use Rng; -use distributions::{Distribution, Uniform}; +#![allow(deprecated)] +#![allow(clippy::all)] + +use crate::Rng; +use crate::distributions::{Distribution, Uniform}; /// Samples uniformly from the surface of the unit sphere in three dimensions. /// /// Implemented via a method by Marsaglia[^1]. /// -/// -/// # Example -/// -/// ``` -/// use rand::distributions::{UnitSphereSurface, Distribution}; -/// -/// let sphere = UnitSphereSurface::new(); -/// let v = sphere.sample(&mut rand::thread_rng()); -/// println!("{:?} is from the unit sphere surface.", v) -/// ``` -/// /// [^1]: Marsaglia, George (1972). [*Choosing a Point from the Surface of a /// Sphere.*](https://doi.org/10.1214/aoms/1177692644) /// Ann. Math. Statist. 43, no. 2, 645--646. +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct UnitSphereSurface; @@ -56,7 +49,7 @@ impl Distribution<[f64; 3]> for UnitSphereSurface { #[cfg(test)] mod tests { - use distributions::Distribution; + use crate::distributions::Distribution; use super::UnitSphereSurface; /// Assert that two numbers are almost equal to each other. 
@@ -77,7 +70,7 @@ mod tests { #[test] fn norm() { - let mut rng = ::test::rng(1); + let mut rng = crate::test::rng(1); let dist = UnitSphereSurface::new(); for _ in 0..1000 { let x = dist.sample(&mut rng); @@ -87,13 +80,17 @@ mod tests { #[test] fn value_stability() { - let mut rng = ::test::rng(2); - let dist = UnitSphereSurface::new(); - assert_eq!(dist.sample(&mut rng), - [-0.24950027180862533, -0.7552572587896719, 0.6060825747478084]); - assert_eq!(dist.sample(&mut rng), - [0.47604534507233487, -0.797200864987207, -0.3712837328763685]); - assert_eq!(dist.sample(&mut rng), - [0.9795722330927367, 0.18692349236651176, 0.07414747571708524]); + let mut rng = crate::test::rng(2); + let expected = [ + [0.03247542860231647, -0.7830477442152738, 0.6211131755296027], + [-0.09978440840914075, 0.9706650829833128, -0.21875184231323952], + [0.2735582468624679, 0.9435374242279655, -0.1868234852870203], + ]; + let samples = [ + UnitSphereSurface.sample(&mut rng), + UnitSphereSurface.sample(&mut rng), + UnitSphereSurface.sample(&mut rng), + ]; + assert_eq!(samples, expected); } } diff --git a/rand/src/distributions/utils.rs b/rand/src/distributions/utils.rs index d4d3642..3af4e86 100644 --- a/rand/src/distributions/utils.rs +++ b/rand/src/distributions/utils.rs @@ -11,9 +11,9 @@ #[cfg(feature="simd_support")] use packed_simd::*; #[cfg(feature="std")] -use distributions::ziggurat_tables; +use crate::distributions::ziggurat_tables; #[cfg(feature="std")] -use Rng; +use crate::Rng; pub trait WideningMultiply<RHS = Self> { @@ -61,7 +61,7 @@ macro_rules! wmul_impl { wmul_impl! { u8, u16, 8 } wmul_impl! { u16, u32, 16 } wmul_impl! { u32, u64, 32 } -#[cfg(all(rustc_1_26, not(target_os = "emscripten")))] +#[cfg(not(target_os = "emscripten"))] wmul_impl! { u64, u128, 64 } // This code is a translation of the __mulddi3 function in LLVM's @@ -125,9 +125,9 @@ macro_rules! wmul_impl_large { )+ }; } -#[cfg(not(all(rustc_1_26, not(target_os = "emscripten"))))] +#[cfg(target_os = "emscripten")] wmul_impl_large! { u64, 32 } -#[cfg(all(rustc_1_26, not(target_os = "emscripten")))] +#[cfg(not(target_os = "emscripten"))] wmul_impl_large! { u128, 64 } macro_rules! wmul_impl_usize { @@ -249,13 +249,9 @@ pub(crate) trait FloatSIMDUtils { /// Implement functions available in std builds but missing from core primitives #[cfg(not(std))] pub(crate) trait Float : Sized { - type Bits; - fn is_nan(self) -> bool; fn is_infinite(self) -> bool; fn is_finite(self) -> bool; - fn to_bits(self) -> Self::Bits; - fn from_bits(v: Self::Bits) -> Self; } /// Implement functions on f32/f64 to give them APIs similar to SIMD types @@ -289,8 +285,6 @@ macro_rules! scalar_float_impl { ($ty:ident, $uty:ident) => { #[cfg(not(std))] impl Float for $ty { - type Bits = $uty; - #[inline] fn is_nan(self) -> bool { self != self @@ -305,17 +299,6 @@ macro_rules! scalar_float_impl { fn is_finite(self) -> bool { !(self.is_nan() || self.is_infinite()) } - - #[inline] - fn to_bits(self) -> Self::Bits { - unsafe { ::core::mem::transmute(self) } - } - - #[inline] - fn from_bits(v: Self::Bits) -> Self { - // It turns out the safety issues with sNaN were overblown! Hooray! - unsafe { ::core::mem::transmute(v) } - } } impl FloatSIMDUtils for $ty { @@ -383,6 +366,7 @@ macro_rules! 
simd_impl { <$ty>::from_bits(<$uty>::from_bits(self) + <$uty>::from_bits(mask)) } type UInt = $uty; + #[inline] fn cast_from_int(i: Self::UInt) -> Self { i.cast() } } } @@ -464,7 +448,7 @@ pub fn ziggurat<R: Rng + ?Sized, P, Z>( mut pdf: P, mut zero_case: Z) -> f64 where P: FnMut(f64) -> f64, Z: FnMut(&mut R, f64) -> f64 { - use distributions::float::IntoFloat; + use crate::distributions::float::IntoFloat; loop { // As an optimisation we re-implement the conversion to a f64. // From the remaining 12 most significant bits we use 8 to construct `i`. diff --git a/rand/src/distributions/weibull.rs b/rand/src/distributions/weibull.rs index 5fbe10a..483714f 100644 --- a/rand/src/distributions/weibull.rs +++ b/rand/src/distributions/weibull.rs @@ -7,20 +7,13 @@ // except according to those terms. //! The Weibull distribution. +#![allow(deprecated)] -use Rng; -use distributions::{Distribution, OpenClosed01}; +use crate::Rng; +use crate::distributions::{Distribution, OpenClosed01}; /// Samples floating-point numbers according to the Weibull distribution -/// -/// # Example -/// ``` -/// use rand::prelude::*; -/// use rand::distributions::Weibull; -/// -/// let val: f64 = SmallRng::from_entropy().sample(Weibull::new(1., 10.)); -/// println!("{}", val); -/// ``` +#[deprecated(since="0.7.0", note="moved to rand_distr crate")] #[derive(Clone, Copy, Debug)] pub struct Weibull { inv_shape: f64, @@ -48,7 +41,7 @@ impl Distribution<f64> for Weibull { #[cfg(test)] mod tests { - use distributions::Distribution; + use crate::distributions::Distribution; use super::Weibull; #[test] @@ -62,7 +55,7 @@ mod tests { let scale = 1.0; let shape = 2.0; let d = Weibull::new(scale, shape); - let mut rng = ::test::rng(1); + let mut rng = crate::test::rng(1); for _ in 0..1000 { let r = d.sample(&mut rng); assert!(r >= 0.); diff --git a/rand/src/distributions/weighted/alias_method.rs b/rand/src/distributions/weighted/alias_method.rs new file mode 100644 index 0000000..bdd4ba0 --- /dev/null +++ b/rand/src/distributions/weighted/alias_method.rs @@ -0,0 +1,499 @@ +//! This module contains an implementation of alias method for sampling random +//! indices with probabilities proportional to a collection of weights. + +use super::WeightedError; +#[cfg(not(feature = "std"))] +use crate::alloc::vec::Vec; +#[cfg(not(feature = "std"))] +use crate::alloc::vec; +use core::fmt; +use core::iter::Sum; +use core::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Sub, SubAssign}; +use crate::distributions::uniform::SampleUniform; +use crate::distributions::Distribution; +use crate::distributions::Uniform; +use crate::Rng; + +/// A distribution using weighted sampling to pick a discretely selected item. +/// +/// Sampling a [`WeightedIndex<W>`] distribution returns the index of a randomly +/// selected element from the vector used to create the [`WeightedIndex<W>`]. +/// The chance of a given element being picked is proportional to the value of +/// the element. The weights can have any type `W` for which a implementation of +/// [`Weight`] exists. +/// +/// # Performance +/// +/// Given that `n` is the number of items in the vector used to create an +/// [`WeightedIndex<W>`], [`WeightedIndex<W>`] will require `O(n)` amount of +/// memory. More specifically it takes up some constant amount of memory plus +/// the vector used to create it and a [`Vec<u32>`] with capacity `n`. +/// +/// Time complexity for the creation of a [`WeightedIndex<W>`] is `O(n)`. 
+/// Sampling is `O(1)`, it makes a call to [`Uniform<u32>::sample`] and a call +/// to [`Uniform<W>::sample`]. +/// +/// # Example +/// +/// ``` +/// use rand::distributions::weighted::alias_method::WeightedIndex; +/// use rand::prelude::*; +/// +/// let choices = vec!['a', 'b', 'c']; +/// let weights = vec![2, 1, 1]; +/// let dist = WeightedIndex::new(weights).unwrap(); +/// let mut rng = thread_rng(); +/// for _ in 0..100 { +/// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c' +/// println!("{}", choices[dist.sample(&mut rng)]); +/// } +/// +/// let items = [('a', 0), ('b', 3), ('c', 7)]; +/// let dist2 = WeightedIndex::new(items.iter().map(|item| item.1).collect()).unwrap(); +/// for _ in 0..100 { +/// // 0% chance to print 'a', 30% chance to print 'b', 70% chance to print 'c' +/// println!("{}", items[dist2.sample(&mut rng)].0); +/// } +/// ``` +/// +/// [`WeightedIndex<W>`]: crate::distributions::weighted::alias_method::WeightedIndex +/// [`Weight`]: crate::distributions::weighted::alias_method::Weight +/// [`Vec<u32>`]: Vec +/// [`Uniform<u32>::sample`]: Distribution::sample +/// [`Uniform<W>::sample`]: Distribution::sample +pub struct WeightedIndex<W: Weight> { + aliases: Vec<u32>, + no_alias_odds: Vec<W>, + uniform_index: Uniform<u32>, + uniform_within_weight_sum: Uniform<W>, +} + +impl<W: Weight> WeightedIndex<W> { + /// Creates a new [`WeightedIndex`]. + /// + /// Returns an error if: + /// - The vector is empty. + /// - The vector is longer than `u32::MAX`. + /// - For any weight `w`: `w < 0` or `w > max` where `max = W::MAX / + /// weights.len()`. + /// - The sum of weights is zero. + pub fn new(weights: Vec<W>) -> Result<Self, WeightedError> { + let n = weights.len(); + if n == 0 { + return Err(WeightedError::NoItem); + } else if n > ::core::u32::MAX as usize { + return Err(WeightedError::TooMany); + } + let n = n as u32; + + let max_weight_size = W::try_from_u32_lossy(n) + .map(|n| W::MAX / n) + .unwrap_or(W::ZERO); + if !weights + .iter() + .all(|&w| W::ZERO <= w && w <= max_weight_size) + { + return Err(WeightedError::InvalidWeight); + } + + // The sum of weights will represent 100% of no alias odds. + let weight_sum = Weight::sum(weights.as_slice()); + // Prevent floating point overflow due to rounding errors. + let weight_sum = if weight_sum > W::MAX { + W::MAX + } else { + weight_sum + }; + if weight_sum == W::ZERO { + return Err(WeightedError::AllWeightsZero); + } + + // `weight_sum` would have been zero if `try_from_lossy` causes an error here. + let n_converted = W::try_from_u32_lossy(n).unwrap(); + + let mut no_alias_odds = weights; + for odds in no_alias_odds.iter_mut() { + *odds *= n_converted; + // Prevent floating point overflow due to rounding errors. + *odds = if *odds > W::MAX { W::MAX } else { *odds }; + } + + /// This struct is designed to contain three data structures at once, + /// sharing the same memory. More precisely it contains two linked lists + /// and an alias map, which will be the output of this method. To keep + /// the three data structures from getting in each other's way, it must + /// be ensured that a single index is only ever in one of them at the + /// same time. 
+ struct Aliases { + aliases: Vec<u32>, + smalls_head: u32, + bigs_head: u32, + } + + impl Aliases { + fn new(size: u32) -> Self { + Aliases { + aliases: vec![0; size as usize], + smalls_head: ::core::u32::MAX, + bigs_head: ::core::u32::MAX, + } + } + + fn push_small(&mut self, idx: u32) { + self.aliases[idx as usize] = self.smalls_head; + self.smalls_head = idx; + } + + fn push_big(&mut self, idx: u32) { + self.aliases[idx as usize] = self.bigs_head; + self.bigs_head = idx; + } + + fn pop_small(&mut self) -> u32 { + let popped = self.smalls_head; + self.smalls_head = self.aliases[popped as usize]; + popped + } + + fn pop_big(&mut self) -> u32 { + let popped = self.bigs_head; + self.bigs_head = self.aliases[popped as usize]; + popped + } + + fn smalls_is_empty(&self) -> bool { + self.smalls_head == ::core::u32::MAX + } + + fn bigs_is_empty(&self) -> bool { + self.bigs_head == ::core::u32::MAX + } + + fn set_alias(&mut self, idx: u32, alias: u32) { + self.aliases[idx as usize] = alias; + } + } + + let mut aliases = Aliases::new(n); + + // Split indices into those with small weights and those with big weights. + for (index, &odds) in no_alias_odds.iter().enumerate() { + if odds < weight_sum { + aliases.push_small(index as u32); + } else { + aliases.push_big(index as u32); + } + } + + // Build the alias map by finding an alias with big weight for each index with + // small weight. + while !aliases.smalls_is_empty() && !aliases.bigs_is_empty() { + let s = aliases.pop_small(); + let b = aliases.pop_big(); + + aliases.set_alias(s, b); + no_alias_odds[b as usize] = no_alias_odds[b as usize] + - weight_sum + + no_alias_odds[s as usize]; + + if no_alias_odds[b as usize] < weight_sum { + aliases.push_small(b); + } else { + aliases.push_big(b); + } + } + + // The remaining indices should have no alias odds of about 100%. This is due to + // numeric accuracy. Otherwise they would be exactly 100%. + while !aliases.smalls_is_empty() { + no_alias_odds[aliases.pop_small() as usize] = weight_sum; + } + while !aliases.bigs_is_empty() { + no_alias_odds[aliases.pop_big() as usize] = weight_sum; + } + + // Prepare distributions for sampling. Creating them beforehand improves + // sampling performance. 
+ let uniform_index = Uniform::new(0, n); + let uniform_within_weight_sum = Uniform::new(W::ZERO, weight_sum); + + Ok(Self { + aliases: aliases.aliases, + no_alias_odds, + uniform_index, + uniform_within_weight_sum, + }) + } +} + +impl<W: Weight> Distribution<usize> for WeightedIndex<W> { + fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize { + let candidate = rng.sample(self.uniform_index); + if rng.sample(&self.uniform_within_weight_sum) < self.no_alias_odds[candidate as usize] { + candidate as usize + } else { + self.aliases[candidate as usize] as usize + } + } +} + +impl<W: Weight> fmt::Debug for WeightedIndex<W> +where + W: fmt::Debug, + Uniform<W>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("WeightedIndex") + .field("aliases", &self.aliases) + .field("no_alias_odds", &self.no_alias_odds) + .field("uniform_index", &self.uniform_index) + .field("uniform_within_weight_sum", &self.uniform_within_weight_sum) + .finish() + } +} + +impl<W: Weight> Clone for WeightedIndex<W> +where + Uniform<W>: Clone, +{ + fn clone(&self) -> Self { + Self { + aliases: self.aliases.clone(), + no_alias_odds: self.no_alias_odds.clone(), + uniform_index: self.uniform_index.clone(), + uniform_within_weight_sum: self.uniform_within_weight_sum.clone(), + } + } +} + +/// Trait that must be implemented for weights, that are used with +/// [`WeightedIndex`]. Currently no guarantees on the correctness of +/// [`WeightedIndex`] are given for custom implementations of this trait. +pub trait Weight: + Sized + + Copy + + SampleUniform + + PartialOrd + + Add<Output = Self> + + AddAssign + + Sub<Output = Self> + + SubAssign + + Mul<Output = Self> + + MulAssign + + Div<Output = Self> + + DivAssign + + Sum +{ + /// Maximum number representable by `Self`. + const MAX: Self; + + /// Element of `Self` equivalent to 0. + const ZERO: Self; + + /// Produce an instance of `Self` from a `u32` value, or return `None` if + /// out of range. Loss of precision (where `Self` is a floating point type) + /// is acceptable. + fn try_from_u32_lossy(n: u32) -> Option<Self>; + + /// Sums all values in slice `values`. + fn sum(values: &[Self]) -> Self { + values.iter().map(|x| *x).sum() + } +} + +macro_rules! impl_weight_for_float { + ($T: ident) => { + impl Weight for $T { + const MAX: Self = ::core::$T::MAX; + const ZERO: Self = 0.0; + + fn try_from_u32_lossy(n: u32) -> Option<Self> { + Some(n as $T) + } + + fn sum(values: &[Self]) -> Self { + pairwise_sum(values) + } + } + }; +} + +/// In comparison to naive accumulation, the pairwise sum algorithm reduces +/// rounding errors when there are many floating point values. +fn pairwise_sum<T: Weight>(values: &[T]) -> T { + if values.len() <= 32 { + values.iter().map(|x| *x).sum() + } else { + let mid = values.len() / 2; + let (a, b) = values.split_at(mid); + pairwise_sum(a) + pairwise_sum(b) + } +} + +macro_rules! 
impl_weight_for_int { + ($T: ident) => { + impl Weight for $T { + const MAX: Self = ::core::$T::MAX; + const ZERO: Self = 0; + + fn try_from_u32_lossy(n: u32) -> Option<Self> { + let n_converted = n as Self; + if n_converted >= Self::ZERO && n_converted as u32 == n { + Some(n_converted) + } else { + None + } + } + } + }; +} + +impl_weight_for_float!(f64); +impl_weight_for_float!(f32); +impl_weight_for_int!(usize); +#[cfg(not(target_os = "emscripten"))] +impl_weight_for_int!(u128); +impl_weight_for_int!(u64); +impl_weight_for_int!(u32); +impl_weight_for_int!(u16); +impl_weight_for_int!(u8); +impl_weight_for_int!(isize); +#[cfg(not(target_os = "emscripten"))] +impl_weight_for_int!(i128); +impl_weight_for_int!(i64); +impl_weight_for_int!(i32); +impl_weight_for_int!(i16); +impl_weight_for_int!(i8); + +#[cfg(test)] +mod test { + use super::*; + + #[test] + #[cfg(not(miri))] // Miri is too slow + fn test_weighted_index_f32() { + test_weighted_index(f32::into); + + // Floating point special cases + assert_eq!( + WeightedIndex::new(vec![::core::f32::INFINITY]).unwrap_err(), + WeightedError::InvalidWeight + ); + assert_eq!( + WeightedIndex::new(vec![-0_f32]).unwrap_err(), + WeightedError::AllWeightsZero + ); + assert_eq!( + WeightedIndex::new(vec![-1_f32]).unwrap_err(), + WeightedError::InvalidWeight + ); + assert_eq!( + WeightedIndex::new(vec![-::core::f32::INFINITY]).unwrap_err(), + WeightedError::InvalidWeight + ); + assert_eq!( + WeightedIndex::new(vec![::core::f32::NAN]).unwrap_err(), + WeightedError::InvalidWeight + ); + } + + #[cfg(not(target_os = "emscripten"))] + #[test] + #[cfg(not(miri))] // Miri is too slow + fn test_weighted_index_u128() { + test_weighted_index(|x: u128| x as f64); + } + + #[cfg(all(rustc_1_26, not(target_os = "emscripten")))] + #[test] + #[cfg(not(miri))] // Miri is too slow + fn test_weighted_index_i128() { + test_weighted_index(|x: i128| x as f64); + + // Signed integer special cases + assert_eq!( + WeightedIndex::new(vec![-1_i128]).unwrap_err(), + WeightedError::InvalidWeight + ); + assert_eq!( + WeightedIndex::new(vec![::core::i128::MIN]).unwrap_err(), + WeightedError::InvalidWeight + ); + } + + #[test] + #[cfg(not(miri))] // Miri is too slow + fn test_weighted_index_u8() { + test_weighted_index(u8::into); + } + + #[test] + #[cfg(not(miri))] // Miri is too slow + fn test_weighted_index_i8() { + test_weighted_index(i8::into); + + // Signed integer special cases + assert_eq!( + WeightedIndex::new(vec![-1_i8]).unwrap_err(), + WeightedError::InvalidWeight + ); + assert_eq!( + WeightedIndex::new(vec![::core::i8::MIN]).unwrap_err(), + WeightedError::InvalidWeight + ); + } + + fn test_weighted_index<W: Weight, F: Fn(W) -> f64>(w_to_f64: F) + where + WeightedIndex<W>: fmt::Debug, + { + const NUM_WEIGHTS: u32 = 10; + const ZERO_WEIGHT_INDEX: u32 = 3; + const NUM_SAMPLES: u32 = 15000; + let mut rng = crate::test::rng(0x9c9fa0b0580a7031); + + let weights = { + let mut weights = Vec::with_capacity(NUM_WEIGHTS as usize); + let random_weight_distribution = crate::distributions::Uniform::new_inclusive( + W::ZERO, + W::MAX / W::try_from_u32_lossy(NUM_WEIGHTS).unwrap(), + ); + for _ in 0..NUM_WEIGHTS { + weights.push(rng.sample(&random_weight_distribution)); + } + weights[ZERO_WEIGHT_INDEX as usize] = W::ZERO; + weights + }; + let weight_sum = weights.iter().map(|w| *w).sum::<W>(); + let expected_counts = weights + .iter() + .map(|&w| w_to_f64(w) / w_to_f64(weight_sum) * NUM_SAMPLES as f64) + .collect::<Vec<f64>>(); + let weight_distribution = WeightedIndex::new(weights).unwrap(); 
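As an aside on the `pairwise_sum` helper defined above: with `f32` weights, naive left-to-right accumulation can silently drop small weights once the running total is large, which is exactly the rounding error the recursive splitting limits. A standalone illustration with hypothetical numbers, not part of the diff:

```rust
// Same shape as the crate's helper, specialised to f32 for the demo.
fn pairwise_sum(values: &[f32]) -> f32 {
    if values.len() <= 32 {
        values.iter().sum()
    } else {
        let mid = values.len() / 2;
        let (a, b) = values.split_at(mid);
        pairwise_sum(a) + pairwise_sum(b)
    }
}

fn main() {
    // One huge weight followed by a million tiny ones.
    let mut weights = vec![1.0e8_f32];
    weights.extend(std::iter::repeat(1.0_f32).take(1_000_000));
    let naive: f32 = weights.iter().sum();
    let pairwise = pairwise_sum(&weights);
    // Naive accumulation loses the small weights entirely (it stays at 1.0e8);
    // the pairwise result stays close to the true total of 1.01e8.
    println!("naive = {}, pairwise = {}", naive, pairwise);
}
```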
+ + let mut counts = vec![0; NUM_WEIGHTS as usize]; + for _ in 0..NUM_SAMPLES { + counts[rng.sample(&weight_distribution)] += 1; + } + + assert_eq!(counts[ZERO_WEIGHT_INDEX as usize], 0); + for (count, expected_count) in counts.into_iter().zip(expected_counts) { + let difference = (count as f64 - expected_count).abs(); + let max_allowed_difference = NUM_SAMPLES as f64 / NUM_WEIGHTS as f64 * 0.1; + assert!(difference <= max_allowed_difference); + } + + assert_eq!( + WeightedIndex::<W>::new(vec![]).unwrap_err(), + WeightedError::NoItem + ); + assert_eq!( + WeightedIndex::new(vec![W::ZERO]).unwrap_err(), + WeightedError::AllWeightsZero + ); + assert_eq!( + WeightedIndex::new(vec![W::MAX, W::MAX]).unwrap_err(), + WeightedError::InvalidWeight + ); + } +} diff --git a/rand/src/distributions/weighted.rs b/rand/src/distributions/weighted/mod.rs index 01c8fe6..2711637 100644 --- a/rand/src/distributions/weighted.rs +++ b/rand/src/distributions/weighted/mod.rs @@ -6,14 +6,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use Rng; -use distributions::Distribution; -use distributions::uniform::{UniformSampler, SampleUniform, SampleBorrow}; -use ::core::cmp::PartialOrd; +//! Weighted index sampling +//! +//! This module provides two implementations for sampling indices: +//! +//! * [`WeightedIndex`] allows `O(log N)` sampling +//! * [`alias_method::WeightedIndex`] allows `O(1)` sampling, but with +//! much greater set-up cost +//! +//! [`alias_method::WeightedIndex`]: alias_method/struct.WeightedIndex.html + +pub mod alias_method; + +use crate::Rng; +use crate::distributions::Distribution; +use crate::distributions::uniform::{UniformSampler, SampleUniform, SampleBorrow}; +use core::cmp::PartialOrd; use core::fmt; // Note that this whole module is only imported if feature="alloc" is enabled. -#[cfg(not(feature="std"))] use alloc::vec::Vec; +#[cfg(not(feature="std"))] use crate::alloc::vec::Vec; /// A distribution using weighted sampling to pick a discretely selected /// item. @@ -40,9 +52,9 @@ use core::fmt; /// `N` is the number of weights. /// /// Sampling from `WeightedIndex` will result in a single call to -/// [`Uniform<X>::sample`], which typically will request a single value from -/// the underlying [`RngCore`], though the exact number depends on the -/// implementaiton of [`Uniform<X>::sample`]. +/// `Uniform<X>::sample` (method of the [`Distribution`] trait), which typically +/// will request a single value from the underlying [`RngCore`], though the +/// exact number depends on the implementaiton of `Uniform<X>::sample`. /// /// # Example /// @@ -67,12 +79,12 @@ use core::fmt; /// } /// ``` /// -/// [`Uniform<X>`]: struct.Uniform.html -/// [`Uniform<X>::sample`]: struct.Uniform.html#method.sample -/// [`RngCore`]: ../trait.RngCore.html +/// [`Uniform<X>`]: crate::distributions::uniform::Uniform +/// [`RngCore`]: crate::RngCore #[derive(Debug, Clone)] pub struct WeightedIndex<X: SampleUniform + PartialOrd> { cumulative_weights: Vec<X>, + total_weight: X, weight_distribution: X::Sampler, } @@ -84,8 +96,7 @@ impl<X: SampleUniform + PartialOrd> WeightedIndex<X> { /// Returns an error if the iterator is empty, if any weight is `< 0`, or /// if its total value is 0. 
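The new module documentation above distinguishes the cumulative-weight `WeightedIndex` (binary search, `O(log N)` per sample) from `alias_method::WeightedIndex` (`O(1)` per sample, much heavier construction). A hedged sketch constructing both from the same weights, using the paths as laid out in this version of the crate:

```rust
use rand::distributions::Distribution;
use rand::distributions::weighted::{alias_method, WeightedIndex};

fn main() {
    let weights = vec![2u32, 1, 1];
    let mut rng = rand::thread_rng();

    // Cheap to build, O(log N) to sample.
    let cdf_based = WeightedIndex::new(weights.clone()).unwrap();
    // More expensive to build, O(1) to sample.
    let alias_based = alias_method::WeightedIndex::new(weights).unwrap();

    // Both yield an index into the original weight slice.
    println!("{} {}", cdf_based.sample(&mut rng), alias_based.sample(&mut rng));
}
```

Given the set-up cost noted in the module docs, the alias table only pays off when many samples are drawn from one fixed set of weights.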
/// - /// [`Distribution`]: trait.Distribution.html - /// [`Uniform<X>`]: struct.Uniform.html + /// [`Uniform<X>`]: crate::distributions::uniform::Uniform pub fn new<I>(weights: I) -> Result<WeightedIndex<X>, WeightedError> where I: IntoIterator, I::Item: SampleBorrow<X>, @@ -100,13 +111,13 @@ impl<X: SampleUniform + PartialOrd> WeightedIndex<X> { let zero = <X as Default>::default(); if total_weight < zero { - return Err(WeightedError::NegativeWeight); + return Err(WeightedError::InvalidWeight); } let mut weights = Vec::<X>::with_capacity(iter.size_hint().0); for w in iter { if *w.borrow() < zero { - return Err(WeightedError::NegativeWeight); + return Err(WeightedError::InvalidWeight); } weights.push(total_weight.clone()); total_weight += w.borrow(); @@ -115,9 +126,98 @@ impl<X: SampleUniform + PartialOrd> WeightedIndex<X> { if total_weight == zero { return Err(WeightedError::AllWeightsZero); } - let distr = X::Sampler::new(zero, total_weight); + let distr = X::Sampler::new(zero, total_weight.clone()); - Ok(WeightedIndex { cumulative_weights: weights, weight_distribution: distr }) + Ok(WeightedIndex { cumulative_weights: weights, total_weight, weight_distribution: distr }) + } + + /// Update a subset of weights, without changing the number of weights. + /// + /// `new_weights` must be sorted by the index. + /// + /// Using this method instead of `new` might be more efficient if only a small number of + /// weights is modified. No allocations are performed, unless the weight type `X` uses + /// allocation internally. + /// + /// In case of error, `self` is not modified. + pub fn update_weights(&mut self, new_weights: &[(usize, &X)]) -> Result<(), WeightedError> + where X: for<'a> ::core::ops::AddAssign<&'a X> + + for<'a> ::core::ops::SubAssign<&'a X> + + Clone + + Default { + if new_weights.is_empty() { + return Ok(()); + } + + let zero = <X as Default>::default(); + + let mut total_weight = self.total_weight.clone(); + + // Check for errors first, so we don't modify `self` in case something + // goes wrong. + let mut prev_i = None; + for &(i, w) in new_weights { + if let Some(old_i) = prev_i { + if old_i >= i { + return Err(WeightedError::InvalidWeight); + } + } + if *w < zero { + return Err(WeightedError::InvalidWeight); + } + if i >= self.cumulative_weights.len() + 1 { + return Err(WeightedError::TooMany); + } + + let mut old_w = if i < self.cumulative_weights.len() { + self.cumulative_weights[i].clone() + } else { + self.total_weight.clone() + }; + if i > 0 { + old_w -= &self.cumulative_weights[i - 1]; + } + + total_weight -= &old_w; + total_weight += w; + prev_i = Some(i); + } + if total_weight == zero { + return Err(WeightedError::AllWeightsZero); + } + + // Update the weights. Because we checked all the preconditions in the + // previous loop, this should never panic. + let mut iter = new_weights.iter(); + + let mut prev_weight = zero.clone(); + let mut next_new_weight = iter.next(); + let &(first_new_index, _) = next_new_weight.unwrap(); + let mut cumulative_weight = if first_new_index > 0 { + self.cumulative_weights[first_new_index - 1].clone() + } else { + zero.clone() + }; + for i in first_new_index..self.cumulative_weights.len() { + match next_new_weight { + Some(&(j, w)) if i == j => { + cumulative_weight += w; + next_new_weight = iter.next(); + }, + _ => { + let mut tmp = self.cumulative_weights[i].clone(); + tmp -= &prev_weight; // We know this is positive. 
+ cumulative_weight += &tmp; + } + } + prev_weight = cumulative_weight.clone(); + core::mem::swap(&mut prev_weight, &mut self.cumulative_weights[i]); + } + + self.total_weight = total_weight; + self.weight_distribution = X::Sampler::new(zero, self.total_weight.clone()); + + Ok(()) } } @@ -137,8 +237,9 @@ mod test { use super::*; #[test] + #[cfg(not(miri))] // Miri is too slow fn test_weightedindex() { - let mut r = ::test::rng(700); + let mut r = crate::test::rng(700); const N_REPS: u32 = 5000; let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7]; let total_weight = weights.iter().sum::<u32>() as f32; @@ -186,31 +287,61 @@ mod test { assert_eq!(WeightedIndex::new(&[10][0..0]).unwrap_err(), WeightedError::NoItem); assert_eq!(WeightedIndex::new(&[0]).unwrap_err(), WeightedError::AllWeightsZero); - assert_eq!(WeightedIndex::new(&[10, 20, -1, 30]).unwrap_err(), WeightedError::NegativeWeight); - assert_eq!(WeightedIndex::new(&[-10, 20, 1, 30]).unwrap_err(), WeightedError::NegativeWeight); - assert_eq!(WeightedIndex::new(&[-10]).unwrap_err(), WeightedError::NegativeWeight); + assert_eq!(WeightedIndex::new(&[10, 20, -1, 30]).unwrap_err(), WeightedError::InvalidWeight); + assert_eq!(WeightedIndex::new(&[-10, 20, 1, 30]).unwrap_err(), WeightedError::InvalidWeight); + assert_eq!(WeightedIndex::new(&[-10]).unwrap_err(), WeightedError::InvalidWeight); + } + + #[test] + fn test_update_weights() { + let data = [ + (&[10u32, 2, 3, 4][..], + &[(1, &100), (2, &4)][..], // positive change + &[10, 100, 4, 4][..]), + (&[1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7][..], + &[(2, &1), (5, &1), (13, &100)][..], // negative change and last element + &[1u32, 2, 1, 0, 5, 1, 7, 1, 2, 3, 4, 5, 6, 100][..]), + ]; + + for (weights, update, expected_weights) in data.into_iter() { + let total_weight = weights.iter().sum::<u32>(); + let mut distr = WeightedIndex::new(weights.to_vec()).unwrap(); + assert_eq!(distr.total_weight, total_weight); + + distr.update_weights(update).unwrap(); + let expected_total_weight = expected_weights.iter().sum::<u32>(); + let expected_distr = WeightedIndex::new(expected_weights.to_vec()).unwrap(); + assert_eq!(distr.total_weight, expected_total_weight); + assert_eq!(distr.total_weight, expected_distr.total_weight); + assert_eq!(distr.cumulative_weights, expected_distr.cumulative_weights); + } } } /// Error type returned from `WeightedIndex::new`. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum WeightedError { - /// The provided iterator contained no items. + /// The provided weight collection contains no items. NoItem, - /// A weight lower than zero was used. - NegativeWeight, + /// A weight is either less than zero, greater than the supported maximum or + /// otherwise invalid. + InvalidWeight, - /// All items in the provided iterator had a weight of zero. + /// All items in the provided weight collection are zero. 
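`update_weights`, added above, patches the cumulative-weight table in place instead of rebuilding it, and `test_update_weights` exercises it with the data reused in this sketch (hypothetical driver code, not part of the diff):

```rust
use rand::distributions::{Distribution, WeightedIndex};

fn main() {
    let mut rng = rand::thread_rng();
    let mut dist = WeightedIndex::new(vec![10u32, 2, 3, 4]).unwrap();

    // Raise weight 1 and change weight 2; entries must be sorted by index
    // and may cover only a subset of the weights.
    dist.update_weights(&[(1, &100u32), (2, &4)]).unwrap();

    // Index 1 (weight 100 of 118) now dominates the distribution.
    println!("sampled index: {}", dist.sample(&mut rng));
}
```

On error (unsorted indices, an invalid weight, or a zero total) the method leaves `self` untouched, per the doc comment above.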
AllWeightsZero, + + /// Too many weights are provided (length greater than `u32::MAX`) + TooMany, } impl WeightedError { fn msg(&self) -> &str { match *self { - WeightedError::NoItem => "No items found", - WeightedError::NegativeWeight => "Item has negative weight", - WeightedError::AllWeightsZero => "All items had weight zero", + WeightedError::NoItem => "No weights provided.", + WeightedError::InvalidWeight => "A weight is invalid.", + WeightedError::AllWeightsZero => "All weights are zero.", + WeightedError::TooMany => "Too many weights (hit u32::MAX)", } } } @@ -220,7 +351,7 @@ impl ::std::error::Error for WeightedError { fn description(&self) -> &str { self.msg() } - fn cause(&self) -> Option<&::std::error::Error> { + fn cause(&self) -> Option<&dyn (::std::error::Error)> { None } } diff --git a/rand/src/lib.rs b/rand/src/lib.rs index ca231b5..b4167c3 100644 --- a/rand/src/lib.rs +++ b/rand/src/lib.rs @@ -17,7 +17,7 @@ //! To get you started quickly, the easiest and highest-level way to get //! a random value is to use [`random()`]; alternatively you can use //! [`thread_rng()`]. The [`Rng`] trait provides a useful API on all RNGs, while -//! the [`distributions` module] and [`seq` module] provide further +//! the [`distributions`] and [`seq`] modules provide further //! functionality on top of RNGs. //! //! ``` @@ -39,12 +39,6 @@ //! //! For the user guide and futher documentation, please read //! [The Rust Rand Book](https://rust-random.github.io/book). -//! -//! [`distributions` module]: distributions/index.html -//! [`random()`]: fn.random.html -//! [`Rng`]: trait.Rng.html -//! [`seq` module]: seq/index.html -//! [`thread_rng()`]: fn.thread_rng.html #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png", @@ -56,112 +50,64 @@ #![doc(test(attr(allow(unused_variables), deny(warnings))))] #![cfg_attr(not(feature="std"), no_std)] -#![cfg_attr(all(feature="alloc", not(feature="std")), feature(alloc))] #![cfg_attr(all(feature="simd_support", feature="nightly"), feature(stdsimd))] -#[cfg(feature = "std")] extern crate core; -#[cfg(all(feature = "alloc", not(feature="std")))] #[macro_use] extern crate alloc; - -#[cfg(feature="simd_support")] extern crate packed_simd; +#![allow(clippy::excessive_precision, clippy::unreadable_literal, clippy::float_cmp)] -#[cfg(feature = "rand_os")] -extern crate rand_os; +#[cfg(all(feature="alloc", not(feature="std")))] +extern crate alloc; -extern crate rand_core; -extern crate rand_isaac; // only for deprecations -extern crate rand_chacha; // only for deprecations -extern crate rand_hc; -extern crate rand_pcg; -extern crate rand_xorshift; +#[cfg(feature = "getrandom")] +use getrandom_package as getrandom; -#[cfg(feature = "log")] #[macro_use] extern crate log; #[allow(unused)] -#[cfg(not(feature = "log"))] macro_rules! trace { ($($x:tt)*) => () } +macro_rules! trace { ($($x:tt)*) => ( + #[cfg(feature = "log")] { + log::trace!($($x)*) + } +) } #[allow(unused)] -#[cfg(not(feature = "log"))] macro_rules! debug { ($($x:tt)*) => () } +macro_rules! debug { ($($x:tt)*) => ( + #[cfg(feature = "log")] { + log::debug!($($x)*) + } +) } #[allow(unused)] -#[cfg(not(feature = "log"))] macro_rules! info { ($($x:tt)*) => () } +macro_rules! info { ($($x:tt)*) => ( + #[cfg(feature = "log")] { + log::info!($($x)*) + } +) } #[allow(unused)] -#[cfg(not(feature = "log"))] macro_rules! warn { ($($x:tt)*) => () } +macro_rules! 
warn { ($($x:tt)*) => ( + #[cfg(feature = "log")] { + log::warn!($($x)*) + } +) } #[allow(unused)] -#[cfg(not(feature = "log"))] macro_rules! error { ($($x:tt)*) => () } - +macro_rules! error { ($($x:tt)*) => ( + #[cfg(feature = "log")] { + log::error!($($x)*) + } +) } // Re-exports from rand_core -pub use rand_core::{RngCore, CryptoRng, SeedableRng}; -pub use rand_core::{ErrorKind, Error}; +pub use rand_core::{RngCore, CryptoRng, SeedableRng, Error}; // Public exports -#[cfg(feature="std")] pub use rngs::thread::thread_rng; +#[cfg(feature="std")] pub use crate::rngs::thread::thread_rng; // Public modules pub mod distributions; pub mod prelude; -#[deprecated(since="0.6.0")] -pub mod prng; pub mod rngs; pub mod seq; -//////////////////////////////////////////////////////////////////////////////// -// Compatibility re-exports. Documentation is hidden; will be removed eventually. - -#[doc(hidden)] mod deprecated; - -#[allow(deprecated)] -#[doc(hidden)] pub use deprecated::ReseedingRng; - -#[allow(deprecated)] -#[cfg(feature="std")] #[doc(hidden)] pub use deprecated::EntropyRng; - -#[allow(deprecated)] -#[cfg(feature="rand_os")] -#[doc(hidden)] -pub use deprecated::OsRng; - -#[allow(deprecated)] -#[doc(hidden)] pub use deprecated::{ChaChaRng, IsaacRng, Isaac64Rng, XorShiftRng}; -#[allow(deprecated)] -#[doc(hidden)] pub use deprecated::StdRng; - - -#[allow(deprecated)] -#[doc(hidden)] -pub mod jitter { - pub use deprecated::JitterRng; - pub use rngs::TimerError; -} -#[allow(deprecated)] -#[cfg(feature="rand_os")] -#[doc(hidden)] -pub mod os { - pub use deprecated::OsRng; -} -#[allow(deprecated)] -#[doc(hidden)] -pub mod chacha { - pub use deprecated::ChaChaRng; -} -#[allow(deprecated)] -#[doc(hidden)] -pub mod isaac { - pub use deprecated::{IsaacRng, Isaac64Rng}; -} -#[allow(deprecated)] -#[cfg(feature="std")] -#[doc(hidden)] -pub mod read { - pub use deprecated::ReadRng; -} - -#[allow(deprecated)] -#[cfg(feature="std")] #[doc(hidden)] pub use deprecated::ThreadRng; - -//////////////////////////////////////////////////////////////////////////////// - use core::{mem, slice}; -use distributions::{Distribution, Standard}; -use distributions::uniform::{SampleUniform, UniformSampler, SampleBorrow}; +use core::num::Wrapping; +use crate::distributions::{Distribution, Standard}; +use crate::distributions::uniform::{SampleUniform, UniformSampler, SampleBorrow}; /// An automatically-implemented extension trait on [`RngCore`] providing high-level /// generic methods for sampling values and other convenience methods. @@ -200,13 +146,9 @@ use distributions::uniform::{SampleUniform, UniformSampler, SampleBorrow}; /// /// # let v = foo(&mut thread_rng()); /// ``` -/// -/// [`RngCore`]: trait.RngCore.html pub trait Rng: RngCore { /// Return a random value supporting the [`Standard`] distribution. /// - /// [`Standard`]: distributions/struct.Standard.html - /// /// # Example /// /// ``` @@ -217,8 +159,31 @@ pub trait Rng: RngCore { /// println!("{}", x); /// println!("{:?}", rng.gen::<(f64, bool)>()); /// ``` + /// + /// # Arrays and tuples + /// + /// The `rng.gen()` method is able to generate arrays (up to 32 elements) + /// and tuples (up to 12 elements), so long as all element types can be + /// generated. + /// + /// For arrays of integers, especially for those with small element types + /// (< 64 bit), it will likely be faster to instead use [`Rng::fill`]. 
+ /// + /// ``` + /// use rand::{thread_rng, Rng}; + /// + /// let mut rng = thread_rng(); + /// let tuple: (u8, i32, char) = rng.gen(); // arbitrary tuple support + /// + /// let arr1: [f32; 32] = rng.gen(); // array construction + /// let mut arr2 = [0u8; 128]; + /// rng.fill(&mut arr2); // array fill + /// ``` + /// + /// [`Standard`]: distributions::Standard #[inline] - fn gen<T>(&mut self) -> T where Standard: Distribution<T> { + fn gen<T>(&mut self) -> T + where Standard: Distribution<T> { Standard.sample(self) } @@ -245,10 +210,12 @@ pub trait Rng: RngCore { /// println!("{}", m); /// ``` /// - /// [`Uniform`]: distributions/uniform/struct.Uniform.html + /// [`Uniform`]: distributions::uniform::Uniform fn gen_range<T: SampleUniform, B1, B2>(&mut self, low: B1, high: B2) -> T - where B1: SampleBorrow<T> + Sized, - B2: SampleBorrow<T> + Sized { + where + B1: SampleBorrow<T> + Sized, + B2: SampleBorrow<T> + Sized, + { T::Sampler::sample_single(low, high, self) } @@ -272,34 +239,39 @@ pub trait Rng: RngCore { /// Create an iterator that generates values using the given distribution. /// + /// Note that this function takes its arguments by value. This works since + /// `(&mut R): Rng where R: Rng` and + /// `(&D): Distribution where D: Distribution`, + /// however borrowing is not automatic hence `rng.sample_iter(...)` may + /// need to be replaced with `(&mut rng).sample_iter(...)`. + /// /// # Example /// /// ``` /// use rand::{thread_rng, Rng}; /// use rand::distributions::{Alphanumeric, Uniform, Standard}; /// - /// let mut rng = thread_rng(); + /// let rng = thread_rng(); /// /// // Vec of 16 x f32: - /// let v: Vec<f32> = thread_rng().sample_iter(&Standard).take(16).collect(); + /// let v: Vec<f32> = rng.sample_iter(Standard).take(16).collect(); /// /// // String: - /// let s: String = rng.sample_iter(&Alphanumeric).take(7).collect(); + /// let s: String = rng.sample_iter(Alphanumeric).take(7).collect(); /// /// // Combined values - /// println!("{:?}", thread_rng().sample_iter(&Standard).take(5) + /// println!("{:?}", rng.sample_iter(Standard).take(5) /// .collect::<Vec<(f64, bool)>>()); /// /// // Dice-rolling: /// let die_range = Uniform::new_inclusive(1, 6); - /// let mut roll_die = rng.sample_iter(&die_range); + /// let mut roll_die = rng.sample_iter(die_range); /// while roll_die.next().unwrap() != 6 { /// println!("Not a 6; rolling again!"); /// } /// ``` - fn sample_iter<'a, T, D: Distribution<T>>(&'a mut self, distr: &'a D) - -> distributions::DistIter<'a, D, Self, T> where Self: Sized - { + fn sample_iter<T, D>(self, distr: D) -> distributions::DistIter<D, Self, T> + where D: Distribution<T>, Self: Sized { distr.sample_iter(self) } @@ -323,9 +295,8 @@ pub trait Rng: RngCore { /// thread_rng().fill(&mut arr[..]); /// ``` /// - /// [`fill_bytes`]: trait.RngCore.html#method.fill_bytes - /// [`try_fill`]: trait.Rng.html#method.try_fill - /// [`AsByteSliceMut`]: trait.AsByteSliceMut.html + /// [`fill_bytes`]: RngCore::fill_bytes + /// [`try_fill`]: Rng::try_fill fn fill<T: AsByteSliceMut + ?Sized>(&mut self, dest: &mut T) { self.fill_bytes(dest.as_byte_slice_mut()); dest.to_le(); @@ -338,10 +309,8 @@ pub trait Rng: RngCore { /// On big-endian platforms this performs byte-swapping to ensure /// portability of results from reproducible generators. /// - /// This uses [`try_fill_bytes`] internally and forwards all RNG errors. In - /// some cases errors may be resolvable; see [`ErrorKind`] and - /// documentation for the RNG in use. 
If you do not plan to handle these - /// errors you may prefer to use [`fill`]. + /// This is identical to [`fill`] except that it uses [`try_fill_bytes`] + /// internally and forwards RNG errors. /// /// # Example /// @@ -358,10 +327,8 @@ pub trait Rng: RngCore { /// # try_inner().unwrap() /// ``` /// - /// [`ErrorKind`]: enum.ErrorKind.html - /// [`try_fill_bytes`]: trait.RngCore.html#method.try_fill_bytes - /// [`fill`]: trait.Rng.html#method.fill - /// [`AsByteSliceMut`]: trait.AsByteSliceMut.html + /// [`try_fill_bytes`]: RngCore::try_fill_bytes + /// [`fill`]: Rng::fill fn try_fill<T: AsByteSliceMut + ?Sized>(&mut self, dest: &mut T) -> Result<(), Error> { self.try_fill_bytes(dest.as_byte_slice_mut())?; dest.to_le(); @@ -386,10 +353,10 @@ pub trait Rng: RngCore { /// /// If `p < 0` or `p > 1`. /// - /// [`Bernoulli`]: distributions/bernoulli/struct.Bernoulli.html + /// [`Bernoulli`]: distributions::bernoulli::Bernoulli #[inline] fn gen_bool(&mut self, p: f64) -> bool { - let d = distributions::Bernoulli::new(p); + let d = distributions::Bernoulli::new(p).unwrap(); self.sample(d) } @@ -415,55 +382,19 @@ pub trait Rng: RngCore { /// println!("{}", rng.gen_ratio(2, 3)); /// ``` /// - /// [`Bernoulli`]: distributions/bernoulli/struct.Bernoulli.html + /// [`Bernoulli`]: distributions::bernoulli::Bernoulli #[inline] fn gen_ratio(&mut self, numerator: u32, denominator: u32) -> bool { - let d = distributions::Bernoulli::from_ratio(numerator, denominator); + let d = distributions::Bernoulli::from_ratio(numerator, denominator).unwrap(); self.sample(d) } - - /// Return a random element from `values`. - /// - /// Deprecated: use [`SliceRandom::choose`] instead. - /// - /// [`SliceRandom::choose`]: seq/trait.SliceRandom.html#method.choose - #[deprecated(since="0.6.0", note="use SliceRandom::choose instead")] - fn choose<'a, T>(&mut self, values: &'a [T]) -> Option<&'a T> { - use seq::SliceRandom; - values.choose(self) - } - - /// Return a mutable pointer to a random element from `values`. - /// - /// Deprecated: use [`SliceRandom::choose_mut`] instead. - /// - /// [`SliceRandom::choose_mut`]: seq/trait.SliceRandom.html#method.choose_mut - #[deprecated(since="0.6.0", note="use SliceRandom::choose_mut instead")] - fn choose_mut<'a, T>(&mut self, values: &'a mut [T]) -> Option<&'a mut T> { - use seq::SliceRandom; - values.choose_mut(self) - } - - /// Shuffle a mutable slice in place. - /// - /// Deprecated: use [`SliceRandom::shuffle`] instead. - /// - /// [`SliceRandom::shuffle`]: seq/trait.SliceRandom.html#method.shuffle - #[deprecated(since="0.6.0", note="use SliceRandom::shuffle instead")] - fn shuffle<T>(&mut self, values: &mut [T]) { - use seq::SliceRandom; - values.shuffle(self) - } } impl<R: RngCore + ?Sized> Rng for R {} /// Trait for casting types to byte slices /// -/// This is used by the [`fill`] and [`try_fill`] methods. -/// -/// [`fill`]: trait.Rng.html#method.fill -/// [`try_fill`]: trait.Rng.html#method.try_fill +/// This is used by the [`Rng::fill`] and [`Rng::try_fill`] methods. pub trait AsByteSliceMut { /// Return a mutable reference to self as a byte slice fn as_byte_slice_mut(&mut self) -> &mut [u8]; @@ -481,6 +412,7 @@ impl AsByteSliceMut for [u8] { } macro_rules! impl_as_byte_slice { + () => {}; ($t:ty) => { impl AsByteSliceMut for [$t] { fn as_byte_slice_mut(&mut self) -> &mut [u8] { @@ -491,8 +423,7 @@ macro_rules! 
impl_as_byte_slice { } } else { unsafe { - slice::from_raw_parts_mut(&mut self[0] - as *mut $t + slice::from_raw_parts_mut(self.as_mut_ptr() as *mut u8, self.len() * mem::size_of::<$t>() ) @@ -506,26 +437,47 @@ macro_rules! impl_as_byte_slice { } } } + + impl AsByteSliceMut for [Wrapping<$t>] { + fn as_byte_slice_mut(&mut self) -> &mut [u8] { + if self.len() == 0 { + unsafe { + // must not use null pointer + slice::from_raw_parts_mut(0x1 as *mut u8, 0) + } + } else { + unsafe { + slice::from_raw_parts_mut(self.as_mut_ptr() + as *mut u8, + self.len() * mem::size_of::<$t>() + ) + } + } + } + + fn to_le(&mut self) { + for x in self { + *x = Wrapping(x.0.to_le()); + } + } + } + }; + ($t:ty, $($tt:ty,)*) => { + impl_as_byte_slice!($t); + // TODO: this could replace above impl once Rust #32463 is fixed + // impl_as_byte_slice!(Wrapping<$t>); + impl_as_byte_slice!($($tt,)*); } } -impl_as_byte_slice!(u16); -impl_as_byte_slice!(u32); -impl_as_byte_slice!(u64); -#[cfg(all(rustc_1_26, not(target_os = "emscripten")))] impl_as_byte_slice!(u128); -impl_as_byte_slice!(usize); -impl_as_byte_slice!(i8); -impl_as_byte_slice!(i16); -impl_as_byte_slice!(i32); -impl_as_byte_slice!(i64); -#[cfg(all(rustc_1_26, not(target_os = "emscripten")))] impl_as_byte_slice!(i128); -impl_as_byte_slice!(isize); +impl_as_byte_slice!(u16, u32, u64, usize,); +#[cfg(not(target_os = "emscripten"))] impl_as_byte_slice!(u128); +impl_as_byte_slice!(i8, i16, i32, i64, isize,); +#[cfg(not(target_os = "emscripten"))] impl_as_byte_slice!(i128); macro_rules! impl_as_byte_slice_arrays { ($n:expr,) => {}; - ($n:expr, $N:ident, $($NN:ident,)*) => { - impl_as_byte_slice_arrays!($n - 1, $($NN,)*); - + ($n:expr, $N:ident) => { impl<T> AsByteSliceMut for [T; $n] where [T]: AsByteSliceMut { fn as_byte_slice_mut(&mut self) -> &mut [u8] { self[..].as_byte_slice_mut() @@ -536,96 +488,19 @@ macro_rules! impl_as_byte_slice_arrays { } } }; + ($n:expr, $N:ident, $($NN:ident,)*) => { + impl_as_byte_slice_arrays!($n, $N); + impl_as_byte_slice_arrays!($n - 1, $($NN,)*); + }; (!div $n:expr,) => {}; (!div $n:expr, $N:ident, $($NN:ident,)*) => { + impl_as_byte_slice_arrays!($n, $N); impl_as_byte_slice_arrays!(!div $n / 2, $($NN,)*); - - impl<T> AsByteSliceMut for [T; $n] where [T]: AsByteSliceMut { - fn as_byte_slice_mut(&mut self) -> &mut [u8] { - self[..].as_byte_slice_mut() - } - - fn to_le(&mut self) { - self[..].to_le() - } - } }; } impl_as_byte_slice_arrays!(32, N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,); impl_as_byte_slice_arrays!(!div 4096, N,N,N,N,N,N,N,); - -/// A convenience extension to [`SeedableRng`] allowing construction from fresh -/// entropy. This trait is automatically implemented for any PRNG implementing -/// [`SeedableRng`] and is not intended to be implemented by users. -/// -/// This is equivalent to using `SeedableRng::from_rng(EntropyRng::new())` then -/// unwrapping the result. -/// -/// Since this is convenient and secure, it is the recommended way to create -/// PRNGs, though two alternatives may be considered: -/// -/// * Deterministic creation using [`SeedableRng::from_seed`] with a fixed seed -/// * Seeding from `thread_rng`: `SeedableRng::from_rng(thread_rng())?`; -/// this will usually be faster and should also be secure, but requires -/// trusting one extra component. 
-/// -/// ## Example -/// -/// ``` -/// use rand::{Rng, FromEntropy}; -/// use rand::rngs::StdRng; -/// -/// let mut rng = StdRng::from_entropy(); -/// println!("Random die roll: {}", rng.gen_range(1, 7)); -/// ``` -/// -/// [`EntropyRng`]: rngs/struct.EntropyRng.html -/// [`SeedableRng`]: trait.SeedableRng.html -/// [`SeedableRng::from_seed`]: trait.SeedableRng.html#tymethod.from_seed -#[cfg(feature="std")] -pub trait FromEntropy: SeedableRng { - /// Creates a new instance, automatically seeded with fresh entropy. - /// - /// Normally this will use `OsRng`, but if that fails `JitterRng` will be - /// used instead. Both should be suitable for cryptography. It is possible - /// that both entropy sources will fail though unlikely; failures would - /// almost certainly be platform limitations or build issues, i.e. most - /// applications targetting PC/mobile platforms should not need to worry - /// about this failing. - /// - /// # Panics - /// - /// If all entropy sources fail this will panic. If you need to handle - /// errors, use the following code, equivalent aside from error handling: - /// - /// ``` - /// # use rand::Error; - /// use rand::prelude::*; - /// use rand::rngs::EntropyRng; - /// - /// # fn try_inner() -> Result<(), Error> { - /// // This uses StdRng, but is valid for any R: SeedableRng - /// let mut rng = StdRng::from_rng(EntropyRng::new())?; - /// - /// println!("random number: {}", rng.gen_range(1, 10)); - /// # Ok(()) - /// # } - /// - /// # try_inner().unwrap() - /// ``` - fn from_entropy() -> Self; -} - -#[cfg(feature="std")] -impl<R: SeedableRng> FromEntropy for R { - fn from_entropy() -> R { - R::from_rng(rngs::EntropyRng::new()).unwrap_or_else(|err| - panic!("FromEntropy::from_entropy() failed: {}", err)) - } -} - - /// Generates a random value using the thread-local random number generator. /// /// This is simply a shortcut for `thread_rng().gen()`. See [`thread_rng`] for @@ -667,40 +542,26 @@ impl<R: SeedableRng> FromEntropy for R { /// } /// ``` /// -/// [`thread_rng`]: fn.thread_rng.html -/// [`Standard`]: distributions/struct.Standard.html +/// [`Standard`]: distributions::Standard #[cfg(feature="std")] #[inline] -pub fn random<T>() -> T where Standard: Distribution<T> { +pub fn random<T>() -> T +where Standard: Distribution<T> { thread_rng().gen() } #[cfg(test)] mod test { - use rngs::mock::StepRng; - use rngs::StdRng; + use crate::rngs::mock::StepRng; use super::*; #[cfg(all(not(feature="std"), feature="alloc"))] use alloc::boxed::Box; - pub struct TestRng<R> { inner: R } - - impl<R: RngCore> RngCore for TestRng<R> { - fn next_u32(&mut self) -> u32 { - self.inner.next_u32() - } - fn next_u64(&mut self) -> u64 { - self.inner.next_u64() - } - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.inner.fill_bytes(dest) - } - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.inner.try_fill_bytes(dest) - } - } - - pub fn rng(seed: u64) -> TestRng<StdRng> { - TestRng { inner: StdRng::seed_from_u64(seed) } + /// Construct a deterministic RNG with the given seed + pub fn rng(seed: u64) -> impl RngCore { + // For tests, we want a statistically good, fast, reproducible RNG. + // PCG32 will do fine, and will be easy to embed if we ever need to. 
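With the `FromEntropy` trait removed above, the equivalent constructor in this release is a provided method on `SeedableRng` itself, available when the `std`/`getrandom` features are enabled. A sketch of the migrated form of the deleted doc example:

```rust
use rand::{Rng, SeedableRng};
use rand::rngs::StdRng;

fn main() {
    // rand 0.6: `use rand::FromEntropy;` supplied `StdRng::from_entropy()`.
    // rand 0.7: the same call comes from `SeedableRng`
    // (assuming the `std`/`getrandom` features are enabled).
    let mut rng = StdRng::from_entropy();
    println!("Random die roll: {}", rng.gen_range(1, 7));
}
```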
+ const INC: u64 = 11634580027462260723; + rand_pcg::Pcg32::new(seed, INC) } #[test] @@ -740,6 +601,12 @@ mod test { rng.fill(&mut array[..]); assert_eq!(array, [x as u32, (x >> 32) as u32]); assert_eq!(rng.next_u32(), x as u32); + + // Check equivalence using wrapped arrays + let mut warray = [Wrapping(0u32); 2]; + rng.fill(&mut warray[..]); + assert_eq!(array[0], warray[0].0); + assert_eq!(array[1], warray[1].0); } #[test] @@ -796,9 +663,9 @@ mod test { #[test] fn test_rng_trait_object() { - use distributions::{Distribution, Standard}; + use crate::distributions::{Distribution, Standard}; let mut rng = rng(109); - let mut r = &mut rng as &mut RngCore; + let mut r = &mut rng as &mut dyn RngCore; r.next_u32(); r.gen::<i32>(); assert_eq!(r.gen_range(0, 1), 0); @@ -808,9 +675,9 @@ mod test { #[test] #[cfg(feature="alloc")] fn test_rng_boxed_trait() { - use distributions::{Distribution, Standard}; + use crate::distributions::{Distribution, Standard}; let rng = rng(110); - let mut r = Box::new(rng) as Box<RngCore>; + let mut r = Box::new(rng) as Box<dyn RngCore>; r.next_u32(); r.gen::<i32>(); assert_eq!(r.gen_range(0, 1), 0); @@ -833,6 +700,7 @@ mod test { } #[test] + #[cfg(not(miri))] // Miri is too slow fn test_gen_ratio_average() { const NUM: u32 = 3; const DENOM: u32 = 10; diff --git a/rand/src/prelude.rs b/rand/src/prelude.rs index 5d8a0e9..3c386e8 100644 --- a/rand/src/prelude.rs +++ b/rand/src/prelude.rs @@ -14,14 +14,15 @@ //! //! ``` //! use rand::prelude::*; -//! # let _ = StdRng::from_entropy(); -//! # let mut r = SmallRng::from_rng(thread_rng()).unwrap(); +//! # let mut r = StdRng::from_rng(thread_rng()).unwrap(); //! # let _: f32 = r.gen(); //! ``` -#[doc(no_inline)] pub use distributions::Distribution; -#[doc(no_inline)] pub use rngs::{SmallRng, StdRng}; -#[doc(no_inline)] #[cfg(feature="std")] pub use rngs::ThreadRng; -#[doc(no_inline)] pub use {Rng, RngCore, CryptoRng, SeedableRng}; -#[doc(no_inline)] #[cfg(feature="std")] pub use {FromEntropy, random, thread_rng}; -#[doc(no_inline)] pub use seq::{SliceRandom, IteratorRandom}; +#[doc(no_inline)] pub use crate::distributions::Distribution; +#[doc(no_inline)] pub use crate::rngs::StdRng; +#[cfg(feature="small_rng")] +#[doc(no_inline)] pub use crate::rngs::SmallRng; +#[doc(no_inline)] #[cfg(feature="std")] pub use crate::rngs::ThreadRng; +#[doc(no_inline)] pub use crate::{Rng, RngCore, CryptoRng, SeedableRng}; +#[doc(no_inline)] #[cfg(feature="std")] pub use crate::{random, thread_rng}; +#[doc(no_inline)] pub use crate::seq::{SliceRandom, IteratorRandom}; diff --git a/rand/src/prng/mod.rs b/rand/src/prng/mod.rs deleted file mode 100644 index 3c0d27b..0000000 --- a/rand/src/prng/mod.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018 Developers of the Rand project. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Pseudo-random number generators. -//! -//! This module is deprecated: -//! -//! - documentation has moved to -//! [The Book](https://rust-random.github.io/book/guide-rngs.html), -//! - PRNGs have moved to other `rand_*` crates. - -// Deprecations (to be removed in 0.7) -#[doc(hidden)] #[allow(deprecated)] -pub use deprecated::XorShiftRng; -#[doc(hidden)] pub mod isaac { - // Note: we miss `IsaacCore` here but probably unimportant. 
- #[allow(deprecated)] pub use deprecated::IsaacRng; -} -#[doc(hidden)] pub mod isaac64 { - #[allow(deprecated)] pub use deprecated::Isaac64Rng; -} -#[doc(hidden)] #[allow(deprecated)] pub use deprecated::{IsaacRng, Isaac64Rng}; -#[doc(hidden)] pub mod chacha { - // Note: we miss `ChaChaCore` here but probably unimportant. - #[allow(deprecated)] pub use deprecated::ChaChaRng; -} -#[doc(hidden)] #[allow(deprecated)] pub use deprecated::ChaChaRng; -#[doc(hidden)] pub mod hc128 { - // Note: we miss `Hc128Core` here but probably unimportant. - #[allow(deprecated)] pub use deprecated::Hc128Rng; -} -#[doc(hidden)] #[allow(deprecated)] pub use deprecated::Hc128Rng; diff --git a/rand/src/rngs/adapter/mod.rs b/rand/src/rngs/adapter/mod.rs index 60b832e..659ff26 100644 --- a/rand/src/rngs/adapter/mod.rs +++ b/rand/src/rngs/adapter/mod.rs @@ -8,8 +8,8 @@ //! Wrappers / adapters forming RNGs -#[cfg(feature="std")] #[doc(hidden)] pub mod read; +#[cfg(feature="std")] mod read; mod reseeding; -#[cfg(feature="std")] pub use self::read::ReadRng; +#[cfg(feature="std")] pub use self::read::{ReadRng, ReadError}; pub use self::reseeding::ReseedingRng; diff --git a/rand/src/rngs/adapter/read.rs b/rand/src/rngs/adapter/read.rs index 30b6de6..901462e 100644 --- a/rand/src/rngs/adapter/read.rs +++ b/rand/src/rngs/adapter/read.rs @@ -10,12 +10,13 @@ //! A wrapper around any Read to treat it as an RNG. use std::io::Read; +use std::fmt; -use rand_core::{RngCore, Error, ErrorKind, impls}; +use rand_core::{RngCore, Error, impls}; /// An RNG that reads random bytes straight from any type supporting -/// `std::io::Read`, for example files. +/// [`std::io::Read`], for example files. /// /// This will work best with an infinite reader, but that is not required. /// @@ -24,10 +25,10 @@ use rand_core::{RngCore, Error, ErrorKind, impls}; /// /// # Panics /// -/// `ReadRng` uses `std::io::read_exact`, which retries on interrupts. All other -/// errors from the underlying reader, including when it does not have enough -/// data, will only be reported through [`try_fill_bytes`]. The other -/// [`RngCore`] methods will panic in case of an error. +/// `ReadRng` uses [`std::io::Read::read_exact`], which retries on interrupts. +/// All other errors from the underlying reader, including when it does not +/// have enough data, will only be reported through [`try_fill_bytes`]. +/// The other [`RngCore`] methods will panic in case of an error. /// /// # Example /// @@ -40,9 +41,8 @@ use rand_core::{RngCore, Error, ErrorKind, impls}; /// println!("{:x}", rng.gen::<u32>()); /// ``` /// -/// [`OsRng`]: ../struct.OsRng.html -/// [`RngCore`]: ../../trait.RngCore.html -/// [`try_fill_bytes`]: ../../trait.RngCore.html#method.tymethod.try_fill_bytes +/// [`OsRng`]: crate::rngs::OsRng +/// [`try_fill_bytes`]: RngCore::try_fill_bytes #[derive(Debug)] pub struct ReadRng<R> { reader: R @@ -72,24 +72,33 @@ impl<R: Read> RngCore for ReadRng<R> { } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - if dest.len() == 0 { return Ok(()); } + if dest.is_empty() { return Ok(()); } // Use `std::io::read_exact`, which retries on `ErrorKind::Interrupted`. 
- self.reader.read_exact(dest).map_err(|err| { - match err.kind() { - ::std::io::ErrorKind::UnexpectedEof => Error::with_cause( - ErrorKind::Unavailable, - "not enough bytes available, reached end of source", err), - _ => Error::with_cause(ErrorKind::Unavailable, - "error reading from Read source", err) - } - }) + self.reader.read_exact(dest).map_err(|e| Error::new(ReadError(e))) } } +/// `ReadRng` error type +#[derive(Debug)] +pub struct ReadError(std::io::Error); + +impl fmt::Display for ReadError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "ReadError: {}", self.0) + } +} + +impl std::error::Error for ReadError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + Some(&self.0) + } +} + + #[cfg(test)] mod test { use super::ReadRng; - use {RngCore, ErrorKind}; + use crate::RngCore; #[test] fn test_reader_rng_u64() { @@ -132,6 +141,8 @@ mod test { let mut rng = ReadRng::new(&v[..]); - assert!(rng.try_fill_bytes(&mut w).err().unwrap().kind == ErrorKind::Unavailable); + let result = rng.try_fill_bytes(&mut w); + assert!(result.is_err()); + println!("Error: {}", result.unwrap_err()); } } diff --git a/rand/src/rngs/adapter/reseeding.rs b/rand/src/rngs/adapter/reseeding.rs index 016afab..ec88efe 100644 --- a/rand/src/rngs/adapter/reseeding.rs +++ b/rand/src/rngs/adapter/reseeding.rs @@ -12,7 +12,7 @@ use core::mem::size_of; -use rand_core::{RngCore, CryptoRng, SeedableRng, Error, ErrorKind}; +use rand_core::{RngCore, CryptoRng, SeedableRng, Error}; use rand_core::block::{BlockRngCore, BlockRng}; /// A wrapper around any PRNG that implements [`BlockRngCore`], that adds the @@ -24,7 +24,7 @@ use rand_core::block::{BlockRngCore, BlockRng}; /// - After `clone()`, the clone will be reseeded on first use. /// - After a process is forked, the RNG in the child process is reseeded within /// the next few generated values, depending on the block size of the -/// underlying PRNG. For [`ChaChaCore`] and [`Hc128Core`] this is a maximum of +/// underlying PRNG. For ChaCha and Hc128 this is a maximum of /// 15 `u32` values before reseeding. /// - After the PRNG has generated a configurable number of random bytes. /// @@ -57,33 +57,24 @@ use rand_core::block::{BlockRngCore, BlockRng}; /// # Example /// /// ``` -/// # extern crate rand; -/// # extern crate rand_chacha; -/// # fn main() { /// use rand::prelude::*; -/// use rand_chacha::ChaChaCore; // Internal part of ChaChaRng that +/// use rand_chacha::ChaCha20Core; // Internal part of ChaChaRng that /// // implements BlockRngCore /// use rand::rngs::OsRng; /// use rand::rngs::adapter::ReseedingRng; /// -/// let prng = ChaChaCore::from_entropy(); -// FIXME: it is better to use EntropyRng as reseeder, but that doesn't implement -// clone yet. 
-/// let reseeder = OsRng::new().unwrap(); -/// let mut reseeding_rng = ReseedingRng::new(prng, 0, reseeder); +/// let prng = ChaCha20Core::from_entropy(); +/// let mut reseeding_rng = ReseedingRng::new(prng, 0, OsRng); /// /// println!("{}", reseeding_rng.gen::<u64>()); /// /// let mut cloned_rng = reseeding_rng.clone(); /// assert!(reseeding_rng.gen::<u64>() != cloned_rng.gen::<u64>()); -/// # } /// ``` /// -/// [`ChaChaCore`]: ../../../rand_chacha/struct.ChaChaCore.html -/// [`Hc128Core`]: ../../../rand_hc/struct.Hc128Core.html -/// [`BlockRngCore`]: ../../../rand_core/block/trait.BlockRngCore.html -/// [`ReseedingRng::new`]: struct.ReseedingRng.html#method.new -/// [`reseed()`]: struct.ReseedingRng.html#method.reseed +/// [`BlockRngCore`]: rand_core::block::BlockRngCore +/// [`ReseedingRng::new`]: ReseedingRng::new +/// [`reseed()`]: ReseedingRng::reseed #[derive(Debug)] pub struct ReseedingRng<R, Rsdr>(BlockRng<ReseedingCore<R, Rsdr>>) where R: BlockRngCore + SeedableRng, @@ -234,6 +225,7 @@ where R: BlockRngCore + SeedableRng, results: &mut <Self as BlockRngCore>::Results, global_fork_counter: usize) { + #![allow(clippy::if_same_then_else)] // false positive if self.is_forked(global_fork_counter) { info!("Fork detected, reseeding RNG"); } else { @@ -243,21 +235,13 @@ where R: BlockRngCore + SeedableRng, let num_bytes = results.as_ref().len() * size_of::<<R as BlockRngCore>::Item>(); - let threshold = if let Err(e) = self.reseed() { - let delay = match e.kind { - ErrorKind::Transient => num_bytes as i64, - kind @ _ if kind.should_retry() => self.threshold >> 8, - _ => self.threshold, - }; - warn!("Reseeding RNG delayed reseeding by {} bytes due to \ - error from source: {}", delay, e); - delay - } else { - self.fork_counter = global_fork_counter; - self.threshold - }; + if let Err(e) = self.reseed() { + warn!("Reseeding RNG failed: {}", e); + let _ = e; + } + self.fork_counter = global_fork_counter; - self.bytes_until_reseed = threshold - num_bytes as i64; + self.bytes_until_reseed = self.threshold - num_bytes as i64; self.inner.generate(results); } } @@ -282,12 +266,11 @@ where R: BlockRngCore + SeedableRng + CryptoRng, Rsdr: RngCore + CryptoRng {} -#[cfg(all(feature="std", unix, not(target_os="emscripten")))] +#[cfg(all(unix, not(target_os="emscripten")))] mod fork { - extern crate libc; - - use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; - use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT}; + use core::sync::atomic::{AtomicUsize, AtomicBool, Ordering}; + #[allow(deprecated)] // Required for compatibility with Rust < 1.24. + use core::sync::atomic::{ATOMIC_USIZE_INIT, ATOMIC_BOOL_INIT}; // Fork protection // @@ -301,12 +284,14 @@ mod fork { // don't update `fork_counter`, so a reseed is attempted as soon as // possible. 
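The fork-counter machinery above is the second reseeding trigger listed in this file's docs; the first is the byte threshold passed to `ReseedingRng::new`. A sketch with a non-zero threshold, otherwise mirroring the updated doc example (the 64 KiB figure is arbitrary, and as in that example this assumes the companion `rand_chacha` crate):

```rust
use rand::prelude::*;
use rand::rngs::OsRng;
use rand::rngs::adapter::ReseedingRng;
use rand_chacha::ChaCha20Core;

fn main() {
    let prng = ChaCha20Core::from_entropy();
    // Second argument: reseed after roughly this many generated bytes
    // (the doc example above passes 0).
    let mut rng = ReseedingRng::new(prng, 64 * 1024, OsRng);
    println!("{}", rng.gen::<u64>());
}
```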
+ #[allow(deprecated)] static RESEEDING_RNG_FORK_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; pub fn get_fork_counter() -> usize { RESEEDING_RNG_FORK_COUNTER.load(Ordering::Relaxed) } + #[allow(deprecated)] static FORK_HANDLER_REGISTERED: AtomicBool = ATOMIC_BOOL_INIT; extern fn fork_handler() { @@ -316,14 +301,14 @@ mod fork { } pub fn register_fork_handler() { - if FORK_HANDLER_REGISTERED.load(Ordering::Relaxed) == false { + if !FORK_HANDLER_REGISTERED.load(Ordering::Relaxed) { unsafe { libc::pthread_atfork(None, None, Some(fork_handler)) }; FORK_HANDLER_REGISTERED.store(true, Ordering::Relaxed); } } } -#[cfg(not(all(feature="std", unix, not(target_os="emscripten"))))] +#[cfg(not(all(unix, not(target_os="emscripten"))))] mod fork { pub fn get_fork_counter() -> usize { 0 } pub fn register_fork_handler() {} @@ -332,25 +317,27 @@ mod fork { #[cfg(test)] mod test { - use {Rng, SeedableRng}; - use rand_chacha::ChaChaCore; - use rngs::mock::StepRng; + use crate::{Rng, SeedableRng}; + use crate::rngs::std::Core; + use crate::rngs::mock::StepRng; use super::ReseedingRng; #[test] fn test_reseeding() { let mut zero = StepRng::new(0, 0); - let rng = ChaChaCore::from_rng(&mut zero).unwrap(); - let mut reseeding = ReseedingRng::new(rng, 32*4, zero); - - // Currently we only support for arrays up to length 32. - // TODO: cannot generate seq via Rng::gen because it uses different alg - let mut buf = [0u32; 32]; // Needs to be a multiple of the RNGs result - // size to test exactly. - reseeding.fill(&mut buf); + let rng = Core::from_rng(&mut zero).unwrap(); + let thresh = 1; // reseed every time the buffer is exhausted + let mut reseeding = ReseedingRng::new(rng, thresh, zero); + + // RNG buffer size is [u32; 64] + // Debug is only implemented up to length 32 so use two arrays + let mut buf = ([0u32; 32], [0u32; 32]); + reseeding.fill(&mut buf.0); + reseeding.fill(&mut buf.1); let seq = buf; for _ in 0..10 { - reseeding.fill(&mut buf); + reseeding.fill(&mut buf.0); + reseeding.fill(&mut buf.1); assert_eq!(buf, seq); } } @@ -358,7 +345,7 @@ mod test { #[test] fn test_clone_reseeding() { let mut zero = StepRng::new(0, 0); - let rng = ChaChaCore::from_rng(&mut zero).unwrap(); + let rng = Core::from_rng(&mut zero).unwrap(); let mut rng1 = ReseedingRng::new(rng, 32*4, zero); let first: u32 = rng1.gen(); diff --git a/rand/src/rngs/entropy.rs b/rand/src/rngs/entropy.rs index 372b4d7..1ed59ab 100644 --- a/rand/src/rngs/entropy.rs +++ b/rand/src/rngs/entropy.rs @@ -8,52 +8,21 @@ //! Entropy generator, or wrapper around external generators -use rand_core::{RngCore, CryptoRng, Error, ErrorKind, impls}; -#[allow(unused)] -use rngs; +#![allow(deprecated)] // whole module is deprecated + +use rand_core::{RngCore, CryptoRng, Error}; +use crate::rngs::OsRng; /// An interface returning random data from external source(s), provided /// specifically for securely seeding algorithmic generators (PRNGs). /// -/// Where possible, `EntropyRng` retrieves random data from the operating -/// system's interface for random numbers ([`OsRng`]); if that fails it will -/// fall back to the [`JitterRng`] entropy collector. In the latter case it will -/// still try to use [`OsRng`] on the next usage. -/// -/// If no secure source of entropy is available `EntropyRng` will panic on use; -/// i.e. it should never output predictable data. 
-/// -/// This is either a little slow ([`OsRng`] requires a system call) or extremely -/// slow ([`JitterRng`] must use significant CPU time to generate sufficient -/// jitter); for better performance it is common to seed a local PRNG from -/// external entropy then primarily use the local PRNG ([`thread_rng`] is -/// provided as a convenient, local, automatically-seeded CSPRNG). -/// -/// # Panics -/// -/// On most systems, like Windows, Linux, macOS and *BSD on common hardware, it -/// is highly unlikely for both [`OsRng`] and [`JitterRng`] to fail. But on -/// combinations like webassembly without Emscripten or stdweb both sources are -/// unavailable. If both sources fail, only [`try_fill_bytes`] is able to -/// report the error, and only the one from `OsRng`. The other [`RngCore`] -/// methods will panic in case of an error. -/// -/// [`OsRng`]: struct.OsRng.html -/// [`JitterRng`]: jitter/struct.JitterRng.html -/// [`thread_rng`]: ../fn.thread_rng.html -/// [`RngCore`]: ../trait.RngCore.html -/// [`try_fill_bytes`]: ../trait.RngCore.html#method.tymethod.try_fill_bytes +/// This is deprecated. It is suggested you use [`rngs::OsRng`] instead. +/// +/// [`rngs::OsRng`]: crate::rngs::OsRng #[derive(Debug)] +#[deprecated(since="0.7.0", note="use rngs::OsRng instead")] pub struct EntropyRng { - source: Source, -} - -#[derive(Debug)] -enum Source { - Os(Os), - Custom(Custom), - Jitter(Jitter), - None, + source: OsRng, } impl EntropyRng { @@ -63,7 +32,7 @@ impl EntropyRng { /// those are done on first use. This is done to make `new` infallible, /// and `try_fill_bytes` the only place to report errors. pub fn new() -> Self { - EntropyRng { source: Source::None } + EntropyRng { source: OsRng } } } @@ -75,167 +44,25 @@ impl Default for EntropyRng { impl RngCore for EntropyRng { fn next_u32(&mut self) -> u32 { - impls::next_u32_via_fill(self) + self.source.next_u32() } fn next_u64(&mut self) -> u64 { - impls::next_u64_via_fill(self) + self.source.next_u64() } fn fill_bytes(&mut self, dest: &mut [u8]) { - self.try_fill_bytes(dest).unwrap_or_else(|err| - panic!("all entropy sources failed; first error: {}", err)) + self.source.fill_bytes(dest) } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - let mut reported_error = None; - - if let Source::Os(ref mut os_rng) = self.source { - match os_rng.fill(dest) { - Ok(()) => return Ok(()), - Err(err) => { - warn!("EntropyRng: OsRng failed \ - [trying other entropy sources]: {}", err); - reported_error = Some(err); - }, - } - } else if Os::is_supported() { - match Os::new_and_fill(dest) { - Ok(os_rng) => { - debug!("EntropyRng: using OsRng"); - self.source = Source::Os(os_rng); - return Ok(()); - }, - Err(err) => { reported_error = reported_error.or(Some(err)) }, - } - } - - if let Source::Custom(ref mut rng) = self.source { - match rng.fill(dest) { - Ok(()) => return Ok(()), - Err(err) => { - warn!("EntropyRng: custom entropy source failed \ - [trying other entropy sources]: {}", err); - reported_error = Some(err); - }, - } - } else if Custom::is_supported() { - match Custom::new_and_fill(dest) { - Ok(custom) => { - debug!("EntropyRng: using custom entropy source"); - self.source = Source::Custom(custom); - return Ok(()); - }, - Err(err) => { reported_error = reported_error.or(Some(err)) }, - } - } - - if let Source::Jitter(ref mut jitter_rng) = self.source { - match jitter_rng.fill(dest) { - Ok(()) => return Ok(()), - Err(err) => { - warn!("EntropyRng: JitterRng failed: {}", err); - reported_error = Some(err); - }, - } - } else if 
Jitter::is_supported() { - match Jitter::new_and_fill(dest) { - Ok(jitter_rng) => { - debug!("EntropyRng: using JitterRng"); - self.source = Source::Jitter(jitter_rng); - return Ok(()); - }, - Err(err) => { reported_error = reported_error.or(Some(err)) }, - } - } - - if let Some(err) = reported_error { - Err(Error::with_cause(ErrorKind::Unavailable, - "All entropy sources failed", - err)) - } else { - Err(Error::new(ErrorKind::Unavailable, - "No entropy sources available")) - } + self.source.try_fill_bytes(dest) } } impl CryptoRng for EntropyRng {} - -trait EntropySource { - fn new_and_fill(dest: &mut [u8]) -> Result<Self, Error> - where Self: Sized; - - fn fill(&mut self, dest: &mut [u8]) -> Result<(), Error>; - - fn is_supported() -> bool { true } -} - -#[allow(unused)] -#[derive(Clone, Debug)] -struct NoSource; - -#[allow(unused)] -impl EntropySource for NoSource { - fn new_and_fill(dest: &mut [u8]) -> Result<Self, Error> { - Err(Error::new(ErrorKind::Unavailable, "Source not supported")) - } - - fn fill(&mut self, dest: &mut [u8]) -> Result<(), Error> { - unreachable!() - } - - fn is_supported() -> bool { false } -} - - -#[cfg(feature="rand_os")] -#[derive(Clone, Debug)] -pub struct Os(rngs::OsRng); - -#[cfg(feature="rand_os")] -impl EntropySource for Os { - fn new_and_fill(dest: &mut [u8]) -> Result<Self, Error> { - let mut rng = rngs::OsRng::new()?; - rng.try_fill_bytes(dest)?; - Ok(Os(rng)) - } - - fn fill(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -#[cfg(not(feature="std"))] -type Os = NoSource; - - -type Custom = NoSource; - - -#[cfg(not(target_arch = "wasm32"))] -#[derive(Clone, Debug)] -pub struct Jitter(rngs::JitterRng); - -#[cfg(not(target_arch = "wasm32"))] -impl EntropySource for Jitter { - fn new_and_fill(dest: &mut [u8]) -> Result<Self, Error> { - let mut rng = rngs::JitterRng::new()?; - rng.try_fill_bytes(dest)?; - Ok(Jitter(rng)) - } - - fn fill(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.0.try_fill_bytes(dest) - } -} - -#[cfg(target_arch = "wasm32")] -type Jitter = NoSource; - - #[cfg(test)] mod test { use super::*; diff --git a/rand/src/rngs/jitter.rs b/rand/src/rngs/jitter.rs deleted file mode 100644 index 3e93477..0000000 --- a/rand/src/rngs/jitter.rs +++ /dev/null @@ -1,885 +0,0 @@ -// Copyright 2018 Developers of the Rand project. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -// -// Based on jitterentropy-library, http://www.chronox.de/jent.html. -// Copyright Stephan Mueller <smueller@chronox.de>, 2014 - 2017. -// -// With permission from Stephan Mueller to relicense the Rust translation under -// the MIT license. - -//! Non-physical true random number generator based on timing jitter. - -// Note: the C implementation of `Jitterentropy` relies on being compiled -// without optimizations. This implementation goes through lengths to make the -// compiler not optimize out code which does influence timing jitter, but is -// technically dead code. 
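Since `EntropyRng` is now deprecated in favour of `OsRng`, which as the change above shows it merely wraps, existing callers can switch directly. A minimal sketch assuming the `getrandom`-backed `OsRng` of this release:

```rust
use rand::rngs::OsRng;
use rand::RngCore;

fn main() {
    // rand 0.6: `let mut source = EntropyRng::new();`
    // rand 0.7: `OsRng` is a unit struct usable wherever an RngCore is needed.
    let mut seed = [0u8; 32];
    OsRng.fill_bytes(&mut seed);
    println!("first seed bytes: {:?}", &seed[..4]);
}
```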
- -use rand_core::{RngCore, CryptoRng, Error, ErrorKind, impls}; - -use core::{fmt, mem, ptr}; -#[cfg(all(feature="std", not(target_arch = "wasm32")))] -use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; - -const MEMORY_BLOCKS: usize = 64; -const MEMORY_BLOCKSIZE: usize = 32; -const MEMORY_SIZE: usize = MEMORY_BLOCKS * MEMORY_BLOCKSIZE; - -/// A true random number generator based on jitter in the CPU execution time, -/// and jitter in memory access time. -/// -/// This is a true random number generator, as opposed to pseudo-random -/// generators. Random numbers generated by `JitterRng` can be seen as fresh -/// entropy. A consequence is that is orders of magnitude slower than [`OsRng`] -/// and PRNGs (about 10<sup>3</sup>..10<sup>6</sup> slower). -/// -/// There are very few situations where using this RNG is appropriate. Only very -/// few applications require true entropy. A normal PRNG can be statistically -/// indistinguishable, and a cryptographic PRNG should also be as impossible to -/// predict. -/// -/// Use of `JitterRng` is recommended for initializing cryptographic PRNGs when -/// [`OsRng`] is not available. -/// -/// `JitterRng` can be used without the standard library, but not conveniently, -/// you must provide a high-precision timer and carefully have to follow the -/// instructions of [`new_with_timer`]. -/// -/// This implementation is based on -/// [Jitterentropy](http://www.chronox.de/jent.html) version 2.1.0. -/// -/// Note: There is no accurate timer available on Wasm platforms, to help -/// prevent fingerprinting or timing side-channel attacks. Therefore -/// [`JitterRng::new()`] is not available on Wasm. -/// -/// # Quality testing -/// -/// [`JitterRng::new()`] has build-in, but limited, quality testing, however -/// before using `JitterRng` on untested hardware, or after changes that could -/// effect how the code is optimized (such as a new LLVM version), it is -/// recommend to run the much more stringent -/// [NIST SP 800-90B Entropy Estimation Suite]( -/// https://github.com/usnistgov/SP800-90B_EntropyAssessment). -/// -/// Use the following code using [`timer_stats`] to collect the data: -/// -/// ```no_run -/// use rand::rngs::JitterRng; -/// # -/// # use std::error::Error; -/// # use std::fs::File; -/// # use std::io::Write; -/// # -/// # fn try_main() -> Result<(), Box<Error>> { -/// let mut rng = JitterRng::new()?; -/// -/// // 1_000_000 results are required for the -/// // NIST SP 800-90B Entropy Estimation Suite -/// const ROUNDS: usize = 1_000_000; -/// let mut deltas_variable: Vec<u8> = Vec::with_capacity(ROUNDS); -/// let mut deltas_minimal: Vec<u8> = Vec::with_capacity(ROUNDS); -/// -/// for _ in 0..ROUNDS { -/// deltas_variable.push(rng.timer_stats(true) as u8); -/// deltas_minimal.push(rng.timer_stats(false) as u8); -/// } -/// -/// // Write out after the statistics collection loop, to not disturb the -/// // test results. -/// File::create("jitter_rng_var.bin")?.write(&deltas_variable)?; -/// File::create("jitter_rng_min.bin")?.write(&deltas_minimal)?; -/// # -/// # Ok(()) -/// # } -/// # -/// # fn main() { -/// # try_main().unwrap(); -/// # } -/// ``` -/// -/// This will produce two files: `jitter_rng_var.bin` and `jitter_rng_min.bin`. -/// Run the Entropy Estimation Suite in three configurations, as outlined below. -/// Every run has two steps. One step to produce an estimation, another to -/// validate the estimation. -/// -/// 1. 
Estimate the expected amount of entropy that is at least available with -/// each round of the entropy collector. This number should be greater than -/// the amount estimated with `64 / test_timer()`. -/// ```sh -/// python noniid_main.py -v jitter_rng_var.bin 8 -/// restart.py -v jitter_rng_var.bin 8 <min-entropy> -/// ``` -/// 2. Estimate the expected amount of entropy that is available in the last 4 -/// bits of the timer delta after running noice sources. Note that a value of -/// `3.70` is the minimum estimated entropy for true randomness. -/// ```sh -/// python noniid_main.py -v -u 4 jitter_rng_var.bin 4 -/// restart.py -v -u 4 jitter_rng_var.bin 4 <min-entropy> -/// ``` -/// 3. Estimate the expected amount of entropy that is available to the entropy -/// collector if both noice sources only run their minimal number of times. -/// This measures the absolute worst-case, and gives a lower bound for the -/// available entropy. -/// ```sh -/// python noniid_main.py -v -u 4 jitter_rng_min.bin 4 -/// restart.py -v -u 4 jitter_rng_min.bin 4 <min-entropy> -/// ``` -/// -/// [`OsRng`]: struct.OsRng.html -/// [`JitterRng::new()`]: struct.JitterRng.html#method.new -/// [`new_with_timer`]: struct.JitterRng.html#method.new_with_timer -/// [`timer_stats`]: struct.JitterRng.html#method.timer_stats -pub struct JitterRng { - data: u64, // Actual random number - // Number of rounds to run the entropy collector per 64 bits - rounds: u8, - // Timer used by `measure_jitter` - timer: fn() -> u64, - // Memory for the Memory Access noise source - mem_prev_index: u16, - // Make `next_u32` not waste 32 bits - data_half_used: bool, -} - -// Note: `JitterRng` maintains a small 64-bit entropy pool. With every -// `generate` 64 new bits should be integrated in the pool. If a round of -// `generate` were to collect less than the expected 64 bit, then the returned -// value, and the new state of the entropy pool, would be in some way related to -// the initial state. It is therefore better if the initial state of the entropy -// pool is different on each call to `generate`. This has a few implications: -// - `generate` should be called once before using `JitterRng` to produce the -// first usable value (this is done by default in `new`); -// - We do not zero the entropy pool after generating a result. The reference -// implementation also does not support zeroing, but recommends generating a -// new value without using it if you want to protect a previously generated -// 'secret' value from someone inspecting the memory; -// - Implementing `Clone` seems acceptable, as it would not cause the systematic -// bias a constant might cause. Only instead of one value that could be -// potentially related to the same initial state, there are now two. - -// Entropy collector state. -// These values are not necessary to preserve across runs. -struct EcState { - // Previous time stamp to determine the timer delta - prev_time: u64, - // Deltas used for the stuck test - last_delta: i32, - last_delta2: i32, - // Memory for the Memory Access noise source - mem: [u8; MEMORY_SIZE], -} - -impl EcState { - // Stuck test by checking the: - // - 1st derivation of the jitter measurement (time delta) - // - 2nd derivation of the jitter measurement (delta of time deltas) - // - 3rd derivation of the jitter measurement (delta of delta of time - // deltas) - // - // All values must always be non-zero. - // This test is a heuristic to see whether the last measurement holds - // entropy. 
- fn stuck(&mut self, current_delta: i32) -> bool { - let delta2 = self.last_delta - current_delta; - let delta3 = delta2 - self.last_delta2; - - self.last_delta = current_delta; - self.last_delta2 = delta2; - - current_delta == 0 || delta2 == 0 || delta3 == 0 - } -} - -// Custom Debug implementation that does not expose the internal state -impl fmt::Debug for JitterRng { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "JitterRng {{}}") - } -} - -impl Clone for JitterRng { - fn clone(&self) -> JitterRng { - JitterRng { - data: self.data, - rounds: self.rounds, - timer: self.timer, - mem_prev_index: self.mem_prev_index, - // The 32 bits that may still be unused from the previous round are - // for the original to use, not for the clone. - data_half_used: false, - } - } -} - -/// An error that can occur when [`JitterRng::test_timer`] fails. -/// -/// [`JitterRng::test_timer`]: struct.JitterRng.html#method.test_timer -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum TimerError { - /// No timer available. - NoTimer, - /// Timer too coarse to use as an entropy source. - CoarseTimer, - /// Timer is not monotonically increasing. - NotMonotonic, - /// Variations of deltas of time too small. - TinyVariantions, - /// Too many stuck results (indicating no added entropy). - TooManyStuck, - #[doc(hidden)] - __Nonexhaustive, -} - -impl TimerError { - fn description(&self) -> &'static str { - match *self { - TimerError::NoTimer => "no timer available", - TimerError::CoarseTimer => "coarse timer", - TimerError::NotMonotonic => "timer not monotonic", - TimerError::TinyVariantions => "time delta variations too small", - TimerError::TooManyStuck => "too many stuck results", - TimerError::__Nonexhaustive => unreachable!(), - } - } -} - -impl fmt::Display for TimerError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.description()) - } -} - -#[cfg(feature="std")] -impl ::std::error::Error for TimerError { - fn description(&self) -> &str { - self.description() - } -} - -impl From<TimerError> for Error { - fn from(err: TimerError) -> Error { - // Timer check is already quite permissive of failures so we don't - // expect false-positive failures, i.e. any error is irrecoverable. - Error::with_cause(ErrorKind::Unavailable, - "timer jitter failed basic quality tests", err) - } -} - -// Initialise to zero; must be positive -#[cfg(all(feature="std", not(target_arch = "wasm32")))] -static JITTER_ROUNDS: AtomicUsize = ATOMIC_USIZE_INIT; - -impl JitterRng { - /// Create a new `JitterRng`. Makes use of `std::time` for a timer, or a - /// platform-specific function with higher accuracy if necessary and - /// available. - /// - /// During initialization CPU execution timing jitter is measured a few - /// hundred times. If this does not pass basic quality tests, an error is - /// returned. The test result is cached to make subsequent calls faster. - #[cfg(all(feature="std", not(target_arch = "wasm32")))] - pub fn new() -> Result<JitterRng, TimerError> { - let mut state = JitterRng::new_with_timer(platform::get_nstime); - let mut rounds = JITTER_ROUNDS.load(Ordering::Relaxed) as u8; - if rounds == 0 { - // No result yet: run test. - // This allows the timer test to run multiple times; we don't care. - rounds = state.test_timer()?; - JITTER_ROUNDS.store(rounds as usize, Ordering::Relaxed); - info!("JitterRng: using {} rounds per u64 output", rounds); - } - state.set_rounds(rounds); - - // Fill `data` with a non-zero value. 
- state.gen_entropy(); - Ok(state) - } - - /// Create a new `JitterRng`. - /// A custom timer can be supplied, making it possible to use `JitterRng` in - /// `no_std` environments. - /// - /// The timer must have nanosecond precision. - /// - /// This method is more low-level than `new()`. It is the responsibility of - /// the caller to run [`test_timer`] before using any numbers generated with - /// `JitterRng`, and optionally call [`set_rounds`]. Also it is important to - /// consume at least one `u64` before using the first result to initialize - /// the entropy collection pool. - /// - /// # Example - /// - /// ``` - /// # use rand::{Rng, Error}; - /// use rand::rngs::JitterRng; - /// - /// # fn try_inner() -> Result<(), Error> { - /// fn get_nstime() -> u64 { - /// use std::time::{SystemTime, UNIX_EPOCH}; - /// - /// let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); - /// // The correct way to calculate the current time is - /// // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64` - /// // But this is faster, and the difference in terms of entropy is - /// // negligible (log2(10^9) == 29.9). - /// dur.as_secs() << 30 | dur.subsec_nanos() as u64 - /// } - /// - /// let mut rng = JitterRng::new_with_timer(get_nstime); - /// let rounds = rng.test_timer()?; - /// rng.set_rounds(rounds); // optional - /// let _ = rng.gen::<u64>(); - /// - /// // Ready for use - /// let v: u64 = rng.gen(); - /// # Ok(()) - /// # } - /// - /// # let _ = try_inner(); - /// ``` - /// - /// [`test_timer`]: struct.JitterRng.html#method.test_timer - /// [`set_rounds`]: struct.JitterRng.html#method.set_rounds - pub fn new_with_timer(timer: fn() -> u64) -> JitterRng { - JitterRng { - data: 0, - rounds: 64, - timer, - mem_prev_index: 0, - data_half_used: false, - } - } - - /// Configures how many rounds are used to generate each 64-bit value. - /// This must be greater than zero, and has a big impact on performance - /// and output quality. - /// - /// [`new_with_timer`] conservatively uses 64 rounds, but often less rounds - /// can be used. The `test_timer()` function returns the minimum number of - /// rounds required for full strength (platform dependent), so one may use - /// `rng.set_rounds(rng.test_timer()?);` or cache the value. - /// - /// [`new_with_timer`]: struct.JitterRng.html#method.new_with_timer - pub fn set_rounds(&mut self, rounds: u8) { - assert!(rounds > 0); - self.rounds = rounds; - } - - // Calculate a random loop count used for the next round of an entropy - // collection, based on bits from a fresh value from the timer. - // - // The timer is folded to produce a number that contains at most `n_bits` - // bits. - // - // Note: A constant should be added to the resulting random loop count to - // prevent loops that run 0 times. - #[inline(never)] - fn random_loop_cnt(&mut self, n_bits: u32) -> u32 { - let mut rounds = 0; - - let mut time = (self.timer)(); - // Mix with the current state of the random number balance the random - // loop counter a bit more. - time ^= self.data; - - // We fold the time value as much as possible to ensure that as many - // bits of the time stamp are included as possible. - let folds = (64 + n_bits - 1) / n_bits; - let mask = (1 << n_bits) - 1; - for _ in 0..folds { - rounds ^= time & mask; - time >>= n_bits; - } - - rounds as u32 - } - - // CPU jitter noise source - // Noise source based on the CPU execution time jitter - // - // This function injects the individual bits of the time value into the - // entropy pool using an LFSR. 
- // - // The code is deliberately inefficient with respect to the bit shifting. - // This function not only acts as folding operation, but this function's - // execution is used to measure the CPU execution time jitter. Any change to - // the loop in this function implies that careful retesting must be done. - #[inline(never)] - fn lfsr_time(&mut self, time: u64, var_rounds: bool) { - fn lfsr(mut data: u64, time: u64) -> u64{ - for i in 1..65 { - let mut tmp = time << (64 - i); - tmp >>= 64 - 1; - - // Fibonacci LSFR with polynomial of - // x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is - // primitive according to - // http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf - // (the shift values are the polynomial values minus one - // due to counting bits from 0 to 63). As the current - // position is always the LSB, the polynomial only needs - // to shift data in from the left without wrap. - data ^= tmp; - data ^= (data >> 63) & 1; - data ^= (data >> 60) & 1; - data ^= (data >> 55) & 1; - data ^= (data >> 30) & 1; - data ^= (data >> 27) & 1; - data ^= (data >> 22) & 1; - data = data.rotate_left(1); - } - data - } - - // Note: in the reference implementation only the last round effects - // `self.data`, all the other results are ignored. To make sure the - // other rounds are not optimised out, we first run all but the last - // round on a throw-away value instead of the real `self.data`. - let mut lfsr_loop_cnt = 0; - if var_rounds { lfsr_loop_cnt = self.random_loop_cnt(4) }; - - let mut throw_away: u64 = 0; - for _ in 0..lfsr_loop_cnt { - throw_away = lfsr(throw_away, time); - } - black_box(throw_away); - - self.data = lfsr(self.data, time); - } - - // Memory Access noise source - // This is a noise source based on variations in memory access times - // - // This function performs memory accesses which will add to the timing - // variations due to an unknown amount of CPU wait states that need to be - // added when accessing memory. The memory size should be larger than the L1 - // caches as outlined in the documentation and the associated testing. - // - // The L1 cache has a very high bandwidth, albeit its access rate is usually - // slower than accessing CPU registers. Therefore, L1 accesses only add - // minimal variations as the CPU has hardly to wait. Starting with L2, - // significant variations are added because L2 typically does not belong to - // the CPU any more and therefore a wider range of CPU wait states is - // necessary for accesses. L3 and real memory accesses have even a wider - // range of wait states. However, to reliably access either L3 or memory, - // the `self.mem` memory must be quite large which is usually not desirable. - #[inline(never)] - fn memaccess(&mut self, mem: &mut [u8; MEMORY_SIZE], var_rounds: bool) { - let mut acc_loop_cnt = 128; - if var_rounds { acc_loop_cnt += self.random_loop_cnt(4) }; - - let mut index = self.mem_prev_index as usize; - for _ in 0..acc_loop_cnt { - // Addition of memblocksize - 1 to index with wrap around logic to - // ensure that every memory location is hit evenly. - // The modulus also allows the compiler to remove the indexing - // bounds check. 
- index = (index + MEMORY_BLOCKSIZE - 1) % MEMORY_SIZE; - - // memory access: just add 1 to one byte - // memory access implies read from and write to memory location - mem[index] = mem[index].wrapping_add(1); - } - self.mem_prev_index = index as u16; - } - - // This is the heart of the entropy generation: calculate time deltas and - // use the CPU jitter in the time deltas. The jitter is injected into the - // entropy pool. - // - // Ensure that `ec.prev_time` is primed before using the output of this - // function. This can be done by calling this function and not using its - // result. - fn measure_jitter(&mut self, ec: &mut EcState) -> Option<()> { - // Invoke one noise source before time measurement to add variations - self.memaccess(&mut ec.mem, true); - - // Get time stamp and calculate time delta to previous - // invocation to measure the timing variations - let time = (self.timer)(); - // Note: wrapping_sub combined with a cast to `i64` generates a correct - // delta, even in the unlikely case this is a timer that is not strictly - // monotonic. - let current_delta = time.wrapping_sub(ec.prev_time) as i64 as i32; - ec.prev_time = time; - - // Call the next noise source which also injects the data - self.lfsr_time(current_delta as u64, true); - - // Check whether we have a stuck measurement (i.e. does the last - // measurement holds entropy?). - if ec.stuck(current_delta) { return None }; - - // Rotate the data buffer by a prime number (any odd number would - // do) to ensure that every bit position of the input time stamp - // has an even chance of being merged with a bit position in the - // entropy pool. We do not use one here as the adjacent bits in - // successive time deltas may have some form of dependency. The - // chosen value of 7 implies that the low 7 bits of the next - // time delta value is concatenated with the current time delta. - self.data = self.data.rotate_left(7); - - Some(()) - } - - // Shuffle the pool a bit by mixing some value with a bijective function - // (XOR) into the pool. - // - // The function generates a mixer value that depends on the bits set and - // the location of the set bits in the random number generated by the - // entropy source. Therefore, based on the generated random number, this - // mixer value can have 2^64 different values. That mixer value is - // initialized with the first two SHA-1 constants. After obtaining the - // mixer value, it is XORed into the random number. - // - // The mixer value is not assumed to contain any entropy. But due to the - // XOR operation, it can also not destroy any entropy present in the - // entropy pool. - #[inline(never)] - fn stir_pool(&mut self) { - // This constant is derived from the first two 32 bit initialization - // vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1 - // The order does not really matter as we do not rely on the specific - // numbers. We just pick the SHA-1 constants as they have a good mix of - // bit set and unset. - const CONSTANT: u64 = 0x67452301efcdab89; - - // The start value of the mixer variable is derived from the third - // and fourth 32 bit initialization vector of SHA-1 as defined in - // FIPS 180-4 section 5.3.1 - let mut mixer = 0x98badcfe10325476; - - // This is a constant time function to prevent leaking timing - // information about the random number. 
- // The normal code is: - // ``` - // for i in 0..64 { - // if ((self.data >> i) & 1) == 1 { mixer ^= CONSTANT; } - // } - // ``` - // This is a bit fragile, as LLVM really wants to use branches here, and - // we rely on it to not recognise the opportunity. - for i in 0..64 { - let apply = (self.data >> i) & 1; - let mask = !apply.wrapping_sub(1); - mixer ^= CONSTANT & mask; - mixer = mixer.rotate_left(1); - } - - self.data ^= mixer; - } - - fn gen_entropy(&mut self) -> u64 { - trace!("JitterRng: collecting entropy"); - - // Prime `ec.prev_time`, and run the noice sources to make sure the - // first loop round collects the expected entropy. - let mut ec = EcState { - prev_time: (self.timer)(), - last_delta: 0, - last_delta2: 0, - mem: [0; MEMORY_SIZE], - }; - let _ = self.measure_jitter(&mut ec); - - for _ in 0..self.rounds { - // If a stuck measurement is received, repeat measurement - // Note: we do not guard against an infinite loop, that would mean - // the timer suddenly became broken. - while self.measure_jitter(&mut ec).is_none() {} - } - - // Do a single read from `self.mem` to make sure the Memory Access noise - // source is not optimised out. - black_box(ec.mem[0]); - - self.stir_pool(); - self.data - } - - /// Basic quality tests on the timer, by measuring CPU timing jitter a few - /// hundred times. - /// - /// If succesful, this will return the estimated number of rounds necessary - /// to collect 64 bits of entropy. Otherwise a [`TimerError`] with the cause - /// of the failure will be returned. - /// - /// [`TimerError`]: enum.TimerError.html - pub fn test_timer(&mut self) -> Result<u8, TimerError> { - debug!("JitterRng: testing timer ..."); - // We could add a check for system capabilities such as `clock_getres` - // or check for `CONFIG_X86_TSC`, but it does not make much sense as the - // following sanity checks verify that we have a high-resolution timer. - - let mut delta_sum = 0; - let mut old_delta = 0; - - let mut time_backwards = 0; - let mut count_mod = 0; - let mut count_stuck = 0; - - let mut ec = EcState { - prev_time: (self.timer)(), - last_delta: 0, - last_delta2: 0, - mem: [0; MEMORY_SIZE], - }; - - // TESTLOOPCOUNT needs some loops to identify edge systems. - // 100 is definitely too little. - const TESTLOOPCOUNT: u64 = 300; - const CLEARCACHE: u64 = 100; - - for i in 0..(CLEARCACHE + TESTLOOPCOUNT) { - // Measure time delta of core entropy collection logic - let time = (self.timer)(); - self.memaccess(&mut ec.mem, true); - self.lfsr_time(time, true); - let time2 = (self.timer)(); - - // Test whether timer works - if time == 0 || time2 == 0 { - return Err(TimerError::NoTimer); - } - let delta = time2.wrapping_sub(time) as i64 as i32; - - // Test whether timer is fine grained enough to provide delta even - // when called shortly after each other -- this implies that we also - // have a high resolution timer - if delta == 0 { - return Err(TimerError::CoarseTimer); - } - - // Up to here we did not modify any variable that will be - // evaluated later, but we already performed some work. Thus we - // already have had an impact on the caches, branch prediction, - // etc. with the goal to clear it to get the worst case - // measurements. - if i < CLEARCACHE { continue; } - - if ec.stuck(delta) { count_stuck += 1; } - - // Test whether we have an increasing timer. - if !(time2 > time) { time_backwards += 1; } - - // Count the number of times the counter increases in steps of 100ns - // or greater. 
- if (delta % 100) == 0 { count_mod += 1; } - - // Ensure that we have a varying delta timer which is necessary for - // the calculation of entropy -- perform this check only after the - // first loop is executed as we need to prime the old_delta value - delta_sum += (delta - old_delta).abs() as u64; - old_delta = delta; - } - - // Do a single read from `self.mem` to make sure the Memory Access noise - // source is not optimised out. - black_box(ec.mem[0]); - - // We allow the time to run backwards for up to three times. - // This can happen if the clock is being adjusted by NTP operations. - // If such an operation just happens to interfere with our test, it - // should not fail. The value of 3 should cover the NTP case being - // performed during our test run. - if time_backwards > 3 { - return Err(TimerError::NotMonotonic); - } - - // Test that the available amount of entropy per round does not get to - // low. We expect 1 bit of entropy per round as a reasonable minimum - // (although less is possible, it means the collector loop has to run - // much more often). - // `assert!(delta_average >= log2(1))` - // `assert!(delta_sum / TESTLOOPCOUNT >= 1)` - // `assert!(delta_sum >= TESTLOOPCOUNT)` - if delta_sum < TESTLOOPCOUNT { - return Err(TimerError::TinyVariantions); - } - - // Ensure that we have variations in the time stamp below 100 for at - // least 10% of all checks -- on some platforms, the counter increments - // in multiples of 100, but not always - if count_mod > (TESTLOOPCOUNT * 9 / 10) { - return Err(TimerError::CoarseTimer); - } - - // If we have more than 90% stuck results, then this Jitter RNG is - // likely to not work well. - if count_stuck > (TESTLOOPCOUNT * 9 / 10) { - return Err(TimerError::TooManyStuck); - } - - // Estimate the number of `measure_jitter` rounds necessary for 64 bits - // of entropy. - // - // We don't try very hard to come up with a good estimate of the - // available bits of entropy per round here for two reasons: - // 1. Simple estimates of the available bits (like Shannon entropy) are - // too optimistic. - // 2. Unless we want to waste a lot of time during intialization, there - // only a small number of samples are available. - // - // Therefore we use a very simple and conservative estimate: - // `let bits_of_entropy = log2(delta_average) / 2`. - // - // The number of rounds `measure_jitter` should run to collect 64 bits - // of entropy is `64 / bits_of_entropy`. - let delta_average = delta_sum / TESTLOOPCOUNT; - - if delta_average >= 16 { - let log2 = 64 - delta_average.leading_zeros(); - // Do something similar to roundup(64/(log2/2)): - Ok( ((64u32 * 2 + log2 - 1) / log2) as u8) - } else { - // For values < 16 the rounding error becomes too large, use a - // lookup table. - // Values 0 and 1 are invalid, and filtered out by the - // `delta_sum < TESTLOOPCOUNT` test above. - let log2_lookup = [0, 0, 128, 81, 64, 56, 50, 46, - 43, 41, 39, 38, 36, 35, 34, 33]; - Ok(log2_lookup[delta_average as usize]) - } - } - - /// Statistical test: return the timer delta of one normal run of the - /// `JitterRng` entropy collector. - /// - /// Setting `var_rounds` to `true` will execute the memory access and the - /// CPU jitter noice sources a variable amount of times (just like a real - /// `JitterRng` round). - /// - /// Setting `var_rounds` to `false` will execute the noice sources the - /// minimal number of times. This can be used to measure the minimum amount - /// of entropy one round of the entropy collector can collect in the worst - /// case. 
- /// - /// See [Quality testing](struct.JitterRng.html#quality-testing) on how to - /// use `timer_stats` to test the quality of `JitterRng`. - pub fn timer_stats(&mut self, var_rounds: bool) -> i64 { - let mut mem = [0; MEMORY_SIZE]; - - let time = (self.timer)(); - self.memaccess(&mut mem, var_rounds); - self.lfsr_time(time, var_rounds); - let time2 = (self.timer)(); - time2.wrapping_sub(time) as i64 - } -} - -#[cfg(feature="std")] -mod platform { - #[cfg(not(any(target_os = "macos", target_os = "ios", - target_os = "windows", - target_arch = "wasm32")))] - pub fn get_nstime() -> u64 { - use std::time::{SystemTime, UNIX_EPOCH}; - - let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); - // The correct way to calculate the current time is - // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64` - // But this is faster, and the difference in terms of entropy is - // negligible (log2(10^9) == 29.9). - dur.as_secs() << 30 | dur.subsec_nanos() as u64 - } - - #[cfg(any(target_os = "macos", target_os = "ios"))] - pub fn get_nstime() -> u64 { - extern crate libc; - // On Mac OS and iOS std::time::SystemTime only has 1000ns resolution. - // We use `mach_absolute_time` instead. This provides a CPU dependent - // unit, to get real nanoseconds the result should by multiplied by - // numer/denom from `mach_timebase_info`. - // But we are not interested in the exact nanoseconds, just entropy. So - // we use the raw result. - unsafe { libc::mach_absolute_time() } - } - - #[cfg(target_os = "windows")] - pub fn get_nstime() -> u64 { - extern crate winapi; - unsafe { - let mut t = super::mem::zeroed(); - winapi::um::profileapi::QueryPerformanceCounter(&mut t); - *t.QuadPart() as u64 - } - } -} - -// A function that is opaque to the optimizer to assist in avoiding dead-code -// elimination. Taken from `bencher`. -fn black_box<T>(dummy: T) -> T { - unsafe { - let ret = ptr::read_volatile(&dummy); - mem::forget(dummy); - ret - } -} - -impl RngCore for JitterRng { - fn next_u32(&mut self) -> u32 { - // We want to use both parts of the generated entropy - if self.data_half_used { - self.data_half_used = false; - (self.data >> 32) as u32 - } else { - self.data = self.next_u64(); - self.data_half_used = true; - self.data as u32 - } - } - - fn next_u64(&mut self) -> u64 { - self.data_half_used = false; - self.gen_entropy() - } - - fn fill_bytes(&mut self, dest: &mut [u8]) { - // Fill using `next_u32`. This is faster for filling small slices (four - // bytes or less), while the overhead is negligible. - // - // This is done especially for wrappers that implement `next_u32` - // themselves via `fill_bytes`. - impls::fill_bytes_via_next(self, dest) - } - - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - Ok(self.fill_bytes(dest)) - } -} - -impl CryptoRng for JitterRng {} - -#[cfg(test)] -mod test_jitter_init { - use super::JitterRng; - - #[cfg(all(feature="std", not(target_arch = "wasm32")))] - #[test] - fn test_jitter_init() { - use RngCore; - // Because this is a debug build, measurements here are not representive - // of the final release build. - // Don't fail this test if initializing `JitterRng` fails because of a - // bad timer (the timer from the standard library may not have enough - // accuracy on all platforms). 
- match JitterRng::new() { - Ok(ref mut rng) => { - // false positives are possible, but extremely unlikely - assert!(rng.next_u32() | rng.next_u32() != 0); - }, - Err(_) => {}, - } - } - - #[test] - fn test_jitter_bad_timer() { - fn bad_timer() -> u64 { 0 } - let mut rng = JitterRng::new_with_timer(bad_timer); - assert!(rng.test_timer().is_err()); - } -} diff --git a/rand/src/rngs/mock.rs b/rand/src/rngs/mock.rs index 3c9a994..b4081da 100644 --- a/rand/src/rngs/mock.rs +++ b/rand/src/rngs/mock.rs @@ -39,21 +39,26 @@ impl StepRng { } impl RngCore for StepRng { + #[inline] fn next_u32(&mut self) -> u32 { self.next_u64() as u32 } + #[inline] fn next_u64(&mut self) -> u64 { let result = self.v; self.v = self.v.wrapping_add(self.a); result } + #[inline] fn fill_bytes(&mut self, dest: &mut [u8]) { impls::fill_bytes_via_next(self, dest); } + #[inline] fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - Ok(self.fill_bytes(dest)) + self.fill_bytes(dest); + Ok(()) } } diff --git a/rand/src/rngs/mod.rs b/rand/src/rngs/mod.rs index 847fc94..abf3243 100644 --- a/rand/src/rngs/mod.rs +++ b/rand/src/rngs/mod.rs @@ -6,177 +6,114 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Random number generators and adapters for common usage: -//! -//! - [`ThreadRng`], a fast, secure, auto-seeded thread-local generator -//! - [`StdRng`] and [`SmallRng`], algorithms to cover typical usage -//! - [`EntropyRng`], [`OsRng`] and [`JitterRng`] as entropy sources -//! - [`mock::StepRng`] as a simple counter for tests -//! - [`adapter::ReadRng`] to read from a file/stream -//! - [`adapter::ReseedingRng`] to reseed a PRNG on clone / process fork etc. -//! -//! # Background: Random number generators (RNGs) -//! -//! Computers are inherently deterministic, so to get *random* numbers one -//! either has to use a hardware generator or collect bits of *entropy* from -//! various sources (e.g. event timestamps, or jitter). This is a relatively -//! slow and complicated operation. -//! -//! Generally the operating system will collect some entropy, remove bias, and -//! use that to seed its own PRNG; [`OsRng`] provides an interface to this. -//! [`JitterRng`] is an entropy collector included with Rand that measures -//! jitter in the CPU execution time, and jitter in memory access time. -//! [`EntropyRng`] is a wrapper that uses the best entropy source that is -//! available. -//! -//! ## Pseudo-random number generators -//! -//! What is commonly used instead of "true" random number generators, are -//! *pseudo-random number generators* (PRNGs), deterministic algorithms that -//! produce an infinite stream of pseudo-random numbers from a small random -//! seed. PRNGs are faster, and have better provable properties. The numbers -//! produced can be statistically of very high quality and can be impossible to -//! predict. (They can also have obvious correlations and be trivial to predict; -//! quality varies.) -//! -//! There are two different types of PRNGs: those developed for simulations -//! and statistics, and those developed for use in cryptography; the latter are -//! called Cryptographically Secure PRNGs (CSPRNG or CPRNG). Both types can -//! have good statistical quality but the latter also have to be impossible to -//! predict, even after seeing many previous output values. Rand provides a good -//! default algorithm from each class: -//! -//! - [`SmallRng`] is a PRNG chosen for low memory usage, high performance and -//!
good statistical quality. -//! - [`StdRng`] is a CSPRNG chosen for good performance and trust of security -//! (based on reviews, maturity and usage). The current algorithm is HC-128, -//! which is one of the recommendations by ECRYPT's eSTREAM project. -//! -//! The above PRNGs do not cover all use-cases; more algorithms can be found in -//! the [`prng` module], as well as in several other crates. For example, you -//! may wish a CSPRNG with significantly lower memory usage than [`StdRng`] -//! while being less concerned about performance, in which case [`ChaChaRng`] -//! is a good choice. -//! -//! One complexity is that the internal state of a PRNG must change with every -//! generated number. For APIs this generally means a mutable reference to the -//! state of the PRNG has to be passed around. -//! -//! A solution is [`ThreadRng`]. This is a thread-local implementation of -//! [`StdRng`] with automatic seeding on first use. It is the best choice if you -//! "just" want a convenient, secure, fast random number source. Use via the -//! [`thread_rng`] function, which gets a reference to the current thread's -//! local instance. -//! -//! ## Seeding -//! -//! As mentioned above, PRNGs require a random seed in order to produce random -//! output. This is especially important for CSPRNGs, which are still -//! deterministic algorithms, thus can only be secure if their seed value is -//! also secure. To seed a PRNG, use one of: -//! -//! - [`FromEntropy::from_entropy`]; this is the most convenient way to seed -//! with fresh, secure random data. -//! - [`SeedableRng::from_rng`]; this allows seeding from another PRNG or -//! from an entropy source such as [`EntropyRng`]. -//! - [`SeedableRng::from_seed`]; this is mostly useful if you wish to be able -//! to reproduce the output sequence by using a fixed seed. (Don't use -//! [`StdRng`] or [`SmallRng`] in this case since different algorithms may be -//! used by future versions of Rand; use an algorithm from the -//! [`prng` module].) -//! -//! ## Conclusion -//! -//! - [`thread_rng`] is what you often want to use. -//! - If you want more control, flexibility, or better performance, use -//! [`StdRng`], [`SmallRng`] or an algorithm from the [`prng` module]. -//! - Use [`FromEntropy::from_entropy`] to seed new PRNGs. -//! - If you need reproducibility, use [`SeedableRng::from_seed`] combined with -//! a named PRNG. -//! -//! More information and notes on cryptographic security can be found -//! in the [`prng` module]. -//! -//! ## Examples -//! -//! Examples of seeding PRNGs: -//! -//! ``` -//! use rand::prelude::*; -//! # use rand::Error; -//! -//! // StdRng seeded securely by the OS or local entropy collector: -//! let mut rng = StdRng::from_entropy(); -//! # let v: u32 = rng.gen(); -//! -//! // SmallRng seeded from thread_rng: -//! # fn try_inner() -> Result<(), Error> { -//! let mut rng = SmallRng::from_rng(thread_rng())?; -//! # let v: u32 = rng.gen(); -//! # Ok(()) -//! # } -//! # try_inner().unwrap(); -//! -//! // SmallRng seeded by a constant, for deterministic results: -//! let seed = [1,2,3,4, 5,6,7,8, 9,10,11,12, 13,14,15,16]; // byte array -//! let mut rng = SmallRng::from_seed(seed); -//! # let v: u32 = rng.gen(); -//! ``` -//! -//! -//! # Implementing custom RNGs -//! -//! If you want to implement custom RNG, see the [`rand_core`] crate. The RNG -//! will have to implement the [`RngCore`] trait, where the [`Rng`] trait is -//! build on top of. -//! -//! If the RNG needs seeding, also implement the [`SeedableRng`] trait. -//! -//! 
[`CryptoRng`] is a marker trait cryptographically secure PRNGs can -//! implement. -//! -//! -// This module: -//! [`ThreadRng`]: struct.ThreadRng.html -//! [`StdRng`]: struct.StdRng.html -//! [`SmallRng`]: struct.SmallRng.html -//! [`EntropyRng`]: struct.EntropyRng.html -//! [`OsRng`]: struct.OsRng.html -//! [`JitterRng`]: struct.JitterRng.html -// Other traits and functions: -//! [`rand_core`]: https://crates.io/crates/rand_core -//! [`prng` module]: ../prng/index.html -//! [`CryptoRng`]: ../trait.CryptoRng.html -//! [`FromEntropy`]: ../trait.FromEntropy.html -//! [`FromEntropy::from_entropy`]: ../trait.FromEntropy.html#tymethod.from_entropy -//! [`RngCore`]: ../trait.RngCore.html -//! [`Rng`]: ../trait.Rng.html -//! [`SeedableRng`]: ../trait.SeedableRng.html -//! [`SeedableRng::from_rng`]: ../trait.SeedableRng.html#tymethod.from_rng -//! [`SeedableRng::from_seed`]: ../trait.SeedableRng.html#tymethod.from_seed -//! [`thread_rng`]: ../fn.thread_rng.html -//! [`mock::StepRng`]: mock/struct.StepRng.html -//! [`adapter::ReadRng`]: adapter/struct.ReadRng.html -//! [`adapter::ReseedingRng`]: adapter/struct.ReseedingRng.html -//! [`ChaChaRng`]: ../../rand_chacha/struct.ChaChaRng.html +//! Random number generators and adapters +//! +//! ## Background: Random number generators (RNGs) +//! +//! Computers cannot produce random numbers from nowhere. We classify +//! random number generators as follows: +//! +//! - "True" random number generators (TRNGs) use hard-to-predict data sources +//! (e.g. the high-resolution parts of event timings and sensor jitter) to +//! harvest random bit-sequences, apply algorithms to remove bias and +//! estimate available entropy, then combine these bits into a byte-sequence +//! or an entropy pool. This job is usually done by the operating system or +//! a hardware generator (HRNG). +//! - "Pseudo"-random number generators (PRNGs) use algorithms to transform a +//! seed into a sequence of pseudo-random numbers. These generators can be +//! fast and produce well-distributed unpredictable random numbers (or not). +//! They are usually deterministic: given algorithm and seed, the output +//! sequence can be reproduced. They have finite period and eventually loop; +//! with many algorithms this period is fixed and can be proven sufficiently +//! long, while others are chaotic and the period depends on the seed. +//! - "Cryptographically secure" pseudo-random number generators (CSPRNGs) +//! are the sub-set of PRNGs which are secure. Security of the generator +//! relies both on hiding the internal state and using a strong algorithm. +//! +//! ## Traits and functionality +//! +//! All RNGs implement the [`RngCore`] trait, as a consequence of which the +//! [`Rng`] extension trait is automatically implemented. Secure RNGs may +//! additionally implement the [`CryptoRng`] trait. +//! +//! All PRNGs require a seed to produce their random number sequence. The +//! [`SeedableRng`] trait provides three ways of constructing PRNGs: +//! +//! - `from_seed` accepts a type specific to the PRNG +//! - `from_rng` allows a PRNG to be seeded from any other RNG +//! - `seed_from_u64` allows any PRNG to be seeded from a `u64` insecurely +//! - `from_entropy` securely seeds a PRNG from fresh entropy +//! +//! Use the [`rand_core`] crate when implementing your own RNGs. +//! +//! ## Our generators +//! +//! This crate provides several random number generators: +//! +//! - [`OsRng`] is an interface to the operating system's random number +//! source. 
Typically the operating system uses a CSPRNG with entropy +//! provided by a TRNG and some type of on-going re-seeding. +//! - [`ThreadRng`], provided by the [`thread_rng`] function, is a handle to a +//! thread-local CSPRNG with periodic seeding from [`OsRng`]. Because this +//! is local, it is typically much faster than [`OsRng`]. It should be +//! secure, though the paranoid may prefer [`OsRng`]. +//! - [`StdRng`] is a CSPRNG chosen for good performance and trust of security +//! (based on reviews, maturity and usage). The current algorithm is ChaCha20, +//! which is well established and rigorously analysed. +//! [`StdRng`] provides the algorithm used by [`ThreadRng`] but without +//! periodic reseeding. +//! - [`SmallRng`] is an **insecure** PRNG designed to be fast, simple, require +//! little memory, and have good output quality. +//! +//! The algorithms selected for [`StdRng`] and [`SmallRng`] may change in any +//! release and may be platform-dependent, therefore they should be considered +//! **not reproducible**. +//! +//! ## Additional generators +//! +//! **TRNGs**: The [`rdrand`] crate provides an interface to the RDRAND and +//! RDSEED instructions available in modern Intel and AMD CPUs. +//! The [`rand_jitter`] crate provides a user-space implementation of +//! entropy harvesting from CPU timer jitter, but is very slow and has +//! [security issues](https://github.com/rust-random/rand/issues/699). +//! +//! **PRNGs**: Several companion crates are available, providing individual or +//! families of PRNG algorithms. These provide the implementations behind +//! [`StdRng`] and [`SmallRng`] but can also be used directly, indeed *should* +//! be used directly when **reproducibility** matters. +//! Some suggestions are: [`rand_chacha`], [`rand_pcg`], [`rand_xoshiro`]. +//! A full list can be found by searching for crates with the [`rng` tag]. +//! +//! [`SmallRng`]: rngs::SmallRng +//! [`StdRng`]: rngs::StdRng +//! [`OsRng`]: rngs::OsRng +//! [`ThreadRng`]: rngs::ThreadRng +//! [`mock::StepRng`]: rngs::mock::StepRng +//! [`adapter::ReadRng`]: rngs::adapter::ReadRng +//! [`adapter::ReseedingRng`]: rngs::adapter::ReseedingRng +//! [`rdrand`]: https://crates.io/crates/rdrand +//! [`rand_jitter`]: https://crates.io/crates/rand_jitter +//! [`rand_chacha`]: https://crates.io/crates/rand_chacha +//! [`rand_pcg`]: https://crates.io/crates/rand_pcg +//! [`rand_xoshiro`]: https://crates.io/crates/rand_xoshiro +//! [`rng` tag]: https://crates.io/keywords/rng pub mod adapter; #[cfg(feature="std")] mod entropy; -mod jitter; pub mod mock; // Public so we don't export `StepRng` directly, making it a bit // more clear it is intended for testing. +#[cfg(feature="small_rng")] mod small; mod std; #[cfg(feature="std")] pub(crate) mod thread; - -pub use self::jitter::{JitterRng, TimerError}; +#[allow(deprecated)] #[cfg(feature="std")] pub use self::entropy::EntropyRng; +#[cfg(feature="small_rng")] pub use self::small::SmallRng; pub use self::std::StdRng; #[cfg(feature="std")] pub use self::thread::ThreadRng; -#[cfg(feature="rand_os")] -pub use rand_os::OsRng; +#[cfg(feature="getrandom")] pub use rand_core::OsRng; diff --git a/rand/src/rngs/small.rs b/rand/src/rngs/small.rs index b652c8c..6571363 100644 --- a/rand/src/rngs/small.rs +++ b/rand/src/rngs/small.rs @@ -8,35 +8,42 @@ //! 
A small fast RNG -use {RngCore, SeedableRng, Error}; +use rand_core::{RngCore, SeedableRng, Error}; -#[cfg(all(all(rustc_1_26, not(target_os = "emscripten")), target_pointer_width = "64"))] -type Rng = ::rand_pcg::Pcg64Mcg; -#[cfg(not(all(all(rustc_1_26, not(target_os = "emscripten")), target_pointer_width = "64")))] -type Rng = ::rand_pcg::Pcg32; +#[cfg(all(not(target_os = "emscripten"), target_pointer_width = "64"))] +type Rng = rand_pcg::Pcg64Mcg; +#[cfg(not(all(not(target_os = "emscripten"), target_pointer_width = "64")))] +type Rng = rand_pcg::Pcg32; -/// An RNG recommended when small state, cheap initialization and good -/// performance are required. The PRNG algorithm in `SmallRng` is chosen to be -/// efficient on the current platform, **without consideration for cryptography -/// or security**. The size of its state is much smaller than for [`StdRng`]. +/// A small-state, fast non-crypto PRNG /// -/// Reproducibility of output from this generator is however not required, thus -/// future library versions may use a different internal generator with -/// different output. Further, this generator may not be portable and can -/// produce different output depending on the architecture. If you require -/// reproducible output, use a named RNG. Refer to the documentation on the -/// [`prng` module](../prng/index.html). +/// `SmallRng` may be a good choice when a PRNG with small state, cheap +/// initialization, good statistical quality and good performance are required. +/// It is **not** a good choice when security against prediction or +/// reproducibility are important. /// -/// The current algorithm is [`Pcg64Mcg`] on 64-bit platforms with Rust version -/// 1.26 and later, or [`Pcg32`] otherwise. +/// This PRNG is **feature-gated**: to use, you must enable the crate feature +/// `small_rng`. +/// +/// The algorithm is deterministic but should not be considered reproducible +/// due to dependence on platform and possible replacement in future +/// library versions. For a reproducible generator, use a named PRNG from an +/// external crate, e.g. [rand_pcg] or [rand_chacha]. +/// Refer also to [The Book](https://rust-random.github.io/book/guide-rngs.html). +/// +/// The PRNG algorithm in `SmallRng` is chosen to be +/// efficient on the current platform, without consideration for cryptography +/// or security. The size of its state is much smaller than [`StdRng`]. +/// The current algorithm is [`Pcg64Mcg`](rand_pcg::Pcg64Mcg) on 64-bit +/// platforms and [`Pcg32`](rand_pcg::Pcg32) on 32-bit platforms. Both are +/// implemented by the [rand_pcg] crate. /// /// # Examples /// -/// Initializing `SmallRng` with a random seed can be done using [`FromEntropy`]: +/// Initializing `SmallRng` with a random seed can be done using [`SeedableRng::from_entropy`]: /// /// ``` -/// # use rand::Rng; -/// use rand::FromEntropy; +/// use rand::{Rng, SeedableRng}; /// use rand::rngs::SmallRng; /// /// // Create small, cheap to initialize and fast RNG with a random seed. 
@@ -64,11 +71,10 @@ type Rng = ::rand_pcg::Pcg32; /// .collect(); /// ``` /// -/// [`FromEntropy`]: ../trait.FromEntropy.html -/// [`StdRng`]: struct.StdRng.html -/// [`thread_rng`]: ../fn.thread_rng.html -/// [`Pcg64Mcg`]: ../../rand_pcg/type.Pcg64Mcg.html -/// [`Pcg32`]: ../../rand_pcg/type.Pcg32.html +/// [`StdRng`]: crate::rngs::StdRng +/// [`thread_rng`]: crate::thread_rng +/// [rand_chacha]: https://crates.io/crates/rand_chacha +/// [rand_pcg]: https://crates.io/crates/rand_pcg #[derive(Clone, Debug)] pub struct SmallRng(Rng); @@ -83,10 +89,12 @@ impl RngCore for SmallRng { self.0.next_u64() } + #[inline(always)] fn fill_bytes(&mut self, dest: &mut [u8]) { self.0.fill_bytes(dest); } + #[inline(always)] fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { self.0.try_fill_bytes(dest) } @@ -95,10 +103,12 @@ impl RngCore for SmallRng { impl SeedableRng for SmallRng { type Seed = <Rng as SeedableRng>::Seed; + #[inline(always)] fn from_seed(seed: Self::Seed) -> Self { SmallRng(Rng::from_seed(seed)) } + #[inline(always)] fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> { Rng::from_rng(rng).map(SmallRng) } diff --git a/rand/src/rngs/std.rs b/rand/src/rngs/std.rs index ce1658b..22e08ae 100644 --- a/rand/src/rngs/std.rs +++ b/rand/src/rngs/std.rs @@ -8,25 +8,30 @@ //! The standard RNG -use {RngCore, CryptoRng, Error, SeedableRng}; -use rand_hc::Hc128Rng; +use crate::{RngCore, CryptoRng, Error, SeedableRng}; + +#[cfg(target_os = "emscripten")] pub(crate) use rand_hc::Hc128Core as Core; +#[cfg(not(target_os = "emscripten"))] pub(crate) use rand_chacha::ChaCha20Core as Core; +#[cfg(target_os = "emscripten")] use rand_hc::Hc128Rng as Rng; +#[cfg(not(target_os = "emscripten"))] use rand_chacha::ChaCha20Rng as Rng; /// The standard RNG. The PRNG algorithm in `StdRng` is chosen to be efficient /// on the current platform, to be statistically strong and unpredictable /// (meaning a cryptographically secure PRNG). /// -/// The current algorithm used on all platforms is [HC-128]. +/// The current algorithm used is the ChaCha block cipher with either 20 or 12 +/// rounds (see the `stdrng_*` feature flags, documented in the README). +/// This may change as new evidence of cipher security and performance +/// becomes available. /// -/// Reproducibility of output from this generator is however not required, thus -/// future library versions may use a different internal generator with -/// different output. Further, this generator may not be portable and can -/// produce different output depending on the architecture. If you require -/// reproducible output, use a named RNG, for example [`ChaChaRng`]. +/// The algorithm is deterministic but should not be considered reproducible +/// due to dependence on configuration and possible replacement in future +/// library versions. For a secure reproducible generator, we recommend use of +/// the [rand_chacha] crate directly. 
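Following the reproducibility advice above, a minimal sketch of seeding `rand_chacha` directly; this assumes the `rand_chacha`/`rand_core` crates this diff already depends on, and the seed value is arbitrary:

```rust
use rand_chacha::ChaCha20Rng;
use rand_core::{RngCore, SeedableRng};

fn main() {
    // A fixed 32-byte seed gives a reproducible stream, independent of
    // whatever algorithm a future `StdRng` may select.
    let seed = [42u8; 32];

    let mut rng_a = ChaCha20Rng::from_seed(seed);
    let mut rng_b = ChaCha20Rng::from_seed(seed);

    // Same seed, same output.
    assert_eq!(rng_a.next_u64(), rng_b.next_u64());
}
```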
/// -/// [HC-128]: ../../rand_hc/struct.Hc128Rng.html -/// [`ChaChaRng`]: ../../rand_chacha/struct.ChaChaRng.html +/// [rand_chacha]: https://crates.io/crates/rand_chacha #[derive(Clone, Debug)] -pub struct StdRng(Hc128Rng); +pub struct StdRng(Rng); impl RngCore for StdRng { #[inline(always)] @@ -39,24 +44,28 @@ impl RngCore for StdRng { self.0.next_u64() } + #[inline(always)] fn fill_bytes(&mut self, dest: &mut [u8]) { self.0.fill_bytes(dest); } + #[inline(always)] fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { self.0.try_fill_bytes(dest) } } impl SeedableRng for StdRng { - type Seed = <Hc128Rng as SeedableRng>::Seed; + type Seed = <Rng as SeedableRng>::Seed; + #[inline(always)] fn from_seed(seed: Self::Seed) -> Self { - StdRng(Hc128Rng::from_seed(seed)) + StdRng(Rng::from_seed(seed)) } + #[inline(always)] fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> { - Hc128Rng::from_rng(rng).map(StdRng) + Rng::from_rng(rng).map(StdRng) } } @@ -65,17 +74,27 @@ impl CryptoRng for StdRng {} #[cfg(test)] mod test { - use {RngCore, SeedableRng}; - use rngs::StdRng; + use crate::{RngCore, SeedableRng}; + use crate::rngs::StdRng; #[test] fn test_stdrng_construction() { + // Test value-stability of StdRng. This is expected to break any time + // the algorithm is changed. let seed = [1,0,0,0, 23,0,0,0, 200,1,0,0, 210,30,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0]; - let mut rng1 = StdRng::from_seed(seed); - assert_eq!(rng1.next_u64(), 15759097995037006553); - let mut rng2 = StdRng::from_rng(rng1).unwrap(); - assert_eq!(rng2.next_u64(), 6766915756997287454); + #[cfg(any(feature="stdrng_strong", not(feature="stdrng_fast")))] + let target = [3950704604716924505, 5573172343717151650]; + #[cfg(all(not(feature="stdrng_strong"), feature="stdrng_fast"))] + let target = [10719222850664546238, 14064965282130556830]; + + let mut rng0 = StdRng::from_seed(seed); + let x0 = rng0.next_u64(); + + let mut rng1 = StdRng::from_rng(rng0).unwrap(); + let x1 = rng1.next_u64(); + + assert_eq!([x0, x1], target); } } diff --git a/rand/src/rngs/thread.rs b/rand/src/rngs/thread.rs index 7977d85..2006f41 100644 --- a/rand/src/rngs/thread.rs +++ b/rand/src/rngs/thread.rs @@ -9,11 +9,12 @@ //! Thread-local random number generator use std::cell::UnsafeCell; +use std::ptr::NonNull; -use {RngCore, CryptoRng, SeedableRng, Error}; -use rngs::adapter::ReseedingRng; -use rngs::EntropyRng; -use rand_hc::Hc128Core; +use crate::{RngCore, CryptoRng, SeedableRng, Error}; +use crate::rngs::adapter::ReseedingRng; +use crate::rngs::OsRng; +use super::std::Core; // Rationale for using `UnsafeCell` in `ThreadRng`: // @@ -28,61 +29,43 @@ use rand_hc::Hc128Core; // completely under our control. We just have to ensure none of them use // `ThreadRng` internally, which is nonsensical anyway. We should also never run // `ThreadRng` in destructors of its implementation, which is also nonsensical. -// -// The additional `Rc` is not strictly neccesary, and could be removed. For now -// it ensures `ThreadRng` stays `!Send` and `!Sync`, and implements `Clone`. -// Number of generated bytes after which to reseed `TreadRng`. -// -// The time it takes to reseed HC-128 is roughly equivalent to generating 7 KiB. -// We pick a treshold here that is large enough to not reduce the average -// performance too much, but also small enough to not make reseeding something -// that basically never happens. -const THREAD_RNG_RESEED_THRESHOLD: u64 = 32*1024*1024; // 32 MiB +// Number of generated bytes after which to reseed `ThreadRng`. 
+// According to benchmarks, reseeding has a noticeable impact with thresholds +// of 32 kB and less. We choose 64 kB to avoid significant overhead. +const THREAD_RNG_RESEED_THRESHOLD: u64 = 1024 * 64; /// The type returned by [`thread_rng`], essentially just a reference to the /// PRNG in thread-local memory. /// -/// `ThreadRng` uses [`ReseedingRng`] wrapping the same PRNG as [`StdRng`], -/// which is reseeded after generating 32 MiB of random data. A single instance -/// is cached per thread and the returned `ThreadRng` is a reference to this -/// instance; hence `ThreadRng` is neither `Send` nor `Sync` but is safe to use -/// within a single thread. This RNG is seeded and reseeded via [`EntropyRng`] -/// as required. +/// `ThreadRng` uses the same PRNG as [`StdRng`] for security and performance. +/// As hinted by the name, the generator is thread-local. `ThreadRng` is a +/// handle to this generator and thus supports `Copy`, but not `Send` or `Sync`. /// -/// Note that the reseeding is done as an extra precaution against entropy -/// leaks and is in theory unnecessary; to predict `ThreadRng`'s output, an -/// attacker would have to either determine most of the RNG's seed or internal -/// state, or crack the algorithm used. +/// Unlike `StdRng`, `ThreadRng` uses the [`ReseedingRng`] wrapper to reseed +/// the PRNG from fresh entropy every 64 kiB of random data. +/// [`OsRng`] is used to provide seed data. /// -/// Like [`StdRng`], `ThreadRng` is a cryptographically secure PRNG. The current -/// algorithm used is [HC-128], which is an array-based PRNG that trades memory -/// usage for better performance. This makes it similar to ISAAC, the algorithm -/// used in `ThreadRng` before rand 0.5. +/// Note that the reseeding is done as an extra precaution against side-channel +/// attacks and mis-use (e.g. if somehow weak entropy were supplied initially). +/// The PRNG algorithms used are assumed to be secure. /// -/// Cloning this handle just produces a new reference to the same thread-local -/// generator. -/// -/// [`thread_rng`]: ../fn.thread_rng.html -/// [`ReseedingRng`]: adapter/struct.ReseedingRng.html -/// [`StdRng`]: struct.StdRng.html -/// [`EntropyRng`]: struct.EntropyRng.html -/// [HC-128]: ../../rand_hc/struct.Hc128Rng.html -#[derive(Clone, Debug)] +/// [`ReseedingRng`]: crate::rngs::adapter::ReseedingRng +/// [`StdRng`]: crate::rngs::StdRng +#[derive(Copy, Clone, Debug)] pub struct ThreadRng { - // use of raw pointer implies type is neither Send nor Sync - rng: *mut ReseedingRng<Hc128Core, EntropyRng>, + // inner raw pointer implies type is neither Send nor Sync + rng: NonNull<ReseedingRng<Core, OsRng>>, } thread_local!( - static THREAD_RNG_KEY: UnsafeCell<ReseedingRng<Hc128Core, EntropyRng>> = { - let mut entropy_source = EntropyRng::new(); - let r = Hc128Core::from_rng(&mut entropy_source).unwrap_or_else(|err| + static THREAD_RNG_KEY: UnsafeCell<ReseedingRng<Core, OsRng>> = { + let r = Core::from_rng(OsRng).unwrap_or_else(|err| panic!("could not initialize thread_rng: {}", err)); let rng = ReseedingRng::new(r, THREAD_RNG_RESEED_THRESHOLD, - entropy_source); + OsRng); UnsafeCell::new(rng) } ); @@ -91,38 +74,38 @@ thread_local!( /// seeded by the system. Intended to be used in method chaining style, /// e.g. `thread_rng().gen::<i32>()`, or cached locally, e.g. /// `let mut rng = thread_rng();`. Invoked by the `Default` trait, making -/// `ThreadRng::default()` equivelent. +/// `ThreadRng::default()` equivalent. /// /// For more information see [`ThreadRng`].
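A short usage sketch of the two calling styles described above; the two-argument `gen_range` form matches the test at the end of this file's diff, and the concrete values are arbitrary:

```rust
use rand::{thread_rng, Rng};

fn main() {
    // Method-chaining style: obtain the handle and use it inline.
    let x: i32 = thread_rng().gen();

    // Cached-handle style; `ThreadRng::default()` would be equivalent.
    let mut rng = thread_rng();
    let die = rng.gen_range(1, 7); // uniform in 1..7, i.e. a six-sided die

    println!("x = {}, die = {}", x, die);
}
```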
-/// -/// [`ThreadRng`]: rngs/struct.ThreadRng.html pub fn thread_rng() -> ThreadRng { - ThreadRng { rng: THREAD_RNG_KEY.with(|t| t.get()) } + let raw = THREAD_RNG_KEY.with(|t| t.get()); + let nn = NonNull::new(raw).unwrap(); + ThreadRng { rng: nn } } impl Default for ThreadRng { fn default() -> ThreadRng { - ::prelude::thread_rng() + crate::prelude::thread_rng() } } impl RngCore for ThreadRng { #[inline(always)] fn next_u32(&mut self) -> u32 { - unsafe { (*self.rng).next_u32() } + unsafe { self.rng.as_mut().next_u32() } } #[inline(always)] fn next_u64(&mut self) -> u64 { - unsafe { (*self.rng).next_u64() } + unsafe { self.rng.as_mut().next_u64() } } fn fill_bytes(&mut self, dest: &mut [u8]) { - unsafe { (*self.rng).fill_bytes(dest) } + unsafe { self.rng.as_mut().fill_bytes(dest) } } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - unsafe { (*self.rng).try_fill_bytes(dest) } + unsafe { self.rng.as_mut().try_fill_bytes(dest) } } } @@ -133,8 +116,8 @@ impl CryptoRng for ThreadRng {} mod test { #[test] fn test_thread_rng() { - use Rng; - let mut r = ::thread_rng(); + use crate::Rng; + let mut r = crate::thread_rng(); r.gen::<i32>(); assert_eq!(r.gen_range(0, 1), 0); } diff --git a/rand/src/seq/index.rs b/rand/src/seq/index.rs index 3d4df3a..22a5733 100644 --- a/rand/src/seq/index.rs +++ b/rand/src/seq/index.rs @@ -6,18 +6,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Index sampling +//! Low-level API for sampling indices #[cfg(feature="alloc")] use core::slice; #[cfg(feature="std")] use std::vec; -#[cfg(all(feature="alloc", not(feature="std")))] use alloc::vec::{self, Vec}; +#[cfg(all(feature="alloc", not(feature="std")))] use crate::alloc::vec::{self, Vec}; // BTreeMap is not as fast in tests, but better than nothing. #[cfg(feature="std")] use std::collections::{HashSet}; -#[cfg(all(feature="alloc", not(feature="std")))] use alloc::collections::BTreeSet; +#[cfg(all(feature="alloc", not(feature="std")))] use crate::alloc::collections::BTreeSet; -#[cfg(feature="alloc")] use distributions::{Distribution, Uniform}; -use Rng; +#[cfg(feature="alloc")] use crate::distributions::{Distribution, Uniform, uniform::SampleUniform}; +use crate::Rng; /// A vector of indices. /// @@ -30,25 +30,37 @@ pub enum IndexVec { impl IndexVec { /// Returns the number of indices + #[inline] pub fn len(&self) -> usize { - match self { - &IndexVec::U32(ref v) => v.len(), - &IndexVec::USize(ref v) => v.len(), + match *self { + IndexVec::U32(ref v) => v.len(), + IndexVec::USize(ref v) => v.len(), + } + } + + /// Returns `true` if the length is 0. + #[inline] + pub fn is_empty(&self) -> bool { + match *self { + IndexVec::U32(ref v) => v.is_empty(), + IndexVec::USize(ref v) => v.is_empty(), } } /// Return the value at the given `index`. /// - /// (Note: we cannot implement `std::ops::Index` because of lifetime + /// (Note: we cannot implement [`std::ops::Index`] because of lifetime /// restrictions.) + #[inline] pub fn index(&self, index: usize) -> usize { - match self { - &IndexVec::U32(ref v) => v[index] as usize, - &IndexVec::USize(ref v) => v[index], + match *self { + IndexVec::U32(ref v) => v[index] as usize, + IndexVec::USize(ref v) => v[index], } } /// Return result as a `Vec<usize>`. Conversion may or may not be trivial. 
+ #[inline] pub fn into_vec(self) -> Vec<usize> { match self { IndexVec::U32(v) => v.into_iter().map(|i| i as usize).collect(), @@ -57,14 +69,16 @@ impl IndexVec { } /// Iterate over the indices as a sequence of `usize` values - pub fn iter<'a>(&'a self) -> IndexVecIter<'a> { - match self { - &IndexVec::U32(ref v) => IndexVecIter::U32(v.iter()), - &IndexVec::USize(ref v) => IndexVecIter::USize(v.iter()), + #[inline] + pub fn iter(&self) -> IndexVecIter<'_> { + match *self { + IndexVec::U32(ref v) => IndexVecIter::U32(v.iter()), + IndexVec::USize(ref v) => IndexVecIter::USize(v.iter()), } } /// Convert into an iterator over the indices as a sequence of `usize` values + #[inline] pub fn into_iter(self) -> IndexVecIntoIter { match self { IndexVec::U32(v) => IndexVecIntoIter::U32(v.into_iter()), @@ -88,12 +102,14 @@ impl PartialEq for IndexVec { } impl From<Vec<u32>> for IndexVec { + #[inline] fn from(v: Vec<u32>) -> Self { IndexVec::U32(v) } } impl From<Vec<usize>> for IndexVec { + #[inline] fn from(v: Vec<usize>) -> Self { IndexVec::USize(v) } @@ -108,18 +124,20 @@ pub enum IndexVecIter<'a> { impl<'a> Iterator for IndexVecIter<'a> { type Item = usize; + #[inline] fn next(&mut self) -> Option<usize> { use self::IndexVecIter::*; - match self { - &mut U32(ref mut iter) => iter.next().map(|i| *i as usize), - &mut USize(ref mut iter) => iter.next().cloned(), + match *self { + U32(ref mut iter) => iter.next().map(|i| *i as usize), + USize(ref mut iter) => iter.next().cloned(), } } + #[inline] fn size_hint(&self) -> (usize, Option<usize>) { - match self { - &IndexVecIter::U32(ref v) => v.size_hint(), - &IndexVecIter::USize(ref v) => v.size_hint(), + match *self { + IndexVecIter::U32(ref v) => v.size_hint(), + IndexVecIter::USize(ref v) => v.size_hint(), } } } @@ -136,19 +154,21 @@ pub enum IndexVecIntoIter { impl Iterator for IndexVecIntoIter { type Item = usize; + #[inline] fn next(&mut self) -> Option<Self::Item> { use self::IndexVecIntoIter::*; - match self { - &mut U32(ref mut v) => v.next().map(|i| i as usize), - &mut USize(ref mut v) => v.next(), + match *self { + U32(ref mut v) => v.next().map(|i| i as usize), + USize(ref mut v) => v.next(), } } + #[inline] fn size_hint(&self) -> (usize, Option<usize>) { use self::IndexVecIntoIter::*; - match self { - &U32(ref v) => v.size_hint(), - &USize(ref v) => v.size_hint(), + match *self { + U32(ref v) => v.size_hint(), + USize(ref v) => v.size_hint(), } } } @@ -173,14 +193,13 @@ impl ExactSizeIterator for IndexVecIntoIter {} /// Note that performance is significantly better over `u32` indices than over /// `u64` indices. Because of this we hide the underlying type behind an /// abstraction, `IndexVec`. -/// +/// /// If an allocation-free `no_std` function is required, it is suggested /// to adapt the internal `sample_floyd` implementation. /// /// Panics if `amount > length`. 
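Since `sample` (defined next) returns an `IndexVec` rather than a concrete vector type, a short caller-side sketch may help; it assumes only the `index::sample`, `len`, `iter` and `into_vec` APIs shown in this hunk:

```rust
use rand::seq::index;

fn main() {
    let mut rng = rand::thread_rng();
    // Choose 5 distinct indices from 0..100; whether they are stored as
    // `u32` or `usize` is hidden behind `IndexVec`.
    let chosen = index::sample(&mut rng, 100, 5);
    assert_eq!(chosen.len(), 5);
    assert!(chosen.iter().all(|i| i < 100));
    let as_vec: Vec<usize> = chosen.into_vec();
    println!("{:?}", as_vec);
}
```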
pub fn sample<R>(rng: &mut R, length: usize, amount: usize) -> IndexVec - where R: Rng + ?Sized, -{ +where R: Rng + ?Sized { if amount > length { panic!("`amount` of samples must be less than or equal to `length`"); } @@ -213,9 +232,7 @@ pub fn sample<R>(rng: &mut R, length: usize, amount: usize) -> IndexVec if (length as f32) < C[j] * (amount as f32) { sample_inplace(rng, length, amount) } else { - // note: could have a specific u32 impl, but I'm lazy and - // generics don't have usable conversions - sample_rejection(rng, length as usize, amount as usize) + sample_rejection(rng, length, amount) } } } @@ -227,8 +244,7 @@ pub fn sample<R>(rng: &mut R, length: usize, amount: usize) -> IndexVec /// /// This implementation uses `O(amount)` memory and `O(amount^2)` time. fn sample_floyd<R>(rng: &mut R, length: u32, amount: u32) -> IndexVec - where R: Rng + ?Sized, -{ +where R: Rng + ?Sized { // For small amount we use Floyd's fully-shuffled variant. For larger // amounts this is slow due to Vec::insert performance, so we shuffle // afterwards. Benchmarks show little overhead from extra logic. @@ -243,11 +259,9 @@ fn sample_floyd<R>(rng: &mut R, length: u32, amount: u32) -> IndexVec indices.insert(pos, j); continue; } - } else { - if indices.contains(&t) { - indices.push(j); - continue; - } + } else if indices.contains(&t) { + indices.push(j); + continue; } indices.push(t); } @@ -274,8 +288,7 @@ fn sample_floyd<R>(rng: &mut R, length: u32, amount: u32) -> IndexVec /// /// Set-up is `O(length)` time and memory and shuffling is `O(amount)` time. fn sample_inplace<R>(rng: &mut R, length: u32, amount: u32) -> IndexVec - where R: Rng + ?Sized, -{ +where R: Rng + ?Sized { debug_assert!(amount <= length); let mut indices: Vec<u32> = Vec::with_capacity(length as usize); indices.extend(0..length); @@ -288,21 +301,36 @@ fn sample_inplace<R>(rng: &mut R, length: u32, amount: u32) -> IndexVec IndexVec::from(indices) } +trait UInt: Copy + PartialOrd + Ord + PartialEq + Eq + SampleUniform + core::hash::Hash { + fn zero() -> Self; + fn as_usize(self) -> usize; +} +impl UInt for u32 { + #[inline] fn zero() -> Self { 0 } + #[inline] fn as_usize(self) -> usize { self as usize } +} +impl UInt for usize { + #[inline] fn zero() -> Self { 0 } + #[inline] fn as_usize(self) -> usize { self } +} + /// Randomly sample exactly `amount` indices from `0..length`, using rejection /// sampling. -/// +/// /// Since `amount <<< length` there is a low chance of a random sample in /// `0..length` being a duplicate. We test for duplicates and resample where /// necessary. The algorithm is `O(amount)` time and memory. -fn sample_rejection<R>(rng: &mut R, length: usize, amount: usize) -> IndexVec - where R: Rng + ?Sized, -{ +/// +/// This function is generic over X primarily so that results are value-stable +/// over 32-bit and 64-bit platforms. 
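`sample_floyd` above is based on Floyd's combination-sampling algorithm, with extra bookkeeping to return the chosen values in random order. For reference, here is a minimal, unordered sketch of the underlying algorithm; `floyd_sample` is a hypothetical helper, not part of the crate, and it deliberately skips the ordering work the real implementation does:

```rust
use std::collections::HashSet;
use rand::Rng;

/// Unordered sketch of Floyd's algorithm: `amount` distinct values from
/// `0..length`, assuming `amount <= length`.
fn floyd_sample<R: Rng + ?Sized>(rng: &mut R, length: u32, amount: u32) -> HashSet<u32> {
    let mut chosen = HashSet::with_capacity(amount as usize);
    for j in (length - amount)..length {
        let t = rng.gen_range(0, j + 1); // uniform in 0..=j
        if !chosen.insert(t) {
            // `t` was already picked; `j` itself cannot have been, so take it.
            chosen.insert(j);
        }
    }
    chosen
}
```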
+fn sample_rejection<X: UInt, R>(rng: &mut R, length: X, amount: X) -> IndexVec +where R: Rng + ?Sized, IndexVec: From<Vec<X>> { debug_assert!(amount < length); - #[cfg(feature="std")] let mut cache = HashSet::with_capacity(amount); + #[cfg(feature="std")] let mut cache = HashSet::with_capacity(amount.as_usize()); #[cfg(not(feature="std"))] let mut cache = BTreeSet::new(); - let distr = Uniform::new(0, length); - let mut indices = Vec::with_capacity(amount); - for _ in 0..amount { + let distr = Uniform::new(X::zero(), length); + let mut indices = Vec::with_capacity(amount.as_usize()); + for _ in 0..amount.as_usize() { let mut pos = distr.sample(rng); while !cache.insert(pos) { pos = distr.sample(rng); @@ -310,30 +338,32 @@ fn sample_rejection<R>(rng: &mut R, length: usize, amount: usize) -> IndexVec indices.push(pos); } - debug_assert_eq!(indices.len(), amount); + debug_assert_eq!(indices.len(), amount.as_usize()); IndexVec::from(indices) } #[cfg(test)] mod test { + #[cfg(feature="std")] use std::vec; + #[cfg(all(feature="alloc", not(feature="std")))] use crate::alloc::vec; use super::*; #[test] fn test_sample_boundaries() { - let mut r = ::test::rng(404); + let mut r = crate::test::rng(404); assert_eq!(sample_inplace(&mut r, 0, 0).len(), 0); assert_eq!(sample_inplace(&mut r, 1, 0).len(), 0); assert_eq!(sample_inplace(&mut r, 1, 1).into_vec(), vec![0]); - assert_eq!(sample_rejection(&mut r, 1, 0).len(), 0); + assert_eq!(sample_rejection(&mut r, 1u32, 0).len(), 0); assert_eq!(sample_floyd(&mut r, 0, 0).len(), 0); assert_eq!(sample_floyd(&mut r, 1, 0).len(), 0); assert_eq!(sample_floyd(&mut r, 1, 1).into_vec(), vec![0]); // These algorithms should be fast with big numbers. Test average. - let sum: usize = sample_rejection(&mut r, 1 << 25, 10) + let sum: usize = sample_rejection(&mut r, 1 << 25, 10u32) .into_iter().sum(); assert!(1 << 25 < sum && sum < (1 << 25) * 25); @@ -343,8 +373,9 @@ mod test { } #[test] + #[cfg(not(miri))] // Miri is too slow fn test_sample_alg() { - let seed_rng = ::test::rng; + let seed_rng = crate::test::rng; // We can't test which algorithm is used directly, but Floyd's alg // should produce different results from the others. (Also, `inplace` @@ -371,7 +402,7 @@ mod test { // A large length and larger amount should use cache let (length, amount): (usize, usize) = (1<<20, 600); let v1 = sample(&mut seed_rng(422), length, amount); - let v2 = sample_rejection(&mut seed_rng(422), length, amount); + let v2 = sample_rejection(&mut seed_rng(422), length as u32, amount as u32); assert!(v1.iter().all(|e| e < length)); assert_eq!(v1, v2); } diff --git a/rand/src/seq/mod.rs b/rand/src/seq/mod.rs index 9959602..cec9bb1 100644 --- a/rand/src/seq/mod.rs +++ b/rand/src/seq/mod.rs @@ -6,25 +6,55 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Functions for randomly accessing and sampling sequences. +//! Sequence-related functionality //! -//! TODO: module doc +//! This module provides: +//! +//! * [`seq::SliceRandom`] slice sampling and mutation +//! * [`seq::IteratorRandom`] iterator sampling +//! * [`seq::index::sample`] low-level API to choose multiple indices from +//! `0..length` +//! +//! Also see: +//! +//! * [`distributions::weighted`] module which provides implementations of +//! weighted index sampling. +//! +//! In order to make results reproducible across 32-64 bit architectures, all +//! `usize` indices are sampled as a `u32` where possible (also providing a +//! small performance boost in some cases). 
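(Returning briefly to `sample_rejection` above: the generic `UInt`-based version is harder to read in isolation, so here is a concrete `u32`-only sketch of the same rejection loop. `rejection_sample` is a hypothetical name and not part of the crate.)

```rust
use std::collections::HashSet;
use rand::Rng;
use rand::distributions::{Distribution, Uniform};

/// Sketch: `amount` distinct indices from `0..length`, assuming `amount`
/// is much smaller than `length` so collisions stay rare.
fn rejection_sample<R: Rng + ?Sized>(rng: &mut R, length: u32, amount: u32) -> Vec<u32> {
    let distr = Uniform::new(0, length);
    let mut seen = HashSet::with_capacity(amount as usize);
    let mut indices = Vec::with_capacity(amount as usize);
    while indices.len() < amount as usize {
        let x = distr.sample(rng);
        if seen.insert(x) {
            // First time we have seen `x`; keep it, otherwise resample.
            indices.push(x);
        }
    }
    indices
}
```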
#[cfg(feature="alloc")] pub mod index; #[cfg(feature="alloc")] use core::ops::Index; -#[cfg(all(feature="alloc", not(feature="std")))] use alloc::vec::Vec; +#[cfg(all(feature="alloc", not(feature="std")))] use crate::alloc::vec::Vec; -use Rng; -#[cfg(feature="alloc")] use distributions::WeightedError; -#[cfg(feature="alloc")] use distributions::uniform::{SampleUniform, SampleBorrow}; +use crate::Rng; +#[cfg(feature="alloc")] use crate::distributions::WeightedError; +#[cfg(feature="alloc")] use crate::distributions::uniform::{SampleUniform, SampleBorrow}; /// Extension trait on slices, providing random mutation and sampling methods. /// -/// An implementation is provided for slices. This may also be implementable for -/// other types. +/// This trait is implemented on all `[T]` slice types, providing several +/// methods for choosing and shuffling elements. You must `use` this trait: +/// +/// ``` +/// use rand::seq::SliceRandom; +/// +/// fn main() { +/// let mut rng = rand::thread_rng(); +/// let mut bytes = "Hello, random!".to_string().into_bytes(); +/// bytes.shuffle(&mut rng); +/// let str = String::from_utf8(bytes).unwrap(); +/// println!("{}", str); +/// } +/// ``` +/// Example output (non-deterministic): +/// ```none +/// l,nmroHado !le +/// ``` pub trait SliceRandom { /// The element type. type Item; @@ -32,7 +62,7 @@ pub trait SliceRandom { /// Returns a reference to one random element of the slice, or `None` if the /// slice is empty. /// - /// Depending on the implementation, complexity is expected to be `O(1)`. + /// For slices, complexity is `O(1)`. /// /// # Example /// @@ -46,33 +76,33 @@ pub trait SliceRandom { /// assert_eq!(choices[..0].choose(&mut rng), None); /// ``` fn choose<R>(&self, rng: &mut R) -> Option<&Self::Item> - where R: Rng + ?Sized; + where R: Rng + ?Sized; /// Returns a mutable reference to one random element of the slice, or /// `None` if the slice is empty. - /// - /// Depending on the implementation, complexity is expected to be `O(1)`. + /// + /// For slices, complexity is `O(1)`. fn choose_mut<R>(&mut self, rng: &mut R) -> Option<&mut Self::Item> - where R: Rng + ?Sized; + where R: Rng + ?Sized; - /// Produces an iterator that chooses `amount` elements from the slice at - /// random without repeating any, and returns them in random order. - /// - /// In case this API is not sufficiently flexible, use `index::sample` then - /// apply the indices to the slice. - /// - /// Complexity is expected to be the same as `index::sample`. - /// + /// Chooses `amount` elements from the slice at random, without repetition, + /// and in random order. The returned iterator is appropriate both for + /// collection into a `Vec` and filling an existing buffer (see example). + /// + /// In case this API is not sufficiently flexible, use [`index::sample`]. + /// + /// For slices, complexity is the same as [`index::sample`]. 
+ /// /// # Example /// ``` /// use rand::seq::SliceRandom; - /// + /// /// let mut rng = &mut rand::thread_rng(); /// let sample = "Hello, audience!".as_bytes(); - /// + /// /// // collect the results into a vector: /// let v: Vec<u8> = sample.choose_multiple(&mut rng, 3).cloned().collect(); - /// + /// /// // store in a buffer: /// let mut buf = [0u8; 5]; /// for (b, slot) in sample.choose_multiple(&mut rng, buf.len()).zip(buf.iter_mut()) { @@ -81,13 +111,18 @@ pub trait SliceRandom { /// ``` #[cfg(feature = "alloc")] fn choose_multiple<R>(&self, rng: &mut R, amount: usize) -> SliceChooseIter<Self, Self::Item> - where R: Rng + ?Sized; + where R: Rng + ?Sized; - /// Similar to [`choose`], where the likelihood of each outcome may be - /// specified. The specified function `weight` maps items `x` to a relative + /// Similar to [`choose`], but where the likelihood of each outcome may be + /// specified. + /// + /// The specified function `weight` maps each item `x` to a relative /// likelihood `weight(x)`. The probability of each item being selected is /// therefore `weight(x) / s`, where `s` is the sum of all `weight(x)`. /// + /// For slices of length `n`, complexity is `O(n)`. + /// See also [`choose_weighted_mut`], [`distributions::weighted`]. + /// /// # Example /// /// ``` @@ -98,47 +133,59 @@ pub trait SliceRandom { /// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c' /// println!("{:?}", choices.choose_weighted(&mut rng, |item| item.1).unwrap().0); /// ``` - /// [`choose`]: trait.SliceRandom.html#method.choose + /// [`choose`]: SliceRandom::choose + /// [`choose_weighted_mut`]: SliceRandom::choose_weighted_mut + /// [`distributions::weighted`]: crate::distributions::weighted #[cfg(feature = "alloc")] - fn choose_weighted<R, F, B, X>(&self, rng: &mut R, weight: F) -> Result<&Self::Item, WeightedError> - where R: Rng + ?Sized, - F: Fn(&Self::Item) -> B, - B: SampleBorrow<X>, - X: SampleUniform + - for<'a> ::core::ops::AddAssign<&'a X> + - ::core::cmp::PartialOrd<X> + - Clone + - Default; - - /// Similar to [`choose_mut`], where the likelihood of each outcome may be - /// specified. The specified function `weight` maps items `x` to a relative + fn choose_weighted<R, F, B, X>( + &self, rng: &mut R, weight: F, + ) -> Result<&Self::Item, WeightedError> + where + R: Rng + ?Sized, + F: Fn(&Self::Item) -> B, + B: SampleBorrow<X>, + X: SampleUniform + + for<'a> ::core::ops::AddAssign<&'a X> + + ::core::cmp::PartialOrd<X> + + Clone + + Default; + + /// Similar to [`choose_mut`], but where the likelihood of each outcome may + /// be specified. + /// + /// The specified function `weight` maps each item `x` to a relative /// likelihood `weight(x)`. The probability of each item being selected is /// therefore `weight(x) / s`, where `s` is the sum of all `weight(x)`. /// - /// See also [`choose_weighted`]. + /// For slices of length `n`, complexity is `O(n)`. + /// See also [`choose_weighted`], [`distributions::weighted`]. 
/// - /// [`choose_mut`]: trait.SliceRandom.html#method.choose_mut - /// [`choose_weighted`]: trait.SliceRandom.html#method.choose_weighted + /// [`choose_mut`]: SliceRandom::choose_mut + /// [`choose_weighted`]: SliceRandom::choose_weighted + /// [`distributions::weighted`]: crate::distributions::weighted #[cfg(feature = "alloc")] - fn choose_weighted_mut<R, F, B, X>(&mut self, rng: &mut R, weight: F) -> Result<&mut Self::Item, WeightedError> - where R: Rng + ?Sized, - F: Fn(&Self::Item) -> B, - B: SampleBorrow<X>, - X: SampleUniform + - for<'a> ::core::ops::AddAssign<&'a X> + - ::core::cmp::PartialOrd<X> + - Clone + - Default; + fn choose_weighted_mut<R, F, B, X>( + &mut self, rng: &mut R, weight: F, + ) -> Result<&mut Self::Item, WeightedError> + where + R: Rng + ?Sized, + F: Fn(&Self::Item) -> B, + B: SampleBorrow<X>, + X: SampleUniform + + for<'a> ::core::ops::AddAssign<&'a X> + + ::core::cmp::PartialOrd<X> + + Clone + + Default; /// Shuffle a mutable slice in place. - /// - /// Depending on the implementation, complexity is expected to be `O(1)`. + /// + /// For slices of length `n`, complexity is `O(n)`. /// /// # Example /// /// ``` - /// use rand::thread_rng; /// use rand::seq::SliceRandom; + /// use rand::thread_rng; /// /// let mut rng = thread_rng(); /// let mut y = [1, 2, 3, 4, 5]; @@ -146,7 +193,8 @@ pub trait SliceRandom { /// y.shuffle(&mut rng); /// println!("Shuffled: {:?}", y); /// ``` - fn shuffle<R>(&mut self, rng: &mut R) where R: Rng + ?Sized; + fn shuffle<R>(&mut self, rng: &mut R) + where R: Rng + ?Sized; /// Shuffle a slice in place, but exit early. /// @@ -164,47 +212,65 @@ pub trait SliceRandom { /// If `amount` is greater than the number of elements in the slice, this /// will perform a full shuffle. /// - /// Complexity is expected to be `O(m)` where `m = amount`. - fn partial_shuffle<R>(&mut self, rng: &mut R, amount: usize) - -> (&mut [Self::Item], &mut [Self::Item]) where R: Rng + ?Sized; + /// For slices, complexity is `O(m)` where `m = amount`. + fn partial_shuffle<R>( + &mut self, rng: &mut R, amount: usize, + ) -> (&mut [Self::Item], &mut [Self::Item]) + where R: Rng + ?Sized; } /// Extension trait on iterators, providing random sampling methods. +/// +/// This trait is implemented on all sized iterators, providing methods for +/// choosing one or more elements. You must `use` this trait: +/// +/// ``` +/// use rand::seq::IteratorRandom; +/// +/// fn main() { +/// let mut rng = rand::thread_rng(); +/// +/// let faces = "πππππ π’"; +/// println!("I am {}!", faces.chars().choose(&mut rng).unwrap()); +/// } +/// ``` +/// Example output (non-deterministic): +/// ```none +/// I am π! +/// ``` pub trait IteratorRandom: Iterator + Sized { - /// Choose one element at random from the iterator. If you have a slice, - /// it's significantly faster to call the [`choose`] or [`choose_mut`] - /// functions using the slice instead. - /// - /// Returns `None` if and only if the iterator is empty. + /// Choose one element at random from the iterator. /// - /// Complexity is `O(n)`, where `n` is the length of the iterator. - /// This likely consumes multiple random numbers, but the exact number - /// is unspecified. + /// Returns `None` if and only if the iterator is empty. /// - /// [`choose`]: trait.SliceRandom.html#method.choose - /// [`choose_mut`]: trait.SliceRandom.html#method.choose_mut + /// This method uses [`Iterator::size_hint`] for optimisation. 
With an + /// accurate hint and where [`Iterator::nth`] is a constant-time operation + /// this method can offer `O(1)` performance. Where no size hint is + /// available, complexity is `O(n)` where `n` is the iterator length. + /// Partial hints (where `lower > 0`) also improve performance. + /// + /// For slices, prefer [`SliceRandom::choose`] which guarantees `O(1)` + /// performance. fn choose<R>(mut self, rng: &mut R) -> Option<Self::Item> - where R: Rng + ?Sized - { + where R: Rng + ?Sized { let (mut lower, mut upper) = self.size_hint(); let mut consumed = 0; let mut result = None; if upper == Some(lower) { - return if lower == 0 { None } else { self.nth(rng.gen_range(0, lower)) }; + return if lower == 0 { None } else { self.nth(gen_index(rng, lower)) }; } // Continue until the iterator is exhausted loop { if lower > 1 { - let ix = rng.gen_range(0, lower + consumed); - let skip; - if ix < lower { + let ix = gen_index(rng, lower + consumed); + let skip = if ix < lower { result = self.nth(ix); - skip = lower - (ix + 1); + lower - (ix + 1) } else { - skip = lower; - } + lower + }; if upper == Some(lower) { return result; } @@ -230,21 +296,21 @@ pub trait IteratorRandom: Iterator + Sized { } } - /// Collects `amount` values at random from the iterator into a supplied - /// buffer. - /// + /// Collects values at random from the iterator into a supplied buffer + /// until that buffer is filled. + /// /// Although the elements are selected randomly, the order of elements in /// the buffer is neither stable nor fully random. If random ordering is /// desired, shuffle the result. - /// - /// Returns the number of elements added to the buffer. This equals `amount` - /// unless the iterator contains insufficient elements, in which case this - /// equals the number of elements available. - /// + /// + /// Returns the number of elements added to the buffer. This equals the length + /// of the buffer unless the iterator contains insufficient elements, in which + /// case this equals the number of elements available. + /// /// Complexity is `O(n)` where `n` is the length of the iterator. - fn choose_multiple_fill<R>(mut self, rng: &mut R, buf: &mut [Self::Item]) - -> usize where R: Rng + ?Sized - { + /// For slices, prefer [`SliceRandom::choose_multiple`]. + fn choose_multiple_fill<R>(mut self, rng: &mut R, buf: &mut [Self::Item]) -> usize + where R: Rng + ?Sized { let amount = buf.len(); let mut len = 0; while len < amount { @@ -259,7 +325,7 @@ pub trait IteratorRandom: Iterator + Sized { // Continue, since the iterator was not exhausted for (i, elem) in self.enumerate() { - let k = rng.gen_range(0, i + 1 + amount); + let k = gen_index(rng, i + 1 + amount); if let Some(slot) = buf.get_mut(k) { *slot = elem; } @@ -274,16 +340,16 @@ pub trait IteratorRandom: Iterator + Sized { /// Although the elements are selected randomly, the order of elements in /// the buffer is neither stable nor fully random. If random ordering is /// desired, shuffle the result. - /// + /// /// The length of the returned vector equals `amount` unless the iterator /// contains insufficient elements, in which case it equals the number of /// elements available. - /// + /// /// Complexity is `O(n)` where `n` is the length of the iterator. + /// For slices, prefer [`SliceRandom::choose_multiple`]. 
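The `IteratorRandom` methods above gain documentation but no usage snippets beyond the trait-level example. A small sketch, assuming only the `choose` and `choose_multiple_fill` methods shown in this hunk:

```rust
use rand::seq::IteratorRandom;

fn main() {
    let mut rng = rand::thread_rng();

    // `choose` consumes the iterator and returns one random element, using
    // the size hint to skip ahead where possible.
    let word = "the quick brown fox".split_whitespace().choose(&mut rng);
    println!("chose {:?}", word);

    // `choose_multiple_fill` samples into a fixed buffer and reports how
    // many slots it managed to fill.
    let mut buf = [0u32; 4];
    let filled = (0..100u32).choose_multiple_fill(&mut rng, &mut buf);
    println!("filled {} slots: {:?}", filled, &buf[..filled]);
}
```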
#[cfg(feature = "alloc")] fn choose_multiple<R>(mut self, rng: &mut R, amount: usize) -> Vec<Self::Item> - where R: Rng + ?Sized - { + where R: Rng + ?Sized { let mut reservoir = Vec::with_capacity(amount); reservoir.extend(self.by_ref().take(amount)); @@ -293,7 +359,7 @@ pub trait IteratorRandom: Iterator + Sized { // If the iterator stops once, then so do we. if reservoir.len() == amount { for (i, elem) in self.enumerate() { - let k = rng.gen_range(0, i + 1 + amount); + let k = gen_index(rng, i + 1 + amount); if let Some(slot) = reservoir.get_mut(k) { *slot = elem; } @@ -312,31 +378,27 @@ impl<T> SliceRandom for [T] { type Item = T; fn choose<R>(&self, rng: &mut R) -> Option<&Self::Item> - where R: Rng + ?Sized - { + where R: Rng + ?Sized { if self.is_empty() { None } else { - Some(&self[rng.gen_range(0, self.len())]) + Some(&self[gen_index(rng, self.len())]) } } fn choose_mut<R>(&mut self, rng: &mut R) -> Option<&mut Self::Item> - where R: Rng + ?Sized - { + where R: Rng + ?Sized { if self.is_empty() { None } else { let len = self.len(); - Some(&mut self[rng.gen_range(0, len)]) + Some(&mut self[gen_index(rng, len)]) } } #[cfg(feature = "alloc")] - fn choose_multiple<R>(&self, rng: &mut R, amount: usize) - -> SliceChooseIter<Self, Self::Item> - where R: Rng + ?Sized - { + fn choose_multiple<R>(&self, rng: &mut R, amount: usize) -> SliceChooseIter<Self, Self::Item> + where R: Rng + ?Sized { let amount = ::core::cmp::min(amount, self.len()); SliceChooseIter { slice: self, @@ -346,57 +408,66 @@ impl<T> SliceRandom for [T] { } #[cfg(feature = "alloc")] - fn choose_weighted<R, F, B, X>(&self, rng: &mut R, weight: F) -> Result<&Self::Item, WeightedError> - where R: Rng + ?Sized, - F: Fn(&Self::Item) -> B, - B: SampleBorrow<X>, - X: SampleUniform + - for<'a> ::core::ops::AddAssign<&'a X> + - ::core::cmp::PartialOrd<X> + - Clone + - Default { - use distributions::{Distribution, WeightedIndex}; + fn choose_weighted<R, F, B, X>( + &self, rng: &mut R, weight: F, + ) -> Result<&Self::Item, WeightedError> + where + R: Rng + ?Sized, + F: Fn(&Self::Item) -> B, + B: SampleBorrow<X>, + X: SampleUniform + + for<'a> ::core::ops::AddAssign<&'a X> + + ::core::cmp::PartialOrd<X> + + Clone + + Default, + { + use crate::distributions::{Distribution, WeightedIndex}; let distr = WeightedIndex::new(self.iter().map(weight))?; Ok(&self[distr.sample(rng)]) } #[cfg(feature = "alloc")] - fn choose_weighted_mut<R, F, B, X>(&mut self, rng: &mut R, weight: F) -> Result<&mut Self::Item, WeightedError> - where R: Rng + ?Sized, - F: Fn(&Self::Item) -> B, - B: SampleBorrow<X>, - X: SampleUniform + - for<'a> ::core::ops::AddAssign<&'a X> + - ::core::cmp::PartialOrd<X> + - Clone + - Default { - use distributions::{Distribution, WeightedIndex}; + fn choose_weighted_mut<R, F, B, X>( + &mut self, rng: &mut R, weight: F, + ) -> Result<&mut Self::Item, WeightedError> + where + R: Rng + ?Sized, + F: Fn(&Self::Item) -> B, + B: SampleBorrow<X>, + X: SampleUniform + + for<'a> ::core::ops::AddAssign<&'a X> + + ::core::cmp::PartialOrd<X> + + Clone + + Default, + { + use crate::distributions::{Distribution, WeightedIndex}; let distr = WeightedIndex::new(self.iter().map(weight))?; Ok(&mut self[distr.sample(rng)]) } - fn shuffle<R>(&mut self, rng: &mut R) where R: Rng + ?Sized - { + fn shuffle<R>(&mut self, rng: &mut R) + where R: Rng + ?Sized { for i in (1..self.len()).rev() { // invariant: elements with index > i have been locked in place. 
- self.swap(i, rng.gen_range(0, i + 1)); + self.swap(i, gen_index(rng, i + 1)); } } - fn partial_shuffle<R>(&mut self, rng: &mut R, amount: usize) - -> (&mut [Self::Item], &mut [Self::Item]) where R: Rng + ?Sized - { + fn partial_shuffle<R>( + &mut self, rng: &mut R, amount: usize, + ) -> (&mut [Self::Item], &mut [Self::Item]) + where R: Rng + ?Sized { // This applies Durstenfeld's algorithm for the // [FisherβYates shuffle](https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#The_modern_algorithm) // for an unbiased permutation, but exits early after choosing `amount` // elements. - + let len = self.len(); let end = if amount >= len { 0 } else { len - amount }; - + for i in (end..len).rev() { // invariant: elements with index > i have been locked in place. - self.swap(i, rng.gen_range(0, i + 1)); + self.swap(i, gen_index(rng, i + 1)); } let r = self.split_at_mut(end); (r.1, r.0) @@ -406,8 +477,10 @@ impl<T> SliceRandom for [T] { impl<I> IteratorRandom for I where I: Iterator + Sized {} -/// Iterator over multiple choices, as returned by [`SliceRandom::choose_multiple]( -/// trait.SliceRandom.html#method.choose_multiple). +/// An iterator over multiple slice elements. +/// +/// This struct is created by +/// [`SliceRandom::choose_multiple`](trait.SliceRandom.html#tymethod.choose_multiple). #[cfg(feature = "alloc")] #[derive(Debug)] pub struct SliceChooseIter<'a, S: ?Sized + 'a, T: 'a> { @@ -424,7 +497,7 @@ impl<'a, S: Index<usize, Output = T> + ?Sized + 'a, T: 'a> Iterator for SliceCho // TODO: investigate using SliceIndex::get_unchecked when stable self.indices.next().map(|i| &self.slice[i as usize]) } - + fn size_hint(&self) -> (usize, Option<usize>) { (self.indices.len(), Some(self.indices.len())) } @@ -440,94 +513,39 @@ impl<'a, S: Index<usize, Output = T> + ?Sized + 'a, T: 'a> ExactSizeIterator } -/// Randomly sample `amount` elements from a finite iterator. -/// -/// Deprecated: use [`IteratorRandom::choose_multiple`] instead. -/// -/// [`IteratorRandom::choose_multiple`]: trait.IteratorRandom.html#method.choose_multiple -#[cfg(feature = "alloc")] -#[deprecated(since="0.6.0", note="use IteratorRandom::choose_multiple instead")] -pub fn sample_iter<T, I, R>(rng: &mut R, iterable: I, amount: usize) -> Result<Vec<T>, Vec<T>> - where I: IntoIterator<Item=T>, - R: Rng + ?Sized, -{ - use seq::IteratorRandom; - let iter = iterable.into_iter(); - let result = iter.choose_multiple(rng, amount); - if result.len() == amount { - Ok(result) +// Sample a number uniformly between 0 and `ubound`. Uses 32-bit sampling where +// possible, primarily in order to produce the same output on 32-bit and 64-bit +// platforms. +#[inline] +fn gen_index<R: Rng + ?Sized>(rng: &mut R, ubound: usize) -> usize { + if ubound <= (core::u32::MAX as usize) { + rng.gen_range(0, ubound as u32) as usize } else { - Err(result) + rng.gen_range(0, ubound) } } -/// Randomly sample exactly `amount` values from `slice`. -/// -/// The values are non-repeating and in random order. -/// -/// This implementation uses `O(amount)` time and memory. -/// -/// Panics if `amount > slice.len()` -/// -/// Deprecated: use [`SliceRandom::choose_multiple`] instead. 
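Among the slice implementations above, `partial_shuffle` has no doc example in this diff. A usage sketch (the deck/hand framing is illustrative only):

```rust
use rand::seq::SliceRandom;

fn main() {
    let mut rng = rand::thread_rng();
    let mut deck: Vec<u8> = (0..52).collect();

    // Shuffle only enough to draw 5 cards: `hand` holds the chosen elements
    // in random order, `rest` the remaining (unshuffled) elements.
    let (hand, rest) = deck.partial_shuffle(&mut rng, 5);
    println!("hand: {:?}, {} cards left", hand, rest.len());
}
```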
-/// -/// [`SliceRandom::choose_multiple`]: trait.SliceRandom.html#method.choose_multiple -#[cfg(feature = "alloc")] -#[deprecated(since="0.6.0", note="use SliceRandom::choose_multiple instead")] -pub fn sample_slice<R, T>(rng: &mut R, slice: &[T], amount: usize) -> Vec<T> - where R: Rng + ?Sized, - T: Clone -{ - let indices = index::sample(rng, slice.len(), amount).into_iter(); - - let mut out = Vec::with_capacity(amount); - out.extend(indices.map(|i| slice[i].clone())); - out -} - -/// Randomly sample exactly `amount` references from `slice`. -/// -/// The references are non-repeating and in random order. -/// -/// This implementation uses `O(amount)` time and memory. -/// -/// Panics if `amount > slice.len()` -/// -/// Deprecated: use [`SliceRandom::choose_multiple`] instead. -/// -/// [`SliceRandom::choose_multiple`]: trait.SliceRandom.html#method.choose_multiple -#[cfg(feature = "alloc")] -#[deprecated(since="0.6.0", note="use SliceRandom::choose_multiple instead")] -pub fn sample_slice_ref<'a, R, T>(rng: &mut R, slice: &'a [T], amount: usize) -> Vec<&'a T> - where R: Rng + ?Sized -{ - let indices = index::sample(rng, slice.len(), amount).into_iter(); - - let mut out = Vec::with_capacity(amount); - out.extend(indices.map(|i| &slice[i])); - out -} #[cfg(test)] mod test { use super::*; - #[cfg(feature = "alloc")] use {Rng, SeedableRng}; - #[cfg(feature = "alloc")] use rngs::SmallRng; + #[cfg(feature = "alloc")] use crate::Rng; #[cfg(all(feature="alloc", not(feature="std")))] use alloc::vec::Vec; #[test] fn test_slice_choose() { - let mut r = ::test::rng(107); + let mut r = crate::test::rng(107); let chars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n']; let mut chosen = [0i32; 14]; + // The below all use a binomial distribution with n=1000, p=1/14. + // binocdf(40, 1000, 1/14) ~= 2e-5; 1-binocdf(106, ..) 
~= 2e-5 for _ in 0..1000 { let picked = *chars.choose(&mut r).unwrap(); chosen[(picked as usize) - ('a' as usize)] += 1; } for count in chosen.iter() { - let err = *count - (1000 / (chars.len() as i32)); - assert!(-20 <= err && err <= 20); + assert!(40 < *count && *count < 106); } chosen.iter_mut().for_each(|x| *x = 0); @@ -535,8 +553,7 @@ mod test { *chosen.choose_mut(&mut r).unwrap() += 1; } for count in chosen.iter() { - let err = *count - (1000 / (chosen.len() as i32)); - assert!(-20 <= err && err <= 20); + assert!(40 < *count && *count < 106); } let mut v: [isize; 0] = []; @@ -597,8 +614,9 @@ mod test { } #[test] + #[cfg(not(miri))] // Miri is too slow fn test_iterator_choose() { - let r = &mut ::test::rng(109); + let r = &mut crate::test::rng(109); fn test_iter<R: Rng + ?Sized, Iter: Iterator<Item=usize> + Clone>(r: &mut R, iter: Iter) { let mut chosen = [0i32; 9]; for _ in 0..1000 { @@ -628,8 +646,9 @@ mod test { } #[test] + #[cfg(not(miri))] // Miri is too slow fn test_shuffle() { - let mut r = ::test::rng(108); + let mut r = crate::test::rng(108); let empty: &mut [isize] = &mut []; empty.shuffle(&mut r); let mut one = [1]; @@ -669,14 +688,16 @@ mod test { counts[permutation] += 1; } for count in counts.iter() { - let err = *count - 10000i32 / 24; - assert!(-50 <= err && err <= 50); + // Binomial(10000, 1/24) with average 416.667 + // Octave: binocdf(n, 10000, 1/24) + // 99.9% chance samples lie within this range: + assert!(352 <= *count && *count <= 483, "count: {}", count); } } #[test] fn test_partial_shuffle() { - let mut r = ::test::rng(118); + let mut r = crate::test::rng(118); let mut empty: [u32; 0] = []; let res = empty.partial_shuffle(&mut r, 10); @@ -696,7 +717,7 @@ mod test { let min_val = 1; let max_val = 100; - let mut r = ::test::rng(401); + let mut r = crate::test::rng(401); let vals = (min_val..max_val).collect::<Vec<i32>>(); let small_sample = vals.iter().choose_multiple(&mut r, 5); let large_sample = vals.iter().choose_multiple(&mut r, vals.len() + 5); @@ -713,75 +734,9 @@ mod test { #[test] #[cfg(feature = "alloc")] - #[allow(deprecated)] - fn test_sample_slice_boundaries() { - let empty: &[u8] = &[]; - - let mut r = ::test::rng(402); - - // sample 0 items - assert_eq!(&sample_slice(&mut r, empty, 0)[..], [0u8; 0]); - assert_eq!(&sample_slice(&mut r, &[42, 2, 42], 0)[..], [0u8; 0]); - - // sample 1 item - assert_eq!(&sample_slice(&mut r, &[42], 1)[..], [42]); - let v = sample_slice(&mut r, &[1, 42], 1)[0]; - assert!(v == 1 || v == 42); - - // sample "all" the items - let v = sample_slice(&mut r, &[42, 133], 2); - assert!(&v[..] == [42, 133] || v[..] == [133, 42]); - - // Make sure lucky 777's aren't lucky - let slice = &[42, 777]; - let mut num_42 = 0; - let total = 1000; - for _ in 0..total { - let v = sample_slice(&mut r, slice, 1); - assert_eq!(v.len(), 1); - let v = v[0]; - assert!(v == 42 || v == 777); - if v == 42 { - num_42 += 1; - } - } - let ratio_42 = num_42 as f64 / 1000 as f64; - assert!(0.4 <= ratio_42 || ratio_42 <= 0.6, "{}", ratio_42); - } - - #[test] - #[cfg(feature = "alloc")] - #[allow(deprecated)] - fn test_sample_slice() { - let seeded_rng = SmallRng::from_seed; - - let mut r = ::test::rng(403); - - for n in 1..20 { - let length = 5*n - 4; // 1, 6, ... 
- let amount = r.gen_range(0, length); - let mut seed = [0u8; 16]; - r.fill(&mut seed); - - // assert the basics work - let regular = index::sample(&mut seeded_rng(seed), length, amount); - assert_eq!(regular.len(), amount); - assert!(regular.iter().all(|e| e < length)); - - // also test that sampling the slice works - let vec: Vec<u32> = (0..(length as u32)).collect(); - let result = sample_slice(&mut seeded_rng(seed), &vec, amount); - assert_eq!(result, regular.iter().map(|i| i as u32).collect::<Vec<_>>()); - - let result = sample_slice_ref(&mut seeded_rng(seed), &vec, amount); - assert!(result.iter().zip(regular.iter()).all(|(i,j)| **i == j as u32)); - } - } - - #[test] - #[cfg(feature = "alloc")] + #[cfg(not(miri))] // Miri is too slow fn test_weighted() { - let mut r = ::test::rng(406); + let mut r = crate::test::rng(406); const N_REPS: u32 = 3000; let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7]; let total_weight = weights.iter().sum::<u32>() as f32; @@ -830,7 +785,7 @@ mod test { assert_eq!(empty_slice.choose_weighted(&mut r, |_| 1), Err(WeightedError::NoItem)); assert_eq!(empty_slice.choose_weighted_mut(&mut r, |_| 1), Err(WeightedError::NoItem)); assert_eq!(['x'].choose_weighted_mut(&mut r, |_| 0), Err(WeightedError::AllWeightsZero)); - assert_eq!([0, -1].choose_weighted_mut(&mut r, |x| *x), Err(WeightedError::NegativeWeight)); - assert_eq!([-1, 0].choose_weighted_mut(&mut r, |x| *x), Err(WeightedError::NegativeWeight)); + assert_eq!([0, -1].choose_weighted_mut(&mut r, |x| *x), Err(WeightedError::InvalidWeight)); + assert_eq!([-1, 0].choose_weighted_mut(&mut r, |x| *x), Err(WeightedError::InvalidWeight)); } } |
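The final test changes reflect the rename of the negative-weight error to `WeightedError::InvalidWeight`. A sketch of how the error variants exercised by these tests surface through the public `choose_weighted`/`choose_weighted_mut` API (import paths assumed to match the crate-internal ones used above):

```rust
use rand::distributions::WeightedError;
use rand::seq::SliceRandom;

fn main() {
    let mut rng = rand::thread_rng();

    // No items at all.
    let empty: [(char, u32); 0] = [];
    assert_eq!(empty.choose_weighted(&mut rng, |x| x.1).unwrap_err(),
               WeightedError::NoItem);

    // Items exist, but every weight is zero.
    let zeros = [('a', 0u32), ('b', 0)];
    assert_eq!(zeros.choose_weighted(&mut rng, |x| x.1).unwrap_err(),
               WeightedError::AllWeightsZero);

    // A negative weight is now reported as `InvalidWeight`.
    let mut bad = [0, -1];
    assert_eq!(bad.choose_weighted_mut(&mut rng, |x| *x).unwrap_err(),
               WeightedError::InvalidWeight);
}
```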