path: root/rand/src/distributions
Diffstat (limited to 'rand/src/distributions')
-rw-r--r-- | rand/src/distributions/bernoulli.rs        | 165
-rw-r--r-- | rand/src/distributions/binomial.rs         | 177
-rw-r--r-- | rand/src/distributions/cauchy.rs           | 115
-rw-r--r-- | rand/src/distributions/dirichlet.rs        | 137
-rw-r--r-- | rand/src/distributions/exponential.rs      |  86
-rw-r--r-- | rand/src/distributions/float.rs            | 259
-rw-r--r-- | rand/src/distributions/gamma.rs            | 209
-rw-r--r-- | rand/src/distributions/integer.rs          | 161
-rw-r--r-- | rand/src/distributions/mod.rs              | 666
-rw-r--r-- | rand/src/distributions/normal.rs           | 120
-rw-r--r-- | rand/src/distributions/other.rs            | 219
-rw-r--r-- | rand/src/distributions/pareto.rs           |  74
-rw-r--r-- | rand/src/distributions/poisson.rs          | 157
-rw-r--r-- | rand/src/distributions/range.rs            | 241
-rw-r--r-- | rand/src/distributions/triangular.rs       |  86
-rw-r--r-- | rand/src/distributions/uniform.rs          | 1297
-rw-r--r-- | rand/src/distributions/unit_circle.rs      | 102
-rw-r--r-- | rand/src/distributions/unit_sphere.rs      | 100
-rw-r--r-- | rand/src/distributions/utils.rs            | 504
-rw-r--r-- | rand/src/distributions/weibull.rs          |  71
-rw-r--r-- | rand/src/distributions/weighted.rs         | 232
-rw-r--r-- | rand/src/distributions/ziggurat_tables.rs  |   9
22 files changed, 4518 insertions, 669 deletions
diff --git a/rand/src/distributions/bernoulli.rs b/rand/src/distributions/bernoulli.rs
new file mode 100644
index 0000000..f49618c
--- /dev/null
+++ b/rand/src/distributions/bernoulli.rs
@@ -0,0 +1,165 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Bernoulli distribution.
+
+use Rng;
+use distributions::Distribution;
+
+/// The Bernoulli distribution.
+///
+/// This is a special case of the Binomial distribution where `n = 1`.
+///
+/// # Example
+///
+/// ```rust
+/// use rand::distributions::{Bernoulli, Distribution};
+///
+/// let d = Bernoulli::new(0.3);
+/// let v = d.sample(&mut rand::thread_rng());
+/// println!("{} is from a Bernoulli distribution", v);
+/// ```
+///
+/// # Precision
+///
+/// This `Bernoulli` distribution uses 64 bits from the RNG (a `u64`),
+/// so only probabilities that are multiples of 2<sup>-64</sup> can be
+/// represented.
+#[derive(Clone, Copy, Debug)]
+pub struct Bernoulli {
+ /// Probability of success, relative to the maximal integer.
+ p_int: u64,
+}
+
+// To sample from the Bernoulli distribution we use a method that compares a
+// random `u64` value `v < (p * 2^64)`.
+//
+// If `p == 1.0`, the integer `v` to compare against cannot be represented as a
+// `u64`. We manually set it to `u64::MAX` instead (2^64 - 1 instead of 2^64).
+// Note that a value of `p < 1.0` can never result in `u64::MAX`, because an
+// `f64` only has 53 bits of precision, and the next largest value of `p` will
+// result in `2^64 - 2048`.
+//
+// Also there is a 100% theoretical concern: if someone consistently wants to
+// generate `true` using the Bernoulli distribution (i.e. by using a probability
+// of `1.0`), just using `u64::MAX` is not enough. On average it would return
+// false once every 2^64 iterations. Some people apparently care about this
+// case.
+//
+// That is why we special-case `u64::MAX` to always return `true`, without using
+// the RNG, and pay the performance price for all uses that *are* reasonable.
+// Luckily, if `new()` and `sample` are close, the compiler can optimize out the
+// extra check.
+const ALWAYS_TRUE: u64 = ::core::u64::MAX;
+
+// This is just `2.0.powi(64)`, but written this way because it is not available
+// in `no_std` mode.
+const SCALE: f64 = 2.0 * (1u64 << 63) as f64;
+
+impl Bernoulli {
+ /// Construct a new `Bernoulli` with the given probability of success `p`.
+ ///
+ /// # Panics
+ ///
+ /// If `p < 0` or `p > 1`.
+ ///
+ /// # Precision
+ ///
+ /// For `p = 1.0`, the resulting distribution will always generate true.
+ /// For `p = 0.0`, the resulting distribution will always generate false.
+ ///
+ /// This method is accurate for any input `p` in the range `[0, 1]` which is
+ /// a multiple of 2<sup>-64</sup>. (Note that not all multiples of
+ /// 2<sup>-64</sup> in `[0, 1]` can be represented as a `f64`.)
+ #[inline]
+ pub fn new(p: f64) -> Bernoulli {
+ if p < 0.0 || p >= 1.0 {
+ if p == 1.0 { return Bernoulli { p_int: ALWAYS_TRUE } }
+ panic!("Bernoulli::new not called with 0.0 <= p <= 1.0");
+ }
+ Bernoulli { p_int: (p * SCALE) as u64 }
+ }
+
+ /// Construct a new `Bernoulli` with the probability of success of
+ /// `numerator`-in-`denominator`. I.e. `new_ratio(2, 3)` will return
+ /// a `Bernoulli` with a 2-in-3 chance, or about 67%, of returning `true`.
+ ///
+ /// If `numerator == denominator` then the returned `Bernoulli` will always
+ /// return `true`. If `numerator == 0` it will always return `false`.
+ ///
+ /// # Panics
+ ///
+ /// If `denominator == 0` or `numerator > denominator`.
+ ///
+ #[inline]
+ pub fn from_ratio(numerator: u32, denominator: u32) -> Bernoulli {
+ assert!(numerator <= denominator);
+ if numerator == denominator {
+ return Bernoulli { p_int: ::core::u64::MAX }
+ }
+ let p_int = ((numerator as f64 / denominator as f64) * SCALE) as u64;
+ Bernoulli { p_int }
+ }
+}
+
+impl Distribution<bool> for Bernoulli {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> bool {
+ // Make sure to always return true for p = 1.0.
+ if self.p_int == ALWAYS_TRUE { return true; }
+ let v: u64 = rng.gen();
+ v < self.p_int
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use Rng;
+ use distributions::Distribution;
+ use super::Bernoulli;
+
+ #[test]
+ fn test_trivial() {
+ let mut r = ::test::rng(1);
+ let always_false = Bernoulli::new(0.0);
+ let always_true = Bernoulli::new(1.0);
+ for _ in 0..5 {
+ assert_eq!(r.sample::<bool, _>(&always_false), false);
+ assert_eq!(r.sample::<bool, _>(&always_true), true);
+ assert_eq!(Distribution::<bool>::sample(&always_false, &mut r), false);
+ assert_eq!(Distribution::<bool>::sample(&always_true, &mut r), true);
+ }
+ }
+
+ #[test]
+ fn test_average() {
+ const P: f64 = 0.3;
+ const NUM: u32 = 3;
+ const DENOM: u32 = 10;
+ let d1 = Bernoulli::new(P);
+ let d2 = Bernoulli::from_ratio(NUM, DENOM);
+ const N: u32 = 100_000;
+
+ let mut sum1: u32 = 0;
+ let mut sum2: u32 = 0;
+ let mut rng = ::test::rng(2);
+ for _ in 0..N {
+ if d1.sample(&mut rng) {
+ sum1 += 1;
+ }
+ if d2.sample(&mut rng) {
+ sum2 += 1;
+ }
+ }
+ let avg1 = (sum1 as f64) / (N as f64);
+ assert!((avg1 - P).abs() < 5e-3);
+
+ let avg2 = (sum2 as f64) / (N as f64);
+ assert!((avg2 - (NUM as f64)/(DENOM as f64)).abs() < 5e-3);
+ }
+}
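The comment block in `bernoulli.rs` above describes the sampling trick: map `p` once to an integer threshold `p_int = p * 2^64`, then each sample is a single `u64` comparison. A minimal standalone sketch of the same idea follows; the `XorShift64` generator and the helper names are illustrative stand-ins, not types from the diff.

```rust
// Toy xorshift64 generator, purely for illustration (not a rand crate type).
struct XorShift64(u64);

impl XorShift64 {
    fn next_u64(&mut self) -> u64 {
        let mut x = self.0;
        x ^= x << 13;
        x ^= x >> 7;
        x ^= x << 17;
        self.0 = x;
        x
    }
}

// Map `p` once to an integer threshold; sampling is then one comparison.
fn bernoulli_threshold(p: f64) -> u64 {
    assert!(p >= 0.0 && p <= 1.0);
    if p == 1.0 { return u64::max_value(); } // the ALWAYS_TRUE special case
    let scale = 2.0 * (1u64 << 63) as f64;   // 2^64, same trick as SCALE above
    (p * scale) as u64
}

fn sample_bernoulli(rng: &mut XorShift64, p_int: u64) -> bool {
    if p_int == u64::max_value() { return true; }
    rng.next_u64() < p_int
}

fn main() {
    let mut rng = XorShift64(0x1234_5678_9abc_def0);
    let p_int = bernoulli_threshold(0.3);
    let hits = (0..100_000).filter(|_| sample_bernoulli(&mut rng, p_int)).count();
    println!("empirical p ≈ {}", hits as f64 / 100_000.0); // expect roughly 0.3
}
```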
diff --git a/rand/src/distributions/binomial.rs b/rand/src/distributions/binomial.rs
new file mode 100644
index 0000000..2df393e
--- /dev/null
+++ b/rand/src/distributions/binomial.rs
@@ -0,0 +1,177 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2016-2017 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The binomial distribution.
+
+use Rng;
+use distributions::{Distribution, Bernoulli, Cauchy};
+use distributions::utils::log_gamma;
+
+/// The binomial distribution `Binomial(n, p)`.
+///
+/// This distribution has density function:
+/// `f(k) = n!/(k! (n-k)!) p^k (1-p)^(n-k)` for `k >= 0`.
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{Binomial, Distribution};
+///
+/// let bin = Binomial::new(20, 0.3);
+/// let v = bin.sample(&mut rand::thread_rng());
+/// println!("{} is from a binomial distribution", v);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Binomial {
+ /// Number of trials.
+ n: u64,
+ /// Probability of success.
+ p: f64,
+}
+
+impl Binomial {
+ /// Construct a new `Binomial` with the given shape parameters `n` (number
+ /// of trials) and `p` (probability of success).
+ ///
+ /// Panics if `p < 0` or `p > 1`.
+ pub fn new(n: u64, p: f64) -> Binomial {
+ assert!(p >= 0.0, "Binomial::new called with p < 0");
+ assert!(p <= 1.0, "Binomial::new called with p > 1");
+ Binomial { n, p }
+ }
+}
+
+impl Distribution<u64> for Binomial {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
+ // Handle these values directly.
+ if self.p == 0.0 {
+ return 0;
+ } else if self.p == 1.0 {
+ return self.n;
+ }
+
+ // For low n, it is faster to sample directly. For both methods,
+ // performance is independent of p. On an Intel Haswell CPU this method
+ // appears to be faster for approximately n < 300.
+ if self.n < 300 {
+ let mut result = 0;
+ let d = Bernoulli::new(self.p);
+ for _ in 0 .. self.n {
+ result += rng.sample(d) as u32;
+ }
+ return result as u64;
+ }
+
+ // binomial distribution is symmetrical with respect to p -> 1-p, k -> n-k
+ // switch p so that it is less than 0.5 - this allows for lower expected values
+ // we will just invert the result at the end
+ let p = if self.p <= 0.5 {
+ self.p
+ } else {
+ 1.0 - self.p
+ };
+
+ // prepare some cached values
+ let float_n = self.n as f64;
+ let ln_fact_n = log_gamma(float_n + 1.0);
+ let pc = 1.0 - p;
+ let log_p = p.ln();
+ let log_pc = pc.ln();
+ let expected = self.n as f64 * p;
+ let sq = (expected * (2.0 * pc)).sqrt();
+
+ let mut lresult;
+
+ // we use the Cauchy distribution as the comparison distribution
+ // f(x) ~ 1/(1+x^2)
+ let cauchy = Cauchy::new(0.0, 1.0);
+ loop {
+ let mut comp_dev: f64;
+ loop {
+ // draw from the Cauchy distribution
+ comp_dev = rng.sample(cauchy);
+ // shift the peak of the comparison distribution
+ lresult = expected + sq * comp_dev;
+ // repeat the drawing until we are in the range of possible values
+ if lresult >= 0.0 && lresult < float_n + 1.0 {
+ break;
+ }
+ }
+
+ // the result should be discrete
+ lresult = lresult.floor();
+
+ let log_binomial_dist = ln_fact_n - log_gamma(lresult+1.0) -
+ log_gamma(float_n - lresult + 1.0) + lresult*log_p + (float_n - lresult)*log_pc;
+ // this is the binomial probability divided by the comparison probability
+ // we will generate a uniform random value and if it is larger than this,
+ // we interpret it as a value falling out of the distribution and repeat
+ let comparison_coeff = (log_binomial_dist.exp() * sq) * (1.2 * (1.0 + comp_dev*comp_dev));
+
+ if comparison_coeff >= rng.gen() {
+ break;
+ }
+ }
+
+ // invert the result for p < 0.5
+ if p != self.p {
+ self.n - lresult as u64
+ } else {
+ lresult as u64
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use Rng;
+ use distributions::Distribution;
+ use super::Binomial;
+
+ fn test_binomial_mean_and_variance<R: Rng>(n: u64, p: f64, rng: &mut R) {
+ let binomial = Binomial::new(n, p);
+
+ let expected_mean = n as f64 * p;
+ let expected_variance = n as f64 * p * (1.0 - p);
+
+ let mut results = [0.0; 1000];
+ for i in results.iter_mut() { *i = binomial.sample(rng) as f64; }
+
+ let mean = results.iter().sum::<f64>() / results.len() as f64;
+ assert!((mean as f64 - expected_mean).abs() < expected_mean / 50.0);
+
+ let variance =
+ results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>()
+ / results.len() as f64;
+ assert!((variance - expected_variance).abs() < expected_variance / 10.0);
+ }
+
+ #[test]
+ fn test_binomial() {
+ let mut rng = ::test::rng(351);
+ test_binomial_mean_and_variance(150, 0.1, &mut rng);
+ test_binomial_mean_and_variance(70, 0.6, &mut rng);
+ test_binomial_mean_and_variance(40, 0.5, &mut rng);
+ test_binomial_mean_and_variance(20, 0.7, &mut rng);
+ test_binomial_mean_and_variance(20, 0.5, &mut rng);
+ }
+
+ #[test]
+ fn test_binomial_end_points() {
+ let mut rng = ::test::rng(352);
+ assert_eq!(rng.sample(Binomial::new(20, 0.0)), 0);
+ assert_eq!(rng.sample(Binomial::new(20, 1.0)), 20);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_binomial_invalid_lambda_neg() {
+ Binomial::new(20, -10.0);
+ }
+}
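`Binomial::sample` above uses two strategies: for `n < 300` it sums `n` Bernoulli trials directly, and for larger `n` it rejection-samples against a shifted and scaled Cauchy proposal. A sketch of only the small-`n` path, again with a toy generator (the names are illustrative, not from the diff); the large-`n` path is omitted because it needs the `log_gamma` helper from `distributions::utils`.

```rust
// Toy xorshift64 generator, reused purely for illustration.
struct XorShift64(u64);

impl XorShift64 {
    fn next_u64(&mut self) -> u64 {
        let mut x = self.0;
        x ^= x << 13;
        x ^= x >> 7;
        x ^= x << 17;
        self.0 = x;
        x
    }
    // Uniform f64 in [0, 1) from the top 53 bits (same idea as float.rs below).
    fn next_f64(&mut self) -> f64 {
        (self.next_u64() >> 11) as f64 * (1.0 / (1u64 << 53) as f64)
    }
}

// Direct method for small n: count successes over n independent trials.
fn binomial_small_n(rng: &mut XorShift64, n: u64, p: f64) -> u64 {
    (0..n).filter(|_| rng.next_f64() < p).count() as u64
}

fn main() {
    let mut rng = XorShift64(42);
    let k = binomial_small_n(&mut rng, 20, 0.3);
    println!("one Binomial(20, 0.3) draw: {}", k); // the mean is n * p = 6
}
```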
diff --git a/rand/src/distributions/cauchy.rs b/rand/src/distributions/cauchy.rs
new file mode 100644
index 0000000..feef015
--- /dev/null
+++ b/rand/src/distributions/cauchy.rs
@@ -0,0 +1,115 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2016-2017 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Cauchy distribution.
+
+use Rng;
+use distributions::Distribution;
+use std::f64::consts::PI;
+
+/// The Cauchy distribution `Cauchy(median, scale)`.
+///
+/// This distribution has a density function:
+/// `f(x) = 1 / (pi * scale * (1 + ((x - median) / scale)^2))`
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{Cauchy, Distribution};
+///
+/// let cau = Cauchy::new(2.0, 5.0);
+/// let v = cau.sample(&mut rand::thread_rng());
+/// println!("{} is from a Cauchy(2, 5) distribution", v);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Cauchy {
+ median: f64,
+ scale: f64
+}
+
+impl Cauchy {
+ /// Construct a new `Cauchy` with the given shape parameters
+ /// `median` the peak location and `scale` the scale factor.
+ /// Panics if `scale <= 0`.
+ pub fn new(median: f64, scale: f64) -> Cauchy {
+ assert!(scale > 0.0, "Cauchy::new called with scale factor <= 0");
+ Cauchy {
+ median,
+ scale
+ }
+ }
+}
+
+impl Distribution<f64> for Cauchy {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ // sample from [0, 1)
+ let x = rng.gen::<f64>();
+ // get standard cauchy random number
+ // note that π/2 is not exactly representable, even if x=0.5 the result is finite
+ let comp_dev = (PI * x).tan();
+ // shift and scale according to parameters
+ let result = self.median + self.scale * comp_dev;
+ result
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use distributions::Distribution;
+ use super::Cauchy;
+
+ fn median(mut numbers: &mut [f64]) -> f64 {
+ sort(&mut numbers);
+ let mid = numbers.len() / 2;
+ numbers[mid]
+ }
+
+ fn sort(numbers: &mut [f64]) {
+ numbers.sort_by(|a, b| a.partial_cmp(b).unwrap());
+ }
+
+ #[test]
+ fn test_cauchy_median() {
+ let cauchy = Cauchy::new(10.0, 5.0);
+ let mut rng = ::test::rng(123);
+ let mut numbers: [f64; 1000] = [0.0; 1000];
+ for i in 0..1000 {
+ numbers[i] = cauchy.sample(&mut rng);
+ }
+ let median = median(&mut numbers);
+ println!("Cauchy median: {}", median);
+ assert!((median - 10.0).abs() < 0.5); // not 100% certain, but probable enough
+ }
+
+ #[test]
+ fn test_cauchy_mean() {
+ let cauchy = Cauchy::new(10.0, 5.0);
+ let mut rng = ::test::rng(123);
+ let mut sum = 0.0;
+ for _ in 0..1000 {
+ sum += cauchy.sample(&mut rng);
+ }
+ let mean = sum / 1000.0;
+ println!("Cauchy mean: {}", mean);
+ // for a Cauchy distribution the mean should not converge
+ assert!((mean - 10.0).abs() > 0.5); // not 100% certain, but probable enough
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_cauchy_invalid_scale_zero() {
+ Cauchy::new(0.0, 0.0);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_cauchy_invalid_scale_neg() {
+ Cauchy::new(0.0, -10.0);
+ }
+}
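`Cauchy::sample` above is an inverse-CDF method: for `U` uniform on `[0, 1)`, `tan(π·U)` is standard-Cauchy distributed, and the draw is then scaled and shifted. The textbook form uses `tan(π·(U − 1/2))`, which differs only by the periodicity of `tan`. A small sketch of that transform with fixed uniforms (the function name is illustrative):

```rust
use std::f64::consts::PI;

// Inverse-CDF sketch: x = median + scale * tan(pi * (u - 0.5)).
// Up to tan's periodicity this matches the (PI * x).tan() form in the diff.
fn cauchy_from_uniform(u: f64, median: f64, scale: f64) -> f64 {
    median + scale * (PI * (u - 0.5)).tan()
}

fn main() {
    // A few fixed uniforms just to show the shape of the transform.
    for &u in &[0.1, 0.25, 0.5, 0.75, 0.9] {
        println!("u = {:>4} -> {:>8.3}", u, cauchy_from_uniform(u, 10.0, 5.0));
    }
}
```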
diff --git a/rand/src/distributions/dirichlet.rs b/rand/src/distributions/dirichlet.rs
new file mode 100644
index 0000000..19384b8
--- /dev/null
+++ b/rand/src/distributions/dirichlet.rs
@@ -0,0 +1,137 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Dirichlet distribution.
+
+use Rng;
+use distributions::Distribution;
+use distributions::gamma::Gamma;
+
+/// The Dirichlet distribution `Dirichlet(alpha)`.
+///
+/// The Dirichlet distribution is a family of continuous multivariate
+/// probability distributions parameterized by a vector alpha of positive reals.
+/// It is a multivariate generalization of the beta distribution.
+///
+/// # Example
+///
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::Dirichlet;
+///
+/// let dirichlet = Dirichlet::new(vec![1.0, 2.0, 3.0]);
+/// let samples = dirichlet.sample(&mut rand::thread_rng());
+/// println!("{:?} is from a Dirichlet([1.0, 2.0, 3.0]) distribution", samples);
+/// ```
+
+#[derive(Clone, Debug)]
+pub struct Dirichlet {
+ /// Concentration parameters (alpha)
+ alpha: Vec<f64>,
+}
+
+impl Dirichlet {
+ /// Construct a new `Dirichlet` with the given alpha parameter `alpha`.
+ ///
+ /// # Panics
+ /// - if `alpha.len() < 2`
+ ///
+ #[inline]
+ pub fn new<V: Into<Vec<f64>>>(alpha: V) -> Dirichlet {
+ let a = alpha.into();
+ assert!(a.len() > 1);
+ for i in 0..a.len() {
+ assert!(a[i] > 0.0);
+ }
+
+ Dirichlet { alpha: a }
+ }
+
+ /// Construct a new `Dirichlet` with the given shape parameter `alpha` and `size`.
+ ///
+ /// # Panics
+ /// - if `alpha <= 0.0`
+ /// - if `size < 2`
+ ///
+ #[inline]
+ pub fn new_with_param(alpha: f64, size: usize) -> Dirichlet {
+ assert!(alpha > 0.0);
+ assert!(size > 1);
+ Dirichlet {
+ alpha: vec![alpha; size],
+ }
+ }
+}
+
+impl Distribution<Vec<f64>> for Dirichlet {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Vec<f64> {
+ let n = self.alpha.len();
+ let mut samples = vec![0.0f64; n];
+ let mut sum = 0.0f64;
+
+ for i in 0..n {
+ let g = Gamma::new(self.alpha[i], 1.0);
+ samples[i] = g.sample(rng);
+ sum += samples[i];
+ }
+ let invacc = 1.0 / sum;
+ for i in 0..n {
+ samples[i] *= invacc;
+ }
+ samples
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::Dirichlet;
+ use distributions::Distribution;
+
+ #[test]
+ fn test_dirichlet() {
+ let d = Dirichlet::new(vec![1.0, 2.0, 3.0]);
+ let mut rng = ::test::rng(221);
+ let samples = d.sample(&mut rng);
+ let _: Vec<f64> = samples
+ .into_iter()
+ .map(|x| {
+ assert!(x > 0.0);
+ x
+ })
+ .collect();
+ }
+
+ #[test]
+ fn test_dirichlet_with_param() {
+ let alpha = 0.5f64;
+ let size = 2;
+ let d = Dirichlet::new_with_param(alpha, size);
+ let mut rng = ::test::rng(221);
+ let samples = d.sample(&mut rng);
+ let _: Vec<f64> = samples
+ .into_iter()
+ .map(|x| {
+ assert!(x > 0.0);
+ x
+ })
+ .collect();
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_dirichlet_invalid_length() {
+ Dirichlet::new_with_param(0.5f64, 1);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_dirichlet_invalid_alpha() {
+ Dirichlet::new_with_param(0.0f64, 2);
+ }
+}
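The Dirichlet sampler above draws one `Gamma(alpha_i, 1)` variate per component and normalizes the vector by its sum. A sketch of just the normalization step, with the gamma draws supplied as fixed placeholders (drawing them for real needs the Marsaglia-Tsang code in `gamma.rs`):

```rust
// Given Gamma(alpha_i, 1) variates, a Dirichlet sample is the vector
// normalized to sum to 1. The inputs below are placeholder values.
fn dirichlet_from_gammas(gammas: &[f64]) -> Vec<f64> {
    let sum: f64 = gammas.iter().sum();
    gammas.iter().map(|&g| g / sum).collect()
}

fn main() {
    let gammas = [0.8, 2.1, 3.3]; // stand-ins for three gamma draws
    let sample = dirichlet_from_gammas(&gammas);
    println!("{:?} (sum = {})", sample, sample.iter().sum::<f64>()); // sums to 1
}
```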
diff --git a/rand/src/distributions/exponential.rs b/rand/src/distributions/exponential.rs
index c3c924c..a7d0500 100644
--- a/rand/src/distributions/exponential.rs
+++ b/rand/src/distributions/exponential.rs
@@ -1,74 +1,78 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The exponential distribution.
-use {Rng, Rand};
-use distributions::{ziggurat, ziggurat_tables, Sample, IndependentSample};
+use {Rng};
+use distributions::{ziggurat_tables, Distribution};
+use distributions::utils::ziggurat;
-/// A wrapper around an `f64` to generate Exp(1) random numbers.
+/// Samples floating-point numbers according to the exponential distribution,
+/// with rate parameter `λ = 1`. This is equivalent to `Exp::new(1.0)` or
+/// sampling with `-rng.gen::<f64>().ln()`, but faster.
///
/// See `Exp` for the general exponential distribution.
///
-/// Implemented via the ZIGNOR variant[1] of the Ziggurat method. The
-/// exact description in the paper was adjusted to use tables for the
-/// exponential distribution rather than normal.
+/// Implemented via the ZIGNOR variant[^1] of the Ziggurat method. The exact
+/// description in the paper was adjusted to use tables for the exponential
+/// distribution rather than normal.
///
-/// [1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to
-/// Generate Normal Random
-/// Samples*](http://www.doornik.com/research/ziggurat.pdf). Nuffield
-/// College, Oxford
+/// [^1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to
+/// Generate Normal Random Samples*](
+/// https://www.doornik.com/research/ziggurat.pdf).
+/// Nuffield College, Oxford
///
/// # Example
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::Exp1;
///
-/// ```rust
-/// use rand::distributions::exponential::Exp1;
-///
-/// let Exp1(x) = rand::random();
-/// println!("{}", x);
+/// let val: f64 = SmallRng::from_entropy().sample(Exp1);
+/// println!("{}", val);
/// ```
#[derive(Clone, Copy, Debug)]
-pub struct Exp1(pub f64);
+pub struct Exp1;
// This could be done via `-rng.gen::<f64>().ln()` but that is slower.
-impl Rand for Exp1 {
+impl Distribution<f64> for Exp1 {
#[inline]
- fn rand<R:Rng>(rng: &mut R) -> Exp1 {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
#[inline]
fn pdf(x: f64) -> f64 {
(-x).exp()
}
#[inline]
- fn zero_case<R:Rng>(rng: &mut R, _u: f64) -> f64 {
+ fn zero_case<R: Rng + ?Sized>(rng: &mut R, _u: f64) -> f64 {
ziggurat_tables::ZIG_EXP_R - rng.gen::<f64>().ln()
}
- Exp1(ziggurat(rng, false,
- &ziggurat_tables::ZIG_EXP_X,
- &ziggurat_tables::ZIG_EXP_F,
- pdf, zero_case))
+ ziggurat(rng, false,
+ &ziggurat_tables::ZIG_EXP_X,
+ &ziggurat_tables::ZIG_EXP_F,
+ pdf, zero_case)
}
}
/// The exponential distribution `Exp(lambda)`.
///
-/// This distribution has density function: `f(x) = lambda *
-/// exp(-lambda * x)` for `x > 0`.
+/// This distribution has density function: `f(x) = lambda * exp(-lambda * x)`
+/// for `x > 0`.
+///
+/// Note that [`Exp1`](struct.Exp1.html) is an optimised implementation for `lambda = 1`.
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{Exp, IndependentSample};
+/// ```
+/// use rand::distributions::{Exp, Distribution};
///
/// let exp = Exp::new(2.0);
-/// let v = exp.ind_sample(&mut rand::thread_rng());
+/// let v = exp.sample(&mut rand::thread_rng());
/// println!("{} is from a Exp(2) distribution", v);
/// ```
#[derive(Clone, Copy, Debug)]
@@ -87,28 +91,24 @@ impl Exp {
}
}
-impl Sample<f64> for Exp {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl IndependentSample<f64> for Exp {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
- let Exp1(n) = rng.gen::<Exp1>();
+impl Distribution<f64> for Exp {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let n: f64 = rng.sample(Exp1);
n * self.lambda_inverse
}
}
#[cfg(test)]
mod test {
- use distributions::{Sample, IndependentSample};
+ use distributions::Distribution;
use super::Exp;
#[test]
fn test_exp() {
- let mut exp = Exp::new(10.0);
- let mut rng = ::test::rng();
+ let exp = Exp::new(10.0);
+ let mut rng = ::test::rng(221);
for _ in 0..1000 {
assert!(exp.sample(&mut rng) >= 0.0);
- assert!(exp.ind_sample(&mut rng) >= 0.0);
}
}
#[test]
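The new `Exp1` docs above note that the ziggurat-based sampler is equivalent to, but faster than, the inverse transform `-rng.gen::<f64>().ln()`; `Exp` then just multiplies an `Exp1` draw by `1/lambda`. A sketch of that slower inverse-transform route with fixed inputs (the function name is illustrative):

```rust
// Inverse transform: for U in (0, 1], -ln(U) is Exp(1) distributed, and
// dividing by lambda gives Exp(lambda). Slower than the ziggurat method
// the diff actually uses, but easy to verify.
fn exp_inverse_transform(u: f64, lambda: f64) -> f64 {
    assert!(u > 0.0 && u <= 1.0 && lambda > 0.0);
    -u.ln() / lambda
}

fn main() {
    // Fixed uniforms for illustration; the mean of Exp(2) is 0.5.
    for &u in &[0.9, 0.5, 0.1, 0.01] {
        println!("u = {:>5} -> {:.4}", u, exp_inverse_transform(u, 2.0));
    }
}
```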
diff --git a/rand/src/distributions/float.rs b/rand/src/distributions/float.rs
new file mode 100644
index 0000000..ece12f5
--- /dev/null
+++ b/rand/src/distributions/float.rs
@@ -0,0 +1,259 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Basic floating-point number distributions
+
+use core::mem;
+use Rng;
+use distributions::{Distribution, Standard};
+use distributions::utils::FloatSIMDUtils;
+#[cfg(feature="simd_support")]
+use packed_simd::*;
+
+/// A distribution to sample floating point numbers uniformly in the half-open
+/// interval `(0, 1]`, i.e. including 1 but not 0.
+///
+/// All values that can be generated are of the form `n * ε/2`. For `f32`
+/// the 23 most significant random bits of a `u32` are used and for `f64` the
+/// 53 most significant bits of a `u64` are used. The conversion uses the
+/// multiplicative method.
+///
+/// See also: [`Standard`] which samples from `[0, 1)`, [`Open01`]
+/// which samples from `(0, 1)` and [`Uniform`] which samples from arbitrary
+/// ranges.
+///
+/// # Example
+/// ```
+/// use rand::{thread_rng, Rng};
+/// use rand::distributions::OpenClosed01;
+///
+/// let val: f32 = thread_rng().sample(OpenClosed01);
+/// println!("f32 from (0, 1): {}", val);
+/// ```
+///
+/// [`Standard`]: struct.Standard.html
+/// [`Open01`]: struct.Open01.html
+/// [`Uniform`]: uniform/struct.Uniform.html
+#[derive(Clone, Copy, Debug)]
+pub struct OpenClosed01;
+
+/// A distribution to sample floating point numbers uniformly in the open
+/// interval `(0, 1)`, i.e. not including either endpoint.
+///
+/// All values that can be generated are of the form `n * ε + ε/2`. For `f32`
+/// the 22 most significant random bits of a `u32` are used, for `f64` 52 from
+/// a `u64`. The conversion uses a transmute-based method.
+///
+/// See also: [`Standard`] which samples from `[0, 1)`, [`OpenClosed01`]
+/// which samples from `(0, 1]` and [`Uniform`] which samples from arbitrary
+/// ranges.
+///
+/// # Example
+/// ```
+/// use rand::{thread_rng, Rng};
+/// use rand::distributions::Open01;
+///
+/// let val: f32 = thread_rng().sample(Open01);
+/// println!("f32 from (0, 1): {}", val);
+/// ```
+///
+/// [`Standard`]: struct.Standard.html
+/// [`OpenClosed01`]: struct.OpenClosed01.html
+/// [`Uniform`]: uniform/struct.Uniform.html
+#[derive(Clone, Copy, Debug)]
+pub struct Open01;
+
+
+pub(crate) trait IntoFloat {
+ type F;
+
+ /// Helper method to combine the fraction and a constant exponent into a
+ /// float.
+ ///
+ /// Only the least significant bits of `self` may be set, 23 for `f32` and
+ /// 52 for `f64`.
+ /// The resulting value will fall in a range that depends on the exponent.
+ /// As an example the range with exponent 0 will be
+ /// [2<sup>0</sup>..2<sup>1</sup>), which is [1..2).
+ fn into_float_with_exponent(self, exponent: i32) -> Self::F;
+}
+
+macro_rules! float_impls {
+ ($ty:ident, $uty:ident, $f_scalar:ident, $u_scalar:ty,
+ $fraction_bits:expr, $exponent_bias:expr) => {
+ impl IntoFloat for $uty {
+ type F = $ty;
+ #[inline(always)]
+ fn into_float_with_exponent(self, exponent: i32) -> $ty {
+ // The exponent is encoded using an offset-binary representation
+ let exponent_bits: $u_scalar =
+ (($exponent_bias + exponent) as $u_scalar) << $fraction_bits;
+ // TODO: use from_bits when min compiler > 1.25 (see #545)
+ // $ty::from_bits(self | exponent_bits)
+ unsafe{ mem::transmute(self | exponent_bits) }
+ }
+ }
+
+ impl Distribution<$ty> for Standard {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+ // Multiply-based method; 24/53 random bits; [0, 1) interval.
+ // We use the most significant bits because for simple RNGs
+ // those are usually more random.
+ let float_size = mem::size_of::<$f_scalar>() as u32 * 8;
+ let precision = $fraction_bits + 1;
+ let scale = 1.0 / ((1 as $u_scalar << precision) as $f_scalar);
+
+ let value: $uty = rng.gen();
+ let value = value >> (float_size - precision);
+ scale * $ty::cast_from_int(value)
+ }
+ }
+
+ impl Distribution<$ty> for OpenClosed01 {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+ // Multiply-based method; 24/53 random bits; (0, 1] interval.
+ // We use the most significant bits because for simple RNGs
+ // those are usually more random.
+ let float_size = mem::size_of::<$f_scalar>() as u32 * 8;
+ let precision = $fraction_bits + 1;
+ let scale = 1.0 / ((1 as $u_scalar << precision) as $f_scalar);
+
+ let value: $uty = rng.gen();
+ let value = value >> (float_size - precision);
+ // Add 1 to shift up; will not overflow because of right-shift:
+ scale * $ty::cast_from_int(value + 1)
+ }
+ }
+
+ impl Distribution<$ty> for Open01 {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+ // Transmute-based method; 23/52 random bits; (0, 1) interval.
+ // We use the most significant bits because for simple RNGs
+ // those are usually more random.
+ use core::$f_scalar::EPSILON;
+ let float_size = mem::size_of::<$f_scalar>() as u32 * 8;
+
+ let value: $uty = rng.gen();
+ let fraction = value >> (float_size - $fraction_bits);
+ fraction.into_float_with_exponent(0) - (1.0 - EPSILON / 2.0)
+ }
+ }
+ }
+}
+
+float_impls! { f32, u32, f32, u32, 23, 127 }
+float_impls! { f64, u64, f64, u64, 52, 1023 }
+
+#[cfg(feature="simd_support")]
+float_impls! { f32x2, u32x2, f32, u32, 23, 127 }
+#[cfg(feature="simd_support")]
+float_impls! { f32x4, u32x4, f32, u32, 23, 127 }
+#[cfg(feature="simd_support")]
+float_impls! { f32x8, u32x8, f32, u32, 23, 127 }
+#[cfg(feature="simd_support")]
+float_impls! { f32x16, u32x16, f32, u32, 23, 127 }
+
+#[cfg(feature="simd_support")]
+float_impls! { f64x2, u64x2, f64, u64, 52, 1023 }
+#[cfg(feature="simd_support")]
+float_impls! { f64x4, u64x4, f64, u64, 52, 1023 }
+#[cfg(feature="simd_support")]
+float_impls! { f64x8, u64x8, f64, u64, 52, 1023 }
+
+
+#[cfg(test)]
+mod tests {
+ use Rng;
+ use distributions::{Open01, OpenClosed01};
+ use rngs::mock::StepRng;
+ #[cfg(feature="simd_support")]
+ use packed_simd::*;
+
+ const EPSILON32: f32 = ::core::f32::EPSILON;
+ const EPSILON64: f64 = ::core::f64::EPSILON;
+
+ macro_rules! test_f32 {
+ ($fnn:ident, $ty:ident, $ZERO:expr, $EPSILON:expr) => {
+ #[test]
+ fn $fnn() {
+ // Standard
+ let mut zeros = StepRng::new(0, 0);
+ assert_eq!(zeros.gen::<$ty>(), $ZERO);
+ let mut one = StepRng::new(1 << 8 | 1 << (8 + 32), 0);
+ assert_eq!(one.gen::<$ty>(), $EPSILON / 2.0);
+ let mut max = StepRng::new(!0, 0);
+ assert_eq!(max.gen::<$ty>(), 1.0 - $EPSILON / 2.0);
+
+ // OpenClosed01
+ let mut zeros = StepRng::new(0, 0);
+ assert_eq!(zeros.sample::<$ty, _>(OpenClosed01),
+ 0.0 + $EPSILON / 2.0);
+ let mut one = StepRng::new(1 << 8 | 1 << (8 + 32), 0);
+ assert_eq!(one.sample::<$ty, _>(OpenClosed01), $EPSILON);
+ let mut max = StepRng::new(!0, 0);
+ assert_eq!(max.sample::<$ty, _>(OpenClosed01), $ZERO + 1.0);
+
+ // Open01
+ let mut zeros = StepRng::new(0, 0);
+ assert_eq!(zeros.sample::<$ty, _>(Open01), 0.0 + $EPSILON / 2.0);
+ let mut one = StepRng::new(1 << 9 | 1 << (9 + 32), 0);
+ assert_eq!(one.sample::<$ty, _>(Open01), $EPSILON / 2.0 * 3.0);
+ let mut max = StepRng::new(!0, 0);
+ assert_eq!(max.sample::<$ty, _>(Open01), 1.0 - $EPSILON / 2.0);
+ }
+ }
+ }
+ test_f32! { f32_edge_cases, f32, 0.0, EPSILON32 }
+ #[cfg(feature="simd_support")]
+ test_f32! { f32x2_edge_cases, f32x2, f32x2::splat(0.0), f32x2::splat(EPSILON32) }
+ #[cfg(feature="simd_support")]
+ test_f32! { f32x4_edge_cases, f32x4, f32x4::splat(0.0), f32x4::splat(EPSILON32) }
+ #[cfg(feature="simd_support")]
+ test_f32! { f32x8_edge_cases, f32x8, f32x8::splat(0.0), f32x8::splat(EPSILON32) }
+ #[cfg(feature="simd_support")]
+ test_f32! { f32x16_edge_cases, f32x16, f32x16::splat(0.0), f32x16::splat(EPSILON32) }
+
+ macro_rules! test_f64 {
+ ($fnn:ident, $ty:ident, $ZERO:expr, $EPSILON:expr) => {
+ #[test]
+ fn $fnn() {
+ // Standard
+ let mut zeros = StepRng::new(0, 0);
+ assert_eq!(zeros.gen::<$ty>(), $ZERO);
+ let mut one = StepRng::new(1 << 11, 0);
+ assert_eq!(one.gen::<$ty>(), $EPSILON / 2.0);
+ let mut max = StepRng::new(!0, 0);
+ assert_eq!(max.gen::<$ty>(), 1.0 - $EPSILON / 2.0);
+
+ // OpenClosed01
+ let mut zeros = StepRng::new(0, 0);
+ assert_eq!(zeros.sample::<$ty, _>(OpenClosed01),
+ 0.0 + $EPSILON / 2.0);
+ let mut one = StepRng::new(1 << 11, 0);
+ assert_eq!(one.sample::<$ty, _>(OpenClosed01), $EPSILON);
+ let mut max = StepRng::new(!0, 0);
+ assert_eq!(max.sample::<$ty, _>(OpenClosed01), $ZERO + 1.0);
+
+ // Open01
+ let mut zeros = StepRng::new(0, 0);
+ assert_eq!(zeros.sample::<$ty, _>(Open01), 0.0 + $EPSILON / 2.0);
+ let mut one = StepRng::new(1 << 12, 0);
+ assert_eq!(one.sample::<$ty, _>(Open01), $EPSILON / 2.0 * 3.0);
+ let mut max = StepRng::new(!0, 0);
+ assert_eq!(max.sample::<$ty, _>(Open01), 1.0 - $EPSILON / 2.0);
+ }
+ }
+ }
+ test_f64! { f64_edge_cases, f64, 0.0, EPSILON64 }
+ #[cfg(feature="simd_support")]
+ test_f64! { f64x2_edge_cases, f64x2, f64x2::splat(0.0), f64x2::splat(EPSILON64) }
+ #[cfg(feature="simd_support")]
+ test_f64! { f64x4_edge_cases, f64x4, f64x4::splat(0.0), f64x4::splat(EPSILON64) }
+ #[cfg(feature="simd_support")]
+ test_f64! { f64x8_edge_cases, f64x8, f64x8::splat(0.0), f64x8::splat(EPSILON64) }
+}
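`float.rs` above implements two conversions: the multiply-based method (`Standard`/`OpenClosed01`) takes the top 24/53 bits and scales them, while the transmute-based method (`Open01`) writes 23/52 bits straight into the mantissa with a fixed exponent. A standalone sketch of both for `f64`; it uses `f64::from_bits` where the diff still uses `mem::transmute` for older-compiler support, and the function names are illustrative.

```rust
// Multiply-based conversion (Standard, [0, 1)): top 53 bits times 2^-53.
fn u64_to_f64_standard(x: u64) -> f64 {
    let precision = 52 + 1;
    let scale = 1.0 / ((1u64 << precision) as f64);
    scale * (x >> (64 - precision)) as f64
}

// Transmute-style conversion (Open01, (0, 1)): put 52 random bits into the
// mantissa with exponent 0, giving a value in [1, 2), then shift it down.
fn u64_to_f64_open01(x: u64) -> f64 {
    let fraction = x >> (64 - 52);
    let exponent_bits: u64 = 1023u64 << 52; // biased exponent for 2^0
    let val = f64::from_bits(fraction | exponent_bits); // in [1, 2)
    val - (1.0 - std::f64::EPSILON / 2.0)
}

fn main() {
    // The extremes reproduce the edge cases checked in the tests above:
    // 0 maps to 0.0 / ε/2, and !0 maps to 1 - ε/2 in both schemes.
    for &x in &[0u64, 1 << 63, u64::max_value()] {
        println!("{:20} -> standard {:.17}, open01 {:.17}",
                 x, u64_to_f64_standard(x), u64_to_f64_open01(x));
    }
}
```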
diff --git a/rand/src/distributions/gamma.rs b/rand/src/distributions/gamma.rs
index 2806495..43ac2bc 100644
--- a/rand/src/distributions/gamma.rs
+++ b/rand/src/distributions/gamma.rs
@@ -1,23 +1,20 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//
-// ignore-lexer-test FIXME #15679
//! The Gamma and derived distributions.
use self::GammaRepr::*;
use self::ChiSquaredRepr::*;
-use {Rng, Open01};
-use super::normal::StandardNormal;
-use super::{IndependentSample, Sample, Exp};
+use Rng;
+use distributions::normal::StandardNormal;
+use distributions::{Distribution, Exp, Open01};
/// The Gamma distribution `Gamma(shape, scale)` distribution.
///
@@ -30,25 +27,25 @@ use super::{IndependentSample, Sample, Exp};
/// where `Γ` is the Gamma function, `k` is the shape and `θ` is the
/// scale and both `k` and `θ` are strictly positive.
///
-/// The algorithm used is that described by Marsaglia & Tsang 2000[1],
+/// The algorithm used is that described by Marsaglia & Tsang 2000[^1],
/// falling back to directly sampling from an Exponential for `shape
-/// == 1`, and using the boosting technique described in [1] for
+/// == 1`, and using the boosting technique described in that paper for
/// `shape < 1`.
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{IndependentSample, Gamma};
+/// ```
+/// use rand::distributions::{Distribution, Gamma};
///
/// let gamma = Gamma::new(2.0, 5.0);
-/// let v = gamma.ind_sample(&mut rand::thread_rng());
+/// let v = gamma.sample(&mut rand::thread_rng());
/// println!("{} is from a Gamma(2, 5) distribution", v);
/// ```
///
-/// [1]: George Marsaglia and Wai Wan Tsang. 2000. "A Simple Method
-/// for Generating Gamma Variables" *ACM Trans. Math. Softw.* 26, 3
-/// (September 2000),
-/// 363-372. DOI:[10.1145/358407.358414](http://doi.acm.org/10.1145/358407.358414)
+/// [^1]: George Marsaglia and Wai Wan Tsang. 2000. "A Simple Method for
+/// Generating Gamma Variables" *ACM Trans. Math. Softw.* 26, 3
+/// (September 2000), 363-372.
+/// DOI:[10.1145/358407.358414](https://doi.acm.org/10.1145/358407.358414)
#[derive(Clone, Copy, Debug)]
pub struct Gamma {
repr: GammaRepr,
@@ -109,7 +106,7 @@ impl Gamma {
} else {
Large(GammaLargeShape::new_raw(shape, scale))
};
- Gamma { repr: repr }
+ Gamma { repr }
}
}
@@ -126,50 +123,40 @@ impl GammaLargeShape {
fn new_raw(shape: f64, scale: f64) -> GammaLargeShape {
let d = shape - 1. / 3.;
GammaLargeShape {
- scale: scale,
+ scale,
c: 1. / (9. * d).sqrt(),
- d: d
+ d
}
}
}
-impl Sample<f64> for Gamma {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl Sample<f64> for GammaSmallShape {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl Sample<f64> for GammaLargeShape {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-
-impl IndependentSample<f64> for Gamma {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
+impl Distribution<f64> for Gamma {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
match self.repr {
- Small(ref g) => g.ind_sample(rng),
- One(ref g) => g.ind_sample(rng),
- Large(ref g) => g.ind_sample(rng),
+ Small(ref g) => g.sample(rng),
+ One(ref g) => g.sample(rng),
+ Large(ref g) => g.sample(rng),
}
}
}
-impl IndependentSample<f64> for GammaSmallShape {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
- let Open01(u) = rng.gen::<Open01<f64>>();
+impl Distribution<f64> for GammaSmallShape {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let u: f64 = rng.sample(Open01);
- self.large_shape.ind_sample(rng) * u.powf(self.inv_shape)
+ self.large_shape.sample(rng) * u.powf(self.inv_shape)
}
}
-impl IndependentSample<f64> for GammaLargeShape {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
+impl Distribution<f64> for GammaLargeShape {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
loop {
- let StandardNormal(x) = rng.gen::<StandardNormal>();
+ let x = rng.sample(StandardNormal);
let v_cbrt = 1.0 + self.c * x;
if v_cbrt <= 0.0 { // a^3 <= 0 iff a <= 0
continue
}
let v = v_cbrt * v_cbrt * v_cbrt;
- let Open01(u) = rng.gen::<Open01<f64>>();
+ let u: f64 = rng.sample(Open01);
let x_sqr = x * x;
if u < 1.0 - 0.0331 * x_sqr * x_sqr ||
@@ -190,11 +177,11 @@ impl IndependentSample<f64> for GammaLargeShape {
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{ChiSquared, IndependentSample};
+/// ```
+/// use rand::distributions::{ChiSquared, Distribution};
///
/// let chi = ChiSquared::new(11.0);
-/// let v = chi.ind_sample(&mut rand::thread_rng());
+/// let v = chi.sample(&mut rand::thread_rng());
/// println!("{} is from a χ²(11) distribution", v)
/// ```
#[derive(Clone, Copy, Debug)]
@@ -221,21 +208,18 @@ impl ChiSquared {
assert!(k > 0.0, "ChiSquared::new called with `k` < 0");
DoFAnythingElse(Gamma::new(0.5 * k, 2.0))
};
- ChiSquared { repr: repr }
+ ChiSquared { repr }
}
}
-impl Sample<f64> for ChiSquared {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl IndependentSample<f64> for ChiSquared {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
+impl Distribution<f64> for ChiSquared {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
match self.repr {
DoFExactlyOne => {
// k == 1 => N(0,1)^2
- let StandardNormal(norm) = rng.gen::<StandardNormal>();
+ let norm = rng.sample(StandardNormal);
norm * norm
}
- DoFAnythingElse(ref g) => g.ind_sample(rng)
+ DoFAnythingElse(ref g) => g.sample(rng)
}
}
}
@@ -248,11 +232,11 @@ impl IndependentSample<f64> for ChiSquared {
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{FisherF, IndependentSample};
+/// ```
+/// use rand::distributions::{FisherF, Distribution};
///
/// let f = FisherF::new(2.0, 32.0);
-/// let v = f.ind_sample(&mut rand::thread_rng());
+/// let v = f.sample(&mut rand::thread_rng());
/// println!("{} is from an F(2, 32) distribution", v)
/// ```
#[derive(Clone, Copy, Debug)]
@@ -278,12 +262,9 @@ impl FisherF {
}
}
}
-impl Sample<f64> for FisherF {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl IndependentSample<f64> for FisherF {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
- self.numer.ind_sample(rng) / self.denom.ind_sample(rng) * self.dof_ratio
+impl Distribution<f64> for FisherF {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ self.numer.sample(rng) / self.denom.sample(rng) * self.dof_ratio
}
}
@@ -292,11 +273,11 @@ impl IndependentSample<f64> for FisherF {
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{StudentT, IndependentSample};
+/// ```
+/// use rand::distributions::{StudentT, Distribution};
///
/// let t = StudentT::new(11.0);
-/// let v = t.ind_sample(&mut rand::thread_rng());
+/// let v = t.sample(&mut rand::thread_rng());
/// println!("{} is from a t(11) distribution", v)
/// ```
#[derive(Clone, Copy, Debug)]
@@ -316,46 +297,79 @@ impl StudentT {
}
}
}
-impl Sample<f64> for StudentT {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
+impl Distribution<f64> for StudentT {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let norm = rng.sample(StandardNormal);
+ norm * (self.dof / self.chi.sample(rng)).sqrt()
+ }
+}
+
+/// The Beta distribution with shape parameters `alpha` and `beta`.
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{Distribution, Beta};
+///
+/// let beta = Beta::new(2.0, 5.0);
+/// let v = beta.sample(&mut rand::thread_rng());
+/// println!("{} is from a Beta(2, 5) distribution", v);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Beta {
+ gamma_a: Gamma,
+ gamma_b: Gamma,
+}
+
+impl Beta {
+ /// Construct an object representing the `Beta(alpha, beta)`
+ /// distribution.
+ ///
+ /// Panics if `alpha <= 0` or `beta <= 0`.
+ pub fn new(alpha: f64, beta: f64) -> Beta {
+ assert!((alpha > 0.) & (beta > 0.));
+ Beta {
+ gamma_a: Gamma::new(alpha, 1.),
+ gamma_b: Gamma::new(beta, 1.),
+ }
+ }
}
-impl IndependentSample<f64> for StudentT {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
- let StandardNormal(norm) = rng.gen::<StandardNormal>();
- norm * (self.dof / self.chi.ind_sample(rng)).sqrt()
+
+impl Distribution<f64> for Beta {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let x = self.gamma_a.sample(rng);
+ let y = self.gamma_b.sample(rng);
+ x / (x + y)
}
}
#[cfg(test)]
mod test {
- use distributions::{Sample, IndependentSample};
- use super::{ChiSquared, StudentT, FisherF};
+ use distributions::Distribution;
+ use super::{Beta, ChiSquared, StudentT, FisherF};
#[test]
fn test_chi_squared_one() {
- let mut chi = ChiSquared::new(1.0);
- let mut rng = ::test::rng();
+ let chi = ChiSquared::new(1.0);
+ let mut rng = ::test::rng(201);
for _ in 0..1000 {
chi.sample(&mut rng);
- chi.ind_sample(&mut rng);
}
}
#[test]
fn test_chi_squared_small() {
- let mut chi = ChiSquared::new(0.5);
- let mut rng = ::test::rng();
+ let chi = ChiSquared::new(0.5);
+ let mut rng = ::test::rng(202);
for _ in 0..1000 {
chi.sample(&mut rng);
- chi.ind_sample(&mut rng);
}
}
#[test]
fn test_chi_squared_large() {
- let mut chi = ChiSquared::new(30.0);
- let mut rng = ::test::rng();
+ let chi = ChiSquared::new(30.0);
+ let mut rng = ::test::rng(203);
for _ in 0..1000 {
chi.sample(&mut rng);
- chi.ind_sample(&mut rng);
}
}
#[test]
@@ -366,21 +380,34 @@ mod test {
#[test]
fn test_f() {
- let mut f = FisherF::new(2.0, 32.0);
- let mut rng = ::test::rng();
+ let f = FisherF::new(2.0, 32.0);
+ let mut rng = ::test::rng(204);
for _ in 0..1000 {
f.sample(&mut rng);
- f.ind_sample(&mut rng);
}
}
#[test]
fn test_t() {
- let mut t = StudentT::new(11.0);
- let mut rng = ::test::rng();
+ let t = StudentT::new(11.0);
+ let mut rng = ::test::rng(205);
for _ in 0..1000 {
t.sample(&mut rng);
- t.ind_sample(&mut rng);
}
}
+
+ #[test]
+ fn test_beta() {
+ let beta = Beta::new(1.0, 2.0);
+ let mut rng = ::test::rng(201);
+ for _ in 0..1000 {
+ beta.sample(&mut rng);
+ }
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_beta_invalid_dof() {
+ Beta::new(0., 0.);
+ }
}
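`GammaLargeShape::sample` above is the core of the Marsaglia-Tsang method: propose `d·v` from a cubed, shifted normal draw, then accept using a cheap squeeze test followed by an exact log test. A sketch of one proposal step written as a pure function of its two random inputs (a standard normal draw `x` and an `Open01` uniform `u`); it assumes `shape >= 1` and leaves out the `GammaSmallShape` boosting and the `shape == 1` exponential fallback. The function name and the fixed inputs are illustrative.

```rust
// One Marsaglia-Tsang proposal for Gamma(shape, scale), shape >= 1.
// Returns None when the proposal is rejected and fresh draws are needed.
fn gamma_mt_step(shape: f64, scale: f64, x: f64, u: f64) -> Option<f64> {
    let d = shape - 1.0 / 3.0;
    let c = 1.0 / (9.0 * d).sqrt();

    let v_cbrt = 1.0 + c * x;
    if v_cbrt <= 0.0 {
        return None; // v would be non-positive
    }
    let v = v_cbrt * v_cbrt * v_cbrt;
    let x_sqr = x * x;
    // Squeeze test first, then the exact log test.
    if u < 1.0 - 0.0331 * x_sqr * x_sqr
        || u.ln() < 0.5 * x_sqr + d * (1.0 - v + v.ln())
    {
        Some(d * v * scale)
    } else {
        None
    }
}

fn main() {
    // Fixed (normal, uniform) pairs purely for illustration.
    for &(x, u) in &[(0.2, 0.5), (1.5, 0.9), (-3.5, 0.99)] {
        println!("x = {:>4}, u = {:>4} -> {:?}", x, u, gamma_mt_step(2.0, 5.0, x, u));
    }
}
```

The new `Beta` sampler in the same file builds directly on this: it draws `x ~ Gamma(alpha, 1)` and `y ~ Gamma(beta, 1)` and returns `x / (x + y)`.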
diff --git a/rand/src/distributions/integer.rs b/rand/src/distributions/integer.rs
new file mode 100644
index 0000000..4e6604d
--- /dev/null
+++ b/rand/src/distributions/integer.rs
@@ -0,0 +1,161 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The implementations of the `Standard` distribution for integer types.
+
+use {Rng};
+use distributions::{Distribution, Standard};
+#[cfg(feature="simd_support")]
+use packed_simd::*;
+#[cfg(all(target_arch = "x86", feature="nightly"))]
+use core::arch::x86::*;
+#[cfg(all(target_arch = "x86_64", feature="nightly"))]
+use core::arch::x86_64::*;
+
+impl Distribution<u8> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u8 {
+ rng.next_u32() as u8
+ }
+}
+
+impl Distribution<u16> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u16 {
+ rng.next_u32() as u16
+ }
+}
+
+impl Distribution<u32> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u32 {
+ rng.next_u32()
+ }
+}
+
+impl Distribution<u64> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
+ rng.next_u64()
+ }
+}
+
+#[cfg(rust_1_26)]
+impl Distribution<u128> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u128 {
+ // Use LE; we explicitly generate one value before the next.
+ let x = rng.next_u64() as u128;
+ let y = rng.next_u64() as u128;
+ (y << 64) | x
+ }
+}
+
+impl Distribution<usize> for Standard {
+ #[inline]
+ #[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
+ rng.next_u32() as usize
+ }
+
+ #[inline]
+ #[cfg(target_pointer_width = "64")]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
+ rng.next_u64() as usize
+ }
+}
+
+macro_rules! impl_int_from_uint {
+ ($ty:ty, $uty:ty) => {
+ impl Distribution<$ty> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+ rng.gen::<$uty>() as $ty
+ }
+ }
+ }
+}
+
+impl_int_from_uint! { i8, u8 }
+impl_int_from_uint! { i16, u16 }
+impl_int_from_uint! { i32, u32 }
+impl_int_from_uint! { i64, u64 }
+#[cfg(rust_1_26)] impl_int_from_uint! { i128, u128 }
+impl_int_from_uint! { isize, usize }
+
+#[cfg(feature="simd_support")]
+macro_rules! simd_impl {
+ ($(($intrinsic:ident, $vec:ty),)+) => {$(
+ impl Distribution<$intrinsic> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $intrinsic {
+ $intrinsic::from_bits(rng.gen::<$vec>())
+ }
+ }
+ )+};
+
+ ($bits:expr,) => {};
+ ($bits:expr, $ty:ty, $($ty_more:ty,)*) => {
+ simd_impl!($bits, $($ty_more,)*);
+
+ impl Distribution<$ty> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+ let mut vec: $ty = Default::default();
+ unsafe {
+ let ptr = &mut vec;
+ let b_ptr = &mut *(ptr as *mut $ty as *mut [u8; $bits/8]);
+ rng.fill_bytes(b_ptr);
+ }
+ vec.to_le()
+ }
+ }
+ };
+}
+
+#[cfg(feature="simd_support")]
+simd_impl!(16, u8x2, i8x2,);
+#[cfg(feature="simd_support")]
+simd_impl!(32, u8x4, i8x4, u16x2, i16x2,);
+#[cfg(feature="simd_support")]
+simd_impl!(64, u8x8, i8x8, u16x4, i16x4, u32x2, i32x2,);
+#[cfg(feature="simd_support")]
+simd_impl!(128, u8x16, i8x16, u16x8, i16x8, u32x4, i32x4, u64x2, i64x2,);
+#[cfg(feature="simd_support")]
+simd_impl!(256, u8x32, i8x32, u16x16, i16x16, u32x8, i32x8, u64x4, i64x4,);
+#[cfg(feature="simd_support")]
+simd_impl!(512, u8x64, i8x64, u16x32, i16x32, u32x16, i32x16, u64x8, i64x8,);
+#[cfg(all(feature="simd_support", feature="nightly", any(target_arch="x86", target_arch="x86_64")))]
+simd_impl!((__m64, u8x8), (__m128i, u8x16), (__m256i, u8x32),);
+
+#[cfg(test)]
+mod tests {
+ use Rng;
+ use distributions::{Standard};
+
+ #[test]
+ fn test_integers() {
+ let mut rng = ::test::rng(806);
+
+ rng.sample::<isize, _>(Standard);
+ rng.sample::<i8, _>(Standard);
+ rng.sample::<i16, _>(Standard);
+ rng.sample::<i32, _>(Standard);
+ rng.sample::<i64, _>(Standard);
+ #[cfg(rust_1_26)]
+ rng.sample::<i128, _>(Standard);
+
+ rng.sample::<usize, _>(Standard);
+ rng.sample::<u8, _>(Standard);
+ rng.sample::<u16, _>(Standard);
+ rng.sample::<u32, _>(Standard);
+ rng.sample::<u64, _>(Standard);
+ #[cfg(rust_1_26)]
+ rng.sample::<u128, _>(Standard);
+ }
+}
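The `u128` implementation above (gated on `rust_1_26`) builds one value from two `u64` draws, low half first. A one-liner sketch with fixed inputs standing in for the two RNG calls:

```rust
// Compose a u128 from two u64 halves, low word first, mirroring the
// Distribution<u128> impl above. Inputs are fixed for illustration.
fn u128_from_two_u64(low: u64, high: u64) -> u128 {
    ((high as u128) << 64) | low as u128
}

fn main() {
    let x = u128_from_two_u64(0x1111_2222_3333_4444, 0xaaaa_bbbb_cccc_dddd);
    println!("{:032x}", x); // aaaabbbbccccdddd1111222233334444
}
```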
diff --git a/rand/src/distributions/mod.rs b/rand/src/distributions/mod.rs
index 5de8efb..160cd31 100644
--- a/rand/src/distributions/mod.rs
+++ b/rand/src/distributions/mod.rs
@@ -1,94 +1,394 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Sampling from random distributions.
+//! Generating random samples from probability distributions.
//!
-//! This is a generalization of `Rand` to allow parameters to control the
-//! exact properties of the generated values, e.g. the mean and standard
-//! deviation of a normal distribution. The `Sample` trait is the most
-//! general, and allows for generating values that change some state
-//! internally. The `IndependentSample` trait is for generating values
-//! that do not need to record state.
-
-use core::marker;
-
-use {Rng, Rand};
-
-pub use self::range::Range;
-#[cfg(feature="std")]
-pub use self::gamma::{Gamma, ChiSquared, FisherF, StudentT};
-#[cfg(feature="std")]
-pub use self::normal::{Normal, LogNormal};
-#[cfg(feature="std")]
-pub use self::exponential::Exp;
-
-pub mod range;
-#[cfg(feature="std")]
-pub mod gamma;
-#[cfg(feature="std")]
-pub mod normal;
-#[cfg(feature="std")]
-pub mod exponential;
-
-#[cfg(feature="std")]
-mod ziggurat_tables;
-
-/// Types that can be used to create a random instance of `Support`.
-pub trait Sample<Support> {
- /// Generate a random value of `Support`, using `rng` as the
- /// source of randomness.
- fn sample<R: Rng>(&mut self, rng: &mut R) -> Support;
-}
-
-/// `Sample`s that do not require keeping track of state.
+//! This module is the home of the [`Distribution`] trait and several of its
+//! implementations. It is the workhorse behind some of the convenient
+//! functionality of the [`Rng`] trait, including [`gen`], [`gen_range`] and
+//! of course [`sample`].
+//!
+//! Abstractly, a [probability distribution] describes the probability of
+//! occurrence of each value in its sample space.
+//!
+//! More concretely, an implementation of `Distribution<T>` for type `X` is an
+//! algorithm for choosing values from the sample space (a subset of `T`)
+//! according to the distribution `X` represents, using an external source of
+//! randomness (an RNG supplied to the `sample` function).
+//!
+//! A type `X` may implement `Distribution<T>` for multiple types `T`.
+//! Any type implementing [`Distribution`] is stateless (i.e. immutable),
+//! but it may have internal parameters set at construction time (for example,
+//! [`Uniform`] allows specification of its sample space as a range within `T`).
+//!
+//!
+//! # The `Standard` distribution
+//!
+//! The [`Standard`] distribution is important to mention. This is the
+//! distribution used by [`Rng::gen()`] and represents the "default" way to
+//! produce a random value for many different types, including most primitive
+//! types, tuples, arrays, and a few derived types. See the documentation of
+//! [`Standard`] for more details.
+//!
+//! Implementing `Distribution<T>` for [`Standard`] for user types `T` makes it
+//! possible to generate type `T` with [`Rng::gen()`], and by extension also
+//! with the [`random()`] function.
+//!
+//!
+//! # Distribution to sample from a `Uniform` range
+//!
+//! The [`Uniform`] distribution is more flexible than [`Standard`], but also
+//! more specialised: it supports fewer target types, but allows the sample
+//! space to be specified as an arbitrary range within its target type `T`.
+//! Both [`Standard`] and [`Uniform`] are in some sense uniform distributions.
+//!
+//! Values may be sampled from this distribution using [`Rng::gen_range`] or
+//! by creating a distribution object with [`Uniform::new`],
+//! [`Uniform::new_inclusive`] or `From<Range>`. When the range limits are not
+//! known at compile time it is typically faster to reuse an existing
+//! distribution object than to call [`Rng::gen_range`].
+//!
+//! User types `T` may also implement `Distribution<T>` for [`Uniform`],
+//! although this is less straightforward than for [`Standard`] (see the
+//! documentation in the [`uniform` module]). Doing so enables generation of
+//! values of type `T` with [`Rng::gen_range`].
+//!
+//!
+//! # Other distributions
+//!
+//! There are surprisingly many ways to uniformly generate random floats. A
+//! range between 0 and 1 is standard, but the exact bounds (open vs closed)
+//! and accuracy differ. In addition to the [`Standard`] distribution Rand offers
+//! [`Open01`] and [`OpenClosed01`]. See [Floating point implementation] for
+//! more details.
+//!
+//! [`Alphanumeric`] is a simple distribution to sample random letters and
+//! numbers of the `char` type; in contrast [`Standard`] may sample any valid
+//! `char`.
+//!
+//! [`WeightedIndex`] can be used to do weighted sampling from a set of items,
+//! such as from an array.
+//!
+//! # Non-uniform probability distributions
+//!
+//! Rand currently provides the following probability distributions:
+//!
+//! - Related to real-valued quantities that grow linearly
+//!   (e.g. errors, offsets):
+//!   - [`Normal`] distribution, and [`StandardNormal`] as a primitive
+//!   - [`Cauchy`] distribution
+//! - Related to Bernoulli trials (yes/no events, with a given probability):
+//!   - [`Binomial`] distribution
+//!   - [`Bernoulli`] distribution, similar to [`Rng::gen_bool`].
+//! - Related to positive real-valued quantities that grow exponentially
+//!   (e.g. prices, incomes, populations):
+//!   - [`LogNormal`] distribution
+//! - Related to the occurrence of independent events at a given rate:
+//!   - [`Pareto`] distribution
+//!   - [`Poisson`] distribution
+//!   - [`Exp`]onential distribution, and [`Exp1`] as a primitive
+//!   - [`Weibull`] distribution
+//! - Gamma and derived distributions:
+//!   - [`Gamma`] distribution
+//!   - [`ChiSquared`] distribution
+//!   - [`StudentT`] distribution
+//!   - [`FisherF`] distribution
+//!   - [`Beta`] distribution
+//! - Triangular distribution:
+//!   - [`Triangular`] distribution
+//! - Multivariate probability distributions
+//!   - [`Dirichlet`] distribution
+//!   - [`UnitSphereSurface`] distribution
+//!   - [`UnitCircle`] distribution
+//!
+//! # Examples
+//!
+//! Sampling from a distribution:
+//!
+//! ```
+//! use rand::{thread_rng, Rng};
+//! use rand::distributions::Exp;
+//!
+//! let exp = Exp::new(2.0);
+//! let v = thread_rng().sample(exp);
+//! println!("{} is from an Exp(2) distribution", v);
+//! ```
+//!
+//! Implementing the [`Standard`] distribution for a user type:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! use rand::Rng;
+//! use rand::distributions::{Distribution, Standard};
+//!
+//! struct MyF32 {
+//! x: f32,
+//! }
+//!
+//! impl Distribution<MyF32> for Standard {
+//! fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> MyF32 {
+//! MyF32 { x: rng.gen() }
+//! }
+//! }
+//! ```
+//!
+//!
+//! [probability distribution]: https://en.wikipedia.org/wiki/Probability_distribution
+//! [`Distribution`]: trait.Distribution.html
+//! [`gen_range`]: ../trait.Rng.html#method.gen_range
+//! [`gen`]: ../trait.Rng.html#method.gen
+//! [`sample`]: ../trait.Rng.html#method.sample
+//! [`new_inclusive`]: struct.Uniform.html#method.new_inclusive
+//! [`random()`]: ../fn.random.html
+//! [`Rng::gen_bool`]: ../trait.Rng.html#method.gen_bool
+//! [`Rng::gen_range`]: ../trait.Rng.html#method.gen_range
+//! [`Rng::gen()`]: ../trait.Rng.html#method.gen
+//! [`Rng`]: ../trait.Rng.html
+//! [`uniform` module]: uniform/index.html
+//! [Floating point implementation]: struct.Standard.html#floating-point-implementation
+// distributions
+//! [`Alphanumeric`]: struct.Alphanumeric.html
+//! [`Bernoulli`]: struct.Bernoulli.html
+//! [`Beta`]: struct.Beta.html
+//! [`Binomial`]: struct.Binomial.html
+//! [`Cauchy`]: struct.Cauchy.html
+//! [`ChiSquared`]: struct.ChiSquared.html
+//! [`Dirichlet`]: struct.Dirichlet.html
+//! [`Exp`]: struct.Exp.html
+//! [`Exp1`]: struct.Exp1.html
+//! [`FisherF`]: struct.FisherF.html
+//! [`Gamma`]: struct.Gamma.html
+//! [`LogNormal`]: struct.LogNormal.html
+//! [`Normal`]: struct.Normal.html
+//! [`Open01`]: struct.Open01.html
+//! [`OpenClosed01`]: struct.OpenClosed01.html
+//! [`Pareto`]: struct.Pareto.html
+//! [`Poisson`]: struct.Poisson.html
+//! [`Standard`]: struct.Standard.html
+//! [`StandardNormal`]: struct.StandardNormal.html
+//! [`StudentT`]: struct.StudentT.html
+//! [`Triangular`]: struct.Triangular.html
+//! [`Uniform`]: struct.Uniform.html
+//! [`Uniform::new`]: struct.Uniform.html#method.new
+//! [`Uniform::new_inclusive`]: struct.Uniform.html#method.new_inclusive
+//! [`UnitSphereSurface`]: struct.UnitSphereSurface.html
+//! [`UnitCircle`]: struct.UnitCircle.html
+//! [`Weibull`]: struct.Weibull.html
+//! [`WeightedIndex`]: struct.WeightedIndex.html
+
+#[cfg(any(rust_1_26, feature="nightly"))]
+use core::iter;
+use Rng;
+
+pub use self::other::Alphanumeric;
+#[doc(inline)] pub use self::uniform::Uniform;
+pub use self::float::{OpenClosed01, Open01};
+pub use self::bernoulli::Bernoulli;
+#[cfg(feature="alloc")] pub use self::weighted::{WeightedIndex, WeightedError};
+#[cfg(feature="std")] pub use self::unit_sphere::UnitSphereSurface;
+#[cfg(feature="std")] pub use self::unit_circle::UnitCircle;
+#[cfg(feature="std")] pub use self::gamma::{Gamma, ChiSquared, FisherF,
+ StudentT, Beta};
+#[cfg(feature="std")] pub use self::normal::{Normal, LogNormal, StandardNormal};
+#[cfg(feature="std")] pub use self::exponential::{Exp, Exp1};
+#[cfg(feature="std")] pub use self::pareto::Pareto;
+#[cfg(feature="std")] pub use self::poisson::Poisson;
+#[cfg(feature="std")] pub use self::binomial::Binomial;
+#[cfg(feature="std")] pub use self::cauchy::Cauchy;
+#[cfg(feature="std")] pub use self::dirichlet::Dirichlet;
+#[cfg(feature="std")] pub use self::triangular::Triangular;
+#[cfg(feature="std")] pub use self::weibull::Weibull;
+
+pub mod uniform;
+mod bernoulli;
+#[cfg(feature="alloc")] mod weighted;
+#[cfg(feature="std")] mod unit_sphere;
+#[cfg(feature="std")] mod unit_circle;
+#[cfg(feature="std")] mod gamma;
+#[cfg(feature="std")] mod normal;
+#[cfg(feature="std")] mod exponential;
+#[cfg(feature="std")] mod pareto;
+#[cfg(feature="std")] mod poisson;
+#[cfg(feature="std")] mod binomial;
+#[cfg(feature="std")] mod cauchy;
+#[cfg(feature="std")] mod dirichlet;
+#[cfg(feature="std")] mod triangular;
+#[cfg(feature="std")] mod weibull;
+
+mod float;
+mod integer;
+mod other;
+mod utils;
+#[cfg(feature="std")] mod ziggurat_tables;
+
+/// Types (distributions) that can be used to create a random instance of `T`.
+///
+/// It is possible to sample from a distribution through both the
+/// `Distribution` and [`Rng`] traits, via `distr.sample(&mut rng)` and
+/// `rng.sample(distr)`. They also both offer the [`sample_iter`] method, which
+/// produces an iterator that samples from the distribution.
+///
+/// All implementations are expected to be immutable; this has the significant
+/// advantage of not needing to consider thread safety, and for most
+/// distributions efficient state-less sampling algorithms are available.
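+///
+/// # Example
+///
+/// A minimal sketch showing both call styles:
+///
+/// ```
+/// use rand::{thread_rng, Rng};
+/// use rand::distributions::{Distribution, Uniform};
+///
+/// let mut rng = thread_rng();
+/// let die = Uniform::new_inclusive(1, 6);
+/// // These two calls are equivalent ways to draw one sample:
+/// let a = die.sample(&mut rng);
+/// let b = rng.sample(die);
+/// assert!(a >= 1 && a <= 6 && b >= 1 && b <= 6);
+/// ```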
///
-/// Since no state is recorded, each sample is (statistically)
-/// independent of all others, assuming the `Rng` used has this
-/// property.
-// FIXME maybe having this separate is overkill (the only reason is to
-// take &self rather than &mut self)? or maybe this should be the
-// trait called `Sample` and the other should be `DependentSample`.
-pub trait IndependentSample<Support>: Sample<Support> {
- /// Generate a random value.
- fn ind_sample<R: Rng>(&self, &mut R) -> Support;
+/// [`Rng`]: ../trait.Rng.html
+/// [`sample_iter`]: trait.Distribution.html#method.sample_iter
+pub trait Distribution<T> {
+ /// Generate a random value of `T`, using `rng` as the source of randomness.
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T;
+
+ /// Create an iterator that generates random values of `T`, using `rng` as
+ /// the source of randomness.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use rand::thread_rng;
+ /// use rand::distributions::{Distribution, Alphanumeric, Uniform, Standard};
+ ///
+ /// let mut rng = thread_rng();
+ ///
+ /// // Vec of 16 x f32:
+ /// let v: Vec<f32> = Standard.sample_iter(&mut rng).take(16).collect();
+ ///
+ /// // String:
+ /// let s: String = Alphanumeric.sample_iter(&mut rng).take(7).collect();
+ ///
+ /// // Dice-rolling:
+ /// let die_range = Uniform::new_inclusive(1, 6);
+ /// let mut roll_die = die_range.sample_iter(&mut rng);
+ /// while roll_die.next().unwrap() != 6 {
+ /// println!("Not a 6; rolling again!");
+ /// }
+ /// ```
+ fn sample_iter<'a, R>(&'a self, rng: &'a mut R) -> DistIter<'a, Self, R, T>
+ where Self: Sized, R: Rng
+ {
+ DistIter {
+ distr: self,
+ rng: rng,
+ phantom: ::core::marker::PhantomData,
+ }
+ }
}
-/// A wrapper for generating types that implement `Rand` via the
-/// `Sample` & `IndependentSample` traits.
-#[derive(Debug)]
-pub struct RandSample<Sup> {
- _marker: marker::PhantomData<fn() -> Sup>,
+impl<'a, T, D: Distribution<T>> Distribution<T> for &'a D {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T {
+ (*self).sample(rng)
+ }
}
-impl<Sup> Copy for RandSample<Sup> {}
-impl<Sup> Clone for RandSample<Sup> {
- fn clone(&self) -> Self { *self }
-}
-impl<Sup: Rand> Sample<Sup> for RandSample<Sup> {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> Sup { self.ind_sample(rng) }
+/// An iterator that generates random values of `T` with distribution `D`,
+/// using `R` as the source of randomness.
+///
+/// This `struct` is created by the [`sample_iter`] method on [`Distribution`].
+/// See its documentation for more.
+///
+/// [`Distribution`]: trait.Distribution.html
+/// [`sample_iter`]: trait.Distribution.html#method.sample_iter
+#[derive(Debug)]
+pub struct DistIter<'a, D: 'a, R: 'a, T> {
+ distr: &'a D,
+ rng: &'a mut R,
+ phantom: ::core::marker::PhantomData<T>,
}
-impl<Sup: Rand> IndependentSample<Sup> for RandSample<Sup> {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> Sup {
- rng.gen()
+impl<'a, D, R, T> Iterator for DistIter<'a, D, R, T>
+ where D: Distribution<T>, R: Rng + 'a
+{
+ type Item = T;
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<T> {
+ Some(self.distr.sample(self.rng))
}
-}
-impl<Sup> RandSample<Sup> {
- pub fn new() -> RandSample<Sup> {
- RandSample { _marker: marker::PhantomData }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (usize::max_value(), None)
}
}
+#[cfg(rust_1_26)]
+impl<'a, D, R, T> iter::FusedIterator for DistIter<'a, D, R, T>
+ where D: Distribution<T>, R: Rng + 'a {}
+
+#[cfg(feature = "nightly")]
+impl<'a, D, R, T> iter::TrustedLen for DistIter<'a, D, R, T>
+ where D: Distribution<T>, R: Rng + 'a {}
+
+
+/// A generic random value distribution, implemented for many primitive types.
+/// Usually generates values with a numerically uniform distribution, and with a
+/// range appropriate to the type.
+///
+/// ## Built-in Implementations
+///
+/// Assuming the provided `Rng` is well-behaved, these implementations
+/// generate values with the following ranges and distributions:
+///
+/// * Integers (`i32`, `u32`, `isize`, `usize`, etc.): Uniformly distributed
+/// over all values of the type.
+/// * `char`: Uniformly distributed over all Unicode scalar values, i.e. all
+/// code points in the range `0...0x10_FFFF`, except for the range
+/// `0xD800...0xDFFF` (the surrogate code points). This includes
+/// unassigned/reserved code points.
+/// * `bool`: Generates `false` or `true`, each with probability 0.5.
+/// * Floating point types (`f32` and `f64`): Uniformly distributed in the
+/// half-open range `[0, 1)`. See notes below.
+/// * Wrapping integers (`Wrapping<T>`): sampled identically to the wrapped
+///   integer type.
+///
+/// The following aggregate types also implement the distribution `Standard` as
+/// long as their component types implement it:
+///
+/// * Tuples and arrays: Each element of the tuple or array is generated
+/// independently, using the `Standard` distribution recursively.
+/// * `Option<T>` where `Standard` is implemented for `T`: Returns `None` with
+/// probability 0.5; otherwise generates a random `x: T` and returns `Some(x)`.
+///
+/// # Example
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::Standard;
+///
+/// let val: f32 = SmallRng::from_entropy().sample(Standard);
+/// println!("f32 from [0, 1): {}", val);
+/// ```
+///
+/// # Floating point implementation
+/// The floating point implementations for `Standard` generate a random value in
+/// the half-open interval `[0, 1)`, i.e. including 0 but not 1.
+///
+/// All values that can be generated are of the form `n * ε/2`. For `f32`
+/// the 23 most significant random bits of a `u32` are used and for `f64` the
+/// 53 most significant bits of a `u64` are used. The conversion uses the
+/// multiplicative method: `(rng.gen::<$uty>() >> N) as $ty * (ε/2)`.
+///
+/// See also: [`Open01`] which samples from `(0, 1)`, [`OpenClosed01`] which
+/// samples from `(0, 1]` and `Rng::gen_range(0, 1)` which also samples from
+/// `[0, 1)`. Note that `Open01` and `gen_range` (which uses [`Uniform`]) use
+/// transmute-based methods which yield 1 bit less precision but may perform
+/// faster on some architectures (on modern Intel CPUs all methods have
+/// approximately equal performance).
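+///
+/// For illustration, the `f64` conversion described above is equivalent to the
+/// following sketch:
+///
+/// ```
+/// let bits: u64 = 0x0123_4567_89AB_CDEF; // stand-in for `rng.gen::<u64>()`
+/// let x = (bits >> 11) as f64 * (1.0 / (1u64 << 53) as f64);
+/// assert!(x >= 0.0 && x < 1.0);
+/// ```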
+///
+/// [`Open01`]: struct.Open01.html
+/// [`OpenClosed01`]: struct.OpenClosed01.html
+/// [`Uniform`]: uniform/struct.Uniform.html
+#[derive(Clone, Copy, Debug)]
+pub struct Standard;
+
+
/// A value with a particular weight for use with `WeightedChoice`.
+#[deprecated(since="0.6.0", note="use WeightedIndex instead")]
+#[allow(deprecated)]
#[derive(Copy, Clone, Debug)]
pub struct Weighted<T> {
/// The numerical weight of this item
@@ -99,35 +399,19 @@ pub struct Weighted<T> {
/// A distribution that selects from a finite collection of weighted items.
///
-/// Each item has an associated weight that influences how likely it
-/// is to be chosen: higher weight is more likely.
-///
-/// The `Clone` restriction is a limitation of the `Sample` and
-/// `IndependentSample` traits. Note that `&T` is (cheaply) `Clone` for
-/// all `T`, as is `u32`, so one can store references or indices into
-/// another vector.
-///
-/// # Example
-///
-/// ```rust
-/// use rand::distributions::{Weighted, WeightedChoice, IndependentSample};
+/// Deprecated: use [`WeightedIndex`] instead.
///
-/// let mut items = vec!(Weighted { weight: 2, item: 'a' },
-/// Weighted { weight: 4, item: 'b' },
-/// Weighted { weight: 1, item: 'c' });
-/// let wc = WeightedChoice::new(&mut items);
-/// let mut rng = rand::thread_rng();
-/// for _ in 0..16 {
-/// // on average prints 'a' 4 times, 'b' 8 and 'c' twice.
-/// println!("{}", wc.ind_sample(&mut rng));
-/// }
-/// ```
+/// [`WeightedIndex`]: struct.WeightedIndex.html
+#[deprecated(since="0.6.0", note="use WeightedIndex instead")]
+#[allow(deprecated)]
#[derive(Debug)]
pub struct WeightedChoice<'a, T:'a> {
items: &'a mut [Weighted<T>],
- weight_range: Range<u32>
+ weight_range: Uniform<u32>,
}
+#[deprecated(since="0.6.0", note="use WeightedIndex instead")]
+#[allow(deprecated)]
impl<'a, T: Clone> WeightedChoice<'a, T> {
/// Create a new `WeightedChoice`.
///
@@ -157,26 +441,24 @@ impl<'a, T: Clone> WeightedChoice<'a, T> {
assert!(running_total != 0, "WeightedChoice::new called with a total weight of 0");
WeightedChoice {
- items: items,
+ items,
// we're likely to be generating numbers in this range
// relatively often, so might as well cache it
- weight_range: Range::new(0, running_total)
+ weight_range: Uniform::new(0, running_total)
}
}
}
-impl<'a, T: Clone> Sample<T> for WeightedChoice<'a, T> {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> T { self.ind_sample(rng) }
-}
-
-impl<'a, T: Clone> IndependentSample<T> for WeightedChoice<'a, T> {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> T {
+#[deprecated(since="0.6.0", note="use WeightedIndex instead")]
+#[allow(deprecated)]
+impl<'a, T: Clone> Distribution<T> for WeightedChoice<'a, T> {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T {
// we want to find the first element that has cumulative
 // weight > sample_weight, which we do by binary search since the
// cumulative weights of self.items are sorted.
// choose a weight in [0, total_weight)
- let sample_weight = self.weight_range.ind_sample(rng);
+ let sample_weight = self.weight_range.sample(rng);
// short circuit when it's the first item
if sample_weight < self.items[0].weight {
@@ -208,163 +490,78 @@ impl<'a, T: Clone> IndependentSample<T> for WeightedChoice<'a, T> {
}
modifier /= 2;
}
- return self.items[idx + 1].item.clone();
- }
-}
-
-/// Sample a random number using the Ziggurat method (specifically the
-/// ZIGNOR variant from Doornik 2005). Most of the arguments are
-/// directly from the paper:
-///
-/// * `rng`: source of randomness
-/// * `symmetric`: whether this is a symmetric distribution, or one-sided with P(x < 0) = 0.
-/// * `X`: the $x_i$ abscissae.
-/// * `F`: precomputed values of the PDF at the $x_i$, (i.e. $f(x_i)$)
-/// * `F_DIFF`: precomputed values of $f(x_i) - f(x_{i+1})$
-/// * `pdf`: the probability density function
-/// * `zero_case`: manual sampling from the tail when we chose the
-/// bottom box (i.e. i == 0)
-
-// the perf improvement (25-50%) is definitely worth the extra code
-// size from force-inlining.
-#[cfg(feature="std")]
-#[inline(always)]
-fn ziggurat<R: Rng, P, Z>(
- rng: &mut R,
- symmetric: bool,
- x_tab: ziggurat_tables::ZigTable,
- f_tab: ziggurat_tables::ZigTable,
- mut pdf: P,
- mut zero_case: Z)
- -> f64 where P: FnMut(f64) -> f64, Z: FnMut(&mut R, f64) -> f64 {
- const SCALE: f64 = (1u64 << 53) as f64;
- loop {
- // reimplement the f64 generation as an optimisation suggested
- // by the Doornik paper: we have a lot of precision-space
- // (i.e. there are 11 bits of the 64 of a u64 to use after
- // creating a f64), so we might as well reuse some to save
- // generating a whole extra random number. (Seems to be 15%
- // faster.)
- //
- // This unfortunately misses out on the benefits of direct
- // floating point generation if an RNG like dSMFT is
- // used. (That is, such RNGs create floats directly, highly
- // efficiently and overload next_f32/f64, so by not calling it
- // this may be slower than it would be otherwise.)
- // FIXME: investigate/optimise for the above.
- let bits: u64 = rng.gen();
- let i = (bits & 0xff) as usize;
- let f = (bits >> 11) as f64 / SCALE;
-
- // u is either U(-1, 1) or U(0, 1) depending on if this is a
- // symmetric distribution or not.
- let u = if symmetric {2.0 * f - 1.0} else {f};
- let x = u * x_tab[i];
-
- let test_x = if symmetric { x.abs() } else {x};
-
- // algebraically equivalent to |u| < x_tab[i+1]/x_tab[i] (or u < x_tab[i+1]/x_tab[i])
- if test_x < x_tab[i + 1] {
- return x;
- }
- if i == 0 {
- return zero_case(rng, u);
- }
- // algebraically equivalent to f1 + DRanU()*(f0 - f1) < 1
- if f_tab[i + 1] + (f_tab[i] - f_tab[i + 1]) * rng.gen::<f64>() < pdf(x) {
- return x;
- }
+ self.items[idx + 1].item.clone()
}
}
#[cfg(test)]
mod tests {
+ use rngs::mock::StepRng;
+ #[allow(deprecated)]
+ use super::{WeightedChoice, Weighted, Distribution};
- use {Rng, Rand};
- use super::{RandSample, WeightedChoice, Weighted, Sample, IndependentSample};
-
- #[derive(PartialEq, Debug)]
- struct ConstRand(usize);
- impl Rand for ConstRand {
- fn rand<R: Rng>(_: &mut R) -> ConstRand {
- ConstRand(0)
- }
- }
-
- // 0, 1, 2, 3, ...
- struct CountingRng { i: u32 }
- impl Rng for CountingRng {
- fn next_u32(&mut self) -> u32 {
- self.i += 1;
- self.i - 1
- }
- fn next_u64(&mut self) -> u64 {
- self.next_u32() as u64
- }
- }
-
- #[test]
- fn test_rand_sample() {
- let mut rand_sample = RandSample::<ConstRand>::new();
-
- assert_eq!(rand_sample.sample(&mut ::test::rng()), ConstRand(0));
- assert_eq!(rand_sample.ind_sample(&mut ::test::rng()), ConstRand(0));
- }
#[test]
+ #[allow(deprecated)]
fn test_weighted_choice() {
// this makes assumptions about the internal implementation of
- // WeightedChoice, specifically: it doesn't reorder the items,
- // it doesn't do weird things to the RNG (so 0 maps to 0, 1 to
- // 1, internally; modulo a modulo operation).
+ // WeightedChoice. It may fail when the implementation in
+ // `distributions::uniform::UniformInt` changes.
macro_rules! t {
($items:expr, $expected:expr) => {{
let mut items = $items;
+ let mut total_weight = 0;
+ for item in &items { total_weight += item.weight; }
+
let wc = WeightedChoice::new(&mut items);
let expected = $expected;
- let mut rng = CountingRng { i: 0 };
+ // Use extremely large steps between the random numbers, because
+ // we test with small ranges and `UniformInt` is designed to prefer
+ // the most significant bits.
+ let mut rng = StepRng::new(0, !0 / (total_weight as u64));
for &val in expected.iter() {
- assert_eq!(wc.ind_sample(&mut rng), val)
+ assert_eq!(wc.sample(&mut rng), val)
}
}}
}
- t!(vec!(Weighted { weight: 1, item: 10}), [10]);
+ t!([Weighted { weight: 1, item: 10}], [10]);
// skip some
- t!(vec!(Weighted { weight: 0, item: 20},
- Weighted { weight: 2, item: 21},
- Weighted { weight: 0, item: 22},
- Weighted { weight: 1, item: 23}),
- [21,21, 23]);
+ t!([Weighted { weight: 0, item: 20},
+ Weighted { weight: 2, item: 21},
+ Weighted { weight: 0, item: 22},
+ Weighted { weight: 1, item: 23}],
+ [21, 21, 23]);
// different weights
- t!(vec!(Weighted { weight: 4, item: 30},
- Weighted { weight: 3, item: 31}),
- [30,30,30,30, 31,31,31]);
+ t!([Weighted { weight: 4, item: 30},
+ Weighted { weight: 3, item: 31}],
+ [30, 31, 30, 31, 30, 31, 30]);
// check that we're binary searching
// correctly with some vectors of odd
// length.
- t!(vec!(Weighted { weight: 1, item: 40},
- Weighted { weight: 1, item: 41},
- Weighted { weight: 1, item: 42},
- Weighted { weight: 1, item: 43},
- Weighted { weight: 1, item: 44}),
+ t!([Weighted { weight: 1, item: 40},
+ Weighted { weight: 1, item: 41},
+ Weighted { weight: 1, item: 42},
+ Weighted { weight: 1, item: 43},
+ Weighted { weight: 1, item: 44}],
[40, 41, 42, 43, 44]);
- t!(vec!(Weighted { weight: 1, item: 50},
- Weighted { weight: 1, item: 51},
- Weighted { weight: 1, item: 52},
- Weighted { weight: 1, item: 53},
- Weighted { weight: 1, item: 54},
- Weighted { weight: 1, item: 55},
- Weighted { weight: 1, item: 56}),
- [50, 51, 52, 53, 54, 55, 56]);
+ t!([Weighted { weight: 1, item: 50},
+ Weighted { weight: 1, item: 51},
+ Weighted { weight: 1, item: 52},
+ Weighted { weight: 1, item: 53},
+ Weighted { weight: 1, item: 54},
+ Weighted { weight: 1, item: 55},
+ Weighted { weight: 1, item: 56}],
+ [50, 54, 51, 55, 52, 56, 53]);
}
#[test]
+ #[allow(deprecated)]
fn test_weighted_clone_initialization() {
let initial : Weighted<u32> = Weighted {weight: 1, item: 1};
let clone = initial.clone();
@@ -373,6 +570,7 @@ mod tests {
}
#[test] #[should_panic]
+ #[allow(deprecated)]
fn test_weighted_clone_change_weight() {
let initial : Weighted<u32> = Weighted {weight: 1, item: 1};
let mut clone = initial.clone();
@@ -381,6 +579,7 @@ mod tests {
}
#[test] #[should_panic]
+ #[allow(deprecated)]
fn test_weighted_clone_change_item() {
let initial : Weighted<u32> = Weighted {weight: 1, item: 1};
let mut clone = initial.clone();
@@ -390,20 +589,33 @@ mod tests {
}
#[test] #[should_panic]
+ #[allow(deprecated)]
fn test_weighted_choice_no_items() {
WeightedChoice::<isize>::new(&mut []);
}
#[test] #[should_panic]
+ #[allow(deprecated)]
fn test_weighted_choice_zero_weight() {
WeightedChoice::new(&mut [Weighted { weight: 0, item: 0},
Weighted { weight: 0, item: 1}]);
}
#[test] #[should_panic]
+ #[allow(deprecated)]
fn test_weighted_choice_weight_overflows() {
- let x = ::std::u32::MAX / 2; // x + x + 2 is the overflow
+ let x = ::core::u32::MAX / 2; // x + x + 2 is the overflow
WeightedChoice::new(&mut [Weighted { weight: x, item: 0 },
Weighted { weight: 1, item: 1 },
Weighted { weight: x, item: 2 },
Weighted { weight: 1, item: 3 }]);
}
+
+ #[cfg(feature="std")]
+ #[test]
+ fn test_distributions_iter() {
+ use distributions::Normal;
+ let mut rng = ::test::rng(210);
+ let distr = Normal::new(10.0, 10.0);
+ let results: Vec<_> = distr.sample_iter(&mut rng).take(100).collect();
+ println!("{:?}", results);
+ }
}
diff --git a/rand/src/distributions/normal.rs b/rand/src/distributions/normal.rs
index 280613d..b8d632e 100644
--- a/rand/src/distributions/normal.rs
+++ b/rand/src/distributions/normal.rs
@@ -1,49 +1,50 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The normal and derived distributions.
-use {Rng, Rand, Open01};
-use distributions::{ziggurat, ziggurat_tables, Sample, IndependentSample};
+use Rng;
+use distributions::{ziggurat_tables, Distribution, Open01};
+use distributions::utils::ziggurat;
-/// A wrapper around an `f64` to generate N(0, 1) random numbers
-/// (a.k.a. a standard normal, or Gaussian).
+/// Samples floating-point numbers according to the normal distribution
+/// `N(0, 1)` (a.k.a. a standard normal, or Gaussian). This is equivalent to
+/// `Normal::new(0.0, 1.0)` but faster.
///
/// See `Normal` for the general normal distribution.
///
-/// Implemented via the ZIGNOR variant[1] of the Ziggurat method.
+/// Implemented via the ZIGNOR variant[^1] of the Ziggurat method.
///
-/// [1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to
-/// Generate Normal Random
-/// Samples*](http://www.doornik.com/research/ziggurat.pdf). Nuffield
-/// College, Oxford
+/// [^1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to
+/// Generate Normal Random Samples*](
+/// https://www.doornik.com/research/ziggurat.pdf).
+/// Nuffield College, Oxford
///
/// # Example
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::StandardNormal;
///
-/// ```rust
-/// use rand::distributions::normal::StandardNormal;
-///
-/// let StandardNormal(x) = rand::random();
-/// println!("{}", x);
+/// let val: f64 = SmallRng::from_entropy().sample(StandardNormal);
+/// println!("{}", val);
/// ```
#[derive(Clone, Copy, Debug)]
-pub struct StandardNormal(pub f64);
+pub struct StandardNormal;
-impl Rand for StandardNormal {
- fn rand<R:Rng>(rng: &mut R) -> StandardNormal {
+impl Distribution<f64> for StandardNormal {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
#[inline]
fn pdf(x: f64) -> f64 {
(-x*x/2.0).exp()
}
#[inline]
- fn zero_case<R:Rng>(rng: &mut R, u: f64) -> f64 {
+ fn zero_case<R: Rng + ?Sized>(rng: &mut R, u: f64) -> f64 {
// compute a random number in the tail by hand
// strange initial conditions, because the loop is not
@@ -54,8 +55,8 @@ impl Rand for StandardNormal {
let mut y = 0.0f64;
while -2.0 * y < x * x {
- let Open01(x_) = rng.gen::<Open01<f64>>();
- let Open01(y_) = rng.gen::<Open01<f64>>();
+ let x_: f64 = rng.sample(Open01);
+ let y_: f64 = rng.sample(Open01);
x = x_.ln() / ziggurat_tables::ZIG_NORM_R;
y = y_.ln();
@@ -64,30 +65,33 @@ impl Rand for StandardNormal {
if u < 0.0 { x - ziggurat_tables::ZIG_NORM_R } else { ziggurat_tables::ZIG_NORM_R - x }
}
- StandardNormal(ziggurat(
- rng,
- true, // this is symmetric
- &ziggurat_tables::ZIG_NORM_X,
- &ziggurat_tables::ZIG_NORM_F,
- pdf, zero_case))
+ ziggurat(rng, true, // this is symmetric
+ &ziggurat_tables::ZIG_NORM_X,
+ &ziggurat_tables::ZIG_NORM_F,
+ pdf, zero_case)
}
}
/// The normal distribution `N(mean, std_dev**2)`.
///
-/// This uses the ZIGNOR variant of the Ziggurat method, see
-/// `StandardNormal` for more details.
+/// This uses the ZIGNOR variant of the Ziggurat method, see [`StandardNormal`]
+/// for more details.
+///
+/// Note that [`StandardNormal`] is an optimised implementation for mean 0, and
+/// standard deviation 1.
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{Normal, IndependentSample};
+/// ```
+/// use rand::distributions::{Normal, Distribution};
///
/// // mean 2, standard deviation 3
/// let normal = Normal::new(2.0, 3.0);
-/// let v = normal.ind_sample(&mut rand::thread_rng());
+/// let v = normal.sample(&mut rand::thread_rng());
/// println!("{} is from a N(2, 9) distribution", v)
/// ```
+///
+/// [`StandardNormal`]: struct.StandardNormal.html
#[derive(Clone, Copy, Debug)]
pub struct Normal {
mean: f64,
@@ -105,17 +109,14 @@ impl Normal {
pub fn new(mean: f64, std_dev: f64) -> Normal {
assert!(std_dev >= 0.0, "Normal::new called with `std_dev` < 0");
Normal {
- mean: mean,
- std_dev: std_dev
+ mean,
+ std_dev
}
}
}
-impl Sample<f64> for Normal {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl IndependentSample<f64> for Normal {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
- let StandardNormal(n) = rng.gen::<StandardNormal>();
+impl Distribution<f64> for Normal {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let n = rng.sample(StandardNormal);
self.mean + self.std_dev * n
}
}
@@ -123,17 +124,17 @@ impl IndependentSample<f64> for Normal {
/// The log-normal distribution `ln N(mean, std_dev**2)`.
///
-/// If `X` is log-normal distributed, then `ln(X)` is `N(mean,
-/// std_dev**2)` distributed.
+/// If `X` is log-normal distributed, then `ln(X)` is `N(mean, std_dev**2)`
+/// distributed.
///
/// # Example
///
-/// ```rust
-/// use rand::distributions::{LogNormal, IndependentSample};
+/// ```
+/// use rand::distributions::{LogNormal, Distribution};
///
/// // mean 2, standard deviation 3
/// let log_normal = LogNormal::new(2.0, 3.0);
-/// let v = log_normal.ind_sample(&mut rand::thread_rng());
+/// let v = log_normal.sample(&mut rand::thread_rng());
/// println!("{} is from an ln N(2, 9) distribution", v)
/// ```
#[derive(Clone, Copy, Debug)]
@@ -154,27 +155,23 @@ impl LogNormal {
LogNormal { norm: Normal::new(mean, std_dev) }
}
}
-impl Sample<f64> for LogNormal {
- fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
-}
-impl IndependentSample<f64> for LogNormal {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
- self.norm.ind_sample(rng).exp()
+impl Distribution<f64> for LogNormal {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ self.norm.sample(rng).exp()
}
}
#[cfg(test)]
mod tests {
- use distributions::{Sample, IndependentSample};
+ use distributions::Distribution;
use super::{Normal, LogNormal};
#[test]
fn test_normal() {
- let mut norm = Normal::new(10.0, 10.0);
- let mut rng = ::test::rng();
+ let norm = Normal::new(10.0, 10.0);
+ let mut rng = ::test::rng(210);
for _ in 0..1000 {
norm.sample(&mut rng);
- norm.ind_sample(&mut rng);
}
}
#[test]
@@ -186,11 +183,10 @@ mod tests {
#[test]
fn test_log_normal() {
- let mut lnorm = LogNormal::new(10.0, 10.0);
- let mut rng = ::test::rng();
+ let lnorm = LogNormal::new(10.0, 10.0);
+ let mut rng = ::test::rng(211);
for _ in 0..1000 {
lnorm.sample(&mut rng);
- lnorm.ind_sample(&mut rng);
}
}
#[test]
diff --git a/rand/src/distributions/other.rs b/rand/src/distributions/other.rs
new file mode 100644
index 0000000..2295f79
--- /dev/null
+++ b/rand/src/distributions/other.rs
@@ -0,0 +1,219 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The implementations of the `Standard` distribution for other built-in types.
+
+use core::char;
+use core::num::Wrapping;
+
+use {Rng};
+use distributions::{Distribution, Standard, Uniform};
+
+// ----- Sampling distributions -----
+
+/// Sample a `char`, uniformly distributed over ASCII letters and numbers:
+/// a-z, A-Z and 0-9.
+///
+/// # Example
+///
+/// ```
+/// use std::iter;
+/// use rand::{Rng, thread_rng};
+/// use rand::distributions::Alphanumeric;
+///
+/// let mut rng = thread_rng();
+/// let chars: String = iter::repeat(())
+/// .map(|()| rng.sample(Alphanumeric))
+/// .take(7)
+/// .collect();
+/// println!("Random chars: {}", chars);
+/// ```
+#[derive(Debug)]
+pub struct Alphanumeric;
+
+
+// ----- Implementations of distributions -----
+
+impl Distribution<char> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> char {
+ // A valid `char` is either in the interval `[0, 0xD800)` or
+ // `(0xDFFF, 0x11_0000)`. All `char`s must therefore be in
+ // `[0, 0x11_0000)` but not in the "gap" `[0xD800, 0xDFFF]` which is
+ // reserved for surrogates. This is the size of that gap.
+ const GAP_SIZE: u32 = 0xDFFF - 0xD800 + 1;
+
+ // Uniform::new(0, 0x11_0000 - GAP_SIZE) can also be used but it
+ // seemed slower.
+ let range = Uniform::new(GAP_SIZE, 0x11_0000);
+
+ let mut n = range.sample(rng);
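+ // `n` is in `[GAP_SIZE, 0x11_0000)`: values up to 0xDFFF are shifted down
+ // by GAP_SIZE into `[0, 0xD800)`, while larger values are kept as-is, so
+ // the two cases together cover every valid scalar value exactly once.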
+ if n <= 0xDFFF {
+ n -= GAP_SIZE;
+ }
+ unsafe { char::from_u32_unchecked(n) }
+ }
+}
+
+impl Distribution<char> for Alphanumeric {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> char {
+ const RANGE: u32 = 26 + 26 + 10;
+ const GEN_ASCII_STR_CHARSET: &[u8] =
+ b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
+ abcdefghijklmnopqrstuvwxyz\
+ 0123456789";
+ // We can pick from 62 characters. This is so close to a power of 2, 64,
+ // that we can do better than `Uniform`. Use a simple bitshift and
+ // rejection sampling. We do not use a bitmask, because for small RNGs
+ // the most significant bits are usually of higher quality.
+ loop {
+ let var = rng.next_u32() >> (32 - 6);
+ if var < RANGE {
+ return GEN_ASCII_STR_CHARSET[var as usize] as char
+ }
+ }
+ }
+}
+
+impl Distribution<bool> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> bool {
+ // We can compare against an arbitrary bit of a u32 to get a bool.
+ // Because the least significant bits of a lower quality RNG can have
+ // simple patterns, we compare against the most significant bit. This is
+ // easiest done using a sign test.
+ (rng.next_u32() as i32) < 0
+ }
+}
+
+macro_rules! tuple_impl {
+ // use variables to indicate the arity of the tuple
+ ($($tyvar:ident),* ) => {
+ // the trailing commas are for the 1 tuple
+ impl< $( $tyvar ),* >
+ Distribution<( $( $tyvar ),* , )>
+ for Standard
+ where $( Standard: Distribution<$tyvar> ),*
+ {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> ( $( $tyvar ),* , ) {
+ (
+ // use the $tyvar's to get the appropriate number of
+ // repeats (they're not actually needed)
+ $(
+ _rng.gen::<$tyvar>()
+ ),*
+ ,
+ )
+ }
+ }
+ }
+}
+
+impl Distribution<()> for Standard {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, _: &mut R) -> () { () }
+}
+tuple_impl!{A}
+tuple_impl!{A, B}
+tuple_impl!{A, B, C}
+tuple_impl!{A, B, C, D}
+tuple_impl!{A, B, C, D, E}
+tuple_impl!{A, B, C, D, E, F}
+tuple_impl!{A, B, C, D, E, F, G}
+tuple_impl!{A, B, C, D, E, F, G, H}
+tuple_impl!{A, B, C, D, E, F, G, H, I}
+tuple_impl!{A, B, C, D, E, F, G, H, I, J}
+tuple_impl!{A, B, C, D, E, F, G, H, I, J, K}
+tuple_impl!{A, B, C, D, E, F, G, H, I, J, K, L}
+
+macro_rules! array_impl {
+ // recursive, given at least one type parameter:
+ {$n:expr, $t:ident, $($ts:ident,)*} => {
+ array_impl!{($n - 1), $($ts,)*}
+
+ impl<T> Distribution<[T; $n]> for Standard where Standard: Distribution<T> {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> [T; $n] {
+ [_rng.gen::<$t>(), $(_rng.gen::<$ts>()),*]
+ }
+ }
+ };
+ // empty case:
+ {$n:expr,} => {
+ impl<T> Distribution<[T; $n]> for Standard {
+ fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> [T; $n] { [] }
+ }
+ };
+}
+
+array_impl!{32, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T,}
+
+impl<T> Distribution<Option<T>> for Standard where Standard: Distribution<T> {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Option<T> {
+ // UFCS is needed here: https://github.com/rust-lang/rust/issues/24066
+ if rng.gen::<bool>() {
+ Some(rng.gen())
+ } else {
+ None
+ }
+ }
+}
+
+impl<T> Distribution<Wrapping<T>> for Standard where Standard: Distribution<T> {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Wrapping<T> {
+ Wrapping(rng.gen())
+ }
+}
+
+
+#[cfg(test)]
+mod tests {
+ use {Rng, RngCore, Standard};
+ use distributions::Alphanumeric;
+ #[cfg(all(not(feature="std"), feature="alloc"))] use alloc::string::String;
+
+ #[test]
+ fn test_misc() {
+ let rng: &mut RngCore = &mut ::test::rng(820);
+
+ rng.sample::<char, _>(Standard);
+ rng.sample::<bool, _>(Standard);
+ }
+
+ #[cfg(feature="alloc")]
+ #[test]
+ fn test_chars() {
+ use core::iter;
+ let mut rng = ::test::rng(805);
+
+ // Test by generating a relatively large number of chars, so we also
+ // take the rejection sampling path.
+ let word: String = iter::repeat(())
+ .map(|()| rng.gen::<char>()).take(1000).collect();
+ assert!(word.len() != 0);
+ }
+
+ #[test]
+ fn test_alphanumeric() {
+ let mut rng = ::test::rng(806);
+
+ // Test by generating a relatively large number of chars, so we also
+ // take the rejection sampling path.
+ let mut incorrect = false;
+ for _ in 0..100 {
+ let c = rng.sample(Alphanumeric);
+ incorrect |= !((c >= '0' && c <= '9') ||
+ (c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') );
+ }
+ assert!(incorrect == false);
+ }
+}
diff --git a/rand/src/distributions/pareto.rs b/rand/src/distributions/pareto.rs
new file mode 100644
index 0000000..744a157
--- /dev/null
+++ b/rand/src/distributions/pareto.rs
@@ -0,0 +1,74 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Pareto distribution.
+
+use Rng;
+use distributions::{Distribution, OpenClosed01};
+
+/// Samples floating-point numbers according to the Pareto distribution.
+///
+/// # Example
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::Pareto;
+///
+/// let val: f64 = SmallRng::from_entropy().sample(Pareto::new(1., 2.));
+/// println!("{}", val);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Pareto {
+ scale: f64,
+ inv_neg_shape: f64,
+}
+
+impl Pareto {
+ /// Construct a new Pareto distribution with given `scale` and `shape`.
+ ///
+ /// In the literature, `scale` is commonly written as x<sub>m</sub> or k and
+ /// `shape` is often written as α.
+ ///
+ /// # Panics
+ ///
+ /// `scale` and `shape` have to be non-zero and positive.
+ pub fn new(scale: f64, shape: f64) -> Pareto {
+ assert!((scale > 0.) & (shape > 0.));
+ Pareto { scale, inv_neg_shape: -1.0 / shape }
+ }
+}
+
+impl Distribution<f64> for Pareto {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
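+ // Inverse transform sampling: for `u` uniform on (0, 1],
+ // `scale * u^(-1/shape)` follows a Pareto(scale, shape) distribution.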
+ let u: f64 = rng.sample(OpenClosed01);
+ self.scale * u.powf(self.inv_neg_shape)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use distributions::Distribution;
+ use super::Pareto;
+
+ #[test]
+ #[should_panic]
+ fn invalid() {
+ Pareto::new(0., 0.);
+ }
+
+ #[test]
+ fn sample() {
+ let scale = 1.0;
+ let shape = 2.0;
+ let d = Pareto::new(scale, shape);
+ let mut rng = ::test::rng(1);
+ for _ in 0..1000 {
+ let r = d.sample(&mut rng);
+ assert!(r >= scale);
+ }
+ }
+}
diff --git a/rand/src/distributions/poisson.rs b/rand/src/distributions/poisson.rs
new file mode 100644
index 0000000..1244caa
--- /dev/null
+++ b/rand/src/distributions/poisson.rs
@@ -0,0 +1,157 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2016-2017 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Poisson distribution.
+
+use Rng;
+use distributions::{Distribution, Cauchy};
+use distributions::utils::log_gamma;
+
+/// The Poisson distribution `Poisson(lambda)`.
+///
+/// This distribution has a probability mass function:
+/// `f(k) = lambda^k * exp(-lambda) / k!` for `k >= 0`.
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{Poisson, Distribution};
+///
+/// let poi = Poisson::new(2.0);
+/// let v = poi.sample(&mut rand::thread_rng());
+/// println!("{} is from a Poisson(2) distribution", v);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Poisson {
+ lambda: f64,
+ // precalculated values
+ exp_lambda: f64,
+ log_lambda: f64,
+ sqrt_2lambda: f64,
+ magic_val: f64,
+}
+
+impl Poisson {
+ /// Construct a new `Poisson` with the given rate parameter
+ /// `lambda`. Panics if `lambda <= 0`.
+ pub fn new(lambda: f64) -> Poisson {
+ assert!(lambda > 0.0, "Poisson::new called with lambda <= 0");
+ let log_lambda = lambda.ln();
+ Poisson {
+ lambda,
+ exp_lambda: (-lambda).exp(),
+ log_lambda,
+ sqrt_2lambda: (2.0 * lambda).sqrt(),
+ magic_val: lambda * log_lambda - log_gamma(1.0 + lambda),
+ }
+ }
+}
+
+impl Distribution<u64> for Poisson {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
+ // using the algorithm from Numerical Recipes in C
+
+ // for low expected values use the Knuth method
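+ // (multiply uniform samples until the running product drops below
+ // exp(-lambda); the number of factors needed, minus one, is
+ // Poisson(lambda) distributed)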
+ if self.lambda < 12.0 {
+ let mut result = 0;
+ let mut p = 1.0;
+ while p > self.exp_lambda {
+ p *= rng.gen::<f64>();
+ result += 1;
+ }
+ result - 1
+ }
+ // high expected values - rejection method
+ else {
+ let mut int_result: u64;
+
+ // we use the Cauchy distribution as the comparison distribution
+ // f(x) ~ 1/(1+x^2)
+ let cauchy = Cauchy::new(0.0, 1.0);
+
+ loop {
+ let mut result;
+ let mut comp_dev;
+
+ loop {
+ // draw from the Cauchy distribution
+ comp_dev = rng.sample(cauchy);
+ // shift the peak of the comparison distribution
+ result = self.sqrt_2lambda * comp_dev + self.lambda;
+ // repeat the drawing until we are in the range of possible values
+ if result >= 0.0 {
+ break;
+ }
+ }
+ // now the result is a random variable greater than 0 with Cauchy distribution
+ // the result should be an integer value
+ result = result.floor();
+ int_result = result as u64;
+
+ // this is the ratio of the Poisson distribution to the comparison distribution
+ // the magic value scales the distribution function to a range of approximately 0-1
+ // since it is not exact, we multiply the ratio by 0.9 to avoid ratios greater than 1
+ // this doesn't change the resulting distribution, only increases the rate of failed drawings
+ let check = 0.9 * (1.0 + comp_dev * comp_dev)
+ * (result * self.log_lambda - log_gamma(1.0 + result) - self.magic_val).exp();
+
+ // check with uniform random value - if below the threshold, we are within the target distribution
+ if rng.gen::<f64>() <= check {
+ break;
+ }
+ }
+ int_result
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use distributions::Distribution;
+ use super::Poisson;
+
+ #[test]
+ fn test_poisson_10() {
+ let poisson = Poisson::new(10.0);
+ let mut rng = ::test::rng(123);
+ let mut sum = 0;
+ for _ in 0..1000 {
+ sum += poisson.sample(&mut rng);
+ }
+ let avg = (sum as f64) / 1000.0;
+ println!("Poisson average: {}", avg);
+ assert!((avg - 10.0).abs() < 0.5); // not 100% certain, but probable enough
+ }
+
+ #[test]
+ fn test_poisson_15() {
+ // Take the 'high expected values' path
+ let poisson = Poisson::new(15.0);
+ let mut rng = ::test::rng(123);
+ let mut sum = 0;
+ for _ in 0..1000 {
+ sum += poisson.sample(&mut rng);
+ }
+ let avg = (sum as f64) / 1000.0;
+ println!("Poisson average: {}", avg);
+ assert!((avg - 15.0).abs() < 0.5); // not 100% certain, but probable enough
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_poisson_invalid_lambda_zero() {
+ Poisson::new(0.0);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_poisson_invalid_lambda_neg() {
+ Poisson::new(-10.0);
+ }
+}
diff --git a/rand/src/distributions/range.rs b/rand/src/distributions/range.rs
deleted file mode 100644
index 935a00a..0000000
--- a/rand/src/distributions/range.rs
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Generating numbers between two others.
-
-// this is surprisingly complicated to be both generic & correct
-
-use core::num::Wrapping as w;
-
-use Rng;
-use distributions::{Sample, IndependentSample};
-
-/// Sample values uniformly between two bounds.
-///
-/// This gives a uniform distribution (assuming the RNG used to sample
-/// it is itself uniform & the `SampleRange` implementation for the
-/// given type is correct), even for edge cases like `low = 0u8`,
-/// `high = 170u8`, for which a naive modulo operation would return
-/// numbers less than 85 with double the probability to those greater
-/// than 85.
-///
-/// Types should attempt to sample in `[low, high)`, i.e., not
-/// including `high`, but this may be very difficult. All the
-/// primitive integer types satisfy this property, and the float types
-/// normally satisfy it, but rounding may mean `high` can occur.
-///
-/// # Example
-///
-/// ```rust
-/// use rand::distributions::{IndependentSample, Range};
-///
-/// fn main() {
-/// let between = Range::new(10, 10000);
-/// let mut rng = rand::thread_rng();
-/// let mut sum = 0;
-/// for _ in 0..1000 {
-/// sum += between.ind_sample(&mut rng);
-/// }
-/// println!("{}", sum);
-/// }
-/// ```
-#[derive(Clone, Copy, Debug)]
-pub struct Range<X> {
- low: X,
- range: X,
- accept_zone: X
-}
-
-impl<X: SampleRange + PartialOrd> Range<X> {
- /// Create a new `Range` instance that samples uniformly from
- /// `[low, high)`. Panics if `low >= high`.
- pub fn new(low: X, high: X) -> Range<X> {
- assert!(low < high, "Range::new called with `low >= high`");
- SampleRange::construct_range(low, high)
- }
-}
-
-impl<Sup: SampleRange> Sample<Sup> for Range<Sup> {
- #[inline]
- fn sample<R: Rng>(&mut self, rng: &mut R) -> Sup { self.ind_sample(rng) }
-}
-impl<Sup: SampleRange> IndependentSample<Sup> for Range<Sup> {
- fn ind_sample<R: Rng>(&self, rng: &mut R) -> Sup {
- SampleRange::sample_range(self, rng)
- }
-}
-
-/// The helper trait for types that have a sensible way to sample
-/// uniformly between two values. This should not be used directly,
-/// and is only to facilitate `Range`.
-pub trait SampleRange : Sized {
- /// Construct the `Range` object that `sample_range`
- /// requires. This should not ever be called directly, only via
- /// `Range::new`, which will check that `low < high`, so this
- /// function doesn't have to repeat the check.
- fn construct_range(low: Self, high: Self) -> Range<Self>;
-
- /// Sample a value from the given `Range` with the given `Rng` as
- /// a source of randomness.
- fn sample_range<R: Rng>(r: &Range<Self>, rng: &mut R) -> Self;
-}
-
-macro_rules! integer_impl {
- ($ty:ty, $unsigned:ident) => {
- impl SampleRange for $ty {
- // we play free and fast with unsigned vs signed here
- // (when $ty is signed), but that's fine, since the
- // contract of this macro is for $ty and $unsigned to be
- // "bit-equal", so casting between them is a no-op & a
- // bijection.
-
- #[inline]
- fn construct_range(low: $ty, high: $ty) -> Range<$ty> {
- let range = (w(high as $unsigned) - w(low as $unsigned)).0;
- let unsigned_max: $unsigned = ::core::$unsigned::MAX;
-
- // this is the largest number that fits into $unsigned
- // that `range` divides evenly, so, if we've sampled
- // `n` uniformly from this region, then `n % range` is
- // uniform in [0, range)
- let zone = unsigned_max - unsigned_max % range;
-
- Range {
- low: low,
- range: range as $ty,
- accept_zone: zone as $ty
- }
- }
-
- #[inline]
- fn sample_range<R: Rng>(r: &Range<$ty>, rng: &mut R) -> $ty {
- loop {
- // rejection sample
- let v = rng.gen::<$unsigned>();
- // until we find something that fits into the
- // region which r.range evenly divides (this will
- // be uniformly distributed)
- if v < r.accept_zone as $unsigned {
- // and return it, with some adjustments
- return (w(r.low) + w((v % r.range as $unsigned) as $ty)).0;
- }
- }
- }
- }
- }
-}
-
-integer_impl! { i8, u8 }
-integer_impl! { i16, u16 }
-integer_impl! { i32, u32 }
-integer_impl! { i64, u64 }
-#[cfg(feature = "i128_support")]
-integer_impl! { i128, u128 }
-integer_impl! { isize, usize }
-integer_impl! { u8, u8 }
-integer_impl! { u16, u16 }
-integer_impl! { u32, u32 }
-integer_impl! { u64, u64 }
-#[cfg(feature = "i128_support")]
-integer_impl! { u128, u128 }
-integer_impl! { usize, usize }
-
-macro_rules! float_impl {
- ($ty:ty) => {
- impl SampleRange for $ty {
- fn construct_range(low: $ty, high: $ty) -> Range<$ty> {
- Range {
- low: low,
- range: high - low,
- accept_zone: 0.0 // unused
- }
- }
- fn sample_range<R: Rng>(r: &Range<$ty>, rng: &mut R) -> $ty {
- r.low + r.range * rng.gen::<$ty>()
- }
- }
- }
-}
-
-float_impl! { f32 }
-float_impl! { f64 }
-
-#[cfg(test)]
-mod tests {
- use distributions::{Sample, IndependentSample};
- use super::Range as Range;
-
- #[should_panic]
- #[test]
- fn test_range_bad_limits_equal() {
- Range::new(10, 10);
- }
- #[should_panic]
- #[test]
- fn test_range_bad_limits_flipped() {
- Range::new(10, 5);
- }
-
- #[test]
- fn test_integers() {
- let mut rng = ::test::rng();
- macro_rules! t {
- ($($ty:ident),*) => {{
- $(
- let v: &[($ty, $ty)] = &[(0, 10),
- (10, 127),
- (::core::$ty::MIN, ::core::$ty::MAX)];
- for &(low, high) in v.iter() {
- let mut sampler: Range<$ty> = Range::new(low, high);
- for _ in 0..1000 {
- let v = sampler.sample(&mut rng);
- assert!(low <= v && v < high);
- let v = sampler.ind_sample(&mut rng);
- assert!(low <= v && v < high);
- }
- }
- )*
- }}
- }
- #[cfg(not(feature = "i128_support"))]
- t!(i8, i16, i32, i64, isize,
- u8, u16, u32, u64, usize);
- #[cfg(feature = "i128_support")]
- t!(i8, i16, i32, i64, i128, isize,
- u8, u16, u32, u64, u128, usize);
- }
-
- #[test]
- fn test_floats() {
- let mut rng = ::test::rng();
- macro_rules! t {
- ($($ty:ty),*) => {{
- $(
- let v: &[($ty, $ty)] = &[(0.0, 100.0),
- (-1e35, -1e25),
- (1e-35, 1e-25),
- (-1e35, 1e35)];
- for &(low, high) in v.iter() {
- let mut sampler: Range<$ty> = Range::new(low, high);
- for _ in 0..1000 {
- let v = sampler.sample(&mut rng);
- assert!(low <= v && v < high);
- let v = sampler.ind_sample(&mut rng);
- assert!(low <= v && v < high);
- }
- }
- )*
- }}
- }
-
- t!(f32, f64)
- }
-
-}
diff --git a/rand/src/distributions/triangular.rs b/rand/src/distributions/triangular.rs
new file mode 100644
index 0000000..a6eef5c
--- /dev/null
+++ b/rand/src/distributions/triangular.rs
@@ -0,0 +1,86 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//! The triangular distribution.
+
+use Rng;
+use distributions::{Distribution, Standard};
+
+/// The triangular distribution.
+///
+/// # Example
+///
+/// ```rust
+/// use rand::distributions::{Triangular, Distribution};
+///
+/// let d = Triangular::new(0., 5., 2.5);
+/// let v = d.sample(&mut rand::thread_rng());
+/// println!("{} is from a triangular distribution", v);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Triangular {
+ min: f64,
+ max: f64,
+ mode: f64,
+}
+
+impl Triangular {
+ /// Construct a new `Triangular` with minimum `min`, maximum `max` and mode
+ /// `mode`.
+ ///
+ /// # Panics
+ ///
+ /// If `max < mode`, `mode < min` or `max == min`.
+ ///
+ #[inline]
+ pub fn new(min: f64, max: f64, mode: f64) -> Triangular {
+ assert!(max >= mode);
+ assert!(mode >= min);
+ assert!(max != min);
+ Triangular { min, max, mode }
+ }
+}
+
+impl Distribution<f64> for Triangular {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
+ let f: f64 = rng.sample(Standard);
+ let diff_mode_min = self.mode - self.min;
+ let diff_max_min = self.max - self.min;
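+ // Inverse-CDF sampling: `f * diff_max_min < diff_mode_min` is equivalent
+ // to `f < (mode - min) / (max - min)`, the CDF value at the mode, so each
+ // branch inverts the CDF on its own side of the mode.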
+ if f * diff_max_min < diff_mode_min {
+ self.min + (f * diff_max_min * diff_mode_min).sqrt()
+ } else {
+ self.max - ((1. - f) * diff_max_min * (self.max - self.mode)).sqrt()
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use distributions::Distribution;
+ use super::Triangular;
+
+ #[test]
+ fn test_new() {
+ for &(min, max, mode) in &[
+ (-1., 1., 0.), (1., 2., 1.), (5., 25., 25.), (1e-5, 1e5, 1e-3),
+ (0., 1., 0.9), (-4., -0.5, -2.), (-13.039, 8.41, 1.17),
+ ] {
+ println!("{} {} {}", min, max, mode);
+ let _ = Triangular::new(min, max, mode);
+ }
+ }
+
+ #[test]
+ fn test_sample() {
+ let norm = Triangular::new(0., 1., 0.5);
+ let mut rng = ::test::rng(1);
+ for _ in 0..1000 {
+ norm.sample(&mut rng);
+ }
+ }
+}
diff --git a/rand/src/distributions/uniform.rs b/rand/src/distributions/uniform.rs
new file mode 100644
index 0000000..5fb89e3
--- /dev/null
+++ b/rand/src/distributions/uniform.rs
@@ -0,0 +1,1297 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2017 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A distribution uniformly sampling numbers within a given range.
+//!
+//! [`Uniform`] is the standard distribution to sample uniformly from a range;
+//! e.g. `Uniform::new_inclusive(1, 6)` can sample integers from 1 to 6, like a
+//! standard die. [`Rng::gen_range`] supports any type supported by
+//! [`Uniform`].
+//!
+//! This distribution is provided with support for several primitive types
+//! (all integer and floating-point types) as well as `std::time::Duration`,
+//! and supports extension to user-defined types via a type-specific *back-end*
+//! implementation.
+//!
+//! The types [`UniformInt`], [`UniformFloat`] and [`UniformDuration`] are the
+//! back-ends supporting sampling from primitive integer and floating-point
+//! ranges as well as from `std::time::Duration`; these types do not normally
+//! need to be used directly (unless implementing a derived back-end).
+//!
+//! # Example usage
+//!
+//! ```
+//! use rand::{Rng, thread_rng};
+//! use rand::distributions::Uniform;
+//!
+//! let mut rng = thread_rng();
+//! let side = Uniform::new(-10.0, 10.0);
+//!
+//! // sample between 1 and 10 points
+//! for _ in 0..rng.gen_range(1, 11) {
+//! // sample a point from the square spanning -10 to 10 in both dimensions
+//! let (x, y) = (rng.sample(side), rng.sample(side));
+//! println!("Point: {}, {}", x, y);
+//! }
+//! ```
+//!
+//! # Extending `Uniform` to support a custom type
+//!
+//! To extend [`Uniform`] to support your own types, write a back-end which
+//! implements the [`UniformSampler`] trait, then implement the [`SampleUniform`]
+//! helper trait to "register" your back-end. See the `MyF32` example below.
+//!
+//! At a minimum, the back-end needs to store any parameters needed for sampling
+//! (e.g. the target range) and implement `new`, `new_inclusive` and `sample`.
+//! Those methods should include an assert to check the range is valid (i.e.
+//! `low < high`). The example below merely wraps another back-end.
+//!
+//! The `new`, `new_inclusive` and `sample_single` functions use arguments of
+//! type `SampleBorrow<X>` in order to support passing in values by reference
+//! or by value. In the implementation of these functions, you can simply use
+//! the reference returned by [`SampleBorrow::borrow`], or copy or clone the
+//! value, whatever is appropriate for your type.
+//!
+//! ```
+//! use rand::prelude::*;
+//! use rand::distributions::uniform::{Uniform, SampleUniform,
+//! UniformSampler, UniformFloat, SampleBorrow};
+//!
+//! struct MyF32(f32);
+//!
+//! #[derive(Clone, Copy, Debug)]
+//! struct UniformMyF32 {
+//! inner: UniformFloat<f32>,
+//! }
+//!
+//! impl UniformSampler for UniformMyF32 {
+//! type X = MyF32;
+//! fn new<B1, B2>(low: B1, high: B2) -> Self
+//! where B1: SampleBorrow<Self::X> + Sized,
+//! B2: SampleBorrow<Self::X> + Sized
+//! {
+//! UniformMyF32 {
+//! inner: UniformFloat::<f32>::new(low.borrow().0, high.borrow().0),
+//! }
+//! }
+//! fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
+//! where B1: SampleBorrow<Self::X> + Sized,
+//! B2: SampleBorrow<Self::X> + Sized
+//! {
+//! UniformSampler::new(low, high)
+//! }
+//! fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+//! MyF32(self.inner.sample(rng))
+//! }
+//! }
+//!
+//! impl SampleUniform for MyF32 {
+//! type Sampler = UniformMyF32;
+//! }
+//!
+//! let (low, high) = (MyF32(17.0f32), MyF32(22.0f32));
+//! let uniform = Uniform::new(low, high);
+//! let x = uniform.sample(&mut thread_rng());
+//! ```
+//!
+//! [`Uniform`]: struct.Uniform.html
+//! [`Rng::gen_range`]: ../../trait.Rng.html#method.gen_range
+//! [`SampleUniform`]: trait.SampleUniform.html
+//! [`UniformSampler`]: trait.UniformSampler.html
+//! [`UniformInt`]: struct.UniformInt.html
+//! [`UniformFloat`]: struct.UniformFloat.html
+//! [`UniformDuration`]: struct.UniformDuration.html
+//! [`SampleBorrow::borrow`]: trait.SampleBorrow.html#method.borrow
+
+#[cfg(feature = "std")]
+use std::time::Duration;
+#[cfg(all(not(feature = "std"), rust_1_25))]
+use core::time::Duration;
+
+use Rng;
+use distributions::Distribution;
+use distributions::float::IntoFloat;
+use distributions::utils::{WideningMultiply, FloatSIMDUtils, FloatAsSIMD, BoolAsSIMD};
+
+#[cfg(not(feature = "std"))]
+#[allow(unused_imports)] // rustc doesn't detect that this is actually used
+use distributions::utils::Float;
+
+
+#[cfg(feature="simd_support")]
+use packed_simd::*;
+
+/// Sample values uniformly between two bounds.
+///
+/// [`Uniform::new`] and [`Uniform::new_inclusive`] construct a uniform
+/// distribution sampling from the given range; these functions may do extra
+/// work up front to make sampling of multiple values faster.
+///
+/// When sampling from a constant range, many calculations can happen at
+/// compile-time and all methods should be fast; for floating-point ranges and
+/// the full range of integer types this should have comparable performance to
+/// the `Standard` distribution.
+///
+/// Steps are taken to avoid bias which might be present in naive
+/// implementations; for example `rng.gen::<u8>() % 170` samples from the range
+/// `[0, 169]` but is twice as likely to select numbers less than 85 than other
+/// values. Further, the implementations here give more weight to the high-bits
+/// generated by the RNG than the low bits, since with some RNGs the low-bits
+/// are of lower quality than the high bits.
+///
+/// Implementations must sample in `[low, high)` range for
+/// `Uniform::new(low, high)`, i.e., excluding `high`. In particular care must
+/// be taken to ensure that rounding never results in values `< low` or `>= high`.
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{Distribution, Uniform};
+///
+/// fn main() {
+/// let between = Uniform::from(10..10000);
+/// let mut rng = rand::thread_rng();
+/// let mut sum = 0;
+/// for _ in 0..1000 {
+/// sum += between.sample(&mut rng);
+/// }
+/// println!("{}", sum);
+/// }
+/// ```
+///
+/// [`Uniform::new`]: struct.Uniform.html#method.new
+/// [`Uniform::new_inclusive`]: struct.Uniform.html#method.new_inclusive
+/// [`new`]: struct.Uniform.html#method.new
+/// [`new_inclusive`]: struct.Uniform.html#method.new_inclusive
+#[derive(Clone, Copy, Debug)]
+pub struct Uniform<X: SampleUniform> {
+ inner: X::Sampler,
+}
+
+impl<X: SampleUniform> Uniform<X> {
+ /// Create a new `Uniform` instance which samples uniformly from the half
+ /// open range `[low, high)` (excluding `high`). Panics if `low >= high`.
+ pub fn new<B1, B2>(low: B1, high: B2) -> Uniform<X>
+ where B1: SampleBorrow<X> + Sized,
+ B2: SampleBorrow<X> + Sized
+ {
+ Uniform { inner: X::Sampler::new(low, high) }
+ }
+
+ /// Create a new `Uniform` instance which samples uniformly from the closed
+ /// range `[low, high]` (inclusive). Panics if `low > high`.
+ pub fn new_inclusive<B1, B2>(low: B1, high: B2) -> Uniform<X>
+ where B1: SampleBorrow<X> + Sized,
+ B2: SampleBorrow<X> + Sized
+ {
+ Uniform { inner: X::Sampler::new_inclusive(low, high) }
+ }
+}
+
+impl<X: SampleUniform> Distribution<X> for Uniform<X> {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> X {
+ self.inner.sample(rng)
+ }
+}
+
+/// Helper trait for creating objects using the correct implementation of
+/// [`UniformSampler`] for the sampling type.
+///
+/// See the [module documentation] on how to implement [`Uniform`] range
+/// sampling for a custom type.
+///
+/// [`UniformSampler`]: trait.UniformSampler.html
+/// [module documentation]: index.html
+/// [`Uniform`]: struct.Uniform.html
+pub trait SampleUniform: Sized {
+ /// The `UniformSampler` implementation supporting type `X`.
+ type Sampler: UniformSampler<X = Self>;
+}
+
+/// Helper trait handling actual uniform sampling.
+///
+/// See the [module documentation] on how to implement [`Uniform`] range
+/// sampling for a custom type.
+///
+/// Implementation of [`sample_single`] is optional, and is only useful when
+/// the implementation can be faster than `Self::new(low, high).sample(rng)`.
+///
+/// [module documentation]: index.html
+/// [`Uniform`]: struct.Uniform.html
+/// [`sample_single`]: trait.UniformSampler.html#method.sample_single
+pub trait UniformSampler: Sized {
+ /// The type sampled by this implementation.
+ type X;
+
+ /// Construct self, with inclusive lower bound and exclusive upper bound
+ /// `[low, high)`.
+ ///
+ /// Usually users should not call this directly but instead use
+ /// `Uniform::new`, which asserts that `low < high` before calling this.
+ fn new<B1, B2>(low: B1, high: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized;
+
+ /// Construct self, with inclusive bounds `[low, high]`.
+ ///
+ /// Usually users should not call this directly but instead use
+ /// `Uniform::new_inclusive`, which asserts that `low <= high` before
+ /// calling this.
+ fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized;
+
+ /// Sample a value.
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X;
+
+ /// Sample a single value uniformly from a range with inclusive lower bound
+ /// and exclusive upper bound `[low, high)`.
+ ///
+ /// Usually users should not call this directly but instead use
+ /// `Uniform::sample_single`, which asserts that `low < high` before calling
+ /// this.
+ ///
+ /// Via this method, implementations can provide a method optimized for
+ /// sampling only a single value from the specified range. The default
+ /// implementation simply calls `UniformSampler::new` then `sample` on the
+ /// result.
+ fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R)
+ -> Self::X
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let uniform: Self = UniformSampler::new(low, high);
+ uniform.sample(rng)
+ }
+}
+
+impl<X: SampleUniform> From<::core::ops::Range<X>> for Uniform<X> {
+ fn from(r: ::core::ops::Range<X>) -> Uniform<X> {
+ Uniform::new(r.start, r.end)
+ }
+}
+
+#[cfg(rust_1_27)]
+impl<X: SampleUniform> From<::core::ops::RangeInclusive<X>> for Uniform<X> {
+ fn from(r: ::core::ops::RangeInclusive<X>) -> Uniform<X> {
+ Uniform::new_inclusive(r.start(), r.end())
+ }
+}
+
+/// Helper trait similar to [`Borrow`] but implemented
+/// only for `SampleUniform` and references to `SampleUniform`,
+/// in order to resolve ambiguity issues.
+///
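+/// For example, it is what lets both `Uniform::new(0u32, 10u32)` and
+/// `Uniform::new(&0u32, &10u32)` compile against the same constructor
+/// signature.
+///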
+/// [`Borrow`]: https://doc.rust-lang.org/std/borrow/trait.Borrow.html
+pub trait SampleBorrow<Borrowed> {
+ /// Immutably borrows from an owned value. See [`Borrow::borrow`]
+ ///
+ /// [`Borrow::borrow`]: https://doc.rust-lang.org/std/borrow/trait.Borrow.html#tymethod.borrow
+ fn borrow(&self) -> &Borrowed;
+}
+impl<Borrowed> SampleBorrow<Borrowed> for Borrowed where Borrowed: SampleUniform {
+ #[inline(always)]
+ fn borrow(&self) -> &Borrowed { self }
+}
+impl<'a, Borrowed> SampleBorrow<Borrowed> for &'a Borrowed where Borrowed: SampleUniform {
+ #[inline(always)]
+ fn borrow(&self) -> &Borrowed { *self }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// What follows are all back-ends.
+
+
+/// The back-end implementing [`UniformSampler`] for integer types.
+///
+/// Unless you are implementing [`UniformSampler`] for your own type, this type
+/// should not be used directly, use [`Uniform`] instead.
+///
+/// # Implementation notes
+///
+/// For a closed range, the number of possible numbers we should generate is
+/// `range = (high - low + 1)`. To avoid bias, we cannot map *all* the random
+/// integers the RNG can generate onto this range unless `range` evenly divides
+/// their count; instead we only map integers from a `zone` whose size is a
+/// multiple of `range`, and reject the rest, which would otherwise cause a
+/// bias.
+///
+/// The problem with `range` is that to cover the full range of the type, it has
+/// to store `unsigned_max + 1`, which can't be represented. But if the range
+/// covers the full range of the type, no modulus is needed. A range of size 0
+/// can't exist, so we use that to represent this special case. Wrapping
+/// arithmetic even makes representing `unsigned_max + 1` as 0 simple.
+///
+/// We don't calculate `zone` directly, but first calculate the number of
+/// integers to reject. To handle `unsigned_max + 1` not fitting in the type,
+/// instead of:
+/// `ints_to_reject = (unsigned_max + 1) % range;`
+/// we use the equivalent:
+/// `ints_to_reject = (unsigned_max - range + 1) % range;`
+///
+/// The smallest integer PRNGs generate is a `u32`. That is why for small
+/// integer sizes (`i8`/`u8` and `i16`/`u16`) there is an optimization: don't
+/// pick the largest zone that can fit in the small type, but pick the largest
+/// zone that can fit in a `u32`. `ints_to_reject` is always less than half the
+/// size of the small integer type, so the most significant bit of `zone` is
+/// always 1; after sign extension to the larger type, all of the added high
+/// bits are 1 as well. The easiest way to grow the `zone` for the larger type
+/// is therefore to simply sign-extend it.
+///
+/// An alternative to using a modulus is widening multiply: After a widening
+/// multiply by `range`, the result is in the high word. Then comparing the low
+/// word against `zone` makes sure our distribution is uniform.
+///
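+/// As a minimal numeric sketch, `Uniform::new(0u8, 170)` leads to:
+///
+/// ```
+/// let (unsigned_max, range) = (255u32, 170u32);             // u8::MAX, high - low
+/// let ints_to_reject = (unsigned_max - range + 1) % range;  // 86
+/// let zone = unsigned_max - ints_to_reject;                 // 169
+/// assert_eq!((zone + 1) % range, 0);  // `zone + 1` is a multiple of `range`
+/// ```
+///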
+/// [`UniformSampler`]: trait.UniformSampler.html
+/// [`Uniform`]: struct.Uniform.html
+#[derive(Clone, Copy, Debug)]
+pub struct UniformInt<X> {
+ low: X,
+ range: X,
+ zone: X,
+}
+
+macro_rules! uniform_int_impl {
+ ($ty:ty, $signed:ty, $unsigned:ident,
+ $i_large:ident, $u_large:ident) => {
+ impl SampleUniform for $ty {
+ type Sampler = UniformInt<$ty>;
+ }
+
+ impl UniformSampler for UniformInt<$ty> {
+            // We play fast and loose with unsigned vs signed here
+            // (when $ty is signed), but that's fine, since the
+            // contract of this macro is for $ty and $unsigned to be
+            // "bit-equal", so casting between them is a no-op.
+
+ type X = $ty;
+
+ #[inline] // if the range is constant, this helps LLVM to do the
+ // calculations at compile-time.
+ fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low < high, "Uniform::new called with `low >= high`");
+ UniformSampler::new_inclusive(low, high - 1)
+ }
+
+ #[inline] // if the range is constant, this helps LLVM to do the
+ // calculations at compile-time.
+ fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low <= high,
+ "Uniform::new_inclusive called with `low > high`");
+ let unsigned_max = ::core::$unsigned::MAX;
+
+ let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned;
+ let ints_to_reject =
+ if range > 0 {
+ (unsigned_max - range + 1) % range
+ } else {
+ 0
+ };
+ let zone = unsigned_max - ints_to_reject;
+
+ UniformInt {
+ low: low,
+ // These are really $unsigned values, but store as $ty:
+ range: range as $ty,
+ zone: zone as $ty
+ }
+ }
+
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ let range = self.range as $unsigned as $u_large;
+ if range > 0 {
+ // Grow `zone` to fit a type of at least 32 bits, by
+ // sign-extending it (the first bit is always 1, so are all
+ // the preceding bits of the larger type).
+ // For types that already have the right size, all the
+ // casting is a no-op.
+ let zone = self.zone as $signed as $i_large as $u_large;
+ loop {
+ let v: $u_large = rng.gen();
+ let (hi, lo) = v.wmul(range);
+ if lo <= zone {
+ return self.low.wrapping_add(hi as $ty);
+ }
+ }
+ } else {
+ // Sample from the entire integer range.
+ rng.gen()
+ }
+ }
+
+ fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R)
+ -> Self::X
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low < high,
+ "Uniform::sample_single called with low >= high");
+ let range = high.wrapping_sub(low) as $unsigned as $u_large;
+ let zone =
+ if ::core::$unsigned::MAX <= ::core::u16::MAX as $unsigned {
+ // Using a modulus is faster than the approximation for
+ // i8 and i16. I suppose we trade the cost of one
+ // modulus for near-perfect branch prediction.
+ let unsigned_max: $u_large = ::core::$u_large::MAX;
+ let ints_to_reject = (unsigned_max - range + 1) % range;
+ unsigned_max - ints_to_reject
+ } else {
+ // conservative but fast approximation
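+                    // (`range << range.leading_zeros()` is the largest
+                    // power-of-two multiple of `range` that still fits in
+                    // the wide unsigned type used for sampling.)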
+ range << range.leading_zeros()
+ };
+
+ loop {
+ let v: $u_large = rng.gen();
+ let (hi, lo) = v.wmul(range);
+ if lo <= zone {
+ return low.wrapping_add(hi as $ty);
+ }
+ }
+ }
+ }
+ }
+}
+
+uniform_int_impl! { i8, i8, u8, i32, u32 }
+uniform_int_impl! { i16, i16, u16, i32, u32 }
+uniform_int_impl! { i32, i32, u32, i32, u32 }
+uniform_int_impl! { i64, i64, u64, i64, u64 }
+#[cfg(rust_1_26)]
+uniform_int_impl! { i128, i128, u128, u128, u128 }
+uniform_int_impl! { isize, isize, usize, isize, usize }
+uniform_int_impl! { u8, i8, u8, i32, u32 }
+uniform_int_impl! { u16, i16, u16, i32, u32 }
+uniform_int_impl! { u32, i32, u32, i32, u32 }
+uniform_int_impl! { u64, i64, u64, i64, u64 }
+uniform_int_impl! { usize, isize, usize, isize, usize }
+#[cfg(rust_1_26)]
+uniform_int_impl! { u128, u128, u128, i128, u128 }
+
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+macro_rules! uniform_simd_int_impl {
+ ($ty:ident, $unsigned:ident, $u_scalar:ident) => {
+ // The "pick the largest zone that can fit in an `u32`" optimization
+ // is less useful here. Multiple lanes complicate things, we don't
+ // know the PRNG's minimal output size, and casting to a larger vector
+ // is generally a bad idea for SIMD performance. The user can still
+ // implement it manually.
+
+        // TODO: look into `Uniform::<u32x4>::new(0u32, 100)` functionality,
+        // perhaps via `impl SampleUniform for $u_scalar`?
+ impl SampleUniform for $ty {
+ type Sampler = UniformInt<$ty>;
+ }
+
+ impl UniformSampler for UniformInt<$ty> {
+ type X = $ty;
+
+ #[inline] // if the range is constant, this helps LLVM to do the
+ // calculations at compile-time.
+ fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low.lt(high).all(), "Uniform::new called with `low >= high`");
+ UniformSampler::new_inclusive(low, high - 1)
+ }
+
+ #[inline] // if the range is constant, this helps LLVM to do the
+ // calculations at compile-time.
+ fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low.le(high).all(),
+ "Uniform::new_inclusive called with `low > high`");
+ let unsigned_max = ::core::$u_scalar::MAX;
+
+ // NOTE: these may need to be replaced with explicitly
+ // wrapping operations if `packed_simd` changes
+ let range: $unsigned = ((high - low) + 1).cast();
+ // `% 0` will panic at runtime.
+ let not_full_range = range.gt($unsigned::splat(0));
+ // replacing 0 with `unsigned_max` allows a faster `select`
+ // with bitwise OR
+ let modulo = not_full_range.select(range, $unsigned::splat(unsigned_max));
+ // wrapping addition
+ let ints_to_reject = (unsigned_max - range + 1) % modulo;
+ // When `range` is 0, `lo` of `v.wmul(range)` will always be
+ // zero which means only one sample is needed.
+ let zone = unsigned_max - ints_to_reject;
+
+ UniformInt {
+ low: low,
+ // These are really $unsigned values, but store as $ty:
+ range: range.cast(),
+ zone: zone.cast(),
+ }
+ }
+
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ let range: $unsigned = self.range.cast();
+ let zone: $unsigned = self.zone.cast();
+
+ // This might seem very slow, generating a whole new
+ // SIMD vector for every sample rejection. For most uses
+ // though, the chance of rejection is small and provides good
+ // general performance. With multiple lanes, that chance is
+ // multiplied. To mitigate this, we replace only the lanes of
+ // the vector which fail, iteratively reducing the chance of
+ // rejection. The replacement method does however add a little
+ // overhead. Benchmarking or calculating probabilities might
+ // reveal contexts where this replacement method is slower.
+ let mut v: $unsigned = rng.gen();
+ loop {
+ let (hi, lo) = v.wmul(range);
+ let mask = lo.le(zone);
+ if mask.all() {
+ let hi: $ty = hi.cast();
+ // wrapping addition
+ let result = self.low + hi;
+ // `select` here compiles to a blend operation
+ // When `range.eq(0).none()` the compare and blend
+ // operations are avoided.
+ let v: $ty = v.cast();
+ return range.gt($unsigned::splat(0)).select(result, v);
+ }
+ // Replace only the failing lanes
+ v = mask.select(v, rng.gen());
+ }
+ }
+ }
+ };
+
+ // bulk implementation
+ ($(($unsigned:ident, $signed:ident),)+ $u_scalar:ident) => {
+ $(
+ uniform_simd_int_impl!($unsigned, $unsigned, $u_scalar);
+ uniform_simd_int_impl!($signed, $unsigned, $u_scalar);
+ )+
+ };
+}
+
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+uniform_simd_int_impl! {
+ (u64x2, i64x2),
+ (u64x4, i64x4),
+ (u64x8, i64x8),
+ u64
+}
+
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+uniform_simd_int_impl! {
+ (u32x2, i32x2),
+ (u32x4, i32x4),
+ (u32x8, i32x8),
+ (u32x16, i32x16),
+ u32
+}
+
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+uniform_simd_int_impl! {
+ (u16x2, i16x2),
+ (u16x4, i16x4),
+ (u16x8, i16x8),
+ (u16x16, i16x16),
+ (u16x32, i16x32),
+ u16
+}
+
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+uniform_simd_int_impl! {
+ (u8x2, i8x2),
+ (u8x4, i8x4),
+ (u8x8, i8x8),
+ (u8x16, i8x16),
+ (u8x32, i8x32),
+ (u8x64, i8x64),
+ u8
+}
+
+
+/// The back-end implementing [`UniformSampler`] for floating-point types.
+///
+/// Unless you are implementing [`UniformSampler`] for your own type, this type
+/// should not be used directly, use [`Uniform`] instead.
+///
+/// # Implementation notes
+///
+/// Instead of generating a float in the `[0, 1)` range using [`Standard`], the
+/// `UniformFloat` implementation converts the output of a PRNG itself. This
+/// way one or two steps can be optimized out.
+///
+/// The floats are first converted to a value in the `[1, 2)` interval using a
+/// transmute-based method, and then mapped to the expected range with a
+/// multiply and addition. Values produced this way carry the equivalent of 22
+/// bits of random digits for an `f32`, and 52 for an `f64`.
+///
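+/// A minimal sketch of the `[1, 2)` conversion for `f32` (the real code goes
+/// through `into_float_with_exponent`; the constants here are illustrative):
+///
+/// ```
+/// let bits: u32 = 0x0060_0000;                       // pretend RNG output
+/// let fraction = bits >> (32 - 23);                  // keep 23 fraction bits
+/// let x = f32::from_bits((127u32 << 23) | fraction); // exponent 0 => [1, 2)
+/// assert!(x >= 1.0 && x < 2.0);
+/// let value0_1 = x - 1.0;                            // mapped into [0, 1)
+/// assert!(value0_1 >= 0.0 && value0_1 < 1.0);
+/// ```
+///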
+/// [`UniformSampler`]: trait.UniformSampler.html
+/// [`new`]: trait.UniformSampler.html#tymethod.new
+/// [`new_inclusive`]: trait.UniformSampler.html#tymethod.new_inclusive
+/// [`Uniform`]: struct.Uniform.html
+/// [`Standard`]: ../struct.Standard.html
+#[derive(Clone, Copy, Debug)]
+pub struct UniformFloat<X> {
+ low: X,
+ scale: X,
+}
+
+macro_rules! uniform_float_impl {
+ ($ty:ty, $uty:ident, $f_scalar:ident, $u_scalar:ident, $bits_to_discard:expr) => {
+ impl SampleUniform for $ty {
+ type Sampler = UniformFloat<$ty>;
+ }
+
+ impl UniformSampler for UniformFloat<$ty> {
+ type X = $ty;
+
+ fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low.all_lt(high),
+ "Uniform::new called with `low >= high`");
+ assert!(low.all_finite() && high.all_finite(),
+ "Uniform::new called with non-finite boundaries");
+ let max_rand = <$ty>::splat((::core::$u_scalar::MAX >> $bits_to_discard)
+ .into_float_with_exponent(0) - 1.0);
+
+ let mut scale = high - low;
+
+ loop {
+ let mask = (scale * max_rand + low).ge_mask(high);
+ if mask.none() {
+ break;
+ }
+ scale = scale.decrease_masked(mask);
+ }
+
+ debug_assert!(<$ty>::splat(0.0).all_le(scale));
+
+ UniformFloat { low, scale }
+ }
+
+ fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low.all_le(high),
+ "Uniform::new_inclusive called with `low > high`");
+ assert!(low.all_finite() && high.all_finite(),
+ "Uniform::new_inclusive called with non-finite boundaries");
+ let max_rand = <$ty>::splat((::core::$u_scalar::MAX >> $bits_to_discard)
+ .into_float_with_exponent(0) - 1.0);
+
+ let mut scale = (high - low) / max_rand;
+
+ loop {
+ let mask = (scale * max_rand + low).gt_mask(high);
+ if mask.none() {
+ break;
+ }
+ scale = scale.decrease_masked(mask);
+ }
+
+ debug_assert!(<$ty>::splat(0.0).all_le(scale));
+
+ UniformFloat { low, scale }
+ }
+
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ // Generate a value in the range [1, 2)
+ let value1_2 = (rng.gen::<$uty>() >> $bits_to_discard)
+ .into_float_with_exponent(0);
+
+ // Get a value in the range [0, 1) in order to avoid
+ // overflowing into infinity when multiplying with scale
+ let value0_1 = value1_2 - 1.0;
+
+ // We don't use `f64::mul_add`, because it is not available with
+ // `no_std`. Furthermore, it is slower for some targets (but
+ // faster for others). However, the order of multiplication and
+ // addition is important, because on some platforms (e.g. ARM)
+ // it will be optimized to a single (non-FMA) instruction.
+ value0_1 * self.scale + self.low
+ }
+
+ #[inline]
+ fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R)
+ -> Self::X
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low.all_lt(high),
+ "Uniform::sample_single called with low >= high");
+ let mut scale = high - low;
+
+ loop {
+ // Generate a value in the range [1, 2)
+ let value1_2 = (rng.gen::<$uty>() >> $bits_to_discard)
+ .into_float_with_exponent(0);
+
+ // Get a value in the range [0, 1) in order to avoid
+ // overflowing into infinity when multiplying with scale
+ let value0_1 = value1_2 - 1.0;
+
+ // Doing multiply before addition allows some architectures
+ // to use a single instruction.
+ let res = value0_1 * scale + low;
+
+ debug_assert!(low.all_le(res) || !scale.all_finite());
+ if res.all_lt(high) {
+ return res;
+ }
+
+ // This handles a number of edge cases.
+ // * `low` or `high` is NaN. In this case `scale` and
+ // `res` are going to end up as NaN.
+ // * `low` is negative infinity and `high` is finite.
+ // `scale` is going to be infinite and `res` will be
+ // NaN.
+ // * `high` is positive infinity and `low` is finite.
+ // `scale` is going to be infinite and `res` will
+ // be infinite or NaN (if value0_1 is 0).
+ // * `low` is negative infinity and `high` is positive
+ // infinity. `scale` will be infinite and `res` will
+ // be NaN.
+ // * `low` and `high` are finite, but `high - low`
+ // overflows to infinite. `scale` will be infinite
+ // and `res` will be infinite or NaN (if value0_1 is 0).
+ // So if `high` or `low` are non-finite, we are guaranteed
+ // to fail the `res < high` check above and end up here.
+ //
+ // While we technically should check for non-finite `low`
+ // and `high` before entering the loop, by doing the checks
+ // here instead, we allow the common case to avoid these
+ // checks. But we are still guaranteed that if `low` or
+ // `high` are non-finite we'll end up here and can do the
+ // appropriate checks.
+ //
+ // Likewise `high - low` overflowing to infinity is also
+ // rare, so handle it here after the common case.
+ let mask = !scale.finite_mask();
+ if mask.any() {
+ assert!(low.all_finite() && high.all_finite(),
+ "Uniform::sample_single called with non-finite boundaries");
+ scale = scale.decrease_masked(mask);
+ }
+ }
+ }
+ }
+ }
+}
+
+uniform_float_impl! { f32, u32, f32, u32, 32 - 23 }
+uniform_float_impl! { f64, u64, f64, u64, 64 - 52 }
+
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f32x2, u32x2, f32, u32, 32 - 23 }
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f32x4, u32x4, f32, u32, 32 - 23 }
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f32x8, u32x8, f32, u32, 32 - 23 }
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f32x16, u32x16, f32, u32, 32 - 23 }
+
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f64x2, u64x2, f64, u64, 64 - 52 }
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f64x4, u64x4, f64, u64, 64 - 52 }
+#[cfg(feature="simd_support")]
+uniform_float_impl! { f64x8, u64x8, f64, u64, 64 - 52 }
+
+
+
+/// The back-end implementing [`UniformSampler`] for `Duration`.
+///
+/// Unless you are implementing [`UniformSampler`] for your own types, this type
+/// should not be used directly, use [`Uniform`] instead.
+///
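+/// A minimal usage sketch, sampling a `Duration` range through [`Uniform`]:
+///
+/// ```
+/// use std::time::Duration;
+/// use rand::distributions::{Distribution, Uniform};
+///
+/// let between = Uniform::new(Duration::new(1, 0), Duration::new(60, 0));
+/// let d = between.sample(&mut rand::thread_rng());
+/// assert!(d >= Duration::new(1, 0) && d < Duration::new(60, 0));
+/// ```
+///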
+/// [`UniformSampler`]: trait.UniformSampler.html
+/// [`Uniform`]: struct.Uniform.html
+#[cfg(any(feature = "std", rust_1_25))]
+#[derive(Clone, Copy, Debug)]
+pub struct UniformDuration {
+ mode: UniformDurationMode,
+ offset: u32,
+}
+
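+// The three range representations, chosen by size:
+// * `Small`: `low` and `high` lie within the same whole second, so only the
+//   nanosecond part needs to be sampled.
+// * `Medium`: the upper bound, expressed in nanoseconds, fits in a `u64`, so
+//   the range can be sampled as a single nanosecond count.
+// * `Large`: fall-back that samples seconds and nanoseconds separately and
+//   rejects out-of-range combinations.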
+#[cfg(any(feature = "std", rust_1_25))]
+#[derive(Debug, Copy, Clone)]
+enum UniformDurationMode {
+ Small {
+ secs: u64,
+ nanos: Uniform<u32>,
+ },
+ Medium {
+ nanos: Uniform<u64>,
+ },
+ Large {
+ max_secs: u64,
+ max_nanos: u32,
+ secs: Uniform<u64>,
+ }
+}
+
+#[cfg(any(feature = "std", rust_1_25))]
+impl SampleUniform for Duration {
+ type Sampler = UniformDuration;
+}
+
+#[cfg(any(feature = "std", rust_1_25))]
+impl UniformSampler for UniformDuration {
+ type X = Duration;
+
+ #[inline]
+ fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low < high, "Uniform::new called with `low >= high`");
+ UniformDuration::new_inclusive(low, high - Duration::new(0, 1))
+ }
+
+ #[inline]
+ fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ let low = *low_b.borrow();
+ let high = *high_b.borrow();
+ assert!(low <= high, "Uniform::new_inclusive called with `low > high`");
+
+ let low_s = low.as_secs();
+ let low_n = low.subsec_nanos();
+ let mut high_s = high.as_secs();
+ let mut high_n = high.subsec_nanos();
+
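+        // Borrow one second if necessary so that `high_n >= low_n`; both the
+        // `Small` and `Large` strategies below rely on this.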
+ if high_n < low_n {
+ high_s = high_s - 1;
+ high_n = high_n + 1_000_000_000;
+ }
+
+ let mode = if low_s == high_s {
+ UniformDurationMode::Small {
+ secs: low_s,
+ nanos: Uniform::new_inclusive(low_n, high_n),
+ }
+ } else {
+ let max = high_s
+ .checked_mul(1_000_000_000)
+ .and_then(|n| n.checked_add(high_n as u64));
+
+ if let Some(higher_bound) = max {
+ let lower_bound = low_s * 1_000_000_000 + low_n as u64;
+ UniformDurationMode::Medium {
+ nanos: Uniform::new_inclusive(lower_bound, higher_bound),
+ }
+ } else {
+ // An offset is applied to simplify generation of nanoseconds
+ let max_nanos = high_n - low_n;
+ UniformDurationMode::Large {
+ max_secs: high_s,
+ max_nanos,
+ secs: Uniform::new_inclusive(low_s, high_s),
+ }
+ }
+ };
+ UniformDuration {
+ mode,
+ offset: low_n,
+ }
+ }
+
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Duration {
+ match self.mode {
+ UniformDurationMode::Small { secs, nanos } => {
+ let n = nanos.sample(rng);
+ Duration::new(secs, n)
+ }
+ UniformDurationMode::Medium { nanos } => {
+ let nanos = nanos.sample(rng);
+ Duration::new(nanos / 1_000_000_000, (nanos % 1_000_000_000) as u32)
+ }
+ UniformDurationMode::Large { max_secs, max_nanos, secs } => {
+ // constant folding means this is at least as fast as `gen_range`
+ let nano_range = Uniform::new(0, 1_000_000_000);
+ loop {
+ let s = secs.sample(rng);
+ let n = nano_range.sample(rng);
+ if !(s == max_secs && n > max_nanos) {
+ let sum = n + self.offset;
+ break Duration::new(s, sum);
+ }
+ }
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use Rng;
+ use rngs::mock::StepRng;
+ use distributions::uniform::Uniform;
+ use distributions::utils::FloatAsSIMD;
+ #[cfg(feature="simd_support")] use packed_simd::*;
+
+ #[should_panic]
+ #[test]
+ fn test_uniform_bad_limits_equal_int() {
+ Uniform::new(10, 10);
+ }
+
+ #[test]
+ fn test_uniform_good_limits_equal_int() {
+ let mut rng = ::test::rng(804);
+ let dist = Uniform::new_inclusive(10, 10);
+ for _ in 0..20 {
+ assert_eq!(rng.sample(dist), 10);
+ }
+ }
+
+ #[should_panic]
+ #[test]
+ fn test_uniform_bad_limits_flipped_int() {
+ Uniform::new(10, 5);
+ }
+
+ #[test]
+ fn test_integers() {
+ use core::{i8, i16, i32, i64, isize};
+ use core::{u8, u16, u32, u64, usize};
+ #[cfg(rust_1_26)]
+ use core::{i128, u128};
+
+ let mut rng = ::test::rng(251);
+ macro_rules! t {
+ ($ty:ident, $v:expr, $le:expr, $lt:expr) => {{
+ for &(low, high) in $v.iter() {
+ let my_uniform = Uniform::new(low, high);
+ for _ in 0..1000 {
+ let v: $ty = rng.sample(my_uniform);
+ assert!($le(low, v) && $lt(v, high));
+ }
+
+ let my_uniform = Uniform::new_inclusive(low, high);
+ for _ in 0..1000 {
+ let v: $ty = rng.sample(my_uniform);
+ assert!($le(low, v) && $le(v, high));
+ }
+
+ let my_uniform = Uniform::new(&low, high);
+ for _ in 0..1000 {
+ let v: $ty = rng.sample(my_uniform);
+ assert!($le(low, v) && $lt(v, high));
+ }
+
+ let my_uniform = Uniform::new_inclusive(&low, &high);
+ for _ in 0..1000 {
+ let v: $ty = rng.sample(my_uniform);
+ assert!($le(low, v) && $le(v, high));
+ }
+
+ for _ in 0..1000 {
+ let v: $ty = rng.gen_range(low, high);
+ assert!($le(low, v) && $lt(v, high));
+ }
+ }
+ }};
+
+ // scalar bulk
+ ($($ty:ident),*) => {{
+ $(t!(
+ $ty,
+ [(0, 10), (10, 127), ($ty::MIN, $ty::MAX)],
+ |x, y| x <= y,
+ |x, y| x < y
+ );)*
+ }};
+
+ // simd bulk
+ ($($ty:ident),* => $scalar:ident) => {{
+ $(t!(
+ $ty,
+ [
+ ($ty::splat(0), $ty::splat(10)),
+ ($ty::splat(10), $ty::splat(127)),
+ ($ty::splat($scalar::MIN), $ty::splat($scalar::MAX)),
+ ],
+ |x: $ty, y| x.le(y).all(),
+ |x: $ty, y| x.lt(y).all()
+ );)*
+ }};
+ }
+ t!(i8, i16, i32, i64, isize,
+ u8, u16, u32, u64, usize);
+ #[cfg(rust_1_26)]
+ t!(i128, u128);
+
+ #[cfg(all(feature = "simd_support", feature = "nightly"))]
+ {
+ t!(u8x2, u8x4, u8x8, u8x16, u8x32, u8x64 => u8);
+ t!(i8x2, i8x4, i8x8, i8x16, i8x32, i8x64 => i8);
+ t!(u16x2, u16x4, u16x8, u16x16, u16x32 => u16);
+ t!(i16x2, i16x4, i16x8, i16x16, i16x32 => i16);
+ t!(u32x2, u32x4, u32x8, u32x16 => u32);
+ t!(i32x2, i32x4, i32x8, i32x16 => i32);
+ t!(u64x2, u64x4, u64x8 => u64);
+ t!(i64x2, i64x4, i64x8 => i64);
+ }
+ }
+
+ #[test]
+ fn test_floats() {
+ let mut rng = ::test::rng(252);
+ let mut zero_rng = StepRng::new(0, 0);
+ let mut max_rng = StepRng::new(0xffff_ffff_ffff_ffff, 0);
+ macro_rules! t {
+ ($ty:ty, $f_scalar:ident, $bits_shifted:expr) => {{
+ let v: &[($f_scalar, $f_scalar)]=
+ &[(0.0, 100.0),
+ (-1e35, -1e25),
+ (1e-35, 1e-25),
+ (-1e35, 1e35),
+ (<$f_scalar>::from_bits(0), <$f_scalar>::from_bits(3)),
+ (-<$f_scalar>::from_bits(10), -<$f_scalar>::from_bits(1)),
+ (-<$f_scalar>::from_bits(5), 0.0),
+ (-<$f_scalar>::from_bits(7), -0.0),
+ (10.0, ::core::$f_scalar::MAX),
+ (-100.0, ::core::$f_scalar::MAX),
+ (-::core::$f_scalar::MAX / 5.0, ::core::$f_scalar::MAX),
+ (-::core::$f_scalar::MAX, ::core::$f_scalar::MAX / 5.0),
+ (-::core::$f_scalar::MAX * 0.8, ::core::$f_scalar::MAX * 0.7),
+ (-::core::$f_scalar::MAX, ::core::$f_scalar::MAX),
+ ];
+ for &(low_scalar, high_scalar) in v.iter() {
+ for lane in 0..<$ty>::lanes() {
+ let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
+ let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
+ let my_uniform = Uniform::new(low, high);
+ let my_incl_uniform = Uniform::new_inclusive(low, high);
+ for _ in 0..100 {
+ let v = rng.sample(my_uniform).extract(lane);
+ assert!(low_scalar <= v && v < high_scalar);
+ let v = rng.sample(my_incl_uniform).extract(lane);
+ assert!(low_scalar <= v && v <= high_scalar);
+ let v = rng.gen_range(low, high).extract(lane);
+ assert!(low_scalar <= v && v < high_scalar);
+ }
+
+ assert_eq!(rng.sample(Uniform::new_inclusive(low, low)).extract(lane), low_scalar);
+
+ assert_eq!(zero_rng.sample(my_uniform).extract(lane), low_scalar);
+ assert_eq!(zero_rng.sample(my_incl_uniform).extract(lane), low_scalar);
+ assert_eq!(zero_rng.gen_range(low, high).extract(lane), low_scalar);
+ assert!(max_rng.sample(my_uniform).extract(lane) < high_scalar);
+ assert!(max_rng.sample(my_incl_uniform).extract(lane) <= high_scalar);
+
+ // Don't run this test for really tiny differences between high and low
+ // since for those rounding might result in selecting high for a very
+ // long time.
+ if (high_scalar - low_scalar) > 0.0001 {
+ let mut lowering_max_rng =
+ StepRng::new(0xffff_ffff_ffff_ffff,
+ (-1i64 << $bits_shifted) as u64);
+ assert!(lowering_max_rng.gen_range(low, high).extract(lane) < high_scalar);
+ }
+ }
+ }
+
+ assert_eq!(rng.sample(Uniform::new_inclusive(::core::$f_scalar::MAX,
+ ::core::$f_scalar::MAX)),
+ ::core::$f_scalar::MAX);
+ assert_eq!(rng.sample(Uniform::new_inclusive(-::core::$f_scalar::MAX,
+ -::core::$f_scalar::MAX)),
+ -::core::$f_scalar::MAX);
+ }}
+ }
+
+ t!(f32, f32, 32 - 23);
+ t!(f64, f64, 64 - 52);
+ #[cfg(feature="simd_support")]
+ {
+ t!(f32x2, f32, 32 - 23);
+ t!(f32x4, f32, 32 - 23);
+ t!(f32x8, f32, 32 - 23);
+ t!(f32x16, f32, 32 - 23);
+ t!(f64x2, f64, 64 - 52);
+ t!(f64x4, f64, 64 - 52);
+ t!(f64x8, f64, 64 - 52);
+ }
+ }
+
+ #[test]
+ #[cfg(all(feature="std",
+ not(target_arch = "wasm32"),
+ not(target_arch = "asmjs")))]
+ fn test_float_assertions() {
+ use std::panic::catch_unwind;
+ use super::SampleUniform;
+ fn range<T: SampleUniform>(low: T, high: T) {
+ let mut rng = ::test::rng(253);
+ rng.gen_range(low, high);
+ }
+
+ macro_rules! t {
+ ($ty:ident, $f_scalar:ident) => {{
+ let v: &[($f_scalar, $f_scalar)] =
+ &[(::std::$f_scalar::NAN, 0.0),
+ (1.0, ::std::$f_scalar::NAN),
+ (::std::$f_scalar::NAN, ::std::$f_scalar::NAN),
+ (1.0, 0.5),
+ (::std::$f_scalar::MAX, -::std::$f_scalar::MAX),
+ (::std::$f_scalar::INFINITY, ::std::$f_scalar::INFINITY),
+ (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::NEG_INFINITY),
+ (::std::$f_scalar::NEG_INFINITY, 5.0),
+ (5.0, ::std::$f_scalar::INFINITY),
+ (::std::$f_scalar::NAN, ::std::$f_scalar::INFINITY),
+ (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::NAN),
+ (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::INFINITY),
+ ];
+ for &(low_scalar, high_scalar) in v.iter() {
+ for lane in 0..<$ty>::lanes() {
+ let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
+ let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
+ assert!(catch_unwind(|| range(low, high)).is_err());
+ assert!(catch_unwind(|| Uniform::new(low, high)).is_err());
+ assert!(catch_unwind(|| Uniform::new_inclusive(low, high)).is_err());
+ assert!(catch_unwind(|| range(low, low)).is_err());
+ assert!(catch_unwind(|| Uniform::new(low, low)).is_err());
+ }
+ }
+ }}
+ }
+
+ t!(f32, f32);
+ t!(f64, f64);
+ #[cfg(feature="simd_support")]
+ {
+ t!(f32x2, f32);
+ t!(f32x4, f32);
+ t!(f32x8, f32);
+ t!(f32x16, f32);
+ t!(f64x2, f64);
+ t!(f64x4, f64);
+ t!(f64x8, f64);
+ }
+ }
+
+
+ #[test]
+ #[cfg(any(feature = "std", rust_1_25))]
+ fn test_durations() {
+ #[cfg(feature = "std")]
+ use std::time::Duration;
+ #[cfg(all(not(feature = "std"), rust_1_25))]
+ use core::time::Duration;
+
+ let mut rng = ::test::rng(253);
+
+ let v = &[(Duration::new(10, 50000), Duration::new(100, 1234)),
+ (Duration::new(0, 100), Duration::new(1, 50)),
+ (Duration::new(0, 0), Duration::new(u64::max_value(), 999_999_999))];
+ for &(low, high) in v.iter() {
+ let my_uniform = Uniform::new(low, high);
+ for _ in 0..1000 {
+ let v = rng.sample(my_uniform);
+ assert!(low <= v && v < high);
+ }
+ }
+ }
+
+ #[test]
+ fn test_custom_uniform() {
+ use distributions::uniform::{UniformSampler, UniformFloat, SampleUniform, SampleBorrow};
+ #[derive(Clone, Copy, PartialEq, PartialOrd)]
+ struct MyF32 {
+ x: f32,
+ }
+ #[derive(Clone, Copy, Debug)]
+ struct UniformMyF32 {
+ inner: UniformFloat<f32>,
+ }
+ impl UniformSampler for UniformMyF32 {
+ type X = MyF32;
+ fn new<B1, B2>(low: B1, high: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ UniformMyF32 {
+ inner: UniformFloat::<f32>::new(low.borrow().x, high.borrow().x),
+ }
+ }
+ fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
+ where B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized
+ {
+ UniformSampler::new(low, high)
+ }
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ MyF32 { x: self.inner.sample(rng) }
+ }
+ }
+ impl SampleUniform for MyF32 {
+ type Sampler = UniformMyF32;
+ }
+
+ let (low, high) = (MyF32{ x: 17.0f32 }, MyF32{ x: 22.0f32 });
+ let uniform = Uniform::new(low, high);
+ let mut rng = ::test::rng(804);
+ for _ in 0..100 {
+ let x: MyF32 = rng.sample(uniform);
+ assert!(low <= x && x < high);
+ }
+ }
+
+ #[test]
+ fn test_uniform_from_std_range() {
+ let r = Uniform::from(2u32..7);
+ assert_eq!(r.inner.low, 2);
+ assert_eq!(r.inner.range, 5);
+ let r = Uniform::from(2.0f64..7.0);
+ assert_eq!(r.inner.low, 2.0);
+ assert_eq!(r.inner.scale, 5.0);
+ }
+
+ #[cfg(rust_1_27)]
+ #[test]
+ fn test_uniform_from_std_range_inclusive() {
+ let r = Uniform::from(2u32..=6);
+ assert_eq!(r.inner.low, 2);
+ assert_eq!(r.inner.range, 5);
+ let r = Uniform::from(2.0f64..=7.0);
+ assert_eq!(r.inner.low, 2.0);
+ assert!(r.inner.scale > 5.0);
+ assert!(r.inner.scale < 5.0 + 1e-14);
+ }
+}
diff --git a/rand/src/distributions/unit_circle.rs b/rand/src/distributions/unit_circle.rs
new file mode 100644
index 0000000..abb36dc
--- /dev/null
+++ b/rand/src/distributions/unit_circle.rs
@@ -0,0 +1,102 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use Rng;
+use distributions::{Distribution, Uniform};
+
+/// Samples uniformly from the edge of the unit circle in two dimensions.
+///
+/// Implemented via a method by von Neumann[^1].
+///
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{UnitCircle, Distribution};
+///
+/// let circle = UnitCircle::new();
+/// let v = circle.sample(&mut rand::thread_rng());
+/// println!("{:?} is from the unit circle.", v)
+/// ```
+///
+/// [^1]: von Neumann, J. (1951) [*Various Techniques Used in Connection with
+/// Random Digits.*](https://mcnp.lanl.gov/pdf_files/nbs_vonneumann.pdf)
+/// NBS Appl. Math. Ser., No. 12. Washington, DC: U.S. Government Printing
+/// Office, pp. 36-38.
+#[derive(Clone, Copy, Debug)]
+pub struct UnitCircle {
+ uniform: Uniform<f64>,
+}
+
+impl UnitCircle {
+ /// Construct a new `UnitCircle` distribution.
+ #[inline]
+ pub fn new() -> UnitCircle {
+ UnitCircle { uniform: Uniform::new(-1., 1.) }
+ }
+}
+
+impl Distribution<[f64; 2]> for UnitCircle {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> [f64; 2] {
+ let mut x1;
+ let mut x2;
+ let mut sum;
+ loop {
+ x1 = self.uniform.sample(rng);
+ x2 = self.uniform.sample(rng);
+ sum = x1*x1 + x2*x2;
+ if sum < 1. {
+ break;
+ }
+ }
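+        // (x1, x2) is now uniform inside the unit disk. If its polar angle is
+        // theta, the double-angle identities give a point on the circle at
+        // angle 2*theta without calling any trigonometric functions:
+        // cos(2*theta) = (x1^2 - x2^2) / sum, sin(2*theta) = 2*x1*x2 / sum.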
+ let diff = x1*x1 - x2*x2;
+ [diff / sum, 2.*x1*x2 / sum]
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use distributions::Distribution;
+ use super::UnitCircle;
+
+ /// Assert that two numbers are almost equal to each other.
+ ///
+ /// On panic, this macro will print the values of the expressions with their
+ /// debug representations.
+ macro_rules! assert_almost_eq {
+ ($a:expr, $b:expr, $prec:expr) => (
+ let diff = ($a - $b).abs();
+ if diff > $prec {
+ panic!(format!(
+ "assertion failed: `abs(left - right) = {:.1e} < {:e}`, \
+ (left: `{}`, right: `{}`)",
+ diff, $prec, $a, $b));
+ }
+ );
+ }
+
+ #[test]
+ fn norm() {
+ let mut rng = ::test::rng(1);
+ let dist = UnitCircle::new();
+ for _ in 0..1000 {
+ let x = dist.sample(&mut rng);
+ assert_almost_eq!(x[0]*x[0] + x[1]*x[1], 1., 1e-15);
+ }
+ }
+
+ #[test]
+ fn value_stability() {
+ let mut rng = ::test::rng(2);
+ let dist = UnitCircle::new();
+ assert_eq!(dist.sample(&mut rng), [-0.8032118336637037, 0.5956935036263119]);
+ assert_eq!(dist.sample(&mut rng), [-0.4742919588505423, -0.880367615130018]);
+ assert_eq!(dist.sample(&mut rng), [0.9297328981467168, 0.368234623716601]);
+ }
+}
diff --git a/rand/src/distributions/unit_sphere.rs b/rand/src/distributions/unit_sphere.rs
new file mode 100644
index 0000000..61cbda5
--- /dev/null
+++ b/rand/src/distributions/unit_sphere.rs
@@ -0,0 +1,100 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use Rng;
+use distributions::{Distribution, Uniform};
+
+/// Samples uniformly from the surface of the unit sphere in three dimensions.
+///
+/// Implemented via a method by Marsaglia[^1].
+///
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{UnitSphereSurface, Distribution};
+///
+/// let sphere = UnitSphereSurface::new();
+/// let v = sphere.sample(&mut rand::thread_rng());
+/// println!("{:?} is from the unit sphere surface.", v)
+/// ```
+///
+/// [^1]: Marsaglia, George (1972). [*Choosing a Point from the Surface of a
+/// Sphere.*](https://doi.org/10.1214/aoms/1177692644)
+/// Ann. Math. Statist. 43, no. 2, 645--646.
+#[derive(Clone, Copy, Debug)]
+pub struct UnitSphereSurface {
+ uniform: Uniform<f64>,
+}
+
+impl UnitSphereSurface {
+ /// Construct a new `UnitSphereSurface` distribution.
+ #[inline]
+ pub fn new() -> UnitSphereSurface {
+ UnitSphereSurface { uniform: Uniform::new(-1., 1.) }
+ }
+}
+
+impl Distribution<[f64; 3]> for UnitSphereSurface {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> [f64; 3] {
+ loop {
+ let (x1, x2) = (self.uniform.sample(rng), self.uniform.sample(rng));
+ let sum = x1*x1 + x2*x2;
+ if sum >= 1. {
+ continue;
+ }
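+            // Marsaglia's map from the unit disk to the sphere: the height
+            // z = 1 - 2*sum is uniformly distributed, and scaling (x1, x2) by
+            // 2*sqrt(1 - sum) puts the result on x^2 + y^2 + z^2 = 1.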
+ let factor = 2. * (1.0_f64 - sum).sqrt();
+ return [x1 * factor, x2 * factor, 1. - 2.*sum];
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use distributions::Distribution;
+ use super::UnitSphereSurface;
+
+ /// Assert that two numbers are almost equal to each other.
+ ///
+ /// On panic, this macro will print the values of the expressions with their
+ /// debug representations.
+ macro_rules! assert_almost_eq {
+ ($a:expr, $b:expr, $prec:expr) => (
+ let diff = ($a - $b).abs();
+ if diff > $prec {
+ panic!(format!(
+ "assertion failed: `abs(left - right) = {:.1e} < {:e}`, \
+ (left: `{}`, right: `{}`)",
+ diff, $prec, $a, $b));
+ }
+ );
+ }
+
+ #[test]
+ fn norm() {
+ let mut rng = ::test::rng(1);
+ let dist = UnitSphereSurface::new();
+ for _ in 0..1000 {
+ let x = dist.sample(&mut rng);
+ assert_almost_eq!(x[0]*x[0] + x[1]*x[1] + x[2]*x[2], 1., 1e-15);
+ }
+ }
+
+ #[test]
+ fn value_stability() {
+ let mut rng = ::test::rng(2);
+ let dist = UnitSphereSurface::new();
+ assert_eq!(dist.sample(&mut rng),
+ [-0.24950027180862533, -0.7552572587896719, 0.6060825747478084]);
+ assert_eq!(dist.sample(&mut rng),
+ [0.47604534507233487, -0.797200864987207, -0.3712837328763685]);
+ assert_eq!(dist.sample(&mut rng),
+ [0.9795722330927367, 0.18692349236651176, 0.07414747571708524]);
+ }
+}
diff --git a/rand/src/distributions/utils.rs b/rand/src/distributions/utils.rs
new file mode 100644
index 0000000..a2112fd
--- /dev/null
+++ b/rand/src/distributions/utils.rs
@@ -0,0 +1,504 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Math helper functions
+
+#[cfg(feature="simd_support")]
+use packed_simd::*;
+#[cfg(feature="std")]
+use distributions::ziggurat_tables;
+#[cfg(feature="std")]
+use Rng;
+
+
+pub trait WideningMultiply<RHS = Self> {
+ type Output;
+
+ fn wmul(self, x: RHS) -> Self::Output;
+}
+
+macro_rules! wmul_impl {
+ ($ty:ty, $wide:ty, $shift:expr) => {
+ impl WideningMultiply for $ty {
+ type Output = ($ty, $ty);
+
+ #[inline(always)]
+ fn wmul(self, x: $ty) -> Self::Output {
+ let tmp = (self as $wide) * (x as $wide);
+ ((tmp >> $shift) as $ty, tmp as $ty)
+ }
+ }
+ };
+
+ // simd bulk implementation
+ ($(($ty:ident, $wide:ident),)+, $shift:expr) => {
+ $(
+ impl WideningMultiply for $ty {
+ type Output = ($ty, $ty);
+
+ #[inline(always)]
+ fn wmul(self, x: $ty) -> Self::Output {
+ // For supported vectors, this should compile to a couple
+ // supported multiply & swizzle instructions (no actual
+ // casting).
+ // TODO: optimize
+ let y: $wide = self.cast();
+ let x: $wide = x.cast();
+ let tmp = y * x;
+ let hi: $ty = (tmp >> $shift).cast();
+ let lo: $ty = tmp.cast();
+ (hi, lo)
+ }
+ }
+ )+
+ };
+}
+wmul_impl! { u8, u16, 8 }
+wmul_impl! { u16, u32, 16 }
+wmul_impl! { u32, u64, 32 }
+#[cfg(rust_1_26)]
+wmul_impl! { u64, u128, 64 }
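+
+// A quick illustration of the `wmul` contract: it returns the (high, low)
+// words of the full-width product. For example, `200u8.wmul(100)` yields
+// `(78, 32)`, because 200 * 100 = 20000 = 78 * 256 + 32.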
+
+// This code is a translation of the __mulddi3 function in LLVM's
+// compiler-rt. It is an optimised variant of the common method
+// `(a + b) * (c + d) = ac + ad + bc + bd`.
+//
+// For some reason LLVM can optimise the C version very well, but
+// keeps shuffling registers in this Rust translation.
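+//
+// With `h` equal to half the bit width and the operands split as
+// `a = a_hi * 2^h + a_lo` and `b = b_hi * 2^h + b_lo`, the full product is
+// `a_hi*b_hi * 2^(2h) + (a_hi*b_lo + a_lo*b_hi) * 2^h + a_lo*b_lo`;
+// the shifts and masks below accumulate exactly that into (high, low).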
+macro_rules! wmul_impl_large {
+ ($ty:ty, $half:expr) => {
+ impl WideningMultiply for $ty {
+ type Output = ($ty, $ty);
+
+ #[inline(always)]
+ fn wmul(self, b: $ty) -> Self::Output {
+ const LOWER_MASK: $ty = !0 >> $half;
+ let mut low = (self & LOWER_MASK).wrapping_mul(b & LOWER_MASK);
+ let mut t = low >> $half;
+ low &= LOWER_MASK;
+ t += (self >> $half).wrapping_mul(b & LOWER_MASK);
+ low += (t & LOWER_MASK) << $half;
+ let mut high = t >> $half;
+ t = low >> $half;
+ low &= LOWER_MASK;
+ t += (b >> $half).wrapping_mul(self & LOWER_MASK);
+ low += (t & LOWER_MASK) << $half;
+ high += t >> $half;
+ high += (self >> $half).wrapping_mul(b >> $half);
+
+ (high, low)
+ }
+ }
+ };
+
+ // simd bulk implementation
+ (($($ty:ty,)+) $scalar:ty, $half:expr) => {
+ $(
+ impl WideningMultiply for $ty {
+ type Output = ($ty, $ty);
+
+ #[inline(always)]
+ fn wmul(self, b: $ty) -> Self::Output {
+ // needs wrapping multiplication
+ const LOWER_MASK: $scalar = !0 >> $half;
+ let mut low = (self & LOWER_MASK) * (b & LOWER_MASK);
+ let mut t = low >> $half;
+ low &= LOWER_MASK;
+ t += (self >> $half) * (b & LOWER_MASK);
+ low += (t & LOWER_MASK) << $half;
+ let mut high = t >> $half;
+ t = low >> $half;
+ low &= LOWER_MASK;
+ t += (b >> $half) * (self & LOWER_MASK);
+ low += (t & LOWER_MASK) << $half;
+ high += t >> $half;
+ high += (self >> $half) * (b >> $half);
+
+ (high, low)
+ }
+ }
+ )+
+ };
+}
+#[cfg(not(rust_1_26))]
+wmul_impl_large! { u64, 32 }
+#[cfg(rust_1_26)]
+wmul_impl_large! { u128, 64 }
+
+macro_rules! wmul_impl_usize {
+ ($ty:ty) => {
+ impl WideningMultiply for usize {
+ type Output = (usize, usize);
+
+ #[inline(always)]
+ fn wmul(self, x: usize) -> Self::Output {
+ let (high, low) = (self as $ty).wmul(x as $ty);
+ (high as usize, low as usize)
+ }
+ }
+ }
+}
+#[cfg(target_pointer_width = "32")]
+wmul_impl_usize! { u32 }
+#[cfg(target_pointer_width = "64")]
+wmul_impl_usize! { u64 }
+
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+mod simd_wmul {
+ #[cfg(target_arch = "x86")]
+ use core::arch::x86::*;
+ #[cfg(target_arch = "x86_64")]
+ use core::arch::x86_64::*;
+ use super::*;
+
+ wmul_impl! {
+ (u8x2, u16x2),
+ (u8x4, u16x4),
+ (u8x8, u16x8),
+ (u8x16, u16x16),
+ (u8x32, u16x32),,
+ 8
+ }
+
+ wmul_impl! { (u16x2, u32x2),, 16 }
+ #[cfg(not(target_feature = "sse2"))]
+ wmul_impl! { (u16x4, u32x4),, 16 }
+ #[cfg(not(target_feature = "sse4.2"))]
+ wmul_impl! { (u16x8, u32x8),, 16 }
+ #[cfg(not(target_feature = "avx2"))]
+ wmul_impl! { (u16x16, u32x16),, 16 }
+
+ // 16-bit lane widths allow use of the x86 `mulhi` instructions, which
+ // means `wmul` can be implemented with only two instructions.
+ #[allow(unused_macros)]
+ macro_rules! wmul_impl_16 {
+ ($ty:ident, $intrinsic:ident, $mulhi:ident, $mullo:ident) => {
+ impl WideningMultiply for $ty {
+ type Output = ($ty, $ty);
+
+ #[inline(always)]
+ fn wmul(self, x: $ty) -> Self::Output {
+ let b = $intrinsic::from_bits(x);
+ let a = $intrinsic::from_bits(self);
+ let hi = $ty::from_bits(unsafe { $mulhi(a, b) });
+ let lo = $ty::from_bits(unsafe { $mullo(a, b) });
+ (hi, lo)
+ }
+ }
+ };
+ }
+
+ #[cfg(target_feature = "sse2")]
+ wmul_impl_16! { u16x4, __m64, _mm_mulhi_pu16, _mm_mullo_pi16 }
+ #[cfg(target_feature = "sse4.2")]
+ wmul_impl_16! { u16x8, __m128i, _mm_mulhi_epu16, _mm_mullo_epi16 }
+ #[cfg(target_feature = "avx2")]
+ wmul_impl_16! { u16x16, __m256i, _mm256_mulhi_epu16, _mm256_mullo_epi16 }
+ // FIXME: there are no `__m512i` types in stdsimd yet, so `wmul::<u16x32>`
+ // cannot use the same implementation.
+
+ wmul_impl! {
+ (u32x2, u64x2),
+ (u32x4, u64x4),
+ (u32x8, u64x8),,
+ 32
+ }
+
+ // TODO: optimize, this seems to seriously slow things down
+ wmul_impl_large! { (u8x64,) u8, 4 }
+ wmul_impl_large! { (u16x32,) u16, 8 }
+ wmul_impl_large! { (u32x16,) u32, 16 }
+ wmul_impl_large! { (u64x2, u64x4, u64x8,) u64, 32 }
+}
+#[cfg(all(feature = "simd_support", feature = "nightly"))]
+pub use self::simd_wmul::*;
+
+
+/// Helper trait when dealing with scalar and SIMD floating point types.
+pub(crate) trait FloatSIMDUtils {
+ // `PartialOrd` for vectors compares lexicographically. We want to compare all
+ // the individual SIMD lanes instead, and get the combined result over all
+ // lanes. This is possible using something like `a.lt(b).all()`, but we
+ // implement it as a trait so we can write the same code for `f32` and `f64`.
+ // Only the comparison functions we need are implemented.
+ fn all_lt(self, other: Self) -> bool;
+ fn all_le(self, other: Self) -> bool;
+ fn all_finite(self) -> bool;
+
+ type Mask;
+ fn finite_mask(self) -> Self::Mask;
+ fn gt_mask(self, other: Self) -> Self::Mask;
+ fn ge_mask(self, other: Self) -> Self::Mask;
+
+ // Decrease all lanes where the mask is `true` to the next lower value
+ // representable by the floating-point type. At least one of the lanes
+ // must be set.
+ fn decrease_masked(self, mask: Self::Mask) -> Self;
+
+ // Convert from int value. Conversion is done while retaining the numerical
+ // value, not by retaining the binary representation.
+ type UInt;
+ fn cast_from_int(i: Self::UInt) -> Self;
+}
+
+/// Implement functions available in std builds but missing from core primitives
+#[cfg(not(std))]
+pub(crate) trait Float : Sized {
+ type Bits;
+
+ fn is_nan(self) -> bool;
+ fn is_infinite(self) -> bool;
+ fn is_finite(self) -> bool;
+ fn to_bits(self) -> Self::Bits;
+ fn from_bits(v: Self::Bits) -> Self;
+}
+
+/// Implement functions on f32/f64 to give them APIs similar to SIMD types
+pub(crate) trait FloatAsSIMD : Sized {
+ #[inline(always)]
+ fn lanes() -> usize { 1 }
+ #[inline(always)]
+ fn splat(scalar: Self) -> Self { scalar }
+ #[inline(always)]
+ fn extract(self, index: usize) -> Self { debug_assert_eq!(index, 0); self }
+ #[inline(always)]
+ fn replace(self, index: usize, new_value: Self) -> Self { debug_assert_eq!(index, 0); new_value }
+}
+
+pub(crate) trait BoolAsSIMD : Sized {
+ fn any(self) -> bool;
+ fn all(self) -> bool;
+ fn none(self) -> bool;
+}
+
+impl BoolAsSIMD for bool {
+ #[inline(always)]
+ fn any(self) -> bool { self }
+ #[inline(always)]
+ fn all(self) -> bool { self }
+ #[inline(always)]
+ fn none(self) -> bool { !self }
+}
+
+macro_rules! scalar_float_impl {
+ ($ty:ident, $uty:ident) => {
+ #[cfg(not(std))]
+ impl Float for $ty {
+ type Bits = $uty;
+
+ #[inline]
+ fn is_nan(self) -> bool {
+ self != self
+ }
+
+ #[inline]
+ fn is_infinite(self) -> bool {
+ self == ::core::$ty::INFINITY || self == ::core::$ty::NEG_INFINITY
+ }
+
+ #[inline]
+ fn is_finite(self) -> bool {
+ !(self.is_nan() || self.is_infinite())
+ }
+
+ #[inline]
+ fn to_bits(self) -> Self::Bits {
+ unsafe { ::core::mem::transmute(self) }
+ }
+
+ #[inline]
+ fn from_bits(v: Self::Bits) -> Self {
+ // It turns out the safety issues with sNaN were overblown! Hooray!
+ unsafe { ::core::mem::transmute(v) }
+ }
+ }
+
+ impl FloatSIMDUtils for $ty {
+ type Mask = bool;
+ #[inline(always)]
+ fn all_lt(self, other: Self) -> bool { self < other }
+ #[inline(always)]
+ fn all_le(self, other: Self) -> bool { self <= other }
+ #[inline(always)]
+ fn all_finite(self) -> bool { self.is_finite() }
+ #[inline(always)]
+ fn finite_mask(self) -> Self::Mask { self.is_finite() }
+ #[inline(always)]
+ fn gt_mask(self, other: Self) -> Self::Mask { self > other }
+ #[inline(always)]
+ fn ge_mask(self, other: Self) -> Self::Mask { self >= other }
+ #[inline(always)]
+ fn decrease_masked(self, mask: Self::Mask) -> Self {
+ debug_assert!(mask, "At least one lane must be set");
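+                // For the non-negative values this is used with, subtracting
+                // one from the bit pattern gives the next representable value
+                // toward zero (or `MAX` when starting from infinity).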
+ <$ty>::from_bits(self.to_bits() - 1)
+ }
+ type UInt = $uty;
+ fn cast_from_int(i: Self::UInt) -> Self { i as $ty }
+ }
+
+ impl FloatAsSIMD for $ty {}
+ }
+}
+
+scalar_float_impl!(f32, u32);
+scalar_float_impl!(f64, u64);
+
+
+#[cfg(feature="simd_support")]
+macro_rules! simd_impl {
+ ($ty:ident, $f_scalar:ident, $mty:ident, $uty:ident) => {
+ impl FloatSIMDUtils for $ty {
+ type Mask = $mty;
+ #[inline(always)]
+ fn all_lt(self, other: Self) -> bool { self.lt(other).all() }
+ #[inline(always)]
+ fn all_le(self, other: Self) -> bool { self.le(other).all() }
+ #[inline(always)]
+ fn all_finite(self) -> bool { self.finite_mask().all() }
+ #[inline(always)]
+ fn finite_mask(self) -> Self::Mask {
+ // This can possibly be done faster by checking bit patterns
+ let neg_inf = $ty::splat(::core::$f_scalar::NEG_INFINITY);
+ let pos_inf = $ty::splat(::core::$f_scalar::INFINITY);
+ self.gt(neg_inf) & self.lt(pos_inf)
+ }
+ #[inline(always)]
+ fn gt_mask(self, other: Self) -> Self::Mask { self.gt(other) }
+ #[inline(always)]
+ fn ge_mask(self, other: Self) -> Self::Mask { self.ge(other) }
+ #[inline(always)]
+ fn decrease_masked(self, mask: Self::Mask) -> Self {
+ // Casting a mask into ints will produce all bits set for
+ // true, and 0 for false. Adding that to the binary
+ // representation of a float means subtracting one from
+ // the binary representation, resulting in the next lower
+ // value representable by $ty. This works even when the
+ // current value is infinity.
+ debug_assert!(mask.any(), "At least one lane must be set");
+ <$ty>::from_bits(<$uty>::from_bits(self) + <$uty>::from_bits(mask))
+ }
+ type UInt = $uty;
+ fn cast_from_int(i: Self::UInt) -> Self { i.cast() }
+ }
+ }
+}
+
+#[cfg(feature="simd_support")] simd_impl! { f32x2, f32, m32x2, u32x2 }
+#[cfg(feature="simd_support")] simd_impl! { f32x4, f32, m32x4, u32x4 }
+#[cfg(feature="simd_support")] simd_impl! { f32x8, f32, m32x8, u32x8 }
+#[cfg(feature="simd_support")] simd_impl! { f32x16, f32, m32x16, u32x16 }
+#[cfg(feature="simd_support")] simd_impl! { f64x2, f64, m64x2, u64x2 }
+#[cfg(feature="simd_support")] simd_impl! { f64x4, f64, m64x4, u64x4 }
+#[cfg(feature="simd_support")] simd_impl! { f64x8, f64, m64x8, u64x8 }
+
+/// Calculates ln(gamma(x)) (natural logarithm of the gamma
+/// function) using the Lanczos approximation.
+///
+/// The approximation expresses the gamma function as:
+/// `gamma(z+1) = sqrt(2*pi)*(z+g+0.5)^(z+0.5)*exp(-z-g-0.5)*Ag(z)`
+/// `g` is an arbitrary constant; we use the approximation with `g=5`.
+///
+/// Noting that `gamma(z+1) = z*gamma(z)` and applying `ln` to both sides:
+/// `ln(gamma(z)) = (z+0.5)*ln(z+g+0.5)-(z+g+0.5) + ln(sqrt(2*pi)*Ag(z)/z)`
+///
+/// `Ag(z)` is an infinite series with coefficients that can be calculated
+/// ahead of time - we use just the first 6 terms, which is good enough
+/// for most purposes.
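+///
+/// For example, since `gamma(n) = (n - 1)!` for positive integers,
+/// `log_gamma(5.0)` should be close to `ln(4!) = ln(24)`, about `3.178`.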
+#[cfg(feature="std")]
+pub fn log_gamma(x: f64) -> f64 {
+ // precalculated 6 coefficients for the first 6 terms of the series
+ let coefficients: [f64; 6] = [
+ 76.18009172947146,
+ -86.50532032941677,
+ 24.01409824083091,
+ -1.231739572450155,
+ 0.1208650973866179e-2,
+ -0.5395239384953e-5,
+ ];
+
+ // (x+0.5)*ln(x+g+0.5)-(x+g+0.5)
+ let tmp = x + 5.5;
+ let log = (x + 0.5) * tmp.ln() - tmp;
+
+ // the first few terms of the series for Ag(x)
+ let mut a = 1.000000000190015;
+ let mut denom = x;
+ for coeff in &coefficients {
+ denom += 1.0;
+ a += coeff / denom;
+ }
+
+ // get everything together
+ // a is Ag(x)
+ // 2.5066... is sqrt(2pi)
+ log + (2.5066282746310005 * a / x).ln()
+}
+
+/// Sample a random number using the Ziggurat method (specifically the
+/// ZIGNOR variant from Doornik 2005). Most of the arguments are
+/// directly from the paper:
+///
+/// * `rng`: source of randomness
+/// * `symmetric`: whether this is a symmetric distribution, or one-sided with P(x < 0) = 0.
+/// * `X`: the $x_i$ abscissae.
+/// * `F`: precomputed values of the PDF at the $x_i$, (i.e. $f(x_i)$)
+/// * `F_DIFF`: precomputed values of $f(x_i) - f(x_{i+1})$
+/// * `pdf`: the probability density function
+/// * `zero_case`: manual sampling from the tail when we chose the
+/// bottom box (i.e. i == 0)
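+///
+/// Each iteration draws a single `u64`, using the low 8 bits to pick the
+/// layer `i` and the high bits for the horizontal position `u`. Points that
+/// certainly lie under the PDF are accepted without evaluating `pdf`; the
+/// bottom layer is delegated to `zero_case`, and the remaining borderline
+/// points are tested against `pdf` directly.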
+
+// the perf improvement (25-50%) is definitely worth the extra code
+// size from force-inlining.
+#[cfg(feature="std")]
+#[inline(always)]
+pub fn ziggurat<R: Rng + ?Sized, P, Z>(
+ rng: &mut R,
+ symmetric: bool,
+ x_tab: ziggurat_tables::ZigTable,
+ f_tab: ziggurat_tables::ZigTable,
+ mut pdf: P,
+ mut zero_case: Z)
+ -> f64 where P: FnMut(f64) -> f64, Z: FnMut(&mut R, f64) -> f64 {
+ use distributions::float::IntoFloat;
+ loop {
+ // As an optimisation we re-implement the conversion to a f64.
+ // From the remaining 12 most significant bits we use 8 to construct `i`.
+ // This saves us generating a whole extra random number, while the added
+ // precision of using 64 bits for f64 does not buy us much.
+ let bits = rng.next_u64();
+ let i = bits as usize & 0xff;
+
+ let u = if symmetric {
+            // Convert to a value in the range [2,4) and subtract to get [-1,1)
+            // We can't convert to an open range directly; that would require
+            // subtracting `3.0 - EPSILON`, which is not representable.
+            // It is possible with an extra step, but an open range does not
+            // seem necessary for the ziggurat algorithm anyway.
+            (bits >> 12).into_float_with_exponent(1) - 3.0
+        } else {
+            // Convert to a value in the range [1,2) and subtract to get (0,1)
+            (bits >> 12).into_float_with_exponent(0)
+                - (1.0 - ::core::f64::EPSILON / 2.0)
+ };
+ let x = u * x_tab[i];
+
+ let test_x = if symmetric { x.abs() } else {x};
+
+ // algebraically equivalent to |u| < x_tab[i+1]/x_tab[i] (or u < x_tab[i+1]/x_tab[i])
+ if test_x < x_tab[i + 1] {
+ return x;
+ }
+ if i == 0 {
+ return zero_case(rng, u);
+ }
+ // algebraically equivalent to f1 + DRanU()*(f0 - f1) < 1
+ if f_tab[i + 1] + (f_tab[i] - f_tab[i + 1]) * rng.gen::<f64>() < pdf(x) {
+ return x;
+ }
+ }
+}
diff --git a/rand/src/distributions/weibull.rs b/rand/src/distributions/weibull.rs
new file mode 100644
index 0000000..5fbe10a
--- /dev/null
+++ b/rand/src/distributions/weibull.rs
@@ -0,0 +1,71 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Weibull distribution.
+
+use Rng;
+use distributions::{Distribution, OpenClosed01};
+
+/// Samples floating-point numbers according to the Weibull distribution.
+///
+/// # Example
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::Weibull;
+///
+/// let val: f64 = SmallRng::from_entropy().sample(Weibull::new(1., 10.));
+/// println!("{}", val);
+/// ```
+#[derive(Clone, Copy, Debug)]
+pub struct Weibull {
+ inv_shape: f64,
+ scale: f64,
+}
+
+impl Weibull {
+ /// Construct a new `Weibull` distribution with given `scale` and `shape`.
+ ///
+ /// # Panics
+ ///
+ /// `scale` and `shape` must both be greater than zero.
+ pub fn new(scale: f64, shape: f64) -> Weibull {
+ assert!((scale > 0.) & (shape > 0.));
+ Weibull { inv_shape: 1./shape, scale }
+ }
+}
+
+impl Distribution<f64> for Weibull {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
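+ // Inversion method: the Weibull CDF is F(y) = 1 - exp(-(y/scale)^shape), so
+ // y = scale * (-ln(1 - u))^(1/shape) for uniform u in [0,1). The OpenClosed01
+ // draw below plays the role of `1 - u`: it lies in (0,1], so `ln` is never
+ // taken at zero.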
+ let x: f64 = rng.sample(OpenClosed01);
+ self.scale * (-x.ln()).powf(self.inv_shape)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use distributions::Distribution;
+ use super::Weibull;
+
+ #[test]
+ #[should_panic]
+ fn invalid() {
+ Weibull::new(0., 0.);
+ }
+
+ #[test]
+ fn sample() {
+ let scale = 1.0;
+ let shape = 2.0;
+ let d = Weibull::new(scale, shape);
+ let mut rng = ::test::rng(1);
+ for _ in 0..1000 {
+ let r = d.sample(&mut rng);
+ assert!(r >= 0.);
+ }
+ }
+}
diff --git a/rand/src/distributions/weighted.rs b/rand/src/distributions/weighted.rs
new file mode 100644
index 0000000..01c8fe6
--- /dev/null
+++ b/rand/src/distributions/weighted.rs
@@ -0,0 +1,232 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use Rng;
+use distributions::Distribution;
+use distributions::uniform::{UniformSampler, SampleUniform, SampleBorrow};
+use ::core::cmp::PartialOrd;
+use core::fmt;
+
+// Note that this whole module is only imported if feature="alloc" is enabled.
+#[cfg(not(feature="std"))] use alloc::vec::Vec;
+
+/// A distribution using weighted sampling to pick a discretely selected
+/// item.
+///
+/// Sampling a `WeightedIndex` distribution returns the index of a randomly
+/// selected element from the iterator used when the `WeightedIndex` was
+/// created. The chance of a given element being picked is proportional to its
+/// weight. The weights can use any type `X` for which an
+/// implementation of [`Uniform<X>`] exists.
+///
+/// # Performance
+///
+/// A `WeightedIndex<X>` contains a `Vec<X>` and a [`Uniform<X>`] and so its
+/// size is the sum of the size of those objects, possibly plus some alignment.
+///
+/// Creating a `WeightedIndex<X>` will allocate enough space to hold `N - 1`
+/// weights of type `X`, where `N` is the number of weights. However, since
+/// `Vec` doesn't guarantee a particular growth strategy, additional memory
+/// might be allocated but not used. Since the `WeightedIndex` object also
+/// contains a [`Uniform<X>`], this might cause additional allocations, though
+/// for primitive types, [`Uniform<X>`] doesn't allocate any memory.
+///
+/// Time complexity of sampling from `WeightedIndex` is `O(log N)` where
+/// `N` is the number of weights.
+///
+/// Sampling from `WeightedIndex` will result in a single call to
+/// [`Uniform<X>::sample`], which typically will request a single value from
+/// the underlying [`RngCore`], though the exact number depends on the
+/// implementation of [`Uniform<X>::sample`].
+///
+/// # Example
+///
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::WeightedIndex;
+///
+/// let choices = ['a', 'b', 'c'];
+/// let weights = [2, 1, 1];
+/// let dist = WeightedIndex::new(&weights).unwrap();
+/// let mut rng = thread_rng();
+/// for _ in 0..100 {
+/// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c'
+/// println!("{}", choices[dist.sample(&mut rng)]);
+/// }
+///
+/// let items = [('a', 0), ('b', 3), ('c', 7)];
+/// let dist2 = WeightedIndex::new(items.iter().map(|item| item.1)).unwrap();
+/// for _ in 0..100 {
+/// // 0% chance to print 'a', 30% chance to print 'b', 70% chance to print 'c'
+/// println!("{}", items[dist2.sample(&mut rng)].0);
+/// }
+/// ```
+///
+/// [`Uniform<X>`]: struct.Uniform.html
+/// [`Uniform<X>::sample`]: struct.Uniform.html#method.sample
+/// [`RngCore`]: ../trait.RngCore.html
+#[derive(Debug, Clone)]
+pub struct WeightedIndex<X: SampleUniform + PartialOrd> {
+ cumulative_weights: Vec<X>,
+ weight_distribution: X::Sampler,
+}
+
+impl<X: SampleUniform + PartialOrd> WeightedIndex<X> {
+ /// Creates a new `WeightedIndex` [`Distribution`] using the values
+ /// in `weights`. The weights can use any type `X` for which an
+ /// implementation of [`Uniform<X>`] exists.
+ ///
+ /// Returns an error if the iterator is empty, if any weight is `< 0`, or
+ /// if the sum of the weights is zero.
+ ///
+ /// [`Distribution`]: trait.Distribution.html
+ /// [`Uniform<X>`]: struct.Uniform.html
+ pub fn new<I>(weights: I) -> Result<WeightedIndex<X>, WeightedError>
+ where I: IntoIterator,
+ I::Item: SampleBorrow<X>,
+ X: for<'a> ::core::ops::AddAssign<&'a X> +
+ Clone +
+ Default {
+ let mut iter = weights.into_iter();
+ let mut total_weight: X = iter.next()
+ .ok_or(WeightedError::NoItem)?
+ .borrow()
+ .clone();
+
+ let zero = <X as Default>::default();
+ if total_weight < zero {
+ return Err(WeightedError::NegativeWeight);
+ }
+
+ let mut weights = Vec::<X>::with_capacity(iter.size_hint().0);
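+ // Build prefix sums: after the loop, entry `i` holds `w_0 + ... + w_i`. The
+ // grand total is not stored in the Vec; it only serves as the exclusive
+ // upper bound of the sampler below.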
+ for w in iter {
+ if *w.borrow() < zero {
+ return Err(WeightedError::NegativeWeight);
+ }
+ weights.push(total_weight.clone());
+ total_weight += w.borrow();
+ }
+
+ if total_weight == zero {
+ return Err(WeightedError::AllWeightsZero);
+ }
+ let distr = X::Sampler::new(zero, total_weight);
+
+ Ok(WeightedIndex { cumulative_weights: weights, weight_distribution: distr })
+ }
+}
+
+impl<X> Distribution<usize> for WeightedIndex<X> where
+ X: SampleUniform + PartialOrd {
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
+ use ::core::cmp::Ordering;
+ let chosen_weight = self.weight_distribution.sample(rng);
+ // Find the first item which has a weight *higher* than the chosen weight.
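+ // The comparator never returns `Equal`, so the search always "fails" and
+ // `unwrap_err()` yields the insertion point: the number of cumulative
+ // weights `<=` the chosen weight, which is exactly the index to return.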
+ self.cumulative_weights.binary_search_by(
+ |w| if *w <= chosen_weight { Ordering::Less } else { Ordering::Greater }).unwrap_err()
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_weightedindex() {
+ let mut r = ::test::rng(700);
+ const N_REPS: u32 = 5000;
+ let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7];
+ let total_weight = weights.iter().sum::<u32>() as f32;
+
+ let verify = |result: [i32; 14]| {
+ for (i, count) in result.iter().enumerate() {
+ let exp = (weights[i] * N_REPS) as f32 / total_weight;
+ let mut err = (*count as f32 - exp).abs();
+ if err != 0.0 {
+ err /= exp;
+ }
+ assert!(err <= 0.25);
+ }
+ };
+
+ // WeightedIndex from vec
+ let mut chosen = [0i32; 14];
+ let distr = WeightedIndex::new(weights.to_vec()).unwrap();
+ for _ in 0..N_REPS {
+ chosen[distr.sample(&mut r)] += 1;
+ }
+ verify(chosen);
+
+ // WeightedIndex from slice
+ chosen = [0i32; 14];
+ let distr = WeightedIndex::new(&weights[..]).unwrap();
+ for _ in 0..N_REPS {
+ chosen[distr.sample(&mut r)] += 1;
+ }
+ verify(chosen);
+
+ // WeightedIndex from iterator
+ chosen = [0i32; 14];
+ let distr = WeightedIndex::new(weights.iter()).unwrap();
+ for _ in 0..N_REPS {
+ chosen[distr.sample(&mut r)] += 1;
+ }
+ verify(chosen);
+
+ for _ in 0..5 {
+ assert_eq!(WeightedIndex::new(&[0, 1]).unwrap().sample(&mut r), 1);
+ assert_eq!(WeightedIndex::new(&[1, 0]).unwrap().sample(&mut r), 0);
+ assert_eq!(WeightedIndex::new(&[0, 0, 0, 0, 10, 0]).unwrap().sample(&mut r), 4);
+ }
+
+ assert_eq!(WeightedIndex::new(&[10][0..0]).unwrap_err(), WeightedError::NoItem);
+ assert_eq!(WeightedIndex::new(&[0]).unwrap_err(), WeightedError::AllWeightsZero);
+ assert_eq!(WeightedIndex::new(&[10, 20, -1, 30]).unwrap_err(), WeightedError::NegativeWeight);
+ assert_eq!(WeightedIndex::new(&[-10, 20, 1, 30]).unwrap_err(), WeightedError::NegativeWeight);
+ assert_eq!(WeightedIndex::new(&[-10]).unwrap_err(), WeightedError::NegativeWeight);
+ }
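+
+ // A minimal sketch of the cumulative-weight layout behind `sample`: for
+ // weights [2, 1, 1] the stored prefix sums are [2, 3] and the sampler draws
+ // from [0, 4), so chosen values in [0,2) map to index 0, [2,3) to index 1
+ // and [3,4) to index 2. (Reading the private field works because this test
+ // module lives in the same file as `WeightedIndex`.)
+ #[test]
+ fn cumulative_weight_layout() {
+ let distr = WeightedIndex::new(&[2u32, 1, 1]).unwrap();
+ assert_eq!(&distr.cumulative_weights[..], &[2u32, 3][..]);
+ }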
+}
+
+/// Error type returned from `WeightedIndex::new`.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum WeightedError {
+ /// The provided iterator contained no items.
+ NoItem,
+
+ /// A weight lower than zero was used.
+ NegativeWeight,
+
+ /// All items in the provided iterator had a weight of zero.
+ AllWeightsZero,
+}
+
+impl WeightedError {
+ fn msg(&self) -> &str {
+ match *self {
+ WeightedError::NoItem => "No items found",
+ WeightedError::NegativeWeight => "Item has negative weight",
+ WeightedError::AllWeightsZero => "All items had weight zero",
+ }
+ }
+}
+
+#[cfg(feature="std")]
+impl ::std::error::Error for WeightedError {
+ fn description(&self) -> &str {
+ self.msg()
+ }
+ fn cause(&self) -> Option<&::std::error::Error> {
+ None
+ }
+}
+
+impl fmt::Display for WeightedError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}", self.msg())
+ }
+}
diff --git a/rand/src/distributions/ziggurat_tables.rs b/rand/src/distributions/ziggurat_tables.rs
index b6de4bf..ca1ce30 100644
--- a/rand/src/distributions/ziggurat_tables.rs
+++ b/rand/src/distributions/ziggurat_tables.rs
@@ -1,10 +1,9 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.