Diffstat (limited to 'rand/rand_core/src/block.rs')
-rw-r--r--  rand/rand_core/src/block.rs  185
1 file changed, 57 insertions(+), 128 deletions(-)
diff --git a/rand/rand_core/src/block.rs b/rand/rand_core/src/block.rs
index de480e4..0ab7458 100644
--- a/rand/rand_core/src/block.rs
+++ b/rand/rand_core/src/block.rs
@@ -16,55 +16,55 @@
//! implementations only need to concern themselves with generation of the
//! block, not the various [`RngCore`] methods (especially [`fill_bytes`], where
//! the optimal implementations are not trivial), and this allows
-//! [`ReseedingRng`] perform periodic reseeding with very low overhead.
+//! `ReseedingRng` (see the [`rand`](https://docs.rs/rand) crate) to perform
+//! periodic reseeding with very low overhead.
//!
//! # Example
-//!
+//!
//! ```no_run
//! use rand_core::{RngCore, SeedableRng};
//! use rand_core::block::{BlockRngCore, BlockRng};
-//!
+//!
//! struct MyRngCore;
-//!
+//!
//! impl BlockRngCore for MyRngCore {
//!     type Item = u32;
//!     type Results = [u32; 16];
-//!
+//!
//!     fn generate(&mut self, results: &mut Self::Results) {
//!         unimplemented!()
//!     }
//! }
-//!
+//!
//! impl SeedableRng for MyRngCore {
//!     type Seed = [u8; 32];
//!     fn from_seed(seed: Self::Seed) -> Self {
//!         unimplemented!()
//!     }
//! }
-//!
+//!
//! // optionally, also implement CryptoRng for MyRngCore
-//!
+//!
//! // Final RNG.
//! type MyRng = BlockRng<MyRngCore>;
//! ```
-//!
-//! [`BlockRngCore`]: trait.BlockRngCore.html
-//! [`RngCore`]: ../trait.RngCore.html
-//! [`fill_bytes`]: ../trait.RngCore.html#tymethod.fill_bytes
-//! [`ReseedingRng`]: ../../rand/rngs/adapter/struct.ReseedingRng.html
+//!
+//! [`BlockRngCore`]: crate::block::BlockRngCore
+//! [`fill_bytes`]: RngCore::fill_bytes
use core::convert::AsRef;
-use core::fmt;
-use {RngCore, CryptoRng, SeedableRng, Error};
-use impls::{fill_via_u32_chunks, fill_via_u64_chunks};
+use core::{fmt, ptr};
+#[cfg(feature="serde1")] use serde::{Serialize, Deserialize};
+use crate::{RngCore, CryptoRng, SeedableRng, Error};
+use crate::impls::{fill_via_u32_chunks, fill_via_u64_chunks};
/// A trait for RNGs which do not generate random numbers individually, but in
/// blocks (typically `[u32; N]`). This technique is commonly used by
/// cryptographic RNGs to improve performance.
-///
-/// See the [module documentation](index.html) for details.
+///
+/// See the [module][crate::block] documentation for details.
pub trait BlockRngCore {
/// Results element type, e.g. `u32`.
type Item;
-
+
/// Results type. This is the 'block' an RNG implementing `BlockRngCore`
/// generates, which will usually be an array like `[u32; 16]`.
type Results: AsRef<[Self::Item]> + AsMut<[Self::Item]> + Default;
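For orientation, here is a minimal sketch of a `BlockRngCore` implementation against this trait. The counter-based `CounterCore` is invented for this example and is statistically worthless; it only shows which items an implementor must provide:

```rust
use rand_core::RngCore;
use rand_core::block::{BlockRng, BlockRngCore};

// Toy core: each block is filled with consecutive counter values.
// Illustrative only; there is no randomness here at all.
struct CounterCore { state: u32 }

impl BlockRngCore for CounterCore {
    type Item = u32;
    type Results = [u32; 16];

    fn generate(&mut self, results: &mut Self::Results) {
        for r in results.iter_mut() {
            *r = self.state;
            self.state = self.state.wrapping_add(1);
        }
    }
}

fn main() {
    // BlockRng layers next_u32/next_u64/fill_bytes on top of `generate`.
    let mut rng = BlockRng::new(CounterCore { state: 0 });
    assert_eq!(rng.next_u32(), 0);
    assert_eq!(rng.next_u32(), 1);
}
```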
@@ -105,15 +105,10 @@ pub trait BlockRngCore {
///
/// For easy initialization `BlockRng` also implements [`SeedableRng`].
///
-/// [`BlockRngCore`]: BlockRngCore.t.html
-/// [`BlockRngCore::generate`]: trait.BlockRngCore.html#tymethod.generate
-/// [`BlockRng64`]: struct.BlockRng64.html
-/// [`RngCore`]: ../RngCore.t.html
-/// [`next_u32`]: ../trait.RngCore.html#tymethod.next_u32
-/// [`next_u64`]: ../trait.RngCore.html#tymethod.next_u64
-/// [`fill_bytes`]: ../trait.RngCore.html#tymethod.fill_bytes
-/// [`try_fill_bytes`]: ../trait.RngCore.html#tymethod.try_fill_bytes
-/// [`SeedableRng`]: ../SeedableRng.t.html
+/// [`next_u32`]: RngCore::next_u32
+/// [`next_u64`]: RngCore::next_u64
+/// [`fill_bytes`]: RngCore::fill_bytes
+/// [`try_fill_bytes`]: RngCore::try_fill_bytes
#[derive(Clone)]
#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
pub struct BlockRng<R: BlockRngCore + ?Sized> {
@@ -137,6 +132,7 @@ impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng<R> {
impl<R: BlockRngCore> BlockRng<R> {
/// Create a new `BlockRng` from an existing RNG implementing
/// `BlockRngCore`. Results will be generated on first use.
+ #[inline]
pub fn new(core: R) -> BlockRng<R> {
let results_empty = R::Results::default();
BlockRng {
@@ -147,22 +143,25 @@ impl<R: BlockRngCore> BlockRng<R> {
}
/// Get the index into the result buffer.
- ///
+ ///
/// If this is equal to or larger than the size of the result buffer then
/// the buffer is "empty" and `generate()` must be called to produce new
/// results.
+ #[inline(always)]
pub fn index(&self) -> usize {
self.index
}
/// Reset the number of available results.
/// This will force a new set of results to be generated on next use.
+ #[inline]
pub fn reset(&mut self) {
self.index = self.results.as_ref().len();
}
/// Generate a new set of results immediately, setting the index to the
/// given value.
+ #[inline]
pub fn generate_and_set(&mut self, index: usize) {
assert!(index < self.results.as_ref().len());
self.core.generate(&mut self.results);
@@ -173,7 +172,7 @@ impl<R: BlockRngCore> BlockRng<R> {
impl<R: BlockRngCore<Item=u32>> RngCore for BlockRng<R>
where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
{
- #[inline(always)]
+ #[inline]
fn next_u32(&mut self) -> u32 {
if self.index >= self.results.as_ref().len() {
self.generate_and_set(0);
@@ -184,12 +183,14 @@ where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
value
}
- #[inline(always)]
+ #[inline]
fn next_u64(&mut self) -> u64 {
let read_u64 = |results: &[u32], index| {
- if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
- // requires little-endian CPU supporting unaligned reads:
- unsafe { *(&results[index] as *const u32 as *const u64) }
+ if cfg!(target_endian = "little") {
+ // requires little-endian CPU
+ #[allow(clippy::cast_ptr_alignment)] // false positive
+ let ptr: *const u64 = results[index..=index+1].as_ptr() as *const u64;
+ unsafe { ptr::read_unaligned(ptr) }
} else {
let x = u64::from(results[index]);
let y = u64::from(results[index + 1]);
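The same fast path, restated as a standalone function to make the endianness reasoning explicit. This `read_u64` helper is hypothetical and merely mirrors the closure in the patch:

```rust
use core::ptr;

/// Combine `results[index]` and `results[index + 1]` into one `u64`,
/// low word first, matching the closure above.
fn read_u64(results: &[u32], index: usize) -> u64 {
    if cfg!(target_endian = "little") {
        // Slice first so both words are bounds-checked up front.
        let data = &results[index..=index + 1];
        // A `[u32]` is only 4-byte aligned, hence read_unaligned.
        let p = data.as_ptr() as *const u64;
        unsafe { ptr::read_unaligned(p) }
    } else {
        // Portable fallback: assemble the value with shifts.
        let x = u64::from(results[index]);
        let y = u64::from(results[index + 1]);
        (y << 32) | x
    }
}

fn main() {
    assert_eq!(read_u64(&[0x0123_4567, 0x89AB_CDEF], 0), 0x89AB_CDEF_0123_4567);
}
```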
@@ -215,48 +216,7 @@ where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
}
}
- // As an optimization we try to write directly into the output buffer.
- // This is only enabled for little-endian platforms where unaligned writes
- // are known to be safe and fast.
- #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
- fn fill_bytes(&mut self, dest: &mut [u8]) {
- let mut filled = 0;
-
- // Continue filling from the current set of results
- if self.index < self.results.as_ref().len() {
- let (consumed_u32, filled_u8) =
- fill_via_u32_chunks(&self.results.as_ref()[self.index..],
- dest);
-
- self.index += consumed_u32;
- filled += filled_u8;
- }
-
- let len_remainder =
- (dest.len() - filled) % (self.results.as_ref().len() * 4);
- let end_direct = dest.len() - len_remainder;
-
- while filled < end_direct {
- let dest_u32: &mut R::Results = unsafe {
- &mut *(dest[filled..].as_mut_ptr() as
- *mut <R as BlockRngCore>::Results)
- };
- self.core.generate(dest_u32);
- filled += self.results.as_ref().len() * 4;
- self.index = self.results.as_ref().len();
- }
-
- if len_remainder > 0 {
- self.core.generate(&mut self.results);
- let (consumed_u32, _) =
- fill_via_u32_chunks(self.results.as_ref(),
- &mut dest[filled..]);
-
- self.index = consumed_u32;
- }
- }
-
- #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+ #[inline]
fn fill_bytes(&mut self, dest: &mut [u8]) {
let mut read_len = 0;
while read_len < dest.len() {
@@ -272,6 +232,7 @@ where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
}
}
+ #[inline(always)]
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
self.fill_bytes(dest);
Ok(())
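The buffered `fill_bytes` path that now serves all platforms delegates the byte conversion to `fill_via_u32_chunks`, which reports how many words it consumed and how many bytes it wrote. A small usage sketch, assuming rand_core 0.5's public `rand_core::impls` helpers:

```rust
use rand_core::impls::fill_via_u32_chunks;

fn main() {
    // (words consumed, bytes written): 6 bytes need ceil(6/4) = 2 words.
    let src = [0x0403_0201u32, 0x0807_0605];
    let mut dest = [0u8; 6];
    let (consumed, filled) = fill_via_u32_chunks(&src, &mut dest);
    assert_eq!((consumed, filled), (2, 6));
    // Output bytes are little-endian regardless of host endianness.
    assert_eq!(dest, [1, 2, 3, 4, 5, 6]);
}
```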
@@ -281,14 +242,17 @@ where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> {
type Seed = R::Seed;
+ #[inline(always)]
fn from_seed(seed: Self::Seed) -> Self {
Self::new(R::from_seed(seed))
}
+ #[inline(always)]
fn seed_from_u64(seed: u64) -> Self {
Self::new(R::seed_from_u64(seed))
}
+ #[inline(always)]
fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
Ok(Self::new(R::from_rng(rng)?))
}
@@ -314,13 +278,10 @@ impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> {
/// values. If the requested length is not a multiple of 8, some bytes will be
/// discarded.
///
-/// [`BlockRngCore`]: BlockRngCore.t.html
-/// [`RngCore`]: ../RngCore.t.html
-/// [`next_u32`]: ../trait.RngCore.html#tymethod.next_u32
-/// [`next_u64`]: ../trait.RngCore.html#tymethod.next_u64
-/// [`fill_bytes`]: ../trait.RngCore.html#tymethod.fill_bytes
-/// [`try_fill_bytes`]: ../trait.RngCore.html#tymethod.try_fill_bytes
-/// [`BlockRng`]: struct.BlockRng.html
+/// [`next_u32`]: RngCore::next_u32
+/// [`next_u64`]: RngCore::next_u64
+/// [`fill_bytes`]: RngCore::fill_bytes
+/// [`try_fill_bytes`]: RngCore::try_fill_bytes
#[derive(Clone)]
#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
pub struct BlockRng64<R: BlockRngCore + ?Sized> {
@@ -346,6 +307,7 @@ impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng64<R> {
impl<R: BlockRngCore> BlockRng64<R> {
/// Create a new `BlockRng64` from an existing RNG implementing
/// `BlockRngCore`. Results will be generated on first use.
+ #[inline]
pub fn new(core: R) -> BlockRng64<R> {
let results_empty = R::Results::default();
BlockRng64 {
@@ -361,12 +323,14 @@ impl<R: BlockRngCore> BlockRng64<R> {
/// If this is equal to or larger than the size of the result buffer then
/// the buffer is "empty" and `generate()` must be called to produce new
/// results.
+ #[inline(always)]
pub fn index(&self) -> usize {
self.index
}
/// Reset the number of available results.
/// This will force a new set of results to be generated on next use.
+ #[inline]
pub fn reset(&mut self) {
self.index = self.results.as_ref().len();
self.half_used = false;
@@ -374,6 +338,7 @@ impl<R: BlockRngCore> BlockRng64<R> {
/// Generate a new set of results immediately, setting the index to the
/// given value.
+ #[inline]
pub fn generate_and_set(&mut self, index: usize) {
assert!(index < self.results.as_ref().len());
self.core.generate(&mut self.results);
@@ -385,7 +350,7 @@ impl<R: BlockRngCore> BlockRng64<R> {
impl<R: BlockRngCore<Item=u64>> RngCore for BlockRng64<R>
where <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]>
{
- #[inline(always)]
+ #[inline]
fn next_u32(&mut self) -> u32 {
let mut index = self.index * 2 - self.half_used as usize;
if index >= self.results.as_ref().len() * 2 {
@@ -411,7 +376,7 @@ where <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]>
}
}
- #[inline(always)]
+ #[inline]
fn next_u64(&mut self) -> u64 {
if self.index >= self.results.as_ref().len() {
self.core.generate(&mut self.results);
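The `half_used` flag in `next_u32` above lets `BlockRng64` serve each 64-bit result as two 32-bit halves, low half first on all targets. A simplified model of that bookkeeping, ignoring the real implementation's pointer casts and buffer refills:

```rust
// Simplified model: one u64 word yields its low 32 bits, then its
// high 32 bits; `half_used` remembers whether the low half is gone.
struct HalfWord { word: u64, half_used: bool }

impl HalfWord {
    fn next_u32(&mut self) -> u32 {
        let half = if self.half_used { self.word >> 32 } else { self.word };
        self.half_used = !self.half_used;
        half as u32
    }
}

fn main() {
    let mut h = HalfWord { word: 0xDEAD_BEEF_0123_4567, half_used: false };
    assert_eq!(h.next_u32(), 0x0123_4567); // low half first
    assert_eq!(h.next_u32(), 0xDEAD_BEEF); // then the high half
}
```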
@@ -424,48 +389,7 @@ where <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]>
value
}
- // As an optimization we try to write directly into the output buffer.
- // This is only enabled for little-endian platforms where unaligned writes
- // are known to be safe and fast.
- #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
- fn fill_bytes(&mut self, dest: &mut [u8]) {
- let mut filled = 0;
- self.half_used = false;
-
- // Continue filling from the current set of results
- if self.index < self.results.as_ref().len() {
- let (consumed_u64, filled_u8) =
- fill_via_u64_chunks(&self.results.as_ref()[self.index..],
- dest);
-
- self.index += consumed_u64;
- filled += filled_u8;
- }
-
- let len_remainder =
- (dest.len() - filled) % (self.results.as_ref().len() * 8);
- let end_direct = dest.len() - len_remainder;
-
- while filled < end_direct {
- let dest_u64: &mut R::Results = unsafe {
- ::core::mem::transmute(dest[filled..].as_mut_ptr())
- };
- self.core.generate(dest_u64);
- filled += self.results.as_ref().len() * 8;
- self.index = self.results.as_ref().len();
- }
-
- if len_remainder > 0 {
- self.core.generate(&mut self.results);
- let (consumed_u64, _) =
- fill_via_u64_chunks(&mut self.results.as_ref(),
- &mut dest[filled..]);
-
- self.index = consumed_u64;
- }
- }
-
- #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+ #[inline]
fn fill_bytes(&mut self, dest: &mut [u8]) {
let mut read_len = 0;
self.half_used = false;
@@ -484,22 +408,27 @@ where <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]>
}
}
+ #[inline(always)]
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
- Ok(self.fill_bytes(dest))
+ self.fill_bytes(dest);
+ Ok(())
}
}
impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> {
type Seed = R::Seed;
+ #[inline(always)]
fn from_seed(seed: Self::Seed) -> Self {
Self::new(R::from_seed(seed))
}
+ #[inline(always)]
fn seed_from_u64(seed: u64) -> Self {
Self::new(R::seed_from_u64(seed))
}
+ #[inline(always)]
fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
Ok(Self::new(R::from_rng(rng)?))
}
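Putting the pieces together, a complete end-to-end sketch of the intended usage. The xorshift mixer and all names here are invented for illustration and are not cryptographically secure:

```rust
use rand_core::block::{BlockRng, BlockRngCore};
use rand_core::{RngCore, SeedableRng};

struct MyRngCore { state: u32 }

impl BlockRngCore for MyRngCore {
    type Item = u32;
    type Results = [u32; 16];

    fn generate(&mut self, results: &mut Self::Results) {
        // Xorshift32 step per output word: simple, not secure.
        for r in results.iter_mut() {
            self.state ^= self.state << 13;
            self.state ^= self.state >> 17;
            self.state ^= self.state << 5;
            *r = self.state;
        }
    }
}

impl SeedableRng for MyRngCore {
    type Seed = [u8; 4];

    fn from_seed(seed: Self::Seed) -> Self {
        // Xorshift must not start from an all-zero state.
        MyRngCore { state: u32::from_le_bytes(seed).max(1) }
    }
}

fn main() {
    // BlockRng's SeedableRng impl forwards to MyRngCore's.
    let mut rng = BlockRng::<MyRngCore>::seed_from_u64(42);
    let x = rng.next_u64();
    let mut buf = [0u8; 32];
    rng.fill_bytes(&mut buf);
    println!("{:x} {:?}", x, buf);
}
```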