From fd091b04316db9dc5fafadbd6bdbe60b127408a9 Mon Sep 17 00:00:00 2001
From: Daniel Mueller
Date: Thu, 2 Jan 2020 08:32:06 -0800
Subject: Update nitrokey crate to 0.4.0

This change finally updates the version of the nitrokey crate that we
consume to 0.4.0. Along with that we update rand_core, one of its
dependencies, to 0.5.1. Furthermore we add cfg-if in version 0.1.10 and
getrandom in version 0.1.13, both of which are now new (non-development)
dependencies.

Import subrepo nitrokey/:nitrokey at e81057037e9b4f370b64c0a030a725bc6bdfb870
Import subrepo cfg-if/:cfg-if at 4484a6faf816ff8058088ad857b0c6bb2f4b02b2
Import subrepo getrandom/:getrandom at d661aa7e1b8cc80b47dabe3d2135b3b47d2858af
Import subrepo rand/:rand at d877ed528248b52d947e0484364a4e1ae59ca502
---
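Note on the rand_core 0.5 API imported below: the module documentation in
this diff keeps its example as a `norun` block, and its `type Seed =
unimplemented!();` line is illustrative rather than valid Rust, since a
macro cannot appear in type position. The following sketch shows the same
`BlockRngCore`/`BlockRng` pattern in a form that should compile against
rand_core 0.5.1. `MyRngCore`, its toy mixing constants, and the 4-byte seed
are invented for illustration; they are not part of this patch, and the
generator is not a vetted RNG.

    use rand_core::block::{BlockRng, BlockRngCore};
    use rand_core::{RngCore, SeedableRng};

    // Hypothetical block generator: fills a 16-word block from a counter.
    // The constants are arbitrary mixing values, not a reviewed design.
    struct MyRngCore {
        state: u32,
    }

    impl BlockRngCore for MyRngCore {
        type Item = u32;
        type Results = [u32; 16];

        fn generate(&mut self, results: &mut Self::Results) {
            for word in results.iter_mut() {
                self.state = self
                    .state
                    .wrapping_mul(747_796_405)
                    .wrapping_add(2_891_336_453);
                *word = self.state;
            }
        }
    }

    impl SeedableRng for MyRngCore {
        // A real seed would be larger; four bytes keeps the sketch short.
        type Seed = [u8; 4];

        fn from_seed(seed: Self::Seed) -> Self {
            MyRngCore { state: u32::from_le_bytes(seed) }
        }
    }

    fn main() {
        // BlockRng supplies next_u32/next_u64/fill_bytes on top of generate().
        let mut rng = BlockRng::new(MyRngCore::from_seed([1, 2, 3, 4]));
        let x = rng.next_u64();
        let mut buf = [0u8; 32];
        rng.fill_bytes(&mut buf);
        println!("{:#018x} {:?}", x, buf);
    }

Because the diff below also gives `BlockRng<R>` a `SeedableRng`
implementation whenever `R: SeedableRng`, calling
`BlockRng::<MyRngCore>::seed_from_u64(42)` would be an equivalent way to
construct the RNG.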
 rand/rand_core/src/block.rs | 185 ++++++++++++++------------------------------
 1 file changed, 57 insertions(+), 128 deletions(-)

(limited to 'rand/rand_core/src/block.rs')

diff --git a/rand/rand_core/src/block.rs b/rand/rand_core/src/block.rs
index de480e4..0ab7458 100644
--- a/rand/rand_core/src/block.rs
+++ b/rand/rand_core/src/block.rs
@@ -16,55 +16,55 @@
 //! implementations only need to concern themselves with generation of the
 //! block, not the various [`RngCore`] methods (especially [`fill_bytes`], where
 //! the optimal implementations are not trivial), and this allows
-//! [`ReseedingRng`] perform periodic reseeding with very low overhead.
+//! `ReseedingRng` (see [`rand`](https://docs.rs/rand) crate) perform periodic
+//! reseeding with very low overhead.
 //!
 //! # Example
-//! 
+//!
 //! ```norun
 //! use rand_core::block::{BlockRngCore, BlockRng};
-//! 
+//!
 //! struct MyRngCore;
-//! 
+//!
 //! impl BlockRngCore for MyRngCore {
 //!     type Results = [u32; 16];
-//! 
+//!
 //!     fn generate(&mut self, results: &mut Self::Results) {
 //!         unimplemented!()
 //!     }
 //! }
-//! 
+//!
 //! impl SeedableRng for MyRngCore {
 //!     type Seed = unimplemented!();
 //!     fn from_seed(seed: Self::Seed) -> Self {
 //!         unimplemented!()
 //!     }
 //! }
-//! 
+//!
 //! // optionally, also implement CryptoRng for MyRngCore
-//! 
+//!
 //! // Final RNG.
 //! type MyRng = BlockRng<MyRngCore>;
 //! ```
-//! 
-//! [`BlockRngCore`]: trait.BlockRngCore.html
-//! [`RngCore`]: ../trait.RngCore.html
-//! [`fill_bytes`]: ../trait.RngCore.html#tymethod.fill_bytes
-//! [`ReseedingRng`]: ../../rand/rngs/adapter/struct.ReseedingRng.html
+//!
+//! [`BlockRngCore`]: crate::block::BlockRngCore
+//! [`fill_bytes`]: RngCore::fill_bytes
 
 use core::convert::AsRef;
-use core::fmt;
-use {RngCore, CryptoRng, SeedableRng, Error};
-use impls::{fill_via_u32_chunks, fill_via_u64_chunks};
+use core::{fmt, ptr};
+#[cfg(feature="serde1")] use serde::{Serialize, Deserialize};
+use crate::{RngCore, CryptoRng, SeedableRng, Error};
+use crate::impls::{fill_via_u32_chunks, fill_via_u64_chunks};
 
 /// A trait for RNGs which do not generate random numbers individually, but in
 /// blocks (typically `[u32; N]`). This technique is commonly used by
 /// cryptographic RNGs to improve performance.
-/// 
-/// See the [module documentation](index.html) for details.
+///
+/// See the [module][crate::block] documentation for details.
 pub trait BlockRngCore {
     /// Results element type, e.g. `u32`.
     type Item;
-    
+
     /// Results type. This is the 'block' an RNG implementing `BlockRngCore`
     /// generates, which will usually be an array like `[u32; 16]`.
     type Results: AsRef<[Self::Item]> + AsMut<[Self::Item]> + Default;
@@ -105,15 +105,10 @@ pub trait BlockRngCore {
 ///
 /// For easy initialization `BlockRng` also implements [`SeedableRng`].
 ///
-/// [`BlockRngCore`]: BlockRngCore.t.html
-/// [`BlockRngCore::generate`]: trait.BlockRngCore.html#tymethod.generate
-/// [`BlockRng64`]: struct.BlockRng64.html
-/// [`RngCore`]: ../RngCore.t.html
-/// [`next_u32`]: ../trait.RngCore.html#tymethod.next_u32
-/// [`next_u64`]: ../trait.RngCore.html#tymethod.next_u64
-/// [`fill_bytes`]: ../trait.RngCore.html#tymethod.fill_bytes
-/// [`try_fill_bytes`]: ../trait.RngCore.html#tymethod.try_fill_bytes
-/// [`SeedableRng`]: ../SeedableRng.t.html
+/// [`next_u32`]: RngCore::next_u32
+/// [`next_u64`]: RngCore::next_u64
+/// [`fill_bytes`]: RngCore::fill_bytes
+/// [`try_fill_bytes`]: RngCore::try_fill_bytes
 #[derive(Clone)]
 #[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
 pub struct BlockRng<R: BlockRngCore + ?Sized> {
@@ -137,6 +132,7 @@ impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng<R> {
 impl<R: BlockRngCore> BlockRng<R> {
     /// Create a new `BlockRng` from an existing RNG implementing
     /// `BlockRngCore`. Results will be generated on first use.
+    #[inline]
     pub fn new(core: R) -> BlockRng<R> {
         let results_empty = R::Results::default();
         BlockRng {
@@ -147,22 +143,25 @@ impl<R: BlockRngCore> BlockRng<R> {
     }
 
     /// Get the index into the result buffer.
-    /// 
+    ///
     /// If this is equal to or larger than the size of the result buffer then
     /// the buffer is "empty" and `generate()` must be called to produce new
     /// results.
+    #[inline(always)]
     pub fn index(&self) -> usize {
         self.index
     }
 
     /// Reset the number of available results.
     /// This will force a new set of results to be generated on next use.
+    #[inline]
     pub fn reset(&mut self) {
         self.index = self.results.as_ref().len();
     }
 
     /// Generate a new set of results immediately, setting the index to the
     /// given value.
+    #[inline]
     pub fn generate_and_set(&mut self, index: usize) {
         assert!(index < self.results.as_ref().len());
         self.core.generate(&mut self.results);
@@ -173,7 +172,7 @@
 impl<R: BlockRngCore<Item=u32>> RngCore for BlockRng<R>
 where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
 {
-    #[inline(always)]
+    #[inline]
     fn next_u32(&mut self) -> u32 {
         if self.index >= self.results.as_ref().len() {
             self.generate_and_set(0);
@@ -184,12 +183,14 @@
         value
     }
 
-    #[inline(always)]
+    #[inline]
     fn next_u64(&mut self) -> u64 {
         let read_u64 = |results: &[u32], index| {
-            if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
-                // requires little-endian CPU supporting unaligned reads:
-                unsafe { *(&results[index] as *const u32 as *const u64) }
+            if cfg!(any(target_endian = "little")) {
+                // requires little-endian CPU
+                #[allow(clippy::cast_ptr_alignment)] // false positive
+                let ptr: *const u64 = results[index..=index+1].as_ptr() as *const u64;
+                unsafe { ptr::read_unaligned(ptr) }
             } else {
                 let x = u64::from(results[index]);
                 let y = u64::from(results[index + 1]);
@@ -215,48 +216,7 @@ where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
         }
     }
 
-    // As an optimization we try to write directly into the output buffer.
-    // This is only enabled for little-endian platforms where unaligned writes
-    // are known to be safe and fast.
-    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-    fn fill_bytes(&mut self, dest: &mut [u8]) {
-        let mut filled = 0;
-
-        // Continue filling from the current set of results
-        if self.index < self.results.as_ref().len() {
-            let (consumed_u32, filled_u8) =
-                fill_via_u32_chunks(&self.results.as_ref()[self.index..],
-                                    dest);
-
-            self.index += consumed_u32;
-            filled += filled_u8;
-        }
-
-        let len_remainder =
-            (dest.len() - filled) % (self.results.as_ref().len() * 4);
-        let end_direct = dest.len() - len_remainder;
-
-        while filled < end_direct {
-            let dest_u32: &mut R::Results = unsafe {
-                &mut *(dest[filled..].as_mut_ptr() as
-                *mut <R as BlockRngCore>::Results)
-            };
-            self.core.generate(dest_u32);
-            filled += self.results.as_ref().len() * 4;
-            self.index = self.results.as_ref().len();
-        }
-
-        if len_remainder > 0 {
-            self.core.generate(&mut self.results);
-            let (consumed_u32, _) =
-                fill_via_u32_chunks(self.results.as_ref(),
-                                    &mut dest[filled..]);
-
-            self.index = consumed_u32;
-        }
-    }
-
-    #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+    #[inline]
     fn fill_bytes(&mut self, dest: &mut [u8]) {
         let mut read_len = 0;
         while read_len < dest.len() {
@@ -272,6 +232,7 @@ where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
         }
     }
 
+    #[inline(always)]
     fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
         self.fill_bytes(dest);
         Ok(())
@@ -281,14 +242,17 @@ where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
 impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> {
     type Seed = R::Seed;
 
+    #[inline(always)]
     fn from_seed(seed: Self::Seed) -> Self {
         Self::new(R::from_seed(seed))
     }
 
+    #[inline(always)]
     fn seed_from_u64(seed: u64) -> Self {
         Self::new(R::seed_from_u64(seed))
     }
 
+    #[inline(always)]
     fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
         Ok(Self::new(R::from_rng(rng)?))
     }
@@ -314,13 +278,10 @@ impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> {
 /// values. If the requested length is not a multiple of 8, some bytes will be
 /// discarded.
 ///
-/// [`BlockRngCore`]: BlockRngCore.t.html
-/// [`RngCore`]: ../RngCore.t.html
-/// [`next_u32`]: ../trait.RngCore.html#tymethod.next_u32
-/// [`next_u64`]: ../trait.RngCore.html#tymethod.next_u64
-/// [`fill_bytes`]: ../trait.RngCore.html#tymethod.fill_bytes
-/// [`try_fill_bytes`]: ../trait.RngCore.html#tymethod.try_fill_bytes
-/// [`BlockRng`]: struct.BlockRng.html
+/// [`next_u32`]: RngCore::next_u32
+/// [`next_u64`]: RngCore::next_u64
+/// [`fill_bytes`]: RngCore::fill_bytes
+/// [`try_fill_bytes`]: RngCore::try_fill_bytes
 #[derive(Clone)]
 #[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
 pub struct BlockRng64<R: BlockRngCore + ?Sized> {
@@ -346,6 +307,7 @@ impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng64<R> {
 impl<R: BlockRngCore> BlockRng64<R> {
     /// Create a new `BlockRng` from an existing RNG implementing
     /// `BlockRngCore`. Results will be generated on first use.
+    #[inline]
     pub fn new(core: R) -> BlockRng64<R> {
         let results_empty = R::Results::default();
         BlockRng64 {
@@ -361,12 +323,14 @@ impl<R: BlockRngCore> BlockRng64<R> {
     /// If this is equal to or larger than the size of the result buffer then
     /// the buffer is "empty" and `generate()` must be called to produce new
     /// results.
+    #[inline(always)]
    pub fn index(&self) -> usize {
        self.index
    }
 
     /// Reset the number of available results.
     /// This will force a new set of results to be generated on next use.
+    #[inline]
     pub fn reset(&mut self) {
         self.index = self.results.as_ref().len();
         self.half_used = false;
     }
 
     /// Generate a new set of results immediately, setting the index to the
     /// given value.
+    #[inline]
     pub fn generate_and_set(&mut self, index: usize) {
         assert!(index < self.results.as_ref().len());
         self.core.generate(&mut self.results);
@@ -385,7 +350,7 @@
 impl<R: BlockRngCore<Item=u64>> RngCore for BlockRng64<R>
 where <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]>
 {
-    #[inline(always)]
+    #[inline]
     fn next_u32(&mut self) -> u32 {
         let mut index = self.index * 2 - self.half_used as usize;
         if index >= self.results.as_ref().len() * 2 {
@@ -411,7 +376,7 @@
         }
     }
 
-    #[inline(always)]
+    #[inline]
     fn next_u64(&mut self) -> u64 {
         if self.index >= self.results.as_ref().len() {
             self.core.generate(&mut self.results);
@@ -424,48 +389,7 @@
-    // As an optimization we try to write directly into the output buffer.
-    // This is only enabled for little-endian platforms where unaligned writes
-    // are known to be safe and fast.
-    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-    fn fill_bytes(&mut self, dest: &mut [u8]) {
-        let mut filled = 0;
-        self.half_used = false;
-
-        // Continue filling from the current set of results
-        if self.index < self.results.as_ref().len() {
-            let (consumed_u64, filled_u8) =
-                fill_via_u64_chunks(&self.results.as_ref()[self.index..],
-                                    dest);
-
-            self.index += consumed_u64;
-            filled += filled_u8;
-        }
-
-        let len_remainder =
-            (dest.len() - filled) % (self.results.as_ref().len() * 8);
-        let end_direct = dest.len() - len_remainder;
-
-        while filled < end_direct {
-            let dest_u64: &mut R::Results = unsafe {
-                ::core::mem::transmute(dest[filled..].as_mut_ptr())
-            };
-            self.core.generate(dest_u64);
-            filled += self.results.as_ref().len() * 8;
-            self.index = self.results.as_ref().len();
-        }
-
-        if len_remainder > 0 {
-            self.core.generate(&mut self.results);
-            let (consumed_u64, _) =
-                fill_via_u64_chunks(&mut self.results.as_ref(),
-                                    &mut dest[filled..]);
-
-            self.index = consumed_u64;
-        }
-    }
-
-    #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+    #[inline]
     fn fill_bytes(&mut self, dest: &mut [u8]) {
         let mut read_len = 0;
         self.half_used = false;
         while read_len < dest.len() {
@@ -484,22 +408,27 @@ where <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]>
         }
     }
 
+    #[inline(always)]
     fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
-        Ok(self.fill_bytes(dest))
+        self.fill_bytes(dest);
+        Ok(())
     }
 }
 
 impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> {
     type Seed = R::Seed;
 
+    #[inline(always)]
     fn from_seed(seed: Self::Seed) -> Self {
         Self::new(R::from_seed(seed))
     }
 
+    #[inline(always)]
     fn seed_from_u64(seed: u64) -> Self {
         Self::new(R::seed_from_u64(seed))
    }
 
+    #[inline(always)]
     fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
         Ok(Self::new(R::from_rng(rng)?))
     }
-- 
cgit v1.2.1
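
Postscript on the `next_u64` hunk for `BlockRng` above: the patch replaces a
dereference of a cast `u32` pointer, previously gated on x86/x86_64, with a
`target_endian` check plus `ptr::read_unaligned`, which is defined behavior
on any little-endian target because it makes no alignment assumption. A
standalone sketch of the same technique follows; the free function, test
values, and `main` harness are illustrative and not taken from the patch:

    use core::ptr;

    // Read two adjacent u32 results as one u64 without assuming alignment.
    fn read_u64(results: &[u32], index: usize) -> u64 {
        if cfg!(target_endian = "little") {
            // The slice index guarantees both words are in bounds; the
            // pointer may still be only 4-byte aligned, hence read_unaligned.
            let ptr = results[index..=index + 1].as_ptr() as *const u64;
            unsafe { ptr::read_unaligned(ptr) }
        } else {
            // Portable fallback: combine the two words explicitly.
            let x = u64::from(results[index]);
            let y = u64::from(results[index + 1]);
            (y << 32) | x
        }
    }

    fn main() {
        let results = [0x1111_1111u32, 0x2222_2222, 0x3333_3333];
        // Both branches produce the same value, so this holds on any target.
        assert_eq!(read_u64(&results, 0), 0x2222_2222_1111_1111);
        println!("{:#x}", read_u64(&results, 1));
    }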