chacha20: adding 64-bit counter support #359

Open
wants to merge 11 commits into master
47 changes: 42 additions & 5 deletions Cargo.lock

Some generated files are not rendered by default.

51 changes: 37 additions & 14 deletions chacha20/src/backends/avx2.rs
@@ -1,8 +1,8 @@
-use crate::Rounds;
+use crate::{Rounds, Variant};
use core::marker::PhantomData;

#[cfg(feature = "rng")]
-use crate::{ChaChaCore, Variant};
+use crate::ChaChaCore;

#[cfg(feature = "cipher")]
use crate::{
@@ -33,10 +33,11 @@ const N: usize = PAR_BLOCKS / 2;
#[inline]
#[target_feature(enable = "avx2")]
#[cfg(feature = "cipher")]
-pub(crate) unsafe fn inner<R, F>(state: &mut [u32; STATE_WORDS], f: F)
+pub(crate) unsafe fn inner<R, F, V>(state: &mut [u32; STATE_WORDS], f: F)
where
R: Rounds,
F: StreamClosure<BlockSize = U64>,
+V: Variant
{
let state_ptr = state.as_ptr() as *const __m128i;
let v = [
@@ -45,21 +46,33 @@ where
_mm256_broadcastsi128_si256(_mm_loadu_si128(state_ptr.add(2))),
];
let mut c = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_ptr.add(3)));
-c = _mm256_add_epi32(c, _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 0));
+if V::USES_U32_COUNTER {
+c = _mm256_add_epi32(c, _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 0));
+} else {
+c = _mm256_add_epi64(c, _mm256_set_epi64x(0, 1, 0, 0));
+}
let mut ctr = [c; N];
for i in 0..N {
ctr[i] = c;
-c = _mm256_add_epi32(c, _mm256_set_epi32(0, 0, 0, 2, 0, 0, 0, 2));
+if V::USES_U32_COUNTER {
+c = _mm256_add_epi32(c, _mm256_set_epi32(0, 0, 0, 2, 0, 0, 0, 2));
+} else {
+c = _mm256_add_epi64(c, _mm256_set_epi64x(0, 2, 0, 2));
+}
}
-let mut backend = Backend::<R> {
+let mut backend = Backend::<R, V> {
v,
ctr,
_pd: PhantomData,
+_variant: PhantomData
};

f.call(&mut backend);

state[12] = _mm256_extract_epi32(backend.ctr[0], 0) as u32;
+if !V::USES_U32_COUNTER {
+state[13] = _mm256_extract_epi32(backend.ctr[0], 1) as u32;
+}
}
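
A note on the write-back above: ChaCha keeps its block counter in state word 12 for 32-bit-counter variants, and in words 12 and 13 (as a little-endian u64) for 64-bit-counter variants, which is why the 64-bit path also extracts lane 1 into `state[13]`. Below is a minimal scalar sketch of the same write-back, assuming `Variant` exposes a `USES_U32_COUNTER` const as this diff implies; the helper name is hypothetical.

```rust
/// Scalar equivalent of the AVX2 counter write-back (illustrative only).
fn write_back_counter(state: &mut [u32; 16], ctr: u64, uses_u32_counter: bool) {
    // The low 32 bits of the counter always live in state word 12.
    state[12] = ctr as u32;
    if !uses_u32_counter {
        // 64-bit variants keep the high half, including any carry, in word 13.
        state[13] = (ctr >> 32) as u32;
    }
}
```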

#[inline]
@@ -83,41 +96,47 @@ where
ctr[i] = c;
c = _mm256_add_epi32(c, _mm256_set_epi32(0, 0, 0, 2, 0, 0, 0, 2));
}
-let mut backend = Backend::<R> {
+let mut backend = Backend::<R, V> {
v,
ctr,
_pd: PhantomData,
+_variant: PhantomData
};

backend.rng_gen_par_ks_blocks(buffer);

core.state[12] = _mm256_extract_epi32(backend.ctr[0], 0) as u32;
}

-struct Backend<R: Rounds> {
+struct Backend<R: Rounds, V: Variant> {
v: [__m256i; 3],
ctr: [__m256i; N],
_pd: PhantomData<R>,
+_variant: PhantomData<V>
}

#[cfg(feature = "cipher")]
-impl<R: Rounds> BlockSizeUser for Backend<R> {
+impl<R: Rounds, V: Variant> BlockSizeUser for Backend<R, V> {
type BlockSize = U64;
}

#[cfg(feature = "cipher")]
-impl<R: Rounds> ParBlocksSizeUser for Backend<R> {
+impl<R: Rounds, V: Variant> ParBlocksSizeUser for Backend<R, V> {
type ParBlocksSize = U4;
}

#[cfg(feature = "cipher")]
-impl<R: Rounds> StreamBackend for Backend<R> {
+impl<R: Rounds, V: Variant> StreamBackend for Backend<R, V> {
#[inline(always)]
fn gen_ks_block(&mut self, block: &mut Block) {
unsafe {
let res = rounds::<R>(&self.v, &self.ctr);
for c in self.ctr.iter_mut() {
-*c = _mm256_add_epi32(*c, _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 1));
+if V::USES_U32_COUNTER {
+*c = _mm256_add_epi32(*c, _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 1));
+} else {
+*c = _mm256_add_epi64(*c, _mm256_set_epi64x(0, 1, 0, 1));
+}
}

let res0: [__m128i; 8] = core::mem::transmute(res[0]);
@@ -136,7 +155,11 @@ impl<R: Rounds> StreamBackend for Backend<R> {

let pb = PAR_BLOCKS as i32;
for c in self.ctr.iter_mut() {
-*c = _mm256_add_epi32(*c, _mm256_set_epi32(0, 0, 0, pb, 0, 0, 0, pb));
+if V::USES_U32_COUNTER {
+*c = _mm256_add_epi32(*c, _mm256_set_epi32(0, 0, 0, pb, 0, 0, 0, pb));
+} else {
+*c = _mm256_add_epi64(*c, _mm256_set_epi64x(0, pb as i64, 0, pb as i64));
+}
}

let mut block_ptr = blocks.as_mut_ptr() as *mut __m128i;
@@ -153,7 +176,7 @@ impl<R: Rounds> StreamBackend for Backend<R> {
}

#[cfg(feature = "rng")]
-impl<R: Rounds> Backend<R> {
+impl<R: Rounds, V: Variant> Backend<R, V> {
#[inline(always)]
fn rng_gen_par_ks_blocks(&mut self, blocks: &mut [u32; 64]) {
unsafe {
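For readers following the lane arithmetic: each 128-bit half of a 256-bit counter vector holds the fourth state row of one block (counter word or words first, then the nonce), so the 32-bit path adds to dword lanes 0 and 4 only, while the 64-bit path adds to qword lanes 0 and 2, letting a carry out of word 12 propagate into word 13. A standalone sketch of the two increment paths, using a hypothetical `bump_counters` helper that is not part of this PR:

```rust
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

/// Advance the counters of the two blocks packed in `row3` by `n` blocks.
/// Dword lane 0 / qword lane 0 holds the first block's counter; dword lane 4 /
/// qword lane 2 holds the second block's (illustrative only, not PR code).
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx2")]
unsafe fn bump_counters(row3: __m256i, n: i32, uses_u32_counter: bool) -> __m256i {
    if uses_u32_counter {
        // 32-bit counter: touch only word 12 of each block; wraps within the lane.
        _mm256_add_epi32(row3, _mm256_set_epi32(0, 0, 0, n, 0, 0, 0, n))
    } else {
        // 64-bit counter: a quadword add carries from word 12 into word 13.
        _mm256_add_epi64(row3, _mm256_set_epi64x(0, n as i64, 0, n as i64))
    }
}
```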