94 changes: 92 additions & 2 deletions k256/src/arithmetic/scalar.rs
@@ -6,7 +6,7 @@ mod wide;

pub(crate) use self::wide::WideScalar;

use crate::{FieldBytes, Secp256k1, ORDER};
use crate::{FieldBytes, NonZeroScalar, Secp256k1, ORDER};
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Shr, Sub, SubAssign};
use elliptic_curve::{
bigint::{limb, nlimbs, ArrayEncoding, Encoding, Limb, U256},
@@ -25,7 +25,11 @@ use elliptic_curve::{
use {crate::ScalarBits, elliptic_curve::group::ff::PrimeFieldBits};

#[cfg(feature = "digest")]
use ecdsa_core::{elliptic_curve::consts::U32, hazmat::FromDigest, signature::digest::Digest};
use ecdsa_core::{
elliptic_curve::consts::{U32, U64},
hazmat::FromDigest,
signature::digest::Digest,
};

#[cfg(test)]
use num_bigint::{BigUint, ToBigUint};
@@ -220,6 +224,72 @@ impl Scalar {
Self(U256::conditional_select(&w, &r, !underflow))
}

/// Parses the given byte array as a scalar.
///
/// Treats the byte array as a big-endian integer and reduces it modulo the curve order.
#[cfg(feature = "digest")]
fn from_wide_bytes_reduced(bytes: &[u8; 64]) -> Self {
let w = WideScalar::from_bytes(bytes);
w.reduce()
}

#[allow(dead_code)]
// TODO: this can be used instead of `generate_vartime_nonzero()`
fn generate_biased_nonzero(mut rng: impl CryptoRng + RngCore) -> NonZeroScalar {
let mut buf = [0u8; 64];
rng.fill_bytes(&mut buf);
WideScalar::from_bytes(&buf).reduce_nonzero()
}

fn generate_vartime_nonzero(mut rng: impl RngCore) -> NonZeroScalar {
let mut bytes = FieldBytes::default();

// TODO: pre-generate several scalars to bring the probability of non-constant-timeness down?
loop {
rng.fill_bytes(&mut bytes);
if let Some(scalar) = Scalar::from_repr(bytes).into() {
if let Some(nz_scalar) = NonZeroScalar::new(scalar).into() {
return nz_scalar;
}
}
}
}

// TODO: better located in the `Field` impl for `NonZeroScalar`.
/// Generates a non-zero scalar (in variable time).
pub fn random_nonzero(rng: impl RngCore) -> NonZeroScalar {
Self::generate_vartime_nonzero(rng)
}

// TODO: should `FromDigest` have this method instead?
/// Generates a scalar from a large enough digest to make the bias negligible.
#[cfg(feature = "digest")]
pub fn from_digest_safe<D>(digest: D) -> Self
where
D: Digest<OutputSize = U64>,
{
// TODO: A pretty awkward conversion. Is there an idiomatic way?
let garr = digest.finalize();
let mut arr = [0u8; 64];
arr[0..32].clone_from_slice(&garr[0..32]);
arr[32..64].clone_from_slice(&garr[32..64]);
Self::from_wide_bytes_reduced(&arr)
}

// TODO: should it be in `FromDigest`? Or a separate trait?
/// Generates a nonzero scalar from a large enough digest to make the bias negligible.
#[cfg(feature = "digest")]
pub fn from_digest_safe_nonzero<D>(digest: D) -> NonZeroScalar
where
D: Digest<OutputSize = U64>,
{
let garr = digest.finalize();
let mut arr = [0u8; 64];
arr[0..32].clone_from_slice(&garr[0..32]);
arr[32..64].clone_from_slice(&garr[32..64]);
WideScalar::from_bytes(&arr).reduce_nonzero()
}

/// Is this scalar greater than or equal to n / 2?
pub fn is_high(&self) -> Choice {
self.0.ct_gt(&FRAC_MODULUS_2)
@@ -709,6 +779,15 @@ mod tests {
assert_eq!((a - &a).is_zero().unwrap_u8(), 1);
}

#[test]
fn from_wide_bytes_reduced() {
let m = Scalar::modulus_as_biguint();
let b = [0xffu8; 64];
let s = Scalar::from_wide_bytes_reduced(&b);
let s_bu = s.to_biguint().unwrap();
assert!(s_bu < m);
}

prop_compose! {
fn scalar()(bytes in any::<[u8; 32]>()) -> Scalar {
let mut res = bytes_to_biguint(&bytes);
@@ -807,5 +886,16 @@ mod tests {
let m = Scalar::modulus_as_biguint();
assert_eq!((&inv_bi * &a_bi) % &m, 1.to_biguint().unwrap());
}

#[test]
fn fuzzy_from_wide_bytes_reduced(bytes_hi in any::<[u8; 32]>(), bytes_lo in any::<[u8; 32]>()) {
let m = Scalar::modulus_as_biguint();
let mut bytes = [0u8; 64];
bytes[0..32].clone_from_slice(&bytes_hi);
bytes[32..64].clone_from_slice(&bytes_lo);
let s = Scalar::from_wide_bytes_reduced(&bytes);
let s_bu = s.to_biguint().unwrap();
assert!(s_bu < m);
}
}
}
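For context, a minimal usage sketch of the API added above, assuming the `digest` feature is enabled and using `sha2`/`rand_core` purely as illustrative digest and RNG sources (the method names match the diff; the surrounding setup is an assumption, not part of the PR):

```rust
use k256::Scalar;
use rand_core::OsRng;
use sha2::{Digest, Sha512};

fn main() {
    // Rejection-sampling path: variable time, uniform, guaranteed nonzero.
    let k = Scalar::random_nonzero(&mut OsRng);

    // Wide-reduction path: a 64-byte digest reduced modulo the curve order,
    // so any bias is on the order of 2^-256 and therefore negligible.
    let mut hasher = Sha512::new();
    hasher.update(b"example message");
    let h = Scalar::from_digest_safe(hasher);

    let _ = (k, h);
}
```

On the `TODO: A pretty awkward conversion` note: since `finalize()` yields a 64-byte `GenericArray`, the two `clone_from_slice` calls could likely be collapsed into a single `arr.copy_from_slice(garr.as_slice())` (or a `try_into()` on the slice), though that is only a suggestion and is not exercised in this diff.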
50 changes: 33 additions & 17 deletions k256/src/arithmetic/scalar/wide32.rs
@@ -1,9 +1,10 @@
//! Wide scalar (32-bit limbs)

use super::{Scalar, MODULUS};
use crate::ORDER;
use crate::{NonZeroScalar, ORDER};
use elliptic_curve::{
bigint::{Limb, U256, U512},
group::ff::Field,
subtle::{Choice, ConditionallySelectable},
};

@@ -230,8 +231,13 @@ impl WideScalar {
Scalar::conditional_select(&res, &res.add(&Scalar::ONE), Choice::from(c as u8))
}

#[inline(always)] // only used in Scalar::mul(), so won't cause binary bloat
pub(super) fn reduce(&self) -> Scalar {
pub(super) fn reduce_impl(&self, modulus_minus_one: bool) -> Scalar {
let neg_modulus0 = if modulus_minus_one {
NEG_MODULUS[0] + 1
} else {
NEG_MODULUS[0]
};

let w = self.0.to_uint_array();
let n0 = w[8];
let n1 = w[9];
@@ -249,46 +255,46 @@
let c0 = w[0];
let c1 = 0;
let c2 = 0;
let (c0, c1) = muladd_fast(n0, NEG_MODULUS[0], c0, c1);
let (c0, c1) = muladd_fast(n0, neg_modulus0, c0, c1);
let (m0, c0, c1) = (c0, c1, 0);
let (c0, c1) = sumadd_fast(w[1], c0, c1);
let (c0, c1, c2) = muladd(n1, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(n1, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(n0, NEG_MODULUS[1], c0, c1, c2);
let (m1, c0, c1, c2) = (c0, c1, c2, 0);
let (c0, c1, c2) = sumadd(w[2], c0, c1, c2);
let (c0, c1, c2) = muladd(n2, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(n2, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(n1, NEG_MODULUS[1], c0, c1, c2);
let (c0, c1, c2) = muladd(n0, NEG_MODULUS[2], c0, c1, c2);
let (m2, c0, c1, c2) = (c0, c1, c2, 0);
let (c0, c1, c2) = sumadd(w[3], c0, c1, c2);
let (c0, c1, c2) = muladd(n3, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(n3, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(n2, NEG_MODULUS[1], c0, c1, c2);
let (c0, c1, c2) = muladd(n1, NEG_MODULUS[2], c0, c1, c2);
let (c0, c1, c2) = muladd(n0, NEG_MODULUS[3], c0, c1, c2);
let (m3, c0, c1, c2) = (c0, c1, c2, 0);
let (c0, c1, c2) = sumadd(w[4], c0, c1, c2);
let (c0, c1, c2) = muladd(n4, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(n4, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(n3, NEG_MODULUS[1], c0, c1, c2);
let (c0, c1, c2) = muladd(n2, NEG_MODULUS[2], c0, c1, c2);
let (c0, c1, c2) = muladd(n1, NEG_MODULUS[3], c0, c1, c2);
let (c0, c1, c2) = sumadd(n0, c0, c1, c2);
let (m4, c0, c1, c2) = (c0, c1, c2, 0);
let (c0, c1, c2) = sumadd(w[5], c0, c1, c2);
let (c0, c1, c2) = muladd(n5, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(n5, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(n4, NEG_MODULUS[1], c0, c1, c2);
let (c0, c1, c2) = muladd(n3, NEG_MODULUS[2], c0, c1, c2);
let (c0, c1, c2) = muladd(n2, NEG_MODULUS[3], c0, c1, c2);
let (c0, c1, c2) = sumadd(n1, c0, c1, c2);
let (m5, c0, c1, c2) = (c0, c1, c2, 0);
let (c0, c1, c2) = sumadd(w[6], c0, c1, c2);
let (c0, c1, c2) = muladd(n6, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(n6, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(n5, NEG_MODULUS[1], c0, c1, c2);
let (c0, c1, c2) = muladd(n4, NEG_MODULUS[2], c0, c1, c2);
let (c0, c1, c2) = muladd(n3, NEG_MODULUS[3], c0, c1, c2);
let (c0, c1, c2) = sumadd(n2, c0, c1, c2);
let (m6, c0, c1, c2) = (c0, c1, c2, 0);
let (c0, c1, c2) = sumadd(w[7], c0, c1, c2);
let (c0, c1, c2) = muladd(n7, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(n7, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(n6, NEG_MODULUS[1], c0, c1, c2);
let (c0, c1, c2) = muladd(n5, NEG_MODULUS[2], c0, c1, c2);
let (c0, c1, c2) = muladd(n4, NEG_MODULUS[3], c0, c1, c2);
@@ -316,25 +322,25 @@ impl WideScalar {
let c0 = m0;
let c1 = 0;
let c2 = 0;
let (c0, c1) = muladd_fast(m8, NEG_MODULUS[0], c0, c1);
let (c0, c1) = muladd_fast(m8, neg_modulus0, c0, c1);
let (p0, c0, c1) = (c0, c1, 0);
let (c0, c1) = sumadd_fast(m1, c0, c1);
let (c0, c1, c2) = muladd(m9, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(m9, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(m8, NEG_MODULUS[1], c0, c1, c2);
let (p1, c0, c1, c2) = (c0, c1, c2, 0);
let (c0, c1, c2) = sumadd(m2, c0, c1, c2);
let (c0, c1, c2) = muladd(m10, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(m10, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(m9, NEG_MODULUS[1], c0, c1, c2);
let (c0, c1, c2) = muladd(m8, NEG_MODULUS[2], c0, c1, c2);
let (p2, c0, c1, c2) = (c0, c1, c2, 0);
let (c0, c1, c2) = sumadd(m3, c0, c1, c2);
let (c0, c1, c2) = muladd(m11, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(m11, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(m10, NEG_MODULUS[1], c0, c1, c2);
let (c0, c1, c2) = muladd(m9, NEG_MODULUS[2], c0, c1, c2);
let (c0, c1, c2) = muladd(m8, NEG_MODULUS[3], c0, c1, c2);
let (p3, c0, c1, c2) = (c0, c1, c2, 0);
let (c0, c1, c2) = sumadd(m4, c0, c1, c2);
let (c0, c1, c2) = muladd(m12, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(m12, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(m11, NEG_MODULUS[1], c0, c1, c2);
let (c0, c1, c2) = muladd(m10, NEG_MODULUS[2], c0, c1, c2);
let (c0, c1, c2) = muladd(m9, NEG_MODULUS[3], c0, c1, c2);
@@ -360,7 +366,7 @@ impl WideScalar {

// Reduce 258 bits into 256.
// r[0..7] = p[0..7] + p[8] * NEG_MODULUS.
let mut c = p0 as u64 + (NEG_MODULUS[0] as u64) * (p8 as u64);
let mut c = p0 as u64 + (neg_modulus0 as u64) * (p8 as u64);
let r0 = (c & 0xFFFFFFFFu64) as u32;
c >>= 32;
c += p1 as u64 + (NEG_MODULUS[1] as u64) * (p8 as u64);
@@ -392,6 +398,16 @@ impl WideScalar {
let underflow = Choice::from((underflow.0 >> 31) as u8);
Scalar(U256::conditional_select(&r, &r2, !underflow | high_bit))
}

#[inline(always)] // only used in Scalar::mul(), so won't cause binary bloat
pub(super) fn reduce(&self) -> Scalar {
self.reduce_impl(false)
}

pub(super) fn reduce_nonzero(&self) -> NonZeroScalar {
let s = self.reduce_impl(true);
NonZeroScalar::new(s + Scalar::one()).unwrap()
}
}

/// Constant-time comparison.
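The new `reduce_nonzero` path relies on a simple observation that the diff encodes at the limb level: switching `NEG_MODULUS[0]` to `NEG_MODULUS[0] + 1` makes the routine reduce modulo n - 1 instead of n, so the intermediate result lands in [0, n - 2], and adding one afterwards maps it onto [1, n - 1], which is never zero. A toy-sized sketch of that argument in plain integer arithmetic (illustrative only, not the limb code above):

```rust
// Toy model of reduce_impl(modulus_minus_one = true) followed by + 1:
// reducing modulo (n - 1) gives a value in [0, n - 2]; adding one gives
// a value in [1, n - 1], i.e. a nonzero residue modulo n.
fn reduce_nonzero_toy(wide: u128, n: u128) -> u128 {
    (wide % (n - 1)) + 1
}

fn main() {
    let n = 101u128; // stand-in for the secp256k1 group order
    for wide in [0u128, 1, 99, 100, 101, 200, u128::MAX] {
        let s = reduce_nonzero_toy(wide, n);
        assert!(s >= 1 && s < n, "result must be a nonzero residue");
    }
}
```

Because the wide input is 512 bits, spreading it over n - 1 residues instead of n keeps the bias negligible, which is what lets `reduce_nonzero` stay constant-time where the rejection-sampling generator cannot.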
44 changes: 30 additions & 14 deletions k256/src/arithmetic/scalar/wide64.rs
@@ -1,9 +1,10 @@
//! Wide scalar (64-bit limbs)

use super::{Scalar, MODULUS};
use crate::ORDER;
use crate::{NonZeroScalar, ORDER};
use elliptic_curve::{
bigint::{Limb, U256, U512},
group::ff::Field,
subtle::{Choice, ConditionallySelectable},
};

@@ -118,32 +119,37 @@ impl WideScalar {
Scalar::conditional_select(&res, &res.add(&Scalar::ONE), Choice::from(c as u8))
}

#[inline(always)] // only used in Scalar::mul(), so won't cause binary bloat
pub(super) fn reduce(&self) -> Scalar {
fn reduce_impl(&self, modulus_minus_one: bool) -> Scalar {
let neg_modulus0 = if modulus_minus_one {
NEG_MODULUS[0] + 1
} else {
NEG_MODULUS[0]
};

let w = self.0.to_uint_array();
let n0 = w[4];
let n1 = w[5];
let n2 = w[6];
let n3 = w[7];

// Reduce 512 bits into 385.
// m[0..6] = self[0..3] + n[0..3] * NEG_MODULUS.
// m[0..6] = self[0..3] + n[0..3] * neg_modulus.
let c0 = w[0];
let c1 = 0;
let c2 = 0;
let (c0, c1) = muladd_fast(n0, NEG_MODULUS[0], c0, c1);
let (c0, c1) = muladd_fast(n0, neg_modulus0, c0, c1);
let (m0, c0, c1) = (c0, c1, 0);
let (c0, c1) = sumadd_fast(w[1], c0, c1);
let (c0, c1, c2) = muladd(n1, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(n1, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(n0, NEG_MODULUS[1], c0, c1, c2);
let (m1, c0, c1, c2) = (c0, c1, c2, 0);
let (c0, c1, c2) = sumadd(w[2], c0, c1, c2);
let (c0, c1, c2) = muladd(n2, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(n2, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(n1, NEG_MODULUS[1], c0, c1, c2);
let (c0, c1, c2) = sumadd(n0, c0, c1, c2);
let (m2, c0, c1, c2) = (c0, c1, c2, 0);
let (c0, c1, c2) = sumadd(w[3], c0, c1, c2);
let (c0, c1, c2) = muladd(n3, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(n3, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(n2, NEG_MODULUS[1], c0, c1, c2);
let (c0, c1, c2) = sumadd(n1, c0, c1, c2);
let (m3, c0, c1, c2) = (c0, c1, c2, 0);
@@ -156,18 +162,18 @@ impl WideScalar {
let m6 = c0;

// Reduce 385 bits into 258.
// p[0..4] = m[0..3] + m[4..6] * NEG_MODULUS.
// p[0..4] = m[0..3] + m[4..6] * neg_modulus.
let c0 = m0;
let c1 = 0;
let c2 = 0;
let (c0, c1) = muladd_fast(m4, NEG_MODULUS[0], c0, c1);
let (c0, c1) = muladd_fast(m4, neg_modulus0, c0, c1);
let (p0, c0, c1) = (c0, c1, 0);
let (c0, c1) = sumadd_fast(m1, c0, c1);
let (c0, c1, c2) = muladd(m5, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(m5, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(m4, NEG_MODULUS[1], c0, c1, c2);
let (p1, c0, c1) = (c0, c1, 0);
let (c0, c1, c2) = sumadd(m2, c0, c1, c2);
let (c0, c1, c2) = muladd(m6, NEG_MODULUS[0], c0, c1, c2);
let (c0, c1, c2) = muladd(m6, neg_modulus0, c0, c1, c2);
let (c0, c1, c2) = muladd(m5, NEG_MODULUS[1], c0, c1, c2);
let (c0, c1, c2) = sumadd(m4, c0, c1, c2);
let (p2, c0, c1, _c2) = (c0, c1, c2, 0);
@@ -179,8 +185,8 @@ impl WideScalar {
debug_assert!(p4 <= 2);

// Reduce 258 bits into 256.
// r[0..3] = p[0..3] + p[4] * NEG_MODULUS.
let mut c = (p0 as u128) + (NEG_MODULUS[0] as u128) * (p4 as u128);
// r[0..3] = p[0..3] + p[4] * neg_modulus.
let mut c = (p0 as u128) + (neg_modulus0 as u128) * (p4 as u128);
let r0 = (c & 0xFFFFFFFFFFFFFFFFu128) as u64;
c >>= 64;
c += (p1 as u128) + (NEG_MODULUS[1] as u128) * (p4 as u128);
@@ -200,6 +206,16 @@ impl WideScalar {
let underflow = Choice::from((underflow.0 >> 63) as u8);
Scalar(U256::conditional_select(&r, &r2, !underflow | high_bit))
}

#[inline(always)] // only used in Scalar::mul(), so won't cause binary bloat
pub(super) fn reduce(&self) -> Scalar {
self.reduce_impl(false)
}

pub(super) fn reduce_nonzero(&self) -> NonZeroScalar {
let s = self.reduce_impl(true);
NonZeroScalar::new(s + Scalar::one()).unwrap()
}
}

/// Constant-time comparison.
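Both limb widths perform the same staged reduction, and the flag threaded through this diff only changes the lowest word of the negated modulus. The underlying identity is that, writing neg_modulus = 2^256 - n, any wide value hi * 2^256 + lo is congruent to hi * neg_modulus + lo modulo n, which is why the code can fold the high limbs back in with a few multiply-adds at a time before a final conditional subtraction. A scaled-down check of that identity with a 16-bit word (an illustrative sketch, not the 32/64-bit limb code above):

```rust
// Scaled-down check of the identity behind the wide reduction:
// for a word size of 2^16 and modulus n < 2^16,
//     hi * 2^16 + lo ≡ hi * (2^16 - n) + lo (mod n).
fn main() {
    let n: u64 = 0xFFF1; // stand-in modulus just below the word size
    let neg_modulus: u64 = (1u64 << 16) - n;
    for wide in [0u64, 1, 0xFFF0, 0x1234_5678, 0xFFFF_FFFF] {
        let (hi, lo) = (wide >> 16, wide & 0xFFFF);
        assert_eq!(wide % n, (hi * neg_modulus + lo) % n);
    }
}
```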