From 96f48538503ff40bf0017652c4b4f3a42cd3fa94 Mon Sep 17 00:00:00 2001
From: Tim Ruffing
Date: Sat, 1 Apr 2023 15:35:50 +0900
Subject: [PATCH] ct: Use volatile "trick" in all fe/scalar cmov implementations

Apparently clang 15 is able to compile our cmov code into a branch,
at least for fe_cmov and fe_storage_cmov. This commit makes the
condition volatile in all cmov implementations (except ge, but that
one only calls into the fe impls).

This is just a quick fix. We should still look into other methods,
e.g., asm and #457.

We should also consider not caring about constant-time in
scalar_low_impl.h.

We should also consider testing on very new compilers in nightly CI,
see https://github.com/bitcoin-core/secp256k1/pull/864#issuecomment-769211867
---
 src/field_10x26_impl.h | 6 ++++--
 src/field_5x52_impl.h  | 6 ++++--
 src/scalar_4x64_impl.h | 3 ++-
 src/scalar_8x32_impl.h | 3 ++-
 src/scalar_low_impl.h  | 3 ++-
 5 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/src/field_10x26_impl.h b/src/field_10x26_impl.h
index 21742bf6e..0eeb8a8de 100644
--- a/src/field_10x26_impl.h
+++ b/src/field_10x26_impl.h
@@ -1132,8 +1132,9 @@ static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
 
 static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
     uint32_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
-    mask0 = flag + ~((uint32_t)0);
+    mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
@@ -1231,8 +1232,9 @@ static SECP256K1_INLINE void secp256k1_fe_half(secp256k1_fe *r) {
 
 static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
     uint32_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
-    mask0 = flag + ~((uint32_t)0);
+    mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
diff --git a/src/field_5x52_impl.h b/src/field_5x52_impl.h
index 6bd202f58..dc0467db1 100644
--- a/src/field_5x52_impl.h
+++ b/src/field_5x52_impl.h
@@ -476,8 +476,9 @@ static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
 
 static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
     uint64_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
-    mask0 = flag + ~((uint64_t)0);
+    mask0 = vflag + ~((uint64_t)0);
     mask1 = ~mask0;
     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
@@ -559,8 +560,9 @@ static SECP256K1_INLINE void secp256k1_fe_half(secp256k1_fe *r) {
 
 static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
     uint64_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
-    mask0 = flag + ~((uint64_t)0);
+    mask0 = vflag + ~((uint64_t)0);
     mask1 = ~mask0;
     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h
index 585a4b630..60aca8c10 100644
--- a/src/scalar_4x64_impl.h
+++ b/src/scalar_4x64_impl.h
@@ -958,8 +958,9 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint64_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->d, sizeof(r->d));
-    mask0 = flag + ~((uint64_t)0);
+    mask0 = vflag + ~((uint64_t)0);
     mask1 = ~mask0;
     r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
     r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h
index 6086f1ec2..ad025cff0 100644
--- a/src/scalar_8x32_impl.h
+++ b/src/scalar_8x32_impl.h
@@ -733,8 +733,9 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint32_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->d, sizeof(r->d));
-    mask0 = flag + ~((uint32_t)0);
+    mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
     r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
diff --git a/src/scalar_low_impl.h b/src/scalar_low_impl.h
index aa75f8b0f..4005cc8c8 100644
--- a/src/scalar_low_impl.h
+++ b/src/scalar_low_impl.h
@@ -120,8 +120,9 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint32_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r, sizeof(*r));
-    mask0 = flag + ~((uint32_t)0);
+    mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     *r = (*r & mask0) | (*a & mask1);
 }
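
Note for readers (not part of the patch itself): the following standalone
sketch, with a hypothetical cmov_u32 function made up for illustration,
shows the mask arithmetic used in all the hunks above and why the volatile
copy of the condition matters. Because the compiler must actually read
vflag and cannot assume it only ever holds 0 or 1, it cannot legally
rewrite the branch-free select into a conditional jump on the secret flag.

    #include <stdint.h>

    /* Branch-free conditional move: *r = flag ? a : *r. */
    static void cmov_u32(uint32_t *r, uint32_t a, int flag) {
        /* Launder the condition through a volatile local so the
         * optimizer treats its value as unknown. */
        volatile int vflag = flag;
        /* flag == 1: mask0 = 0x00000000, mask1 = 0xFFFFFFFF -> pick a.
         * flag == 0: mask0 = 0xFFFFFFFF, mask1 = 0x00000000 -> keep *r. */
        uint32_t mask0 = vflag + ~((uint32_t)0);
        uint32_t mask1 = ~mask0;
        *r = (*r & mask0) | (a & mask1);
    }

The same laundering effect could be achieved with inline asm, which is one
of the alternative methods the commit message mentions.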