@@ -41,16 +41,19 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
     r->d[1] = 0;
     r->d[2] = 0;
     r->d[3] = 0;
+    secp256k1_scalar_verify(r);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
     VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
+    secp256k1_scalar_verify(a);
     return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
     VERIFY_CHECK(count < 32);
     VERIFY_CHECK(offset + count <= 256);
+    secp256k1_scalar_verify(a);
     if ((offset + count - 1) >> 6 == offset >> 6) {
         return secp256k1_scalar_get_bits(a, offset, count);
     } else {
@@ -85,12 +88,15 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigne
     r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
     secp256k1_u128_accum_u64(&t, r->d[3]);
     r->d[3] = secp256k1_u128_to_u64(&t);
+    secp256k1_scalar_verify(r);
     return overflow;
 }
 
 static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     int overflow;
     secp256k1_uint128 t;
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
     secp256k1_u128_from_u64(&t, a->d[0]);
     secp256k1_u128_accum_u64(&t, b->d[0]);
     r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
@@ -106,13 +112,15 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
     overflow = secp256k1_u128_to_u64(&t) + secp256k1_scalar_check_overflow(r);
     VERIFY_CHECK(overflow == 0 || overflow == 1);
     secp256k1_scalar_reduce(r, overflow);
+    secp256k1_scalar_verify(r);
     return overflow;
 }
 
 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
     secp256k1_uint128 t;
     volatile int vflag = flag;
     VERIFY_CHECK(bit < 256);
+    secp256k1_scalar_verify(r);
     bit += ((uint32_t) vflag - 1) & 0x100;  /* forcing (bit >> 6) > 3 makes this a noop */
     secp256k1_u128_from_u64(&t, r->d[0]);
     secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
@@ -128,6 +136,7 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
     r->d[3] = secp256k1_u128_to_u64(&t);
 #ifdef VERIFY
     VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
+    secp256k1_scalar_verify(r);
 #endif
 }
 
@@ -138,25 +147,29 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
     r->d[2] = secp256k1_read_be64(&b32[8]);
     r->d[3] = secp256k1_read_be64(&b32[0]);
     over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
+    secp256k1_scalar_verify(r);
     if (overflow) {
         *overflow = over;
     }
 }
 
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
     secp256k1_write_be64(&bin[0], a->d[3]);
     secp256k1_write_be64(&bin[8], a->d[2]);
     secp256k1_write_be64(&bin[16], a->d[1]);
     secp256k1_write_be64(&bin[24], a->d[0]);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
     return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
 }
 
 static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
     uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
     secp256k1_uint128 t;
+    secp256k1_scalar_verify(a);
     secp256k1_u128_from_u64(&t, ~a->d[0]);
     secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
     r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
@@ -169,15 +182,18 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
     secp256k1_u128_accum_u64(&t, ~a->d[3]);
     secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
     r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
+    secp256k1_scalar_verify(r);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
     return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
 }
 
 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
     int yes = 0;
     int no = 0;
+    secp256k1_scalar_verify(a);
     no |= (a->d[3] < SECP256K1_N_H_3);
     yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
     no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
@@ -194,6 +210,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     uint64_t mask = -vflag;
     uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
     secp256k1_uint128 t;
+    secp256k1_scalar_verify(r);
     secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
     secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
     r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
@@ -206,6 +223,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     secp256k1_u128_accum_u64(&t, r->d[3] ^ mask);
     secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
     r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
+    secp256k1_scalar_verify(r);
     return 2 * (mask == 0) - 1;
 }
 
@@ -764,23 +782,29 @@ static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, c
 
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     uint64_t l[8];
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
     secp256k1_scalar_mul_512(l, a, b);
     secp256k1_scalar_reduce_512(r, l);
+    secp256k1_scalar_verify(r);
 }
 
 static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
     int ret;
     VERIFY_CHECK(n > 0);
     VERIFY_CHECK(n < 16);
+    secp256k1_scalar_verify(r);
     ret = r->d[0] & ((1 << n) - 1);
     r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
     r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
     r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
     r->d[3] = (r->d[3] >> n);
+    secp256k1_scalar_verify(r);
     return ret;
 }
 
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
+    secp256k1_scalar_verify(k);
     r1->d[0] = k->d[0];
     r1->d[1] = k->d[1];
     r1->d[2] = 0;
@@ -789,9 +813,13 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
     r2->d[1] = k->d[3];
     r2->d[2] = 0;
     r2->d[3] = 0;
+    secp256k1_scalar_verify(r1);
+    secp256k1_scalar_verify(r2);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
     return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
 }
 
@@ -801,6 +829,8 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     unsigned int shiftlow;
     unsigned int shifthigh;
     VERIFY_CHECK(shift >= 256);
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
     secp256k1_scalar_mul_512(l, a, b);
     shiftlimbs = shift >> 6;
     shiftlow = shift & 0x3F;
@@ -810,18 +840,21 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
     r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
     secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
+    secp256k1_scalar_verify(r);
 }
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint64_t mask0, mask1;
     volatile int vflag = flag;
     SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
+    secp256k1_scalar_verify(a);
     mask0 = vflag + ~((uint64_t)0);
     mask1 = ~mask0;
     r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
     r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
     r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
     r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
+    secp256k1_scalar_verify(r);
 }
 
 static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
@@ -841,18 +874,14 @@ static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_
     r->d[2] = a2 >> 4 | a3 << 58;
     r->d[3] = a3 >> 6 | a4 << 56;
 
-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
-#endif
+    secp256k1_scalar_verify(r);
 }
 
 static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
     const uint64_t M62 = UINT64_MAX >> 2;
     const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];
 
-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_scalar_check_overflow(a) == 0);
-#endif
+    secp256k1_scalar_verify(a);
 
     r->v[0] = a0 & M62;
     r->v[1] = (a0 >> 62 | a1 << 2) & M62;
@@ -870,31 +899,36 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
     secp256k1_modinv64_signed62 s;
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
+    secp256k1_scalar_verify(x);
 #endif
     secp256k1_scalar_to_signed62(&s, x);
     secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed62(r, &s);
 
 #ifdef VERIFY
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
+    secp256k1_scalar_verify(r);
 #endif
 }
 
 static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
     secp256k1_modinv64_signed62 s;
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
+    secp256k1_scalar_verify(x);
 #endif
     secp256k1_scalar_to_signed62(&s, x);
     secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed62(r, &s);
 
 #ifdef VERIFY
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
+    secp256k1_scalar_verify(r);
 #endif
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
     return !(a->d[0] & 1);
 }
 
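Note: the definition of secp256k1_scalar_verify itself is outside these hunks. Judging from the inline assertions it replaces in secp256k1_scalar_from_signed62 and secp256k1_scalar_to_signed62 above, it is presumably a thin wrapper around secp256k1_scalar_check_overflow; the sketch below is an assumption based only on those removed lines, and its exact signature and location are not confirmed by this diff.

/* Hypothetical sketch of the verify helper: assert that the scalar is fully
 * reduced modulo the group order, mirroring the
 * VERIFY_CHECK(secp256k1_scalar_check_overflow(...) == 0) lines removed above.
 * Intended to have an effect only in VERIFY builds. */
static void secp256k1_scalar_verify(const secp256k1_scalar *r) {
    VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
}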