@@ -38,18 +38,20 @@ void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1
                 CHECK(r == expected_r);
                 CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
                       (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
-                /* In computing the recid, there is an overflow condition that is disabled in
-                 * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value
-                 * will exceed the group order, and our signing code always holds out for r
-                 * values that don't overflow, so with a proper overflow check the tests would
-                 * loop indefinitely. */
+                /* The recid's second bit is for conveying overflow (R.x value >= group order).
+                 * In the actual secp256k1 this is an astronomically unlikely event, but in the
+                 * small group used here, it will always be the case.
+                 * Note that this isn't actually useful; full recovery would need to convey
+                 * floor(R.x / group_order), but only one bit is used as that is sufficient
+                 * in the real group. */
+                expected_recid = 2;
                 r_dot_y_normalized = group[k].y;
                 secp256k1_fe_normalize(&r_dot_y_normalized);
                 /* Also the recovery id is flipped depending if we hit the low-s branch */
                 if ((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER) {
-                    expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0;
+                    expected_recid |= secp256k1_fe_is_odd(&r_dot_y_normalized);
                 } else {
-                    expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1;
+                    expected_recid |= !secp256k1_fe_is_odd(&r_dot_y_normalized);
                 }
                 CHECK(recid == expected_recid);
 
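For context, the recid packs two independent facts about the nonce point R that a verifier needs to reconstruct R from the scalar r: bit 0 is the parity of R.y, and bit 1 records whether R.x overflowed the group order. Below is a minimal standalone sketch of that encoding, including the parity flip caused by the low-s branch (negating s corresponds to negating R, which flips R.y's parity while leaving R.x unchanged). The helper name `compute_recid` is hypothetical, for illustration only; it is not part of libsecp256k1's API.

```c
#include <stdio.h>

/* Hypothetical helper (not libsecp256k1 API): pack a recovery id.
 * bit 1: R.x overflowed the group order (always true in the tiny test group),
 * bit 0: parity of R.y, flipped when low-s normalization negated s,
 *        because (r, -s) verifies against -R, whose y coordinate has the
 *        opposite parity while its x coordinate is unchanged. */
static int compute_recid(int rx_overflowed, int ry_is_odd, int s_was_negated) {
    return (rx_overflowed << 1) | ((ry_is_odd ^ s_was_negated) & 1);
}

int main(void) {
    /* In the exhaustive test's small group, bit 1 is always set, so the
     * expected recid is always 2 or 3 -- exactly what the patch encodes by
     * starting from expected_recid = 2 and OR-ing in the parity bit. */
    printf("%d\n", compute_recid(1, 0, 0)); /* 2 */
    printf("%d\n", compute_recid(1, 1, 0)); /* 3 */
    printf("%d\n", compute_recid(1, 1, 1)); /* 2: low-s branch flipped parity */
    return 0;
}
```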