This repository was archived by the owner on May 20, 2024. It is now read-only.

Commit c1bcf4c

Merge pull request #12 from dragan2234/develop
Merge banderwagon branch to develop
2 parents 4faf139 + 85dc2b4 commit c1bcf4c

12 files changed: +245 −176 lines

Cargo.toml  (+3)

@@ -3,6 +3,7 @@ name = "ipa-multipoint"
 version = "0.1.0"
 edition = "2018"
 
+
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
@@ -18,6 +19,8 @@ rand_chacha = { version = "0.3.0", default-features = false }
 itertools = "0.10.1"
 sha2 = "0.9.8"
 hex = "0.4.3"
+banderwagon = { git = "https://github.com/crate-crypto/banderwagon" }
+
 [[bench]]
 name = "benchmark_main"
 harness = false

Readme.md  (+43 −2)

@@ -2,7 +2,7 @@
 
 A polynomial commitment scheme for opening multiple polynomials at different points using the inner product argument.
 
-This library uses the bandersnatch curve and is described in [https://eprint.iacr.org/2021/1152.pdf].
+This library uses the banderwagon prime group (https://hackmd.io/@6iQDuIePQjyYBqDChYw_jg/BJ2-L6Nzc), built on top of the bandersnatch curve described in [https://eprint.iacr.org/2021/1152.pdf].
 
 
 **Do not use in production.**
@@ -24,6 +24,8 @@ This library uses the bandersnatch curve and is described in [https://eprint.iac
 
 ## Tentative benchmarks
 
+Bandersnatch (old):
+
 Machine : 2.4 GHz 8-Core Intel Core i9
 
 - To verify the opening of a polynomial of degree 255 (256 points in lagrange basis): `11.92ms`
@@ -37,4 +39,43 @@ Machine : 2.4 GHz 8-Core Intel Core i9
 - To prove a multi-opening proof of 20,000 polynomials: `422.94ms`
 
 
-These benchmarks are tentative because on one hand, the machine being used may not be the what the average user uses, while on the other hand, we have not optimised the verifier algorithm to remove `bH` , the pippenger algorithm does not take into consideration GLV and we are not using rayon to parallelise.
+
+New benchmarks on the banderwagon subgroup: Apple M1 Pro, 16GB RAM
+
+- ipa - prove (256): `28.700 ms`
+
+- ipa - verify (multi exp2 256): `2.1628 ms`
+
+- ipa - verify (256): `20.818 ms`
+
+- multipoint - verify (256)/1: `2.6983 ms`
+
+- multipoint - verify (256)/1000: `8.5925 ms`
+
+- multipoint - verify (256)/2000: `12.688 ms`
+
+- multipoint - verify (256)/4000: `21.726 ms`
+
+- multipoint - verify (256)/8000: `36.616 ms`
+
+- multipoint - verify (256)/16000: `69.401 ms`
+
+- multipoint - verify (256)/128000: `490.23 ms`
+
+- multiproof - prove (256)/1: `33.231 ms`
+
+- multiproof - prove (256)/1000: `47.764 ms`
+
+- multiproof - prove (256)/2000: `56.670 ms`
+
+- multiproof - prove (256)/4000: `74.597 ms`
+
+- multiproof - prove (256)/8000: `114.39 ms`
+
+- multiproof - prove (256)/16000: `189.94 ms`
+
+- multiproof - prove (256)/128000: `1.2693 s`
+
+
+
+*These benchmarks are tentative because, on one hand, the machine being used may not be what the average user uses, while on the other hand, we have not optimised the verifier algorithm to remove `bH`, the Pippenger algorithm does not take GLV into consideration, and we are not using rayon to parallelise.*

benches/benchmarks/ipa_prove.rs  (+2 −2)

@@ -1,11 +1,11 @@
 use ark_std::rand::SeedableRng;
 use ark_std::UniformRand;
-use bandersnatch::Fr;
+use banderwagon::Fr;
 use criterion::{black_box, criterion_group, criterion_main, Criterion};
 use ipa_multipoint::ipa::create;
 use ipa_multipoint::lagrange_basis::LagrangeBasis;
 use ipa_multipoint::math_utils::powers_of;
-use ipa_multipoint::multiproof::CRS;
+use ipa_multipoint::crs::CRS;
 use ipa_multipoint::transcript::Transcript;
 use rand_chacha::ChaCha20Rng;
 
benches/benchmarks/ipa_verify.rs  (+2 −2)

@@ -1,11 +1,11 @@
 use ark_std::rand::SeedableRng;
 use ark_std::UniformRand;
-use bandersnatch::Fr;
+use banderwagon::Fr;
 use criterion::{black_box, criterion_group, criterion_main, Criterion};
 use ipa_multipoint::ipa::create;
 use ipa_multipoint::lagrange_basis::LagrangeBasis;
 use ipa_multipoint::math_utils::{inner_product, powers_of};
-use ipa_multipoint::multiproof::CRS;
+use ipa_multipoint::crs::CRS;
 use ipa_multipoint::transcript::Transcript;
 use rand_chacha::ChaCha20Rng;
 

benches/benchmarks/multipoint_prove.rs  (+2 −1)

@@ -1,10 +1,11 @@
 use ark_std::UniformRand;
-use bandersnatch::Fr;
+use banderwagon::Fr;
 use criterion::BenchmarkId;
 use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion};
 use ipa_multipoint::lagrange_basis::*;
 use ipa_multipoint::multiproof::*;
 use ipa_multipoint::transcript::Transcript;
+use ipa_multipoint::crs::CRS;
 
 pub fn criterion_benchmark(c: &mut Criterion) {
     let mut group = c.benchmark_group("multiproof - prove (256)");

benches/benchmarks/multipoint_verify.rs  (+2 −1)

@@ -1,10 +1,11 @@
 use ark_std::UniformRand;
-use bandersnatch::Fr;
+use banderwagon::Fr;
 use criterion::BenchmarkId;
 use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion};
 use ipa_multipoint::lagrange_basis::*;
 use ipa_multipoint::multiproof::*;
 use ipa_multipoint::transcript::Transcript;
+use ipa_multipoint::crs::CRS;
 
 pub fn criterion_benchmark(c: &mut Criterion) {
     let mut group = c.benchmark_group("multipoint - verify (256)");

src/crs.rs  (new file, +123)

@@ -0,0 +1,123 @@
+use ark_serialize::CanonicalSerialize;
+use banderwagon::Element;
+
+use crate::{ipa::slow_vartime_multiscalar_mul, lagrange_basis::LagrangeBasis};
+
+#[derive(Debug, Clone)]
+pub struct CRS {
+    pub n: usize,
+    pub G: Vec<Element>,
+    pub Q: Element,
+}
+
+impl CRS {
+    pub fn new(n: usize, seed: &'static [u8]) -> CRS {
+        // TODO: generate the Q value from the seed also
+        // TODO: this will also make assert_dedup work as expected
+        // TODO: since we should take in `Q` too
+        let G: Vec<_> = generate_random_elements(n, seed).into_iter().collect();
+        let Q = Element::prime_subgroup_generator();
+
+        CRS::assert_dedup(&G);
+
+        CRS { n, G, Q }
+    }
+    // Asserts that none of the generated points are the same
+    fn assert_dedup(points: &[Element]) {
+        use std::collections::HashMap;
+        let mut map = HashMap::new();
+        for point in points {
+            assert!(
+                map.insert(point.to_bytes(), ()).is_none(),
+                "crs has duplicated points"
+            )
+        }
+    }
+    pub fn commit_lagrange_poly(&self, polynomial: &LagrangeBasis) -> Element {
+        slow_vartime_multiscalar_mul(polynomial.values().iter(), self.G.iter())
+    }
+}
+
+impl std::ops::Index<usize> for CRS {
+    type Output = Element;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self.G[index]
+    }
+}
+
+fn generate_random_elements(num_required_points: usize, seed: &'static [u8]) -> Vec<Element> {
+    use ark_ec::group::Group;
+    use ark_ff::PrimeField;
+    use bandersnatch::Fq;
+    use sha2::{Digest, Sha256};
+
+    let choose_largest = false;
+
+    (0u64..)
+        .into_iter()
+        // Hash the seed + i to get a possible x value
+        .map(|i| {
+            let mut hasher = Sha256::new();
+            hasher.update(seed);
+            hasher.update(&i.to_be_bytes());
+            let bytes: Vec<u8> = hasher.finalize().to_vec();
+            bytes
+        })
+        // The Element::from_bytes method does not reduce the bytes; it expects the
+        // input to be in a canonical format, so we must do the reduction ourselves
+        .map(|hash_bytes| Fq::from_be_bytes_mod_order(&hash_bytes))
+        .map(|x_coord| {
+            let mut bytes = [0u8; 32];
+            x_coord.serialize(&mut bytes[..]).unwrap();
+            // TODO: this reverse is hacky, and it's because there is no way to specify the endianness in arkworks
+            // TODO: so we reverse it here, to be interoperable with the banderwagon spec, which needs big-endian bytes
+            bytes.reverse();
+            bytes
+        })
+        // Deserialise the x-coordinate to get a valid banderwagon element
+        .map(|bytes| Element::from_bytes(&bytes))
+        .filter_map(|point| point)
+        .take(num_required_points)
+        .collect()
+}
+
+#[test]
+fn crs_consistency() {
+    // TODO: update hackmd as we are now using banderwagon and the point finding strategy
+    // TODO: is a bit different
+    // See: https://hackmd.io/1RcGSMQgT4uREaq1CCx_cg#Methodology
+    use ark_serialize::CanonicalSerialize;
+    use bandersnatch::Fq;
+    use sha2::{Digest, Sha256};
+
+    let points = generate_random_elements(256, b"eth_verkle_oct_2021");
+
+    let mut bytes = [0u8; 32];
+    points[0].serialize(&mut bytes[..]).unwrap();
+    assert_eq!(
+        hex::encode(&bytes),
+        "01587ad1336675eb912550ec2a28eb8923b824b490dd2ba82e48f14590a298a0",
+        "the first point is incorrect"
+    );
+    let mut bytes = [0u8; 32];
+    points[255].serialize(&mut bytes[..]).unwrap();
+    assert_eq!(
+        hex::encode(&bytes),
+        "3de2be346b539395b0c0de56a5ccca54a317f1b5c80107b0802af9a62276a4d8",
+        "the 256th (last) point is incorrect"
+    );
+
+    let mut hasher = Sha256::new();
+    for point in &points {
+        let mut bytes = [0u8; 32];
+        point.serialize(&mut bytes[..]).unwrap();
+        hasher.update(&bytes);
+    }
+    let bytes = hasher.finalize().to_vec();
+    assert_eq!(
+        hex::encode(&bytes),
+        "1fcaea10bf24f750200e06fa473c76ff0468007291fa548e2d99f09ba9256fdb",
+        "unexpected point encountered"
+    );
+}
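To show how the new module is meant to be consumed, here is a minimal usage sketch of `CRS` (mirroring what the updated benches import). It assumes `LagrangeBasis::new` accepts a `Vec<Fr>` of evaluations and that `Fr` implements `From<u64>`; both are assumptions about the surrounding crate, not something this diff shows.

use banderwagon::{Element, Fr};
use ipa_multipoint::crs::CRS;
use ipa_multipoint::lagrange_basis::LagrangeBasis;

fn commit_example() -> Element {
    // A 256-point CRS derived from the same seed as the consistency test above.
    let crs = CRS::new(256, b"eth_verkle_oct_2021");

    // A toy polynomial given by its 256 evaluations in the Lagrange basis.
    // `LagrangeBasis::new` is assumed here; only `CRS` is defined in this file.
    let evaluations: Vec<Fr> = (0u64..256).map(Fr::from).collect();
    let polynomial = LagrangeBasis::new(evaluations);

    // Pedersen-style commitment: a multiscalar multiplication against crs.G.
    crs.commit_lagrange_poly(&polynomial)
}

Because `CRS` implements `Index<usize>`, `crs[0]` also exposes the individual basis points when a caller needs them directly.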
