Adding FFT like #104

Merged: 57 commits, Jul 4, 2022
Changes from 42 commits
Commits (57)
53229ab
Adding draft fwd fft
joserochh Dec 13, 2021
c656671
Fixing types
joserochh Dec 13, 2021
3701ff6
Fixing set SIMD for idx vectors
joserochh Dec 13, 2021
0100d50
Fixing storeu for doubles
joserochh Dec 13, 2021
f5e0807
Fixes and refactoring
joserochh Dec 15, 2021
3329d7d
Trying to run FWD FFT as test
joserochh Dec 16, 2021
129c999
Adding dummy function for test
joserochh Dec 16, 2021
3d2a699
Naive FFT Implementation
joserochh Dec 21, 2021
00c782c
Building double coefficients with AVX
joserochh Jan 3, 2022
f0fe2f6
Adding Inv FFT
joserochh Jan 5, 2022
d80e445
Fixed Inv FFT with Debug Prints
joserochh Jan 17, 2022
be3bc68
Adding native implementation of FFT and unit tests
joserochh Jan 24, 2022
a32560c
Adding benchmarking
joserochh Jan 31, 2022
a8c3d6f
Merge branch 'development' into joserochh/fft
joserochh Jan 31, 2022
fe8609d
Fixing clang format version
joserochh Jan 31, 2022
18e9229
Adding HEXL_UNUSED on BuildFloatingPoints
joserochh Jan 31, 2022
a2579a0
Reversing clang format version for CI
joserochh Jan 31, 2022
15d518e
Trying fix on windows build
joserochh Jan 31, 2022
9e4224e
Reversing pre-commit version for CI
joserochh Jan 31, 2022
bb5d59c
Trying another fix for windows build
joserochh Jan 31, 2022
9668c12
Reversing clang format version for CI
joserochh Jan 31, 2022
5f67332
Trying fix for conversion error from int to mask8
joserochh Jan 31, 2022
b28d658
Reversing clang format version for CI
joserochh Jan 31, 2022
0c8061d
Trying another fix for conversion error from int to mask8
joserochh Feb 1, 2022
5a71088
Reversing clang format version for CI
joserochh Feb 1, 2022
4c5036b
Trying fix for _mm512_storeu_epi64 was not declared in this scope
joserochh Feb 1, 2022
c497def
Reversing clang format version for CI
joserochh Feb 1, 2022
ba7d34b
Replacing for _mm512_storeu_si512
joserochh Feb 1, 2022
421da34
Reversing clang format version for CI
joserochh Feb 1, 2022
0177981
Adding missing doc param
joserochh Feb 1, 2022
28dccc1
Reversing clang format version for CI
joserochh Feb 1, 2022
79325c6
Removing clang format version
joserochh Feb 1, 2022
5643d11
Adding depth first to fwd fft
joserochh Feb 1, 2022
ee6cf72
Adding own gcd function
joserochh Feb 1, 2022
1950770
Adding recursive inverse
joserochh Feb 2, 2022
27814a1
Complying to first review
joserochh Feb 2, 2022
b481c04
Merge from joserochh/fft
joserochh Feb 2, 2022
b3f58a0
Working
joserochh Feb 3, 2022
d9e9910
Removing hexl gcd
joserochh Feb 3, 2022
05b4c3a
Merge branch 'joserochh/fft-recursive' into joserochh/fft
joserochh Feb 3, 2022
120ae24
Removing casting from gcd
joserochh Feb 3, 2022
aefde73
Complying to second review
joserochh Feb 3, 2022
e242de6
Complying to third review
joserochh Feb 4, 2022
5f479cc
Fixes
joserochh Feb 7, 2022
1944df6
Fixing another double cast
joserochh Feb 7, 2022
34ba616
Complying to review
joserochh Feb 8, 2022
a4a4bc4
Fixing build floating points func
joserochh Feb 10, 2022
d1526ef
Merge branch 'discrete_fourier_transform_seal' into joserochh/fft
joserochh Apr 4, 2022
2454336
Merge remote-tracking branch 'origin/development' into joserochh/fft
joserochh Jun 20, 2022
110872a
Moving FFT to experimental
joserochh Jun 20, 2022
c328f78
Benchmark CMakeList.txt fix
joserochh Jun 20, 2022
374f126
Fixing Typo
joserochh Jun 20, 2022
724bc95
Fixing include path on hexl.hpp
joserochh Jun 20, 2022
85df9cd
Renaming fft to fft like
joserochh Jun 30, 2022
b3b840d
Fixing hexl.hpp
joserochh Jun 30, 2022
cb0c94f
Adding doxygen params
joserochh Jun 30, 2022
d3d1e96
pre-commit
joserochh Jun 30, 2022
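
The commits above build the transform up in stages: a naive forward FFT, an inverse transform, a recursive (depth-first) formulation, and AVX512 construction of the double-precision coefficients, followed by benchmarks and a move to the experimental directory. As a point of reference for what the early "Naive FFT Implementation" commit describes, a generic radix-2 Cooley-Tukey sketch is shown below. It is an illustration only, not HEXL's fft-like code; the function and type names are invented for the example.

#include <cmath>
#include <complex>
#include <vector>

using Cplx = std::complex<double>;

// Recursive radix-2 Cooley-Tukey transform. invert == true computes the
// unscaled inverse; the caller divides every element by n afterwards.
// Assumes a.size() is a power of two.
void NaiveFft(std::vector<Cplx>& a, bool invert) {
  const size_t n = a.size();
  if (n == 1) return;
  std::vector<Cplx> even(n / 2), odd(n / 2);
  for (size_t i = 0; i < n / 2; ++i) {
    even[i] = a[2 * i];
    odd[i] = a[2 * i + 1];
  }
  NaiveFft(even, invert);
  NaiveFft(odd, invert);
  const double kPi = std::acos(-1.0);
  const double ang = 2.0 * kPi / static_cast<double>(n) * (invert ? 1.0 : -1.0);
  for (size_t k = 0; k < n / 2; ++k) {
    const Cplx w = std::polar(1.0, ang * static_cast<double>(k));  // twiddle factor
    a[k] = even[k] + w * odd[k];
    a[k + n / 2] = even[k] - w * odd[k];
  }
}

The diff below (rendered from the 42 selected commits) registers the new bench-fft.cpp with the benchmark target and renames the integer random-input helpers used by the existing element-wise benchmarks.
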
1 change: 1 addition & 0 deletions benchmark/CMakeLists.txt
@@ -3,6 +3,7 @@

set(SRC main.cpp
bench-ntt.cpp
+    bench-fft.cpp
bench-eltwise-add-mod.cpp
bench-eltwise-cmp-add.cpp
bench-eltwise-cmp-sub-mod.cpp
16 changes: 8 additions & 8 deletions benchmark/bench-eltwise-add-mod.cpp
@@ -22,8 +22,8 @@ static void BM_EltwiseVectorVectorAddModNative(
size_t input_size = state.range(0);
uint64_t modulus = 0xffffffffffc0001ULL;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  auto input2 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  auto input2 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
AlignedVector64<uint64_t> output(input_size, 0);

for (auto _ : state) {
@@ -47,8 +47,8 @@ static void BM_EltwiseVectorVectorAddModAVX512(
size_t input_size = state.range(0);
size_t modulus = 1152921504606877697;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  auto input2 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  auto input2 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
AlignedVector64<uint64_t> output(input_size, 0);

for (auto _ : state) {
@@ -71,8 +71,8 @@ static void BM_EltwiseVectorScalarAddModNative(
size_t input_size = state.range(0);
uint64_t modulus = 0xffffffffffc0001ULL;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  uint64_t input2 = GenerateInsecureUniformRandomValue(0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  uint64_t input2 = GenerateInsecureUniformIntRandomValue(0, modulus);
AlignedVector64<uint64_t> output(input_size, 0);

for (auto _ : state) {
@@ -96,8 +96,8 @@ static void BM_EltwiseVectorScalarAddModAVX512(
size_t input_size = state.range(0);
size_t modulus = 1152921504606877697;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  uint64_t input2 = GenerateInsecureUniformRandomValue(0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  uint64_t input2 = GenerateInsecureUniformIntRandomValue(0, modulus);
AlignedVector64<uint64_t> output(input_size, 0);

for (auto _ : state) {
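
Every hunk in this file and the benchmark files that follow makes the same change: the uniform random-input helpers gain an "Int" infix (GenerateInsecureUniformRandomValues becomes GenerateInsecureUniformIntRandomValues, and likewise for the scalar Value variant), presumably to distinguish integer generation from a floating-point counterpart introduced for the FFT-like benchmarks. For context, the Google Benchmark pattern these files follow is sketched below using only standard-library types; the HEXL helper names above are taken from the diff, and everything in the sketch itself is illustrative rather than HEXL code.

#include <benchmark/benchmark.h>

#include <cstdint>
#include <random>
#include <vector>

// Stand-in for the HEXL helper: uniform integers in [lo, hi).
static std::vector<uint64_t> RandomIntValues(size_t n, uint64_t lo, uint64_t hi) {
  std::mt19937_64 gen(12345);  // fixed seed; benchmarks need no secure randomness
  std::uniform_int_distribution<uint64_t> dist(lo, hi - 1);
  std::vector<uint64_t> values(n);
  for (auto& v : values) v = dist(gen);
  return values;
}

static void BM_EltwiseAddModSketch(benchmark::State& state) {
  const size_t input_size = state.range(0);
  const uint64_t modulus = 0xffffffffffc0001ULL;
  auto input1 = RandomIntValues(input_size, 0, modulus);
  auto input2 = RandomIntValues(input_size, 0, modulus);
  std::vector<uint64_t> output(input_size, 0);
  for (auto _ : state) {  // only the body of this loop is timed
    for (size_t i = 0; i < input_size; ++i) {
      const uint64_t sum = input1[i] + input2[i];
      output[i] = (sum >= modulus) ? sum - modulus : sum;
    }
    benchmark::DoNotOptimize(output.data());
  }
}
BENCHMARK(BM_EltwiseAddModSketch)->Arg(1024)->Arg(4096);
BENCHMARK_MAIN();
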
10 changes: 5 additions & 5 deletions benchmark/bench-eltwise-cmp-add.cpp
@@ -23,9 +23,9 @@ static void BM_EltwiseCmpAddNative(benchmark::State& state) { // NOLINT

uint64_t modulus = 100;

-  uint64_t bound = GenerateInsecureUniformRandomValue(0, modulus);
-  uint64_t diff = GenerateInsecureUniformRandomValue(1, modulus - 1);
-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  uint64_t bound = GenerateInsecureUniformIntRandomValue(0, modulus);
+  uint64_t diff = GenerateInsecureUniformIntRandomValue(1, modulus - 1);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);

for (auto _ : state) {
EltwiseCmpAddNative(input1.data(), input1.data(), input_size, CMPINT::NLT,
@@ -48,8 +48,8 @@ static void BM_EltwiseCmpAddAVX512(benchmark::State& state) { // NOLINT

uint64_t bound = 50;
// must be non-zero
-  uint64_t diff = GenerateInsecureUniformRandomValue(1, bound - 1);
-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, bound);
+  uint64_t diff = GenerateInsecureUniformIntRandomValue(1, bound - 1);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, bound);

for (auto _ : state) {
EltwiseCmpAddAVX512(input1.data(), input1.data(), input_size, CMPINT::NLT,
12 changes: 6 additions & 6 deletions benchmark/bench-eltwise-cmp-sub-mod.cpp
@@ -22,9 +22,9 @@ static void BM_EltwiseCmpSubModNative(benchmark::State& state) { // NOLINT
size_t input_size = state.range(0);

uint64_t modulus = 100;
-  uint64_t bound = GenerateInsecureUniformRandomValue(1, modulus);
-  uint64_t diff = GenerateInsecureUniformRandomValue(1, modulus);
-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  uint64_t bound = GenerateInsecureUniformIntRandomValue(1, modulus);
+  uint64_t diff = GenerateInsecureUniformIntRandomValue(1, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);

for (auto _ : state) {
EltwiseCmpSubModNative(input1.data(), input1.data(), input_size, modulus,
@@ -45,9 +45,9 @@ BENCHMARK(BM_EltwiseCmpSubModNative)
static void BM_EltwiseCmpSubModAVX512_64(benchmark::State& state) { // NOLINT
size_t input_size = state.range(0);
uint64_t modulus = 100;
-  uint64_t bound = GenerateInsecureUniformRandomValue(0, modulus);
-  uint64_t diff = GenerateInsecureUniformRandomValue(1, modulus);
-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  uint64_t bound = GenerateInsecureUniformIntRandomValue(0, modulus);
+  uint64_t diff = GenerateInsecureUniformIntRandomValue(1, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);

for (auto _ : state) {
EltwiseCmpSubModAVX512<64>(input1.data(), input1.data(), input_size,
18 changes: 9 additions & 9 deletions benchmark/bench-eltwise-fma-mod.cpp
@@ -23,10 +23,10 @@ static void BM_EltwiseFMAModAddNative(benchmark::State& state) { // NOLINT
uint64_t modulus = 0xffffffffffc0001ULL;
bool add = state.range(1);

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  uint64_t input2 = GenerateInsecureUniformRandomValue(0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  uint64_t input2 = GenerateInsecureUniformIntRandomValue(0, modulus);
AlignedVector64<uint64_t> input3 =
-      GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+      GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
uint64_t* arg3 = add ? input3.data() : nullptr;

for (auto _ : state) {
@@ -47,10 +47,10 @@ static void BM_EltwiseFMAModAVX512DQ(benchmark::State& state) { // NOLINT
size_t modulus = 100;
bool add = state.range(1);

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  uint64_t input2 = GenerateInsecureUniformRandomValue(0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  uint64_t input2 = GenerateInsecureUniformIntRandomValue(0, modulus);
AlignedVector64<uint64_t> input3 =
-      GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+      GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);

uint64_t* arg3 = add ? input3.data() : nullptr;

@@ -73,9 +73,9 @@ static void BM_EltwiseFMAModAVX512IFMA(benchmark::State& state) { // NOLINT
size_t modulus = 100;
bool add = state.range(1);

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  uint64_t input2 = GenerateInsecureUniformRandomValue(0, modulus);
-  auto input3 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  uint64_t input2 = GenerateInsecureUniformIntRandomValue(0, modulus);
+  auto input3 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);

uint64_t* arg3 = add ? input3.data() : nullptr;

32 changes: 16 additions & 16 deletions benchmark/bench-eltwise-mult-mod.cpp
@@ -26,8 +26,8 @@ static void BM_EltwiseMultMod(benchmark::State& state) { // NOLINT
size_t input_mod_factor = state.range(2);
uint64_t modulus = (1ULL << bit_width) + 7;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  auto input2 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  auto input2 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
AlignedVector64<uint64_t> output(input_size, 2);

for (auto _ : state) {
@@ -47,8 +47,8 @@ static void BM_EltwiseMultModNative(benchmark::State& state) { // NOLINT
size_t input_size = state.range(0);
uint64_t modulus = 0xffffffffffc0001ULL;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  auto input2 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  auto input2 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
AlignedVector64<uint64_t> output(input_size, 2);

for (auto _ : state) {
@@ -73,8 +73,8 @@ static void BM_EltwiseMultModAVX512Float(benchmark::State& state) { // NOLINT
size_t input_mod_factor = state.range(1);
size_t modulus = 100;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  auto input2 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  auto input2 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
AlignedVector64<uint64_t> output(input_size, 2);

for (auto _ : state) {
@@ -110,8 +110,8 @@ static void BM_EltwiseMultModAVX512DQInt(benchmark::State& state) { // NOLINT
size_t input_mod_factor = state.range(1);
size_t modulus = 0xffffffffffc0001ULL;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  auto input2 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  auto input2 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
AlignedVector64<uint64_t> output(input_size, 3);

for (auto _ : state) {
@@ -146,10 +146,10 @@ static void BM_EltwiseMultModAVX512IFMAInt(
size_t input_mod_factor = state.range(1);
size_t modulus = 100;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0,
-      input_mod_factor * modulus);
-  auto input2 = GenerateInsecureUniformRandomValues(input_size, 0,
-      input_mod_factor * modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(
+      input_size, 0, input_mod_factor * modulus);
+  auto input2 = GenerateInsecureUniformIntRandomValues(
+      input_size, 0, input_mod_factor * modulus);
AlignedVector64<uint64_t> output(input_size, 3);

for (auto _ : state) {
@@ -187,10 +187,10 @@ static void BM_EltwiseMultModMontAVX512IFMAIntEConv(
size_t input_size = state.range(0);
size_t input_mod_factor = state.range(1);
uint64_t modulus = (1ULL << 50) + 7; // 1125899906842631
-  auto op1 = GenerateInsecureUniformRandomValues(input_size, 0,
-      input_mod_factor * modulus);
-  auto op2 = GenerateInsecureUniformRandomValues(input_size, 0,
-      input_mod_factor * modulus);
+  auto op1 = GenerateInsecureUniformIntRandomValues(input_size, 0,
+      input_mod_factor * modulus);
+  auto op2 = GenerateInsecureUniformIntRandomValues(input_size, 0,
+      input_mod_factor * modulus);
AlignedVector64<uint64_t> output(input_size, 3);

int r = 51; // R = 2251799813685248
24 changes: 12 additions & 12 deletions benchmark/bench-eltwise-reduce-mod.cpp
@@ -22,7 +22,7 @@ static void BM_EltwiseReduceModInPlace(benchmark::State& state) { // NOLINT
uint64_t modulus = 0xffffffffffc0001ULL;

auto input1 =
-      GenerateInsecureUniformRandomValues(input_size, 0, 100 * modulus);
+      GenerateInsecureUniformIntRandomValues(input_size, 0, 100 * modulus);
const uint64_t input_mod_factor = modulus;
const uint64_t output_mod_factor = 1;
for (auto _ : state) {
@@ -45,7 +45,7 @@ static void BM_EltwiseReduceModCopy(benchmark::State& state) { // NOLINT
uint64_t modulus = 0xffffffffffc0001ULL;

auto input1 =
-      GenerateInsecureUniformRandomValues(input_size, 0, 100 * modulus);
+      GenerateInsecureUniformIntRandomValues(input_size, 0, 100 * modulus);
const uint64_t input_mod_factor = modulus;
const uint64_t output_mod_factor = 1;
AlignedVector64<uint64_t> output(input_size, 0);
@@ -70,7 +70,7 @@ static void BM_EltwiseReduceModNative(benchmark::State& state) { // NOLINT
uint64_t modulus = 0xffffffffffc0001ULL;

auto input1 =
-      GenerateInsecureUniformRandomValues(input_size, 0, 100 * modulus);
+      GenerateInsecureUniformIntRandomValues(input_size, 0, 100 * modulus);
const uint64_t input_mod_factor = modulus;
const uint64_t output_mod_factor = 1;
AlignedVector64<uint64_t> output(input_size, 0);
@@ -96,7 +96,7 @@ static void BM_EltwiseReduceModAVX512(benchmark::State& state) { // NOLINT
size_t modulus = 0xffffffffffc0001ULL;

auto input1 =
-      GenerateInsecureUniformRandomValues(input_size, 0, 100 * modulus);
+      GenerateInsecureUniformIntRandomValues(input_size, 0, 100 * modulus);
const uint64_t input_mod_factor = modulus;
const uint64_t output_mod_factor = 1;
AlignedVector64<uint64_t> output(input_size, 0);
@@ -124,7 +124,7 @@ static void BM_EltwiseReduceModAVX512BitShift64(
size_t modulus = 0xffffffffffc0001ULL;

auto input1 =
-      GenerateInsecureUniformRandomValues(input_size, 0, 100 * modulus);
+      GenerateInsecureUniformIntRandomValues(input_size, 0, 100 * modulus);
const uint64_t input_mod_factor = modulus;
const uint64_t output_mod_factor = 2;
AlignedVector64<uint64_t> output(input_size, 0);
@@ -152,7 +152,7 @@ static void BM_EltwiseReduceModAVX512BitShift52(
size_t modulus = 0xffffffffffc0001ULL;

auto input1 =
-      GenerateInsecureUniformRandomValues(input_size, 0, 100 * modulus);
+      GenerateInsecureUniformIntRandomValues(input_size, 0, 100 * modulus);
const uint64_t input_mod_factor = modulus;
const uint64_t output_mod_factor = 2;
AlignedVector64<uint64_t> output(input_size, 0);
Expand All @@ -179,7 +179,7 @@ static void BM_EltwiseReduceModAVX512BitShift52GT(
size_t input_size = state.range(0);
size_t modulus = 0xffffffffffc0001ULL;

-  auto input1 = GenerateInsecureUniformRandomValues(
+  auto input1 = GenerateInsecureUniformIntRandomValues(
input_size, 4503599627370496, 100 * modulus);
const uint64_t input_mod_factor = modulus;
const uint64_t output_mod_factor = 1;
@@ -203,7 +203,7 @@ static void BM_EltwiseReduceModAVX512BitShift52LT(
size_t modulus = 0xffffffffffc0001ULL;

auto input1 =
-      GenerateInsecureUniformRandomValues(input_size, 0, 2251799813685248);
+      GenerateInsecureUniformIntRandomValues(input_size, 0, 2251799813685248);
const uint64_t input_mod_factor = modulus;
const uint64_t output_mod_factor = 1;
AlignedVector64<uint64_t> output(input_size, 0);
@@ -229,7 +229,7 @@ static void BM_EltwiseReduceModMontAVX512BitShift52LT(
size_t input_size = state.range(0);
uint64_t modulus = 67280421310725ULL;

-  auto input_a = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input_a = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
AlignedVector64<uint64_t> input_b(input_size, 42006526039321);

int r = 46; // R^2 mod N = 42006526039321
@@ -256,7 +256,7 @@ static void BM_EltwiseReduceModMontFormInAVX512BitShift52LT(
size_t input_size = state.range(0);
uint64_t modulus = 67280421310725ULL;

-  auto input_a = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input_a = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
AlignedVector64<uint64_t> input_b(input_size, 42006526039321);

int r = 46; // R^2 mod N = 42006526039321
@@ -282,7 +282,7 @@ static void BM_EltwiseReduceModMontFormInAVX512BitShift64LT(
size_t input_size = state.range(0);
uint64_t modulus = 67280421310725ULL;

-  auto input_a = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input_a = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
AlignedVector64<uint64_t> input_b(input_size, 42006526039321);

int r = 46; // R^2 mod N = 42006526039321
@@ -308,7 +308,7 @@ static void BM_EltwiseReduceModInOutMontFormAVX512BitShift52LT(
size_t input_size = state.range(0);
uint64_t modulus = 67280421310725ULL;

-  auto input_a = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input_a = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);

int r = 46; // R^2 mod N = 42006526039321
const uint64_t R2_mod_q = 42006526039321;
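
The Montgomery-form benchmarks above hard-code r = 46, the modulus N = 67280421310725, and the comment "R^2 mod N = 42006526039321" (the mult-mod file similarly uses r = 51 with R = 2^51 = 2251799813685248). These constants follow from R = 2^r and can be reproduced with a few lines of standard C++, assuming a compiler that provides the 128-bit integer extension (GCC/Clang __int128). The check below is illustrative only and not part of the PR.

#include <cstdint>
#include <iostream>

int main() {
  const int r = 46;
  const uint64_t N = 67280421310725ULL;  // modulus used in the benchmarks above
  const unsigned __int128 R = (unsigned __int128)1 << r;  // Montgomery radix 2^r

  const uint64_t R_mod_N = (uint64_t)(R % N);
  // R^2 mod N, computed as (R mod N)^2 mod N so the product stays within 128 bits.
  const uint64_t R2_mod_N =
      (uint64_t)(((unsigned __int128)R_mod_N * R_mod_N) % N);

  std::cout << "R   mod N = " << R_mod_N << "\n";
  std::cout << "R^2 mod N = " << R2_mod_N << "\n";  // the diff's comment gives 42006526039321
  return 0;
}
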
16 changes: 8 additions & 8 deletions benchmark/bench-eltwise-sub-mod.cpp
@@ -22,8 +22,8 @@ static void BM_EltwiseVectorVectorSubModNative(
size_t input_size = state.range(0);
uint64_t modulus = 0xffffffffffc0001ULL;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  auto input2 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  auto input2 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
AlignedVector64<uint64_t> output(input_size, 0);

for (auto _ : state) {
@@ -47,8 +47,8 @@ static void BM_EltwiseVectorVectorSubModAVX512(
size_t input_size = state.range(0);
size_t modulus = 1152921504606877697;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  auto input2 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  auto input2 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
AlignedVector64<uint64_t> output(input_size, 0);

for (auto _ : state) {
@@ -71,8 +71,8 @@ static void BM_EltwiseVectorScalarSubModNative(
size_t input_size = state.range(0);
uint64_t modulus = 0xffffffffffc0001ULL;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  uint64_t input2 = GenerateInsecureUniformRandomValue(0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  uint64_t input2 = GenerateInsecureUniformIntRandomValue(0, modulus);
AlignedVector64<uint64_t> output(input_size, 0);

for (auto _ : state) {
@@ -96,8 +96,8 @@ static void BM_EltwiseVectorScalarSubModAVX512(
size_t input_size = state.range(0);
size_t modulus = 1152921504606877697;

-  auto input1 = GenerateInsecureUniformRandomValues(input_size, 0, modulus);
-  uint64_t input2 = GenerateInsecureUniformRandomValue(0, modulus);
+  auto input1 = GenerateInsecureUniformIntRandomValues(input_size, 0, modulus);
+  uint64_t input2 = GenerateInsecureUniformIntRandomValue(0, modulus);
AlignedVector64<uint64_t> output(input_size, 0);

for (auto _ : state) {