Add option to print and save all results when performing multiple test iterations #298

Merged
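For readers skimming the diff, a minimal invocation of the new behaviour might look like the sketch below. The --print-all-runs and --run-count options come from this change and the existing CLI; the host, port, and output file name are illustrative placeholders only.

# Sketch: run 3 iterations and emit a result table per run, in addition to the
# existing best/worst/average tables. Host, port and file name are placeholders.
memtier_benchmark -s 127.0.0.1 -p 6379 --run-count=3 --print-all-runs --json-out-file=results.json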
46 changes: 30 additions & 16 deletions .github/workflows/release.yml
@@ -17,16 +17,40 @@ on:
description: "Docker images for smoke testing (comma-separated, e.g., ubuntu:20.04,ubuntu:22.04,ubuntu:24.04)"
required: false
default: "ubuntu:20.04,ubuntu:22.04,ubuntu:24.04"
build_runner:
description: "os in which build steps run on"
required: false
default: "ubuntu-22.04"
type: string
jobs:
build-source-package:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
continue-on-error: true
strategy:
matrix:
dist: ${{ fromJSON(vars.BUILD_DISTS) }}
steps:
- uses: actions/checkout@v4
with:
path: sources
- name: Validate configure.ac version matches GitHub Release (only on release)
if: github.event.release.tag_name != ''
env:
VERSION: ${{ github.event.release.tag_name }}
run: |
# Extract the current version from configure.ac
CURRENT_VERSION=$(awk -F'[(),]' '/AC_INIT/ {print $3}' sources/configure.ac | tr -d ' ')

echo "Current configure.ac version: $CURRENT_VERSION"
echo "GitHub Release version: $VERSION"

# Check if versions match
if [ "$CURRENT_VERSION" != "$VERSION" ]; then
echo "❌ Version mismatch! configure.ac: $CURRENT_VERSION, GitHub Release: $VERSION"
exit 1 # Fail the build
else
echo "Version match. Proceeding with the build."
fi
- name: Install dependencies
run: |
sudo apt-get update && \
@@ -63,7 +87,8 @@ jobs:
memtier-benchmark_*.tar.*

build-binary-package:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
continue-on-error: true
environment: build
strategy:
matrix:
@@ -121,13 +146,10 @@ jobs:
*.deb

smoke-test-packages:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
needs: build-binary-package
env:
ARCH: amd64
# required by ubuntu:bionic
# https://github.blog/changelog/2024-03-07-github-actions-all-actions-will-run-on-node20-instead-of-node16-by-default/
ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
strategy:
matrix:
image: ${{ fromJSON(vars.SMOKE_TEST_IMAGES) }}
@@ -141,15 +163,7 @@
exit 1
fi
echo "BUILD_ARCH=$BUILD_ARCH" >> $GITHUB_ENV
- name: Get binary packages for ubuntu:bionic
if: matrix.image == 'ubuntu:bionic'
uses: actions/download-artifact@v3
with:
name: binary-${{ env.BUILD_ARCH }}-${{ env.ARCH }}
path: binary-${{ env.BUILD_ARCH }}-${{ env.ARCH }}

- name: Get binary packages for other versions
if: matrix.image != 'ubuntu:bionic'
- name: Get binary packages
uses: actions/download-artifact@v4
with:
name: binary-${{ env.BUILD_ARCH }}-${{ env.ARCH }}
@@ -162,7 +176,7 @@
publish-to-apt:
env:
DEB_S3_VERSION: "0.11.3"
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
environment: build
needs: smoke-test-packages
steps:
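The version-validation step added above can be reproduced outside CI. The sketch below assumes it is run from the repository root and that configure.ac declares the version as AC_INIT(memtier_benchmark, X.Y.Z), which is the shape the awk field split on '(),' relies on; the tag value is a placeholder.

# Sketch of the same check run locally; VERSION stands in for the release tag.
VERSION=2.1.0
CURRENT_VERSION=$(awk -F'[(),]' '/AC_INIT/ {print $3}' configure.ac | tr -d ' ')
if [ "$CURRENT_VERSION" != "$VERSION" ]; then
  echo "Version mismatch: configure.ac=$CURRENT_VERSION, tag=$VERSION"
  exit 1
fi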
23 changes: 20 additions & 3 deletions memtier_benchmark.cpp
@@ -159,7 +159,8 @@ static void config_print(FILE *file, struct benchmark_config *cfg)
"wait-ratio = %u:%u\n"
"num-slaves = %u-%u\n"
"wait-timeout = %u-%u\n"
"json-out-file = %s\n",
"json-out-file = %s\n"
"print-all-runs = %s\n",
cfg->server,
cfg->port,
cfg->unix_socket,
@@ -209,7 +210,8 @@ static void config_print(FILE *file, struct benchmark_config *cfg)
cfg->wait_ratio.a, cfg->wait_ratio.b,
cfg->num_slaves.min, cfg->num_slaves.max,
cfg->wait_timeout.min, cfg->wait_timeout.max,
cfg->json_out_file);
cfg->json_out_file,
cfg->print_all_runs ? "yes" : "no");
}

static void config_print_to_json(json_handler * jsonhandler, struct benchmark_config *cfg)
@@ -267,6 +269,7 @@ static void config_print_to_json(json_handler * jsonhandler, struct benchmark_config *cfg)
jsonhandler->write_obj("wait-ratio" ,"\"%u:%u\"", cfg->wait_ratio.a, cfg->wait_ratio.b);
jsonhandler->write_obj("num-slaves" ,"\"%u:%u\"", cfg->num_slaves.min, cfg->num_slaves.max);
jsonhandler->write_obj("wait-timeout" ,"\"%u-%u\"", cfg->wait_timeout.min, cfg->wait_timeout.max);
jsonhandler->write_obj("print-all-runs" ,"\"%s\"", cfg->print_all_runs ? "true" : "false");

jsonhandler->close_nesting();
}
@@ -403,6 +406,7 @@ static int config_parse_args(int argc, char *argv[], struct benchmark_config *cfg)
o_show_config,
o_hide_histogram,
o_print_percentiles,
o_print_all_runs,
o_distinct_client_seed,
o_randomize,
o_client_stats,
@@ -456,6 +460,7 @@ static int config_parse_args(int argc, char *argv[], struct benchmark_config *cfg)
{ "show-config", 0, 0, o_show_config },
{ "hide-histogram", 0, 0, o_hide_histogram },
{ "print-percentiles", 1, 0, o_print_percentiles },
{ "print-all-runs", 0, 0, o_print_all_runs },
{ "distinct-client-seed", 0, 0, o_distinct_client_seed },
{ "randomize", 0, 0, o_randomize },
{ "requests", 1, 0, 'n' },
@@ -587,6 +592,9 @@ static int config_parse_args(int argc, char *argv[], struct benchmark_config *cfg)
return -1;
}
break;
case o_print_all_runs:
cfg->print_all_runs = true;
break;
case o_distinct_client_seed:
cfg->distinct_client_seed++;
break;
@@ -977,6 +985,7 @@ void usage() {
" --show-config Print detailed configuration before running\n"
" --hide-histogram Don't print detailed latency histogram\n"
" --print-percentiles Specify which percentiles info to print on the results table (by default prints percentiles: 50,99,99.9)\n"
" --print-all-runs When performing multiple test iterations, print and save results for all iterations\n"
" --cluster-mode Run client in cluster mode\n"
" -h, --help Display this help\n"
" -v, --version Display version information\n"
@@ -1652,7 +1661,16 @@ int main(int argc, char *argv[])
}

// If more than 1 run was used, compute best, worst and average
// Furthermore, if print_all_runs is enabled we save separate histograms per run
if (cfg.run_count > 1) {
// User wants to see a separate histogram per run
if (cfg.print_all_runs) {
for (auto i = 0U; i < all_stats.size(); i++) {
auto run_title = std::string("RUN #") + std::to_string(i + 1) + " RESULTS";
all_stats[i].print(outfile, &cfg, run_title.c_str(), jsonhandler);
}
}
// User wants the best and worst
unsigned int min_ops_sec = (unsigned int) -1;
unsigned int max_ops_sec = 0;
run_stats* worst = NULL;
@@ -1669,7 +1687,6 @@
best = &(*i);
}
}

// Best results:
best->print(outfile, &cfg, "BEST RUN RESULTS", jsonhandler);
// worst results:
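With the loop above, each iteration is written to the JSON handler under its own "RUN #N RESULTS" title, alongside the existing best/worst/aggregate sections. A quick way to list the resulting top-level sections, assuming the benchmark was run with --json-out-file=mb.json and jq is available, might be:

# Sketch: list the result sections in the JSON output. Expect keys such as
# "RUN #1 RESULTS", "BEST RUN RESULTS", "WORST RUN RESULTS" and
# "AGGREGATED AVERAGE RESULTS (N runs)".
jq -r 'keys[]' mb.json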
1 change: 1 addition & 0 deletions memtier_benchmark.h
@@ -63,6 +63,7 @@ struct benchmark_config {
int show_config;
int hide_histogram;
config_quantiles print_percentiles;
bool print_all_runs;
int distinct_client_seed;
int randomize;
int next_client_idx;
45 changes: 45 additions & 0 deletions tests/tests_oss_simple_flow.py
@@ -407,6 +407,51 @@ def test_default_set_get_3_runs(env):
assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)



# run each test on different env
def test_print_all_runs(env):
run_count = 5
benchmark_specs = {"name": env.testName, "args": ['--print-all-runs','--run-count={}'.format(run_count)]}
addTLSArgs(benchmark_specs, env)
config = get_default_memtier_config()
master_nodes_list = env.getMasterNodesList()
overall_expected_request_count = get_expected_request_count(config) * run_count

add_required_env_arguments(benchmark_specs, config, env, master_nodes_list)

# Create a temporary directory
test_dir = tempfile.mkdtemp()

config = RunConfig(test_dir, env.testName, config, {})
ensure_clean_benchmark_folder(config.results_dir)

benchmark = Benchmark.from_json(config, benchmark_specs)

# benchmark.run() returns True if the return code of memtier_benchmark was 0
memtier_ok = benchmark.run()

master_nodes_connections = env.getOSSMasterNodesConnectionList()
merged_command_stats = {'cmdstat_set': {'calls': 0}, 'cmdstat_get': {'calls': 0}}
overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)

json_filename = '{0}/mb.json'.format(config.results_dir)
## Assert that per-run results are properly stored in the JSON output
with open(json_filename) as results_json:
results_dict = json.load(results_json)
print_all_runs = results_dict["configuration"]["print-all-runs"]
env.assertTrue(print_all_runs)
for run_index in range(1, run_count+1):
# assert the run information exists
env.assertTrue(f"RUN #{run_index} RESULTS" in results_dict)

# ensure best, worst, and aggregate results are present
env.assertTrue("BEST RUN RESULTS" in results_dict)
env.assertTrue("WORST RUN RESULTS" in results_dict)
env.assertTrue(f"AGGREGATED AVERAGE RESULTS ({run_count} runs)" in results_dict)
# all stats should only exist on a single run json
env.assertTrue("ALL STATS" not in results_dict)

def test_default_arbitrary_command_pubsub(env):
benchmark_specs = {"name": env.testName, "args": []}
addTLSArgs(benchmark_specs, env)
Loading