181 changes: 47 additions & 134 deletions poetry.lock

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "redisbench-admin"
version = "0.9.9"
version = "0.9.12"
description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )."
authors = ["filipecosta90 <filipecosta.90@gmail.com>","Redis Performance Group <performance@redis.com>"]
readme = "README.md"
@@ -10,7 +10,7 @@ redisbench-admin = "redisbench_admin.cli:main"
perf-daemon = "redisbench_admin.profilers.daemon:main"

[tool.poetry.dependencies]
python = "^3.6.1"
python = "^3.6.9"
humanize = "^2.4.0"
requests = "^2.23.0"
py_cpuinfo = "^5.0.0"
@@ -22,7 +22,7 @@ jsonpath_ng = "^1.5.2"
pysftp = "^0.2.9"
python_terraform = "^0.10.1"
GitPython = "^3.1.12"
PyYAML = "^5.4"
PyYAML = "^6.0"
wget = "^3.2"
pytablewriter = {extras = ["html"], version = "^0.64.1"}
sshtunnel = "^0.4.0"
2 changes: 1 addition & 1 deletion redisbench_admin/export/args.py
@@ -64,7 +64,7 @@ def create_export_arguments(parser):
type=str,
default="json",
help="results format of the the benchmark results files to read "
"results from ( either pyperf-json, csv, json, redis-benchmark-txt )",
"results from ( either google.benchmark, pyperf-json, csv, json, redis-benchmark-txt )",
)
parser.add_argument(
"--use-result",
34 changes: 28 additions & 6 deletions redisbench_admin/export/export.py
@@ -12,6 +12,9 @@


from redisbench_admin.export.common.common import split_tags_string
from redisbench_admin.export.google_benchmark.google_benchmark_json_format import (
generate_summary_json_google_benchmark,
)
from redisbench_admin.export.pyperf.pyperf_json_format import (
generate_summary_json_pyperf,
)
@@ -50,10 +53,13 @@ def export_command_logic(args, project_name, project_version):
"You need to specify at least one (or more) of --deployment-version --github_branch arguments"
)
exit(1)
if results_format != "csv" and results_format != "pyperf-json":
non_required_spec = ["csv", "pyperf-json", "google.benchmark"]
if results_format not in non_required_spec:
if exporter_spec_file is None:
logging.error(
"--exporter-spec-file is required for all formats with exception of csv and pyperf-json"
"--exporter-spec-file is required for all formats with exception of {}".format(
",".join(non_required_spec)
)
)
exit(1)
else:
@@ -76,6 +82,22 @@ def export_command_logic(args, project_name, project_version):
with open(benchmark_file, "r") as json_file:
start_dict = json.load(json_file)
results_dict = generate_summary_json_pyperf(start_dict)
if results_format == "google.benchmark":
with open(benchmark_file, "r") as json_file:
# override test names
print_warning = False
old_test_name = test_name
if test_name is None:
print_warning = True
start_dict = json.load(json_file)
results_dict, test_name = generate_summary_json_google_benchmark(start_dict)
if print_warning is True:
logging.warning(
"You've specificied a test name {} but on google benchmark we override it based on the test names retrieved from out file {}".format(
old_test_name, test_name
)
)

if args.override_test_time:
datapoints_timestamp = int(args.override_test_time.timestamp() * 1000.0)
logging.info(
@@ -120,9 +142,9 @@ def export_command_logic(args, project_name, project_version):
triggering_env,
)
logging.info("Parsed a total of {} metrics".format(len(timeseries_dict.keys())))
if results_format == "pyperf-json":
logging.info("Parsing pyperf format into timeseries format")
timeseries_dict = export_pyperf_json_to_timeseries_dict(
if results_format == "pyperf-json" or results_format == "google.benchmark":
logging.info("Parsing {} format into timeseries format".format(results_format))
timeseries_dict = export_json_to_timeseries_dict(
results_dict,
break_by_dict,
datapoints_timestamp,
@@ -181,7 +203,7 @@
)


def export_pyperf_json_to_timeseries_dict(
def export_json_to_timeseries_dict(
benchmark_file,
break_by_dict,
datapoints_timestamp,
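For clarity, a minimal sketch (helper name is hypothetical, not part of the diff) of the exporter-spec rule introduced above: csv, pyperf-json and the new google.benchmark format can be exported without --exporter-spec-file, while every other results format still requires it.

non_required_spec = ["csv", "pyperf-json", "google.benchmark"]


def requires_exporter_spec(results_format):
    # Only formats outside the spec-free list still need an --exporter-spec-file.
    return results_format not in non_required_spec


assert requires_exporter_spec("json") is True
assert requires_exporter_spec("google.benchmark") is False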
5 changes: 5 additions & 0 deletions redisbench_admin/export/google_benchmark/__init__.py
@@ -0,0 +1,5 @@
# Apache License Version 2.0
#
# Copyright (c) 2021., Redis Labs Modules
# All rights reserved.
#
@@ -0,0 +1,53 @@
# Apache License Version 2.0
#
# Copyright (c) 2021., Redis Labs Modules
# All rights reserved.
#
import logging
import re


def metric_safe_name(row, replace_by="_"):
    metric_name = row.strip()
    metric_name = re.sub(r"\W+", replace_by, metric_name)
    return metric_name


def generate_summary_json_google_benchmark(input_json):
    result_json = {}
    test_names = []
    for benchmark in input_json["benchmarks"]:
        original_name = benchmark["name"]
        benchmark_name = original_name
        non_safe_count = len(original_name) - len(metric_safe_name(original_name, ""))
        if non_safe_count > 0:
            benchmark_name = metric_safe_name(original_name).rstrip("_")
            logging.warning(
                "Given that the benchmark name {} contains {} non-alphanumeric characters, "
                "we're replacing it with the safe version {}".format(
                    original_name, non_safe_count, benchmark_name
                )
            )
        metrics = {}
        test_names.append(benchmark_name)
        for metric_name, metric_value_str in benchmark.items():
            metric_value = None
            try:
                metric_value = float(metric_value_str)
            except (ValueError, TypeError):
                pass
            if metric_value is not None:
                logging.info(
                    "Adding google.benchmark metric {}={} to benchmark {}".format(
                        metric_name, metric_value, benchmark_name
                    )
                )
                metrics[metric_name] = metric_value

        result_json[benchmark_name] = metrics

    return result_json, test_names
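To illustrate the new module end to end, here is a hedged sketch (hypothetical benchmark data, not part of the diff) of what generate_summary_json_google_benchmark produces for a minimal Google Benchmark JSON document: the benchmark name is rewritten into a metric-safe test name and only numeric fields are kept as metrics.

from redisbench_admin.export.google_benchmark.google_benchmark_json_format import (
    generate_summary_json_google_benchmark,
)

input_json = {
    "benchmarks": [
        {
            "name": "BM_SetGet/threads:8",
            "iterations": 1000000,
            "real_time": 125.3,
            "cpu_time": 124.9,
            "time_unit": "ns",
        }
    ]
}

results_dict, test_names = generate_summary_json_google_benchmark(input_json)
# "BM_SetGet/threads:8" is rewritten to the safe name "BM_SetGet_threads_8";
# string fields such as "name" and "time_unit" are dropped, numeric ones are kept.
assert test_names == ["BM_SetGet_threads_8"]
assert results_dict["BM_SetGet_threads_8"]["real_time"] == 125.3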
2 changes: 1 addition & 1 deletion redisbench_admin/run/args.py
@@ -63,7 +63,7 @@ def common_run_args(parser):
parser.add_argument(
"--allowed-tools",
type=str,
default="memtier_benchmark,redis-benchmark,redisgraph-benchmark-go,ycsb,"
default="memtier_benchmark,redis-benchmark,redisgraph-benchmark-go,ycsb,go-ycsb,"
+ "tsbs_run_queries_redistimeseries,tsbs_load_redistimeseries,"
+ "ftsb_redisearch,"
+ "aibench_run_inference_redisai_vision,ann-benchmarks",
38 changes: 34 additions & 4 deletions redisbench_admin/run/common.py
@@ -33,7 +33,10 @@
from redisbench_admin.run.tsbs_run_queries_redistimeseries.tsbs_run_queries_redistimeseries import (
prepare_tsbs_benchmark_command,
)
from redisbench_admin.run.ycsb.ycsb import prepare_ycsb_benchmark_command
from redisbench_admin.run.ycsb.ycsb import (
prepare_ycsb_benchmark_command,
prepare_go_ycsb_benchmark_command,
)
from redisbench_admin.run_remote.remote_helpers import (
extract_module_semver_from_info_modules_cmd,
)
@@ -168,7 +171,7 @@ def prepare_benchmark_parameters_specif_tooling(
remote_results_file,
isremote,
)
if "ycsb" in benchmark_tool:
if "ycsb" in benchmark_tool and "go-ycsb" not in benchmark_tool:
if isremote is True:
benchmark_tool = "/tmp/ycsb/bin/ycsb"
current_workdir = "/tmp/ycsb"
@@ -179,6 +182,18 @@
entry,
current_workdir,
)
if "go-ycsb" in benchmark_tool:
if isremote is True:
benchmark_tool = "/tmp/{}".format(benchmark_tool)
command_arr, command_str = prepare_go_ycsb_benchmark_command(
benchmark_tool,
server_private_ip,
server_plaintext_port,
entry,
current_workdir,
cluster_api_enabled,
)

if "tsbs_" in benchmark_tool:
input_data_file = None
if isremote is True:
@@ -417,10 +432,25 @@ def execute_init_commands(benchmark_config, r, dbconfig_keyname="dbconfig"):
is_array = True
try:
logging.info("Sending init command: {}".format(cmd))
stdout = ""
if is_array:
stdout = r.execute_command(*cmd)
if "FT.CREATE" in cmd[0]:
logging.info("Detected FT.CREATE to all nodes on OSS Cluster")
try:
stdout = r.execute_command(*cmd, target_nodes="all")
except redis.exceptions.ResponseError:
pass
else:
stdout = r.execute_command(*cmd)
else:
stdout = r.execute_command(cmd)
if "FT.CREATE" in cmd:
logging.info("Detected FT.CREATE to all nodes on OSS Cluster")
try:
stdout = r.execute_command(cmd, target_nodes="all")
except redis.exceptions.ResponseError:
pass
else:
stdout = r.execute_command(cmd)
logging.info("Command reply: {}".format(stdout))
except redis.connection.ConnectionError as e:
logging.error(
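A note on the tool dispatch above: "go-ycsb" contains "ycsb" as a substring, which is why the plain ycsb branch now also checks that "go-ycsb" is absent. A small self-contained sketch (not part of the diff) of why both conditions are needed:

for tool in ("ycsb", "go-ycsb"):
    runs_ycsb_branch = "ycsb" in tool and "go-ycsb" not in tool
    runs_go_ycsb_branch = "go-ycsb" in tool
    # Exactly one preparation path should run for each tool.
    assert runs_ycsb_branch != runs_go_ycsb_branch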
78 changes: 59 additions & 19 deletions redisbench_admin/run/redistimeseries.py
@@ -315,25 +315,65 @@ def timeseries_test_sucess_flow(
rts.hset(
branch_target_table_keyname, None, None, branch_target_table_dict
)

update_secondary_result_keys(
artifact_version,
benchmark_duration_seconds,
build_variant_name,
dataset_load_duration_seconds,
deployment_name,
deployment_type,
metadata_tags,
rts,
running_platform,
start_time_ms,
test_name,
testcase_metric_context_paths,
tf_github_branch,
tf_github_org,
tf_github_repo,
tf_triggering_env,
)
if test_name is not None:
if type(test_name) is str:
update_secondary_result_keys(
artifact_version,
benchmark_duration_seconds,
build_variant_name,
dataset_load_duration_seconds,
deployment_name,
deployment_type,
metadata_tags,
rts,
running_platform,
start_time_ms,
test_name,
testcase_metric_context_paths,
tf_github_branch,
tf_github_org,
tf_github_repo,
tf_triggering_env,
)
if type(test_name) is list:
for inner_test_name in test_name:
update_secondary_result_keys(
artifact_version,
benchmark_duration_seconds,
build_variant_name,
dataset_load_duration_seconds,
deployment_name,
deployment_type,
metadata_tags,
rts,
running_platform,
start_time_ms,
inner_test_name,
testcase_metric_context_paths,
tf_github_branch,
tf_github_org,
tf_github_repo,
tf_triggering_env,
)
else:
update_secondary_result_keys(
artifact_version,
benchmark_duration_seconds,
build_variant_name,
dataset_load_duration_seconds,
deployment_name,
deployment_type,
metadata_tags,
rts,
running_platform,
start_time_ms,
test_name,
testcase_metric_context_paths,
tf_github_branch,
tf_github_org,
tf_github_repo,
tf_triggering_env,
)
return version_target_tables, branch_target_tables


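Since generate_summary_json_google_benchmark returns one test name per benchmark entry, the test_name reaching timeseries_test_sucess_flow can now be either a single string or a list of names. A hedged, self-contained sketch (helper name and test names are hypothetical, not the PR's code) of the normalization the branches above perform before calling update_secondary_result_keys:

def normalize_test_names(test_name):
    # A list is used as-is; a single string or None becomes a one-element list.
    if isinstance(test_name, list):
        return test_name
    return [test_name]


assert normalize_test_names("memtier-1Mkeys") == ["memtier-1Mkeys"]
assert normalize_test_names(["BM_a", "BM_b"]) == ["BM_a", "BM_b"]
assert normalize_test_names(None) == [None]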
@@ -52,17 +52,21 @@ def prepare_tsbs_benchmark_command(
return command_arr, command_str


def extract_tsbs_extra_links(
def extract_remote_tool_extra_links(
benchmark_config,
benchmark_tool,
config_key="clientconfig",
os_str="linux",
arch_str="amd64",
project="redistimeseries",
tools_group="tsbs",
):
remote_tool_link = "/tmp/{}".format(benchmark_tool)
tool_link = (
"https://s3.amazonaws.com/benchmarks.redislabs/"
+ "redistimeseries/tools/tsbs/{}_{}_{}".format(benchmark_tool, os_str, arch_str)
+ "{}/tools/{}/{}_{}_{}".format(
project, tools_group, benchmark_tool, os_str, arch_str
)
)
queries_file_link = None
for entry in benchmark_config[config_key]:
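The rename from extract_tsbs_extra_links to extract_remote_tool_extra_links parameterizes the S3 path by project and tools group. A hedged sketch (helper name is hypothetical, not part of the diff) reproducing the link construction and checking that the defaults still point at the previous redistimeseries/tsbs location:

def remote_tool_link_for(
    benchmark_tool,
    project="redistimeseries",
    tools_group="tsbs",
    os_str="linux",
    arch_str="amd64",
):
    # Mirrors the URL built by extract_remote_tool_extra_links above.
    return "https://s3.amazonaws.com/benchmarks.redislabs/" + "{}/tools/{}/{}_{}_{}".format(
        project, tools_group, benchmark_tool, os_str, arch_str
    )


# With the defaults, the URL is unchanged from the pre-rename behaviour.
assert remote_tool_link_for("tsbs_load_redistimeseries") == (
    "https://s3.amazonaws.com/benchmarks.redislabs/"
    "redistimeseries/tools/tsbs/tsbs_load_redistimeseries_linux_amd64"
)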