Skip to content

Updated benchmark config to include resource memory usage. Checking valid memory settings upon CLI runner #186

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 13 commits into from
Dec 26, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 4 additions & 6 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,29 +1,27 @@
[tool.poetry]
name = "redis-benchmarks-specification"
version = "0.1.62"
version = "0.1.63"
description = "The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute."
authors = ["filipecosta90 <filipecosta.90@gmail.com>","Redis Performance Group <performance@redis.com>"]
readme = "Readme.md"

[tool.poetry.dependencies]
python = "^3.8"
Flask = "^2.2.2"
Werkzeug = "^2.2.2"
python = "^3.6.9a"
Flask = "^2.0.3"
flask-restx = "^0.5.0"
redis = "^4.2.0"
marshmallow = "^3.12.2"
argparse = "^1.4.0"
Flask-HTTPAuth = "^4.4.0"
PyYAML = "^6.0"
docker = "^5.0.0"
redisbench-admin = "0.9.31"
redisbench-admin = "^0.9.23"
psutil = "^5.9.4"
PyGithub = "^1.55"
GitPython = "^3.1.20"
semver = "^2.13.0"
node-semver = "^0.8.1"
typed-ast = "^1.5.0"
numpy = "^1.23.4"
oyaml = "^1.0"

[tool.poetry.dev-dependencies]
Expand Down
27 changes: 27 additions & 0 deletions redis_benchmarks_specification/__cli__/stats.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,6 +161,33 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
if priority is not None:
benchmark_config["priority"] = priority

resources = {}
if "resources" in benchmark_config["dbconfig"]:
resources = benchmark_config["dbconfig"]["resources"]
else:
benchmark_config["dbconfig"]["resources"] = resources

resources_requests = {}
if "requests" in resources:
resources_requests = benchmark_config["dbconfig"]["resources"][
"requests"
]
else:
benchmark_config["dbconfig"]["resources"][
"requests"
] = resources_requests

if "memory" not in resources_requests:
benchmark_config["dbconfig"]["resources"]["requests"][
"memory"
] = "1g"
requires_override = True
logging.warn(
"dont have resources.requests.memory in {}. Setting 1GB default".format(
test_name
)
)

if tested_groups != origin_tested_groups:
requires_override = True
benchmark_config["tested-groups"] = tested_groups
Expand Down
163 changes: 163 additions & 0 deletions redis_benchmarks_specification/__common__/runner.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,64 @@
import csv
import logging
import os
import pathlib
import re

import redis
from redisbench_admin.run.metrics import collect_redis_metrics
from redisbench_admin.run.redistimeseries import timeseries_test_sucess_flow
from redisbench_admin.run_remote.run_remote import export_redis_metrics


def execute_init_commands(benchmark_config, r, dbconfig_keyname="dbconfig"):
    """Send the suite's DB init commands to a Redis connection.

    Looks for any key containing "init_commands" under
    ``benchmark_config[dbconfig_keyname]`` and sends each command via
    ``r.execute_command``. A command entry may be:
    - an already tokenized list (sent as-is, one arg per element),
    - a string containing double quotes (tokenized with shlex so quoted
      arguments stay together),
    - a plain string (sent whole, preserving previous behavior).

    :param benchmark_config: parsed benchmark spec (dict-like).
    :param r: Redis client (anything exposing ``execute_command``).
    :param dbconfig_keyname: key under which the db config lives.
    :return: number of commands sent successfully.
    """
    import shlex  # local import: only needed for quoted-command tokenization

    cmds = None
    res = 0
    if dbconfig_keyname in benchmark_config:
        for config_key, config_value in benchmark_config[dbconfig_keyname].items():
            if "init_commands" in config_key:
                cmds = config_value

    # Normalize a single command string into a one-element list.
    if isinstance(cmds, str):
        cmds = [cmds]
    if cmds is not None:
        for cmd in cmds:
            is_array = isinstance(cmd, list)
            if not is_array and '"' in cmd:
                # Quoted arguments present (e.g. CONFIG SET save "900 1").
                # shlex honors the quoting and yields proper argv tokens.
                # NOTE: the previous csv.reader-over-a-string approach fed the
                # command to the parser one character at a time, splitting
                # quoted commands into single characters.
                cmd = shlex.split(cmd)
                is_array = True
            try:
                logging.info("Sending init command: {}".format(cmd))
                stdout = ""
                if is_array:
                    stdout = r.execute_command(*cmd)
                else:
                    stdout = r.execute_command(cmd)
                res = res + 1
                logging.info("Command reply: {}".format(stdout))
            except redis.connection.ConnectionError as e:
                # Best-effort: log connection failures and keep counting what
                # did succeed; the caller decides what to do with the count.
                logging.error(
                    "Error establishing connection to Redis. Message: {}".format(
                        e.__str__()
                    )
                )

    return res


def get_benchmark_specs(testsuites_folder, test="", test_regex=".*"):
final_files = []
Expand Down Expand Up @@ -56,3 +112,110 @@ def extract_testsuites(args):
)
)
return testsuite_spec_files


def reset_commandstats(redis_conns):
    """Reset command statistics (CONFIG RESETSTAT) on every shard.

    :param redis_conns: iterable of Redis client connections, one per shard.

    Shards that reject CONFIG RESETSTAT (e.g. restricted deployments) are
    logged and skipped so the remaining shards are still reset.
    """
    for shard_n, redis_conn in enumerate(redis_conns):
        # Lazy %-formatting and fixed typos in the log messages
        # (was "commmandstats" / "Catched ... status").
        logging.info("Resetting commandstats for shard %d", shard_n)
        try:
            redis_conn.config_resetstat()
        except redis.exceptions.ResponseError as e:
            logging.warning("Caught an error while resetting stats: %s", e)


def exporter_datasink_common(
    benchmark_config,
    benchmark_duration_seconds,
    build_variant_name,
    datapoint_time_ms,
    dataset_load_duration_seconds,
    datasink_conn,
    datasink_push_results_redistimeseries,
    git_branch,
    git_version,
    metadata,
    redis_conns,
    results_dict,
    running_platform,
    setup_name,
    setup_type,
    test_name,
    tf_github_org,
    tf_github_repo,
    tf_triggering_env,
    topology_spec_name,
):
    """Export a finished benchmark run to the datasink (RedisTimeSeries).

    Performs three pushes in order:
    1. the benchmark results themselves (``timeseries_test_sucess_flow``),
    2. end-of-run memory metrics collected from the tested shards,
    3. end-of-run commandstats metrics from the tested shards.

    :param benchmark_config: parsed benchmark spec for the test.
    :param benchmark_duration_seconds: wall-clock duration of the benchmark.
    :param build_variant_name: build variant label attached to the series.
    :param datapoint_time_ms: timestamp (ms) used for all exported datapoints.
    :param dataset_load_duration_seconds: time spent preloading the dataset.
    :param datasink_conn: Redis connection to the datasink instance.
    :param datasink_push_results_redistimeseries: flag forwarded to the
        results-export helper (presumably gates the actual push — helper is
        external; confirm in redisbench-admin).
    :param git_branch, git_version: source version labels for the series.
    :param metadata: extra key/value labels attached to the results.
    :param redis_conns: connections to the benchmarked shards (metrics source).
    :param results_dict: benchmark tool output to be exported.
    :param running_platform: platform label for the series.
    :param setup_name, setup_type, topology_spec_name: topology labels.
    :param test_name: name of the test being exported.
    :param tf_github_org, tf_github_repo, tf_triggering_env: CI labels.

    NOTE(review): the helpers below take long positional argument lists —
    argument order here must match redisbench-admin's signatures exactly.
    """
    logging.info("Using datapoint_time_ms: {}".format(datapoint_time_ms))
    # 1) Push the benchmark results (the two None args are positional
    #    placeholders in the redisbench-admin signature).
    timeseries_test_sucess_flow(
        datasink_push_results_redistimeseries,
        git_version,
        benchmark_config,
        benchmark_duration_seconds,
        dataset_load_duration_seconds,
        None,
        topology_spec_name,
        setup_name,
        None,
        results_dict,
        datasink_conn,
        datapoint_time_ms,
        test_name,
        git_branch,
        tf_github_org,
        tf_github_repo,
        tf_triggering_env,
        metadata,
        build_variant_name,
        running_platform,
    )
    # 2) Collect and export end-of-run memory usage from each tested shard.
    logging.info("Collecting memory metrics")
    (_, _, overall_end_time_metrics,) = collect_redis_metrics(
        redis_conns,
        ["memory"],
        {
            "memory": [
                "used_memory",
                "used_memory_dataset",
            ]
        },
    )
    # 7 days from now
    expire_redis_metrics_ms = 7 * 24 * 60 * 60 * 1000
    export_redis_metrics(
        git_version,
        datapoint_time_ms,
        overall_end_time_metrics,
        datasink_conn,
        setup_name,
        setup_type,
        test_name,
        git_branch,
        tf_github_org,
        tf_github_repo,
        tf_triggering_env,
        {"metric-type": "redis-metrics"},
        expire_redis_metrics_ms,
    )
    # 3) Collect and export per-command statistics (INFO commandstats),
    #    reusing the same 7-day expiry.
    logging.info("Collecting commandstat metrics")
    (
        _,
        _,
        overall_commandstats_metrics,
    ) = collect_redis_metrics(redis_conns, ["commandstats"])
    export_redis_metrics(
        git_version,
        datapoint_time_ms,
        overall_commandstats_metrics,
        datasink_conn,
        setup_name,
        setup_type,
        test_name,
        git_branch,
        tf_github_org,
        tf_github_repo,
        tf_triggering_env,
        {"metric-type": "commandstats"},
        expire_redis_metrics_ms,
    )
15 changes: 14 additions & 1 deletion redis_benchmarks_specification/__runner__/args.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,12 +54,19 @@ def create_client_runner_args(project_name):
help="Interpret PATTERN as a regular expression to filter test names",
)
parser.add_argument("--db_server_host", type=str, default="localhost")
parser.add_argument("--db_server_password", type=str, default=None)
parser.add_argument("--db_server_port", type=int, default=6379)
parser.add_argument("--cpuset_start_pos", type=int, default=0)
parser.add_argument(
"--tests-priority-lower-limit",
type=int,
default=0,
help="Run a subset of the tests based uppon a preset priority. By default runs all tests.",
)
parser.add_argument(
"--tests-priority-upper-limit",
type=int,
default=-1,
default=100000,
help="Run a subset of the tests based uppon a preset priority. By default runs all tests.",
)
parser.add_argument(
Expand All @@ -68,6 +75,12 @@ def create_client_runner_args(project_name):
action="store_true",
help="Only check how many benchmarks we would run. Don't run benchmark but can change state of DB.",
)
parser.add_argument(
"--dry-run-include-preload",
default=False,
action="store_true",
help="Run all steps before benchmark. This can change the state of the DB.",
)
parser.add_argument(
"--datasink_redistimeseries_host", type=str, default=DATASINK_RTS_HOST
)
Expand Down
Loading