Skip to content

Updated dbconfig requests memory limit based upon client runner checks #188

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 13 commits into from
Dec 27, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions redis_benchmarks_specification/__cli__/args.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,12 @@ def spec_cli_args(parser):
default=SPECS_PATH_TEST_SUITES,
help="Test suites folder, containing the different test variations",
)
parser.add_argument(
"--defaults_filename",
type=str,
default="defaults.yml",
help="specify the defaults file containing spec topologies, common metric extractions,etc...",
)
parser.add_argument("--redis_host", type=str, default=GH_REDIS_SERVER_HOST)
parser.add_argument("--branch", type=str, default="unstable")
parser.add_argument("--commandstats-csv", type=str, default="")
Expand Down
4 changes: 4 additions & 0 deletions redis_benchmarks_specification/__cli__/stats.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,11 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
fail_on_required_diff = args.fail_on_required_diff
overall_result = True
test_names = []
defaults_filename = args.defaults_filename

for test_file in testsuite_spec_files:
if defaults_filename in test_file:
continue
benchmark_config = {}
requires_override = False
test_result = True
Expand Down
1 change: 1 addition & 0 deletions redis_benchmarks_specification/__common__/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,7 @@ def exporter_datasink_common(
]
},
)
print(overall_end_time_metrics)
# 7 days from now
expire_redis_metrics_ms = 7 * 24 * 60 * 60 * 1000
export_redis_metrics(
Expand Down
12 changes: 12 additions & 0 deletions redis_benchmarks_specification/__runner__/args.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,12 @@ def create_client_runner_args(project_name):
default=MACHINE_NAME,
help="Specify the running platform name. By default it will use the machine name.",
)
parser.add_argument(
"--defaults_filename",
type=str,
default="{}/defaults.yml".format(SPECS_PATH_TEST_SUITES),
help="specify the defaults file containing spec topologies, common metric extractions,etc...",
)
parser.add_argument("--triggering_env", type=str, default="ci")
parser.add_argument("--setup_type", type=str, default="oss-standalone")
parser.add_argument("--github_repo", type=str, default="redis")
Expand Down Expand Up @@ -178,4 +184,10 @@ def create_client_runner_args(project_name):
type=int,
help="override memtier test-time for each benchmark. By default will preserve test time specified in test spec",
)
parser.add_argument(
"--override-test-runs",
default=1,
type=int,
help="override memtier number of runs for each benchmark. By default will run once each test",
)
return parser
88 changes: 81 additions & 7 deletions redis_benchmarks_specification/__runner__/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,10 @@
)
from redisbench_admin.run.metrics import extract_results_table
from redisbench_admin.run.run import calculate_client_tool_duration_and_check
from redisbench_admin.utils.benchmark_config import get_final_benchmark_config
from redisbench_admin.utils.benchmark_config import (
get_final_benchmark_config,
get_defaults,
)
from redisbench_admin.utils.local import get_local_run_full_filename
from redisbench_admin.utils.results import post_process_benchmark_results

Expand Down Expand Up @@ -215,6 +218,7 @@ def prepare_memtier_benchmark_parameters(
tls_cacert=None,
resp_version=None,
override_memtier_test_time=0,
override_test_runs=1,
):
benchmark_command = [
full_benchmark_path,
Expand Down Expand Up @@ -253,6 +257,62 @@ def prepare_memtier_benchmark_parameters(
if "arguments" in clientconfig:
benchmark_command_str = benchmark_command_str + " " + clientconfig["arguments"]
logging.info(override_memtier_test_time)

if override_test_runs > 1:
benchmark_command_str = re.sub(
"--run-count\\s\\d+",
"--run-count={}".format(override_test_runs),
benchmark_command_str,
)
benchmark_command_str = re.sub(
"--run-count=\\d+",
"--run-count={}".format(override_test_runs),
benchmark_command_str,
)
benchmark_command_str = re.sub(
'--run-count="\\d+"',
"--run-count={}".format(override_test_runs),
benchmark_command_str,
)
# short
benchmark_command_str = re.sub(
"-x\\s\\d+",
"-x={}".format(override_test_runs),
benchmark_command_str,
)
benchmark_command_str = re.sub(
"-x=\\d+",
"-x={}".format(override_test_runs),
benchmark_command_str,
)
benchmark_command_str = re.sub(
'-x="\\d+"',
"-x={}".format(override_test_runs),
benchmark_command_str,
)
if (
len(
re.findall(
"--run-count={}".format(override_test_runs),
benchmark_command_str,
)
)
== 0
and len(
re.findall(
"-x={}".format(override_test_runs),
benchmark_command_str,
)
)
== 0
):
logging.info("adding --run-count option to benchmark run. ")
benchmark_command_str = (
benchmark_command_str
+ " "
+ "--run-count={}".format(override_test_runs)
)

if override_memtier_test_time > 0:
benchmark_command_str = re.sub(
"--test-time\\s\\d+",
Expand Down Expand Up @@ -301,14 +361,25 @@ def process_self_contained_coordinator_stream(
dry_run_count = 0
dry_run = args.dry_run
dry_run_include_preload = args.dry_run_include_preload
defaults_filename = args.defaults_filename
override_test_runs = args.override_test_runs
(
_,
default_metrics,
_,
_,
_,
) = get_defaults(defaults_filename)

for test_file in testsuite_spec_files:
if defaults_filename in test_file:
continue
client_containers = []

with open(test_file, "r") as stream:
_, benchmark_config, test_name = get_final_benchmark_config(
None, stream, ""
)
default_metrics = []

if tls_enabled:
test_name = test_name + "-tls"
Expand Down Expand Up @@ -511,7 +582,7 @@ def process_self_contained_coordinator_stream(
)

used_memory_check(
benchmark_required_memory, r, "start of benchmark"
test_name, benchmark_required_memory, r, "start of benchmark"
)

logging.info("Checking if there is a keyspace check being enforced")
Expand Down Expand Up @@ -582,6 +653,7 @@ def process_self_contained_coordinator_stream(
test_tls_cacert,
resp_version,
override_memtier_test_time,
override_test_runs,
)

client_container_image = extract_client_container_image(
Expand Down Expand Up @@ -652,7 +724,9 @@ def process_self_contained_coordinator_stream(

logging.info("Printing client tool stdout output")

used_memory_check(benchmark_required_memory, r, "end of benchmark")
used_memory_check(
test_name, benchmark_required_memory, r, "end of benchmark"
)

if args.flushall_on_every_test_end:
logging.info("Sending FLUSHALL to the DB")
Expand Down Expand Up @@ -804,14 +878,14 @@ def process_self_contained_coordinator_stream(
)


def used_memory_check(test_name, benchmark_required_memory, r, stage):
    """Verify the DB's real memory usage stays within the benchmark spec's
    dbconfig memory request, aborting the run otherwise.

    Args:
        test_name: name of the benchmark test being checked (used in the error log).
        benchmark_required_memory: memory requested by the test spec
            # assumes same unit as Redis INFO `used_memory` (bytes) -- TODO confirm against caller
        r: redis client connected to the DB under test.
        stage: human-readable label for when the check runs
            (e.g. "start of benchmark", "end of benchmark").

    Exits the whole process with status 1 on violation so that a
    mis-specified test fails fast instead of producing misleading results.
    """
    used_memory = r.info("memory")["used_memory"]
    # Round up to whole GB purely for human-readable logging; the
    # comparison below uses the raw byte value.
    used_memory_gb = int(math.ceil(float(used_memory) / 1024.0 / 1024.0 / 1024.0))
    logging.info("Benchmark used memory at {}: {}g".format(stage, used_memory_gb))
    if used_memory > benchmark_required_memory:
        # NOTE(review): the message mixes units — the request is printed raw
        # while usage is printed in GB; confirm and align if possible.
        logging.error(
            "The benchmark {} specified a dbconfig resource request of memory ({}) below the REAL MEMORY USAGE OF: {}. FIX IT!.".format(
                test_name, benchmark_required_memory, used_memory_gb
            )
        )
        exit(1)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -372,6 +372,7 @@ def process_self_contained_coordinator_stream(
cpuset_start_pos=0,
redis_proc_start_port=6379,
docker_air_gap=False,
defaults_filename="defaults.yml",
):
stream_id = "n/a"
overall_result = False
Expand Down Expand Up @@ -406,6 +407,8 @@ def process_self_contained_coordinator_stream(
logging.info("Successfully loaded images {}".format(images_loaded))

for test_file in testsuite_spec_files:
if defaults_filename in test_file:
continue
redis_containers = []
client_containers = []

Expand Down
24 changes: 24 additions & 0 deletions redis_benchmarks_specification/test-suites/defaults.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@

# Default exporter configuration shared across all test suites: defines how
# exported time series are broken down and which memtier_benchmark JSON
# output paths are extracted as metrics (replaces the per-suite exporter
# sections previously duplicated in every test spec).
exporter:
  redistimeseries:
    # Dimensions each exported metric is split by.
    break_by:
      - version
      - commit
    # JSON path of the benchmark start timestamp used as the sample time.
    timemetric: $."ALL STATS".Runtime."Start time"
    # Metric JSON paths; BEST/WORST/AGGREGATED sections only exist in
    # memtier output when multiple runs are requested (e.g. --run-count).
    metrics:
      - $."BEST RUN RESULTS".Totals."Ops/sec"
      - $."BEST RUN RESULTS".Totals."Latency"
      - $."BEST RUN RESULTS".Totals."Misses/sec"
      - $."BEST RUN RESULTS".Totals."Percentile Latencies"."p50.00"
      - $."WORST RUN RESULTS".Totals."Ops/sec"
      - $."WORST RUN RESULTS".Totals."Latency"
      - $."WORST RUN RESULTS".Totals."Misses/sec"
      - $."WORST RUN RESULTS".Totals."Percentile Latencies"."p50.00"
      - $."AGGREGATED AVERAGE RESULTS (3 runs)".Totals."Ops/sec"
      - $."AGGREGATED AVERAGE RESULTS (3 runs)".Totals."Latency"
      - $."AGGREGATED AVERAGE RESULTS (3 runs)".Totals."Misses/sec"
      # NOTE(review): "(5 runs)" below is inconsistent with the "(3 runs)"
      # entries above — confirm the intended run count; the section label in
      # memtier output depends on the actual --run-count used.
      - $."AGGREGATED AVERAGE RESULTS (5 runs)".Totals."Percentile Latencies"."p50.00"
      - $."ALL STATS".Totals."Ops/sec"
      - $."ALL STATS".Totals."Latency"
      - $."ALL STATS".Totals."Misses/sec"
      - $."ALL STATS".Totals."Percentile Latencies"."p50.00"
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ dbconfig:
keyspacelen: 0
resources:
requests:
memory: 1g
memory: 52g
tested-commands:
- hset
redis-topologies:
Expand All @@ -23,17 +23,7 @@ clientconfig:
requests:
cpus: '4'
memory: 2g
exporter:
redistimeseries:
break_by:
- version
- commit
timemetric: $."ALL STATS".Runtime."Start time"
metrics:
- $."ALL STATS".Totals."Ops/sec"
- $."ALL STATS".Totals."Latency"
- $."ALL STATS".Totals."Misses/sec"
- $."ALL STATS".Totals."Percentile Latencies"."p50.00"

tested-groups:
- hash
priority: 5
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ dbconfig:
keyspacelen: 0
resources:
requests:
memory: 1g
memory: 52g
tested-commands:
- hset
redis-topologies:
Expand All @@ -23,17 +23,7 @@ clientconfig:
requests:
cpus: '4'
memory: 2g
exporter:
redistimeseries:
break_by:
- version
- commit
timemetric: $."ALL STATS".Runtime."Start time"
metrics:
- $."ALL STATS".Totals."Ops/sec"
- $."ALL STATS".Totals."Latency"
- $."ALL STATS".Totals."Misses/sec"
- $."ALL STATS".Totals."Percentile Latencies"."p50.00"

tested-groups:
- hash
priority: 5
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ dbconfig:
keyspacelen: 0
resources:
requests:
memory: 1g
memory: 9g
tested-commands:
- hset
redis-topologies:
Expand All @@ -23,17 +23,7 @@ clientconfig:
requests:
cpus: '4'
memory: 2g
exporter:
redistimeseries:
break_by:
- version
- commit
timemetric: $."ALL STATS".Runtime."Start time"
metrics:
- $."ALL STATS".Totals."Ops/sec"
- $."ALL STATS".Totals."Latency"
- $."ALL STATS".Totals."Misses/sec"
- $."ALL STATS".Totals."Percentile Latencies"."p50.00"

tested-groups:
- hash
priority: 5
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ dbconfig:
keyspacelen: 0
resources:
requests:
memory: 1g
memory: 9g
tested-commands:
- hset
redis-topologies:
Expand All @@ -23,17 +23,7 @@ clientconfig:
requests:
cpus: '4'
memory: 2g
exporter:
redistimeseries:
break_by:
- version
- commit
timemetric: $."ALL STATS".Runtime."Start time"
metrics:
- $."ALL STATS".Totals."Ops/sec"
- $."ALL STATS".Totals."Latency"
- $."ALL STATS".Totals."Misses/sec"
- $."ALL STATS".Totals."Percentile Latencies"."p50.00"

tested-groups:
- hash
priority: 5
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ dbconfig:
keyspacelen: 0
resources:
requests:
memory: 1g
memory: 2g
tested-commands:
- hset
redis-topologies:
Expand All @@ -23,17 +23,7 @@ clientconfig:
requests:
cpus: '4'
memory: 2g
exporter:
redistimeseries:
break_by:
- version
- commit
timemetric: $."ALL STATS".Runtime."Start time"
metrics:
- $."ALL STATS".Totals."Ops/sec"
- $."ALL STATS".Totals."Latency"
- $."ALL STATS".Totals."Misses/sec"
- $."ALL STATS".Totals."Percentile Latencies"."p50.00"

tested-groups:
- hash
priority: 5
Loading