Updated the benchmark SPEC to include command priority. Included --dry-run and --tests-priority-upper-limit in redis-benchmarks-spec-client-runner #184
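Summary of the change, as reflected in the diff below: the command-priority ranking in `commands-priority.json` is reshuffled, the stats CLI learns to derive a per-test `priority` from the maximum priority of each test's tested commands (plus an optional `usecs` column in the commandstats summary), and the client runner gains two flags, `--tests-priority-upper-limit` and `--dry-run`. An invocation such as `redis-benchmarks-spec-client-runner --tests-priority-upper-limit 100 --dry-run` (illustrative argument values) would report how many tests fall within the priority limit, without executing them.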

Merged 1 commit on Dec 21, 2022
140 changes: 70 additions & 70 deletions commands-priority.json
@@ -74,84 +74,84 @@
"setbit": 73,
"zcount": 74,
"unlink": 75,
"select": 76,
"setnx": 77,
"type": 78,
"hlen": 79,
"sscan": 80,
"hsetnx": 81,
"setnx": 76,
"type": 77,
"hlen": 78,
"sscan": 79,
"hsetnx": 80,
"select": 81,
"srandmember": 82,
"zscan": 83,
"xack": 84,
"pttl": 85,
"restore": 86,
"pexpireat": 87,
"xdel": 88,
"discard": 89,
"xpending": 90,
"getset": 91,
"psetex": 92,
"mset": 93,
"bzpopmin": 94,
"persist": 95,
"pfcount": 96,
"hkeys": 97,
"xread": 98,
"xlen": 99,
"xtrim": 100,
"decrby": 101,
"rpushx": 102,
"sinter": 103,
"decr": 104,
"sunion": 105,
"sinterstore": 106,
"smismember": 107,
"zpopmin": 108,
"echo": 109,
"dbsize": 110,
"lset": 111,
"xpending": 89,
"getset": 90,
"psetex": 91,
"mset": 92,
"bzpopmin": 93,
"persist": 94,
"pfcount": 95,
"hkeys": 96,
"xread": 97,
"xlen": 98,
"xtrim": 99,
"decrby": 100,
"rpushx": 101,
"sinter": 102,
"decr": 103,
"sunion": 104,
"sinterstore": 105,
"smismember": 106,
"zpopmin": 107,
"echo": 108,
"dbsize": 109,
"lset": 110,
"geoadd": 111,
"xclaim": 112,
"geoadd": 113,
"randomkey": 114,
"rename": 115,
"zrangebylex": 116,
"zinterstore": 117,
"append": 118,
"zunionstore": 119,
"xrange": 120,
"getrange": 121,
"xautoclaim": 122,
"sunionstore": 123,
"bitcount": 124,
"dump": 125,
"hrandfield": 126,
"getex": 127,
"strlen": 128,
"pubsub": 129,
"linsert": 130,
"bitpos": 131,
"sdiff": 132,
"msetnx": 133,
"getdel": 134,
"sort": 135,
"renamenx": 136,
"setrange": 137,
"lpos": 138,
"geopos": 139,
"blmove": 140,
"georadius": 141,
"zlexcount": 142,
"pfmerge": 143,
"xrevrange": 144,
"georadiusbymember": 145,
"zremrangebylex": 146,
"lmove": 147,
"sdiffstore": 148,
"zpopmax": 149,
"bzpopmax": 150,
"zmscore": 151,
"flushdb": 152,
"zrevrangebylex": 153,
"randomkey": 113,
"rename": 114,
"zrangebylex": 115,
"zinterstore": 116,
"append": 117,
"zunionstore": 118,
"xrange": 119,
"getrange": 120,
"xautoclaim": 121,
"sunionstore": 122,
"bitcount": 123,
"dump": 124,
"hrandfield": 125,
"getex": 126,
"strlen": 127,
"pubsub": 128,
"linsert": 129,
"bitpos": 130,
"sdiff": 131,
"msetnx": 132,
"getdel": 133,
"sort": 134,
"renamenx": 135,
"setrange": 136,
"lpos": 137,
"geopos": 138,
"blmove": 139,
"georadius": 140,
"zlexcount": 141,
"pfmerge": 142,
"xrevrange": 143,
"georadiusbymember": 144,
"zremrangebylex": 145,
"lmove": 146,
"sdiffstore": 147,
"zpopmax": 148,
"bzpopmax": 149,
"zmscore": 150,
"flushdb": 151,
"zrevrangebylex": 152,
"discard": 153,
"smove": 154,
"flushall": 155,
"georadius_ro": 156,
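For orientation, a minimal sketch of how such a command-to-priority map can be filtered against an upper limit (lower values appear to rank the more heavily used commands). It assumes `commands-priority.json` sits in the working directory, matching the file in this diff:

```python
import json

# Minimal sketch (assumed file location): load the command -> priority map
# and keep only the commands at or below a given priority upper limit.
with open("commands-priority.json") as fd:
    priority_by_command = json.load(fd)

upper_limit = 100  # mirrors --tests-priority-upper-limit
within_limit = {cmd: p for cmd, p in priority_by_command.items() if p <= upper_limit}
print(
    "{} of {} ranked commands fall within the limit".format(
        len(within_limit), len(priority_by_command)
    )
)
```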
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "redis-benchmarks-specification"
-version = "0.1.61"
+version = "0.1.62"
 description = "The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute."
 authors = ["filipecosta90 <filipecosta.90@gmail.com>","Redis Performance Group <performance@redis.com>"]
 readme = "Readme.md"
58 changes: 56 additions & 2 deletions redis_benchmarks_specification/__cli__/stats.py
@@ -44,6 +44,15 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
             len(testsuite_spec_files), testsuites_folder
         )
     )
+    priority_json = {}
+    if args.commands_priority_file != "":
+        with open(args.commands_priority_file, "r") as fd:
+            logging.info(
+                "Reading {} file with priority by commandstats".format(
+                    args.commands_priority_file
+                )
+            )
+            priority_json = json.load(fd)
     tracked_groups = []
     override_enabled = args.override_tests
     fail_on_required_diff = args.fail_on_required_diff
@@ -76,6 +85,7 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
         if "tested-groups" in benchmark_config:
             origin_tested_groups = benchmark_config["tested-groups"]
         origin_tested_commands = []
+
         tested_commands = []
         if "tested-commands" in benchmark_config:
             origin_tested_commands = benchmark_config["tested-commands"]
@@ -126,6 +136,31 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
                     )
                 )
 
+                priority = None
+                # maximum priority of all tested commands
+                priority_json_value = None
+                for command in tested_commands:
+                    if command in priority_json:
+                        priority_v = priority_json[command]
+                        if priority_json_value is None:
+                            priority_json_value = priority_v
+                        if priority_v > priority_json_value:
+                            priority_json_value = priority_v
+
+                if "priority" in benchmark_config:
+                    priority = benchmark_config["priority"]
+                else:
+                    if priority_json_value is not None:
+                        requires_override = True
+                        logging.warning(
+                            "don't have priority in {}, but the commands in the test have max priority of {}".format(
+                                test_name, priority_json_value
+                            )
+                        )
+                        priority = priority_json_value
+                if priority is not None:
+                    benchmark_config["priority"] = priority
+
                 if tested_groups != origin_tested_groups:
                     requires_override = True
                     benchmark_config["tested-groups"] = tested_groups
@@ -171,6 +206,7 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
 
     # open file in read mode
    total_count = 0
+    total_usecs = 0
    total_tracked_count = 0
    with open(
        args.commandstats_csv, "r", encoding="utf8", errors="ignore"
@@ -185,6 +221,10 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
             cmdstat = row[0]
             cmdstat = cmdstat.replace("cmdstat_", "")
             count = int(row[1])
+            usecs = None
+            if len(row) > 2:
+                usecs = int(row[2])
+                total_usecs += usecs
             if count == 0:
                 continue
             tracked = False
@@ -206,7 +246,7 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
             if cmdstat in tracked_commands_json:
                 tracked = True
             if module is False or include_modules:
-                row = [cmdstat, group, count, tracked, deprecated]
+                row = [cmdstat, group, count, usecs, tracked, deprecated]
             rows.append(row)
 
     priority_list = sorted(((priority[cmd], cmd) for cmd in priority), reverse=True)
@@ -245,7 +285,16 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
         json.dump(priority_json, fd, indent=True)
 
     if args.summary_csv != "":
-        header = ["command", "group", "count", "tracked", "deprecated", "%"]
+        header = [
+            "command",
+            "group",
+            "count",
+            "usecs",
+            "tracked",
+            "deprecated",
+            "% count",
+            "% usecs",
+        ]
         import csv
 
         with open(args.summary_csv, "w", encoding="UTF8", newline="") as f:
@@ -256,8 +305,13 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
             for row in rows:
                 # write the data
                 count = row[2]
+                usec = row[3]
                 pct = count / total_count
+                pct_usec = "n/a"
+                if usec is not None:
+                    pct_usec = usec / total_usecs
                 row.append(pct)
+                row.append(pct_usec)
                 writer.writerow(row)
 
     if total_tracked_count > 0:
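For context, a minimal sketch of the commandstats CSV shape the loop above appears to expect — `cmdstat_<command>,count[,usecs]`, the optional third column being the per-command microseconds total now summed into `total_usecs` (illustrative values, assumed layout):

```python
import csv
import io

# Illustrative commandstats rows (assumed layout: cmdstat_<command>,count[,usecs]).
sample = "cmdstat_get,1000,2500\ncmdstat_set,400,1800\ncmdstat_ping,50\n"

total_count = 0
total_usecs = 0
for row in csv.reader(io.StringIO(sample)):
    command = row[0].replace("cmdstat_", "")
    count = int(row[1])
    usecs = int(row[2]) if len(row) > 2 else None  # usecs column is optional
    total_count += count
    if usecs is not None:
        total_usecs += usecs
    print(command, count, usecs)

print("totals:", total_count, total_usecs)
```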
12 changes: 12 additions & 0 deletions redis_benchmarks_specification/__runner__/args.py
@@ -56,6 +56,18 @@ def create_client_runner_args(project_name):
parser.add_argument("--db_server_host", type=str, default="localhost")
parser.add_argument("--db_server_port", type=int, default=6379)
parser.add_argument("--cpuset_start_pos", type=int, default=0)
parser.add_argument(
"--tests-priority-upper-limit",
type=int,
default=-1,
help="Run a subset of the tests based uppon a preset priority. By default runs all tests.",
)
parser.add_argument(
"--dry-run",
default=False,
action="store_true",
help="Only check how many benchmarks we would run. Don't run benchmark but can change state of DB.",
)
parser.add_argument(
"--datasink_redistimeseries_host", type=str, default=DATASINK_RTS_HOST
)
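A quick way to see how the two new flags behave once parsed — dashes become underscores on the argparse namespace, which is how the runner reads them back. A minimal standalone sketch that mirrors (rather than imports) the project's parser:

```python
import argparse

# Standalone mirror of the two new flags, for illustration only.
parser = argparse.ArgumentParser()
parser.add_argument("--tests-priority-upper-limit", type=int, default=-1)
parser.add_argument("--dry-run", default=False, action="store_true")

args = parser.parse_args(["--tests-priority-upper-limit", "100", "--dry-run"])
# argparse turns dashes into underscores on the resulting namespace.
assert args.tests_priority_upper_limit == 100
assert args.dry_run is True
```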
31 changes: 31 additions & 0 deletions redis_benchmarks_specification/__runner__/runner.py
@@ -262,6 +262,8 @@ def process_self_contained_coordinator_stream(
     overall_result = True
     results_matrix = []
     total_test_suite_runs = 0
+    dry_run_count = 0
+    dry_run = args.dry_run
     for test_file in testsuite_spec_files:
         client_containers = []
 
@@ -290,6 +292,7 @@ def process_self_contained_coordinator_stream(
             tf_github_repo = args.github_repo
             tf_triggering_env = args.platform_name
             setup_type = args.setup_type
+            priority_limit = args.tests_priority_upper_limit
             git_hash = "NA"
             git_version = args.github_version
             build_variant_name = "NA"
@@ -364,6 +367,25 @@ def process_self_contained_coordinator_stream(
                         _, test_tls_key = cp_to_workdir(
                             temporary_dir_client, tls_key
                         )
+                    priority = None
+                    if "priority" in benchmark_config:
+                        priority = benchmark_config["priority"]
+
+                    if priority_limit > 0 and priority is not None:
+                        if priority_limit < priority:
+                            logging.warning(
+                                "Skipping test {} given the priority limit ({}) is below the test priority value ({})".format(
+                                    test_name, priority_limit, priority
+                                )
+                            )
+                            continue
+                        logging.info(
+                            "Test {} priority ({}) is within the priority limit ({})".format(
+                                test_name,
+                                priority,
+                                priority_limit,
+                            )
+                        )
+
                     if "dataset" in benchmark_config["dbconfig"]:
                         if args.run_tests_with_dataset is False:
@@ -374,6 +396,10 @@
                             )
                             continue
 
+                    if dry_run is True:
+                        dry_run_count = dry_run_count + 1
+                        continue
+
                     if "preload_tool" in benchmark_config["dbconfig"]:
                         data_prepopulation_step(
                             benchmark_config,
@@ -669,6 +695,11 @@ def process_self_contained_coordinator_stream(
         )
         csv_writer.dump(dest_fpath)
 
+    if dry_run is True:
+        logging.info(
+            "Number of tests that would have been run: {}".format(dry_run_count)
+        )
+
 
 def cp_to_workdir(benchmark_tool_workdir, srcfile):
     head, filename = os.path.split(srcfile)
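Isolating the two behaviors added above — the priority gate and the dry-run counter — into a standalone sketch; the test names and priority values here are hypothetical, not read from the real suite files:

```python
# Standalone sketch of the gating logic added above (hypothetical data).
tests = {
    "memtier_benchmark-1key-hash-hset": 5,      # hypothetical priority values
    "memtier_benchmark-low-priority-test": 120,
}
priority_limit = 100  # as if --tests-priority-upper-limit 100 were passed
dry_run = True        # as if --dry-run were passed

dry_run_count = 0
for test_name, priority in tests.items():
    # Skip tests whose priority sits above the limit; a limit <= 0 disables the gate.
    if priority_limit > 0 and priority is not None and priority_limit < priority:
        continue
    if dry_run:
        dry_run_count += 1  # counted, never executed
        continue
    # ... the real runner would spin up containers and execute the benchmark here ...

if dry_run:
    print("Number of tests that would have been run: {}".format(dry_run_count))
```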
@@ -31,3 +31,4 @@ exporter:
   - $."ALL STATS".Totals."Percentile Latencies"."p50.00"
 tested-groups:
 - hash
+priority: 5
@@ -31,3 +31,4 @@ exporter:
   - $."ALL STATS".Totals."Percentile Latencies"."p50.00"
 tested-groups:
 - hash
+priority: 5
@@ -31,3 +31,4 @@ exporter:
   - $."ALL STATS".Totals."Percentile Latencies"."p50.00"
 tested-groups:
 - hash
+priority: 5
@@ -31,3 +31,4 @@ exporter:
   - $."ALL STATS".Totals."Percentile Latencies"."p50.00"
 tested-groups:
 - hash
+priority: 5
@@ -31,3 +31,4 @@ exporter:
   - $."ALL STATS".Totals."Percentile Latencies"."p50.00"
 tested-groups:
 - hash
+priority: 5
@@ -31,3 +31,4 @@ exporter:
   - $."ALL STATS".Totals."Percentile Latencies"."p50.00"
 tested-groups:
 - hash
+priority: 5
@@ -40,3 +40,4 @@ exporter:
   - $."ALL STATS".Totals."Latency"
   - $."ALL STATS".Totals."Misses/sec"
   - $."ALL STATS".Totals."Percentile Latencies"."p50.00"
+priority: 33
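With these per-file additions, the hash-focused suites carry `priority: 5` while the final suite carries `priority: 33`; under the runner change above, a run with `--tests-priority-upper-limit 10`, for example, would execute the priority-5 suites and skip the priority-33 one, since its value sits above the limit.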