From 707a7aa581daac1523e19fa342f4bba9692bce4a Mon Sep 17 00:00:00 2001
From: Rishabh Singh
Date: Thu, 26 Oct 2023 12:37:30 -0700
Subject: [PATCH] Add support for optional benchmark command line args (#4173)

Signed-off-by: Rishabh Singh
---
 .../benchmark_test/benchmark_args.py          | 14 ++++
 .../benchmark_test/benchmark_test_cluster.py  |  3 +-
 .../benchmark_test/benchmark_test_suite.py    |  9 +++
 .../test_benchmark_args.py                    | 12 +++
 .../test_benchmark_test_cluster.py            |  3 +-
 .../test_benchmark_test_cluster_min.py        |  3 +-
 .../test_benchmark_test_suite.py              | 78 ++++++++++++++++---
 7 files changed, 109 insertions(+), 13 deletions(-)

diff --git a/src/test_workflow/benchmark_test/benchmark_args.py b/src/test_workflow/benchmark_test/benchmark_args.py
index a57b0395ad..ec054b8944 100644
--- a/src/test_workflow/benchmark_test/benchmark_args.py
+++ b/src/test_workflow/benchmark_test/benchmark_args.py
@@ -40,6 +40,9 @@ class BenchmarkArgs:
     enable_remote_store: bool
     workload: str
     workload_params: str
+    test_procedure: str
+    exclude_tasks: str
+    include_tasks: str
     benchmark_config: IO
     user_tag: str
     target_hosts: str
@@ -97,6 +100,14 @@ def __init__(self) -> None:
         parser.add_argument("--workload-params", dest="workload_params",
                             help="With this parameter you can inject variables into workloads. Parameters differs "
                                  "for each workload type. e.g., --workload-params \"number_of_replicas:1,number_of_shards:5\"")
+        parser.add_argument("--test-procedure", dest="test_procedure",
+                            help="Defines a test procedure to use. You can find a list of test procedures by using "
+                                 "opensearch-benchmark list test-procedures. E.g. --test-procedure=\"ingest-only\"")
+        parser.add_argument("--exclude-tasks", dest="exclude_tasks",
+                            help="Defines a comma-separated list of test procedure tasks not to run. E.g. --exclude-tasks=\"index-append\"")
+        parser.add_argument("--include-tasks", dest="include_tasks",
+                            help="Defines a comma-separated list of test procedure tasks to run. By default, all tasks listed in a test procedure array are run."
+                                 " E.g. --include-tasks=\"scroll\"")
         parser.add_argument("--capture-node-stat", dest="telemetry", action="append_const", const="node-stats",
                             help="Enable opensearch-benchmark to capture node stat metrics such as cpu, mem, jvm etc as well.")
         parser.add_argument("--capture-segment-replication-stat", dest="telemetry", action="append_const",
@@ -130,6 +141,9 @@ def __init__(self) -> None:
         self.data_instance_type = args.data_instance_type if args.data_instance_type else None
         self.workload = args.workload
         self.workload_params = args.workload_params if args.workload_params else None
+        self.test_procedure = args.test_procedure if args.test_procedure else None
+        self.exclude_tasks = args.exclude_tasks if args.exclude_tasks else None
+        self.include_tasks = args.include_tasks if args.include_tasks else None
         self.benchmark_config = args.benchmark_config if args.benchmark_config else None
         self.user_tag = args.user_tag if args.user_tag else None
         self.additional_config = json.dumps(args.additional_config) if args.additional_config is not None else None
diff --git a/src/test_workflow/benchmark_test/benchmark_test_cluster.py b/src/test_workflow/benchmark_test/benchmark_test_cluster.py
index 027a052123..b5071685f0 100644
--- a/src/test_workflow/benchmark_test/benchmark_test_cluster.py
+++ b/src/test_workflow/benchmark_test/benchmark_test_cluster.py
@@ -160,7 +160,8 @@ def setup_cdk_params(self, config: dict) -> dict:
             "jvmSysProps": self.args.jvm_sys_props,
             "use50PercentHeap": str(self.args.use_50_percent_heap).lower(),
             "isInternal": config["Constants"]["isInternal"],
-            "enableRemoteStore": str(self.args.enable_remote_store).lower()
+            "enableRemoteStore": str(self.args.enable_remote_store).lower(),
+            "customRoleArn": config["Constants"]["IamRoleArn"]
         }
 
     @classmethod
diff --git a/src/test_workflow/benchmark_test/benchmark_test_suite.py b/src/test_workflow/benchmark_test/benchmark_test_suite.py
index 4e2788f77c..ac95a7d2f1 100644
--- a/src/test_workflow/benchmark_test/benchmark_test_suite.py
+++ b/src/test_workflow/benchmark_test/benchmark_test_suite.py
@@ -44,6 +44,15 @@ def __init__(
             logging.info(f"Workload Params are {args.workload_params}")
             self.command += f" --workload-params '{args.workload_params}'"
 
+        if self.args.test_procedure:
+            self.command += f" --test-procedure=\"{self.args.test_procedure}\""
+
+        if self.args.exclude_tasks:
+            self.command += f" --exclude-tasks=\"{self.args.exclude_tasks}\""
+
+        if self.args.include_tasks:
+            self.command += f" --include-tasks=\"{self.args.include_tasks}\""
+
         if self.args.user_tag:
             user_tag = f"--user-tag=\"{args.user_tag}\""
             self.command += f" {user_tag}"
diff --git a/tests/tests_test_workflow/test_benchmark_args.py b/tests/tests_test_workflow/test_benchmark_args.py
index a813d2444c..6a39ddf8df 100644
--- a/tests/tests_test_workflow/test_benchmark_args.py
+++ b/tests/tests_test_workflow/test_benchmark_args.py
@@ -85,3 +85,15 @@ def test_benchmark_without_distribution_url_and_without_manifest(self) -> None:
         with self.assertRaises(Exception) as context:
             BenchmarkArgs()
         self.assertEqual(str(context.exception), "Please provide either --bundle-manifest or --distribution-url to run the performance test.")
+
+    @patch("argparse._sys.argv", [ARGS_PY, "--bundle-manifest", TEST_DIST_MANIFEST_PATH, "--config", TEST_CONFIG_PATH, "--workload", "test",
+                                  "--test-procedure", 'test-procedure,another-test-procedure', "--exclude-tasks", "index,type:search,tag:setup",
+                                  "--include-tasks", "index,type:search,tag:setup"])
+    def test_benchmark_with_optional_benchmark_parameters(self) -> None:
+        test_args = BenchmarkArgs()
+        self.assertEqual(test_args.test_procedure,
+                         'test-procedure,another-test-procedure')
+        self.assertEqual(test_args.exclude_tasks,
+                         'index,type:search,tag:setup')
+        self.assertEqual(test_args.include_tasks,
+                         'index,type:search,tag:setup')
diff --git a/tests/tests_test_workflow/test_benchmark_workflow/benchmark_test/test_benchmark_test_cluster.py b/tests/tests_test_workflow/test_benchmark_workflow/benchmark_test/test_benchmark_test_cluster.py
index ffd01a22be..ac0b755314 100644
--- a/tests/tests_test_workflow/test_benchmark_workflow/benchmark_test/test_benchmark_test_cluster.py
+++ b/tests/tests_test_workflow/test_benchmark_workflow/benchmark_test/test_benchmark_test_cluster.py
@@ -33,7 +33,7 @@ def setUp(self, args: Optional[Mock] = None, use_manifest: bool = True) -> None:
         self.security = True
         self.config = {"Constants": {"SecurityGroupId": "sg-00000000", "VpcId": "vpc-12345", "AccountId": "12345678",
                                      "Region": "us-west-2", "Role": "role-arn", "serverAccessType": "prefixList", "restrictServerAccessTo": "pl-1234",
-                                     "isInternal": "true"}}
+                                     "isInternal": "true", "IamRoleArn": "arn:aws:iam::12344567890:role/customRole"}}
         self.benchmark_test_cluster = BenchmarkTestCluster(bundle_manifest=self.manifest, config=self.config, args=self.args, current_workspace="current_workspace")
 
     @patch("test_workflow.benchmark_test.benchmark_test_cluster.BenchmarkTestCluster.wait_for_processing")
@@ -78,6 +78,7 @@ def test_create_single_node_insecure(self, mock_wait_for_processing: Optional[Mo
         self.assertEqual(self.benchmark_test_cluster.port, 80)
         self.assertTrue("securityDisabled=true" in self.benchmark_test_cluster.params)
         self.assertTrue("dataInstanceType=r5.4xlarge" in self.benchmark_test_cluster.params)
+        self.assertTrue("customRoleArn=arn:aws:iam::12344567890:role/customRole" in self.benchmark_test_cluster.params)
 
     @patch("test_workflow.benchmark_test.benchmark_test_cluster.BenchmarkTestCluster.wait_for_processing")
     def test_create_multi_node(self, mock_wait_for_processing: Optional[Mock]) -> None:
diff --git a/tests/tests_test_workflow/test_benchmark_workflow/benchmark_test/test_benchmark_test_cluster_min.py b/tests/tests_test_workflow/test_benchmark_workflow/benchmark_test/test_benchmark_test_cluster_min.py
index 2dd88826d0..03b7ca660b 100644
--- a/tests/tests_test_workflow/test_benchmark_workflow/benchmark_test/test_benchmark_test_cluster_min.py
+++ b/tests/tests_test_workflow/test_benchmark_workflow/benchmark_test/test_benchmark_test_cluster_min.py
@@ -31,7 +31,7 @@ def setUp(self, args: Optional[Mock] = None) -> None:
         self.security = True
         self.config = {"Constants": {"SecurityGroupId": "sg-00000000", "VpcId": "vpc-12345", "AccountId": "12345678",
                                      "Region": "us-west-2", "Role": "role-arn", "serverAccessType": "prefixList", "restrictServerAccessTo": "pl-1234",
-                                     "isInternal": "true"}}
+                                     "isInternal": "true", "IamRoleArn": ""}}
         self.benchmark_test_cluster = BenchmarkTestCluster(bundle_manifest=self.manifest, config=self.config, args=self.args, current_workspace="current_workspace")
 
     @patch("test_workflow.benchmark_test.benchmark_test_cluster.BenchmarkTestCluster.wait_for_processing")
@@ -47,3 +47,4 @@ def test_create_min_cluster(self, mock_wait_for_processing: Optional[Mock]) -> N
         self.assertTrue("minDistribution=true" in self.benchmark_test_cluster.params)
         self.assertTrue("distributionUrl=https://artifacts.opensearch.org/snapshots/core/opensearch/2.9.0-SNAPSHOT/"
                         "opensearch-min-2.9.0-SNAPSHOT-linux-arm64-latest.tar.gz" in self.benchmark_test_cluster.params)
+        self.assertTrue("customRoleArn" not in self.benchmark_test_cluster.params)
diff --git a/tests/tests_test_workflow/test_benchmark_workflow/benchmark_test/test_benchmark_test_suite.py b/tests/tests_test_workflow/test_benchmark_workflow/benchmark_test/test_benchmark_test_suite.py
index 44895e0d73..2cde6b4e1c 100644
--- a/tests/tests_test_workflow/test_benchmark_workflow/benchmark_test/test_benchmark_test_suite.py
+++ b/tests/tests_test_workflow/test_benchmark_workflow/benchmark_test/test_benchmark_test_suite.py
@@ -6,22 +6,24 @@
 # compatible open source license.
 
 import unittest
-from typing import Optional
+from typing import Any
 from unittest.mock import Mock, patch
 
 from test_workflow.benchmark_test.benchmark_test_suite import BenchmarkTestSuite
 
 
 class TestBenchmarkTestSuite(unittest.TestCase):
-    def setUp(self, config: Optional[str] = None, tag: Optional[str] = None,
-              workload_params: Optional[str] = None, telemetry: Optional[list] = None, telemetry_params: Optional[str] = None) -> None:
+    def setUp(self, **kwargs: Any) -> None:
         self.args = Mock()
         self.args.workload = "nyc_taxis"
-        self.args.benchmark_config = config
-        self.args.user_tag = tag
-        self.args.workload_params = workload_params
-        self.args.telemetry = telemetry
-        self.args.telemetry_params = telemetry_params
+        self.args.benchmark_config = kwargs['config'] if 'config' in kwargs else None
+        self.args.user_tag = kwargs['tags'] if 'tags' in kwargs else None
+        self.args.workload_params = kwargs['workload_params'] if 'workload_params' in kwargs else None
+        self.args.telemetry = kwargs['telemetry'] if 'telemetry' in kwargs else None
+        self.args.telemetry_params = kwargs['telemetry_params'] if 'telemetry_params' in kwargs else None
+        self.args.test_procedure = kwargs['test_procedure'] if 'test_procedure' in kwargs else None
+        self.args.exclude_tasks = kwargs['exclude_tasks'] if 'exclude_tasks' in kwargs else None
+        self.args.include_tasks = kwargs['include_tasks'] if 'include_tasks' in kwargs else None
         self.endpoint = "abc.com"
         self.benchmark_test_suite = BenchmarkTestSuite(endpoint=self.endpoint, security=False, args=self.args)
 
@@ -45,7 +47,9 @@ def test_execute_security_enabled(self) -> None:
                          'verify_certs:false,basic_auth_user:\'admin\',basic_auth_password:\'admin\'"')
 
     def test_execute_default_with_optional_args(self) -> None:
-        TestBenchmarkTestSuite.setUp(self, "/home/test/benchmark.ini", "key1:value1,key2:value2", "{\"number_of_replicas\":\"1\"}", ['node-stats', 'test'], "{\"example_key\":\"example_value\"}")
+        TestBenchmarkTestSuite.setUp(self, config="/home/test/benchmark.ini", tags="key1:value1,key2:value2",
+                                     workload_params="{\"number_of_replicas\":\"1\"}", telemetry=['node-stats', 'test'],
+                                     telemetry_params="{\"example_key\":\"example_value\"}")
         with patch("subprocess.check_call") as mock_check_call:
             self.benchmark_test_suite.execute()
             self.assertEqual(mock_check_call.call_count, 1)
@@ -59,7 +63,8 @@ def test_execute_default_with_optional_args(self) -> None:
                          '--client-options="timeout:300"')
 
     def test_execute_default_with_no_telemetry_params(self) -> None:
-        TestBenchmarkTestSuite.setUp(self, "/home/test/benchmark.ini", "key1:value1,key2:value2", "{\"number_of_replicas\":\"1\"}", ['node-stats', 'test'])
+        TestBenchmarkTestSuite.setUp(self, config="/home/test/benchmark.ini", tags="key1:value1,key2:value2",
+                                     workload_params="{\"number_of_replicas\":\"1\"}", telemetry=['node-stats', 'test'])
         with patch("subprocess.check_call") as mock_check_call:
             self.benchmark_test_suite.execute()
             self.assertEqual(mock_check_call.call_count, 1)
@@ -71,3 +76,56 @@ def test_execute_default_with_no_telemetry_params(self) -> None:
                          '--workload-params \'{"number_of_replicas":"1"}\' '
                          '--user-tag="key1:value1,key2:value2" --telemetry node-stats,test, '
                          '--client-options="timeout:300"')
+
+    def test_execute_with_test_procedure_params(self) -> None:
+        TestBenchmarkTestSuite.setUp(self, config="/home/test/benchmark.ini", tags="key1:value1,key2:value2",
+                                     workload_params="{\"number_of_replicas\":\"1\"}", test_procedure="test-proc1,test-proc2")
+        with patch("subprocess.check_call") as mock_check_call:
+            self.benchmark_test_suite.execute()
+            self.assertEqual(mock_check_call.call_count, 1)
+            self.assertEqual(self.benchmark_test_suite.command, 'docker run --rm -v /home/test/benchmark.ini:'
+                                                                '/opensearch-benchmark/.benchmark/benchmark.ini '
+                                                                'opensearchproject/opensearch-benchmark:latest execute-test '
+                                                                '--workload=nyc_taxis '
+                                                                '--pipeline=benchmark-only --target-hosts=abc.com '
+                                                                '--workload-params \'{"number_of_replicas":"1"}\' '
+                                                                '--test-procedure="test-proc1,test-proc2" '
+                                                                '--user-tag="key1:value1,key2:value2" '
+                                                                '--client-options="timeout:300"')
+
+    def test_execute_with_include_exclude_params(self) -> None:
+        TestBenchmarkTestSuite.setUp(self, config="/home/test/benchmark.ini", tags="key1:value1,key2:value2",
+                                     workload_params="{\"number_of_replicas\":\"1\"}", include_tasks="task1,type:index",
+                                     exclude_tasks="task2,type:search")
+        with patch("subprocess.check_call") as mock_check_call:
+            self.benchmark_test_suite.execute()
+            self.assertEqual(mock_check_call.call_count, 1)
+            self.assertEqual(self.benchmark_test_suite.command, 'docker run --rm -v /home/test/benchmark.ini:'
+                                                                '/opensearch-benchmark/.benchmark/benchmark.ini '
+                                                                'opensearchproject/opensearch-benchmark:latest execute-test '
+                                                                '--workload=nyc_taxis '
+                                                                '--pipeline=benchmark-only --target-hosts=abc.com '
+                                                                '--workload-params \'{"number_of_replicas":"1"}\' '
+                                                                '--exclude-tasks="task2,type:search" '
+                                                                '--include-tasks="task1,type:index" '
+                                                                '--user-tag="key1:value1,key2:value2" '
+                                                                '--client-options="timeout:300"')
+
+    def test_execute_with_all_benchmark_optional_params(self) -> None:
+        TestBenchmarkTestSuite.setUp(self, config="/home/test/benchmark.ini", tags="key1:value1,key2:value2",
+                                     workload_params="{\"number_of_replicas\":\"1\"}", test_procedure="test-proc1,test-proc2",
+                                     include_tasks="task1,type:index", exclude_tasks="task2,type:search")
+        with patch("subprocess.check_call") as mock_check_call:
+            self.benchmark_test_suite.execute()
+            self.assertEqual(mock_check_call.call_count, 1)
+            self.assertEqual(self.benchmark_test_suite.command, 'docker run --rm -v /home/test/benchmark.ini:'
+                                                                '/opensearch-benchmark/.benchmark/benchmark.ini '
+                                                                'opensearchproject/opensearch-benchmark:latest execute-test '
+                                                                '--workload=nyc_taxis '
+                                                                '--pipeline=benchmark-only --target-hosts=abc.com '
+                                                                '--workload-params \'{"number_of_replicas":"1"}\' '
+                                                                '--test-procedure="test-proc1,test-proc2" '
+                                                                '--exclude-tasks="task2,type:search" '
+                                                                '--include-tasks="task1,type:index" '
+                                                                '--user-tag="key1:value1,key2:value2" '
+                                                                '--client-options="timeout:300"')
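
Usage note (not part of the patch): the three new flags are plain pass-throughs to opensearch-benchmark. Below is a minimal sketch of how a supplied flag ends up on the generated command, mirroring the argparse definitions and the BenchmarkTestSuite conditionals above; the standalone parser and the append_optional_flags helper are illustrative only, not code from this repository.

import argparse

# Mirror of the three optional arguments added in benchmark_args.py.
parser = argparse.ArgumentParser()
parser.add_argument("--test-procedure", dest="test_procedure")
parser.add_argument("--exclude-tasks", dest="exclude_tasks")
parser.add_argument("--include-tasks", dest="include_tasks")


def append_optional_flags(command: str, args: argparse.Namespace) -> str:
    # Same pattern as BenchmarkTestSuite: a flag is appended only when it was supplied.
    if args.test_procedure:
        command += f" --test-procedure=\"{args.test_procedure}\""
    if args.exclude_tasks:
        command += f" --exclude-tasks=\"{args.exclude_tasks}\""
    if args.include_tasks:
        command += f" --include-tasks=\"{args.include_tasks}\""
    return command


args = parser.parse_args(["--test-procedure=ingest-only", "--exclude-tasks=index-append"])
print(append_optional_flags("opensearch-benchmark execute-test --workload=nyc_taxis", args))
# opensearch-benchmark execute-test --workload=nyc_taxis --test-procedure="ingest-only" --exclude-tasks="index-append"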