From 7afa882f73f3c2e3f27cb5f1742b7ab2ced3a66b Mon Sep 17 00:00:00 2001
From: Vivien Nicolas
Date: Tue, 11 Apr 2023 18:33:52 +0200
Subject: [PATCH] Add some options to dump the message expected by the Test Harness

---
 .../matter_yamltests/hooks.py                 | 10 +++-
 .../matter_yamltests/parser.py                |  8 ++-
 .../matter_yamltests/runner.py                |  4 +-
 .../matter_yamltests/yaml_loader.py           |  5 +-
 .../py_matter_yamltests/test_yaml_loader.py   | 49 ++++++++++---------
 scripts/tests/yaml/chiptool.py                |  1 +
 scripts/tests/yaml/runner.py                  | 14 +++---
 scripts/tests/yaml/tests_logger.py            | 34 ++++++++++---
 8 files changed, 83 insertions(+), 42 deletions(-)

diff --git a/scripts/py_matter_yamltests/matter_yamltests/hooks.py b/scripts/py_matter_yamltests/matter_yamltests/hooks.py
index f471af83408589..500904e033f3de 100644
--- a/scripts/py_matter_yamltests/matter_yamltests/hooks.py
+++ b/scripts/py_matter_yamltests/matter_yamltests/hooks.py
@@ -98,12 +98,15 @@ def stop(self, duration: int):
         """
         pass
 
-    def test_start(self, name: str, count: int):
+    def test_start(self, filename: str, name: str, count: int):
         """
         This method is called when the runner starts running a single test.
 
         Parameters
         ----------
+        filename: str
+            The name of the file containing the test that is starting.
+
         name: str
             The name of the test that is starting.
 
@@ -126,7 +129,7 @@ def test_stop(self, exception: Exception, duration: int):
         """
         pass
 
-    def step_skipped(self, name: str):
+    def step_skipped(self, name: str, expression: str):
         """
         This method is called when running a step is skipped.
 
@@ -134,6 +137,9 @@ def step_skipped(self, name: str):
         ----------
         name: str
             The name of the test step that is skipped.
+
+        expression: str
+            The PICS expression that caused the test step to be skipped.
         """
         pass
 
diff --git a/scripts/py_matter_yamltests/matter_yamltests/parser.py b/scripts/py_matter_yamltests/matter_yamltests/parser.py
index 8f9b38a85f63f8..1affdf21fd6564 100644
--- a/scripts/py_matter_yamltests/matter_yamltests/parser.py
+++ b/scripts/py_matter_yamltests/matter_yamltests/parser.py
@@ -180,6 +180,7 @@ def __init__(self, test: dict, config: dict, definitions: SpecDefinitions, pics_
         self.attribute = _value_or_none(test, 'attribute')
         self.event = _value_or_none(test, 'event')
         self.endpoint = _value_or_config(test, 'endpoint', config)
+        self.pics = _value_or_none(test, 'PICS')
         self.is_pics_enabled = pics_checker.check(_value_or_none(test, 'PICS'))
         self.identity = _value_or_none(test, 'identity')
 
@@ -565,6 +566,10 @@ def wait_for(self):
     def event_number(self):
         return self._test.event_number
 
+    @property
+    def pics(self):
+        return self._test.pics
+
     def post_process_response(self, received_responses):
         result = PostProcessResponseResult()
 
@@ -955,11 +960,12 @@ class TestParserConfig:
 class TestParser:
     def __init__(self, test_file: str, parser_config: TestParserConfig = TestParserConfig()):
         yaml_loader = YamlLoader()
-        name, pics, config, tests = yaml_loader.load(test_file)
+        filename, name, pics, config, tests = yaml_loader.load(test_file)
 
         self.__apply_config_override(config, parser_config.config_override)
         self.__apply_legacy_config(config)
 
+        self.filename = filename
         self.name = name
         self.PICS = pics
         self.tests = YamlTests(
diff --git a/scripts/py_matter_yamltests/matter_yamltests/runner.py b/scripts/py_matter_yamltests/matter_yamltests/runner.py
index 4c5db7a9d2b8d8..008945d03163af 100644
--- a/scripts/py_matter_yamltests/matter_yamltests/runner.py
+++ b/scripts/py_matter_yamltests/matter_yamltests/runner.py
@@ -157,12 +157,12 @@ async def _run(self, parser: TestParser, config: TestRunnerConfig):
             await self.start()
 
             hooks = config.hooks
-            hooks.test_start(parser.name, parser.tests.count)
+            hooks.test_start(parser.filename, parser.name, parser.tests.count)
 
             test_duration = 0
             for idx, request in enumerate(parser.tests):
                 if not request.is_pics_enabled:
-                    hooks.step_skipped(request.label)
+                    hooks.step_skipped(request.label, request.pics)
                     continue
                 elif not config.adapter:
                     hooks.step_start(request.label)
diff --git a/scripts/py_matter_yamltests/matter_yamltests/yaml_loader.py b/scripts/py_matter_yamltests/matter_yamltests/yaml_loader.py
index 543de252dc3820..388ac1d4560a9e 100644
--- a/scripts/py_matter_yamltests/matter_yamltests/yaml_loader.py
+++ b/scripts/py_matter_yamltests/matter_yamltests/yaml_loader.py
@@ -25,6 +25,7 @@
 except:
     from yaml import SafeLoader
 
+import os
 import yaml
 
 
@@ -32,12 +33,14 @@ class YamlLoader:
     """This class loads a file from the disk and validates that the content is a well formed yaml test."""
 
     def load(self, yaml_file: str) -> tuple[str, Union[list, str], dict, list]:
+        filename = ''
         name = ''
         pics = None
         config = {}
         tests = []
 
         if yaml_file:
+            filename = os.path.splitext(os.path.basename(yaml_file))[0]
             with open(yaml_file) as f:
                 loader = SafeLoader
                 add_yaml_support_for_scientific_notation_without_dot(loader)
@@ -50,7 +53,7 @@ def load(self, yaml_file: str) -> tuple[str, Union[list, str], dict, list]:
                 config = content.get('config', {})
                 tests = content.get('tests', [])
 
-        return (name, pics, config, tests)
+        return (filename, name, pics, config, tests)
 
     def __check_content(self, content):
         schema = {
diff --git a/scripts/py_matter_yamltests/test_yaml_loader.py b/scripts/py_matter_yamltests/test_yaml_loader.py
index d67dd062667ba1..1e748e317a3082 100644
--- a/scripts/py_matter_yamltests/test_yaml_loader.py
+++ b/scripts/py_matter_yamltests/test_yaml_loader.py
@@ -66,7 +66,8 @@ def test_missing_file(self):
 
         content = None
 
-        name, pics, config, tests = load(content)
+        filename, name, pics, config, tests = load(content)
+        self.assertEqual(filename, '')
         self.assertEqual(name, '')
         self.assertEqual(pics, None)
         self.assertEqual(config, {})
@@ -77,7 +78,8 @@ def test_empty_file(self):
 
         content = ''
 
-        name, pics, config, tests = load(content)
+        filename, name, pics, config, tests = load(content)
+        self.assertEqual(filename, '')
         self.assertEqual(name, '')
         self.assertEqual(pics, None)
         self.assertEqual(config, {})
@@ -99,7 +101,7 @@ def test_key_name(self):
         name: Test Name
         '''
 
-        name, _, _, _ = load(content)
+        _, name, _, _, _ = load(content)
         self.assertEqual(name, 'Test Name')
 
     def test_key_name_wrong_values(self):
@@ -117,7 +119,7 @@ def test_key_pics_string(self):
         PICS: OO.S
         '''
 
-        _, pics, _, _ = load(content)
+        _, _, pics, _, _ = load(content)
         self.assertEqual(pics, 'OO.S')
 
     def test_key_pics_list(self):
@@ -129,7 +131,7 @@ def test_key_pics_list(self):
             - OO.C
         '''
 
-        _, pics, _, _ = load(content)
+        _, _, pics, _, _ = load(content)
         self.assertEqual(pics, ['OO.S', 'OO.C'])
 
     def test_key_pics_wrong_values(self):
@@ -149,7 +151,7 @@ def test_key_config(self):
             name2: value2
         '''
 
-        _, _, config, _ = load(content)
+        _, _, _, config, _ = load(content)
         self.assertEqual(config, {'name': 'value', 'name2': 'value2'})
 
     def test_key_config_wrong_values(self):
@@ -169,7 +171,7 @@ def test_key_tests(self):
             - label: Test2
         '''
 
-        _, _, _, tests = load(content)
+        _, _, _, _, tests = load(content)
         self.assertEqual(tests, [{'label': 'Test1'}, {'label': 'Test2'}])
 
     def test_key_tests_wrong_values(self):
@@ -202,7 +204,7 @@ def test_key_tests_step_bool_keys(self):
 
         wrong_values = self._get_wrong_values([bool], spaces=6)
         for key in keys:
-            _, _, _, tests = load(content.format(key=key, value=True))
+            _, _, _, _, tests = load(content.format(key=key, value=True))
             self.assertEqual(tests, [{key: True}])
 
             for value in wrong_values:
@@ -232,7 +234,7 @@ def test_key_tests_step_str_keys(self):
 
         wrong_values = self._get_wrong_values([str], spaces=6)
         for key in keys:
-            _, _, _, tests = load(content.format(key=key, value='a string'))
+            _, _, _, _, tests = load(content.format(key=key, value='a string'))
             self.assertEqual(tests, [{key: 'a string'}])
 
             for value in wrong_values:
@@ -256,7 +258,7 @@ def test_key_tests_step_int_keys(self):
 
         wrong_values = self._get_wrong_values([int], spaces=6)
         for key in keys:
-            _, _, _, tests = load(content.format(key=key, value=1))
+            _, _, _, _, tests = load(content.format(key=key, value=1))
             self.assertEqual(tests, [{key: 1}])
 
             for value in wrong_values:
@@ -276,7 +278,8 @@ def test_key_tests_step_dict_keys(self):
                        '      value: True\n')
         wrong_values = self._get_wrong_values([dict], spaces=6)
         for key in keys:
-            _, _, _, tests = load(content.format(key=key, value=valid_value))
+            _, _, _, _, tests = load(
+                content.format(key=key, value=valid_value))
             self.assertEqual(tests, [{key: {'value': True}}])
 
             for value in wrong_values:
@@ -291,12 +294,12 @@ def test_key_tests_step_response_key(self):
 
         value = ('\n'
                  '      value: True\n')
-        _, _, _, tests = load(content.format(value=value))
+        _, _, _, _, tests = load(content.format(value=value))
         self.assertEqual(tests, [{'response': {'value': True}}])
 
         value = ('\n'
                  '      - value: True\n')
-        _, _, _, tests = load(content.format(value=value))
+        _, _, _, _, tests = load(content.format(value=value))
         self.assertEqual(tests, [{'response': [{'value': True}]}])
 
         wrong_values = self._get_wrong_values([dict, list], spaces=6)
@@ -310,10 +313,10 @@ def test_key_tests_step_event_number_key(self):
         content = ('tests:\n'
                    '  - eventNumber: {value}')
 
-        _, _, _, tests = load(content.format(value=1))
+        _, _, _, _, tests = load(content.format(value=1))
         self.assertEqual(tests, [{'eventNumber': 1}])
 
-        _, _, _, tests = load(content.format(value='TestKey'))
+        _, _, _, _, tests = load(content.format(value='TestKey'))
         self.assertEqual(tests, [{'eventNumber': 'TestKey'}])
 
         wrong_values = self._get_wrong_values([str, int], spaces=6)
@@ -328,7 +331,7 @@ def test_key_tests_step_verification_key(self):
                    '  - verification: {value}\n'
                    '    disabled: true')
 
-        _, _, _, tests = load(content.format(value='Test Sentence'))
+        _, _, _, _, tests = load(content.format(value='Test Sentence'))
         self.assertEqual(
             tests, [{'verification': 'Test Sentence', 'disabled': True}])
 
@@ -392,7 +395,7 @@ def test_key_tests_step_rule_step_with_verification_should_be_disabled_or_intera
              disabled: true
         '''
 
-        _, _, _, tests = load(content)
+        _, _, _, _, tests = load(content)
         self.assertEqual(tests, [
                          {'label': 'A Test Name', 'verification': 'A verification sentence', 'disabled': True}])
 
@@ -412,7 +415,7 @@ def test_key_tests_step_rule_step_with_verification_should_be_disabled_or_intera
              command: UserPrompt
         '''
 
-        _, _, _, tests = load(content)
+        _, _, _, _, tests = load(content)
         self.assertEqual(tests, [
                          {'label': 'A Test Name', 'verification': 'A verification sentence', 'command': 'UserPrompt'}])
 
@@ -427,7 +430,7 @@ def test_key_tests_step_response_key_values_key(self):
                    '  - response:\n'
                    '      values: {value}')
 
-        _, _, _, tests = load(content.format(value=[]))
+        _, _, _, _, tests = load(content.format(value=[]))
         self.assertEqual(tests, [{'response': {'values': []}}])
 
         wrong_values = self._get_wrong_values([list], spaces=8)
@@ -442,7 +445,7 @@ def test_key_tests_step_response_key_error_key(self):
                    '  - response:\n'
                    '      error: {value}')
 
-        _, _, _, tests = load(content.format(value='AnError'))
+        _, _, _, _, tests = load(content.format(value='AnError'))
         self.assertEqual(tests, [{'response': {'error': 'AnError'}}])
 
         wrong_values = self._get_wrong_values([str], spaces=8)
@@ -457,7 +460,7 @@ def test_key_tests_step_response_key_cluster_error_key(self):
                    '  - response:\n'
                    '      clusterError: {value}')
 
-        _, _, _, tests = load(content.format(value=1))
+        _, _, _, _, tests = load(content.format(value=1))
         self.assertEqual(tests, [{'response': {'clusterError': 1}}])
 
         wrong_values = self._get_wrong_values([int], spaces=8)
@@ -472,7 +475,7 @@ def test_key_tests_step_response_key_constraints_key(self):
                    '  - response:\n'
                    '      constraints: {value}')
 
-        _, _, _, tests = load(content.format(value={}))
+        _, _, _, _, tests = load(content.format(value={}))
         self.assertEqual(tests, [{'response': {'constraints': {}}}])
 
         wrong_values = self._get_wrong_values([dict], spaces=8)
@@ -487,7 +490,7 @@ def test_key_tests_step_response_key_save_as_key(self):
                    '  - response:\n'
                    '      saveAs: {value}')
 
-        _, _, _, tests = load(content.format(value='AKey'))
+        _, _, _, _, tests = load(content.format(value='AKey'))
         self.assertEqual(tests, [{'response': {'saveAs': 'AKey'}}])
 
         wrong_values = self._get_wrong_values([str], spaces=8)
diff --git a/scripts/tests/yaml/chiptool.py b/scripts/tests/yaml/chiptool.py
index 9218832ea8c0f5..26442680306bc1 100755
--- a/scripts/tests/yaml/chiptool.py
+++ b/scripts/tests/yaml/chiptool.py
@@ -78,6 +78,7 @@ def chiptool_runner_options(f):
 
 
 CONTEXT_SETTINGS['ignore_unknown_options'] = True
+CONTEXT_SETTINGS['default_map']['chiptool']['use_test_harness_log_format'] = True
 
 
 @click.command(context_settings=CONTEXT_SETTINGS)
diff --git a/scripts/tests/yaml/runner.py b/scripts/tests/yaml/runner.py
index 9889976cf9e002..29de6820c9081c 100755
--- a/scripts/tests/yaml/runner.py
+++ b/scripts/tests/yaml/runner.py
@@ -71,6 +71,8 @@ def test_runner_options(f):
                      help='Show additional logs provided by the adapter.')(f)
     f = click.option('--show_adapter_logs_on_error', type=bool, default=True, show_default=True,
                      help='Show additional logs provided by the adapter on error.')(f)
+    f = click.option('--use_test_harness_log_format', type=bool, default=False, show_default=True,
+                     help='Use the test harness log format.')(f)
     return f
 
 
@@ -261,11 +263,11 @@ def dry_run(parser_group: ParserGroup):
 @runner_base.command()
 @test_runner_options
 @pass_parser_group
-def run(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_warning: bool, stop_at_number: int, show_adapter_logs: bool, show_adapter_logs_on_error: bool):
+def run(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_warning: bool, stop_at_number: int, show_adapter_logs: bool, show_adapter_logs_on_error: bool, use_test_harness_log_format: bool):
     """Run the test suite."""
     adapter = __import__(adapter, fromlist=[None]).Adapter(parser_group.builder_config.parser_config.definitions)
     runner_options = TestRunnerOptions(stop_on_error, stop_on_warning, stop_at_number)
-    runner_hooks = TestRunnerLogger(show_adapter_logs, show_adapter_logs_on_error)
+    runner_hooks = TestRunnerLogger(show_adapter_logs, show_adapter_logs_on_error, use_test_harness_log_format)
     runner_config = TestRunnerConfig(adapter, parser_group.pseudo_clusters, runner_options, runner_hooks)
 
     runner = TestRunner()
@@ -276,11 +278,11 @@ def run(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_wa
 @test_runner_options
 @websocket_runner_options
 @pass_parser_group
-def websocket(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_warning: bool, stop_at_number: int, show_adapter_logs: bool, show_adapter_logs_on_error: bool, server_address: str, server_port: int, server_path: str, server_name: str, server_arguments: str):
+def websocket(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_warning: bool, stop_at_number: int, show_adapter_logs: bool, show_adapter_logs_on_error: bool, use_test_harness_log_format: bool, server_address: str, server_port: int, server_path: str, server_name: str, server_arguments: str):
     """Run the test suite using websockets."""
     adapter = __import__(adapter, fromlist=[None]).Adapter(parser_group.builder_config.parser_config.definitions)
     runner_options = TestRunnerOptions(stop_on_error, stop_on_warning, stop_at_number)
-    runner_hooks = TestRunnerLogger(show_adapter_logs, show_adapter_logs_on_error)
+    runner_hooks = TestRunnerLogger(show_adapter_logs, show_adapter_logs_on_error, use_test_harness_log_format)
     runner_config = TestRunnerConfig(adapter, parser_group.pseudo_clusters, runner_options, runner_hooks)
 
     if server_path is None and server_name:
@@ -299,11 +301,11 @@ def websocket(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop
 @test_runner_options
 @chip_repl_runner_options
 @pass_parser_group
-def chip_repl(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_warning: bool, stop_at_number: int, show_adapter_logs: bool, show_adapter_logs_on_error: bool, runner: str, repl_storage_path: str, commission_on_network_dut: bool):
+def chip_repl(parser_group: ParserGroup, adapter: str, stop_on_error: bool, stop_on_warning: bool, stop_at_number: int, show_adapter_logs: bool, show_adapter_logs_on_error: bool, use_test_harness_log_format: bool, runner: str, repl_storage_path: str, commission_on_network_dut: bool):
     """Run the test suite using chip-repl."""
    adapter = __import__(adapter, fromlist=[None]).Adapter(parser_group.builder_config.parser_config.definitions)
     runner_options = TestRunnerOptions(stop_on_error, stop_on_warning, stop_at_number)
-    runner_hooks = TestRunnerLogger(show_adapter_logs, show_adapter_logs_on_error)
+    runner_hooks = TestRunnerLogger(show_adapter_logs, show_adapter_logs_on_error, use_test_harness_log_format)
     runner_config = TestRunnerConfig(adapter, parser_group.pseudo_clusters, runner_options, runner_hooks)
 
     runner = __import__(runner, fromlist=[None]).Runner(repl_storage_path, commission_on_network_dut)
diff --git a/scripts/tests/yaml/tests_logger.py b/scripts/tests/yaml/tests_logger.py
index 4a0bd553a03047..873fdf651fe65e 100755
--- a/scripts/tests/yaml/tests_logger.py
+++ b/scripts/tests/yaml/tests_logger.py
@@ -130,9 +130,11 @@ class RunnerStrings:
 
 
 class TestRunnerLogger(TestRunnerHooks):
-    def __init__(self, show_adapter_logs: bool = False, show_adapter_logs_on_error: bool = True):
+    def __init__(self, show_adapter_logs: bool = False, show_adapter_logs_on_error: bool = True, use_test_harness_log_format: bool = False):
         self.__show_adapter_logs = show_adapter_logs
         self.__show_adapter_logs_on_error = show_adapter_logs_on_error
+        self.__use_test_harness_log_format = use_test_harness_log_format
+        self.__filename = None
         self.__index = 1
         self.__successes = 0
         self.__warnings = 0
@@ -144,14 +146,17 @@ def __init__(self, show_adapter_logs: bool = False, show_adapter_logs_on_error:
 
     def start(self, count: int):
         print(self.__strings.start)
-        pass
 
     def stop(self, duration: int):
         print(self.__strings.stop.format(runned=self.__runned, skipped=self.__skipped, duration=duration))
 
-    def test_start(self, name: str, count: int):
+    def test_start(self, filename: str, name: str, count: int):
         print(self.__strings.test_start.format(name=click.style(name, bold=True), count=click.style(count, bold=True)))
 
+        if self.__use_test_harness_log_format:
+            self.__filename = filename
+            print(" ***** Test Start : {}".format(filename))
+
     def test_stop(self, duration: int):
         if self.__errors:
             state = _FAILURE
@@ -165,13 +170,22 @@ def test_stop(self, duration: int):
         warnings = click.style(self.__warnings, bold=True)
         print(self.__strings.test_stop.format(state=state, successes=successes, errors=errors, warnings=warnings, duration=duration))
 
-    def step_skipped(self, name: str):
+        if self.__use_test_harness_log_format and (state == _SUCCESS or state == _WARNING):
+            print(" ***** Test Complete: {}".format(self.__filename))
+
+    def step_skipped(self, name: str, expression: str):
         print(self.__strings.step_skipped.format(index=self.__index, name=_strikethrough(name)))
 
         self.__index += 1
         self.__skipped += 1
 
+        if self.__use_test_harness_log_format:
+            print(" **** Skipping: {} == false".format(expression))
+
     def step_start(self, name: str):
+        if self.__use_test_harness_log_format:
+            print(" ***** Test Step {} : {}".format(self.__index, name))
+
         print(self.__strings.step_start.format(index=self.__index, name=click.style(name, bold=True)), end='')
         # flushing stdout such that the previous print statement is visible on the screen for long running tasks.
         sys.stdout.flush()
@@ -224,6 +238,12 @@ def step_failure(self, logger, logs, duration: int, expected, received):
         self.__errors += logger.errors
         self.__runned += 1
 
+        if self.__use_test_harness_log_format:
+            for entry in logger.entries:
+                if entry.is_error():
+                    print(" ***** Test Failure : {}".format(entry.message))
+                    break
+
     def __print_step_exception(self, exception: TestStepError):
         if exception.context is None:
             return
@@ -348,7 +368,7 @@ def parser():
 @simulate.command()
 def runner():
     """Simulate running tests."""
-    runner_logger = TestRunnerLogger()
+    runner_logger = TestRunnerLogger(use_test_harness_log_format=True)
 
     class TestLogger:
         def __init__(self, entries=[], successes=0, warnings=0, errors=0):
@@ -378,12 +398,12 @@ def __init__(self, message, module='CTL', level='Others'):
     ]
 
     runner_logger.start(99)
-    runner_logger.test_start('test.yaml', 23)
+    runner_logger.test_start('Test_File', 'A test with multiple steps', 23)
     runner_logger.step_start('First Step')
     runner_logger.step_success(success_logger, empty_logs, 1234)
     runner_logger.step_start('Second Step')
    runner_logger.step_failure(error_logger, other_logs, 4321, expected_response, received_response)
-    runner_logger.step_skipped('Third Step')
+    runner_logger.step_skipped('Third Step', 'SHOULD_RUN')
     runner_logger.step_start('Fourth Step')
     runner_logger.step_unknown()
     runner_logger.test_stop(1234 + 4321)
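
For illustration, a minimal sketch of the new use_test_harness_log_format option in action, assuming it is run from scripts/tests/yaml/ so that tests_logger is importable; the file name, test name and PICS expression below are made-up example values, and the quoted markers come from the print statements added in tests_logger.py above.

# Minimal sketch (not part of the patch). Assumes scripts/tests/yaml/ is the
# working directory so that tests_logger can be imported; Test_TC_Example,
# 'An example test' and PICS.S.EXAMPLE are made-up example values.
from tests_logger import TestRunnerLogger

logger = TestRunnerLogger(use_test_harness_log_format=True)

# Prints the regular test banner plus " ***** Test Start : Test_TC_Example".
logger.test_start('Test_TC_Example', 'An example test', 2)

# Prints the skipped-step line plus " **** Skipping: PICS.S.EXAMPLE == false".
logger.step_skipped('A step gated by PICS', 'PICS.S.EXAMPLE')

# Prints the summary plus " ***** Test Complete: Test_TC_Example", since no
# step failed.
logger.test_stop(1234)

These are the same markers that chiptool.py now enables by default through CONTEXT_SETTINGS['default_map']['chiptool']['use_test_harness_log_format'] = True.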