forked from chromium/chromium
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add scripts to support running WPT on Chromium CI.
testing/scripts/run_wpt_tests.py is used to adapt swarming flags to WPT flags (e.g. for sharding), and also to kick off the update_wpt_output.py cleanup step. t_p/b/t/build_wpt_metadata.py is for converting Chromium TestExpectations files into WPT Metadata files for skipped tests. t_p/b/t/update_wpt_output.py is run as a cleanup step on each shard. It rewrites the output of a WPT run by inserting expected statuses from the TestExpectations file, which includes supporting flaky tests (i.e. multiple expected statuses) which WPT doesn't currently understand. Bug: 937369 Change-Id: Ic7d2c037ec545f38a1101759db6eb04db54057c6 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1660702 Commit-Queue: Luke Z <lpz@chromium.org> Reviewed-by: Dirk Pranke <dpranke@chromium.org> Reviewed-by: Robert Ma <robertma@chromium.org> Cr-Commit-Position: refs/heads/master@{#672059}
- Loading branch information
1 parent
dbbe42a
commit 46e1c59
Showing
7 changed files
with
590 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,72 @@ | ||
#!/usr/bin/env vpython | ||
# Copyright 2018 The Chromium Authors. All rights reserved. | ||
# Use of this source code is governed by a BSD-style license that can be | ||
# found in the LICENSE file. | ||
|
||
"""Runs WPT as an isolate bundle. | ||
This script maps flags supported by run_isolate_script_test.py to flags that are | ||
understood by WPT. | ||
Here's the mapping [isolate script flag] : [wpt flag] | ||
--isolated-script-test-output : --log-chromium | ||
--total-shards : --total-chunks | ||
--shard-index : --this-chunk
""" | ||
|
||
import json | ||
import os | ||
import sys | ||
|
||
import common | ||
|
||
|
||
class WPTTestAdapter(common.BaseIsolatedScriptArgsAdapter):
    """Adapts isolated-script flags to the flags understood by WPT."""

    def generate_test_output_args(self, output):
        # WPT emits Chromium-formatted JSON results via --log-chromium.
        return ['--log-chromium', output]

    def generate_sharding_args(self, total_shards, shard_index):
        # shard_index is 0-based, but WPT expects --this-chunk to be 1-based.
        wpt_chunk = shard_index + 1
        return [
            '--total-chunks=%d' % total_shards,
            '--this-chunk=%d' % wpt_chunk,
        ]

    def add_extra_arguments(self, parser):
        # These args are forwarded to the cleanup step, which rewrites the
        # output generated by WPT to include missing features, such as
        # flakiness expectations.
        parser.add_argument("--old-json-output-file-path")
        parser.add_argument("--new-json-output-dir")
        parser.add_argument("--new-json-output-filename")

    def clean_up_after_test_run(self):
        # Rewrite the WPT output on this shard via update_wpt_output.py.
        update_script = os.path.join(common.SRC_DIR, 'third_party', 'blink',
                                     'tools', 'update_wpt_output.py')
        common.run_command([
            sys.executable,
            update_script,
            '--old-json-output-file-path',
            self.options.old_json_output_file_path,
            '--new-json-output-dir', self.options.new_json_output_dir,
            '--new-json-output-filename', self.options.new_json_output_filename,
        ])
|
||
|
||
def main():
    """Runs the WPT suite through the adapter and returns its exit code."""
    return WPTTestAdapter().run_test()
|
||
|
||
# This is not really a "script test" so does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
    """Writes an empty JSON list of compile targets to args.output."""
    args.output.write(json.dumps([]))
|
||
|
||
if __name__ == '__main__':
    # Conform minimally to the protocol defined by ScriptTest.
    if 'compile_targets' in sys.argv:
        funcs = {'run': None, 'compile_targets': main_compile_targets}
        sys.exit(common.run_script(sys.argv[1:], funcs))
    sys.exit(main())
127 changes: 127 additions & 0 deletions
127
third_party/blink/tools/blinkpy/w3c/wpt_metadata_builder.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,127 @@ | ||
# Copyright 2019 The Chromium Authors. All rights reserved. | ||
# Use of this source code is governed by a BSD-style license that can be | ||
# found in the LICENSE file. | ||
"""Converts Chromium Test Expectations into WPT Metadata ini files. | ||
This script loads TestExpectations for any WPT test and creates the metadata | ||
files corresponding to the expectation. This script runs as a BUILD action rule. | ||
The output is then bundled into the WPT isolate package to be shipped to bots | ||
running the WPT test suite. | ||
""" | ||
|
||
import argparse
import logging
import os
import shutil
|
||
from blinkpy.common.system.log_utils import configure_logging | ||
from blinkpy.web_tests.models import test_expectations | ||
|
||
_log = logging.getLogger(__name__)


class WPTMetadataBuilder(object):
    """Translates skipped-test expectations into WPT metadata .ini files."""

    def __init__(self, expectations):
        """
        Args:
            expectations: a blinkpy.web_tests.models.test_expectations.TestExpectations object
        """
        self.expectations = expectations
        # Destination directory for generated metadata; set from the
        # --metadata-output-dir flag in run().
        self.metadata_output_dir = ""

    def run(self, args=None):
        """Main entry point to parse flags and execute the script.

        Args:
            args: command-line arguments to parse; defaults to sys.argv[1:].

        Returns:
            0 on success (errors surface as exceptions).
        """
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument("--metadata-output-dir",
                            help="The directory to output the metadata files into.")
        parser.add_argument('-v', '--verbose', action='store_true',
                            help='More verbose logging.')
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        self.metadata_output_dir = args.metadata_output_dir
        self._build_metadata_and_write()

        return 0

    def _build_metadata_and_write(self):
        """Builds the metadata files and writes them to disk.

        Any pre-existing output directory is deleted first so stale metadata
        from a previous run cannot leak into this one.
        """
        if os.path.exists(self.metadata_output_dir):
            _log.warning("Output dir exists, deleting: %s",
                         self.metadata_output_dir)
            shutil.rmtree(self.metadata_output_dir)

        for test_name in self.get_test_names_for_metadata():
            filename, file_contents = self.get_metadata_filename_and_contents(test_name)
            # Non-WPT tests yield (None, None); nothing to write for them.
            if not filename or not file_contents:
                continue

            # Write the contents to the file name, creating intermediate
            # directories on demand.
            if not os.path.exists(os.path.dirname(filename)):
                os.makedirs(os.path.dirname(filename))
            with open(filename, "w") as metadata_file:
                metadata_file.write(file_contents)

    def get_test_names_for_metadata(self):
        """Determines which tests in the expectation file need metadata.

        Returns:
            A list of test names that need metadata.
        """
        # Only skipped tests are translated; WPT handles other statuses
        # downstream via update_wpt_output.py.
        return self.expectations.get_tests_with_result_type(
            test_expectations.SKIP)

    def get_metadata_filename_and_contents(self, test_name):
        """Determines the metadata filename and contents for the specified test.

        The metadata filename is derived from the test name but will differ if
        the expectation is for a single test or for a directory of tests. The
        contents of the metadata file will also differ for those two cases.

        Args:
            test_name: A test name from the expectation file.

        Returns:
            A pair of strings, the first is the path to the metadata file and
            the second is the contents to write to that file. Or a (None, None)
            pair if the test does not need a metadata file.
        """
        # Ignore expectations for non-WPT tests. Require the trailing slash so
        # that sibling directories (eg: external/wpt_automation) don't match,
        # and so a bare "external/wpt" can't slip through and crash below.
        if not test_name or not test_name.startswith('external/wpt/'):
            return None, None

        # Split the test name by directory. We omit the first 2 entries because
        # they are 'external' and 'wpt' and these don't exist in the WPT's test
        # names.
        test_name_parts = test_name.split("/")[2:]

        # Check if this is a test file or a test directory
        is_test_dir = test_name.endswith("/")
        metadata_filename = None
        metadata_file_contents = None
        if is_test_dir:
            # A test directory gets one metadata file called __dir__.ini and all
            # tests in that dir are skipped.
            metadata_filename = os.path.join(self.metadata_output_dir,
                                             *test_name_parts)
            metadata_filename = os.path.join(metadata_filename, "__dir__.ini")
            _log.debug("Creating a dir-wide ini file %s", metadata_filename)

            metadata_file_contents = "disabled: build_wpt_metadata.py"
        else:
            # For individual tests, we create one file per test, with the name
            # of the test in the file as well.
            test_filename = test_name_parts[-1]
            # Append `.ini` to the test filename to indicate it's the metadata
            # file.
            test_name_parts[-1] += ".ini"
            metadata_filename = os.path.join(self.metadata_output_dir,
                                             *test_name_parts)
            _log.debug("Creating a test ini file %s", metadata_filename)

            # The contents of the metadata file is two lines:
            # 1. the test name inside square brackets
            # 2. an indented line with the test status and reason
            metadata_file_contents = ("[%s]\n disabled: build_wpt_metadata.py" % test_filename)
        return metadata_filename, metadata_file_contents
72 changes: 72 additions & 0 deletions
72
third_party/blink/tools/blinkpy/w3c/wpt_metadata_builder_unittest.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,72 @@ | ||
# Copyright 2019 The Chromium Authors. All rights reserved. | ||
# Use of this source code is governed by a BSD-style license that can be | ||
# found in the LICENSE file. | ||
|
||
from collections import OrderedDict | ||
import os | ||
import unittest | ||
|
||
from blinkpy.common.host_mock import MockHost | ||
from blinkpy.web_tests.models.test_expectations import TestExpectations | ||
from blinkpy.web_tests.port.factory_mock import MockPortFactory | ||
from blinkpy.w3c.wpt_metadata_builder import WPTMetadataBuilder | ||
|
||
|
||
def _make_expectation(port, test_name, test_statuses):
    """Creates an expectation object for a single test.

    Args:
        port: the port to run against
        test_name: the name of the test
        test_statuses: the statuses of the test

    Returns:
        An expectation object with the given test and statuses.
    """
    expectation_line = "Bug(test) %s [ %s ]" % (test_name, test_statuses)
    return TestExpectations(
        port,
        tests=[test_name],
        expectations_dict=OrderedDict([("expectations", expectation_line)]))
|
||
|
||
class WPTMetadataBuilderTest(unittest.TestCase):
    """Unit tests for WPTMetadataBuilder's metadata naming and contents."""

    def setUp(self):
        # Use a mock host/port so no real filesystem or config is touched.
        self.host = MockHost()
        self.host.port_factory = MockPortFactory(self.host)
        self.port = self.host.port_factory.get()

    def test_skipped_test(self):
        """A skipped WPT test should get a test-specific metadata file."""
        test_name = "external/wpt/test.html"
        expectations = _make_expectation(self.port, test_name, "SKIP")
        metadata_builder = WPTMetadataBuilder(expectations)
        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name)
        self.assertEqual("test.html.ini", filename)
        self.assertEqual("[test.html]\n disabled: build_wpt_metadata.py", contents)

    def test_skipped_directory(self):
        """A skipped WPT directory should get a dir-wide metadata file."""
        test_name = "external/wpt/test_dir/"
        expectations = _make_expectation(self.port, test_name, "SKIP")
        metadata_builder = WPTMetadataBuilder(expectations)
        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name)
        self.assertEqual(os.path.join("test_dir", "__dir__.ini"), filename)
        self.assertEqual("disabled: build_wpt_metadata.py", contents)

    def test_non_wpt_test(self):
        """A non-WPT test should not get any metadata."""
        test_name = "some/other/test.html"
        expectations = _make_expectation(self.port, test_name, "SKIP")
        metadata_builder = WPTMetadataBuilder(expectations)
        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name)
        self.assertIsNone(filename)
        self.assertIsNone(contents)

    def test_wpt_test_not_skipped(self):
        """A WPT test that is not skipped should not get any metadata."""
        test_name = "external/wpt/test.html"
        expectations = _make_expectation(self.port, test_name, "TIMEOUT")
        metadata_builder = WPTMetadataBuilder(expectations)
        test_names = metadata_builder.get_test_names_for_metadata()
        self.assertFalse(test_names)
Oops, something went wrong.