Skip to content

Commit

Permalink
[Benchmarking] Convert to histograms in run_performance_tests.py.
Browse files Browse the repository at this point in the history
For gtests, do the conversion to histograms in run_performance_tests.py.
This is cleaner than doing it in process_perf_results.py and has
the benefit that Pinpoint will use it in the same way as well.

Bug: 923564
Change-Id: I8eddd2d0934f68d45b278494eb67990ae9a30414
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1981633
Reviewed-by: John Budorick <jbudorick@chromium.org>
Reviewed-by: John Chen <johnchen@chromium.org>
Commit-Queue: Caleb Rouleau <crouleau@chromium.org>
Cr-Commit-Position: refs/heads/master@{#727513}
  • Loading branch information
CalebRouleau authored and Commit Bot committed Dec 26, 2019
1 parent 9ba7e00 commit ae5cd53
Show file tree
Hide file tree
Showing 3 changed files with 41 additions and 33 deletions.
3 changes: 3 additions & 0 deletions testing/BUILD.gn
Original file line number Diff line number Diff line change
Expand Up @@ -22,11 +22,14 @@ group("run_perf_test") {
"//testing/scripts/common.py",
"//testing/scripts/run_performance_tests.py",
"//tools/perf/generate_legacy_perf_dashboard_json.py",
"//tools/perf/core/__init__.py",
"//tools/perf/core/path_util.py",
"//tools/perf/core/results_merger.py",
]

data_deps = [
":test_scripts_shared",
"//third_party/catapult/tracing:convert_chart_json",
]

if (is_android) {
Expand Down
38 changes: 38 additions & 0 deletions testing/scripts/run_performance_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@
PERF_DIR = os.path.join(CHROMIUM_SRC_DIR, 'tools', 'perf')
sys.path.append(PERF_DIR)
import generate_legacy_perf_dashboard_json
from core import path_util

PERF_CORE_DIR = os.path.join(PERF_DIR, 'core')
sys.path.append(PERF_CORE_DIR)
Expand All @@ -84,6 +85,32 @@
os.path.dirname(__file__), '..', '..', 'tools', 'perf', 'core',
'shard_maps')

# See https://crbug.com/923564.
# We want to switch over to using histograms for everything, but converting from
# the format output by gtest perf tests to histograms has introduced several
# problems. So, only perform the conversion on tests that are whitelisted and
# are okay with potentially encountering issues.
#
# A frozenset (rather than a list) makes the membership test used by callers
# O(1) and guards the whitelist against accidental mutation.
GTEST_CONVERSION_WHITELIST = frozenset([
    'angle_perftests',
    'base_perftests',
    'cc_perftests',
    'components_perftests',
    'dawn_perf_tests',
    'gpu_perftests',
    'latency_perftests',
    'load_library_perf_tests',
    'media_perftests',
    'net_perftests',
    'passthrough_command_buffer_perftests',
    'performance_browser_tests',
    'services_perftests',
    'tracing_perftests',
    'validating_command_buffer_perftests',
    'views_perftests',
    'viz_perftests',
    'xr.vr.common_perftests',
])


class OutputFilePaths(object):
"""Provide paths to where results outputs should be written.
Expand Down Expand Up @@ -158,6 +185,11 @@ def _get_executable(self):
else:
return './%s' % executable

@property
def executable_name(self):
    """The platform-independent name of the executable under test.

    Returns the executable name exactly as supplied via the parsed
    command-line options, with no path or extension adjustments.
    """
    name = self._options.executable
    return name

def _get_passthrough_args(self):
return self._options.passthrough_args

Expand Down Expand Up @@ -237,6 +269,12 @@ def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False):
traceback.print_exc()
return_code = 1
write_legacy_test_results(return_code, output_paths.test_results)
if command_generator.executable_name in GTEST_CONVERSION_WHITELIST:
with path_util.SysPath(path_util.GetTracingDir()):
# pylint: disable=no-name-in-module
from tracing.value import gtest_json_converter
# pylint: enable=no-name-in-module
gtest_json_converter.ConvertGtestJsonFile(output_paths.perf_results)
return return_code


Expand Down
33 changes: 0 additions & 33 deletions tools/perf/process_perf_results.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,32 +49,6 @@
DATA_FORMAT_HISTOGRAMS = 'histograms'
DATA_FORMAT_UNKNOWN = 'unknown'

# See https://crbug.com/923564.
# We want to switch over to using histograms for everything, but converting from
# the format output by gtest perf tests to histograms has introduced several
# problems. So, only perform the conversion on tests that are whitelisted and
# are okay with potentially encountering issues.
GTEST_CONVERSION_WHITELIST = [
'angle_perftests',
'base_perftests',
'cc_perftests',
'components_perftests',
'dawn_perf_tests',
'gpu_perftests',
'latency_perftests',
'load_library_perf_tests',
'media_perftests',
'net_perftests',
'passthrough_command_buffer_perftests',
'performance_browser_tests',
'services_perftests',
'tracing_perftests',
'validating_command_buffer_perftests',
'views_perftests',
'viz_perftests',
'xr.vr.common_perftests',
]


def _GetMachineGroup(build_properties):
machine_group = None
Expand Down Expand Up @@ -118,13 +92,6 @@ def _upload_perf_results(json_to_upload, name, configuration_name,
if isinstance(buildbucket, basestring):
buildbucket = json.loads(buildbucket)

if _is_gtest(json_to_upload) and name in GTEST_CONVERSION_WHITELIST:
path_util.AddTracingToPath()
from tracing.value import ( # pylint: disable=no-name-in-module
gtest_json_converter)
gtest_json_converter.ConvertGtestJsonFile(json_to_upload)
_data_format_cache[json_to_upload] = DATA_FORMAT_HISTOGRAMS

if 'build' in buildbucket:
args += [
'--project', buildbucket['build'].get('project'),
Expand Down

0 comments on commit ae5cd53

Please sign in to comment.