Make minidump tests Python 3-compatible
Makes it possible to run //tools/perf/core/minidump_unittests.py under
Python 3 on Linux. Other platforms may work, but have not yet been
verified.

Bug: 1198237
Change-Id: I2ba57111eb8704d446fd42f4a2d229252ec9627e
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3169966
Reviewed-by: Dirk Pranke <dpranke@google.com>
Reviewed-by: David Schinazi <dschinazi@chromium.org>
Reviewed-by: Lei Zhang <thestig@chromium.org>
Commit-Queue: Brian Sheedy <bsheedy@chromium.org>
Cr-Commit-Position: refs/heads/main@{#926914}
Brian Sheedy authored and Chromium LUCI CQ committed Sep 30, 2021
1 parent e9c887a commit f9ffd52
Showing 12 changed files with 127 additions and 32 deletions.
31 changes: 30 additions & 1 deletion .vpython3
@@ -176,7 +176,7 @@ wheel: <

wheel: <
name: "infra/python/wheels/pyparsing-py2_py3"
version: "version:2.2.0"
version: "version:2.4.7"
>

wheel: <
@@ -293,3 +293,32 @@ wheel: <
platform: "macosx_10_10_intel"
>
>

# Used by:
# tools/perf/core/results_dashboard.py
wheel: <
name: "infra/python/wheels/httplib2-py3"
version: "version:0.19.1"
>

# Used by:
# tools/perf/flakiness_cli
wheel: <
name: "infra/python/wheels/pandas/${vpython_platform}"
version: "version:1.3.2"
match_tag: <
platform: "win32"
>
match_tag: <
platform: "win_amd64"
>
match_tag: <
platform: "manylinux1_i686"
>
match_tag: <
platform: "manylinux1_x86_64"
>
match_tag: <
platform: "macosx_10_6_intel"
>
>
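The pandas wheel backs tools/perf tooling that treats pandas as an optional dependency; later in this change, bugs.py pulls it in via "from core.external_modules import pandas", which suggests a guarded import along these lines (a sketch inferred from that usage, not code from this CL):

# Optional dependency: resolve to None when the wheel is unavailable.
try:
  import pandas
except ImportError:
  pandas = None

if pandas is not None:
  frame = pandas.DataFrame({'bug_id': [123], 'state': ['open']})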
15 changes: 8 additions & 7 deletions components/crash/content/tools/generate_breakpad_symbols.py
@@ -71,7 +71,7 @@ def GetSharedLibraryDependenciesLinux(binary):
"""Return absolute paths to all shared library dependencies of the binary.
This implementation assumes that we're running on a Linux system."""
ldd = subprocess.check_output(['ldd', binary])
ldd = subprocess.check_output(['ldd', binary]).decode('utf-8')
lib_re = re.compile('\t.* => (.+) \(.*\)$')
result = []
for line in ldd.splitlines():
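Context for the .decode('utf-8') added above: subprocess.check_output() returns str on Python 2 but bytes on Python 3, so the output has to be decoded before splitlines() and regex matching against str patterns. A minimal sketch (assumes a Linux host with ldd; not the Chromium code):

import subprocess

out = subprocess.check_output(['ldd', '/bin/true'])
# Python 2: type(out) is str.  Python 3: type(out) is bytes.
text = out.decode('utf-8')
for line in text.splitlines():  # yields str on both interpreters
  print(line)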
@@ -221,7 +221,7 @@ def GetSharedLibraryDependenciesChromeOS(binary):
def GetSharedLibraryDependencies(options, binary, exe_path):
"""Return absolute paths to all shared library dependencies of the binary."""
deps = []
if options.platform == 'linux2':
if options.platform.startswith('linux'):
deps = GetSharedLibraryDependenciesLinux(binary)
elif options.platform == 'android':
deps = GetSharedLibraryDependenciesAndroid(binary)
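The 'linux2' comparisons go away because sys.platform reports 'linux2' under Python 2 but plain 'linux' under Python 3.3+, and options.platform appears to mirror it (my reading of the surrounding code). startswith('linux') accepts both spellings:

import sys

# 'linux2' on Python 2, 'linux' on Python 3 (Linux hosts only).
if sys.platform.startswith('linux'):
  print('Linux detected under either interpreter')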
@@ -247,7 +247,7 @@ def GetTransitiveDependencies(options):
dependencies of the binary, along with the binary itself."""
binary = os.path.abspath(options.binary)
exe_path = os.path.dirname(binary)
if options.platform == 'linux2':
if options.platform.startswith('linux'):
# 'ldd' returns all transitive dependencies for us.
deps = set(GetSharedLibraryDependencies(options, binary, exe_path))
deps.add(binary)
@@ -289,7 +289,7 @@ def CreateSymbolDir(options, output_dir, relative_hash_dir):
"""Create the directory to store breakpad symbols in. On Android/Linux, we
also create a symlink in case the hash in the binary is missing."""
mkdir_p(output_dir)
if options.platform == 'android' or options.platform == "linux2":
if options.platform == 'android' or options.platform.startswith('linux'):
try:
os.symlink(relative_hash_dir, os.path.join(os.path.dirname(output_dir),
'000000000000000000000000000000000'))
@@ -322,9 +322,10 @@ def _Worker():
reason = "Could not locate dump_syms executable."
break

binary_info = GetBinaryInfoFromHeaderInfo(
subprocess.check_output(
[dump_syms, '-i', binary]).splitlines()[0])
dump_syms_output = subprocess.check_output(
[dump_syms, '-i', binary]).decode('utf-8')
header_info = dump_syms_output.splitlines()[0]
binary_info = GetBinaryInfoFromHeaderInfo(header_info)
if not binary_info:
should_dump_syms = False
reason = "Could not obtain binary information."
48 changes: 48 additions & 0 deletions net/tools/testserver/testserver.py.vpython3
@@ -0,0 +1,48 @@
# This is a vpython "spec" file.
#
# It describes patterns for python wheel dependencies of the python scripts in
# the chromium repo, particularly for dependencies that have compiled components
# (since pure-python dependencies can be easily vendored into third_party).
#
# When vpython is invoked, it finds this file and builds a python VirtualEnv,
# containing all of the dependencies described in this file, fetching them from
# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
# this never requires the end-user machine to have a working python extension
# compilation environment. All of these packages are built using:
# https://chromium.googlesource.com/infra/infra/+/main/infra/tools/dockerbuild/
#
# All python scripts in the repo share this same spec, to avoid dependency
# fragmentation.
#
# If you have depot_tools installed in your $PATH, you can invoke python scripts
# in this repo by running them as you normally would run them, except
# substituting `vpython` instead of `python` on the command line, e.g.:
# vpython path/to/script.py some --arguments
#
# Read more about `vpython` and how to modify this file here:
# https://chromium.googlesource.com/infra/infra/+/main/doc/users/vpython.md

# This Python file gets its own vpython spec since it doesn't actually need that
# many dependencies and trying to load all dependencies from the main .vpython3
# spec on ChromeOS VMs runs into disk space issues.

python_version: "3.8"

# The default set of platforms vpython checks does not yet include mac-arm64.
# Setting `verify_pep425_tag` to the list of platforms we explicitly must support
# allows us to ensure that vpython specs stay mac-arm64-friendly
verify_pep425_tag: [
{python: "cp38", abi: "cp38", platform: "manylinux1_x86_64"},
{python: "cp38", abi: "cp38", platform: "linux_arm64"},

{python: "cp38", abi: "cp38", platform: "macosx_10_10_intel"},
{python: "cp38", abi: "cp38", platform: "macosx_11_0_arm64"},

{python: "cp38", abi: "cp38", platform: "win32"},
{python: "cp38", abi: "cp38", platform: "win_amd64"}
]

wheel: <
name: "infra/python/wheels/six-py2_py3"
version: "version:1.15.0"
>
20 changes: 11 additions & 9 deletions testing/scripts/run_performance_tests.py
@@ -48,6 +48,8 @@
./run_tests ScriptsSmokeTest.testRunPerformanceTests
"""

from __future__ import print_function

import argparse
import json
import os
@@ -160,7 +162,7 @@ def csv_perf_results(self):


def print_duration(step, start):
print 'Duration of %s: %d seconds' % (step, time.time() - start)
print('Duration of %s: %d seconds' % (step, time.time() - start))


def IsWindows():
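The print statement only parses under Python 2; together with the "from __future__ import print_function" added at the top of the file, the print(...) call form behaves identically under both interpreters. A small illustration (not part of the CL):

from __future__ import print_function  # no-op on Python 3

import time

start = time.time()
time.sleep(0.1)
print('Duration of %s: %d seconds' % ('warm-up', time.time() - start))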
@@ -552,9 +554,9 @@ def execute_telemetry_benchmark(
if os.path.isfile(csv_file_path):
shutil.move(csv_file_path, output_paths.csv_perf_results)
except Exception:
print ('The following exception may have prevented the code from '
'outputing structured test results and perf results output:')
print traceback.format_exc()
print('The following exception may have prevented the code from '
'outputing structured test results and perf results output:')
print(traceback.format_exc())
finally:
# On swarming bots, don't remove output directory, since Result Sink might
# still be uploading files to Result DB. Also, swarming bots automatically
@@ -573,8 +575,8 @@ def execute_telemetry_benchmark(
# TODO(crbug.com/1019139): Make 111 be the exit code that means
# "no stories were run.".
if return_code in (111, -1, 255):
print ('Exit code %s indicates that no stories were run, so we are marking '
'this as a success.' % return_code)
print('Exit code %s indicates that no stories were run, so we are marking '
'this as a success.' % return_code)
return 0
if return_code:
return return_code
@@ -717,9 +719,9 @@ def main(sys_args):
overall_return_code = return_code or overall_return_code
test_results_files.append(output_paths.test_results)
if options.run_ref_build:
print ('Not running reference build. --run-ref-build argument is only '
'supported for sharded benchmarks. It is simple to support '
'this for unsharded --benchmarks if needed.')
print('Not running reference build. --run-ref-build argument is only '
'supported for sharded benchmarks. It is simple to support '
'this for unsharded --benchmarks if needed.')
elif options.test_shard_map_filename:
# First determine what shard we are running on to know how to
# index into the bot map to get list of telemetry benchmarks to run.
8 changes: 4 additions & 4 deletions tools/perf/benchmarks/system_health_smoke_test.py
@@ -295,7 +295,7 @@ def find_multi_version_stories(stories, disabled):
else:
prefix = name
prefixes[prefix].append(name)
for prefix, stories in prefixes.items():
if len(stories) == 1:
prefixes.pop(prefix)
return prefixes
return {
prefix: stories
for prefix, stories in prefixes.items() if len(stories) != 1
}
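The comprehension above replaces a loop that popped keys out of the dict while iterating over it, which Python 3 rejects with "RuntimeError: dictionary changed size during iteration". The same filtering pattern in isolation (illustrative data, not from the real benchmark set):

prefixes = {
    'load:news': ['load:news'],
    'browse:media': ['browse:media:2018', 'browse:media:2019'],
}
multi_version = {
    prefix: stories
    for prefix, stories in prefixes.items() if len(stories) != 1
}
# multi_version == {'browse:media': ['browse:media:2018', 'browse:media:2019']}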
6 changes: 4 additions & 2 deletions tools/perf/cli_tools/soundwave/tables/bugs.py
@@ -2,14 +2,16 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import six

from cli_tools.soundwave import pandas_sqlite
from core.external_modules import pandas


TABLE_NAME = 'bugs'
COLUMN_TYPES = (
('id', 'int64'), # crbug number identifying this issue
('summary', unicode), # issue title ('1%-5% regression in loading ...')
('summary',
six.text_type), # issue title ('1%-5% regression in loading ...')
('published', 'datetime64[ns]'), # when the issue got created
('updated', 'datetime64[ns]'), # when the issue got last updated
('state', str), # usually either 'open' or 'closed'
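Python 3 has no unicode builtin; six.text_type is unicode on Python 2 and str on Python 3, so the 'summary' column keeps its text dtype on both. For instance:

import six

# True on both interpreters; on Python 3, six.text_type is simply str.
assert isinstance(u'1%-5% regression in loading ...', six.text_type)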
2 changes: 1 addition & 1 deletion tools/perf/core/bootstrap.py
@@ -27,7 +27,7 @@ def ListAllDepsPaths(deps_file):
while os.path.basename(chrome_root) != 'src':
chrome_root = os.path.abspath(os.path.join(chrome_root, '..'))

exec open(deps_file).read() # pylint: disable=exec-used
exec (open(deps_file).read()) # pylint: disable=exec-used

deps_paths = list(deps.keys())

2 changes: 1 addition & 1 deletion tools/perf/core/find_dependencies.py
@@ -174,7 +174,7 @@ def ZipDependencies(target_paths, dependencies, options):
os.path.join('telemetry', os.path.basename(target_path)))
link_info.create_system = 3 # Unix attributes.
# 010 is regular file, 0111 is the permission bits rwxrwxrwx.
link_info.external_attr = 0100777 << 16 # Octal.
link_info.external_attr = 0o0100777 << 16 # Octal.

relative_path = os.path.relpath(target_path, base_dir)
link_script = (
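Bare leading-zero octal literals such as 0100777 are a SyntaxError on Python 3; the 0o prefix is accepted by Python 2.6+ and Python 3 alike. The value packs a Unix mode (regular file, rwxrwxrwx) into the upper 16 bits of the zip entry's external attributes; a standalone sketch:

import stat
import zipfile

info = zipfile.ZipInfo('telemetry/run_benchmark')  # hypothetical entry name
info.create_system = 3                    # Unix attributes
info.external_attr = 0o0100777 << 16      # S_IFREG | 0777
assert (info.external_attr >> 16) == (stat.S_IFREG | 0o777)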
4 changes: 2 additions & 2 deletions tools/perf/core/perf_data_generator_unittest.py
@@ -2,7 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import cStringIO
from io import BytesIO
import unittest
import tempfile
import json
@@ -62,7 +62,7 @@ def setUp(self):
perf_data_generator.GTEST_BENCHMARKS)
self.original_OTHER_BENCHMARKS = copy.deepcopy(
perf_data_generator.OTHER_BENCHMARKS)
self.test_stream = cStringIO.StringIO()
self.test_stream = BytesIO()
self.mock_get_non_telemetry_benchmarks = mock.patch(
'core.perf_data_generator.get_scheduled_non_telemetry_benchmarks')
self.get_non_telemetry_benchmarks = (
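cStringIO does not exist on Python 3; the io module's BytesIO (byte streams) and StringIO (text streams) are available on both interpreters, and the test stream here now collects bytes. For example:

from io import BytesIO, StringIO

buf = BytesIO()
buf.write(b'waterfall config')
assert buf.getvalue() == b'waterfall config'

text_buf = StringIO()
text_buf.write(u'waterfall config')  # io.StringIO requires text, even on Python 2
assert text_buf.getvalue() == u'waterfall config'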
8 changes: 7 additions & 1 deletion tools/perf/core/results_dashboard.py
@@ -12,7 +12,6 @@

import calendar
import datetime
import httplib
import json
import os
import subprocess
@@ -21,10 +20,17 @@
import traceback
import zlib
import logging

import six
import six.moves.urllib.error # pylint: disable=import-error
import six.moves.urllib.parse # pylint: disable=import-error
import six.moves.urllib.request # pylint: disable=import-error

if six.PY2:
import httplib # pylint: disable=wrong-import-order
else:
import http.client as httplib # pylint: disable=import-error

# TODO(crbug.com/996778): Figure out how to get httplib2 hermetically.
import httplib2 # pylint: disable=import-error

11 changes: 8 additions & 3 deletions tools/perf/core/services/request.py
@@ -4,6 +4,8 @@

import json
import logging

import six
import six.moves.urllib.parse # pylint: disable=import-error

# TODO(crbug.com/996778): Figure out how to get httplib2 hermetically.
@@ -48,7 +50,7 @@ def json(self):
"""Attempt to load the content as a json object."""
try:
return json.loads(self.content)
except StandardError:
except Exception:
return None

@property
@@ -57,9 +59,12 @@ def error_message(self):
try:
# Try to find error message within json content.
return self.json['error']
except StandardError:
except Exception:
# Otherwise fall back to entire content itself, converting str to unicode.
return self.content.decode('utf-8')
rv = self.content
if not isinstance(rv, six.text_type):
rv = rv.decode('utf-8')
return rv


class ClientError(RequestError):
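StandardError exists only on Python 2 (Python 3 removed it), so Exception is the portable catch-all, and the response content may arrive as bytes or text depending on the HTTP layer, hence the isinstance check before decoding. The same guard as a standalone helper (illustrative, not part of the CL):

import six

def ensure_text(content):
  """Returns content as text, decoding UTF-8 bytes when necessary."""
  if isinstance(content, six.text_type):
    return content
  return content.decode('utf-8')

assert ensure_text(b'not found') == u'not found'
assert ensure_text(u'not found') == u'not found'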
@@ -2,12 +2,14 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import adjust_upper_limits
import pandas as pd
import os
import sys
import unittest

from experimental.representative_perf_test_limit_adjuster import (
adjust_upper_limits)


def create_sample_dataframe(story_name, count, avg_start, avg_step, ci_start,
ci_step, cpu_wal_start, cpu_wall_step):
