Skip to content

Commit

Permalink
Refactor manage_render_test_goldens.py and add manage_wpr_archives.py
Browse files Browse the repository at this point in the history
R=bsheedy@chromium.org,harringtond@chromium.org

Bug: 1060399
Change-Id: Ifb06c6a94a51a6b1a2815bf94fa8f5a53dec1db8
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2219525
Reviewed-by: Haiyang Pan <hypan@google.com>
Reviewed-by: Robbie Iannucci <iannucci@chromium.org>
Reviewed-by: Dan H <harringtond@chromium.org>
Reviewed-by: Brian Sheedy <bsheedy@chromium.org>
Commit-Queue: Arthur Wang <wuwang@chromium.org>
Cr-Commit-Position: refs/heads/master@{#774253}
  • Loading branch information
wuwang-wang authored and Commit Bot committed Jun 2, 2020
1 parent d1542e9 commit 41dddd7
Show file tree
Hide file tree
Showing 8 changed files with 344 additions and 81 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -315,6 +315,9 @@ vs-chromium-project.txt
# Ignore any Android RenderTest goldens
**/render_tests/*.png

# Ignore any WPR archive files
**/wpr_tests/*.wprgo

# Ignore IntelliJ files.
.idea/

Expand Down
7 changes: 7 additions & 0 deletions .vpython
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,13 @@ wheel: <
>
>

# Used by:
# chrome/test/data/android/upload_download_utils_test.py
wheel: <
name: "infra/python/wheels/pyfakefs-py2_py3"
version: "version:3.7.2"
>

# Used by:
# tools/perf/fetch_benchmark_deps.py
wheel: <
Expand Down
14 changes: 14 additions & 0 deletions DEPS
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,10 @@ vars = {
'checkout_traffic_annotation_tools': 'checkout_configuration != "small"',
'checkout_instrumented_libraries': 'checkout_linux and checkout_configuration != "small"',

# By default, the bot checks out the WPR archive files only when this
# flag is set to True.
'checkout_wpr_archives': False,

# By default, do not check out WebKit for iOS, as it is not needed unless
# running against ToT WebKit rather than system WebKit. This can be overridden
# e.g. with custom_vars.
Expand Down Expand Up @@ -4551,6 +4555,16 @@ hooks = [
'download',
],
},
# Pull down WPR Archive files
{
'name': 'Fetch WPR archive files',
'pattern': '.',
'condition': 'checkout_android and (checkout_wpr_archives or checkout_src_internal)',
'action': [ 'python',
'src/chrome/test/data/android/manage_wpr_archives.py',
'download',
],
},
{
'name': 'Fetch Android AFDO profile',
'pattern': '.',
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
0846bac870a018d9ed4820943700b2c978c5ccca
90 changes: 9 additions & 81 deletions chrome/test/data/android/manage_render_test_goldens.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,9 @@
# render tests or upload any newly generated ones.

import argparse
import hashlib
import multiprocessing
import os
import subprocess

from upload_download_utils import download
from upload_download_utils import upload

STORAGE_BUCKET = 'chromium-android-render-test-goldens'
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
Expand Down Expand Up @@ -46,19 +44,8 @@
]


# Assume a quad core if we can't get the actual core count.
try:
THREAD_COUNT = multiprocessing.cpu_count()
# cpu_count only gets the physical core count. There doesn't appear to be a
# simple way of determining whether a CPU supports simultaneous multithreading
# in Python, so assume that anything with 6 or more cores supports it.
if THREAD_COUNT >= 6:
THREAD_COUNT *= 2
except NotImplementedError:
THREAD_COUNT = 4


def is_file_of_interest(f):
def _is_file_of_interest(f):
"""Filter through png files with right device sdk combo in the names."""
if not f.endswith('.png'):
return False
for combo in ALLOWED_DEVICE_SDK_COMBINATIONS:
Expand All @@ -67,81 +54,22 @@ def is_file_of_interest(f):
return False


def download(directory):
# If someone removes a SHA1 file, we want to remove the associated PNG file
# the next time images are updated.
images_to_delete = []
for f in os.listdir(directory):
if not is_file_of_interest(f):
continue
sha1_path = os.path.join(directory, f + '.sha1')
if not os.path.exists(sha1_path):
images_to_delete.append(os.path.join(directory, f))
for image_path in images_to_delete:
os.remove(image_path)

# Downloading the files can be very spammy, so only show the output if
# something actually goes wrong.
try:
subprocess.check_output([
'download_from_google_storage',
'--bucket', STORAGE_BUCKET,
'-d', directory,
'-t', str(THREAD_COUNT),
], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print ('Downloading RenderTest goldens in directory %s failed with error '
'%d: %s') % (directory, e.returncode, e.output)


def upload(directory, dry_run):
files_to_upload = []
for f in os.listdir(directory):
# Skip any files that we don't care about.
if not is_file_of_interest(f):
continue

png_path = os.path.join(directory, f)
# upload_to_google_storage will upload a file even if it already exists
# in the bucket. As an optimization, hash locally and only pass files to
# the upload script if they don't have a matching .sha1 file already.
sha_path = png_path + '.sha1'
if os.path.isfile(sha_path):
with open(sha_path) as sha_file:
with open(png_path, 'rb') as png_file:
h = hashlib.sha1()
h.update(png_file.read())
if sha_file.read() == h.hexdigest():
continue
files_to_upload.append(png_path)

if len(files_to_upload):
if dry_run:
print ('Will upload the following files:')
for f in files_to_upload:
print (' ' + f)
return
subprocess.check_call([
'upload_to_google_storage.py',
'--bucket', STORAGE_BUCKET,
'-t', str(THREAD_COUNT),
] + files_to_upload)


def main():
parser = argparse.ArgumentParser()
parser.add_argument('action', choices=['download', 'upload'],
help='Which action to perform')
parser.add_argument('--dry_run', action='store_true',
help='Dry run for uploading')
default=False, help='Dry run for uploading')
args = parser.parse_args()

if args.action == 'download':
for d in GOLDEN_DIRECTORIES:
download(d)
download(d, _is_file_of_interest,
'RenderTest Goldens', STORAGE_BUCKET)
else:
for d in GOLDEN_DIRECTORIES:
upload(d, args.dry_run)
upload(d, _is_file_of_interest,
'RenderTest Goldens', STORAGE_BUCKET, args.dry_run)


if __name__ == '__main__':
Expand Down
50 changes: 50 additions & 0 deletions chrome/test/data/android/manage_wpr_archives.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
#!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Simple script to automatically download all current wpr archive files
# for Android WPR record replay tests or upload any newly generated ones.

import argparse
import os

from upload_download_utils import download
from upload_download_utils import upload

STORAGE_BUCKET = 'chrome-wpr-archives'
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_SRC = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..', '..'))
WPR_RECORD_REPLAY_TEST_DIRECTORIES = [
os.path.join(
CHROMIUM_SRC, 'chrome', 'android', 'feed', 'core', 'javatests',
'src', 'org', 'chromium', 'chrome', 'browser', 'feed', 'wpr_tests'),
]


def _is_file_of_interest(f):
"""Filter all wprgo archive file through."""
return f.endswith('.wprgo')


def main():
  """Downloads or uploads the WPR archives for every known test directory."""
  parser = argparse.ArgumentParser()
  parser.add_argument('action', choices=['download', 'upload'],
                      help='Which action to perform')
  parser.add_argument('--dry_run', action='store_true',
                      default=False, help='Dry run for uploading')
  args = parser.parse_args()

  # Apply the chosen action to each directory that holds WPR archives.
  for directory in WPR_RECORD_REPLAY_TEST_DIRECTORIES:
    if args.action == 'download':
      download(directory, _is_file_of_interest,
               'WPR archives', STORAGE_BUCKET)
    else:
      upload(directory, _is_file_of_interest,
             'WPR archives', STORAGE_BUCKET, args.dry_run)


if __name__ == '__main__':
main()

131 changes: 131 additions & 0 deletions chrome/test/data/android/upload_download_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Script lib to download all current files from GCS or upload any newly
# generated ones.

import hashlib
import logging
import multiprocessing
import os
import subprocess


def _get_thread_count():
"""Gets a thread_count based on the multiprocessing.cpu_count()."""
try:
thread_count = multiprocessing.cpu_count()
# cpu_count only gets the physical core count. There doesn't appear to be a
# simple way of determining whether a CPU supports simultaneous
# multithreading in Python, so assume that anything with 6 or more cores
# supports it.
if thread_count >= 6:
thread_count *= 2
except NotImplementedError:
# Assume a quad core if we can't get the actual core count.
thread_count = 4
return thread_count


def download(directory, filter, scenario, gcs_bucket):
  """Downloads the files from cloud according to the given directory.

  Args:
    directory: A local disk directory.
    filter: A method to filter target files inside this directory.
    scenario: A scenario name describing the caller, used in log messages.
    gcs_bucket: A GCS bucket name to be used in the download.
  """
  # If someone removes a SHA1 file, we want to remove the associated cloud
  # file the next time files are updated.
  files_to_delete = _get_files_to_delete(directory, filter)
  for file_path in files_to_delete:
    os.remove(file_path)

  # Downloading the files can be very spammy, so only show the output if
  # something actually goes wrong.
  try:
    subprocess.check_output([
        'download_from_google_storage',
        '--bucket', gcs_bucket,
        '-d', directory,
        '-t', str(_get_thread_count()),
    ], stderr=subprocess.STDOUT)
  except subprocess.CalledProcessError as e:
    # BUG FIX: the format arguments must be passed to logging.error, not
    # applied with % to its return value (which is None and raised a
    # TypeError whenever the download failed).
    logging.error('Downloading %s in directory %s failed with error %d: %s',
                  scenario, directory, e.returncode, e.output)


def _get_files_to_delete(directory, filter):
"""Returns a list of local files to delete.
Args:
directory: A local disk directory.
filter: A method to filter target files inside the directory.
"""
files_to_delete = []
for f in os.listdir(directory):
if not filter(f):
continue
sha1_path = os.path.join(directory, f + '.sha1')
if not os.path.exists(sha1_path):
files_to_delete.append(os.path.join(directory, f))
return files_to_delete


def upload(directory, filter, scenario, gcs_bucket, dry_run=False):
  """Uploads the files to cloud storage.

  Only files passing *filter* whose content does not match an existing
  .sha1 checksum file are uploaded.

  Args:
    directory: A local disk directory.
    filter: A method to filter target files inside the directory.
    scenario: A scenario name describing the caller, used in log messages.
    gcs_bucket: A GCS bucket name to be used in upload.
    dry_run: A flag to indicate whether it is just a dry_run or a real upload.
  """
  files_to_upload = _get_files_to_upload(directory, filter)

  # Nothing to do when every candidate already matches its checksum.
  if not files_to_upload:
    return

  if dry_run:
    # Lazy %-style arguments: formatted only if the record is emitted.
    logging.info('Will upload the following %s:', scenario)
    logging.info('Destination gcs bucket: %s', gcs_bucket)
    for f in files_to_upload:
      logging.info(' %s', f)
    return
  subprocess.check_call([
      'upload_to_google_storage.py',
      '--bucket', gcs_bucket,
      '-t', str(_get_thread_count()),
  ] + files_to_upload)


def _get_files_to_upload(directory, filter):
"""Returns a list of files to upload to cloud storage.
Args:
directory: A local disk directory.
filter: A method to filter target files inside the directory.
"""
files_to_upload = []
for f in os.listdir(directory):
# Skip any files that we don't care about.
if not filter(f):
continue

file_path = os.path.join(directory, f)
# upload_to_google_storage will upload a file even if it already exists
# in the bucket. As an optimization, hash locally and only pass files to
# the upload script if they don't have a matching .sha1 file already.
sha_path = file_path + '.sha1'
if os.path.isfile(sha_path):
with open(sha_path) as sha_file:
with open(file_path, 'rb') as source_file:
h = hashlib.sha1()
h.update(source_file.read())
if sha_file.read() == h.hexdigest():
continue
files_to_upload.append(file_path)
return files_to_upload


Loading

0 comments on commit 41dddd7

Please sign in to comment.