forked from chromium/chromium
-
Notifications
You must be signed in to change notification settings - Fork 2
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Refactor manage_render_test_goldens.py and add manage_wpr_archives.py
R=bsheedy@chromium.org,harringtond@chromium.org Bug: 1060399 Change-Id: Ifb06c6a94a51a6b1a2815bf94fa8f5a53dec1db8 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2219525 Reviewed-by: Haiyang Pan <hypan@google.com> Reviewed-by: Robbie Iannucci <iannucci@chromium.org> Reviewed-by: Dan H <harringtond@chromium.org> Reviewed-by: Brian Sheedy <bsheedy@chromium.org> Commit-Queue: Arthur Wang <wuwang@chromium.org> Cr-Commit-Position: refs/heads/master@{#774253}
- Loading branch information
1 parent
d1542e9
commit 41dddd7
Showing
8 changed files
with
344 additions
and
81 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
1 change: 1 addition & 0 deletions
1
.../FeedNewTabPageCardInstrumentationTest#launchNTP_withMultipleFeedCardsRendered.wprgo.sha1
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
0846bac870a018d9ed4820943700b2c978c5ccca |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,50 @@ | ||
#!/usr/bin/env python | ||
# Copyright 2020 The Chromium Authors. All rights reserved. | ||
# Use of this source code is governed by a BSD-style license that can be | ||
# found in the LICENSE file. | ||
|
||
# Simple script to automatically download all current wpr archive files | ||
# for Android WPR record replay tests or upload any newly generated ones. | ||
|
||
import argparse | ||
import os | ||
|
||
from upload_download_utils import download | ||
from upload_download_utils import upload | ||
|
||
# GCS bucket that stores the WPR archives for Android record/replay tests.
STORAGE_BUCKET = 'chrome-wpr-archives'
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
# This script lives four directory levels below the Chromium src root.
CHROMIUM_SRC = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..', '..'))
# Local directories that hold WPR record/replay test archives.
WPR_RECORD_REPLAY_TEST_DIRECTORIES = [
    os.path.join(CHROMIUM_SRC, 'chrome', 'android', 'feed', 'core',
                 'javatests', 'src', 'org', 'chromium', 'chrome', 'browser',
                 'feed', 'wpr_tests'),
]
|
||
|
||
def _is_file_of_interest(f): | ||
"""Filter all wprgo archive file through.""" | ||
return f.endswith('.wprgo') | ||
|
||
|
||
def main():
  """Entry point: downloads or uploads WPR archives per the CLI action."""
  parser = argparse.ArgumentParser()
  parser.add_argument('action', choices=['download', 'upload'],
                      help='Which action to perform')
  parser.add_argument('--dry_run', action='store_true',
                      default=False, help='Dry run for uploading')
  args = parser.parse_args()

  # Apply the chosen action to every known record/replay test directory.
  for test_dir in WPR_RECORD_REPLAY_TEST_DIRECTORIES:
    if args.action == 'download':
      download(test_dir, _is_file_of_interest,
               'WPR archives', STORAGE_BUCKET)
    else:
      upload(test_dir, _is_file_of_interest,
             'WPR archives', STORAGE_BUCKET, args.dry_run)


if __name__ == '__main__':
  main()
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,131 @@ | ||
# Copyright 2020 The Chromium Authors. All rights reserved. | ||
# Use of this source code is governed by a BSD-style license that can be | ||
# found in the LICENSE file. | ||
|
||
# Script lib to download all current files from GCS or upload any newly | ||
# generated ones. | ||
|
||
import hashlib | ||
import logging | ||
import multiprocessing | ||
import os | ||
import subprocess | ||
|
||
|
||
def _get_thread_count(): | ||
"""Gets a thread_count based on the multiprocessing.cpu_count().""" | ||
try: | ||
thread_count = multiprocessing.cpu_count() | ||
# cpu_count only gets the physical core count. There doesn't appear to be a | ||
# simple way of determining whether a CPU supports simultaneous | ||
# multithreading in Python, so assume that anything with 6 or more cores | ||
# supports it. | ||
if thread_count >= 6: | ||
thread_count *= 2 | ||
except NotImplementedError: | ||
# Assume a quad core if we can't get the actual core count. | ||
thread_count = 4 | ||
return thread_count | ||
|
||
|
||
def download(directory, filter, scenario, gcs_bucket):
  """Downloads the files from cloud as according to the given directory.

  Also removes any local file that passes |filter| but no longer has a
  companion .sha1 file, so stale archives do not linger.

  Args:
    directory: A local disk directory.
    filter: A method to filter target files inside this directory.
    scenario: A scenario name that utilizes this download method.
    gcs_bucket: A GCS bucket name to be used in the download.
  """
  # If someone removes a SHA1 file, we want to remove the associated cloud
  # file the next time files are updated.
  files_to_delete = _get_files_to_delete(directory, filter)
  for file_path in files_to_delete:
    os.remove(file_path)

  # Downloading the files can be very spammy, so only show the output if
  # something actually goes wrong.
  try:
    subprocess.check_output([
        'download_from_google_storage',
        '--bucket', gcs_bucket,
        '-d', directory,
        '-t', str(_get_thread_count()),
    ], stderr=subprocess.STDOUT)
  except subprocess.CalledProcessError as e:
    # Bug fix: the format args were previously applied with % to the return
    # value of logging.error (None), which raised TypeError whenever a
    # download actually failed. Pass them as lazy logging args instead.
    logging.error('Downloading %s in directory %s failed with error %d: %s',
                  scenario, directory, e.returncode, e.output)
|
||
|
||
def _get_files_to_delete(directory, filter): | ||
"""Returns a list of local files to delete. | ||
Args: | ||
directory: A local disk directory. | ||
filter: A method to filter target files inside the directory. | ||
""" | ||
files_to_delete = [] | ||
for f in os.listdir(directory): | ||
if not filter(f): | ||
continue | ||
sha1_path = os.path.join(directory, f + '.sha1') | ||
if not os.path.exists(sha1_path): | ||
files_to_delete.append(os.path.join(directory, f)) | ||
return files_to_delete | ||
|
||
|
||
def upload(directory, filter, scenario, gcs_bucket, dry_run=False):
  """Uploads the files to cloud storage.

  Only files without an up-to-date .sha1 entry are passed to the upload
  script; see _get_files_to_upload.

  Args:
    directory: A local disk directory.
    filter: A method to filter target files inside the directory.
    scenario: A scenario name that calls this upload.
    gcs_bucket: A GCS bucket name to be used in upload.
    dry_run: A flag to indicate whether it is just a dry_run or a real upload.
  """
  files_to_upload = _get_files_to_upload(directory, filter)
  # Nothing new to upload.
  if not files_to_upload:
    return

  if dry_run:
    # Use lazy %-style logging args so formatting only happens when the
    # message is actually emitted.
    logging.info('Will upload the following %s:', scenario)
    logging.info('Destination gcs bucket: %s', gcs_bucket)
    for f in files_to_upload:
      logging.info(' %s', f)
    return
  subprocess.check_call([
      'upload_to_google_storage.py',
      '--bucket', gcs_bucket,
      '-t', str(_get_thread_count()),
  ] + files_to_upload)
|
||
|
||
def _get_files_to_upload(directory, filter): | ||
"""Returns a list of files to upload to cloud storage. | ||
Args: | ||
directory: A local disk directory. | ||
filter: A method to filter target files inside the directory. | ||
""" | ||
files_to_upload = [] | ||
for f in os.listdir(directory): | ||
# Skip any files that we don't care about. | ||
if not filter(f): | ||
continue | ||
|
||
file_path = os.path.join(directory, f) | ||
# upload_to_google_storage will upload a file even if it already exists | ||
# in the bucket. As an optimization, hash locally and only pass files to | ||
# the upload script if they don't have a matching .sha1 file already. | ||
sha_path = file_path + '.sha1' | ||
if os.path.isfile(sha_path): | ||
with open(sha_path) as sha_file: | ||
with open(file_path, 'rb') as source_file: | ||
h = hashlib.sha1() | ||
h.update(source_file.read()) | ||
if sha_file.read() == h.hexdigest(): | ||
continue | ||
files_to_upload.append(file_path) | ||
return files_to_upload | ||
|
||
|
Oops, something went wrong.