Skip to content
This repository has been archived by the owner on Aug 4, 2022. It is now read-only.

Commit

Permalink
Bug 1481037 - Update StyleBench. r=jmaher
Browse files Browse the repository at this point in the history
Pick upstream changes.

Differential Revision: https://phabricator.services.mozilla.com/D2757
  • Loading branch information
emilio committed Aug 7, 2018
1 parent 5655897 commit 3f31fa4
Show file tree
Hide file tree
Showing 13 changed files with 1,309 additions and 64 deletions.
42 changes: 36 additions & 6 deletions testing/raptor/raptor/output.py
Original file line number Diff line number Diff line change
Expand Up @@ -378,13 +378,43 @@ def stylebench_score(cls, val_list):
"""
correctionFactor = 3
results = [i for i, j in val_list]
# stylebench has 4 tests, each of which is made up of 12 subtests
# and a sum of the 12 values. We receive 52 values, and want to use
# the 4 test values, not the sub test values.
if len(results) != 52:
raise Exception("StyleBench has 52 subtests, found: %s instead" % len(results))

results = results[12::13]
# stylebench has 5 tests, each of which is made up of 5 subtests
#
# * Adding classes.
# * Removing classes.
# * Mutating attributes.
# * Adding leaf elements.
# * Removing leaf elements.
#
# which are made of two subtests each (sync/async) and repeated 5 times
# each, thus, the list here looks like:
#
# [Test name/Adding classes - 0/ Sync; <x>]
# [Test name/Adding classes - 0/ Async; <y>]
# [Test name/Adding classes - 0; <x> + <y>]
# [Test name/Removing classes - 0/ Sync; <x>]
# [Test name/Removing classes - 0/ Async; <y>]
# [Test name/Removing classes - 0; <x> + <y>]
# ...
# [Test name/Adding classes - 1 / Sync; <x>]
# [Test name/Adding classes - 1 / Async; <y>]
# [Test name/Adding classes - 1 ; <x> + <y>]
# ...
# [Test name/Removing leaf elements - 4; <x> + <y>]
# [Test name; <sum>] <- This is what we want.
#
# So, 5 (subtests) *
# 5 (repetitions) *
# 3 (entries per repetition (sync/async/sum)) =
# 75 entries per test before the sum.
#
# We receive 76 entries per test, which adds up to 380. We want to use
# the 5 test entries, not the rest.
if len(results) != 380:
raise Exception("StyleBench has 380 entries, found: %s instead" % len(results))

results = results[75::76]
score = 60 * 1000 / filter.geometric_mean(results) / correctionFactor
return score

Expand Down
42 changes: 36 additions & 6 deletions testing/talos/talos/output.py
Original file line number Diff line number Diff line change
Expand Up @@ -270,13 +270,43 @@ def stylebench_score(cls, val_list):
"""
correctionFactor = 3
results = [i for i, j in val_list]
# stylebench has 4 tests, each of which is made up of 12 subtests
# and a sum of the 12 values. We receive 52 values, and want to use
# the 4 test values, not the sub test values.
if len(results) != 52:
raise Exception("StyleBench has 52 subtests, found: %s instead" % len(results))

results = results[12::13]
# stylebench has 5 tests, each of which is made up of 5 subtests
#
# * Adding classes.
# * Removing classes.
# * Mutating attributes.
# * Adding leaf elements.
# * Removing leaf elements.
#
# which are made of two subtests each (sync/async) and repeated 5 times
# each, thus, the list here looks like:
#
# [Test name/Adding classes - 0/ Sync; <x>]
# [Test name/Adding classes - 0/ Async; <y>]
# [Test name/Adding classes - 0; <x> + <y>]
# [Test name/Removing classes - 0/ Sync; <x>]
# [Test name/Removing classes - 0/ Async; <y>]
# [Test name/Removing classes - 0; <x> + <y>]
# ...
# [Test name/Adding classes - 1 / Sync; <x>]
# [Test name/Adding classes - 1 / Async; <y>]
# [Test name/Adding classes - 1 ; <x> + <y>]
# ...
# [Test name/Removing leaf elements - 4; <x> + <y>]
# [Test name; <sum>] <- This is what we want.
#
# So, 5 (subtests) *
# 5 (repetitions) *
# 3 (entries per repetition (sync/async/sum)) =
# 75 entries per test before the sum.
#
# We receive 76 entries per test, which adds up to 380. We want to use
# the 5 test entries, not the rest.
if len(results) != 380:
raise Exception("StyleBench has 380 entries, found: %s instead" % len(results))

results = results[75::76]
score = 60 * 1000 / filter.geometric_mean(results) / correctionFactor
return score

Expand Down
13 changes: 13 additions & 0 deletions third_party/webkit/PerformanceTests/StyleBench/README_MOZILLA
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
The source from this directory was copied from the
PerformanceTests/StyleBench directory of the Webkit repository
at: https://svn.webkit.org/repository/webkit/trunk

The SVN revision used was: 234578

The contents of this directory are intended for use to "train" the
profile guided optimization (PGO) of Firefox and for benchmarking
scenarios. The files inside this directory are not intended to ship
with Firefox or any other product. If files inside this directory
are useful for other purposes (e.g. JavaScript libraries), consumers
should vendor those files separately, as it is not appropriate to pull
in components of StyleBench for use outside of StyleBench.
15 changes: 6 additions & 9 deletions third_party/webkit/PerformanceTests/StyleBench/index.html
Original file line number Diff line number Diff line change
@@ -1,20 +1,17 @@
<!DOCTYPE html>
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>StyleBench 0.1</title>
<link rel="stylesheet" href="../Speedometer/resources/main.css">
<script src="../Speedometer/resources/main.js" defer></script>
<script src="../Speedometer/resources/benchmark-runner.js" defer></script>
<script src="../Speedometer/resources/benchmark-report.js" defer></script>
<title>StyleBench 0.3</title>
<link rel="stylesheet" href="resources/main.css">
<script src="resources/main.js" defer></script>
<script src="resources/benchmark-runner.js" defer></script>
<script src="resources/benchmark-report.js" defer></script>
<script src="../resources/statistics.js" defer></script>
<script src="resources/style-bench.js" defer></script>
<script src="resources/tests.js" defer></script>
<script>
addEventListener('load', () => {
if (!window.location.protocol.startsWith('http'))
showSection('local-message', false);

if (location.search == '?gecko' || location.search == '?raptor')
startTest();
});
Expand Down
113 changes: 113 additions & 0 deletions third_party/webkit/PerformanceTests/StyleBench/mozilla.patch
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
diff --git a/third_party/webkit/PerformanceTests/StyleBench/index.html b/third_party/webkit/PerformanceTests/StyleBench/index.html
index c77554dc3506..2a561a9cbb54 100644
--- a/third_party/webkit/PerformanceTests/StyleBench/index.html
+++ b/third_party/webkit/PerformanceTests/StyleBench/index.html
@@ -5,16 +5,22 @@
<title>StyleBench 0.3</title>
<link rel="stylesheet" href="resources/main.css">
<script src="resources/main.js" defer></script>
<script src="resources/benchmark-runner.js" defer></script>
<script src="resources/benchmark-report.js" defer></script>
<script src="../resources/statistics.js" defer></script>
<script src="resources/style-bench.js" defer></script>
<script src="resources/tests.js" defer></script>
+ <script>
+ addEventListener('load', () => {
+ if (location.search == '?gecko' || location.search == '?raptor')
+ startTest();
+ });
+ </script>
</head>
<body>
<main>
<a id="logo-link" href="javascript:showHome()"></a>

<section id="home" class="selected">
<p>
StyleBench is a browser benchmark that measures the performance of the style resolution mechanism.
diff --git a/third_party/webkit/PerformanceTests/StyleBench/resources/benchmark-report.js b/third_party/webkit/PerformanceTests/StyleBench/resources/benchmark-report.js
index b33021d9d9ce..58b3e46982d1 100644
--- a/third_party/webkit/PerformanceTests/StyleBench/resources/benchmark-report.js
+++ b/third_party/webkit/PerformanceTests/StyleBench/resources/benchmark-report.js
@@ -1,12 +1,13 @@
// This file can be customized to report results as needed.

(function () {
- if (!window.testRunner && location.search != '?webkit' && location.hash != '#webkit')
+ if (!window.testRunner && location.search != '?webkit' && location.hash != '#webkit' &&
+ location.search != '?gecko' && location.search != '?raptor')
return;

if (window.testRunner)
testRunner.waitUntilDone();

var scriptElement = document.createElement('script');
scriptElement.src = '../resources/runner.js';
document.head.appendChild(scriptElement);
@@ -31,20 +32,22 @@
customIterationCount: iterationCount,
doNotIgnoreInitialRun: true,
doNotMeasureMemoryUsage: true,
continueTesting: !isLastTest,
unit: unit,
name: name,
aggregator: aggregator};
}
- PerfTestRunner.prepareToMeasureValuesAsync(createTest(null, 'Geometric'));
+ if (window.PerfTestRunner)
+ PerfTestRunner.prepareToMeasureValuesAsync(createTest(null, 'Geometric'));
},
didRunSuites: function (measuredValues) {
- PerfTestRunner.measureValueAsync(measuredValues.geomean);
+ if (window.PerfTestRunner)
+ PerfTestRunner.measureValueAsync(measuredValues.geomean);
valuesByIteration.push(measuredValues);
},
didFinishLastIteration: function () {
document.head.removeChild(document.querySelector('style'));

var measuredValuesByFullName = {};
function addToMeasuredValue(value, fullName, aggregator) {
var values = measuredValuesByFullName[fullName] || new Array;
@@ -63,21 +66,37 @@
for (var subtestName in test.tests)
addToMeasuredValue(test.tests[subtestName], suiteName + '/' + testName + '/' + subtestName);
addToMeasuredValue(test.total, suiteName + '/' + testName, 'Total');
}
addToMeasuredValue(suite.total, suiteName, 'Total');
}
});

- PerfTestRunner.reportValues(createTest(null, null, false, 'pt'), scores);
+ if (window.PerfTestRunner)
+ PerfTestRunner.reportValues(createTest(null, null, false, 'pt'), scores);

var fullNames = new Array;
for (var fullName in measuredValuesByFullName)
fullNames.push(fullName);

- for (var i = 0; i < fullNames.length; i++) {
- var values = measuredValuesByFullName[fullNames[i]];
- PerfTestRunner.reportValues(createTest(fullNames[i], values.aggregator, i + 1 == fullNames.length), values);
+ if (location.search == '?raptor') {
+ var data = ['raptor-benchmark', 'speedometer', measuredValuesByFullName];
+ window.postMessage(data, '*');
+ } else if (typeof tpRecordTime !== "undefined") {
+ var values = new Array;
+ var allNames = new Array;
+ for (var i = 0; i < fullNames.length; i++) {
+ var vals = measuredValuesByFullName[fullNames[i]];
+ values.push(vals);
+ for (var count = 0; count < vals.length; count ++)
+ allNames.push(fullNames[i]);
+ }
+ tpRecordTime(values.join(','), 0, allNames.join(','));
+ } else if (window.PerfTestRunner) {
+ for (var i = 0; i < fullNames.length; i++) {
+ var values = measuredValuesByFullName[fullNames[i]];
+ PerfTestRunner.reportValues(createTest(fullNames[i], values.aggregator, i + 1 == fullNames.length), values);
+ }
}
}
};
})();
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
// This file can be customized to report results as needed.
//
// Gecko customization of the upstream WebKit benchmark-report.js: in
// addition to WebKit's testRunner / ?webkit / #webkit activation modes,
// it also activates for ?gecko and ?raptor query strings and routes the
// final results either to Raptor (via postMessage), to Talos (via the
// tpRecordTime global), or to WebKit's PerfTestRunner, whichever is
// available.

(function () {
    // Bail out unless one of the supported harnesses activated us via the
    // URL (?webkit / #webkit / ?gecko / ?raptor) or WebKit's testRunner.
    if (!window.testRunner && location.search != '?webkit' && location.hash != '#webkit' &&
        location.search != '?gecko' && location.search != '?raptor')
        return;

    if (window.testRunner)
        testRunner.waitUntilDone();

    // Load WebKit's PerfTestRunner harness (may be absent in Gecko runs;
    // every use below is guarded with `if (window.PerfTestRunner)`).
    var scriptElement = document.createElement('script');
    scriptElement.src = '../resources/runner.js';
    document.head.appendChild(scriptElement);

    // Push the benchmark UI down so harness log output stays visible.
    // This <style> is removed again in didFinishLastIteration.
    var styleElement = document.createElement('style');
    styleElement.textContent = 'pre { padding-top: 600px; }';
    document.head.appendChild(styleElement);

    // Factory for the per-test descriptor objects handed to
    // PerfTestRunner; assigned in willStartFirstIteration.
    var createTest;
    // One measuredValues object per completed benchmark iteration.
    var valuesByIteration = new Array;

    window.onload = function () {
        document.body.removeChild(document.querySelector('main'));
        startBenchmark();
    }

    window.benchmarkClient = {
        iterationCount: 5, // Use 4 different instances of DRT/WTR to run 5 iterations.
        // NOTE(review): upstream comment says "4 different instances" while
        // iterationCount is 5 — inconsistency inherited from WebKit; verify.
        willStartFirstIteration: function (iterationCount) {
            createTest = function (name, aggregator, isLastTest, unit = 'ms') {
                return {
                    customIterationCount: iterationCount,
                    doNotIgnoreInitialRun: true,
                    doNotMeasureMemoryUsage: true,
                    continueTesting: !isLastTest,
                    unit: unit,
                    name: name,
                    aggregator: aggregator};
            }
            if (window.PerfTestRunner)
                PerfTestRunner.prepareToMeasureValuesAsync(createTest(null, 'Geometric'));
        },
        didRunSuites: function (measuredValues) {
            if (window.PerfTestRunner)
                PerfTestRunner.measureValueAsync(measuredValues.geomean);
            // Keep every iteration's raw values for the final report.
            valuesByIteration.push(measuredValues);
        },
        didFinishLastIteration: function () {
            // Undo the padding-top hack installed at startup.
            document.head.removeChild(document.querySelector('style'));

            // Flatten the per-iteration suite/test/subtest trees into one
            // map: "Suite/Test/Subtest" -> array of values (one per
            // iteration), tagged with an optional aggregator name.
            var measuredValuesByFullName = {};
            function addToMeasuredValue(value, fullName, aggregator) {
                var values = measuredValuesByFullName[fullName] || new Array;
                measuredValuesByFullName[fullName] = values;
                values.push(value);
                values.aggregator = aggregator;
            }

            var scores = [];
            valuesByIteration.forEach(function (measuredValues) {
                scores.push(measuredValues.score);
                for (var suiteName in measuredValues.tests) {
                    var suite = measuredValues.tests[suiteName];
                    for (var testName in suite.tests) {
                        var test = suite.tests[testName];
                        for (var subtestName in test.tests)
                            addToMeasuredValue(test.tests[subtestName], suiteName + '/' + testName + '/' + subtestName);
                        addToMeasuredValue(test.total, suiteName + '/' + testName, 'Total');
                    }
                    addToMeasuredValue(suite.total, suiteName, 'Total');
                }
            });

            if (window.PerfTestRunner)
                PerfTestRunner.reportValues(createTest(null, null, false, 'pt'), scores);

            var fullNames = new Array;
            for (var fullName in measuredValuesByFullName)
                fullNames.push(fullName);

            // Hand the flattened results to whichever harness is driving us:
            if (location.search == '?raptor') {
                // Raptor listens for this postMessage payload.
                // NOTE(review): the channel name is 'speedometer' even though
                // this is StyleBench — presumably Raptor keys on it; confirm.
                var data = ['raptor-benchmark', 'speedometer', measuredValuesByFullName];
                window.postMessage(data, '*');
            } else if (typeof tpRecordTime !== "undefined") {
                // Talos: report one flat list of values with a parallel list
                // of names (a name is repeated once per recorded value).
                var values = new Array;
                var allNames = new Array;
                for (var i = 0; i < fullNames.length; i++) {
                    var vals = measuredValuesByFullName[fullNames[i]];
                    values.push(vals);
                    for (var count = 0; count < vals.length; count ++)
                        allNames.push(fullNames[i]);
                }
                tpRecordTime(values.join(','), 0, allNames.join(','));
            } else if (window.PerfTestRunner) {
                // WebKit: report each metric via PerfTestRunner; the last
                // report is flagged so the harness knows testing is done.
                for (var i = 0; i < fullNames.length; i++) {
                    var values = measuredValuesByFullName[fullNames[i]];
                    PerfTestRunner.reportValues(createTest(fullNames[i], values.aggregator, i + 1 == fullNames.length), values);
                }
            }
        }
    };
})();
Loading

0 comments on commit 3f31fa4

Please sign in to comment.