benchmarking: measure and analyze scripts #5503

Merged · 1 commit · Nov 20, 2015
8 changes: 8 additions & 0 deletions scripts/bench/README.md
@@ -1,5 +1,13 @@
Work-in-progress benchmarks.

## Running the suite

```
$ ./measure.py react-a.min.js >a.txt
$ ./measure.py react-b.min.js >b.txt
$ ./analyze.py a.txt b.txt
```
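
Each line that `measure.py` prints is a whitespace-separated `label value` pair, and `analyze.py` groups the samples by label before comparing the two files. A sketch of what the captured output looks like (values are illustrative):

```
factory_ms_node 12.64
ssr_pe_cold_ms_node 273.19
ssr_pe_warm_ms_jsc_jit 151.02
```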

## Running one
One thing you can do with them is benchmark initial render time for a realistic hierarchy:

(remaining lines collapsed in the diff)
111 changes: 111 additions & 0 deletions scripts/bench/analyze.py
@@ -0,0 +1,111 @@
#!/usr/bin/env python
# Copyright 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.

import math
import sys

import numpy as np
import numpy.random as npr
import scipy.stats


def _bootstrap_mean_sem(samples):
"""Return the estimated standard error for a distribution's mean."""
samples = np.array(samples)
n = len(samples)
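    # Draw 10,000 bootstrap resamples of size n (with replacement); the spread
    # of their means estimates the standard error of the sample mean.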
indices = npr.randint(0, n, (10000, n))
samples = samples[indices]
means = np.sort(np.mean(samples, axis=1))
return np.std(means, ddof=1)


def _read_measurements(f):
"""Read measurements from a file.

Returns {'a': [1.0, 2.0, 3.0], 'b': [5.0, 5.0, 5.0]} for a file containing
the six lines: ['a 1', 'a 2', 'a 3', 'b 5', 'b 5', 'b 5'].
"""
measurements = {}
for line in f:
label, value = line.split(None, 1)
measurements.setdefault(label, []).append(float(value))
return measurements


def _compute_mean_and_sd_of_ratio_from_delta_method(
mean_test,
sem_test,
mean_control,
sem_control
):
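    """Approximate the mean and standard deviation of the relative change
    (mean_test - mean_control) / mean_control using a second-order
    delta-method expansion, treating the two sample means as independent
    and approximately normal with the given standard errors.
    """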
mean = (
((mean_test - mean_control) / mean_control) -
(pow(sem_control, 2) * mean_test / pow(mean_control, 3))
)
var = (
pow(sem_test / mean_control, 2) +
(pow(sem_control * mean_test, 2) / pow(mean_control, 4))
)
return (mean, math.sqrt(var))


def _main():
if len(sys.argv) != 3:
sys.stderr.write("usage: analyze.py control.txt test.txt\n")
return 1

ci_size = 0.99
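    # two-sided normal critical value for the requested CI (about 2.576 for 99%)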
p_value = scipy.stats.norm.ppf(0.5 * (1 + ci_size))

control, test = sys.argv[1:]
with open(control) as f:
control_measurements = _read_measurements(f)
with open(test) as f:
test_measurements = _read_measurements(f)
keys = set()
keys.update(control_measurements.iterkeys())
keys.update(test_measurements.iterkeys())

print "Comparing %s (control) vs %s (test)" % (control, test)
print "Significant differences marked by ***"
print "%% change from control to test, with %g%% CIs:" % (ci_size * 100,)
print

any_sig = False
for key in sorted(keys):
print "* %s" % (key,)
control_nums = control_measurements.get(key, [])
test_nums = test_measurements.get(key, [])
if not control_nums or not test_nums:
print " skipping..."
continue

mean_control = np.mean(control_nums)
mean_test = np.mean(test_nums)
sem_control = _bootstrap_mean_sem(control_nums)
sem_test = _bootstrap_mean_sem(test_nums)

rat_mean, rat_sem = _compute_mean_and_sd_of_ratio_from_delta_method(
mean_test, sem_test, mean_control, sem_control
)
rat_low = rat_mean - p_value * rat_sem
rat_high = rat_mean + p_value * rat_sem

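        # significant when the confidence interval excludes zero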
sig = rat_high < 0 or rat_low > 0
any_sig = any_sig or sig

print " %% change: %+6.2f%% [%+6.2f%%, %+6.2f%%]%s" % (
100 * rat_mean,
100 * rat_low,
100 * rat_high,
' ***' if sig else ''
)
print " means: %g (control), %g (test)" % (mean_control, mean_test)

if __name__ == '__main__':
sys.exit(_main())
151 changes: 151 additions & 0 deletions scripts/bench/measure.py
@@ -0,0 +1,151 @@
#!/usr/bin/env python
# Copyright 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.

import functools
import json
import os
import subprocess
import sys


def _run_js_in_jsc(jit, js, env):
return subprocess.check_call(
['jsc', '-e', """
function now() {
return preciseTime() * 1000;
}
function globalEval(code) {
(0, eval)(code);
}
function report(label, time) {
print(label + '_' + %(engine)s, time);
}

this.ENV = %(env)s;
%(js)s
""" % {
'env': json.dumps(env),
'js': js,
'engine': json.dumps('jsc_' + ('jit' if jit else 'nojit')),
}],
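        # JSC_useJIT toggles JavaScriptCore's JIT so the same harness measures both configurations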
env=dict(os.environ, JSC_useJIT='yes' if jit else 'no'),
)

_run_js_in_jsc_jit = functools.partial(_run_js_in_jsc, True)
_run_js_in_jsc_nojit = functools.partial(_run_js_in_jsc, False)


def _run_js_in_node(js, env):
return subprocess.check_call(
['node', '-e', """
function now() {
var hrTime = process.hrtime();
return hrTime[0] * 1e3 + hrTime[1] * 1e-6;
}
function globalEval(code) {
var vm = require('vm');
// Hide "module" so UMD wrappers use the global
vm.runInThisContext('(function(module){' + code + '\\n})()');
}
function readFile(filename) {
var fs = require('fs');
return fs.readFileSync(filename);
}
function report(label, time) {
console.log(label + '_node', time);
}

global.ENV = %(env)s;
%(js)s
""" % {
'env': json.dumps(env),
'js': js
}]
)


def _measure_ssr_ms(engine, react_path, bench_name, bench_path, measure_warm):
engine(
"""
var reactCode = readFile(ENV.react_path);
var START = now();
globalEval(reactCode);
var END = now();
if (typeof React !== 'object') throw new Error('React not loaded');
report('factory_ms', END - START);

globalEval(readFile(ENV.bench_path));
if (typeof Benchmark !== 'function') {
throw new Error('benchmark not loaded');
}
var START = now();
var html = React.renderToString(React.createElement(Benchmark));
html.charCodeAt(0); // flatten ropes
var END = now();
report('ssr_' + ENV.bench_name + '_cold_ms', END - START);

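        // when measuring warm performance, run 80 untimed renders to warm the JIT, then time 40 more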
var warmup = ENV.measure_warm ? 80 : 0;
var trials = ENV.measure_warm ? 40 : 0;

for (var i = 0; i < warmup; i++) {
React.renderToString(React.createElement(Benchmark));
}

for (var i = 0; i < trials; i++) {
var START = now();
var html = React.renderToString(React.createElement(Benchmark));
html.charCodeAt(0); // flatten ropes
var END = now();
report('ssr_' + ENV.bench_name + '_warm_ms', END - START);
}
""",
{
'bench_name': bench_name,
'bench_path': bench_path,
'measure_warm': measure_warm,
'react_path': react_path,
},
)


def _main():
if len(sys.argv) != 2:
sys.stderr.write("usage: measure.py react.min.js >out.txt\n")
return 1
react_path = sys.argv[1]

trials = 30
sys.stderr.write("Measuring SSR for PE benchmark (%d trials)\n" % trials)
for i in range(trials):
for engine in [
_run_js_in_jsc_jit,
_run_js_in_jsc_nojit,
_run_js_in_node
]:
_measure_ssr_ms(engine, react_path, 'pe', 'bench-pe-es5.js', False)
sys.stderr.write(".")
sys.stderr.flush()
sys.stderr.write("\n")

trials = 3
sys.stderr.write("Measuring SSR for PE with warm JIT (%d slow trials)\n" % trials)
for i in range(trials):
for engine in [
_run_js_in_jsc_jit,
_run_js_in_jsc_nojit,
_run_js_in_node
]:
_measure_ssr_ms(engine, react_path, 'pe', 'bench-pe-es5.js', True)
sys.stderr.write(".")
sys.stderr.flush()
sys.stderr.write("\n")


if __name__ == '__main__':
sys.exit(_main())