Skip to content

Commit acabb22

Browse files
committed
Merge pull request #5503 from spicyj/measure-analyze
benchmarking: measure and analyze scripts
2 parents 907dee2 + 844ca8b commit acabb22

File tree

3 files changed

+270
-0
lines changed

3 files changed

+270
-0
lines changed

scripts/bench/README.md

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,13 @@
Work-in-progress benchmarks.

## Running the suite

```
$ ./measure.py react-a.min.js >a.txt
$ ./measure.py react-b.min.js >b.txt
$ ./analyze.py a.txt b.txt
```

## Running one

One thing you can do with them is benchmark initial render time for a realistic hierarchy:

scripts/bench/analyze.py

Lines changed: 111 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,111 @@
1+
#!/usr/bin/env python
2+
# Copyright 2015, Facebook, Inc.
3+
# All rights reserved.
4+
#
5+
# This source code is licensed under the BSD-style license found in the
6+
# LICENSE file in the root directory of this source tree. An additional grant
7+
# of patent rights can be found in the PATENTS file in the same directory.
8+
9+
import math
10+
import sys
11+
12+
import numpy as np
13+
import numpy.random as npr
14+
import scipy.stats
15+
16+
17+
def _bootstrap_mean_sem(samples):
18+
"""Return the estimated standard error for a distribution's mean."""
19+
samples = np.array(samples)
20+
n = len(samples)
21+
indices = npr.randint(0, n, (10000, n))
22+
samples = samples[indices]
23+
means = np.sort(np.mean(samples, axis=1))
24+
return np.std(means, ddof=1)
25+
26+
27+
def _read_measurements(f):
28+
"""Read measurements from a file.
29+
30+
Returns {'a': [1.0, 2.0, 3.0], 'b': [5.0, 5.0, 5.0]} for a file containing
31+
the six lines: ['a 1', 'a 2', 'a 3', 'b 5', 'b 5', 'b 5'].
32+
"""
33+
measurements = {}
34+
for line in f:
35+
label, value = line.split(None, 1)
36+
measurements.setdefault(label, []).append(float(value))
37+
return measurements
38+
39+
40+
def _compute_mean_and_sd_of_ratio_from_delta_method(
41+
mean_test,
42+
sem_test,
43+
mean_control,
44+
sem_control
45+
):
46+
mean = (
47+
((mean_test - mean_control) / mean_control) -
48+
(pow(sem_control, 2) * mean_test / pow(mean_control, 3))
49+
)
50+
var = (
51+
pow(sem_test / mean_control, 2) +
52+
(pow(sem_control * mean_test, 2) / pow(mean_control, 4))
53+
)
54+
return (mean, math.sqrt(var))
55+
56+
57+
def _main():
58+
if len(sys.argv) != 3:
59+
sys.stderr.write("usage: analyze.py control.txt test.txt\n")
60+
return 1
61+
62+
ci_size = 0.99
63+
p_value = scipy.stats.norm.ppf(0.5 * (1 + ci_size))
64+
65+
control, test = sys.argv[1:]
66+
with open(control) as f:
67+
control_measurements = _read_measurements(f)
68+
with open(test) as f:
69+
test_measurements = _read_measurements(f)
70+
keys = set()
71+
keys.update(control_measurements.iterkeys())
72+
keys.update(test_measurements.iterkeys())
73+
74+
print "Comparing %s (control) vs %s (test)" % (control, test)
75+
print "Significant differences marked by ***"
76+
print "%% change from control to test, with %g%% CIs:" % (ci_size * 100,)
77+
print
78+
79+
any_sig = False
80+
for key in sorted(keys):
81+
print "* %s" % (key,)
82+
control_nums = control_measurements.get(key, [])
83+
test_nums = test_measurements.get(key, [])
84+
if not control_nums or not test_nums:
85+
print " skipping..."
86+
continue
87+
88+
mean_control = np.mean(control_nums)
89+
mean_test = np.mean(test_nums)
90+
sem_control = _bootstrap_mean_sem(control_nums)
91+
sem_test = _bootstrap_mean_sem(test_nums)
92+
93+
rat_mean, rat_sem = _compute_mean_and_sd_of_ratio_from_delta_method(
94+
mean_test, sem_test, mean_control, sem_control
95+
)
96+
rat_low = rat_mean - p_value * rat_sem
97+
rat_high = rat_mean + p_value * rat_sem
98+
99+
sig = rat_high < 0 or rat_low > 0
100+
any_sig = any_sig or sig
101+
102+
print " %% change: %+6.2f%% [%+6.2f%%, %+6.2f%%]%s" % (
103+
100 * rat_mean,
104+
100 * rat_low,
105+
100 * rat_high,
106+
' ***' if sig else ''
107+
)
108+
print " means: %g (control), %g (test)" % (mean_control, mean_test)
109+
110+
if __name__ == '__main__':
111+
sys.exit(_main())

scripts/bench/measure.py

Lines changed: 151 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,151 @@
1+
#!/usr/bin/env python
2+
# Copyright 2015, Facebook, Inc.
3+
# All rights reserved.
4+
#
5+
# This source code is licensed under the BSD-style license found in the
6+
# LICENSE file in the root directory of this source tree. An additional grant
7+
# of patent rights can be found in the PATENTS file in the same directory.
8+
9+
import functools
10+
import json
11+
import os
12+
import subprocess
13+
import sys
14+
15+
16+
def _run_js_in_jsc(jit, js, env):
    """Run the JavaScript source *js* under jsc (JavaScriptCore).

    *env* is exposed to the script as the global ENV (JSON-serialized), and
    shims (now, globalEval, report) are prepended so benchmarks can time
    themselves and emit labeled measurement lines.  JSC_useJIT in the child
    environment enables or disables the JIT per *jit*.
    """
    # The engine label is baked into report() so measurement lines from
    # the jit and nojit configurations stay distinguishable.
    engine_label = json.dumps('jsc_' + ('jit' if jit else 'nojit'))
    harness = """
        function now() {
            return preciseTime() * 1000;
        }
        function globalEval(code) {
            (0, eval)(code);
        }
        function report(label, time) {
            print(label + '_' + %(engine)s, time);
        }

        this.ENV = %(env)s;
        %(js)s
    """ % {
        'env': json.dumps(env),
        'js': js,
        'engine': engine_label,
    }
    child_env = dict(os.environ, JSC_useJIT='yes' if jit else 'no')
    return subprocess.check_call(['jsc', '-e', harness], env=child_env)
38+
39+
def _run_js_in_jsc_jit(js, env):
    # Equivalent to functools.partial(_run_js_in_jsc, True): jsc with JIT.
    return _run_js_in_jsc(True, js, env)


def _run_js_in_jsc_nojit(js, env):
    # Equivalent to functools.partial(_run_js_in_jsc, False): jsc, JIT off.
    return _run_js_in_jsc(False, js, env)
41+
42+
43+
def _run_js_in_node(js, env):
    """Run the JavaScript source *js* under node.

    Mirrors _run_js_in_jsc: *env* is exposed as the global ENV and shims
    (now, globalEval, readFile, report) are prepended for the benchmark.
    """
    harness = """
        function now() {
            var hrTime = process.hrtime();
            return hrTime[0] * 1e3 + hrTime[1] * 1e-6;
        }
        function globalEval(code) {
            var vm = require('vm');
            // Hide "module" so UMD wrappers use the global
            vm.runInThisContext('(function(module){' + code + '\\n})()');
        }
        function readFile(filename) {
            var fs = require('fs');
            return fs.readFileSync(filename);
        }
        function report(label, time) {
            console.log(label + '_node', time);
        }

        global.ENV = %(env)s;
        %(js)s
    """ % {
        'env': json.dumps(env),
        'js': js,
    }
    return subprocess.check_call(['node', '-e', harness])
70+
71+
72+
def _measure_ssr_ms(engine, react_path, bench_name, bench_path, measure_warm):
73+
engine(
74+
"""
75+
var reactCode = readFile(ENV.react_path);
76+
var START = now();
77+
globalEval(reactCode);
78+
var END = now();
79+
if (typeof React !== 'object') throw new Error('React not laoded');
80+
report('factory_ms', END - START);
81+
82+
globalEval(readFile(ENV.bench_path));
83+
if (typeof Benchmark !== 'function') {
84+
throw new Error('benchmark not loaded');
85+
}
86+
var START = now();
87+
var html = React.renderToString(React.createElement(Benchmark));
88+
html.charCodeAt(0); // flatten ropes
89+
var END = now();
90+
report('ssr_' + ENV.bench_name + '_cold_ms', END - START);
91+
92+
var warmup = ENV.measure_warm ? 80 : 0;
93+
var trials = ENV.measure_warm ? 40 : 0;
94+
95+
for (var i = 0; i < warmup; i++) {
96+
React.renderToString(React.createElement(Benchmark));
97+
}
98+
99+
for (var i = 0; i < trials; i++) {
100+
var START = now();
101+
var html = React.renderToString(React.createElement(Benchmark));
102+
html.charCodeAt(0); // flatten ropes
103+
var END = now();
104+
report('ssr_' + ENV.bench_name + '_warm_ms', END - START);
105+
}
106+
""",
107+
{
108+
'bench_name': bench_name,
109+
'bench_path': bench_path,
110+
'measure_warm': measure_warm,
111+
'react_path': react_path,
112+
},
113+
)
114+
115+
116+
def _main():
117+
if len(sys.argv) != 2:
118+
sys.stderr.write("usage: measure.py react.min.js >out.txt\n")
119+
return 1
120+
react_path = sys.argv[1]
121+
122+
trials = 30
123+
sys.stderr.write("Measuring SSR for PE benchmark (%d trials)\n" % trials)
124+
for i in range(trials):
125+
for engine in [
126+
_run_js_in_jsc_jit,
127+
_run_js_in_jsc_nojit,
128+
_run_js_in_node
129+
]:
130+
_measure_ssr_ms(engine, react_path, 'pe', 'bench-pe-es5.js', False)
131+
sys.stderr.write(".")
132+
sys.stderr.flush()
133+
sys.stderr.write("\n")
134+
135+
trials = 3
136+
sys.stderr.write("Measuring SSR for PE with warm JIT (%d slow trials)\n" % trials)
137+
for i in range(trials):
138+
for engine in [
139+
_run_js_in_jsc_jit,
140+
_run_js_in_jsc_nojit,
141+
_run_js_in_node
142+
]:
143+
_measure_ssr_ms(engine, react_path, 'pe', 'bench-pe-es5.js', True)
144+
sys.stderr.write(".")
145+
sys.stderr.flush()
146+
sys.stderr.write("\n")
147+
148+
149+
if __name__ == '__main__':
150+
sys.exit(_main())
151+

0 commit comments

Comments
 (0)