Commit 1c2f40e

[perf-test] Add in the Benchmark_DTrace driver.
This and the associated *.d file can be used to determine dynamic retain/release counts over the perf test suite.

1 parent: d4a95ee
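
The dtrace run itself happens inside the new script (it shells out to `sudo dtrace`), so the driver only needs to be started from a build tree that has the `PerfTests_O` benchmark binary next to it. A minimal sketch of an invocation, assuming the CMake rules below have placed the configured script in the build's `bin/` directory; the path and filter pattern are illustrative, not from the commit:

    # Hypothetical invocation from the root of a Swift build directory.
    # Benchmark_DTrace shells out to `sudo dtrace` itself, so this must run
    # where dtrace is available and sudo can prompt.
    import subprocess

    subprocess.check_call(
        ['./bin/Benchmark_DTrace',  # configured from Benchmark_DTrace.in (path assumed)
         '-filter', 'Array.*',      # only run tests whose names match this regex
         '-csv'])                   # emit csv rows instead of aligned columns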

4 files changed (+167, -10 lines)

benchmark/scripts/Benchmark_DTrace.in

Lines changed: 111 additions & 0 deletions
#!/usr/bin/env python

# ===--- Benchmark_DTrace.in ----------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===----------------------------------------------------------------------===//

import os
import sys
import subprocess
import argparse

DRIVER_LIBRARY_PATH = "@PATH_TO_DRIVER_LIBRARY@"
sys.path.append(DRIVER_LIBRARY_PATH)
DTRACE_PATH = os.path.join(DRIVER_LIBRARY_PATH, 'swift_stats.d')

import perf_test_driver

# Regexes for the XFAIL_LIST. Matches against '([Onone|O|Ounchecked],TestName)'
XFAIL_LIST = [
]


class DTraceResult(perf_test_driver.Result):

    def __init__(self, name, status, output, csv_output):
        perf_test_driver.Result.__init__(self, name, status, output, XFAIL_LIST)
        self.csv_output = csv_output

    @classmethod
    def data_headers(cls):
        return ['Name', 'Result', 'strong_retain', 'strong_retain/iter',
                'strong_release', 'strong_release/iter']

    @classmethod
    def data_format(cls, max_test_len):
        non_name_headers = DTraceResult.data_headers()[1:]
        fmt = ('{:<%d}' % (max_test_len + 5)) + \
            ''.join(['{:<%d}' % (len(h) + 2) for h in non_name_headers])
        return fmt

    @classmethod
    def print_data_header(cls, max_test_len, csv_output):
        headers = cls.data_headers()
        if csv_output:
            print(','.join(headers))
            return
        print(cls.data_format(max_test_len).format(*headers))

    def print_data(self, max_test_len):
        result = [self.get_name(), self.get_result()] + map(str, self.output)
        if self.csv_output:
            print(','.join(result))
            return

        print(DTraceResult.data_format(max_test_len).format(*result))


class DTracePerfTestDriver(perf_test_driver.PerfTestDriver):
    def __init__(self, binary, xfail_list, csv_output):
        perf_test_driver.PerfTestDriver.__init__(self, binary, xfail_list,
                                                 enable_parallel=False,
                                                 opt_levels=['O'])
        self.csv_output = csv_output

    def print_data_header(self, max_test_len):
        DTraceResult.print_data_header(max_test_len, self.csv_output)

    def prepare_input(self, name):
        return {}

    def process_input(self, data):
        test_name = '({}_{})'.format(data['opt'], data['test_name'])
        print("Running {}...".format(test_name))
        sys.stdout.flush()

        def get_results_with_iters(iters):
            p = subprocess.Popen(
                ['sudo', 'dtrace', '-s', DTRACE_PATH, '-c',
                 '%s %s %s' % (data['path'], data['test_name'],
                               '--num-iters=%d' % iters)],
                stdout=subprocess.PIPE, stderr=open('/dev/null', 'w'))
            results = [x for x in p.communicate()[0].split("\n") if len(x) > 0]
            return [x.split(',')[1]
                    for x in results[results.index('DTRACE RESULTS') + 1:]]

        iter_2_results = get_results_with_iters(2)
        iter_3_results = get_results_with_iters(3)

        results = []
        for x in zip(iter_2_results, iter_3_results):
            results.append(x[1])
            results.append(int(x[1]) - int(x[0]))

        return DTraceResult(test_name, 0, results, self.csv_output)


SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('-filter', type=str, default=None,
                        help='Filter out any test that does not match the given regex')
    parser.add_argument('-csv', default=False, action='store_true',
                        help="Emit csv output", dest='csv_output')
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    g = DTracePerfTestDriver(SWIFT_BIN_DIR, XFAIL_LIST, args.csv_output)
    if g.run(args.filter):
        sys.exit(0)
    else:
        sys.exit(-1)
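
The core trick sits in `process_input` above: each benchmark is run under DTrace twice, once with `--num-iters=2` and once with `--num-iters=3`, and the two aggregate counts are subtracted. One-time setup retains and releases appear in both runs and cancel, so the difference is the retain/release traffic of exactly one iteration; the `strong_retain`/`strong_release` columns carry the 3-iteration totals and the `/iter` columns carry the differences. A small sketch with made-up counts:

    # Hypothetical aggregate counts for one benchmark at two iteration counts.
    iter_2 = {'swift_retain': 1040, 'swift_release': 1040}
    iter_3 = {'swift_retain': 1060, 'swift_release': 1060}

    # 3-iteration totals minus 2-iteration totals leave one iteration's
    # traffic; whatever the benchmark's setup code retained cancels out.
    per_iter = {k: iter_3[k] - iter_2[k] for k in iter_3}
    # per_iter == {'swift_retain': 20, 'swift_release': 20}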

benchmark/scripts/CMakeLists.txt

Lines changed: 9 additions & 0 deletions
@@ -8,6 +8,10 @@ configure_file(
   ${CMAKE_CURRENT_SOURCE_DIR}/Benchmark_RuntimeLeaksRunner.in
   ${CMAKE_CURRENT_BINARY_DIR}/Benchmark_RuntimeLeaksRunner
   @ONLY)
+configure_file(
+  ${CMAKE_CURRENT_SOURCE_DIR}/Benchmark_DTrace.in
+  ${CMAKE_CURRENT_BINARY_DIR}/Benchmark_DTrace
+  @ONLY)
 set(PATH_TO_DRIVER_LIBRARY)

 file(COPY ${CMAKE_CURRENT_BINARY_DIR}/Benchmark_GuardMalloc

@@ -20,6 +24,11 @@ file(COPY ${CMAKE_CURRENT_BINARY_DIR}/Benchmark_RuntimeLeaksRunner
   FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ
   GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)

+file(COPY ${CMAKE_CURRENT_BINARY_DIR}/Benchmark_DTrace
+     DESTINATION ${CMAKE_BINARY_DIR}/bin
+     FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ
+     GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
+
 file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/Benchmark_Driver
   DESTINATION ${CMAKE_BINARY_DIR}/bin
   FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ
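
The `@ONLY` flag on `configure_file` is what makes the `.in` template safe to process: only `@VAR@` references are replaced, while `${VAR}` syntax is ignored, so the `@PATH_TO_DRIVER_LIBRARY@` placeholder in the Python source above becomes a concrete path without disturbing the rest of the script. Roughly the following, with a made-up output path:

    # Rough Python stand-in for what configure_file(... @ONLY) does to one
    # line of Benchmark_DTrace.in; the substituted path is hypothetical.
    template = 'DRIVER_LIBRARY_PATH = "@PATH_TO_DRIVER_LIBRARY@"'
    print(template.replace('@PATH_TO_DRIVER_LIBRARY@',
                           '/build/benchmark/scripts/perf_test_driver'))
    # DRIVER_LIBRARY_PATH = "/build/benchmark/scripts/perf_test_driver"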

benchmark/scripts/perf_test_driver/perf_test_driver.py

Lines changed: 20 additions & 10 deletions
@@ -53,28 +53,39 @@ def merge_in_extra_data(self, d):
         """Rather than modifying the extra data dict, just return it as a no-op"""
         return d

+    def print_data(self, max_test_len):
+        fmt = '{:<%d}{:}' % (max_test_len + 5)
+        print(fmt.format(self.get_name(), self.get_result()))
+
 def _unwrap_self(args):
     return type(args[0]).process_input(*args)

-class BenchmarkDriver(object):
+PerfTestDriver_OptLevels = ['Onone', 'O', 'Ounchecked']

-    OptLevels = ['Onone', 'O', 'Ounchecked']
+class PerfTestDriver(object):

-    def __init__(self, binary_dir, xfail_list, enable_parallel=False):
-        self.targets = [(os.path.join(binary_dir, 'Benchmark_%s' % o), o) for o in BenchmarkDriver.OptLevels]
+    def __init__(self, binary_dir, xfail_list, enable_parallel=False, opt_levels=PerfTestDriver_OptLevels):
+        self.targets = [(os.path.join(binary_dir, 'PerfTests_%s' % o), o) for o in opt_levels]
         self.xfail_list = xfail_list
         self.enable_parallel = enable_parallel
         self.data = None

+    def print_data_header(self, max_test_len):
+        fmt = '{:<%d}{:}' % (max_test_len + 5)
+        print(fmt.format('Name', 'Result'))
+
     def prepare_input(self, name, opt_level):
         raise RuntimeError("Abstract method")

     def process_input(self, data):
         raise RuntimeError("Abstract method")

-    def run_for_opt_level(self, binary, opt_level):
+    def run_for_opt_level(self, binary, opt_level, test_filter):
         print("testing driver at path: %s" % binary)
         names = [n.strip() for n in subprocess.check_output([binary, "--list"]).split()[2:]]
+        if test_filter:
+            regex = re.compile(test_filter)
+            names = [n for n in names if regex.match(n)]

         def prepare_input_wrapper(name):
             x = {'opt': opt_level, 'path': binary, 'test_name': name}

@@ -101,15 +112,14 @@ def reduce_results(acc, r):

     def print_data(self, data, max_test_len):
         print("Results:")
-        fmt = '{:<%d}{:}' % (max_test_len + 5)
+        self.print_data_header(max_test_len)
         for d in data:
             for r in d['result']:
-                print(fmt.format(r.get_name(), r.get_result()))
+                r.print_data(max_test_len)

-    def run(self):
-        self.data = [self.run_for_opt_level(binary, opt_level) for binary, opt_level in self.targets]
+    def run(self, test_filter=None):
+        self.data = [self.run_for_opt_level(binary, opt_level, test_filter) for binary, opt_level in self.targets]
         max_test_len = reduce(max, [d['max_test_len'] for d in self.data])
         has_failure = reduce(max, [d['has_failure'] for d in self.data])
         self.print_data(self.data, max_test_len)
         return not has_failure
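
Taken together, these changes turn the old `BenchmarkDriver` into a reusable `PerfTestDriver` base class: the opt levels and binary prefix (`PerfTests_%s`) are now parameters, `run` accepts a test filter, and both header and per-row printing dispatch through overridable hooks, which is exactly how `Benchmark_DTrace.in` above plugs in. A minimal sketch of a subclass; the class name and its no-op hooks are illustrative, not part of the commit:

    import perf_test_driver

    class NoopDriver(perf_test_driver.PerfTestDriver):  # hypothetical subclass
        def __init__(self, binary_dir):
            perf_test_driver.PerfTestDriver.__init__(
                self, binary_dir, xfail_list=[],
                enable_parallel=False, opt_levels=['O'])

        def prepare_input(self, name):
            # Extra per-test data to merge into {'opt', 'path', 'test_name'}.
            return {}

        def process_input(self, data):
            # Run data['path'] on data['test_name'] and wrap the outcome;
            # status 0 is a pass, and Result handles XFAIL bookkeeping.
            return perf_test_driver.Result(data['test_name'], 0, [], [])
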
benchmark/scripts/perf_test_driver/swift_stats.d

Lines changed: 27 additions & 0 deletions
/*===--- swift_stats.d ----------------------------------------------------===//
 *
 * This source file is part of the Swift.org open source project
 *
 * Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
 * Licensed under Apache License v2.0 with Runtime Library Exception
 *
 * See http://swift.org/LICENSE.txt for license information
 * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
 *
 *===----------------------------------------------------------------------===*/

pid$target:*:swift_retain:entry
{
    @counts[probefunc] = count();
}

pid$target:*:swift_release:entry
{
    @counts[probefunc] = count();
}

END
{
    printf("\nDTRACE RESULTS\n");
    printa("%s,%@u\n", @counts)
}
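
The `END` clause defines the output contract that `Benchmark_DTrace` depends on: a `DTRACE RESULTS` sentinel line, then one `probefunc,count` row per aggregation entry. A sketch of how the driver's `get_results_with_iters` slices that stream, with invented output:

    # Hypothetical stdout from one benchmark run under swift_stats.d.
    sample = ("benchmark noise...\n"
              "DTRACE RESULTS\n"
              "swift_release,1040\n"
              "swift_retain,1040\n")

    lines = [x for x in sample.split("\n") if len(x) > 0]
    # Everything after the sentinel is 'probefunc,count'; keep the counts.
    counts = [x.split(',')[1] for x in lines[lines.index('DTRACE RESULTS') + 1:]]
    print(counts)  # ['1040', '1040']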
