Run the kiva benchmark on all backends #666

Merged: 2 commits, merged on Mar 3, 2021
87 changes: 70 additions & 17 deletions enable/gcbench/bench.py
@@ -26,7 +26,7 @@
"quartz": "enable.null.quartz",
},
"file": {
"pdf": "enable.null.pdf",
"pdf": "enable.gcbench.pdf",
"ps": "enable.null.ps",
"svg": "enable.null.svg",
},
@@ -38,16 +38,26 @@ def benchmark(outdir=None):
"""
suite = gen_suite()

results = {}
# NOTE: Only checking UI backends for now
for name, mod_name in _BACKENDS["ui"].items():
print(f"Benchmarking backend: {name}", end="")
try:
module = importlib.import_module(mod_name)
except ImportError:
print(" ... Not available")
continue
results[name] = benchmark_backend(suite, name, module, outdir=outdir)
results = {t: {} for t in _BACKENDS}
aaronayres35 (Contributor) commented on Mar 2, 2021:
could `t` be `backend_type` or `btype` here, like below?

for btype, backends in _BACKENDS.items():
for name, mod_name in backends.items():
print(f"Benchmarking backend: {name}", end="")
try:
module = importlib.import_module(mod_name)
except ImportError:
print(" ... Not available")
continue

# UI backends are checked for performance, File backends are not.
if btype == "ui":
results[btype][name] = benchmark_backend(
suite, name, module, outdir=outdir
)
else:
# XXX: Use the fact that `name` is the same as the file ext.
results[btype][name] = exercise_backend(
suite, name, module, extension=name, outdir=outdir
)

return results

@@ -58,7 +68,7 @@ def benchmark_backend(suite, mod_name, module, outdir=None):
GraphicsContext = getattr(module, "GraphicsContext")
gc = GraphicsContext(_SIZE)

timings = {}
results = {}
for name, symbol in suite.items():
print(f"\n\tBenchmark {name}", end="")
try:
@@ -70,20 +80,63 @@
# Double sized
with gc:
gc.scale_ctm(2, 2)
timings[name] = gen_timings(gc, instance)
stats = gen_timings(gc, instance)
else:
# Normal scale
timings[name] = gen_timings(gc, instance)
stats = gen_timings(gc, instance)

if timings[name] is None:
if stats is None:
print(f" ... Failed", end="")
results[name] = None
continue

if timings[name] is not None and outdir is not None:
results[name] = {"times": stats}
if outdir is not None:
fname = os.path.join(outdir, f"{mod_name}.{name}.png")
gc.save(fname)
results[name]["format"] = "png"
results[name]["filename"] = os.path.basename(fname)

print() # End the line that was left
return timings
return results


def exercise_backend(suite, mod_name, module, extension, outdir=None):
""" Exercise a single backend
"""
GraphicsContext = getattr(module, "GraphicsContext")

results = {name: None for name in suite}
for name, symbol in suite.items():
# Skip 2x versions
if name.endswith("2x"):
results[name] = {"skip": True}
continue

# Use a fresh context each time
gc = GraphicsContext(_SIZE)

print(f"\n\tBenchmark {name}", end="")
try:
instance = symbol(gc, module)
except Exception:
continue

try:
instance()
except Exception:
print(f" ... Failed", end="")
continue

results[name] = {"times": {}}
if outdir is not None:
fname = os.path.join(outdir, f"{mod_name}.{name}.{extension}")
gc.save(fname)
results[name]["format"] = extension
results[name]["filename"] = os.path.basename(fname)

print() # End the line that was left
return results


def gen_suite():
(remaining lines of bench.py not shown)
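
For orientation, here is a sketch of the nested results structure that `benchmark()` now appears to build: UI backends carry timings, file backends carry only output metadata. The backend and benchmark names and all numbers below are invented for illustration; only the key names ("times", "format", "filename", "skip") come from the diff above.

# Illustrative shape of the dict returned by benchmark(); values are made up.
results = {
    "ui": {
        "kiva.agg": {
            "draw_lines": {
                "times": {"mean": 0.0123},   # stats from gen_timings()
                "format": "png",
                "filename": "kiva.agg.draw_lines.png",
            },
            "draw_images": None,             # benchmark failed on this backend
        },
    },
    "file": {
        "pdf": {
            "draw_lines": {
                "times": {},                 # file backends are not timed
                "format": "pdf",
                "filename": "pdf.draw_lines.pdf",
            },
            "draw_lines 2x": {"skip": True}, # "2x" variants are skipped
        },
    },
}
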
30 changes: 30 additions & 0 deletions enable/gcbench/pdf.py
@@ -0,0 +1,30 @@
# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen.canvas import Canvas

import kiva.pdf as pdf_backend

# Pass it along
CompiledPath = pdf_backend.CompiledPath


class GraphicsContext(pdf_backend.GraphicsContext):
""" This is a wrapper of the PDF GraphicsContext which works with the
benchmark program.
"""
def __init__(self, size, *args, **kw):
canvas = Canvas('', pagesize=letter)
super().__init__(canvas, *args, **kw)

def save(self, filename, *args, **kw):
# Reportlab is a bit silly
self.gc._filename = filename
super().save()
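
For context, a minimal sketch of how this wrapper would be driven by the benchmark harness. The drawing calls are ordinary Kiva GraphicsContext API inherited from kiva.pdf; the size, the specific drawing calls, and the filename are illustrative only.

from enable.gcbench.pdf import GraphicsContext

gc = GraphicsContext((256, 256))   # size tuple, as passed by the benchmark

# Draw something with the usual Kiva API
gc.begin_path()
gc.move_to(10, 10)
gc.line_to(246, 246)
gc.set_stroke_color((0.0, 0.0, 1.0, 1.0))
gc.set_line_width(2.0)
gc.stroke_path()

# save() stuffs the filename into the reportlab Canvas before writing,
# working around reportlab wanting the filename at Canvas construction time.
gc.save("draw_lines.pdf")
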
79 changes: 49 additions & 30 deletions enable/gcbench/publish.py
@@ -33,6 +33,8 @@
td.invalid {{
background: lightpink;
}}
td.skipped {{
}}
</style>
<h3>Kiva Backend Benchmark Results</h3>
<p>
@@ -79,17 +81,21 @@
def publish(results, outdir):
""" Write the test results out as a simple webpage.
"""
backends = list(results)
backends = []
functions = {}
for bend in backends:
for func, stats in results[bend].items():
functions.setdefault(func, {})[bend] = stats

# Scale timing values relative to the "kiva.agg" backend implementation
# Transpose the results so that they're accessible by function.
for btype, backend_results in results.items():
backends.extend(list(backend_results))
for bend in backend_results:
for name, res in backend_results[bend].items():
functions.setdefault(name, {})[bend] = res

comparisons = {}
for name, results in functions.items():
_build_function_page(name, results, outdir)
comparisons[name] = _format_mean(results, "kiva.agg")
# Scale timing values relative to the "kiva.agg" backend implementation
comparisons[name] = _format_benchmark(results, "kiva.agg")

comparison_table = _build_comparison_table(backends, comparisons)
path = os.path.join(outdir, "index.html")
@@ -108,10 +114,8 @@ def _build_comparison_table(backends, comparisons):
link = f'<a href="{name}.html">'
row = [f"<td>{link}{name}</a></td>"]
for bend in backends:
# Each backend stat includes a "valid" flag
stat, valid = stats[bend]
# Which gets used to add a CSS class for styling
klass = "valid" if valid else "invalid"
# Each backend stat includes a CSS class for table styling
stat, klass = stats[bend]
A contributor commented:
Maybe this `stats` could be called something different in the for loop above? It is the stats after they have been run through a formatting function, e.g. `_format_benchmark`.

It is pretty inconsequential, but the names are a little confusing; I was initially reading `stats` as a dict of a different form.

row.append(f'<td class="{klass}">{stat}</td>')
# Concat all the <td>'s into a single string
rows.append("".join(row))
@@ -131,16 +135,16 @@ def _build_function_page(benchmark_name, results, outdir):
"""
# Build the rows
backends = []
img_tds, stat_tds = "", ""
for name, stats in results.items():
if stats is None:
output_tds, stat_tds = "", ""
for backend_name, result in results.items():
if result is None or "skip" in result:
continue

backends.append(name)
img_tds += f'<td><img src="{name}.{benchmark_name}.png" /></td>'
stat_tds += f"<td>{_format_stats(stats)}</td>"
backends.append(backend_name)
output_tds += f"<td>{_format_output(result)}</td>"
stat_tds += f"<td>{_format_stats(result['times'])}</td>"

rows = f"<tr>{img_tds}</tr>\n<tr>{stat_tds}</tr>"
rows = f"<tr>{output_tds}</tr>\n<tr>{stat_tds}</tr>"

# Headers
headers = "\n".join(f"<th>{name}</th>" for name in backends)
@@ -155,27 +159,42 @@
fp.write(content)


def _format_mean(results, baseline):
""" Convert stats for individual benchmark runs into data for a table cell.
def _format_benchmark(results, baseline):
""" Convert stats for backend benchmark runs into data for a table row.
"""
basestats = results[baseline]
if basestats is None:
return {name: ("invalid", False) for name in results}

basevalue = basestats["mean"]
basevalue = results[baseline]["times"]["mean"]
formatted = {}
for name, stats in results.items():
if stats is not None:
relvalue = basevalue / stats["mean"]
formatted[name] = (f"{relvalue:0.2f}", True)
for name, result in results.items():
if result is not None:
stats = result.get("times", {})
if stats:
relvalue = basevalue / stats["mean"]
formatted[name] = (f"{relvalue:0.2f}", "valid")
else:
if "skip" in result:
# Benchmark was skipped
formatted[name] = ("\N{HEAVY MINUS SIGN}", "skipped")
else:
# No times, but the backend succeeded
formatted[name] = ("\N{HEAVY CHECK MARK}", "valid")
else:
formatted[name] = ("n/a", False)
formatted[name] = ("\N{HEAVY BALLOT X}", "invalid")

return formatted


def _format_output(result):
""" Convert the output from a single benchmark run into an image embed or
link.
"""
if result["format"] in ("png", "svg"):
return f'<img src="{result["filename"]}" />'
else:
return f'<a href="{result["filename"]}">download</a>'


def _format_stats(stats):
""" Convert stats for a single benchmark run into a table.
""" Convert timing stats for a single benchmark run into a table.
"""
rows = [
f"<tr><td>{key.capitalize()}</td><td>{value:0.4f}</td></tr>"
(remaining lines of publish.py not shown)
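
As a closing note on how the comparison table is filled in: `_format_benchmark` divides the baseline's mean time by each backend's mean time, so numbers above 1.00 mean "faster than kiva.agg", and backends without timings fall back to the check, minus, or cross glyphs. A small hand-worked example with invented numbers (the mix of states in one row is just to show each possibility):

# Hypothetical mean times, in seconds, for one benchmark function
base_mean = 0.020      # mean time for the "kiva.agg" baseline
backend_mean = 0.010   # mean time for some other UI backend

relvalue = base_mean / backend_mean
print(f"{relvalue:0.2f}")   # -> "2.00", i.e. twice as fast as kiva.agg

# The (cell text, CSS class) pairs produced for a comparison row
comparison_row = {
    "kiva.agg": ("1.00", "valid"),                 # baseline vs. itself
    "quartz": ("\N{HEAVY BALLOT X}", "invalid"),   # backend failed to run
    "pdf": ("\N{HEAVY CHECK MARK}", "valid"),      # ran, but not timed
    "svg": ("\N{HEAVY MINUS SIGN}", "skipped"),    # benchmark was skipped
}
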