Skip to content

Commit

Permalink
Use tempfile in tutorials
Browse files Browse the repository at this point in the history
  • Loading branch information
comaniac committed Oct 21, 2020
1 parent 28e9ab7 commit 1059026
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 19 deletions.
21 changes: 12 additions & 9 deletions tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@
We use a convolution layer as an example in this tutorial.
"""

import tempfile

import numpy as np
import tvm
from tvm import te, auto_scheduler, topi
Expand Down Expand Up @@ -82,17 +84,19 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
# * :code:`num_measure_trials` is the number of measurement trials we can use during the search.
# We only make 10 trials in this tutorial for a fast demonstration. In practice, 1000 is a
# good value for the search to converge. You can do more trials according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a file `conv2d.json`.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a file.
# Note that here we use a temporary file for demonstration, but in practice you should use
# a more maintainable file name such as `conv2d.json`.
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * see :any:`auto_scheduler.TuningOptions`,
# :any:`auto_scheduler.LocalRPCMeasureContext` for more parameters.

logfile = tempfile.NamedTemporaryFile(prefix="conv2d", suffix=".json")
measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=10,
runner=measure_ctx.runner,
measure_callbacks=[auto_scheduler.RecordToFile("conv2d.json")],
measure_callbacks=[auto_scheduler.RecordToFile(logfile.name)],
)

######################################################################
Expand Down Expand Up @@ -149,15 +153,15 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
# Using the record file
# ^^^^^^^^^^^^^^^^^^^^^
# During the search, all measurement records are dumped into the record
# file "conv2d.json". The measurement records can be used to re-apply search results,
# file. The measurement records can be used to re-apply search results,
# resume the search, and perform other analyses.

######################################################################
# Here is an example where we load the best schedule from a file,
# print the equivalent python schedule API, and build the binary again.

# Load the measurement record for the best schedule
inp, res = auto_scheduler.load_best("conv2d.json", task.workload_key)
inp, res = auto_scheduler.load_best(logfile.name, task.workload_key)

# Print equivalent python schedule API. This can be used for debugging and
# learning the behavior of the auto-scheduler.
Expand All @@ -176,17 +180,16 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
# In the example below we resume the search status and do 5 more trials.


log_file = "conv2d.json"
cost_model = auto_scheduler.XGBModel()
cost_model.update_from_file(log_file)
cost_model.update_from_file(logfile.name)
search_policy = auto_scheduler.SketchPolicy(
task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)]
task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(logfile.name)]
)
measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=5,
runner=measure_ctx.runner,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
measure_callbacks=[auto_scheduler.RecordToFile(logfile.name)],
)
sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options=tune_option)

Expand Down
23 changes: 13 additions & 10 deletions tutorials/auto_scheduler/tune_matmul_x86.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@

We use matrix multiplication as an example in this tutorial.
"""
import tempfile

import numpy as np
import tvm
Expand Down Expand Up @@ -75,13 +76,15 @@ def matmul_add(N, L, M, dtype):
# * :code:`num_measure_trials` is the number of measurement trials we can use during the search.
# We only make 10 trials in this tutorial for a fast demonstration. In practice, 1000 is a
# good value for the search to converge. You can do more trials according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a file `matmul.json`.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a file.
# Note that here we use a temporary file for demonstration, but in practice you should use
# a more maintainable file name such as `matmul.json`.
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * see :any:`auto_scheduler.TuningOptions` for more parameters

logfile = tempfile.NamedTemporaryFile(prefix="matmul", suffix=".json")
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=10, measure_callbacks=[auto_scheduler.RecordToFile("matmul.json")]
num_measure_trials=10, measure_callbacks=[auto_scheduler.RecordToFile(logfile.name)]
)

######################################################################
Expand Down Expand Up @@ -133,15 +136,15 @@ def matmul_add(N, L, M, dtype):
# Using the record file
# ^^^^^^^^^^^^^^^^^^^^^
# During the search, all measurement records are dumped into the record
# file "matmul.json". The measurement records can be used to re-apply search results,
# file. The measurement records can be used to re-apply search results,
# resume the search, and perform other analyses.

######################################################################
# Here is an example where we load the best schedule from a file,
# print the equivalent python schedule API, and build the binary again.

# Load the measurement record for the best schedule
inp, res = auto_scheduler.load_best("matmul.json", task.workload_key)
inp, res = auto_scheduler.load_best(logfile.name, task.workload_key)

# Print equivalent python schedule API. This can be used for debugging and
# learning the behavior of the auto-scheduler.
Expand All @@ -160,19 +163,19 @@ def matmul_add(N, L, M, dtype):
# In the example below we resume the search status and do 5 more trials.


def resume_search(task, log_file):
def resume_search(task, logfile_name):
cost_model = auto_scheduler.XGBModel()
cost_model.update_from_file(log_file)
cost_model.update_from_file(logfile_name)
search_policy = auto_scheduler.SketchPolicy(
task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)]
task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(logfile_name)]
)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=5, measure_callbacks=[auto_scheduler.RecordToFile(log_file)]
num_measure_trials=5, measure_callbacks=[auto_scheduler.RecordToFile(logfile_name)]
)
sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options=tune_option)


# resume_search(task, "matmul.json")
# resume_search(task, logfile.name)

######################################################################
# .. note::
Expand Down

0 comments on commit 1059026

Please sign in to comment.