[TVMC] A simplified TVMC API for python scripting (Part 1). (apache#7823)

* Introduce new TVMC Python API.

* Add simple testing model.

* Split result utils into stand-alone file.
Josh Fromm authored and trevor-m committed May 11, 2021
1 parent 1df5ee0 commit b6f4cd8
Showing 20 changed files with 1,310 additions and 580 deletions.
83 changes: 66 additions & 17 deletions python/tvm/auto_scheduler/search_task.py
@@ -43,40 +43,74 @@

 @tvm._ffi.register_object("auto_scheduler.HardwareParams")
 class HardwareParams(Object):
-    """The parameters of target hardware used to guide the search policy
+    """The parameters of target hardware used to guide the search policy.
+    When a parameter isn't provided, it will instead use the
+    current machine's default value if target is specified.
     TODO(jcf94): This is considered to be merged with the new Target specification:
     https://discuss.tvm.apache.org/t/rfc-tvm-target-specification/6844
     Parameters
     ----------
-    num_cores : int
+    num_cores : int, optional
         The number of device cores.
-    vector_unit_bytes : int
+    vector_unit_bytes : int, optional
         The width of vector units in bytes.
-    cache_line_bytes : int
+    cache_line_bytes : int, optional
         The size of cache line in bytes.
-    max_shared_memory_per_block : int
+    max_shared_memory_per_block : int, optional
         The max shared memory per block in bytes.
-    max_local_memory_per_block : int
+    max_local_memory_per_block : int, optional
         The max local memory per block in bytes.
-    max_threads_per_block : int
+    max_threads_per_block : int, optional
         The max number of threads per block.
-    max_vthread_extent : int
+    max_vthread_extent : int, optional
         The max vthread extent.
-    warp_size : int
+    warp_size : int, optional
         The thread numbers of a warp.
+    target : str or Target, optional
+        The compilation target. Used to determine default values if provided.
+    target_host : str or Target, optional
+        The compilation target host. Used to determine default values if provided.
     """

     def __init__(
         self,
-        num_cores,
-        vector_unit_bytes,
-        cache_line_bytes,
-        max_shared_memory_per_block,
-        max_local_memory_per_block,
-        max_threads_per_block,
-        max_vthread_extent,
-        warp_size,
+        num_cores=None,
+        vector_unit_bytes=None,
+        cache_line_bytes=None,
+        max_shared_memory_per_block=None,
+        max_local_memory_per_block=None,
+        max_threads_per_block=None,
+        max_vthread_extent=None,
+        warp_size=None,
+        target=None,
+        target_host=None,
     ):
+        # If target is provided, get the default parameters for this machine.
+        if target is not None:
+            if isinstance(target, str):
+                target = tvm.target.Target(target)
+            if isinstance(target_host, str):
+                target_host = tvm.target.Target(target_host)
+            default_params = _ffi_api.GetDefaultHardwareParams(target, target_host)
+
+            if num_cores is None:
+                num_cores = default_params.num_cores
+            if vector_unit_bytes is None:
+                vector_unit_bytes = default_params.vector_unit_bytes
+            if cache_line_bytes is None:
+                cache_line_bytes = default_params.cache_line_bytes
+            if max_shared_memory_per_block is None:
+                max_shared_memory_per_block = default_params.max_shared_memory_per_block
+            if max_local_memory_per_block is None:
+                max_local_memory_per_block = default_params.max_local_memory_per_block
+            if max_threads_per_block is None:
+                max_threads_per_block = default_params.max_threads_per_block
+            if max_vthread_extent is None:
+                max_vthread_extent = default_params.max_vthread_extent
+            if warp_size is None:
+                warp_size = default_params.warp_size
+
         self.__init_handle_by_constructor__(
             _ffi_api.HardwareParams,
             num_cores,
@@ -89,6 +123,21 @@ def __init__(
             warp_size,
         )

+    def __str__(self):
+        """Pretty printing for hardware parameter configuration."""
+        format_str = (
+            "HardwareParams:\n"
+            f" num_cores: {self.num_cores}\n"
+            f" vector_unit_bytes: {self.vector_unit_bytes}\n"
+            f" cache_line_bytes: {self.cache_line_bytes}\n"
+            f" max_shared_memory_per_block: {self.max_shared_memory_per_block}\n"
+            f" max_local_memory_per_block: {self.max_local_memory_per_block}\n"
+            f" max_threads_per_block: {self.max_threads_per_block}\n"
+            f" max_vthread_extent: {self.max_vthread_extent}\n"
+            f" warp_size: {self.warp_size}\n"
+        )
+        return format_str


 @tvm._ffi.register_object("auto_scheduler.TuningOptions")
 class TuningOptions(Object):
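With target-based defaults in place, a HardwareParams can now be built from a target alone. A minimal usage sketch (the target string and the pinned value are illustrative, not from the commit):

    import tvm
    from tvm.auto_scheduler import HardwareParams

    # Pin only num_cores; every omitted argument defaults to None, so the
    # constructor resolves it from the defaults for the given target.
    hw_params = HardwareParams(num_cores=8, target="llvm")
    print(hw_params)  # the new __str__ lists all eight resolved fields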
5 changes: 4 additions & 1 deletion python/tvm/auto_scheduler/task_scheduler.py
@@ -329,7 +329,10 @@ def tune(
             tune_option.num_measures_per_round, tune_option.num_measure_trials // len(self.tasks)
         )
         if self.num_measures_per_round <= 0:
-            raise ValueError("num_measure_trials is too small. Please set it to a higher value.")
+            raise ValueError(
+                "num_measure_trials is too small. Please set it to a higher value."
+                f" It should be at least {len(self.tasks)} for this model."
+            )

         # restore the status of the task scheduler from a log file
         if self.load_log_file:
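The sharper message reflects how num_measures_per_round is derived: integer division of the trial budget by the task count. A small worked example of the failing case (illustrative numbers, not from the commit):

    # With 64 tuning tasks, any num_measure_trials below 64 rounds down to
    # zero measurements per round, which is the case the new message explains.
    num_tasks = 64
    num_measure_trials = 50
    num_measures_per_round = min(16, num_measure_trials // num_tasks)  # == 0
    # -> ValueError: num_measure_trials is too small ... at least 64 for this model.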
3 changes: 3 additions & 0 deletions python/tvm/driver/tvmc/__init__.py
@@ -22,6 +22,9 @@
from . import autotuner
from . import compiler
from . import runner
from . import result_utils
from .frontends import load_model as load
from .compiler import compile_model as compile
from .runner import run_module as run
from .autotuner import tune_model as tune
from .model import TVMCModel, TVMCPackage, TVMCResult
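Together these re-exports give the simplified scripting flow named in the commit title. A minimal end-to-end sketch (the model path is made up, and keyword names such as target and device are assumptions about compile_model and run_module, not confirmed signatures):

    from tvm.driver import tvmc

    # load -> TVMCModel, compile -> TVMCPackage, run -> TVMCResult,
    # mirroring the aliases exported above.
    model = tvmc.load("resnet50.onnx")            # hypothetical model file
    package = tvmc.compile(model, target="llvm")  # assumed keyword argument
    result = tvmc.run(package, device="cpu")      # assumed keyword argument
    print(result)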