11 changes: 0 additions & 11 deletions apps/benchmark/adreno/adreno_gpu_bench_clml.py
@@ -84,17 +84,6 @@ def get_network(name, batch_size, dtype="float32"):
         net, params = testing.squeezenet.get_workload(
             batch_size=batch_size, version=version, dtype=dtype
         )
-    elif name == "mxnet":
-        # an example for mxnet model
-        from mxnet.gluon.model_zoo.vision import get_model
-
-        block = get_model("resnet18_v1", pretrained=True)
-        net, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
-        net = net["main"]
-        net = relay.Function(
-            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
-        )
-        net = tvm.IRModule.from_expr(net)
     else:
         raise ValueError("Unsupported network: " + name)

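With the "mxnet" branch removed, get_network() in these benchmark scripts now falls straight through to the ValueError for that name. A minimal sketch (illustrative only, not part of this diff) of how a ResNet-18 workload can still be obtained from TVM's built-in test models instead of MXNet:

from tvm.relay import testing

# Illustrative only: built-in test model in place of the removed MXNet path.
net, params = testing.resnet.get_workload(
    num_layers=18, batch_size=1, dtype="float32"
)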
11 changes: 0 additions & 11 deletions apps/benchmark/adreno/adreno_gpu_bench_texture.py
@@ -83,17 +83,6 @@ def get_network(name, batch_size, dtype="float32"):
         net, params = testing.squeezenet.get_workload(
             batch_size=batch_size, version=version, dtype=dtype
         )
-    elif name == "mxnet":
-        # an example for mxnet model
-        from mxnet.gluon.model_zoo.vision import get_model
-
-        block = get_model("resnet18_v1", pretrained=True)
-        net, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
-        net = net["main"]
-        net = relay.Function(
-            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
-        )
-        net = tvm.IRModule.from_expr(net)
     else:
         raise ValueError("Unsupported network: " + name)

11 changes: 0 additions & 11 deletions apps/benchmark/util.py
@@ -72,17 +72,6 @@ def get_network(name, batch_size, dtype="float32"):
         net, params = testing.squeezenet.get_workload(
             batch_size=batch_size, version=version, dtype=dtype
         )
-    elif name == "mxnet":
-        # an example for mxnet model
-        from mxnet.gluon.model_zoo.vision import get_model
-
-        block = get_model("resnet18_v1", pretrained=True)
-        net, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
-        net = net["main"]
-        net = relay.Function(
-            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
-        )
-        net = tvm.IRModule.from_expr(net)
     else:
         raise ValueError("Unsupported network: " + name)

1 change: 0 additions & 1 deletion docs/conf.py
@@ -441,7 +441,6 @@ def jupyter_notebook(script_blocks, gallery_conf, target_dir, real_func):
     for p in [
         tvm_path / "vta" / "tutorials" / "frontend",
         tvm_path / "vta" / "tutorials" / "optimize",
-        tvm_path / "vta" / "tutorials" / "autotvm",
     ]
 )

153 changes: 0 additions & 153 deletions gallery/how_to/compile_models/from_mxnet.py

This file was deleted.

30 changes: 15 additions & 15 deletions gallery/how_to/deploy_models/deploy_model_on_nano.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,22 +102,22 @@
# -----------------------------
# Back to the host machine, which should have a full TVM installed (with LLVM).
#
# We will use pre-trained model from
# `MXNet Gluon model zoo <https://mxnet.apache.org/api/python/gluon/model_zoo.html>`_.
# You can found more details about this part at tutorial :ref:`tutorial-from-mxnet`.
# We will use pre-trained model from torchvision

import sys

from mxnet.gluon.model_zoo.vision import get_model
import torch
import torchvision
from PIL import Image
import numpy as np

# one line to get the model
try:
block = get_model("resnet18_v1", pretrained=True)
except RuntimeError:
print("Downloads from mxnet no longer supported", file=sys.stderr)
sys.exit(0)
model_name = "resnet18"
model = getattr(torchvision.models, model_name)(pretrained=True)
model = model.eval()

# We grab the TorchScripted model via tracing
input_shape = [1, 3, 224, 224]
input_data = torch.randn(input_shape)
scripted_model = torch.jit.trace(model, input_data).eval()

######################################################################
# In order to test our model, here we download an image of cat and
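As a side note on the new tracing step above, a quick sanity check (illustrative only, not part of the tutorial) confirms the traced TorchScript module agrees with the eager model on the tracing input:

import torch

# Illustrative check: traced and eager models should produce the same output
# for the input used during tracing.
with torch.no_grad():
    eager_out = model(input_data)
    traced_out = scripted_model(input_data)
torch.testing.assert_close(eager_out, traced_out)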
@@ -158,9 +158,9 @@ def transform_image(image):
 # Now we would like to port the Gluon model to a portable computational graph.
 # It's as easy as several lines.

-# We support MXNet static graph(symbol) and HybridBlock in mxnet.gluon
-shape_dict = {"data": x.shape}
-mod, params = relay.frontend.from_mxnet(block, shape_dict)
+input_name = "input0"
+shape_list = [(input_name, x.shape)]
+mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
 # we want a probability so add a softmax operator
 func = mod["main"]
 func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
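The tutorial's collapsed lines go on to compile the converted function for the target device; a rough local sketch of that step (assuming a plain "llvm" target rather than the board-specific one used in the tutorial) might look like:

# Illustrative only: compile the relay function locally with an assumed target.
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(tvm.IRModule.from_expr(func), target="llvm", params=params)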
@@ -241,7 +241,7 @@ def transform_image(image):

 module = runtime.GraphModule(rlib["default"](dev))
 # set input data
-module.set_input("data", tvm.nd.array(x.astype("float32")))
+module.set_input(input_name, tvm.nd.array(x.astype("float32")))
 # run
 module.run()
 # get output
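For context, after module.run() the tutorial reads the output back and maps the top-1 index through the synset loaded earlier; roughly (variable names follow the tutorial):

# Illustrative only: top-1 class lookup on the graph module's output.
out = module.get_output(0)
top1 = np.argmax(out.numpy())
print("TVM prediction top-1: {}".format(synset[top1]))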
32 changes: 16 additions & 16 deletions gallery/how_to/deploy_models/deploy_model_on_rasp.py
@@ -95,22 +95,22 @@
 # -----------------------------
 # Back to the host machine, which should have a full TVM installed (with LLVM).
 #
-# We will use pre-trained model from
-# `MXNet Gluon model zoo <https://mxnet.apache.org/api/python/gluon/model_zoo.html>`_.
-# You can found more details about this part at tutorial :ref:`tutorial-from-mxnet`.
+# We will use pre-trained model from torchvision

-import sys

-from mxnet.gluon.model_zoo.vision import get_model
+import torch
+import torchvision
 from PIL import Image
 import numpy as np

-# one line to get the model
-try:
-    block = get_model("resnet18_v1", pretrained=True)
-except RuntimeError:
-    print("Downloads from mxnet no longer supported", file=sys.stderr)
-    sys.exit(0)
+model_name = "resnet18"
+model = getattr(torchvision.models, model_name)(pretrained=True)
+model = model.eval()
+
+# We grab the TorchScripted model via tracing
+input_shape = [1, 3, 224, 224]
+input_data = torch.randn(input_shape)
+scripted_model = torch.jit.trace(model, input_data).eval()

 ######################################################################
 # In order to test our model, here we download an image of cat and
@@ -148,12 +148,12 @@ def transform_image(image):
 synset = eval(f.read())

 ######################################################################
-# Now we would like to port the Gluon model to a portable computational graph.
+# Now we would like to port the PyTorch model to a portable computational graph.
 # It's as easy as several lines.

-# We support MXNet static graph(symbol) and HybridBlock in mxnet.gluon
-shape_dict = {"data": x.shape}
-mod, params = relay.frontend.from_mxnet(block, shape_dict)
+input_name = "input0"
+shape_list = [(input_name, x.shape)]
+mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
 # we want a probability so add a softmax operator
 func = mod["main"]
 func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
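For the Raspberry Pi flow, the converted function is then cross-compiled for the board's CPU before being shipped over RPC; a hedged sketch of the target selection (the "rasp3b" tag is one of TVM's arm_cpu presets and is an assumption here, adjust for your board):

# Illustrative only: choose a cross-compilation target for the Pi.
local_demo = False
if local_demo:
    target = tvm.target.Target("llvm")
else:
    target = tvm.target.arm_cpu("rasp3b")  # preset for Raspberry Pi 3B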
@@ -226,7 +226,7 @@ def transform_image(image):
 dev = remote.cpu(0)
 module = runtime.GraphModule(rlib["default"](dev))
 # set input data
-module.set_input("data", tvm.nd.array(x.astype("float32")))
+module.set_input(input_name, tvm.nd.array(x.astype("float32")))
 # run
 module.run()
 # get output