Commit 69293b5

try make sphinx happy

1 parent 00da7b2

File tree

1 file changed: +11 -11 lines changed

tutorials/frontend/deploy_prequantized.py

Lines changed: 11 additions & 11 deletions
@@ -16,7 +16,7 @@
 # under the License.
 """
 Deploy a Framework-prequantized Model with TVM
-================================
+==============================================
 **Author**: `Masahiro Masuda <https://github.com/masahi>`_

 This is a tutorial on loading models quantized by deep learning frameworks into TVM.
@@ -103,7 +103,7 @@ def run_tvm_model(mod, params, input_name, inp, target="llvm"):

 ################################################################################
 # Deploy a quantized PyTorch Model
-# ------------------
+# --------------------------------
 # First, we demonstrate how to load deep learning models quantized by PyTorch,
 # using our PyTorch frontend.
 #
@@ -126,14 +126,14 @@ def quantize_model(model, inp):

 ##############################################################################
 # Load quantization-ready, pretrained Mobilenet v2 model from torchvision
-# -----------------
+# -----------------------------------------------------------------------
 # We choose mobilenet v2 because this model was trained with quantization-aware
 # training. Other models require a full post-training calibration.
 qmodel = qmobilenet.mobilenet_v2(pretrained=True).eval()

 ##############################################################################
 # Quantize, trace and run the PyTorch Mobilenet v2 model
-# -----------------
+# ------------------------------------------------------
 # The details are out of scope for this tutorial. Please refer to the tutorials
 # on the PyTorch website to learn about quantization and jit.
 pt_inp = torch.from_numpy(inp)
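The quantize_model helper named in the hunk headers corresponds to PyTorch's eager-mode post-training quantization flow. A minimal sketch of that flow, assuming a calibration input pt_inp; the fbgemm qconfig, the single-input calibration, and the import path are illustrative choices, not necessarily the tutorial's exact code:

import torch
from torchvision.models import quantization as qmobilenet

def quantize_model(model, inp):
    # Attach a default quantization config (fbgemm targets x86 servers);
    # this choice is an assumption for the sketch.
    model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
    torch.quantization.prepare(model, inplace=True)
    model(inp)  # calibrate the observers with one representative input
    torch.quantization.convert(model, inplace=True)

qmodel = qmobilenet.mobilenet_v2(pretrained=True).eval()
pt_inp = torch.rand(1, 3, 224, 224)  # assumed mobilenet v2 input shape
quantize_model(qmodel, pt_inp)
# Trace to TorchScript so the TVM frontend can consume the model.
script_module = torch.jit.trace(qmodel, pt_inp).eval()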
@@ -145,7 +145,7 @@ def quantize_model(model, inp):

 ##############################################################################
 # Convert quantized Mobilenet v2 to Relay-QNN using the PyTorch frontend
-# -----------------
+# ----------------------------------------------------------------------
 # The PyTorch frontend has support for converting a quantized PyTorch model to
 # an equivalent Relay module enriched with quantization-aware operators.
 # We call this representation Relay QNN dialect.
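A minimal sketch of the conversion step this hunk documents, using relay.frontend.from_pytorch on the traced module; the input_name value and the input shape are assumptions for illustration:

from tvm import relay

input_name = "input"  # any name works; reuse it when setting inputs later
input_shapes = [(input_name, (1, 3, 224, 224))]
# Convert the quantized TorchScript module into a Relay module whose
# quantized layers appear as QNN ops (e.g. qnn.conv2d, qnn.dense).
mod, params = relay.frontend.from_pytorch(script_module, input_shapes)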
@@ -162,7 +162,7 @@ def quantize_model(model, inp):

 ##############################################################################
 # Compile and run the Relay module
-# -----------------
+# --------------------------------
 # Once we obtain the quantized Relay module, the rest of the workflow
 # is the same as running floating point models. Please refer to other
 # tutorials for more details.
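For reference, the run_tvm_model helper seen in the hunk headers plausibly wraps the standard relay.build plus graph-runtime flow; a sketch under that assumption, written against TVM 0.7-era APIs rather than the tutorial's exact helper:

import tvm
from tvm import relay
from tvm.contrib import graph_runtime

def run_tvm_model(mod, params, input_name, inp, target="llvm"):
    # Compile the Relay module; QNN ops are lowered to standard Relay
    # operators as part of this step.
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target, params=params)
    # Run one inference through the graph runtime on CPU.
    runtime = graph_runtime.GraphModule(lib["default"](tvm.cpu(0)))
    runtime.set_input(input_name, inp)
    runtime.run()
    return runtime.get_output(0).asnumpy()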
@@ -171,17 +171,17 @@ def quantize_model(model, inp):
 # standard Relay operators before compilation.
 tvm_result = run_tvm_model(mod, params, input_name, inp, target="llvm")

-######################################################################
+##########################################################################
 # Compare the output labels
-# -----------------
+# -------------------------
 # We should see identical labels printed.
 pt_top3_labels = np.argsort(pt_result[0])[::-1][:3]
 tvm_top3_labels = np.argsort(tvm_result[0])[::-1][:3]

 print("PyTorch top3 label:", [synset[label] for label in pt_top3_labels])
 print("TVM top3 label:", [synset[label] for label in tvm_top3_labels])

-##############################################################################
+###########################################################################################
 # However, due to the difference in numerics, the raw floating point outputs
 # are not expected to be identical in general. Here, we print how many floating
 # point output values are identical out of the 1000 outputs from mobilenet v2.
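The identical-output count described in those last context lines is a one-line numpy comparison; a sketch assuming pt_result and tvm_result are (1, 1000) logit arrays:

import numpy as np

# Count elementwise-identical floating point outputs among the 1000 logits.
num_identical = np.sum(tvm_result[0] == pt_result[0])
print("%d in 1000 raw floating point outputs identical." % num_identical)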
@@ -190,10 +190,10 @@ def quantize_model(model, inp):

 ###############################################################################
 # Deploy a quantized MXNet Model
-# ------------------
+# ------------------------------
 # TODO

 ###############################################################################
 # Deploy a quantized TFLite Model
-# ------------------
+# -------------------------------
 # TODO
