[VTA][HotFix] Relay->VTA quantization fix (apache#4433)
* Relay -> VTA fix

* set opt_level to 3 for quantization to fold batch norm
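The fix follows one pattern in all three files: run quantization inside a build_config context at opt_level 3, so that SimplifyInference rewrites batch_norm into scale-and-shift ops and constant folding merges them into the weights before the quantizer runs. A minimal sketch of that pattern, assuming `mod` and `params` come from a Relay frontend importer; the `quantize_for_vta` wrapper is illustrative, not part of this commit:

from tvm import relay

def quantize_for_vta(mod, params):
    # Note: We set opt_level to 3 in order to fold batch norm.
    # At opt_level=3, SimplifyInference decomposes batch_norm into
    # multiply/add, and FoldConstant merges those constants into the
    # weights, so the quantizer never sees a batch_norm op.
    with relay.build_config(opt_level=3):
        with relay.quantize.qconfig(global_scale=8.0,
                                    skip_conv_layers=[0]):
            mod = relay.quantize.quantize(mod, params=params)
    return mod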
tmoreau89 authored and yzhliu committed Nov 27, 2019
1 parent f552d4d commit c6f8c23
Showing 3 changed files with 25 additions and 19 deletions.
vta/scripts/tune_resnet.py (8 changes: 5 additions & 3 deletions)
@@ -125,9 +125,11 @@ def compile_network(opt, env, target):
     dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
 
     # Perform quantization in Relay
-    with relay.quantize.qconfig(global_scale=8.0,
-                                skip_conv_layers=[0]):
-        relay_prog = relay.quantize.quantize(mod["main"], params=params)
+    # Note: We set opt_level to 3 in order to fold batch norm
+    with relay.build_config(opt_level=3):
+        with relay.quantize.qconfig(global_scale=8.0,
+                                    skip_conv_layers=[0]):
+            relay_prog = relay.quantize.quantize(mod["main"], params=params)
 
     # Perform graph packing and constant folding for VTA target
     if target.device_name == "vta":
vta/tutorials/autotvm/tune_relay_vta.py (10 changes: 6 additions & 4 deletions)
@@ -89,15 +89,17 @@ def compile_network(env, target, model, start_pack, stop_pack):
     dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
 
     # Perform quantization in Relay
-    with relay.quantize.qconfig(global_scale=8.0,
-                                skip_conv_layers=[0]):
-        relay_prog = relay.quantize.quantize(mod["main"], params=params)
+    # Note: We set opt_level to 3 in order to fold batch norm
+    with relay.build_config(opt_level=3):
+        with relay.quantize.qconfig(global_scale=8.0,
+                                    skip_conv_layers=[0]):
+            mod = relay.quantize.quantize(mod, params=params)
 
     # Perform graph packing and constant folding for VTA target
     if target.device_name == "vta":
         assert env.BLOCK_IN == env.BLOCK_OUT
         relay_prog = graph_pack(
-            relay_prog,
+            mod["main"],
             env.BATCH,
             env.BLOCK_OUT,
             env.WGT_WIDTH,
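Note that the second change in this hunk follows from the first: `relay.quantize.quantize` is now applied to the whole module and its result rebinds `mod`, so no standalone quantized function is bound to `relay_prog` at that point; `graph_pack` therefore takes the module's entry function, `mod["main"]`.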
vta/tutorials/frontend/deploy_vision_on_vta.py (26 changes: 14 additions & 12 deletions)
@@ -168,18 +168,20 @@
 
     if target.device_name == "vta":
         # Perform quantization in Relay
-        with relay.quantize.qconfig(global_scale=8.0,
-                                    skip_conv_layers=[0]):
-            relay_prog = relay.quantize.quantize(mod["main"], params=params)
-        # Perform graph packing and constant folding for VTA target
-        assert env.BLOCK_IN == env.BLOCK_OUT
-        relay_prog = graph_pack(
-            relay_prog,
-            env.BATCH,
-            env.BLOCK_OUT,
-            env.WGT_WIDTH,
-            start_name=pack_dict[model][0],
-            stop_name=pack_dict[model][1])
+        # Note: We set opt_level to 3 in order to fold batch norm
+        with relay.build_config(opt_level=3):
+            with relay.quantize.qconfig(global_scale=8.0,
+                                        skip_conv_layers=[0]):
+                mod = relay.quantize.quantize(mod, params=params)
+        # Perform graph packing and constant folding for VTA target
+        assert env.BLOCK_IN == env.BLOCK_OUT
+        relay_prog = graph_pack(
+            mod["main"],
+            env.BATCH,
+            env.BLOCK_OUT,
+            env.WGT_WIDTH,
+            start_name=pack_dict[model][0],
+            stop_name=pack_dict[model][1])
     else:
         relay_prog = mod["main"]

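For reference, a minimal sketch of the step the tutorials perform next, compiling the packed program for the VTA target; the `build_for_vta` wrapper is illustrative, `relay_prog`, `params`, `target`, and `env` are the tutorial's own names, and this downstream step is unchanged by the commit:

import vta
from tvm import relay

def build_for_vta(relay_prog, params, target, env):
    # Compile the quantized, packed Relay program under VTA's
    # build configuration, targeting the accelerator and its host CPU.
    with vta.build_config():
        return relay.build(relay_prog, target=target,
                           params=params, target_host=env.target_host)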
