Commit 6b385e9

removed nop-type and code consistency
1 parent 92d08ea commit 6b385e9

File tree: 7 files changed (+29, -141 lines)

3rdparty/nop-type/nop-type.cc

Lines changed: 0 additions & 30 deletions
This file was deleted.

CMakeLists.txt

Lines changed: 0 additions & 1 deletion
@@ -261,7 +261,6 @@ if(NOT USE_RTTI)
 endif()
 
 list(APPEND RUNTIME_SRCS 3rdparty/posit/posit-wrapper.cc)
-list(APPEND RUNTIME_SRCS 3rdparty/nop-type/nop-type.cc)
 
 if(USE_RPC)
   message(STATUS "Build with RPC support...")

Makefile

Lines changed: 0 additions & 4 deletions
@@ -90,10 +90,6 @@ scalalint:
 
 lint: cpplint pylint jnilint
 
-# Test scripts
-pyunittest:
-	./tests/scripts/task_python_unittest.sh
-
 doc:
 	doxygen docs/Doxyfile
 

python/tvm/relay/backend/_backend.py

Lines changed: 3 additions & 5 deletions
@@ -90,11 +90,9 @@ def _tensor_value_repr(tvalue):
 
 @tvm._ffi.register_func("relay._constant_repr")
 def _tensor_constant_repr(tvalue):
-    # TODO(gus) do this in a smarter way
-    try:
+    if tvm.datatype.get_type_registered(tvalue.data.dtype.code):
+        return "custom tensor of type " + tvalue.data.dtype
+    else:
         return str(tvalue.data.asnumpy())
-    except:
-        return "custom tensor of type " + tvalue.data.dtype;
-
 
 tvm._ffi._init_api("relay.backend", __name__)
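The rewritten repr asks the custom-datatype registry directly instead of catching a conversion failure. A minimal sketch of that check in isolation, assuming the helpers are importable from python/tvm/target/datatype.py as written below; the type name "posites2" and the type code 131 are illustrative assumptions, not fixed by this commit:

    from tvm.target.datatype import register, get_type_registered

    # Register a custom datatype under an example code above the built-in
    # DLDataType codes (the value 131 is hypothetical).
    register("posites2", 131)

    # A registered code makes _tensor_constant_repr print the
    # "custom tensor of type ..." placeholder; an unregistered built-in
    # dtype falls through to str(tvalue.data.asnumpy()).
    assert get_type_registered(131)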

python/tvm/relay/testing/mobilenet.py

Lines changed: 0 additions & 1 deletion
@@ -129,7 +129,6 @@ def mobile_net(num_classes=1000, data_shape=(1, 3, 224, 224),
     bias = relay.var('fc_bias')
     fc = relay.nn.dense(data=flatten, weight=weight, units=num_classes)
     fc = relay.nn.bias_add(fc, bias)
-    # TODO(gus) i think softmax is broken
     softmax = relay.nn.softmax(data=fc)
     return relay.Function(relay.analysis.free_vars(softmax), softmax)
 

python/tvm/target/datatype.py

Lines changed: 9 additions & 9 deletions
@@ -76,8 +76,8 @@ def get_type_registered(type_code):
 def register_op(lower_func,
                 op_name,
                 target,
-                type_name,
-                src_type_name=None,
+                src_type_name,
+                dest_type_name=None,
                 intrinsic_name=None):
     """Register an external function which computes the given op.
 
@@ -98,11 +98,11 @@ class name (e.g. Add, LE, Cast).
     target : str
         The name of codegen target.
 
-    type_name : str
+    src_type_name : str
         The name of the custom datatype, e.g. posit (but not custom[posit]8).
 
-    src_type_name : str
-        If op_name is "Cast", then this should be set to the source datatype of
+    dest_type_name : str
+        If op_name is "Cast", then this is required and should be set to the dest datatype of
         the argument to the Cast. If op_name is not "Cast", this is unused.
 
     intrinsic_name : str
@@ -112,15 +112,15 @@ class name (e.g. Add, LE, Cast).
     """
 
     if op_name == "Cast":
-        assert src_type_name is not None
+        assert dest_type_name is not None
         lower_func_name = "tvm.datatype.lower." + target + "." + op_name + "." \
-            + type_name + "." + src_type_name
+            + dest_type_name + "." + src_type_name
     elif op_name == "Call" and intrinsic_name is not None:
         lower_func_name = "tvm.datatype.lower." + target + "." + op_name \
-            + ".intrin." + intrinsic_name + "." + type_name
+            + ".intrin." + intrinsic_name + "." + src_type_name
     else:
         lower_func_name = "tvm.datatype.lower." + target + "." + op_name + "." \
-            + type_name
+            + src_type_name
     tvm._ffi.register_func(lower_func_name, lower_func)
 
     # TODO(gus) could probably make this a decorator if i want
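After the rename, Cast registrations are written source type first, destination type second, and the lowering function ends up registered under tvm.datatype.lower.<target>.Cast.<dest_type_name>.<src_type_name>. A hedged sketch of the new call shape, mirroring the posites2 registrations in the test file below; the single-entry lowering table is illustrative, not a complete set of casts:

    # Cast float -> posites2: src_type_name="float", dest_type_name="posites2".
    register_op(create_lower_func({(32, 8): 'FloatToPosit8es2'}),
                "Cast", "llvm", "float", "posites2")
    # This registers the lowering function under the key
    # "tvm.datatype.lower.llvm.Cast.posites2.float".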

tests/python/unittest/test_custom_datatypes_change_dtype.py renamed to tests/python/unittest/test_custom_datatypes.py

Lines changed: 17 additions & 91 deletions
@@ -92,21 +92,21 @@ def setup():
             (32, 16): "FloatToPosit16es2",
             (32, 8): 'FloatToPosit8es2',
         }),
-        "Cast", "llvm", "posites2", "float")
+        "Cast", "llvm", "float", "posites2")
     register_op(create_lower_func(
         {
             (32, 32): "Posit32es2ToFloat",
             (16, 32): 'Posit16es2ToFloat',
             (8, 32): 'Posit8es2ToFloat',
         }),
-        "Cast", "llvm", "float", "posites2")
+        "Cast", "llvm", "posites2", "float")
     register_op(create_lower_func(
         {
             (4, 32): 'IntToPosit32es2',
             (4, 16): 'IntToPosit16es2',
             (4, 8): 'IntToPosit8es2'
         }),
-        "Cast", "llvm", "posites2", "int")
+        "Cast", "llvm", "int", "posites2")
     register_op(create_lower_func({
         32: 'Posit32es2Add',
         16: 'Posit16es2Add',
@@ -169,65 +169,18 @@ def setup():
     }), "Call", "llvm", "posites2", intrinsic_name="tanh")
     register_min_func(lambda num_bits: - (2 ** 2 ** 2) ** (num_bits - 2), "posites2")
 
-    register("noptype", 132)
-    register_op(create_lower_func({
-        (32, 32): "FloatToNop32"
-    }), "Cast", "llvm", "noptype", "float")
-    register_op(create_lower_func({
-        (32, 32): 'Nop32ToFloat'
-    }), "Cast", "llvm", "float", "noptype")
-    register_op(create_lower_func({
-        (4, 32): "IntToNop32"
-    }), "Cast", "llvm", "noptype", "int")
-    register_op(create_lower_func({32: 'IntToNop32'}), "Add", "llvm", "noptype")
-    register_op(create_lower_func({32: 'Nop32Sub'}), "Sub", "llvm", "noptype")
-    register_op(create_lower_func({32: 'FloatToNop32'}), "FloatImm", "llvm",
-                "noptype")
-    register_op(create_lower_func({32: 'Nop32Mul'}), "Mul", "llvm", "noptype")
-    register_op(create_lower_func({32: 'Nop32Div'}), "Div", "llvm", "noptype")
-    register_op(create_lower_func({32: 'Nop32Max'}), "Max", "llvm", "noptype")
-    register_op(create_lower_func({32: 'Nop32Sqrt'}),
-                "Call",
-                "llvm",
-                "noptype",
-                intrinsic_name="sqrt")
-    # TODO(gus) not sure if this will work...
-    register_op(lower_ite,
-                "Call",
-                "llvm",
-                "noptype",
-                intrinsic_name="tvm_if_then_else")
-    register_op(create_lower_func({32: 'Nop32Exp'}),
-                "Call",
-                "llvm",
-                "noptype",
-                intrinsic_name="exp")
-    register_op(create_lower_func({32: 'Nop32Log'}),
-                "Call",
-                "llvm",
-                "noptype",
-                intrinsic_name="log")
-    register_op(create_lower_func({32: 'Nop32Sigmoid'}),
-                "Call",
-                "llvm",
-                "noptype",
-                intrinsic_name="sigmoid")
-    register_op(create_lower_func({32: 'Nop32Tanh'}),
-                "Call",
-                "llvm",
-                "noptype",
-                intrinsic_name="tanh")
-    # This can be anything, considering the type isn't functionally correct.
-    register_min_func(lambda num_bits: 0, "noptype")
-
-
 def run_ops(src_dtype, dst_dtype, rtol=1e-7, atol=1e-7):
     """Run the same op, but with two different datatypes"""
+    # used for unary ops, first shape in binary ops
+    shape1 = (5, 10, 5)
+    # second shape for binary ops
+    shape2 = (5, )
+
     def check_unary_op(op, src_dtype, dst_dtype):
-        t1 = relay.TensorType((5, 10, 5), src_dtype)
+        t1 = relay.TensorType(shape1, src_dtype)
         x = relay.var("x", t1)
         z = op(x)
-        x_data = rs.rand(5, 10, 5).astype(t1.dtype)
+        x_data = rs.rand(*shape1).astype(t1.dtype)
 
         module = tvm.IRModule.from_expr(relay.Function([x], z))
 
@@ -248,13 +201,13 @@ def check_unary_op(op, src_dtype, dst_dtype):
         check_unary_op(op, src_dtype, dst_dtype)
 
     def check_binary_op(opfunc, src_dtype, dst_dtype):
-        t1 = relay.TensorType((5, 10, 5), src_dtype)
-        t2 = relay.TensorType((5, ), src_dtype)
+        t1 = relay.TensorType(shape1, src_dtype)
+        t2 = relay.TensorType(shape2, src_dtype)
         x = relay.var("x", t1)
         y = relay.var("y", t2)
        z = opfunc(x, y)
-        x_data = rs.rand(5, 10, 5).astype(t1.dtype)
-        y_data = rs.rand(5).astype(t2.dtype)
+        x_data = rs.rand(*shape1).astype(t1.dtype)
+        y_data = rs.rand(*shape2).astype(t2.dtype)
         module = tvm.IRModule.from_expr(relay.Function([x, y], z))
 
         compare(module, (x_data, y_data), src_dtype, dst_dtype, rtol, atol)
@@ -283,15 +236,11 @@ def run_model(get_workload,
     module, params = get_workload(image_shape=input_shape,
                                   num_classes=num_classes)
 
-    # Convert the input into the correct format.
+    # generate random input with appropriate shape/type
    input = tvm.nd.array(rs.rand(*input_shape).astype(src_dtype))
 
     compare(module, (input, ), src_dtype, dst_dtype, rtol, atol, params)
 
-    # # Simplifying inference is essential right now, as batch norms (which get
-    # # removed) are broken with custom datatypes.
-    # #expr = relay.ir_pass.simplify_inference(expr)
-
 def run_conv2d(src_dtype, dst_dtype):
     def run_test_conv2d(src_dtype,
                         dst_dtype,
@@ -417,10 +366,6 @@ def run_test_conv2d(src_dtype,
 
 
 def test_ops():
-    # TODO(gus) these tolerances are high, and still sometimes fail;
-    # this is expected, b/c we're comparing between 32bit float and 8
-    # bit posit.
-    # Figure out a more logical way to test here.
     run_ops('float32', 'custom[posites2]8', rtol=1, atol=1)
     run_ops('float32', 'custom[posites2]16', rtol=0.01, atol=1)
     run_ops('float32', 'custom[posites2]32')
@@ -446,33 +391,14 @@ def test_models():
              num_classes=10)
     # run_model(get_inception, (3, 32, 32),
     #           'float32',
-    #           'custom[posit32]32',
+    #           'custom[posites2]32',
     #           num_classes=10)
     # run_model(get_resnet, (3, 32, 32),
     #           'float32',
-    #           'custom[posit32]32',
+    #           'custom[posites2]32',
     #           num_classes=10)
 
     # Meanwhile, noptype is not slow.
-    run_model(get_mobilenet, (3, 224, 224),
-              'float32',
-              'custom[noptype]32',
-              num_classes=1000,
-              rtol=float("inf"),
-              atol=float("inf"))
-    run_model(get_inception, (3, 299, 299),
-              'float32',
-              'custom[noptype]32',
-              num_classes=1000,
-              rtol=float("inf"),
-              atol=float("inf"))
-    run_model(get_resnet, (3, 224, 224),
-              'float32',
-              'custom[noptype]32',
-              num_classes=1000,
-              rtol=float("inf"),
-              atol=float("inf"))
-
 
 if __name__ == "__main__":
     setup()
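The register_min_func lambda kept for posites2 in setup() encodes the usual posit lower bound: with exponent size es, useed = 2**(2**es), and the most negative value of an n-bit posit is -useed**(n - 2). A small sketch, assuming es = 2 as the "posites2" name suggests, checking what the retained lambda produces:

    def posit_min(num_bits, es=2):
        # most negative representable posit: -useed ** (num_bits - 2)
        useed = 2 ** (2 ** es)  # 16 when es = 2
        return -(useed ** (num_bits - 2))

    # the lambda from setup(), reproduced for comparison
    min_func = lambda num_bits: - (2 ** 2 ** 2) ** (num_bits - 2)
    assert all(min_func(n) == posit_min(n) for n in (8, 16, 32))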
