@@ -92,21 +92,21 @@ def setup():
             (32, 16): "FloatToPosit16es2",
             (32, 8): 'FloatToPosit8es2',
         }),
-        "Cast", "llvm", "posites2", "float")
+        "Cast", "llvm", "float", "posites2")
     register_op(create_lower_func(
         {
             (32, 32): "Posit32es2ToFloat",
             (16, 32): 'Posit16es2ToFloat',
             (8, 32): 'Posit8es2ToFloat',
         }),
-        "Cast", "llvm", "float", "posites2")
+        "Cast", "llvm", "posites2", "float")
     register_op(create_lower_func(
         {
             (4, 32): 'IntToPosit32es2',
             (4, 16): 'IntToPosit16es2',
             (4, 8): 'IntToPosit8es2'
         }),
-        "Cast", "llvm", "posites2", "int")
+        "Cast", "llvm", "int", "posites2")
     register_op(create_lower_func({
         32: 'Posit32es2Add',
         16: 'Posit16es2Add',
@@ -169,65 +169,18 @@ def setup():
     }), "Call", "llvm", "posites2", intrinsic_name="tanh")
     register_min_func(lambda num_bits: -(2**2**2)**(num_bits - 2), "posites2")
 
-    register("noptype", 132)
-    register_op(create_lower_func({
-        (32, 32): "FloatToNop32"
-    }), "Cast", "llvm", "noptype", "float")
-    register_op(create_lower_func({
-        (32, 32): 'Nop32ToFloat'
-    }), "Cast", "llvm", "float", "noptype")
-    register_op(create_lower_func({
-        (4, 32): "IntToNop32"
-    }), "Cast", "llvm", "noptype", "int")
-    register_op(create_lower_func({32: 'IntToNop32'}), "Add", "llvm", "noptype")
-    register_op(create_lower_func({32: 'Nop32Sub'}), "Sub", "llvm", "noptype")
-    register_op(create_lower_func({32: 'FloatToNop32'}), "FloatImm", "llvm",
-                "noptype")
-    register_op(create_lower_func({32: 'Nop32Mul'}), "Mul", "llvm", "noptype")
-    register_op(create_lower_func({32: 'Nop32Div'}), "Div", "llvm", "noptype")
-    register_op(create_lower_func({32: 'Nop32Max'}), "Max", "llvm", "noptype")
-    register_op(create_lower_func({32: 'Nop32Sqrt'}),
-                "Call",
-                "llvm",
-                "noptype",
-                intrinsic_name="sqrt")
-    # TODO(gus) not sure if this will work...
-    register_op(lower_ite,
-                "Call",
-                "llvm",
-                "noptype",
-                intrinsic_name="tvm_if_then_else")
-    register_op(create_lower_func({32: 'Nop32Exp'}),
-                "Call",
-                "llvm",
-                "noptype",
-                intrinsic_name="exp")
-    register_op(create_lower_func({32: 'Nop32Log'}),
-                "Call",
-                "llvm",
-                "noptype",
-                intrinsic_name="log")
-    register_op(create_lower_func({32: 'Nop32Sigmoid'}),
-                "Call",
-                "llvm",
-                "noptype",
-                intrinsic_name="sigmoid")
-    register_op(create_lower_func({32: 'Nop32Tanh'}),
-                "Call",
-                "llvm",
-                "noptype",
-                intrinsic_name="tanh")
-    # This can be anything, considering the type isn't functionally correct.
-    register_min_func(lambda num_bits: 0, "noptype")
-
-
 def run_ops(src_dtype, dst_dtype, rtol=1e-7, atol=1e-7):
     """Run the same op, but with two different datatypes"""
+    # used for unary ops, first shape in binary ops
+    shape1 = (5, 10, 5)
+    # second shape for binary ops
+    shape2 = (5, )
+
     def check_unary_op(op, src_dtype, dst_dtype):
-        t1 = relay.TensorType((5, 10, 5), src_dtype)
+        t1 = relay.TensorType(shape1, src_dtype)
         x = relay.var("x", t1)
         z = op(x)
-        x_data = rs.rand(5, 10, 5).astype(t1.dtype)
+        x_data = rs.rand(*shape1).astype(t1.dtype)
 
         module = tvm.IRModule.from_expr(relay.Function([x], z))
 
@@ -248,13 +201,13 @@ def check_unary_op(op, src_dtype, dst_dtype):
         check_unary_op(op, src_dtype, dst_dtype)
 
     def check_binary_op(opfunc, src_dtype, dst_dtype):
-        t1 = relay.TensorType((5, 10, 5), src_dtype)
-        t2 = relay.TensorType((5, ), src_dtype)
+        t1 = relay.TensorType(shape1, src_dtype)
+        t2 = relay.TensorType(shape2, src_dtype)
         x = relay.var("x", t1)
         y = relay.var("y", t2)
        z = opfunc(x, y)
-        x_data = rs.rand(5, 10, 5).astype(t1.dtype)
-        y_data = rs.rand(5).astype(t2.dtype)
+        x_data = rs.rand(*shape1).astype(t1.dtype)
+        y_data = rs.rand(*shape2).astype(t2.dtype)
         module = tvm.IRModule.from_expr(relay.Function([x, y], z))
 
         compare(module, (x_data, y_data), src_dtype, dst_dtype, rtol, atol)
@@ -283,15 +236,11 @@ def run_model(get_workload,
     module, params = get_workload(image_shape=input_shape,
                                   num_classes=num_classes)
 
-    # Convert the input into the correct format.
+    # generate random input with appropriate shape/type
     input = tvm.nd.array(rs.rand(*input_shape).astype(src_dtype))
 
     compare(module, (input, ), src_dtype, dst_dtype, rtol, atol, params)
 
-    # # Simplifying inference is essential right now, as batch norms (which get
-    # # removed) are broken with custom datatypes.
-    # #expr = relay.ir_pass.simplify_inference(expr)
-
 def run_conv2d(src_dtype, dst_dtype):
     def run_test_conv2d(src_dtype,
                         dst_dtype,
@@ -417,10 +366,6 @@ def run_test_conv2d(src_dtype,
 
 
 def test_ops():
-    # TODO(gus) these tolerances are high, and still sometimes fail;
-    # this is expected, b/c we're comparing between 32bit float and 8
-    # bit posit.
-    # Figure out a more logical way to test here.
     run_ops('float32', 'custom[posites2]8', rtol=1, atol=1)
     run_ops('float32', 'custom[posites2]16', rtol=0.01, atol=1)
     run_ops('float32', 'custom[posites2]32')
@@ -446,33 +391,14 @@ def test_models():
               num_classes=10)
     # run_model(get_inception, (3, 32, 32),
     #           'float32',
-    #           'custom[posit32]32',
+    #           'custom[posites2]32',
     #           num_classes=10)
     # run_model(get_resnet, (3, 32, 32),
     #           'float32',
-    #           'custom[posit32]32',
+    #           'custom[posites2]32',
     #           num_classes=10)
 
     # Meanwhile, noptype is not slow.
-    run_model(get_mobilenet, (3, 224, 224),
-              'float32',
-              'custom[noptype]32',
-              num_classes=1000,
-              rtol=float("inf"),
-              atol=float("inf"))
-    run_model(get_inception, (3, 299, 299),
-              'float32',
-              'custom[noptype]32',
-              num_classes=1000,
-              rtol=float("inf"),
-              atol=float("inf"))
-    run_model(get_resnet, (3, 224, 224),
-              'float32',
-              'custom[noptype]32',
-              num_classes=1000,
-              rtol=float("inf"),
-              atol=float("inf"))
-
 
 if __name__ == "__main__":
     setup()