@@ -90,7 +90,7 @@ def verify_no_input(model, tmpdir, name):
    verify_node_names(model, loaded_model)
    return loaded_model

-def verify_one_input(model, data, tmpdir, name, device=None, loaded_model=None, rtol=1e-05, atol=1e-08):
+def verify_one_input(model, data, tmpdir, name, device=None, loaded_model=None, rtol=1e-05, atol=1e-08, bypass_load_into_cntk=False):
    # TODO: eventually we want this test method to be more general to support
    # models with multiple inputs instead of just one input.
    assert len(model.arguments) == 1
@@ -104,15 +104,19 @@ def verify_one_input(model, data, tmpdir, name, device=None, loaded_model=None,
    # outputs share the same owner
    opname = model.outputs[0].owner.op_name

-    loaded_model, onnx_model, test_model_path, test_data_path = create_and_populate_onnx_test_case_with_model_conversion(model, tmpdir, name, loaded_model)
+    if bypass_load_into_cntk:
+        loaded_model, onnx_model, test_model_path, test_data_path = create_and_populate_onnx_test_case_with_model_conversion(model, tmpdir, name, model, bypass_load_into_cntk=True)
+    else:
+        loaded_model, onnx_model, test_model_path, test_data_path = create_and_populate_onnx_test_case_with_model_conversion(model, tmpdir, name, loaded_model)

    # TODO: it is better to compare data.shape with model.arguments[0] and
    # to pad batch dimension as needed.
    # Some tests have already expanded batch axis to data (i.e. reduction test)
    if model.arguments[0].has_batch_axis() and type(data) != list:
        data.shape = (1, ) + data.shape

-    assert len(model.outputs) == len(loaded_model.outputs)
+    if not bypass_load_into_cntk:
+        assert len(model.outputs) == len(loaded_model.outputs)

    dim_denotation = CNTK_FREEDIM_AXIS_DENOTATION if opname in set_of_batch_ops else DIM_SIZE_FOR_NON_BATCH_OPS
    for i in range(0, len(model.outputs)):
@@ -121,7 +125,8 @@ def verify_one_input(model, data, tmpdir, name, device=None, loaded_model=None,
        if opname not in set_of_batch_irrelevant_ops:
            if model.outputs[i].has_batch_axis():
                output_shape = (dim_denotation, ) + output_shape
-        assert output_shape == loaded_model.outputs[i].shape
+        if not bypass_load_into_cntk:
+            assert output_shape == loaded_model.outputs[i].shape

    if device:
        o0 = model.eval({model.arguments[0]: data}, device=device)
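Note: the new bypass_load_into_cntk flag is intended for ops that can be exported to ONNX but not yet imported back into CNTK; when set, the helper skips the re-import and the output-count/shape assertions against the loaded model. A minimal, illustrative call (mirroring the test points added later in this commit):

    # Export-only check: write the ONNX model and test data,
    # but do not try to load the model back into CNTK.
    verify_one_input(model, data, tmpdir, "Zeros_Like_0", bypass_load_into_cntk=True)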
@@ -763,8 +768,6 @@ def test_Floor(tmpdir, dtype):
#Gather
@pytest.mark.parametrize("dtype", DType_Config)
def test_Gather(tmpdir, dtype):
-    if (dtype == np.float16):
-        pytest.skip("TO BE FIXED")
    with C.default_options(dtype=dtype):
        c = np.asarray([[0],[1]]).astype(dtype)
        x = C.input_variable((2,1))
@@ -780,12 +783,9 @@ def test_Gather(tmpdir, dtype):
#Gather
@pytest.mark.parametrize("dtype", DType_Config)
def test_Gather_With_Axis(tmpdir, dtype):
-    if (dtype == np.float16):
-        pytest.skip("TO BE FIXED")
    with C.default_options(dtype=dtype):
        data = np.asarray([[[111, 112], [121, 122], [131, 132], ], [[211, 212], [221, 222], [231, 232], ]]).astype(dtype)
        indices = np.asarray([[0, 1, 1], [1, 1, 1]])
-        x = C.input_variable(np.shape(data))
        y = C.input_variable(np.shape(indices))
        axis = 1
@@ -916,21 +916,21 @@ def test_LayerNormalization(tmpdir, dtype, device_id):
    if dtype == np.float16:
        pytest.skip('Test is skipped on float16 to pass build test')

-    # This test point tests the LayerNormalization round trip with defaultepsilon. We loose always the epsilon value when
-    # exporting to ONNX (because ONNX MeanVarianceNormalization does not have an epsilon attribute). When loading back
-    # from ONNX, CNTK always uses the default eposilon value (0.00001). That's why test below has the default epsilon
+    # This test point tests the LayerNormalization round trip with the default epsilon. We always lose the epsilon value
+    # when exporting to ONNX (because ONNX MeanVarianceNormalization does not have an epsilon attribute). When loading
+    # back from ONNX, CNTK always uses the default epsilon value (0.00000001). That's why the test below has the default epsilon
    # value. It is not expected to pass with any other epsilon value until something changes.
    with C.default_options(dtype=dtype):
        test_shapes = [(3, 5, 7), (10, ), (20, 31)]
        for shape in test_shapes:
            data = np.reshape(np.arange(np.prod(shape), dtype=dtype), shape)
            input_operand = C.input_variable(shape=shape)
-            model0 = C.layers.LayerNormalization(initial_scale=1, initial_bias=2, epsilon=0.00001)(input_operand)
-            verify_one_input(model0, data, tmpdir, 'LayerNorm_0' + str(shape).replace(',', '_'))
+            model0 = C.layers.LayerNormalization(initial_scale=1, initial_bias=2, epsilon=0.000000001)(input_operand)
+            verify_one_input(model0, data, tmpdir, 'LayerNorm_0' + str(shape).replace(',', '_'), rtol=1e-04, atol=1e-08)

-        # This test point tests especially with epsilon = 0, because that creates a graph with
+        # This test point tests especially with epsilon = 0, because that creates a graph with
        # different number of ops. However, we don't expect the numbers to match in round trip
-        # because we only support default epislon (0.00001) when loading from ONNX. Therefore,
+        # because we only support the default epsilon (0.00000001) when loading from ONNX. Therefore,
        # this is just a load/save test.
        model1 = C.layers.LayerNormalization(epsilon=0.0)(input_operand)
        filename = os.path.join(str(tmpdir), R'LayerNorm_1.onnx')
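For readers wondering why the exact epsilon value matters so little for the round trip: layer normalization only uses epsilon to stabilize the variance term. A rough numpy sketch of the computation (an illustrative reference helper, assuming the usual mean/variance formulation; CNTK's exact epsilon placement may differ):

    import numpy as np

    def layer_norm_reference(x, scale=1.0, bias=2.0, eps=1e-8):
        # Normalize by the sample's own mean/variance, then apply scale and bias.
        # eps only guards the sqrt, so swapping the exported value for the
        # import-time default changes the output by far less than rtol=1e-04.
        mean, var = x.mean(), x.var()
        return scale * (x - mean) / np.sqrt(var + eps) + bias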
@@ -1346,7 +1346,8 @@ def test_Mean(tmpdir, dtype):
#MeanVarianceNormalization
@pytest.mark.parametrize("dtype", DType_Config)
def test_MeanVarianceNormalization(tmpdir, dtype):
-    pytest.skip('test_MeanVarianceNormalization is skipped. Work is needed to make CNTK MVN compatible with ONNX Ver 9.')
+    if dtype == np.float16:
+        pytest.skip('Mean Variance Normalization with datatype float16 is not supported in ONNX.')
    with C.default_options(dtype=dtype):
        shape = (3, 5, 7)
        data = np.reshape(np.arange(np.prod(shape), dtype=dtype), shape)
@@ -1356,8 +1357,9 @@ def test_MeanVarianceNormalization(tmpdir, dtype):
        model0 = C.mean_variance_normalization(input_operand, use_stats_across_channels=False, do_variance_scaling=True)
        verify_one_input(model0, data, tmpdir, 'MVN_0')

-        model1 = C.mean_variance_normalization(input_operand, use_stats_across_channels=False, do_variance_scaling=False)
-        verify_one_input(model1, data, tmpdir, 'MVN_1')
+        # do_variance_scaling = False is no longer supported in onnx.
+        # model1 = C.mean_variance_normalization(input_operand, use_stats_across_channels=False, do_variance_scaling=False)
+        # verify_one_input(model1, data, tmpdir, 'MVN_1')

        model2 = C.mean_variance_normalization(input_operand, use_stats_across_channels=True, do_variance_scaling=True)
        verify_one_input(model2, data, tmpdir, 'MVN_2')
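For context on the two flags exercised here, a rough reference for mean-variance normalization of a (C, H, W) input (an illustrative helper, assuming the common per-channel definition; the exact epsilon handling in CNTK and ONNX may differ):

    import numpy as np

    def mvn_reference(x, use_stats_across_channels=False, do_variance_scaling=True, eps=1e-9):
        # Statistics per channel (over H, W) by default, or over the whole tensor.
        axes = None if use_stats_across_channels else (1, 2)
        y = x - x.mean(axis=axes, keepdims=True)
        if do_variance_scaling:
            # do_variance_scaling=False is the branch the exporter can no longer express.
            y = y / np.sqrt(x.var(axis=axes, keepdims=True) + eps)
        return y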
@@ -1409,7 +1411,6 @@ def test_Neg(tmpdir, dtype):
def test_OptimizedRNNStack(bidirectional, num_layers, input_size, hidden_size, recurrent_op, tmpdir, device_id):
    if device_id == -1:
        pytest.skip('Test only runs on GPU')
-    pytest.skip('test_OptimizedRNNStack is skipped. Work is needed to make CNTK compatible with ONNXRUNTIME shape inference.')
    dev = cntk_device(device_id)
    from _cntk_py import constant_initializer
    model_filename = 'optimized_rnn_stack_' + ('bi' if bidirectional else 'uni') + '_layers' + str(num_layers) + '_inp' + str(input_size) + '_hid' + str(hidden_size)
@@ -1759,15 +1760,18 @@ def test_Slice(tmpdir, dtype):
    (-2, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-4, 2), (0, 1), (1, 2)))
@pytest.mark.parametrize("dtype", DType_Config)
def test_SequenceSlice(tmpdir, dtype, beginIndex, endIndex):
-    batch_size = 1
-    sequence_length = 5
-    input_size = 3
-    feature_shape = (input_size,)
-    shape = (batch_size, sequence_length, input_size)
-    data = np.reshape(range(0, np.prod(shape)), shape).astype(dtype)
-    testName = "test_sequence_slice_{0}.{1}".format(beginIndex, endIndex)
-    model = C.sequence.slice(C.sequence.input_variable((feature_shape)), beginIndex, endIndex)
-    verify_sequence_model(model, data, tmpdir, testName)
+    with C.default_options(dtype=dtype):
+        if dtype == np.float16:
+            pytest.skip('Float16 is not supported in CNTK for sequence slice.')
+        batch_size = 1
+        sequence_length = 5
+        input_size = 3
+        feature_shape = (input_size,)
+        shape = (batch_size, sequence_length, input_size)
+        data = np.reshape(range(0, np.prod(shape)), shape).astype(dtype)
+        testName = "test_sequence_slice_{0}.{1}".format(beginIndex, endIndex)
+        model = C.sequence.slice(C.sequence.input_variable(feature_shape), beginIndex, endIndex)
+        verify_sequence_model(model, data, tmpdir, testName)

@pytest.mark.parametrize("dtype", DType_Config)
def test_SequenceFirst(tmpdir, dtype):
@@ -1928,9 +1932,11 @@ def test_Tanh(tmpdir, dtype):
#TopK
@pytest.mark.parametrize("dtype", DType_Config)
def test_TopK(tmpdir, dtype):
-    input_size = 10
-    data = (np.arange(input_size, dtype=dtype) * 0.1).reshape(1, input_size)
-    x = C.input_variable(input_size)
+    if dtype == np.float16:
+        pytest.skip("TopK of float16 not supported in cntk: Unsupported template argument(half) in SortPairsDescending.")
+    input_size = 9
+    data = (np.arange(input_size, dtype=dtype) * 0.1 + 0.1).reshape(input_size)
+    x = C.input_variable(input_size, dtype=dtype)
    model = C.top_k(-x * C.log(x), 3)
    verify_one_input(model, data, tmpdir, "top_k")
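A likely side benefit of the new TopK test data (an observation, not stated in the commit): the model evaluates -x*log(x), and the old data started at 0.0, where log(0) is -inf; the +0.1 shift keeps every input strictly positive. An illustrative check:

    import numpy as np

    old = np.arange(10, dtype=np.float32) * 0.1          # contains 0.0
    new = np.arange(9, dtype=np.float32) * 0.1 + 0.1     # 0.1 .. 0.9
    with np.errstate(divide='ignore', invalid='ignore'):
        print(np.isfinite(-old * np.log(old)).all())     # False: log(0) -> -inf
    print(np.isfinite(-new * np.log(new)).all())         # True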
@@ -2081,12 +2087,27 @@ def test_Zeros_Like(tmpdir, dtype):
    x = C.input_variable((3, 4), dynamic_axes=[], dtype=dtype, name='feature')
    model = C.zeros_like(x, name='zeros_like_op')
    data = np.asarray(range(3 * 4), dtype=dtype).reshape((3, 4))
-    verify_one_input(model, data, tmpdir, "Zeros_Like_0")
+    # TODO: import not yet implemented.
+    verify_one_input(model, data, tmpdir, "Zeros_Like_0", bypass_load_into_cntk=True)

# ones_like
@pytest.mark.parametrize("dtype", DType_Config)
def test_Ones_Like(tmpdir, dtype):
    x = C.input_variable((3, 4), dynamic_axes=[], dtype=dtype, name='feature')
    model = C.ones_like(x, name='ones_like_op')
    data = np.asarray(range(3 * 4), dtype=dtype).reshape((3, 4))
-    verify_one_input(model, data, tmpdir, "Ones_Like_0")
+    # TODO: import not yet implemented.
+    verify_one_input(model, data, tmpdir, "Ones_Like_0", bypass_load_into_cntk=True)
+
+# one hot
+@pytest.mark.parametrize("dtype", DType_Config)
+def test_One_Hot(tmpdir, dtype):
+    if dtype == np.float16:
+        pytest.skip('float16 not supported in onnxruntime.')
+    data = np.asarray([1, 5], dtype=dtype)
+    x = C.input_variable((2), dtype=dtype)
+    model = C.one_hot(x, 6, False, name='one_hot_op')
+    verify_one_input(model, data, tmpdir, "One_Hot_0", bypass_load_into_cntk=True)
+
+    model = C.one_hot(x, 6, False, axis=0, name='one_hot_op')
+    verify_one_input(model, data, tmpdir, "One_Hot_1", bypass_load_into_cntk=True)
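For reference, what the two new One_Hot test points should roughly produce for data = [1, 5] against 6 classes (illustrative numpy expectations, assuming the new class axis is appended by default and placed first when axis=0):

    import numpy as np

    expected_default = np.array([[0, 1, 0, 0, 0, 0],     # one-hot of 1
                                 [0, 0, 0, 0, 0, 1]])    # one-hot of 5
    expected_axis0 = expected_default.T                  # axis=0 puts the class axis first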