@@ -2044,7 +2044,7 @@ TEST(VulkanComputeGraphTest, test_etvk_copy_offset_node) {
   }
 }

-TEST(VulkanComputeGraphTest, test_etvk_copy_channel_offset_node) {
+TEST(VulkanComputeGraphTest, DISABLED_test_etvk_copy_channel_offset_node) {
   GraphConfig config;
   ComputeGraph graph(config);

@@ -2103,7 +2103,7 @@ TEST(VulkanComputeGraphTest, test_etvk_copy_channel_offset_node) {

 TEST(
     VulkanComputeGraphTest,
-    test_etvk_copy_channel_offset_node_clean_boundary) {
+    DISABLED_test_etvk_copy_channel_offset_node_clean_boundary) {
   // Tricky part for channel copy is handling the boundary across multiple copy.
   // For example, when we concat two [3, 1, 1] nchw-tensors along the channel
   // dimension, due to channel packing, elements from different source texel
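To make the packing that the comment above refers to concrete, here is a small standalone sketch (illustrative only, not part of this patch; it assumes the usual 4-channels-per-texel packing, and the helper name is hypothetical). A copy that begins at a channel offset that is not a multiple of 4 straddles two source texels, which is the boundary case these tests exercise.

// Illustrative sketch only (not from the patch): how a channel index maps to a
// texel under 4-wide channel packing. The name packed_location is hypothetical.
#include <cstdint>
#include <iostream>

struct TexelCoord {
  int64_t texel_idx;  // which texel along the packed-channel axis
  int64_t component;  // which of the texel's 4 components (x, y, z, w)
};

// Assuming channels are packed 4 per texel, channel c lands in texel c / 4,
// component c % 4. A copy starting at channel offset 1 therefore reads
// components 1..3 of texel 0 and component 0 of texel 1.
TexelCoord packed_location(int64_t c) {
  return {c / 4, c % 4};
}

int main() {
  for (int64_t c = 0; c < 6; c++) {
    TexelCoord t = packed_location(c);
    std::cout << "channel " << c << " -> texel " << t.texel_idx
              << ", component " << t.component << "\n";
  }
  return 0;
}
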
@@ -2312,7 +2312,7 @@ TEST(VulkanComputeGraphTest, test_etvk_copy_offset_int_node) {
   }
 }

-TEST(VulkanComputeGraphTest, test_etvk_copy_channel_offset_int_node) {
+TEST(VulkanComputeGraphTest, DISABLED_test_etvk_copy_channel_offset_int_node) {
   GraphConfig config;
   ComputeGraph graph(config);

@@ -2966,71 +2966,6 @@ TEST(VulkanComputeGraphOpsTest, max_pool2d_smoke_test) {
       kernel);
 }

-void test_conv2d(
-    const std::vector<int64_t>& original_sizes,
-    const std::vector<int64_t>& padded_sizes,
-    const std::vector<int64_t>& gpu_sizes,
-    const bool transposed,
-    const std::vector<float>& data_out_expected) {
-  vTensor vten = vTensor(
-      context(),
-      gpu_sizes,
-      vkapi::kFloat,
-      utils::StorageType::TEXTURE_2D,
-      utils::GPUMemoryLayout::TENSOR_CHANNELS_PACKED);
-
-  // Create and fill input staging buffer
-  const int64_t in_numel = utils::multiply_integers(original_sizes);
-  StagingBuffer staging_buffer_in(context(), vkapi::kFloat, in_numel);
-
-  std::vector<float> data_in(in_numel);
-  for (int i = 0; i < in_numel; i++) {
-    data_in[i] = i + 1;
-  }
-  staging_buffer_in.copy_from(data_in.data(), sizeof(float) * in_numel);
-
-  // Output staging buffer
-  const int64_t out_numel =
-      padded_sizes[0] * padded_sizes[1] * original_sizes[2] * original_sizes[3];
-  StagingBuffer staging_buffer_out(context(), vkapi::kFloat, out_numel);
-
-  // Copy data in and out of the tensor
-  record_conv2d_prepack_weights_op(
-      context(), staging_buffer_in.buffer(), vten, original_sizes, transposed);
-  record_image_to_nchw_op(context(), vten, staging_buffer_out.buffer());
-
-  // Execute command buffer
-  submit_to_gpu();
-
-  // Extract data from output staging buffer
-  std::vector<float> data_out(out_numel);
-  staging_buffer_out.copy_to(data_out.data(), sizeof(float) * out_numel);
-
-  // Check data matches results copied from ATen-VK
-  for (int i = 0; i < vten.numel(); i++) {
-    CHECK_VALUE(data_out, i, data_out_expected[i]);
-  }
-}
-
-TEST(VulkanComputeGraphOpsTest, conv2d_prepack_test) {
-  test_conv2d(
-      /* original_sizes = */ {2, 3, 1, 2},
-      /* padded_sizes = */ {4, 4},
-      /* gpu_sizes = */ {4, 1, 8},
-      /* transposed = */ false,
-      /* data_out_expected = */ {1, 3, 5, 0, 2, 4, 6, 0, 7, 9, 11,
-                                 0, 8, 10, 12, 0, 0, 0, 0, 0, 0, 0,
-                                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
-  test_conv2d(
-      /* original_sizes = */ {2, 3, 1, 2},
-      /* padded_sizes = */ {4, 4},
-      /* gpu_sizes = */ {4, 1, 8},
-      /* transposed = */ true,
-      /* data_out_expected = */ {2, 8, 0, 0, 1, 7, 0, 0, 4, 10, 0,
-                                 0, 3, 9, 0, 0, 6, 12, 0, 0, 5, 11,
-                                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
-}
-
 void test_grid_priors(
     std::vector<int64_t> input_sizes,
     std::vector<int64_t> output_sizes,
@@ -3254,6 +3189,7 @@ void test_to_copy() {
     torch::executor::Half output = output_data[i];
     uint16_t* output_bits = reinterpret_cast<uint16_t*>(&output);

+#ifdef VULKAN_DEBUG
     std::string msg;
     msg.reserve(64);
     msg = "input = " + std::to_string(input) + " (0b" +
@@ -3264,6 +3200,7 @@ void test_to_copy() {
        std::bitset<16>(*output_bits).to_string() + ")";

     std::cout << msg << std::endl;
+#endif

     // Note: Torch executor half "rounds up" when converting to fp16 whereas
     // most driver implementations of Vulkan's opFConvert() just truncates the
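The rounding difference mentioned in the note above can be reproduced on the CPU with a minimal sketch (assumed behavior, not code from this test; normal values only, ties-to-even ignored). Truncating the 13 extra mantissa bits of 0.3f yields a fp16 bit pattern one ULP below the round-to-nearest result.

// Sketch only: fp32 -> fp16 bit conversion, truncation vs. round-to-nearest.
// Handles normal values only; not a full converter.
#include <cstdint>
#include <cstring>
#include <iostream>

static uint16_t to_half_bits(float f, bool round_to_nearest) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  uint32_t sign = (bits >> 16) & 0x8000u;
  int32_t exp = static_cast<int32_t>((bits >> 23) & 0xFF) - 127 + 15;
  uint32_t mant = bits & 0x7FFFFFu;
  uint32_t half_mant = mant >> 13;             // keep the top 10 mantissa bits
  if (round_to_nearest && (mant & 0x1000u)) {  // inspect the first dropped bit
    half_mant += 1;                            // may carry into the exponent
    if (half_mant == 0x400u) {
      half_mant = 0;
      exp += 1;
    }
  }
  return static_cast<uint16_t>(sign | (exp << 10) | half_mant);
}

int main() {
  float x = 0.3f;  // not exactly representable in fp16
  std::cout << std::hex << "truncated: 0x" << to_half_bits(x, false)
            << "  rounded: 0x" << to_half_bits(x, true) << "\n";
  // Prints 0x34cc (truncated) vs 0x34cd (rounded): a one-ULP difference.
  return 0;
}
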
@@ -3290,9 +3227,11 @@ void test_to_copy() {

   mse_ex /= output_data.size();
   mse_vk /= output_data.size();
+#ifdef VULKAN_DEBUG
   std::cout << "========================================================="
             << std::endl;
   std::cout << "mse_ex = " << mse_ex << ", mse_vk = " << mse_vk << std::endl;
+#endif
 }

 TEST(VulkanComputeGraphOpsTest, test_to_copy) {