tests: explicitly handle precision-losing conversion (#81)
sxc++ warnings showed some cases where unintended type conversions
may actually have been occurring. Typical cases involved double->float,
int->float, and conversions from double expressions to some data_t type.
kruus authored and Evarist committed Jun 23, 2017
1 parent 1fc31c8 commit a914292
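
For context, a minimal standalone sketch (not part of this commit; the values and names are made up) of the pattern these warnings point at, and the explicit-cast form adopted in the changes below:

    #include <cstdio>

    int main() {
        // a double expression silently narrowed to float: conversion warnings flag this
        float eps_implicit = 1.e-4 * 256 * 13 * 13;
        // the same value with the narrowing acknowledged up front
        float eps_explicit = static_cast<float>(1.e-4 * 256 * 13 * 13);
        std::printf("%g %g\n", eps_implicit, eps_explicit);
        return 0;
    }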
Showing 11 changed files with 56 additions and 41 deletions.
2 changes: 2 additions & 0 deletions tests/gtests/CMakeLists.txt
@@ -64,3 +64,5 @@ endforeach()
# POST_BUILD
# WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
# COMMAND ${APP_NAME} )

# vim: et ts=4 sw=4
37 changes: 23 additions & 14 deletions tests/gtests/mkldnn_test_common.hpp
@@ -68,7 +68,7 @@ inline size_t map_index(const mkldnn::memory::desc &md, size_t index) {
auto *strides_within_block = md.data.layout_desc.blocking.strides[1];

size_t ph_index = 0;
int oc_16 = 0, ic_2 = 0,
size_t oc_16 = 0, ic_2 = 0,
oc_2 = 0, ic_16 = 0;

for (int rd = 0; rd < ndims; ++rd) {
@@ -77,13 +77,15 @@ inline size_t map_index(const mkldnn::memory::desc &md, size_t index) {
EXPECT_LE(dims[d], pdims[d]);

int cur_dim = dims[d];
EXPECT_GT(cur_dim, 0);
int cur_block = md.data.layout_desc.blocking.block_dims[d];

int pos_d = index % cur_dim;
int cur_pos = optd[d] + pos_d;
size_t pos_d = /*static_cast<ssize_t>*/(index % cur_dim);
EXPECT_GE(optd[d], 0);
size_t cur_pos = optd[d] + pos_d;

int cur_pos_block = cur_pos / cur_block;
int cur_pos_within_block = cur_pos % cur_block;
size_t cur_pos_block = cur_pos / cur_block;
size_t cur_pos_within_block = cur_pos % cur_block;

if (d == (with_groups + 0)) { oc_16 = pos_d % 16; oc_2 = pos_d % 2; }
if (d == (with_groups + 1)) { ic_2 = pos_d % 2; ic_16 = pos_d % 16; }
@@ -94,10 +96,16 @@ inline size_t map_index(const mkldnn::memory::desc &md, size_t index) {
index /= cur_dim;
}
if (md.data.format == fwd_weights_g || md.data.format == fwd_weights) {
ph_index += -16 * ic_2 + oc_16 + ic_2;
//ph_index += -16 * ic_2 + oc_16 + ic_2;
ph_index += oc_16 + ic_2;
EXPECT_GE(ph_index, 16*ic_2);
ph_index -= 16*ic_2;
} else
if (md.data.format == bwd_weights_g || md.data.format == bwd_weights) {
ph_index += -16 * oc_2 + ic_16 + oc_2;
//ph_index += -16 * oc_2 + ic_16 + oc_2;
ph_index += ic_16 + oc_2;
EXPECT_GE(ph_index, 16 * oc_2);
ph_index -= 16 * oc_2;
}
ph_index += md.data.layout_desc.blocking.offset_padding;

@@ -159,7 +167,8 @@ static inline data_t set_value(size_t index, data_t mean, data_t deviation,
const size_t group = index / group_size;
const size_t in_group = index % group_size;
const bool fill = in_group == ((group % 1637) % group_size);
return fill ? mean + deviation * sin(data_t(index % 37)) : 0;
return fill ? static_cast<data_t>(mean + deviation * sinf(float(index % 37)))
: data_t{0};
} else if (data_traits<data_t>::data_type == mkldnn::memory::data_type::s32
|| data_traits<data_t>::data_type == mkldnn::memory::data_type::s16) {
return data_t(rand()%11);
@@ -169,25 +178,25 @@ }
}

template <typename data_t>
static void fill_data(const int size, data_t *data, data_t mean,
static void fill_data(const size_t size, data_t *data, data_t mean,
data_t deviation, double sparsity = 1.)
{
# pragma omp parallel for schedule(static)
for (int n = 0; n < size; n++) {
for (size_t n = 0; n < size; n++) {
data[n] = set_value<data_t>(n, mean, deviation, sparsity);
}
}

template <typename data_t>
static void fill_data(const int size, data_t *data, double sparsity = 1.,
static void fill_data(const size_t size, data_t *data, double sparsity = 1.,
bool init_negs = false)
{
# pragma omp parallel for schedule(static)
for (int n = 0; n < size; n++) {
for (size_t n = 0; n < size; n++) {
data[n] = set_value<data_t>(n, data_t(1), data_t(2e-1), sparsity);

if (init_negs && n%4 == 0)
data[n] = -data[n];
if (init_negs && n%4 == 0U)
data[n] = static_cast<data_t>(-data[n]); // weird for unsigned types!
}
}
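
A minimal sketch (illustrative values only, not part of the diff) of why the negative addend on the now-unsigned ph_index above is replaced by an EXPECT_GE check followed by a subtraction — the old form relied on a negative int wrapping around when converted to size_t:

    #include <cassert>
    #include <cstddef>

    int main() {
        std::size_t ph_index = 100;   // accumulated blocked offset (made-up value)
        int oc_16 = 3, ic_2 = 1;
        // old form: -16*ic_2 + oc_16 + ic_2 is a negative int (-12) that is
        // converted to a huge size_t and only wraps back to the intended result
        // ph_index += -16 * ic_2 + oc_16 + ic_2;
        // new form: add the non-negative terms, check, then subtract
        ph_index += oc_16 + ic_2;                    // 104
        assert(ph_index >= std::size_t(16) * ic_2);  // mirrors EXPECT_GE in the test
        ph_index -= std::size_t(16) * ic_2;          // 88, no wrap-around involved
        return 0;
    }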

8 changes: 4 additions & 4 deletions tests/gtests/test_batch_normalization.cpp
@@ -60,7 +60,7 @@ void check_bnrm_fwd(const test_bnrm_params_t &p,
const memory::desc dst_d = dst.get_primitive_desc().desc();

test_bnrm_sizes_t bp = p.sizes;
data_t eps = 1.e-4 * bp.mb * bp.h * bp.w;
data_t eps = static_cast<data_t>(1.e-4 * bp.mb * bp.h * bp.w);

#pragma omp parallel for
for (int c = 0; c < bp.c; c++) {
@@ -96,7 +96,7 @@ void check_bnrm_fwd(const test_bnrm_params_t &p,
EXPECT_NEAR((variance_data[c] - ref_variance) / variance_norm_max, 0., eps);
}
}
data_t ref_sqrt_variance = sqrt(ref_variance + p.eps);
data_t ref_sqrt_variance = static_cast<data_t>(sqrt(ref_variance + p.eps));
data_t ref_rsqrt_variance = data_t(1) / (ref_sqrt_variance);

if (use_weights) {
@@ -158,7 +158,7 @@ void check_bnrm_bwd(const test_bnrm_params_t &p,

test_bnrm_sizes_t bp = p.sizes;

const data_t eps = 1.e-4 * bp.mb * bp.h * bp.w;
const data_t eps = static_cast<data_t>(1.e-4 * bp.mb * bp.h * bp.w);

#pragma omp parallel for
for (int c = 0; c < bp.c; c++) {
@@ -167,7 +167,7 @@

auto v_mean = mean_data[c];
auto v_variance = variance_data[c];
const data_t sqrt_variance = data_t(1) / sqrt(v_variance + p.eps);
const data_t sqrt_variance = data_t(1.0 / sqrt(v_variance + p.eps));

auto gamma = use_weights ? weights_data[map_index(weights_d, c)] : 1;

6 changes: 3 additions & 3 deletions tests/gtests/test_concat.cpp
@@ -101,14 +101,14 @@ class concat_test: public ::testing::TestWithParam<concat_test_params> {
auto src_memory = memory(mpd);
const size_t sz = src_memory.get_primitive_desc().get_size() / sizeof(data_t);
auto s = (data_t *)src_memory.get_data_handle();
for (size_t j = 0; j < sz; ++j) s[j] = i;
for (size_t j = 0; j < sz; ++j) s[j] = static_cast<data_t>(i);
// fill_data<data_t>(sz, (data_t *)src_memory.get_data_handle());
srcs_pd.push_back(mpd);
srcs.push_back(src_memory);
}

auto dst_desc = memory::desc(p.dst_cds, data_type, p.dst_format);
auto concat_pd = concat::primitive_desc(dst_desc, p.concat_dimension, srcs_pd);
auto concat_pd = concat::primitive_desc(dst_desc, static_cast<int>(p.concat_dimension), srcs_pd);
auto dst = memory(concat_pd.dst_primitive_desc());

std::vector<primitive::at> inputs;
@@ -127,7 +127,7 @@ class concat_test: public ::testing::TestWithParam<concat_test_params> {
auto s = stream(stream::kind::eager);
s.submit(pipeline).wait();

check_data(srcs, dst, p.concat_dimension);
check_data(srcs, dst, static_cast<int>(p.concat_dimension));
}
};
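
As an aside, a minimal sketch (the helper below is hypothetical; only the pattern matches this file) of the opposite direction handled here: a size_t test parameter passed to a parameter declared as int, with the narrowing made explicit via static_cast<int>:

    #include <cstddef>
    #include <cstdio>

    // hypothetical stand-in for an API that takes the dimension as int
    static void run_concat(int concat_dimension) {
        std::printf("concatenating along dim %d\n", concat_dimension);
    }

    int main() {
        std::size_t concat_dimension = 1;               // test parameter stored as size_t
        run_concat(static_cast<int>(concat_dimension)); // narrowing acknowledged explicitly
        return 0;
    }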

11 changes: 6 additions & 5 deletions tests/gtests/test_convolution_relu_forward_common.hpp
@@ -52,7 +52,7 @@ void compute_ref_conv_relu_fwd(const test_convolution_sizes_t &c,
bias_data[map_index(
bias.get_primitive_desc().desc(),
g * c.oc / c.ng + oc)] :
0.0;
data_t_dst{0};
for (int ic = 0; ic < c.ic / c.ng; ic++) {
for (int kh = 0; kh < c.kh; kh++) {
for (int kw = 0; kw < c.kw; kw++) {
@@ -79,8 +79,9 @@ }
}

if (dst_data[map_index(dst_d, oidx)] < 0) {
dst_data[map_index(dst_d, oidx)] *=
NEGATIVE_SLOPE;
dst_data[map_index(dst_d, oidx)] =
static_cast<data_t_dst>( NEGATIVE_SLOPE
* dst_data[map_index(dst_d, oidx)] );
}

}
@@ -131,9 +132,9 @@ class convolution_relu_test
/ sizeof(data_t_src), (data_t_src *)c_src.get_data_handle());
// TODO: Temporary workaround for testing of convolution + relu
data_t_src *src_data = (data_t_src *)c_src.get_data_handle();
const int mb_chunk =
const int mb_chunk = static_cast<int>(
(c_src.get_primitive_desc().get_size() / sizeof(data_t_src))
/ cd.mb;
/ cd.mb );
for (int i = 0; i < cd.mb * mb_chunk; ++i) {
if ((i / mb_chunk) % 2) src_data[i] *= -1.;
}
2 changes: 1 addition & 1 deletion tests/gtests/test_eltwise.cpp
@@ -41,7 +41,7 @@ template <typename T, typename A> T elu_fwd(T s, A alpha) {
return s > 0 ? s : alpha * (::expf(s) - 1);
}
template <typename T, typename A> T elu_bwd(T dd, T s, A alpha) {
return dd * (s > 0 ? 1. : alpha * ::expf(s));
return static_cast<T>(dd * (s > 0 ? 1 : alpha * ::expf((float)s)));
}

template <typename data_t>
2 changes: 1 addition & 1 deletion tests/gtests/test_inner_product_forward.cpp
@@ -49,7 +49,7 @@ void compute_ref_inner_product_fwd(test_inner_product_descr_t ipd, memory &src,
for (int oc = 0; oc < ipd.oc; oc++) {
int oidx = n * ipd.oc + oc;
dst_data[map_index(dst_d, oidx)] = bias_data ?
bias_data[map_index(bias_d, oc)] : 0.0;
bias_data[map_index(bias_d, oc)] : data_t{0};
for (int ic = 0; ic < ipd.ic; ic++) {
for (int kh = 0; kh < ipd.kh; kh++) {
for (int kw = 0; kw < ipd.kw; kw++) {
18 changes: 9 additions & 9 deletions tests/gtests/test_lrn_backward.cpp
@@ -82,10 +82,10 @@ void check_lrn_fwd(const lrn_test_params &p, const memory &src, const memory &ds
}
}

data_t norm_coef = powf(p.test_ld.k + p.test_ld.alpha * sum / summands,
p.test_ld.beta);
data_t ref_out = src_ptr[map_index(src_d, off(n, oc, oh, ow))]/norm_coef;
data_t eps = 1.e-7*(2*summands+5);
auto const norm_coef = std::pow(p.test_ld.k + p.test_ld.alpha * sum / summands,
p.test_ld.beta);
data_t ref_out = static_cast<data_t>(src_ptr[map_index(src_d, off(n, oc, oh, ow))]/norm_coef);
data_t eps = static_cast<data_t>(1.e-7f*(2*summands+5));
data_t out = d[0];
data_t norm_max = std::max(fabs(out), fabs(ref_out));
if (norm_max < eps) norm_max = 1.;
@@ -158,16 +158,16 @@ void check_lrn_bwd(const lrn_test_params &p, const memory &src,

for (int ks = ks_start; ks < ks_stop; ks++) {
int _t = oc + ks - (kernel_size/2);
data_t omega = get_omega(k, kernel_size, alpha, C,
data_t omega = get_omega(static_cast<data_t>(k), kernel_size, alpha, C,
src_ptr, mb, _t, oh, ow);

if (ks == kernel_size/2) omega_mid = omega;

data_t t = src_ptr[map_index(src_d, off(mb, _t, oh, ow))] / powf(omega, beta);
data_t t = src_ptr[map_index(src_d, off(mb, _t, oh, ow))] / powf((float)omega, (float)beta);
B += (1.0f / omega) * t * diff_dst_ptr[map_index(diff_dst_d, off(mb, _t, oh, ow))];
}

A = (1.0f / powf(omega_mid, beta))
A = (1.0f / powf((float)omega_mid, (float)beta))
* diff_dst_ptr[map_index(diff_dst_d, off(mb, oc, oh, ow))];
B *= src_ptr[map_index(src_d, off(mb, oc, oh, ow))];
B *= (2.0f * alpha * beta) / kernel_size;
Expand All @@ -183,8 +183,8 @@ void check_lrn_bwd(const lrn_test_params &p, const memory &src,
mb, c, h, w);
auto A = ref_diff_src_ptr[map_index(diff_src_d, off(mb, c, h, w))];
auto B = diff_src_ptr[map_index(diff_src_d, off(mb, c, h, w))];
data_t eps = 1.e-6*((2*(2*local_size + 3) + 6)*local_size
+ (2*local_size + 3) + 9);
data_t eps = static_cast<data_t>( 1.e-6*((2*(2*local_size + 3) + 6)*local_size
+ (2*local_size + 3) + 9) );
data_t norm_max = std::max(fabs(A), fabs(B));
if (norm_max < eps) norm_max = 1.;
EXPECT_NEAR(A, B, eps*norm_max);
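
A minimal sketch (illustrative only, outside this diff) of the powf/std::pow distinction behind the changes above: powf always computes in float, so double arguments are narrowed at the call, while std::pow selects an overload from its arguments and keeps double precision when given doubles:

    #include <cmath>
    #include <cstdio>

    int main() {
        double k = 1.75, beta = 0.75;
        float  f = powf(static_cast<float>(k), static_cast<float>(beta)); // narrowing made explicit
        double d = std::pow(k, beta);                                     // stays in double
        std::printf("%.9g %.17g\n", f, d);
        return 0;
    }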
5 changes: 3 additions & 2 deletions tests/gtests/test_lrn_forward.cpp
@@ -71,9 +71,10 @@ void check_lrn_fwd(const test_lrn_desc_t &ld,
}
}
}
data_t norm_coef = powf(ld.k + ld.alpha * sum / summands, ld.beta);
data_t norm_coef = powf(static_cast<float>(ld.k + ld.alpha * sum / summands),
static_cast<float>(ld.beta));
data_t ref_out = src_ptr[map_index(src_d, off(n, oc, oh, ow))]/norm_coef;
data_t eps = 1.e-7*(2*summands+5);
data_t eps = static_cast<data_t>(1.e-7f*(2*summands+5));
data_t out = d[0];
data_t norm_max = std::max(fabs(out), fabs(ref_out));
if (norm_max < eps) norm_max = 1.;
4 changes: 3 additions & 1 deletion tests/gtests/test_relu.cpp
@@ -71,7 +71,7 @@ void check_relu_bwd(data_t negative_slope, const memory::desc &md,
for (size_t i = 0; i < N * C * H * W; ++i) {
data_t ref_s = src_data[map_index(data_d, i)];
data_t ref_dd = diff_dst_data[map_index(diff_data_d, i)];
data_t ref_ds = ref_dd * ((ref_s > 0) ? 1. : negative_slope);
data_t ref_ds = ref_dd * ((ref_s > 0) ? data_t{1} : negative_slope);
EXPECT_NEAR(diff_src_data[map_index(diff_data_d, i)], ref_ds, 1.e-7);
}
}
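
A minimal sketch (made-up values, not from the diff) of the conditional-operator promotion that the data_t{1} change above avoids — a double literal in one branch makes the whole expression double, so assigning the product back to a float data_t narrows silently:

    #include <cstdio>

    int main() {
        using data_t = float;
        data_t dd = 0.5f, slope = 0.1f;
        bool positive = false;
        data_t a = dd * (positive ? 1.  : slope);       // conditional is double -> narrowed on assignment
        data_t b = dd * (positive ? data_t{1} : slope); // stays in float throughout
        std::printf("%.9g %.9g\n", a, b);
        return 0;
    }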
@@ -181,6 +181,8 @@ TEST_P(relu_test_float, TestsReLU)
str, relu_test_float, ::testing::Values(__VA_ARGS__))

INST_TEST_CASE(SimpleZeroNegativeSlope_NCHW,
//PARAMS(nchw, nchw, 0.f, 1, 8, 10000, 10000), // is a tensor of 3 Gb data ok? YES (330 s runtime, slow)
//PARAMS(nchw, nchw, 0.f, 1, 12, 10000, 10000), // is a tensor of >4 Gb data ok? worked once (release mode)
PARAMS(nchw, nchw, 0.f, 2, 8, 4, 4),
PARAMS(nchw, nchw, 0.f, 2, 16, 4, 4),
PARAMS(nchw, nchw, 0.f, 2, 16, 8, 8),
2 changes: 1 addition & 1 deletion tests/gtests/test_sum.cpp
@@ -98,7 +98,7 @@ class sum_test: public ::testing::TestWithParam<sum_test_params> {
auto s = (data_t *)src_memory.get_data_handle();
# pragma omp parallel for
/* Note: size_t incompatible with MSVC++ */
for (ptrdiff_t j = 0; j < sz; ++j) s[j] = i + 1;
for (ptrdiff_t j = 0; j < sz; ++j) s[j] = static_cast<data_t>(i + 1);
// fill_data<data_t>(sz, (data_t *)src_memory.get_data_handle());
srcs_pd.push_back(mpd);
srcs.push_back(src_memory);
