
Commit

Merge pull request tensorflow#25769 from aselle/cp-20190214b
Fix regression in XLA in TensorFlow 1.13 RC0, RC1
aselle authored Feb 15, 2019
2 parents 42debe3 + 88fa1f3 commit c865ec5
Showing 2 changed files with 42 additions and 5 deletions.
43 changes: 39 additions & 4 deletions tensorflow/compiler/tests/depthwise_conv_op_test.py
@@ -350,8 +350,13 @@ def testDepthwiseConv2DInputGradCompare(self):
       self._CompareBackpropInput(input_size, filter_size, output_size, stride,
                                  padding)
 
-  def _CompareBackpropFilter(self, input_sizes, filter_sizes, output_sizes,
-                             stride, padding):
+  def _CompareBackpropFilter(self,
+                             input_sizes,
+                             filter_sizes,
+                             output_sizes,
+                             stride,
+                             padding,
+                             data_format="NHWC"):
     x0 = np.random.rand(*input_sizes).astype(np.float32)
     x2 = np.random.rand(*output_sizes).astype(np.float32)
@@ -360,13 +365,30 @@ def _GetVal(use_xla):
         t0 = array_ops.placeholder(np.float32, shape=input_sizes)
         t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
         t2 = array_ops.placeholder(np.float32, shape=output_sizes)
+        native_t0 = t0
+        native_t2 = t2
+        strides = [1, stride, stride, 1]
+
         if use_xla:
+          if data_format == "NCHW":
+            # Transpose from NHWC input to NCHW,
+            # e.g. [4, 5, 5, 48] to [4, 48, 5, 5].
+            native_t0 = array_ops.transpose(t0, [0, 3, 1, 2])
+            native_t2 = array_ops.transpose(t2, [0, 3, 1, 2])
+            strides = [1, 1, stride, stride]
           with self.test_scope():
             backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
-                t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
+                native_t0,
+                t1,
+                native_t2,
+                strides=strides,
+                padding=padding,
+                data_format=data_format)
         else:
+          # The NCHW format is not supported on CPU, so we always use NHWC
+          # here.
           backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
-              t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
+              native_t0, t1, native_t2, strides=strides, padding=padding)
         ret = backprop.eval({t0: x0, t2: x2})
         self.assertShapeEqual(ret, backprop)
         return ret
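
The hunk above transposes the NHWC placeholders, and the matching stride vector, into NCHW before handing them to the XLA kernel. A minimal standalone sketch of that layout conversion, using NumPy only and an illustrative helper name that is not part of the test file:

import numpy as np

def nhwc_to_nchw(x, stride):
  # Move the channel axis from last to second position,
  # e.g. [4, 5, 5, 48] -> [4, 48, 5, 5].
  x_nchw = np.transpose(x, [0, 3, 1, 2])
  # The stride vector is permuted the same way:
  # NHWC [1, stride, stride, 1] -> NCHW [1, 1, stride, stride].
  strides_nchw = [1, 1, stride, stride]
  return x_nchw, strides_nchw

x = np.random.rand(4, 5, 5, 48).astype(np.float32)
x_nchw, strides = nhwc_to_nchw(x, stride=2)
print(x_nchw.shape, strides)  # (4, 48, 5, 5) [1, 1, 2, 2]
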
@@ -384,6 +406,19 @@ def testDepthwiseConv2DFilterGradCompare(self):
       self._CompareBackpropFilter(input_size, filter_size, output_size,
                                   stride, padding)
 
+  def testDepthwiseConv2DFilterGradFormatNCHWCompare(self):
+    for index, (input_size, filter_size, output_size, stride,
+                padding) in enumerate(ConfigsToTest()):
+      print("Testing DepthwiseConv2DFilterGradFormatNCHWCompare,", index,
+            "th config:", input_size, "*", filter_size, "producing output",
+            output_size, "stride:", stride, "padding:", padding)
+      self._CompareBackpropFilter(
+          input_size,
+          filter_size,
+          output_size,
+          stride,
+          padding,
+          data_format="NCHW")
 
 if __name__ == "__main__":
   test.main()
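
The assertion that compares the two code paths lives outside the hunks shown here. As a rough standalone sketch of the comparison idea only (NumPy only; the helper name and tolerances are illustrative, not taken from the test file):

import numpy as np

def compare_backprop_filter(get_val):
  # get_val(use_xla) returns the filter gradient as a NumPy array.
  value_xla = get_val(use_xla=True)   # XLA path (NCHW in the new test)
  value_ref = get_val(use_xla=False)  # plain NHWC reference path
  np.testing.assert_allclose(value_ref, value_xla, rtol=1e-4, atol=1e-4)

# Toy usage with a stand-in gradient function that ignores the flag.
compare_backprop_filter(lambda use_xla: np.ones((2, 2, 3, 1), np.float32))
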
4 changes: 3 additions & 1 deletion tensorflow/compiler/tf2xla/kernels/conv_op_helpers.cc
@@ -434,8 +434,10 @@ xla::StatusOr<xla::XlaOp> MakeXlaBackpropFilterConvOp(
   }
 
   // We use this approach only for depthwise convolutions where feature counts
-  // are large but space dimensions are small.
+  // are large but space dimensions are small. The conversion logic below
+  // assumes that the data format is NHWC, so we also check that here.
   bool should_perform_depthwise_conv =
+      attrs.data_format == FORMAT_NHWC &&
       (total_spatial_size < dims.in_depth) &&
       filter_tensor_shape.dim_size(num_dims - 1) == 1 && attrs.depthwise;