
[MXNET-347] Logical Operators AND, XOR, OR #10679

Merged · 8 commits · May 1, 2018
Changes from 3 commits
6 changes: 6 additions & 0 deletions src/operator/mshadow_op.h
@@ -317,6 +317,12 @@ MXNET_BINARY_MATH_OP_NC(eq, a == b ? DType(1) : DType(0));

MXNET_BINARY_MATH_OP_NC(ne, a != b ? DType(1) : DType(0));

MXNET_BINARY_MATH_OP(logical_and, a && b ? DType(1) : DType(0));

MXNET_BINARY_MATH_OP(logical_or, a || b ? DType(1) : DType(0));

MXNET_BINARY_MATH_OP(logical_xor, (a || b) && !(a && b) ? DType(1) : DType(0));

MXNET_UNARY_MATH_OP(square_root, math::sqrt(a));

MXNET_UNARY_MATH_OP(square_root_grad, 0.5f / math::id(a));
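For reference, here is a minimal NumPy sketch (illustrative only, not MXNet's actual macro expansion) of the truth tables these functors implement, treating any nonzero value as true:

    import numpy as np

    def logical_and(a, b):
        # 1 where both operands are nonzero, else 0
        return np.where(np.logical_and(a != 0, b != 0), 1.0, 0.0)

    def logical_or(a, b):
        # 1 where at least one operand is nonzero, else 0
        return np.where(np.logical_or(a != 0, b != 0), 1.0, 0.0)

    def logical_xor(a, b):
        # (a || b) && !(a && b): 1 where exactly one operand is nonzero
        return np.where(np.logical_xor(a != 0, b != 0), 1.0, 0.0)

    print(logical_xor(np.array([1., 0., 2.]), np.array([2., 0., 0.])))  # [0. 0. 1.]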
6 changes: 6 additions & 0 deletions src/operator/operator_tune.cc
@@ -342,6 +342,12 @@ IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::ne); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::ne); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::eq); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::eq); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::logical_and); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::logical_and); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::logical_or); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::logical_or); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::logical_xor); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::logical_xor); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::smooth_l1_loss); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::smooth_l1_gradient); // NOLINT()
IMPLEMENT_BLANK_WORKLOAD_FWD(mxnet::op::mxnet_op::set_to_int<0>); // NOLINT()
54 changes: 54 additions & 0 deletions src/operator/tensor/elemwise_binary_broadcast_op_logic.cc
@@ -137,5 +137,59 @@ Example::
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::le>)
.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes);

MXNET_OPERATOR_REGISTER_BINARY_BROADCAST(broadcast_logical_and)
.describe(R"code(Returns the result of element-wise **logical and** with broadcasting.

Example::

   x = [[ 1., 1., 1.],
        [ 1., 1., 1.]]

   y = [[ 0.],
        [ 1.]]

   broadcast_logical_and(x, y) = [[ 0., 0., 0.],
                                  [ 1., 1., 1.]]

)code" ADD_FILELINE)
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::logical_and>)
.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes);

MXNET_OPERATOR_REGISTER_BINARY_BROADCAST(broadcast_logical_or)
.describe(R"code(Returns the result of element-wise **logical or** with broadcasting.

Example::

   x = [[ 1. ,  2. ,  0. ],
        [ 0.5, -2.3,  0. ]]

   y = [[ 2.],
        [ 0.]]

   broadcast_logical_or(x, y) = [[ 1., 1., 1.],
                                 [ 1., 1., 0.]]

)code" ADD_FILELINE)
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::logical_or>)
.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes);

MXNET_OPERATOR_REGISTER_BINARY_BROADCAST(broadcast_logical_xor)
.describe(R"code(Returns the result of element-wise **logical xor** with broadcasting.

Example::

   x = [[ 1. ,  2. ,  0. ],
        [ 0.5, -2.3,  0. ]]

   y = [[ 2.],
        [ 0.]]

   broadcast_logical_xor(x, y) = [[ 0., 0., 1.],
                                  [ 1., 1., 0.]]

)code" ADD_FILELINE)
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::logical_xor>)
.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes);

} // namespace op
} // namespace mxnet
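With these registrations in place, the operators are exposed through the NDArray and Symbol frontends. A usage sketch (assuming a build that includes this PR) reproducing the docstring examples:

    import mxnet as mx

    x = mx.nd.array([[1.0, 2.0, 0.0], [0.5, -2.3, 0.0]])
    y = mx.nd.array([[2.0], [0.0]])

    print(mx.nd.broadcast_logical_and(x, y))  # [[1. 1. 0.] [0. 0. 0.]]
    print(mx.nd.broadcast_logical_or(x, y))   # [[1. 1. 1.] [1. 1. 0.]]
    print(mx.nd.broadcast_logical_xor(x, y))  # [[0. 0. 1.] [1. 1. 0.]]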
9 changes: 9 additions & 0 deletions src/operator/tensor/elemwise_binary_broadcast_op_logic.cu
@@ -47,5 +47,14 @@ NNVM_REGISTER_OP(broadcast_lesser)
NNVM_REGISTER_OP(broadcast_lesser_equal)
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastCompute<gpu, mshadow_op::le>);

NNVM_REGISTER_OP(broadcast_logical_and)
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastCompute<gpu, mshadow_op::logical_and>);

NNVM_REGISTER_OP(broadcast_logical_or)
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastCompute<gpu, mshadow_op::logical_or>);

NNVM_REGISTER_OP(broadcast_logical_xor)
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastCompute<gpu, mshadow_op::logical_xor>);

} // namespace op
} // namespace mxnet
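The GPU registrations reuse the same functors through BinaryBroadcastCompute<gpu, ...>, so the frontend calls are unchanged; only the array context differs. A sketch (assumes a CUDA-enabled build):

    import mxnet as mx

    ctx = mx.gpu(0)  # requires a CUDA build of MXNet
    x = mx.nd.array([[1.0, 0.0]], ctx=ctx)
    y = mx.nd.array([[1.0]], ctx=ctx)
    print(mx.nd.broadcast_logical_and(x, y))  # [[1. 0.]], computed by the GPU kernel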
25 changes: 24 additions & 1 deletion tests/python/unittest/test_operator.py
@@ -1581,6 +1581,27 @@ def test_bmin(a, b):
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.broadcast_logical_and)
        # pass idx=200 to gen_broadcast_data so that the generated ndarrays are not too large
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.broadcast_logical_or)
        # pass idx=200 to gen_broadcast_data so that the generated ndarrays are not too large
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.broadcast_logical_xor)
        # pass idx=200 to gen_broadcast_data so that the generated ndarrays are not too large
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
@@ -1591,7 +1612,9 @@ def test_bmin(a, b):
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)

test_band(a, b)
test_bor(a, b)
test_bxor(a, b)

@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
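Note that all three operators register MakeZeroGradNodes as their FGradient, so backpropagation through them produces zero gradients. A minimal autograd sketch (illustrative, assuming a build with this PR):

    import mxnet as mx

    x = mx.nd.array([[1., 1., 1.], [1., 1., 1.]])
    y = mx.nd.array([[0.], [1.]])
    x.attach_grad()
    with mx.autograd.record():
        out = mx.nd.broadcast_logical_and(x, y)
    out.backward()
    print(x.grad)  # all zeros: the registered gradient is MakeZeroGradNodes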