Add sliding_window operator (#9816)
* Add windows operator

* remove TODO

* Convert ICHECKs to CHECKs

* Report errors using diagnostic context

* Use more readable CHECKs

* Remove example; move comments to test

* Revert "Remove example; move comments to test"

This is a partial revert.

This reverts commit c810c2d.

* Add newline to fix Sphinx error

* windows -> sliding_window

* whitespace

* fmt
gussmith23 authored Jan 12, 2022
1 parent 44fe7ef commit 9160dc4
Showing 9 changed files with 327 additions and 0 deletions.
23 changes: 23 additions & 0 deletions include/tvm/relay/attrs/transform.h
@@ -33,6 +33,29 @@
namespace tvm {
namespace relay {

/*! \brief Attributes used for the sliding_window operator */
struct SlidingWindowAttrs : public tvm::AttrsNode<SlidingWindowAttrs> {
int axis;
Array<Integer> window_shape;
Array<Integer> strides;
TVM_DECLARE_ATTRS(SlidingWindowAttrs, "relay.attrs.SlidingWindowAttrs") {
TVM_ATTR_FIELD(axis).describe(
"The axis at which the sliding window begins forming. "
"The window will be slid over this axis and all following axes. "
"The axis value determines the window shape (and thus, the "
"number of strides): window shape and strides must both be of "
"length `data.ndim-axis`.");
TVM_ATTR_FIELD(window_shape)
.describe(
"The window shape to form over the input. "
"Window shape must be of length `data.ndim-axis`.");
TVM_ATTR_FIELD(strides).describe(
"How to stride the window along each dimension. "
"Strides must be of length `data.ndim-axis`.");
}
}; // struct SlidingWindowAttrs
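As a quick illustration of the length constraint these descriptions state (a sketch with made-up values, not part of the diff): for a rank-4 input with axis=1, the window covers the trailing three axes, so window_shape and strides must each have length data.ndim - axis = 3.

    # Illustrative values only; any rank-4 input with axis=1 behaves the same.
    data_ndim, axis = 4, 1
    window_shape = [3, 4, 5]
    strides = [1, 2, 3]
    assert len(window_shape) == len(strides) == data_ndim - axis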

/*! \brief data type cast */
struct CastAttrs : public tvm::AttrsNode<CastAttrs> {
DataType dtype;
84 changes: 84 additions & 0 deletions include/tvm/topi/transform.h
@@ -47,6 +47,90 @@ namespace topi {
using namespace tvm::te;
using namespace topi::detail;

/*!
* \brief Creates an operation to slide a window over the input x.
*
* \param x The input tensor.
* \param axis What axis the window begins sliding over. Window will be slid
* over this axis and all following axes. The axis value determines the window
* shape (and thus, the number of strides): window shape and strides must both
* be of length `data.ndim-axis`.
* \param window_shape The window shape to form over the input. Window shape
* must be of length `data.ndim-axis`.
* \param strides How to stride the window along each dimension. Strides must be
* of length `data.ndim-axis`.
* \param name The name of the operation
* \param tag The tag to mark the operation
*
* \return A Tensor whose op member is the sliding_window operation
*/
inline Tensor sliding_window(const Tensor& x, int axis, Array<Integer> window_shape,
Array<Integer> strides, std::string name = "T_sliding_window",
std::string tag = "") {
CHECK_GE(axis, 0);
auto _axis = size_t(axis);
CHECK_LT(_axis, x->shape.size()) << "axis must be a valid dimension index of x.";
CHECK_EQ(x->shape.size() - _axis, window_shape.size())
<< "There must be a window shape for every dimension of x "
<< "over which we are sliding the window.";
CHECK_EQ(strides.size(), window_shape.size()) << "Window shape and strides must be the same length.";

// Compute the new shape.
Array<PrimExpr> new_shape;
// Dimensions up until `axis` remain the same.
for (size_t i = 0; i < _axis; ++i) {
new_shape.push_back(x->shape[i]);
}

// New dimensions which result from sliding the window in each dimension. One new dimension per
// window dimension.
for (size_t i = 0; i < window_shape.size(); ++i) {
// Length of the shape along this dimension.
auto dim_len = x->shape[_axis + i];
// Length of the window along this dimension.
auto window_len = window_shape[i];
// Strides along this dimension.
auto stride = strides[i];

// Number of window positions: ceil((dim_len - window_len + 1) / stride).
new_shape.push_back(floordiv(dim_len - (window_len - 1) + stride - 1, stride));
}

// Dimensions comprising the window.
for (size_t i = 0; i < window_shape.size(); ++i) {
new_shape.push_back(window_shape[i]);
}

ICHECK(new_shape.size() == _axis + 2 * window_shape.size());

return compute(
new_shape,
[&](const Array<Var>& indices) {
// The index at which to index the old tensor x.
Array<PrimExpr> idx;

// Dimensions up until `axis` remain the same.
for (size_t i = 0; i < _axis; ++i) {
idx.push_back(indices[i]);
}

for (size_t i = 0; i < window_shape.size(); ++i) {
// Which window in this dimension we are indexing.
auto window_idx = indices[_axis + i];
// Which index within the window we are indexing.
auto idx_within_window = indices[_axis + window_shape.size() + i];
// Stride value for this dimension.
auto stride = strides[i];

idx.push_back(window_idx * stride + idx_within_window);
}

ICHECK(idx.size() == x->shape.size());

return x(idx);
},
name, tag);
}
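To cross-check the compute rule above, here is a NumPy reference with the same semantics; this is a sketch assuming NumPy >= 1.20 (for sliding_window_view) and is not part of this diff:

    import numpy as np

    def sliding_window_ref(x, axis, window_shape, strides):
        # Form windows over the trailing x.ndim - axis axes (implicit stride 1)...
        windows = np.lib.stride_tricks.sliding_window_view(
            x, tuple(window_shape), axis=tuple(range(axis, x.ndim)))
        # ...then subsample the window-position axes by the requested strides.
        keep = (slice(None),) * axis + tuple(slice(None, None, s) for s in strides)
        return windows[keep]

    x = np.random.rand(2, 3, 32, 32).astype("float32")
    out = sliding_window_ref(x, 1, [3, 4, 5], [1, 2, 3])
    assert out.shape == (2, 1, 15, 10, 3, 4, 5)  # matches the shape rule above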

/*!
* \brief Creates an operation to insert new dimensions of length 1
*
9 changes: 9 additions & 0 deletions python/tvm/relay/op/_transform.py
@@ -70,6 +70,15 @@
# concatenate
_reg.register_schedule("concatenate", strategy.schedule_concatenate)

# sliding_window
@_reg.register_compute("sliding_window")
def compute_sliding_window(attrs, inputs, output_type):
"""Compute definition of sliding_window"""
return [topi.sliding_window(inputs[0], attrs.axis, attrs.window_shape, attrs.strides)]


_reg.register_strategy("sliding_window", strategy.sliding_window_strategy)

# strided_set
@_reg.register_compute("strided_set")
def compute_strided_set(attrs, inputs, output_type):
22 changes: 22 additions & 0 deletions python/tvm/relay/op/strategy/generic.py
@@ -1705,6 +1705,28 @@ def uniform_strategy(attrs, inputs, out_type, target):
return strategy


# sliding_window
def wrap_compute_sliding_window():
"""Wrap sliding_window topi compute"""

def _compute_sliding_window(attrs, inputs, _):
return [topi.sliding_window(inputs[0], attrs.axis, attrs.window_shape, attrs.strides)]

return _compute_sliding_window


@override_native_generic_func("sliding_window_strategy")
def sliding_window_strategy(attrs, inputs, out_type, target):
"""sliding_window generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sliding_window(),
wrap_topi_schedule(topi.generic.schedule_extern),
name="sliding_window.generic",
)
return strategy


@override_native_generic_func("normal_strategy")
def normal_strategy(attrs, inputs, out_type, target):
"""normal generic strategy"""
58 changes: 58 additions & 0 deletions python/tvm/relay/op/transform.py
@@ -25,6 +25,64 @@
from .tensor import shape_of


def sliding_window(data, axis, window_shape, strides):
"""Slide a window over the data tensor.
Parameters
----------
data : relay.Expr
The input data to the operator.
axis : int
What axis the window begins sliding over. Window will be slid over
this axis and all following axes. The axis value determines the window
shape (and thus, the number of strides): window shape and strides must
both be of length `data.ndim-axis`.
window_shape : List[int]
The window shape to form over the input. Window shape must be of length
`data.ndim-axis`.
strides : List[int]
How to stride the window along each dimension. Strides must be of length
`data.ndim-axis`.

Returns
-------
result : relay.Expr
The resulting tensor.

Examples
--------
.. code-block:: python

# Slide a window of shape (3, 4, 5) over the x tensor, beginning with
# dimension 1, which slides the window over the two subtensors of
# shape (3, 32, 32).
x = relay.var("x", relay.TensorType((2, 3, 32, 32), "float32"))
y = relay.sliding_window(x, 1, [3, 4, 5], [1, 2, 3])
data = np.random.rand(2, 3, 32, 32).astype("float32")
result = create_executor().evaluate(y, {x: relay.const(data)}).numpy()
# The resulting shape still has batch size 2. Each dimension in
# (1, 15, 10) represents the locations where we were able to
# form a window; that is, we were able to place the window
# in one place along the dimension of length 3, 15 places along
# the dimension of length 32 (when striding by 2), and 10 places
# along the second dimension of length 32 (when striding by 3).
# The remaining dimensions (3, 4, 5) represent the formed windows.
assert result.shape == (2, 1, 15, 10, 3, 4, 5)
assert np.array_equal(result[0, 0, 0, 0, :, :, :], data[0, :, 0:4, 0:5])
assert np.array_equal(result[1, 0, 7, 3, :, :, :], data[1, :, 14:18, 9:14])
assert np.array_equal(result[1, 0, 14, 9, :, :, :], data[1, :, 28:32, 27:32])
"""
from .. import _ffi_api as _relay_make

return _relay_make.sliding_window(data, axis, window_shape, strides)


def cast(data, dtype):
"""Cast input tensor to data type.
30 changes: 30 additions & 0 deletions python/tvm/topi/transform.py
@@ -971,3 +971,33 @@ def invert_permutation(data):
r_ind = data[ind]
result[r_ind] = ind
return result


def sliding_window(data, axis, window_shape, strides):
"""Slide a window over the data tensor.
Parameters
----------
data : tvm.te.Tensor
The input data to the operator.
axis : int
What axis the window begins sliding over. Window will be slid over
this axis and all following axes. The axis value determines the window
shape (and thus, the number of strides): window shape and strides must
both be of length `data.ndim-axis`.
window_shape : List[int]
The window shape to form over the input. Window shape must be of length
`data.ndim-axis`.
strides : List[int]
How to stride the window along each dimension. Strides must be of length
`data.ndim-axis`.

Returns
-------
result : tvm.te.Tensor
The resulting tensor.
"""
return cpp.sliding_window(data, axis, window_shape, strides)
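For illustration, the wrapper can be driven directly at the TE level; a minimal sketch (shapes chosen to match the Relay test below, and assuming a TVM build that includes this patch):

    import tvm
    from tvm import te, topi

    x = te.placeholder((2, 3, 32, 32), name="x", dtype="float32")
    y = topi.sliding_window(x, 1, [3, 4, 5], [1, 2, 3])
    # Output shape is inferred statically: [2, 1, 15, 10, 3, 4, 5]
    print(y.shape)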
71 changes: 71 additions & 0 deletions src/relay/op/tensor/transform.cc
@@ -52,6 +52,77 @@ namespace tvm {
namespace relay {
using tir::IntImmNode;

TVM_REGISTER_NODE_TYPE(SlidingWindowAttrs);

bool SlidingWindowRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
// `types` contains: [data, result]
ICHECK_EQ(types.size(), 2);
const auto* data = types[0].as<TensorTypeNode>();
if (data == nullptr) {
reporter->GetDiagCtx().EmitFatal(Diagnostic::Error(reporter->GetSpan())
<< "SlidingWindow operator expects input to be of TensorType "
<< "but got " << PrettyPrint(types[0]));
return false;
}
const auto* param = attrs.as<SlidingWindowAttrs>();
ICHECK(param != nullptr);
const int axis = param->axis;

std::vector<IndexExpr> oshape;

// Dimensions up until `axis` remain the same.
for (int i = 0; i < axis; ++i) {
oshape.emplace_back(data->shape[i]);
}

// New dimensions which result from sliding the window in each dimension. One new dimension per
// window dimension.
for (size_t i = 0; i < param->window_shape.size(); ++i) {
// Length of the shape along this dimension.
auto dim_len = data->shape[axis + i];
// Length of the window along this dimension.
auto window_len = param->window_shape[i];
// Strides along this dimension.
auto stride = param->strides[i];

oshape.push_back(floordiv(dim_len - (window_len - 1) + stride - 1, stride));
}

// Dimensions comprising the window.
for (size_t i = 0; i < param->window_shape.size(); ++i) {
oshape.push_back(param->window_shape[i]);
}

reporter->Assign(types[1], TensorType(oshape, data->dtype));
return true;
}
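The per-dimension output length used here (and in the TOPI compute above) is floordiv(dim_len - (window_len - 1) + stride - 1, stride), i.e. ceil((dim_len - window_len + 1) / stride). A quick sanity check in plain Python against the shapes the test below uses:

    def num_windows(dim_len, window_len, stride):
        # Mirrors floordiv(dim_len - (window_len - 1) + stride - 1, stride).
        return (dim_len - (window_len - 1) + stride - 1) // stride

    assert num_windows(3, 3, 1) == 1    # one placement along the length-3 axis
    assert num_windows(32, 4, 2) == 15  # window 4, stride 2 on a length-32 axis
    assert num_windows(32, 5, 3) == 10  # window 5, stride 3 on a length-32 axis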

Array<te::Tensor> SlidingWindowCompute(const Attrs& attrs, const Array<te::Tensor>& inputs,
const Type& out_type) {
const SlidingWindowAttrs* param = attrs.as<SlidingWindowAttrs>();
ICHECK(param != nullptr);
return {topi::sliding_window(inputs[0], param->axis, param->window_shape, param->strides)};
}

Expr MakeSlidingWindow(Expr data, int axis, Array<Integer> window_shape, Array<Integer> strides) {
auto attrs = make_object<SlidingWindowAttrs>();
attrs->axis = axis;
attrs->window_shape = window_shape;
attrs->strides = strides;
static const Op& op = Op::Get("sliding_window");
return Call(op, {data}, Attrs(attrs), {});
}

TVM_REGISTER_GLOBAL("relay.ir.sliding_window").set_body_typed(MakeSlidingWindow);

RELAY_REGISTER_OP("sliding_window")
.describe(R"code(Slide window over a tensor.)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.set_attrs_type<SlidingWindowAttrs>()
.add_argument("data", "Tensor", "The input tensor.")
.add_type_rel("SlidingWindow", SlidingWindowRel)
.set_attr<TOpPattern>("TOpPattern", kOpaque);

// relay.cast
TVM_REGISTER_NODE_TYPE(CastAttrs);

4 changes: 4 additions & 0 deletions src/topi/transform.cc
@@ -54,6 +54,10 @@ TVM_REGISTER_GLOBAL("topi.reshape").set_body([](TVMArgs args, TVMRetValue* rv) {
*rv = reshape(args[0], args[1]);
});

TVM_REGISTER_GLOBAL("topi.sliding_window").set_body([](TVMArgs args, TVMRetValue* rv) {
*rv = sliding_window(args[0], args[1], args[2], args[3]);
});

TVM_REGISTER_GLOBAL("topi.squeeze").set_body([](TVMArgs args, TVMRetValue* rv) {
*rv = squeeze(args[0], ArrayOrInt(args[1]));
});
26 changes: 26 additions & 0 deletions tests/python/relay/test_op_level3.py
@@ -91,6 +91,32 @@ def test_cast():
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")


def test_sliding_window():
# Slide a window of shape (3, 4, 5) over the x tensor, beginning with
# dimension 1, which slides the window over the two subtensors of shape (3,
# 32, 32).
x = relay.var("x", relay.TensorType((2, 3, 32, 32), "float32"))
y = relay.sliding_window(x, 1, [3, 4, 5], [1, 2, 3])

# The resulting shape still has batch size 2. Each dimension in (1, 15, 10)
# represents the locations where we were able to form a window; that is, we
# were able to place the window in one place along the dimension of length
# 3, 15 places along the dimension of length 32 (when striding by 2), and 10
# places along the second dimension of length 32 (when striding by 3). The
# remaining dimensions (3, 4, 5) represent the formed windows.
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((2, 1, 15, 10, 3, 4, 5), "float32")

data = np.random.rand(2, 3, 32, 32).astype("float32")
intrp = create_executor()
result = intrp.evaluate(y, {x: relay.const(data)})
result_np = result.numpy()
assert result_np.shape == (2, 1, 15, 10, 3, 4, 5)
assert np.array_equal(result_np[0, 0, 0, 0, :, :, :], data[0, :, 0:4, 0:5])
assert np.array_equal(result_np[1, 0, 7, 3, :, :, :], data[1, :, 14:18, 9:14])
assert np.array_equal(result_np[1, 0, 14, 9, :, :, :], data[1, :, 28:32, 27:32])


def test_clip():
a = relay.var("a", relay.TensorType((10, 4), "float32"))
y = relay.clip(a, 1.0, 4.0)
