Skip to content

Commit

Permalink
Add windows operator
Browse files Browse the repository at this point in the history
  • Loading branch information
gussmith23 committed Jan 4, 2022
1 parent ce108c1 commit 49fb82c
Show file tree
Hide file tree
Showing 9 changed files with 317 additions and 0 deletions.
23 changes: 23 additions & 0 deletions include/tvm/relay/attrs/transform.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,29 @@
namespace tvm {
namespace relay {

/*! \brief Attributes used for the windows operator */
/*! \brief Attributes used for the windows operator */
struct WindowsAttrs : public tvm::AttrsNode<WindowsAttrs> {
  int axis;                     // first axis over which windows are formed
  Array<Integer> window_shape;  // shape of each window; length is data.ndim - axis
  Array<Integer> strides;       // per-dimension window stride; length is data.ndim - axis
  TVM_DECLARE_ATTRS(WindowsAttrs, "relay.attrs.WindowsAttrs") {
    // BUGFIX: adjacent C++ string literals concatenate with no separator, so the
    // original fragments rendered as "over.Windows", "thenumber of strides", and
    // "lengthdata.ndim-axis". Each fragment now ends with a space.
    TVM_ATTR_FIELD(axis).describe(
        "What axis the windows begin forming over. "
        "Windows will be formed over this axis and all following axes. "
        "The axis value determines the window shape (and thus, the "
        "number of strides): "
        "window shape and strides must both be of length "
        "`data.ndim-axis`.");
    TVM_ATTR_FIELD(window_shape)
        .describe(
            "The window shape to form over the input. "
            "Window shape must be of length `data.ndim-axis`.");
    TVM_ATTR_FIELD(strides).describe(
        "How to stride the window along each dimension. "
        "Strides must be of length `data.ndim-axis`.");
  }
};  // struct WindowsAttrs

/*! \brief data type cast */
struct CastAttrs : public tvm::AttrsNode<CastAttrs> {
DataType dtype;
Expand Down
86 changes: 86 additions & 0 deletions include/tvm/topi/transform.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,92 @@ namespace topi {
using namespace tvm::te;
using namespace topi::detail;

/*!
 * \brief Creates an operation to form windows over the input x.
 *
 * \param x The input tensor.
 * \param axis What axis the windows begin forming over. Windows will be formed
 * over this axis and all following axes. The axis value determines the window
 * shape (and thus, the number of strides): window shape and strides must both
 * be of length `data.ndim-axis`.
 * \param window_shape The window shape to form over the input. Window shape
 * must be of length `data.ndim-axis`.
 * \param strides How to stride the window along each dimension. Strides must be
 * of length `data.ndim-axis`.
 * \param name The name of the operation
 * \param tag The tag to mark the operation
 *
 * \return A Tensor whose op member is the windows operation
 */
inline Tensor windows(const Tensor& x, int axis, Array<Integer> window_shape,
                      Array<Integer> strides, std::string name = "T_windows",
                      // TODO(@gussmith23) what to tag it?
                      std::string tag = "") {
  // BUGFIX: this was `ICHECK(axis <= 0)`, which rejects every positive axis
  // (including the axis=1 used by the relay example and unit test). The axis
  // must simply be a valid, non-negative dimension index of x.
  ICHECK_GE(axis, 0) << "axis must be non-negative.";
  auto _axis = size_t(axis);
  ICHECK(_axis < x->shape.size()) << "axis must be a valid dimension index of x.";
  ICHECK(x->shape.size() - _axis == window_shape.size())
      << "There must be a window shape for every dimension of x over which we are forming windows.";
  ICHECK(strides.size() == window_shape.size()) << "Windows and strides should be the same length.";

  // Compute the new shape.
  Array<PrimExpr> new_shape;
  // Dimensions up until `axis` remain the same.
  for (size_t i = 0; i < _axis; ++i) {
    new_shape.push_back(x->shape[i]);
  }

  // New dimensions which result from sliding the window in each dimension. One new dimension per
  // window dimension.
  for (size_t i = 0; i < window_shape.size(); ++i) {
    // Length of the shape along this dimension.
    auto dim_len = x->shape[_axis + i];
    // Length of the window along this dimension.
    auto window_len = window_shape[i];
    // Strides along this dimension.
    auto stride = strides[i];

    // Number of valid window positions: ceil((dim_len - window_len + 1) / stride).
    new_shape.push_back(floordiv(dim_len - (window_len - 1) + stride - 1, stride));
  }

  // Dimensions comprising the window.
  for (size_t i = 0; i < window_shape.size(); ++i) {
    new_shape.push_back(window_shape[i]);
  }

  ICHECK(new_shape.size() == _axis + 2 * window_shape.size());

  return compute(
      new_shape,
      [&](const Array<Var>& indices) {
        ICHECK(indices.size() == _axis + 2 * window_shape.size());

        // The index at which to index the old tensor x.
        Array<PrimExpr> idx;

        // Dimensions up until `axis` remain the same.
        for (size_t i = 0; i < _axis; ++i) {
          idx.push_back(indices[i]);
        }

        for (size_t i = 0; i < window_shape.size(); ++i) {
          // Which window in this dimension we are indexing.
          auto window_idx = indices[_axis + i];
          // Which index within the window we are indexing.
          auto idx_within_window = indices[_axis + window_shape.size() + i];
          // Stride value for this dimension.
          auto stride = strides[i];

          idx.push_back(window_idx * stride + idx_within_window);
        }

        ICHECK(idx.size() == x->shape.size());

        return x(idx);
      },
      name, tag);
}

/*!
* \brief Creates an operation to insert new dimensions of length 1
*
Expand Down
9 changes: 9 additions & 0 deletions python/tvm/relay/op/_transform.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,15 @@
# concatenate
_reg.register_schedule("concatenate", strategy.schedule_concatenate)

# windows
@_reg.register_compute("windows")
def compute_windows(attrs, inputs, output_type):
    """Compute definition of windows"""
    # Forward the op attributes straight to the topi implementation.
    return [topi.windows(inputs[0], attrs.axis, attrs.window_shape, attrs.strides)]


# NOTE(review): both a compute and a strategy are registered for "windows", and
# windows_strategy already wraps this same topi compute — confirm that registering
# FTVMCompute and FTVMStrategy together for one op is intended.
_reg.register_strategy("windows", strategy.windows_strategy)

# strided_set
@_reg.register_compute("strided_set")
def compute_strided_set(attrs, inputs, output_type):
Expand Down
22 changes: 22 additions & 0 deletions python/tvm/relay/op/strategy/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -1662,6 +1662,28 @@ def uniform_strategy(attrs, inputs, out_type, target):
return strategy


# windows
def wrap_compute_windows():
    """Wrap the topi compute for the windows operator."""

    def _windows_compute_impl(attrs, inputs, _):
        data = inputs[0]
        return [topi.windows(data, attrs.axis, attrs.window_shape, attrs.strides)]

    return _windows_compute_impl


@override_native_generic_func("windows_strategy")
def windows_strategy(attrs, inputs, out_type, target):
    """Generic strategy for the windows operator."""
    windows_generic = _op.OpStrategy()
    windows_generic.add_implementation(
        wrap_compute_windows(),
        wrap_topi_schedule(topi.generic.schedule_extern),
        name="windows.generic",
    )
    return windows_generic


@override_native_generic_func("normal_strategy")
def normal_strategy(attrs, inputs, out_type, target):
"""normal generic strategy"""
Expand Down
57 changes: 57 additions & 0 deletions python/tvm/relay/op/transform.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,63 @@
from .tensor import shape_of


def windows(data, axis, window_shape, strides):
    """Form windows over the data tensor.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    axis : int
        What axis the windows begin forming over. Windows will be formed over
        this axis and all following axes. The axis value determines the window
        shape (and thus, the number of strides): window shape and strides must
        both be of length `data.ndim-axis`.

    window_shape : List[int]
        The window shape to form over the input. Window shape must be of length
        `data.ndim-axis`.

    strides : List[int]
        How to stride the window along each dimension. Strides must be of length
        `data.ndim-axis`.

    Returns
    -------
    result : relay.Expr
        The resulting tensor.

    Examples
    --------
    .. code-block:: python

        # Slide a window of shape (3, 4, 5) over the x tensor, beginning with
        # dimension 1, which slides the window over the two subtensors of
        # shape (3, 32, 32).
        x = relay.var("x", relay.TensorType((2, 3, 32, 32), "float32"))
        y = relay.windows(x, 1, [3, 4, 5], [1, 2, 3])
        data = np.random.rand(2, 3, 32, 32).astype("float32")
        result = create_executor().evaluate(y, {x: relay.const(data)}).numpy()

        # The resulting shape still has batch size 2. Each dimension in
        # (1, 15, 10) represents the locations where we were able to
        # form a window; that is, we were able to place the window
        # in one place along the dimension of length 3, 15 places along
        # the dimension of length 32 (when striding by 2), and 10 places
        # along the second dimension of length 32 (when striding by 3).
        # The remaining dimension (3, 4, 5) represent the formed windows.
        assert result.shape == (2, 1, 15, 10, 3, 4, 5)
        assert np.array_equal(result[0, 0, 0, 0, :, :, :], data[0, :, 0:4, 0:5])
        assert np.array_equal(result[1, 0, 7, 3, :, :, :], data[1, :, 14:18, 9:14])
        assert np.array_equal(result[1, 0, 14, 9, :, :, :], data[1, :, 28:32, 27:32])
    """
    # Imported at call time, as in the original — presumably to avoid an import
    # cycle at module load; confirm before hoisting to module level.
    from .. import _ffi_api

    return _ffi_api.windows(data, axis, window_shape, strides)


def cast(data, dtype):
"""Cast input tensor to data type.
Expand Down
30 changes: 30 additions & 0 deletions python/tvm/topi/transform.py
Original file line number Diff line number Diff line change
Expand Up @@ -971,3 +971,33 @@ def invert_permutation(data):
r_ind = data[ind]
result[r_ind] = ind
return result


def windows(data, axis, window_shape, strides):
    """Form windows over the data tensor.

    Thin Python wrapper that forwards to the C++ topi implementation
    (registered as ``topi.windows``).

    Parameters
    ----------
    data : tvm.te.Tensor
        The input data to the operator.
    axis : int
        What axis the windows begin forming over. Windows will be formed over
        this axis and all following axes. The axis value determines the window
        shape (and thus, the number of strides): window shape and strides must
        both be of length `data.ndim-axis`.
    window_shape : List[int]
        The window shape to form over the input. Window shape must be of length
        `data.ndim-axis`.
    strides : List[int]
        How to stride the window along each dimension. Strides must be of length
        `data.ndim-axis`.

    Returns
    -------
    result : tvm.te.Tensor
        The resulting tensor.
    """
    return cpp.windows(data, axis, window_shape, strides)
70 changes: 70 additions & 0 deletions src/relay/op/tensor/transform.cc
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,76 @@ namespace tvm {
namespace relay {
using tir::IntImmNode;

// Register WindowsAttrs with TVM's node system so instances can be created
// and reflected from the frontend.
TVM_REGISTER_NODE_TYPE(WindowsAttrs);

bool WindowsRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
// `types` contains: [data, result]
ICHECK_EQ(types.size(), 2);
const auto* data = types[0].as<TensorTypeNode>();
if (data == nullptr) {
ICHECK(types[0].as<IncompleteTypeNode>())
<< "windows: expect input type to be TensorType but get " << types[0];
return false;
}
const auto* param = attrs.as<WindowsAttrs>();
const int axis = param->axis;

std::vector<IndexExpr> oshape;

// Dimensions up until `axis` remain the same.
for (int i = 0; i < axis; ++i) {
oshape.emplace_back(data->shape[i]);
}

// New dimensions which result from sliding the window in each dimension. One new dimension per
// window dimension.
for (size_t i = 0; i < param->window_shape.size(); ++i) {
// Length of the shape along this dimension.
auto dim_len = data->shape[axis + i];
// Length of the window along this dimension.
auto window_len = param->window_shape[i];
// Strides along this dimension.
auto stride = param->strides[i];

oshape.push_back(floordiv(dim_len - (window_len - 1) + stride - 1, stride));
}

// Dimensions comprising the window.
for (size_t i = 0; i < param->window_shape.size(); ++i) {
oshape.push_back(param->window_shape[i]);
}

reporter->Assign(types[1], TensorType(oshape, data->dtype));
return true;
}

// Compute body for the windows operator: dispatch to topi::windows.
Array<te::Tensor> WindowsCompute(const Attrs& attrs, const Array<te::Tensor>& inputs,
                                 const Type& out_type) {
  const auto* windows_attrs = attrs.as<WindowsAttrs>();
  ICHECK(windows_attrs != nullptr);
  te::Tensor out = topi::windows(inputs[0], windows_attrs->axis, windows_attrs->window_shape,
                                 windows_attrs->strides);
  return Array<te::Tensor>{out};
}

// Build a relay call node for the windows operator with the given attributes.
Expr MakeWindows(Expr data, int axis, Array<Integer> window_shape, Array<Integer> strides) {
  static const Op& windows_op = Op::Get("windows");
  auto windows_attrs = make_object<WindowsAttrs>();
  windows_attrs->axis = axis;
  windows_attrs->window_shape = window_shape;
  windows_attrs->strides = strides;
  return Call(windows_op, {data}, Attrs(windows_attrs), {});
}

// Expose the constructor to the Python frontend under "relay.ir.windows"
// (called from python/tvm/relay/op/transform.py via _ffi_api.windows).
TVM_REGISTER_GLOBAL("relay.ir.windows").set_body_typed(MakeWindows);

// Register the relay operator: one tensor input, WindowsAttrs, the Windows
// type relation, and an opaque op pattern (no fusion).
RELAY_REGISTER_OP("windows")
    .describe(R"code(Form windows over a tensor.)code" TVM_ADD_FILELINE)
    .set_num_inputs(1)
    .set_attrs_type<WindowsAttrs>()
    .add_argument("data", "Tensor", "The input tensor.")
    .add_type_rel("Windows", WindowsRel)
    .set_attr<TOpPattern>("TOpPattern", kOpaque);

// relay.cast
TVM_REGISTER_NODE_TYPE(CastAttrs);

Expand Down
4 changes: 4 additions & 0 deletions src/topi/transform.cc
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,10 @@ TVM_REGISTER_GLOBAL("topi.reshape").set_body([](TVMArgs args, TVMRetValue* rv) {
*rv = reshape(args[0], args[1]);
});

// Expose topi::windows to the Python frontend as "topi.windows"
// (args: data, axis, window_shape, strides).
TVM_REGISTER_GLOBAL("topi.windows").set_body([](TVMArgs args, TVMRetValue* rv) {
  *rv = windows(args[0], args[1], args[2], args[3]);
});

TVM_REGISTER_GLOBAL("topi.squeeze").set_body([](TVMArgs args, TVMRetValue* rv) {
*rv = squeeze(args[0], ArrayOrInt(args[1]));
});
Expand Down
16 changes: 16 additions & 0 deletions tests/python/relay/test_op_level3.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,22 @@ def test_cast():
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")


def test_windows():
    """Check type inference and numeric output of the windows operator."""
    input_var = relay.var("x", relay.TensorType((2, 3, 32, 32), "float32"))
    expr = relay.windows(input_var, 1, [3, 4, 5], [1, 2, 3])
    inferred = run_infer_type(expr)
    assert inferred.checked_type == relay.TensorType((2, 1, 15, 10, 3, 4, 5), "float32")

    data = np.random.rand(2, 3, 32, 32).astype("float32")
    out = create_executor().evaluate(expr, {input_var: relay.const(data)}).numpy()
    assert out.shape == (2, 1, 15, 10, 3, 4, 5)

    # Each (window position) -> (slice of the input it should contain).
    cases = [
        ((0, 0, 0, 0), (0, slice(None), slice(0, 4), slice(0, 5))),
        ((1, 0, 7, 3), (1, slice(None), slice(14, 18), slice(9, 14))),
        ((1, 0, 14, 9), (1, slice(None), slice(28, 32), slice(27, 32))),
    ]
    for out_idx, in_idx in cases:
        assert np.array_equal(out[out_idx], data[in_idx])


def test_clip():
a = relay.var("a", relay.TensorType((10, 4), "float32"))
y = relay.clip(a, 1.0, 4.0)
Expand Down

0 comments on commit 49fb82c

Please sign in to comment.