[MLIR][TORCH] Add E2E support for aten.mean and aten.numel ops.
This commit adds lowering of the `aten.mean` and `aten.numel` ops: `aten.mean` is decomposed into `aten.sum` divided by `aten.numel`, and `aten.numel` is lowered to Linalg as a product over the tensor's dimension sizes.

Signed-off-by: Vivek Khandelwal <vivek@nod-labs.com>
vivekkhandelwal1 committed Dec 2, 2021
1 parent 5475d02 commit 46a0668
Showing 7 changed files with 161 additions and 2 deletions.
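
The decomposition strategy: aten.mean reduces to aten.sum divided by aten.numel, and aten.numel itself lowers to a product over the tensor's dimension sizes. A minimal PyTorch sketch of that identity (an illustration, not part of the commit):

import torch

x = torch.randn(3, 4)
# aten.mean decomposes into aten.sum followed by division by aten.numel,
# so the two expressions below must agree.
decomposed = torch.sum(x) / torch.numel(x)
assert torch.allclose(torch.mean(x), decomposed)
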
74 changes: 73 additions & 1 deletion e2e_testing/torchscript/basic.py
@@ -739,7 +739,6 @@ def forward(self, input, tensor1, tensor2):
def AddCDivModule_basic(module, tu: TestUtils):
    module.forward(tu.rand(1, 3), tu.rand(1, 3), tu.rand(1, 3))


# ==============================================================================

class DropoutModule(torch.nn.Module):
@@ -751,6 +750,7 @@ def __init__(self):
        None,
        ([-1, -1], torch.float32, True),
    ])

    def forward(self, x):
        return torch.dropout(x, 0.0, False)

@@ -809,3 +809,75 @@ def forward(self, tensor):
@register_test_case(module_factory=lambda: Fill_TensorFloat64WithInt64())
def Fill_TensorFloat64WithInt64_basic(module, tu: TestUtils):
    module.forward(torch.randn(3, 2, 4).to(torch.float64))


class MeanModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([3, 4], torch.float32, True),
    ])
    def forward(self, x):
        return torch.mean(x)


@register_test_case(module_factory=lambda: MeanModule())
def MeanModule_basic(module, tu: TestUtils):
    module.forward(torch.randn(3, 4))


class MeanDynamicSizesModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([-1, -1], torch.float32, True),
    ])
    def forward(self, x):
        return torch.mean(x)


@register_test_case(module_factory=lambda: MeanDynamicSizesModule())
def MeanDynamicSizesModule_basic(module, tu: TestUtils):
    module.forward(torch.randn(3, 4))


class NumelModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([-1, -1, -1], torch.float32, True),
    ])
    def forward(self, input):
        return torch.numel(input)


@register_test_case(module_factory=lambda: NumelModule())
def NumelModule_basic(module, tu: TestUtils):
    module.forward(tu.rand(4, 3, 5))


class NumelZeroRankModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([], torch.int64, True),
    ])
    def forward(self, input):
        return torch.numel(input)


@register_test_case(module_factory=lambda: NumelZeroRankModule())
def NumelZeroRankModule_basic(module, tu: TestUtils):
    module.forward(torch.randint(10, []))
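
NumelZeroRankModule exercises the rank-0 corner case: the product over an empty list of sizes is 1, so a 0-d tensor reports one element. A quick eager-mode check (not part of the commit):

import torch

scalar = torch.randint(10, [])  # 0-d tensor, shape == torch.Size([])
assert scalar.numel() == 1      # empty product of sizes is 1
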
19 changes: 19 additions & 0 deletions e2e_testing/torchscript/reduction.py
@@ -103,3 +103,22 @@ def forward(self, a):
@register_test_case(module_factory=lambda: ReduceSumDimIntListKeepDimModule())
def ReduceSumDimIntListKeepDimModule_basic(module, tu: TestUtils):
    module.forward(tu.rand(3, 4, 5))

# ==============================================================================

class ReduceMeanDtypeModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([-1, -1, -1], torch.float64, True),
    ])
    def forward(self, a):
        return torch.mean(a, dtype=torch.float32)


@register_test_case(module_factory=lambda: ReduceMeanDtypeModule())
def ReduceMeanDtypeModule_basic(module, tu: TestUtils):
    module.forward(tu.rand(3, 4, 5).to(torch.float64))
14 changes: 14 additions & 0 deletions include/torch-mlir/Dialect/Torch/IR/GeneratedAtenOps.td
@@ -3062,3 +3062,17 @@ def Torch_AtenAddCDivOp : Torch_Op<"aten.addcdiv", [
  let assemblyFormat = "$self `,` $tensor1 `,` $tensor2 `,` $value attr-dict `:` type($self) `,` type($tensor1) `,` type($tensor2) `,` type($value) `->` type($result)";
}

def Torch_AtenMeanOp : Torch_Op<"aten.mean", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::mean : (Tensor, int?) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$self,
    TorchOptionalIntType:$dtype
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$self `,` $dtype attr-dict `:` type($self) `,` type($dtype) `->` type($result)";
}
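
The signature `aten::mean : (Tensor, int?) -> (Tensor)` is the full-tensor reduction overload; the optional int encodes the target dtype. In PyTorch terms (illustrative only):

import torch

x = torch.rand(2, 3)
torch.mean(x)                       # dtype omitted, defaults to None
torch.mean(x, dtype=torch.float64)  # optional dtype supplied
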
25 changes: 25 additions & 0 deletions lib/Conversion/TorchToLinalg/TorchToLinalg.cpp
@@ -3130,6 +3130,29 @@ class ConvertPrimNumToTensorScalarOp
};
} // namespace

namespace {
class ConvertAtenNumelOp : public OpConversionPattern<AtenNumelOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(AtenNumelOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
      return failure();
    Location loc = op.getLoc();
    Value self = adaptor.self();
    SmallVector<Value> sizes(getTensorSizes(rewriter, loc, self));
    Value productResult =
        rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(1));
    for (Value size : sizes)
      productResult = rewriter.create<arith::MulIOp>(loc, productResult, size);
    rewriter.replaceOp(op, castIndexToInt(rewriter, loc, productResult));
    return success();
  }
};
} // namespace
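
The pattern folds arith.muli over the tensor's dimension sizes, starting from the index constant 1, then casts the result from index to int. A Python reference for the same computation (an illustration, not part of the commit):

import functools
import operator

def numel_reference(shape):
    # Fold multiplication over the sizes, starting from 1, mirroring the
    # arith.muli chain that ConvertAtenNumelOp builds.
    return functools.reduce(operator.mul, shape, 1)

assert numel_reference([4, 3, 5]) == 60
assert numel_reference([]) == 1  # rank-0 tensor -> one element
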

// -----------------------------------------------------------------------------
// The pass
// -----------------------------------------------------------------------------
@@ -3223,6 +3246,8 @@ class ConvertTorchToLinalg
    patterns.add<ConvertAtenDropoutOp>(typeConverter, context);
    target.addIllegalOp<AtenFill_ScalarOp>();
    patterns.add<ConvertAtenFill_ScalarOp>(typeConverter, context);
    target.addIllegalOp<AtenNumelOp>();
    patterns.add<ConvertAtenNumelOp>(typeConverter, context);

    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
23 changes: 23 additions & 0 deletions lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp
@@ -419,6 +419,27 @@ class DecomposeAtenAddmmOp : public OpRewritePattern<AtenAddmmOp> {
};
} // namespace

// Decompose torch.mean into: sum(x) / numel(x).
namespace {
class DecomposeAtenMeanOp : public OpRewritePattern<AtenMeanOp> {
public:
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(AtenMeanOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    Value input = op.self();
    Value output = op.result();
    BaseTensorType outputTensorType = output.getType().cast<BaseTensorType>();
    Value sum =
        rewriter.create<AtenSumOp>(loc, outputTensorType, input, op.dtype());
    Value numTensorElements = rewriter.create<AtenNumelOp>(loc, input);
    rewriter.replaceOpWithNewOp<AtenDivScalarOp>(op, outputTensorType, sum,
                                                 numTensorElements);
    return success();
  }
};
} // namespace
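
Since the dtype operand is forwarded to the inner aten.sum, the division already happens in the requested element type, which is what ReduceMeanDtypeModule verifies. The same identity in eager PyTorch (illustrative only):

import torch

x = torch.rand(3, 4, 5, dtype=torch.float64)
decomposed = torch.sum(x, dtype=torch.float32) / x.numel()
assert decomposed.dtype == torch.float32
assert torch.allclose(torch.mean(x, dtype=torch.float32), decomposed)
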

namespace {
template <typename OpTy, typename T1T2Op>
class DecomposeAtenAddCLikeOp : public OpRewritePattern<OpTy> {
@@ -464,6 +485,8 @@ class DecomposeComplexOpsPass
    target.addIllegalOp<AtenTanhBackwardOp>();
    patterns.add<DecomposeAtenAddmmOp>(context);
    target.addIllegalOp<AtenAddmmOp>();
    patterns.add<DecomposeAtenMeanOp>(context);
    target.addIllegalOp<AtenMeanOp>();
    patterns.add<DecomposeAtenMatmulOp>(context);
    patterns.add<DecomposeAten_LogSoftmaxBackwardDataOp>(context);
    target.addIllegalOp<Aten_LogSoftmaxBackwardDataOp>();
7 changes: 6 additions & 1 deletion lib/Dialect/Torch/Transforms/RefineTypes.cpp
@@ -438,6 +438,11 @@ class TypeAnalyzer : public ForwardDataFlowAnalysis<ValueKnowledge> {
      return visitAtenBmmOp(bmm, operands);
    } else if (auto matmul = dyn_cast<AtenMatmulOp>(op)) {
      return visitAtenMatmulOp(matmul, operands);
    } else if (auto mean = dyn_cast<AtenMeanOp>(op)) {
      Type defaultDtype = operands[0]->getValue().dtype;
      Type dtype =
          getDtypeOrDefault(mean.getContext(), mean.dtype(), defaultDtype);
      return visitReductionAlongAllDimsOp(mean, dtype, operands);
    } else if (auto softmaxIntOp = dyn_cast<AtenSoftmaxIntOp>(op)) {
      return visitAtenSoftmaxLikeOp(softmaxIntOp, operands);
    } else if (auto _softmaxOp = dyn_cast<Aten_SoftmaxOp>(op)) {
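
The refinement rule keeps the operand's dtype unless an explicit dtype is supplied, matching eager-mode behavior (a quick check, not part of the commit):

import torch

x = torch.rand(3, 4, dtype=torch.float64)
assert torch.mean(x).dtype == torch.float64                       # default: operand dtype
assert torch.mean(x, dtype=torch.float32).dtype == torch.float32  # explicit dtype wins
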
@@ -563,7 +568,7 @@ class TypeAnalyzer : public ForwardDataFlowAnalysis<ValueKnowledge> {
  ChangeResult
  visitAtenMatmulOp(AtenMatmulOp op,
                    ArrayRef<LatticeElement<ValueKnowledge> *> operands);

  template <typename OpTy>
  ChangeResult
  visitAtenSoftmaxLikeOp(OpTy op,
1 change: 1 addition & 0 deletions python/torch_mlir/dialects/torch/importer/jit_ir/build_tools/torch_ods_gen.py
@@ -520,6 +520,7 @@ def emit_with_mutating_variants(key, **kwargs):
emit("aten::__and__.Tensor : (Tensor, Tensor) -> (Tensor)")
emit("aten::sqrt : (Tensor) -> (Tensor)")
emit("aten::_softmax : (Tensor, int, bool) -> (Tensor)")
emit("aten::mean : (Tensor, int?) -> (Tensor)")

    # Misc tensor ops.
    emit("aten::unsqueeze : (Tensor, int) -> (Tensor)")
