build: update llvm tag to de3f0f7 (llvm#1789)
Credit to @vivekkhandelwal1 for finding the necessary changes.

Summary of changes:

 - Switch Tosa_IntArrayAttr[N], Tosa_IntArrayAttrUpto[N] to DenseI64ArrayAttr (see the sketch after this list).

 - Replace kNoIterationLimit with kNoLimit. (https://reviews.llvm.org/D140525)

 - Add dependency on MhloPasses when MHLO is enabled.

 - Specify result type when using mhlo::DotOp.
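
For the TOSA change in particular, the edit is mechanical: every shape-like operand that was built with rewriter.getI64ArrayAttr(...) is now built with rewriter.getDenseI64ArrayAttr(...), and ArrayAttr locals and parameters become DenseI64ArrayAttr. A minimal sketch of the new call shape, using a hypothetical helper (the helper name and surrounding signature are illustrative, not taken from a specific hunk below):

// Sketch only: hypothetical helper showing the updated builder call.
// The shape that used to be passed as rewriter.getI64ArrayAttr(newShape) is
// now a DenseI64ArrayAttr; the SmallVector<int64_t> feeding it is unchanged.
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/Transforms/DialectConversion.h"

static mlir::Value reshapeTo(mlir::ConversionPatternRewriter &rewriter,
                             mlir::Location loc, mlir::Type resultType,
                             mlir::Value input,
                             llvm::ArrayRef<int64_t> newShape) {
  return rewriter.create<mlir::tosa::ReshapeOp>(
      loc, resultType, input, rewriter.getDenseI64ArrayAttr(newShape));
}

The same renaming runs through the pooling helpers below, where ArrayAttr &kernel, &stride, and &pad parameters become DenseI64ArrayAttr &.
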
ashay authored Jan 10, 2023
1 parent 0979df6 commit 0faba6d
Showing 12 changed files with 107 additions and 93 deletions.
2 changes: 1 addition & 1 deletion externals/llvm-project
Submodule llvm-project updated 13012 files
2 changes: 1 addition & 1 deletion externals/mlir-hlo
Submodule mlir-hlo updated 258 files
4 changes: 3 additions & 1 deletion lib/Conversion/CMakeLists.txt
@@ -18,7 +18,9 @@ set(linked_libs TorchMLIRTorchToLinalg
TorchMLIRTorchConversionToMLProgram
TorchMLIRConversionUtils)
if(TORCH_MLIR_ENABLE_MHLO)
list(APPEND linked_libs TorchMLIRTorchToMhlo)
list(APPEND linked_libs
MhloPasses
TorchMLIRTorchToMhlo)
endif()

add_mlir_library(TorchMLIRConversionPasses
4 changes: 2 additions & 2 deletions lib/Conversion/Passes.cpp
@@ -11,7 +11,7 @@

#ifdef TORCH_MLIR_ENABLE_MHLO
#include "mhlo/transforms/passes.h"
#include "mlir-hlo/Transforms/passes.h"
#include "transforms/passes.h"
#endif // TORCH_MLIR_ENABLE_MHLO
#include "torch-mlir/Conversion/TorchToLinalg/TorchToLinalg.h"
#include "torch-mlir/Conversion/TorchToSCF/TorchToSCF.h"
@@ -37,7 +37,7 @@ void mlir::torch::registerConversionPasses() {
return mlir::mhlo::createLegalizeHloToLinalgPass();
});
::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> {
return mlir::createSymbolicShapeOptimizationPass();
return mlir::mhlo::createSymbolicShapeOptimizationPass();
});
#endif // TORCH_MLIR_ENABLE_MHLO
}
5 changes: 4 additions & 1 deletion lib/Conversion/TorchToMhlo/Linear.cpp
@@ -216,7 +216,10 @@ class ConvertAtenMatmulBaseOp : public ConvertAtenOp<AtenOpT> {
}

if (lhsRank <= 2 && rhsRank <= 2) {
output = rewriter.create<mhlo::DotOp>(op->getLoc(), lhs, rhs, nullptr);
auto tensorType =
ConvertAtenOp<AtenOpT>::getTypeConverter()->convertType(op.getType());
output = rewriter.create<mhlo::DotOp>(op->getLoc(), tensorType, lhs, rhs,
nullptr);
return success();
}

103 changes: 56 additions & 47 deletions lib/Conversion/TorchToTosa/TorchToTosa.cpp
@@ -881,7 +881,7 @@ class ConvertAtenSqueezeOp : public OpConversionPattern<AtenOpT> {
op->getLoc(),
OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
newOutputTy),
self, rewriter.getI64ArrayAttr(newOutputShape));
self, rewriter.getDenseI64ArrayAttr(newOutputShape));
rewriter.replaceOpWithNewOp<tensor::CastOp>(
op,
OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
@@ -1076,7 +1076,7 @@ class ConvertAtenMatmulBaseOp : public OpConversionPattern<AtenOpT> {
op->getLoc(),
OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
lhsBroadcastedTy),
lhs, rewriter.getI64ArrayAttr(lhsBroadcastedShape));
lhs, rewriter.getDenseI64ArrayAttr(lhsBroadcastedShape));

auto rankBroadcastedRhs =
rhsRank == maxInputRank
@@ -1085,7 +1085,7 @@ class ConvertAtenMatmulBaseOp : public OpConversionPattern<AtenOpT> {
op->getLoc(),
OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
rhsBroadcastedTy),
rhs, rewriter.getI64ArrayAttr(rhsBroadcastedShape));
rhs, rewriter.getDenseI64ArrayAttr(rhsBroadcastedShape));

// TOSA matmul is performed on two 3D inputs and generates a 3D output.
// Lower ranked tensors are dim-1 reshaped up to 3D
@@ -1113,7 +1113,7 @@ class ConvertAtenMatmulBaseOp : public OpConversionPattern<AtenOpT> {
op->getLoc(),
OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
newType),
tensor, rewriter.getI64ArrayAttr(newShape));
tensor, rewriter.getDenseI64ArrayAttr(newShape));
};

// Where broadcasting is required in one or more batch dims, the following
@@ -1303,7 +1303,7 @@ class ConvertAtenMatmulBaseOp : public OpConversionPattern<AtenOpT> {
op->getLoc(),
OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
newLhsType),
lhsReshapeInput, rewriter.getI64ArrayAttr(newLhsShape));
lhsReshapeInput, rewriter.getDenseI64ArrayAttr(newLhsShape));

SmallVector<int64_t> transposedRhsShape;
SmallVector<int32_t> transposedRhsDims;
@@ -1375,7 +1375,7 @@ class ConvertAtenMatmulBaseOp : public OpConversionPattern<AtenOpT> {
op->getLoc(),
OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
newRhsType),
transposedRhsValue, rewriter.getI64ArrayAttr(newRhsShape));
transposedRhsValue, rewriter.getDenseI64ArrayAttr(newRhsShape));
}

auto matmulLhsShape = makeShapeTorchCompatible(
@@ -1506,7 +1506,7 @@ class ConvertAtenMatmulBaseOp : public OpConversionPattern<AtenOpT> {
op->getLoc(),
OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
reshapedOpType),
mmOpResult, rewriter.getI64ArrayAttr(reshapedOpShape));
mmOpResult, rewriter.getDenseI64ArrayAttr(reshapedOpShape));

if (opNeedsTranspose) {

@@ -1915,9 +1915,9 @@ LogicalResult ConvertAtenOp<AtenConvolutionOp>::matchAndRewrite(
.create<tosa::Conv2DOp>(op->getLoc(),
getTypeConverter()->convertType(convOpTy),
transposedInput, transposedWeight, bias,
rewriter.getI64ArrayAttr(padding),
rewriter.getI64ArrayAttr(stride),
rewriter.getI64ArrayAttr(dilation))
rewriter.getDenseI64ArrayAttr(padding),
rewriter.getDenseI64ArrayAttr(stride),
rewriter.getDenseI64ArrayAttr(dilation))
.getResult();

std::optional<Value> nhwcToNchwTransposeConst =
@@ -1979,7 +1979,7 @@ LogicalResult ConvertAtenOp<AtenReshapeOp>::matchAndRewrite(

rewriter.replaceOpWithNewOp<tosa::ReshapeOp>(
op, getTypeConverter()->convertType(newType), self,
rewriter.getI64ArrayAttr(newShape));
rewriter.getDenseI64ArrayAttr(newShape));

return success();
}
@@ -2078,7 +2078,8 @@ LogicalResult ConvertAtenOp<AtenBatchNormOp>::matchAndRewrite(
outTensorType.getElementType());

result = rewriter.create<tosa::ReshapeOp>(
op->getLoc(), newType, toBcast, rewriter.getI64ArrayAttr(newShape));
op->getLoc(), newType, toBcast,
rewriter.getDenseI64ArrayAttr(newShape));

return success();
};
@@ -2203,8 +2204,8 @@ LogicalResult ConvertAtenOp<AtenNativeLayerNormOp>::matchAndRewrite(
sumDiv, rewriter.getI64IntegerAttr(i));
}

return rewriter.create<tosa::ReshapeOp>(op.getLoc(), outType, sumDiv,
rewriter.getI64ArrayAttr(outShape));
return rewriter.create<tosa::ReshapeOp>(
op.getLoc(), outType, sumDiv, rewriter.getDenseI64ArrayAttr(outShape));
};

// TOSA has integer Div so, compute reciprocal of element count to be used in
@@ -2260,11 +2261,11 @@ LogicalResult ConvertAtenOp<AtenNativeLayerNormOp>::matchAndRewrite(

Value weightVal = rewriter.create<tosa::ReshapeOp>(
op.getLoc(), weightAndMeanBcastType, adaptor.getWeight(),
rewriter.getI64ArrayAttr(weightAndBiasBcastShape));
rewriter.getDenseI64ArrayAttr(weightAndBiasBcastShape));

Value biasVal = rewriter.create<tosa::ReshapeOp>(
op.getLoc(), weightAndMeanBcastType, adaptor.getBias(),
rewriter.getI64ArrayAttr(weightAndBiasBcastShape));
rewriter.getDenseI64ArrayAttr(weightAndBiasBcastShape));

double eps;
if (!matchPattern(op.getEps(), m_TorchConstantFloat(&eps)))
@@ -2365,8 +2366,9 @@ LogicalResult ConvertAtenOp<AtenFlattenUsingIntsOp>::matchAndRewrite(

auto newType = RankedTensorType::get(makeShapeLLVMCompatible(newShape),
selfType.getElementType());
auto reshapeOp = rewriter.create<tosa::ReshapeOp>(
op.getLoc(), newType, adaptor.getSelf(), rewriter.getI64ArrayAttr(newShape));
auto reshapeOp =
rewriter.create<tosa::ReshapeOp>(op.getLoc(), newType, adaptor.getSelf(),
rewriter.getDenseI64ArrayAttr(newShape));

rewriter.replaceOpWithNewOp<tensor::CastOp>(
op, getTypeConverter()->convertType(op.getType()), reshapeOp);
@@ -2530,7 +2532,7 @@ LogicalResult ConvertAtenOp<AtenUnsqueezeOp>::matchAndRewrite(

rewriter.replaceOpWithNewOp<tosa::ReshapeOp>(
op, getTypeConverter()->convertType(op.getType()), adaptor.getSelf(),
rewriter.getI64ArrayAttr(outShape));
rewriter.getDenseI64ArrayAttr(outShape));

return success();
}
@@ -2603,7 +2605,7 @@ LogicalResult ConvertAtenOp<AtenViewOp>::matchAndRewrite(

rewriter.replaceOpWithNewOp<tosa::ReshapeOp>(
op, getTypeConverter()->convertType(op.getType()), adaptor.getSelf(),
rewriter.getI64ArrayAttr(outShape));
rewriter.getDenseI64ArrayAttr(outShape));

return success();
}
@@ -2838,7 +2840,7 @@ LogicalResult ConvertAtenOp<AtenEmbeddingOp>::matchAndRewrite(
op->getLoc(),
RankedTensorType::get(makeShapeLLVMCompatible(newWeightShape),
weightType.getElementType()),
weight, rewriter.getI64ArrayAttr(newWeightShape));
weight, rewriter.getDenseI64ArrayAttr(newWeightShape));

int64_t numIndices = 1;
if (indicesType.hasStaticShape()) {
@@ -2853,7 +2855,7 @@
op->getLoc(),
RankedTensorType::get(makeShapeLLVMCompatible(newIndicesShape),
indicesType.getElementType()),
indices, rewriter.getI64ArrayAttr(newIndicesShape));
indices, rewriter.getDenseI64ArrayAttr(newIndicesShape));

auto castIndices = rewriter.create<tosa::CastOp>(
op->getLoc(),
@@ -2870,7 +2872,8 @@

rewriter.replaceOpWithNewOp<tosa::ReshapeOp>(
op, outType, gatherOp,
rewriter.getI64ArrayAttr(makeShapeTorchCompatible(outType.getShape())));
rewriter.getDenseI64ArrayAttr(
makeShapeTorchCompatible(outType.getShape())));

return success();
}
@@ -2960,7 +2963,7 @@ LogicalResult ConvertAtenOp<AtenMaxDimOp>::matchAndRewrite(
}

auto dimAttr = rewriter.getIntegerAttr(rewriter.getI64Type(), dim);
auto prunedShapeAttr = rewriter.getI64ArrayAttr(prunedShape);
auto prunedShapeAttr = rewriter.getDenseI64ArrayAttr(prunedShape);

Value reduceMax = rewriter.create<tosa::ReduceMaxOp>(
op->getLoc(),
@@ -2975,7 +2978,7 @@
if (argMax.getType() != indicesType) {
argMax = rewriter.create<tosa::ReshapeOp>(
op->getLoc(), indicesType, argMax,
rewriter.getI64ArrayAttr(reducedShape));
rewriter.getDenseI64ArrayAttr(reducedShape));
}

if (!keepDim) {
@@ -3043,8 +3046,8 @@ LogicalResult ConvertAtenOp<AtenSliceTensorOp>::matchAndRewrite(

rewriter.replaceOpWithNewOp<tosa::SliceOp>(
op, getTypeConverter()->convertType(op.getType()), adaptor.getSelf(),
rewriter.getI64ArrayAttr(startSlice),
rewriter.getI64ArrayAttr(sizeSlice));
rewriter.getDenseI64ArrayAttr(startSlice),
rewriter.getDenseI64ArrayAttr(sizeSlice));

return success();
}
@@ -3427,8 +3430,9 @@ class ConvertAtenPoolingBaseOp : public OpConversionPattern<AtenOpT> {
// function also transposes inputs.
virtual LogicalResult processInputs(AtenOpT op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter,
Value &input, ArrayAttr &kernel,
ArrayAttr &stride, ArrayAttr &pad,
Value &input, DenseI64ArrayAttr &kernel,
DenseI64ArrayAttr &stride,
DenseI64ArrayAttr &pad,
Type &outputTy) const {
return rewriter.notifyMatchFailure(
op, "Unimplemented pooling input parsing function");
@@ -3503,7 +3507,7 @@ class ConvertAtenPoolingBaseOp : public OpConversionPattern<AtenOpT> {
matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
Value input;
ArrayAttr kernel, stride, pad;
DenseI64ArrayAttr kernel, stride, pad;
Type outputTy;

// Attempts to read input and kernel parameters, or synthesize them in the
Expand Down Expand Up @@ -3540,8 +3544,9 @@ class ConvertAtenAdaptivePoolingOp
using OpAdaptor = typename AtenOpT::Adaptor;
LogicalResult processInputs(AtenOpT op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter, Value &input,
ArrayAttr &kernel, ArrayAttr &stride,
ArrayAttr &pad, Type &outputTy) const override {
DenseI64ArrayAttr &kernel,
DenseI64ArrayAttr &stride, DenseI64ArrayAttr &pad,
Type &outputTy) const override {
auto inputXchw = adaptor.getSelf();
auto inputTy = inputXchw.getType().template cast<RankedTensorType>();
if (!inputTy)
@@ -3603,12 +3608,12 @@ class ConvertAtenAdaptivePoolingOp
input =
ConvertAtenPoolingBaseOp<AtenOpT, TosaOpT>::transposePoolingInputToHwc(
op, rewriter, inputXchw);
kernel = rewriter.getI64ArrayAttr(kernelDims);
stride = rewriter.getI64ArrayAttr({strideH, strideW});
kernel = rewriter.getDenseI64ArrayAttr(kernelDims);
stride = rewriter.getDenseI64ArrayAttr({strideH, strideW});
// Adaptive pooling does unit dilation and zero pad.
pad = rewriter.getI64ArrayAttr({0, 0, 0, 0});
outputTy =
RankedTensorType::get(makeShapeLLVMCompatible(outputShape), inputElemTy);
pad = rewriter.getDenseI64ArrayAttr({0, 0, 0, 0});
outputTy = RankedTensorType::get(makeShapeLLVMCompatible(outputShape),
inputElemTy);

return success();
}
@@ -3643,8 +3648,9 @@ static Type getOutputTypeForNonAdaptivePoolingOp(
template <typename AtenOpT, typename tosaOp>
static LogicalResult getOutputTypeAndPoolingParameters(
AtenOpT op, ConversionPatternRewriter &rewriter, Value inputXchw,
SmallVectorImpl<int64_t> &dilationArray, Type &outputTy, ArrayAttr &kernel,
ArrayAttr &stride, ArrayAttr &pad) {
SmallVectorImpl<int64_t> &dilationArray, Type &outputTy,
DenseI64ArrayAttr &kernel, DenseI64ArrayAttr &stride,
DenseI64ArrayAttr &pad) {

RankedTensorType inputTy = inputXchw.getType().cast<RankedTensorType>();
if (!inputTy)
@@ -3669,9 +3675,9 @@ static LogicalResult getOutputTypeAndPoolingParameters(
return rewriter.notifyMatchFailure(
op, "Non-const padding factor for pooling op unsupported");

kernel = rewriter.getI64ArrayAttr(kernelSizeInts);
stride = rewriter.getI64ArrayAttr(strideInts);
pad = rewriter.getI64ArrayAttr(
kernel = rewriter.getDenseI64ArrayAttr(kernelSizeInts);
stride = rewriter.getDenseI64ArrayAttr(strideInts);
pad = rewriter.getDenseI64ArrayAttr(
{paddingInts[0], paddingInts[0], paddingInts[1], paddingInts[1]});

// FIXME: add ceil_mode support.
Expand All @@ -3696,10 +3702,12 @@ class ConvertAtenMaxPool2dOp
tosa::MaxPool2dOp>::ConvertAtenPoolingBaseOp;
LogicalResult processInputs(AtenMaxPool2dOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter, Value &input,
ArrayAttr &kernel, ArrayAttr &stride,
ArrayAttr &pad, Type &outputTy) const override {
DenseI64ArrayAttr &kernel,
DenseI64ArrayAttr &stride, DenseI64ArrayAttr &pad,
Type &outputTy) const override {
SmallVector<int64_t, 2> dilationArray;
if (!matchPattern(op.getDilation(), m_TorchListOfConstantInts(dilationArray)))
if (!matchPattern(op.getDilation(),
m_TorchListOfConstantInts(dilationArray)))
return rewriter.notifyMatchFailure(
op, "Non-const dilation for pooling op unsupported.");
// TOSA pooling only supports unit dilation.
@@ -3729,8 +3737,9 @@ class ConvertAtenAvgPool2dOp
tosa::AvgPool2dOp>::ConvertAtenPoolingBaseOp;
LogicalResult processInputs(AtenAvgPool2dOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter, Value &input,
ArrayAttr &kernel, ArrayAttr &stride,
ArrayAttr &pad, Type &outputTy) const override {
DenseI64ArrayAttr &kernel,
DenseI64ArrayAttr &stride, DenseI64ArrayAttr &pad,
Type &outputTy) const override {
SmallVector<int64_t, 2> dilationArray{1, 1};
if (failed(getOutputTypeAndPoolingParameters<AtenAvgPool2dOp,
tosa::AvgPool2dOp>(