Skip to content

Commit 8252656

Browse files
Advance llvm-project and stablehlo. (#2619)
llvm-project: bbd2b08 stablehlo: ab709fe48de88c67717abfbd7ef17425eb95ddaf These commits were chosen in order to account for an MLIR API break from llvm/llvm-project@3dbac2c which required a patch to stablehlo. We integrate a bit beyond that commit to deal with some revert/reapply cycles in the intervening range which were discovered in another downstream. Further, it requires adaptation to the stablehlo API breaks introduced from openxla/stablehlo#1872 which are along for the ride. Since some stablehlo builders were changed to directly take int64_t array refs, we also traced that up some call stacks to eliminate some signed/unsigned mismatches that result. This change also adds a few TOSA tests to the passing set that seem to work now.
1 parent 63505ad commit 8252656

File tree

7 files changed

+43
-51
lines changed

7 files changed

+43
-51
lines changed

externals/llvm-project

Submodule llvm-project updated 3620 files

externals/stablehlo

Submodule stablehlo updated 311 files

include/torch-mlir/Conversion/TorchToStablehlo/StablehloLegalizeUtils.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ Value promoteType(PatternRewriter &rewriter, Location loc, Value input,
5151
Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
5252
TensorType outType);
5353

54-
SmallVector<size_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank);
54+
SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank);
5555

5656
// Get the dimension sizes of the input tensor, given the dimension axes
5757
FailureOr<SmallVector<Value, 4>> getDimSizesOfTensor(PatternRewriter &rewriter,

lib/Conversion/TorchToStablehlo/Basic.cpp

Lines changed: 3 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -615,12 +615,8 @@ class ConvertAtenTransposeIntOp
615615
SmallVector<int64_t> permValues(inputRank);
616616
std::iota(std::begin(permValues), std::end(permValues), 0);
617617
std::swap(permValues[dim0], permValues[dim1]);
618-
DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
619-
RankedTensorType::get({static_cast<long int>(permValues.size())},
620-
rewriter.getI64Type()),
621-
permValues);
622618
rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
623-
permutation);
619+
permValues);
624620
return success();
625621
}
626622
};
@@ -793,12 +789,8 @@ LogicalResult ConvertAtenOp<AtenPermuteOp>::matchAndRewrite(
793789
return op.emitError("not all dims are valid");
794790
}
795791

796-
DenseIntElementsAttr permutation = DenseIntElementsAttr::get(
797-
RankedTensorType::get({static_cast<long int>(permValues.size())},
798-
rewriter.getI64Type()),
799-
permValues);
800792
rewriter.replaceOpWithNewOp<stablehlo::TransposeOp>(op, outType, self,
801-
permutation);
793+
permValues);
802794
return success();
803795
}
804796

@@ -1755,8 +1747,7 @@ LogicalResult ConvertAtenOp<AtenFlipOp>::matchAndRewrite(
17551747
}
17561748
}
17571749

1758-
rewriter.replaceOpWithNewOp<stablehlo::ReverseOp>(
1759-
op, outType, self, rewriter.getI64TensorAttr(dims));
1750+
rewriter.replaceOpWithNewOp<stablehlo::ReverseOp>(op, outType, self, dims);
17601751
return success();
17611752
}
17621753

lib/Conversion/TorchToStablehlo/Linear.cpp

Lines changed: 5 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -62,13 +62,9 @@ Value getPermutedTensor(PatternRewriter &rewriter, Operation *op, Value input,
6262
newShape.push_back(inpShape[d]);
6363
}
6464

65-
auto attrTy = RankedTensorType::get({static_cast<int64_t>(transDims.size())},
66-
rewriter.getIntegerType(64));
67-
auto permuteAttr = DenseIntElementsAttr::get(attrTy, transDims);
68-
6965
auto outTy = RankedTensorType::get(newShape, inputTy.getElementType());
7066
auto result = rewriter.create<stablehlo::TransposeOp>(op->getLoc(), outTy,
71-
input, permuteAttr);
67+
input, transDims);
7268
return result.getResult();
7369
}
7470

@@ -500,8 +496,8 @@ class ConvertAtenConvolutionOp : public ConvertAtenOp<AtenConvolutionOp> {
500496
for (int64_t i = 0; i <= rank; i++)
501497
transposeDims[i] = i;
502498
std::swap(transposeDims[rank - 1], transposeDims[rank - 2]);
503-
weight = rewriter.create<stablehlo::TransposeOp>(
504-
op->getLoc(), weight, rewriter.getI64TensorAttr(transposeDims));
499+
weight = rewriter.create<stablehlo::TransposeOp>(op->getLoc(), weight,
500+
transposeDims);
505501

506502
// 3. [H, W, ..., G, OC, IC//G] => [H, W, ..., G*OC, IC//G]
507503
weightShapeInt.erase(weightShapeInt.end() - 2);
@@ -546,12 +542,10 @@ class ConvertAtenConvolutionOp : public ConvertAtenOp<AtenConvolutionOp> {
546542
}
547543
auto transposeTy =
548544
RankedTensorType::get(transposeShape, weightTy.getElementType());
549-
DenseIntElementsAttr permAttr = DenseIntElementsAttr::get(
550-
RankedTensorType::get({nDims}, rewriter.getI64Type()), perm);
551545
auto transposeOp = rewriter.create<stablehlo::TransposeOp>(
552-
op->getLoc(), transposeTy, weight, permAttr);
546+
op->getLoc(), transposeTy, weight, perm);
553547
auto reverseOp = rewriter.create<stablehlo::ReverseOp>(
554-
op->getLoc(), transposeOp, rewriter.getI64TensorAttr({0, 1}));
548+
op->getLoc(), transposeOp, ArrayRef<int64_t>{0, 1});
555549

556550
// Prepare for transposed convolution
557551
SmallVector<int64_t> stablehloStrideVec(nSpatialDims, 1);

lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -250,12 +250,12 @@ Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
250250
return bcast_op.getResult();
251251
}
252252

253-
SmallVector<size_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank) {
254-
SmallVector<size_t> posDims;
253+
SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank) {
254+
SmallVector<int64_t> posDims;
255255
posDims.reserve(rank);
256256
std::transform(
257257
dims.begin(), dims.end(), std::back_inserter(posDims),
258-
[rank](int64_t d) -> size_t { return toPositiveDim(d, rank); });
258+
[rank](int64_t d) -> int64_t { return toPositiveDim(d, rank); });
259259
return posDims;
260260
}
261261

@@ -316,10 +316,10 @@ FailureOr<Value> unsqueezeTensor(PatternRewriter &rewriter, Operation *op,
316316
op, "failed to get dimension sizes of the input");
317317

318318
auto dimSizes = *dimSizesInfo;
319-
auto rank = dimSizes.size();
320-
size_t newRank = rank + inputUnsqzDims.size();
319+
int64_t rank = dimSizes.size();
320+
int64_t newRank = rank + inputUnsqzDims.size();
321321
auto unsqzDims = toPositiveDims(inputUnsqzDims, newRank);
322-
for (size_t k = 0, sz = unsqzDims.size(); k < sz; ++k)
322+
for (int64_t k = 0, sz = unsqzDims.size(); k < sz; ++k)
323323
if (k > 1 && unsqzDims[k] <= unsqzDims[k - 1])
324324
return rewriter.notifyMatchFailure(
325325
op, "unsqueeze dimensions must be specified in order");
@@ -335,8 +335,8 @@ FailureOr<Value> unsqueezeTensor(PatternRewriter &rewriter, Operation *op,
335335
std::vector<int64_t> newShape;
336336
newDimSizes.reserve(newRank);
337337
newShape.reserve(newRank);
338-
for (size_t k = 0, i = 0, j = 0; k < newRank; ++k) {
339-
if (j < unsqzDims.size() && unsqzDims[j] == k) {
338+
for (int64_t k = 0, i = 0, j = 0; k < newRank; ++k) {
339+
if (j < static_cast<int64_t>(unsqzDims.size()) && unsqzDims[j] == k) {
340340
newDimSizes.push_back(one);
341341
newShape.push_back(1);
342342
j++;

projects/pt1/e2e_testing/xfail_sets.py

Lines changed: 24 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,8 @@
1313
from torch_mlir_e2e_test.test_suite import COMMON_TORCH_MLIR_LOWERING_XFAILS
1414
from torch_mlir._version import torch_version_for_comparison, version
1515

16+
print(f"TORCH_VERSION_FOR_COMPARISON =", torch_version_for_comparison())
17+
1618
LINALG_XFAIL_SET = COMMON_TORCH_MLIR_LOWERING_XFAILS | {
1719
# Lowering Torch Backend IR -> Linalg-on-Tensors Backend IR failed
1820
# 'linalg.depthwise_conv_2d_nchw_chw' op inferred input/output operand #1 has shape's dimension #0 to be 4, but found 8
@@ -21,6 +23,14 @@
2123
"IscloseStaticModuleTrue_basic"
2224
}
2325

26+
if torch_version_for_comparison() >= version.parse("2.2.0.dev20231204"):
27+
LINALG_XFAIL_SET |= {
28+
"Conv2dWithPaddingDilationStrideStaticModule_grouped",
29+
"Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier",
30+
"ConvolutionModule2DGroups_basic",
31+
}
32+
33+
2434
TORCHDYNAMO_XFAIL_SET = {
2535
#### General TorchDynamo/PyTorch errors
2636

@@ -306,10 +316,11 @@
306316
"ArangeStartOutViewModule_basic",
307317
}
308318

309-
if torch_version_for_comparison() < version.parse("2.1.0.dev"):
310-
TORCHDYNAMO_XFAIL_SET -= {
311-
"ScaledDotProductAttentionSameModule_basic",
312-
"ScaledDotProductAttentionDifferentModule_basic",
319+
if torch_version_for_comparison() >= version.parse("2.2.0.dev20231204"):
320+
TORCHDYNAMO_XFAIL_SET |= {
321+
"Conv2dWithPaddingDilationStrideStaticModule_grouped",
322+
"Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier",
323+
"ConvolutionModule2DGroups_basic",
313324
}
314325

315326
TORCHDYNAMO_CRASHING_SET = {
@@ -1305,6 +1316,10 @@
13051316
"MeanModule_basic",
13061317
"ArangeStartOutModule_basic",
13071318
"ArangeStartOutViewModule_basic",
1319+
"Conv2dBiasNoPaddingModule_basic",
1320+
"Conv2dNoPaddingModule_basic",
1321+
"Conv2dWithPaddingDilationStrideModule_basic",
1322+
"Conv2dWithPaddingModule_basic",
13081323
}
13091324

13101325
MAKE_FX_TOSA_PASS_SET = (TOSA_PASS_SET | {
@@ -1335,20 +1350,12 @@
13351350
# failed to legalize operation 'torch.aten.to.dtype' that was explicitly marked illegal
13361351
"AtenEyeModuleInt2D_basic",
13371352
"AtenEyeMModuleInt2D_basic",
1338-
}
13391353

1340-
if torch_version_for_comparison() < version.parse("2.1.0.dev"):
1341-
MAKE_FX_TOSA_PASS_SET -= {
1342-
# 'tensor.expand_shape' op expected rank expansion, but found source rank 1 >= result rank 1
1343-
"ReshapeCollapseModule_basic",
1344-
1345-
# failed to lower torch.aten.empty.memory_format
1346-
"BatchNorm1DModule_basic",
1347-
"BatchNorm1DWith2DInputModule_basic",
1348-
"BatchNorm2DModule_basic",
1349-
"BatchNorm3DModule_basic",
1350-
"BatchNorm1DStaticShapeModule_basic",
1351-
}
1354+
"Conv2dBiasNoPaddingModule_basic",
1355+
"Conv2dNoPaddingModule_basic",
1356+
"Conv2dWithPaddingDilationStrideModule_basic",
1357+
"Conv2dWithPaddingModule_basic",
1358+
}
13521359

13531360
LTC_CRASHING_SET = {
13541361
# TODO: update test to move all inputs to the lazy device. Otherwise test fails with:

0 commit comments

Comments
 (0)