Bump llvm and stablehlo (#3377)
* bump llvm to 1e5f29a
* bump stablehlo to c44d9af8d4879adccf1054cb61a53377ae5898cb
qingyunqu authored May 22, 2024
1 parent 4d7cdba commit f4bfe3f
Showing 6 changed files with 64 additions and 16 deletions.
externals/llvm-project: 2 changes (1 addition, 1 deletion)
Submodule llvm-project updated 1641 files
externals/stablehlo: 2 changes (1 addition, 1 deletion)
Submodule stablehlo updated 148 files
lib/Conversion/TorchToStablehlo/GatherScatter.cpp: 10 changes (10 additions, 0 deletions)
@@ -101,6 +101,8 @@ Value gatherTensorAlongSingleAxis(PatternRewriter &rewriter, Operation *op,
rewriter.getContext(),
/*offsetDims=*/offsetDims,
/*collapsedSliceDims=*/collapsedSliceDims,
/*operandBatchingDims=*/{},
/*startIndicesBatchingDims=*/{},
/*startIndexMap=*/startIndexMap,
/*indexVecDim=*/indexVecDim);

@@ -584,6 +586,8 @@ LogicalResult ConvertAtenOp<AtenGatherOp>::matchAndRewrite(
rewriter.getContext(),
/*offsetDims=*/{},
/*collapsedSliceDims=*/collapsedDims,
/*operandBatchingDims=*/{},
/*startIndicesBatchingDims=*/{},
/*startIndexMap=*/startIndexMap,
/*indexVecDim=*/indexVecDim);

@@ -744,6 +748,8 @@ class ConvertAtenScatterOp : public ConvertAtenOp<AtenOpT> {
rewriter.getContext(),
/*updateWindowDims=*/{},
/*insertedWindowDims=*/insertedWindowDims,
/*inputBatchingDims=*/{},
/*scatterIndicesBatchingDims=*/{},
/*scatterDimsToOperandDim=*/scatterDimOperandDimMap,
/*indexVectorDim=*/indexVecDim);

@@ -826,6 +832,8 @@ LogicalResult ConvertAtenOp<AtenIndexTensorHackedTwinOp>::matchAndRewrite(
rewriter.getContext(),
/*offsetDims=*/offsetDims,
/*collapsedSliceDims=*/collapsedDims,
/*operandBatchingDims=*/{},
/*startIndicesBatchingDims=*/{},
/*startIndexMap=*/startIndexMap,
/*indexVecDim=*/indexVecDim);

@@ -900,6 +908,8 @@ LogicalResult ConvertAtenOp<AtenIndexPutHackedTwinOp>::matchAndRewrite(
rewriter.getContext(),
/*updateWindowDims=*/updateWindowDims,
/*insertedWindowDims=*/insertedWindowDims,
/*inputBatchingDims=*/{},
/*scatterIndicesBatchingDims=*/{},
/*scatterDimsToOperandDim=*/scatterDimOperandDimMap,
/*indexVectorDim=*/indexVecDim);

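The bumped StableHLO extends its gather and scatter dimension-number attributes with batching-dimension fields (operandBatchingDims/startIndicesBatchingDims for gather, inputBatchingDims/scatterIndicesBatchingDims for scatter), so every call site in this file now passes them explicitly as empty lists. A minimal sketch of the updated gather-attribute construction, assuming the bumped StableHLO headers; the helper name is illustrative, not part of the patch:

// Sketch only: the post-bump GatherDimensionNumbersAttr::get signature used
// above, with the two new batching-dimension lists left empty.
#include "llvm/ADT/ArrayRef.h"
#include "mlir/IR/MLIRContext.h"
#include "stablehlo/dialect/StablehloOps.h"

static mlir::stablehlo::GatherDimensionNumbersAttr
makeGatherDimNumbers(mlir::MLIRContext *ctx,
                     llvm::ArrayRef<int64_t> offsetDims,
                     llvm::ArrayRef<int64_t> collapsedSliceDims,
                     llvm::ArrayRef<int64_t> startIndexMap,
                     int64_t indexVecDim) {
  return mlir::stablehlo::GatherDimensionNumbersAttr::get(
      ctx,
      /*offsetDims=*/offsetDims,
      /*collapsedSliceDims=*/collapsedSliceDims,
      /*operandBatchingDims=*/{},      // new field after the bump
      /*startIndicesBatchingDims=*/{}, // new field after the bump
      /*startIndexMap=*/startIndexMap,
      /*indexVecDim=*/indexVecDim);
}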
lib/Conversion/TorchToStablehlo/ViewLike.cpp: 34 changes (31 additions, 3 deletions)
@@ -172,6 +172,7 @@ class ConvertAtenViewOp : public ConvertAtenOp<AtenOpT> {
if (!rankType)
return op.emitError("Only ranked tensor types are currently supported");

// collect Value of dims
SmallVector<Value, 4> dimSizes;
if (!getAtenViewOpSizes(op, adaptor, rewriter, dimSizes)) {
return op.emitError("Dims size must be a list of Scalar");
@@ -187,23 +188,50 @@ class ConvertAtenViewOp : public ConvertAtenOp<AtenOpT> {
return success();
}

// collect constant dim size which == -1
SmallVector<size_t> negOneIndex;
for (size_t i = 0; i < dimSizes.size(); i++) {
int64_t dim;
if (matchPattern(dimSizes[i], m_TorchConstantInt(&dim))) {
if (dim == -1) {
negOneIndex.push_back(i);
}
}
}
if (negOneIndex.size() > 1) {
return op.emitError("Only support at most one -1 in view target dims");
}

std::for_each(dimSizes.begin(), dimSizes.end(), [&](Value &dSize) {
dSize = rewriter.create<ToI64Op>(loc, dSize).getResult();
return dSize;
});

Value numel = rewriter.create<shape::NumElementsOp>(
loc, rewriter.create<shape::ShapeOfOp>(loc, adaptor.getSelf()));
numel =
rewriter.create<arith::IndexCastOp>(loc, rewriter.getI64Type(), numel);

// note: assuming that -1 doesn't arise from dynamic value
if (negOneIndex.size() == 1) {
size_t index = negOneIndex[0];
Value realDim = numel;
for (size_t i = 0; i < dimSizes.size(); i++) {
if (i != index) {
realDim = rewriter.create<arith::DivUIOp>(loc, realDim, dimSizes[i]);
}
}
// update -1 to realDim
dimSizes[index] = realDim;
}

Value stablehloShape =
rewriter.create<tensor::FromElementsOp>(loc, dimSizes);
Value computedShape = rewriter.create<stablehlo::ComputeReshapeShapeOp>(
loc, stablehloShape.getType(), numel, stablehloShape);
rewriter.replaceOpWithNewOp<stablehlo::DynamicReshapeOp>(
op,
OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
op.getType()),
adaptor.getSelf(), computedShape);
adaptor.getSelf(), stablehloShape);
return success();
}

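With this bump the view/reshape lowering no longer emits stablehlo.compute_reshape_shape. When exactly one requested dimension is the constant -1, the pattern now infers that size itself: it divides the operand's total element count by every other requested dimension, writes the result back into the shape vector, and feeds the completed shape to tensor.from_elements and stablehlo.dynamic_reshape. A host-side sketch of that inference under the same assumption the patch states (at most one -1, not coming from a dynamic value); the helper name is illustrative only:

// Mirrors the chain of arith.divui ops the pattern emits.
#include <cstdint>
#include <vector>

int64_t inferNegOneDim(int64_t numel, const std::vector<int64_t> &dims,
                       size_t negOneIndex) {
  int64_t realDim = numel;  // total element count of the input tensor
  for (size_t i = 0; i < dims.size(); ++i)
    if (i != negOneIndex)
      realDim /= dims[i];   // divide out every explicitly given dimension
  return realDim;           // concrete size for the -1 slot
}

// Example: viewing 5376 elements as {-1, 224} gives 5376 / 224 = 24.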
projects/pt1/e2e_testing/xfail_sets.py: 8 changes (7 additions, 1 deletion)
@@ -1449,6 +1449,13 @@
# Write the TOSA set as a "passing" set as it is very early in development
# and very few tests work yet.
TOSA_PASS_SET = {
"ElementwiseAddScalar_NumToTensorFloat_Module_basic",
"ElementwiseDivTensorFloatModule_basic",
"ElementwiseMulTensorFloatModule_basic",
"ElementwiseWhereScalarSelfStaticModule_basic",
"GroupNormModule_basic",
"GroupNormNoWeightAndBiasModule_basic",
"NativeGroupNormModule_basic",
"AtenDotModule_basic",
"ElementwiseFloatTensorGtIntScalarModule_basic",
"ElementwiseLogSigmoidModule_basic",
@@ -1946,7 +1953,6 @@
"Conv2dNoPaddingModule_basic",
"Conv2dWithPaddingDilationStrideModule_basic",
"Conv2dWithPaddingModule_basic",
"AtenInstanceNormModule_basic",
# failed to legalize operation 'torch.operator'
"ElementwisePreluModule_basic",
"ElementwisePreluStaticModule_basic",
test/Conversion/TorchToStablehlo/view_like.mlir: 24 changes (14 additions, 10 deletions)
@@ -310,11 +310,12 @@ func.func @torch.aten.slice.none.static$slice_like(%arg0: !torch.vtensor<[4,65,2
// CHECK: %[[T3:.*]] = torch_c.to_i64 %[[INT224]]
// CHECK: %[[T4:.*]] = shape.shape_of %[[T0]] : tensor<?x?x?x?xf32> -> tensor<4xindex>
// CHECK: %[[T5:.*]] = shape.num_elements %[[T4]] : tensor<4xindex> -> index
// CHECK: %[[FROM_ELEMENTS:.*]] = tensor.from_elements %[[T2]], %[[T3]] : tensor<2xi64>
// CHECK: %[[T6:.*]] = stablehlo.compute_reshape_shape %[[T5]], %[[FROM_ELEMENTS]] : (index, tensor<2xi64>) -> tensor<2xi64>
// CHECK: %[[T7:.*]] = stablehlo.dynamic_reshape %[[T0]], %[[T6]] : (tensor<?x?x?x?xf32>, tensor<2xi64>) -> tensor<?x224xf32>
// CHECK: %[[T8:.*]] = torch_c.from_builtin_tensor %[[T7]] : tensor<?x224xf32> -> !torch.vtensor<[?,224],f32>
// CHECK: return %[[T8]] : !torch.vtensor<[?,224],f32>
// CHECK: %[[T6:.*]] = arith.index_cast %[[T5]] : index to i64
// CHECK: %[[T7:.*]] = arith.divui %[[T6]], %[[T3]] : i64
// CHECK: %[[FROM_ELEMENTS:.*]] = tensor.from_elements %[[T7]], %[[T3]] : tensor<2xi64>
// CHECK: %[[T8:.*]] = stablehlo.dynamic_reshape %[[T0]], %[[FROM_ELEMENTS]] : (tensor<?x?x?x?xf32>, tensor<2xi64>) -> tensor<?x224xf32>
// CHECK: %[[T9:.*]] = torch_c.from_builtin_tensor %[[T8]] : tensor<?x224xf32> -> !torch.vtensor<[?,224],f32>
// CHECK: return %[[T9]] : !torch.vtensor<[?,224],f32>
func.func @torch.aten.view$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?,224],f32> {
%int-1 = torch.constant.int -1
%int224 = torch.constant.int 224
@@ -339,11 +340,14 @@ func.func @torch.aten.view$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch
// CHECK: %[[T5:.*]] = torch_c.to_i64 %[[INT64]]
// CHECK: %[[T6:.*]] = shape.shape_of %[[T0]] : tensor<?x?x?x?x?xf32> -> tensor<5xindex>
// CHECK: %[[T7:.*]] = shape.num_elements %[[T6]] : tensor<5xindex> -> index
// CHECK: %[[FROM_ELEMENTS:.*]] = tensor.from_elements %[[T2]], %[[T3]], %[[T4]], %[[T5]] : tensor<4xi64>
// CHECK: %[[T8:.*]] = stablehlo.compute_reshape_shape %[[T7]], %[[FROM_ELEMENTS]] : (index, tensor<4xi64>) -> tensor<4xi64>
// CHECK: %[[T9:.*]] = stablehlo.dynamic_reshape %[[T0]], %[[T8]] : (tensor<?x?x?x?x?xf32>, tensor<4xi64>) -> tensor<?x120x4x64xf32>
// CHECK: %[[T10:.*]] = torch_c.from_builtin_tensor %[[T9]] : tensor<?x120x4x64xf32> -> !torch.vtensor<[?,120,4,64],f32>
// CHECK: return %[[T10]] : !torch.vtensor<[?,120,4,64],f32>
// CHECK: %[[T8:.*]] = arith.index_cast %[[T7]] : index to i64
// CHECK: %[[T9:.*]] = arith.divui %[[T8]], %[[T3]] : i64
// CHECK: %[[T10:.*]] = arith.divui %[[T9]], %[[T4]] : i64
// CHECK: %[[T11:.*]] = arith.divui %[[T10]], %[[T5]] : i64
// CHECK: %[[FROM_ELEMENTS:.*]] = tensor.from_elements %[[T11]], %[[T3]], %[[T4]], %[[T5]] : tensor<4xi64>
// CHECK: %[[T12:.*]] = stablehlo.dynamic_reshape %[[T0]], %[[FROM_ELEMENTS]] : (tensor<?x?x?x?x?xf32>, tensor<4xi64>) -> tensor<?x120x4x64xf32>
// CHECK: %[[T13:.*]] = torch_c.from_builtin_tensor %[[T12]] : tensor<?x120x4x64xf32> -> !torch.vtensor<[?,120,4,64],f32>
// CHECK: return %[[T13]] : !torch.vtensor<[?,120,4,64],f32>
func.func @torch.aten.reshape$basic(%arg0: !torch.vtensor<[?,?,?,?,?],f32>) -> !torch.vtensor<[?,120,4,64],f32> {
%int-1 = torch.constant.int -1
%int120 = torch.constant.int 120
