diff --git a/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp b/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp
index ef50c3bcaf98..9f9a5f894227 100644
--- a/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp
+++ b/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp
@@ -1941,7 +1941,6 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
             indicesCt = Torch::kUnknownSize;
             break;
           }
-
           indicesCt *= sz;
         }
 
@@ -1976,8 +1975,16 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
           return success();
         }
 
-        rewriter.replaceOpWithNewOp<Torch::AtenSqueezeOp>(binder.op, resultType,
-                                                          gather);
+        // 0 indicesRank will always squeeze the axis dim
+        // Use PrimsSqueezeOp for the case of result with dynamic shape
+        SmallVector<Value> dimList({index});
+        Value dimValueList = rewriter.create<Torch::PrimListConstructOp>(
+            loc,
+            rewriter.getType<Torch::ListType>(
+                rewriter.getType<Torch::IntType>()),
+            dimList);
+        rewriter.replaceOpWithNewOp<Torch::PrimsSqueezeOp>(
+            binder.op, resultType, gather, dimValueList);
         return success();
       });
   patterns.onOp(
diff --git a/test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir b/test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir
index 2e7b59088881..64096fd23b7f 100644
--- a/test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir
+++ b/test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir
@@ -78,7 +78,8 @@ func.func @test_gather_scalar(%arg0: !torch.vtensor<[3,4,5],f32>, %arg1: !torch.
   // CHECK: %[[SEL:.+]] = torch.aten.where.self %[[LT]], %[[ADD]], %arg1
   // CHECK: %[[FLAT:.+]] = torch.aten.unsqueeze %[[SEL]], %[[ZERO]] : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
   // CHECK: %[[ISEL:.+]] = torch.aten.index_select %arg0, %[[AXIS]], %[[FLAT]]
-  // CHECK: %[[RES:.+]] = torch.aten.squeeze %[[ISEL]] : !torch.vtensor<[1,4,5],f32> -> !torch.vtensor<[4,5],f32>
+  // CHECK: %[[DIMS:.+]] = torch.prim.ListConstruct %[[AXIS]] : (!torch.int) -> !torch.list<int>
+  // CHECK: %[[RES:.+]] = torch.prims.squeeze %[[ISEL]], %[[DIMS]] : !torch.vtensor<[1,4,5],f32>, !torch.list<int> -> !torch.vtensor<[4,5],f32>
   // CHECK: return %[[RES]]
   %0 = torch.operator "onnx.Gather"(%arg0, %arg1) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[3,4,5],f32>, !torch.vtensor<[], si64>) -> !torch.vtensor<[4,5],f32>
   return %0 : !torch.vtensor<[4,5],f32>