diff --git a/externals/llvm-project b/externals/llvm-project
index 798fa4b415eea..3a020527c2af1 160000
--- a/externals/llvm-project
+++ b/externals/llvm-project
@@ -1 +1 @@
-Subproject commit 798fa4b415eea55c868ae42b874083cb9886991e
+Subproject commit 3a020527c2af10741b12e756de45bd6f774885a4
diff --git a/externals/mlir-hlo b/externals/mlir-hlo
index 037315c6515b5..8df20065b22be 160000
--- a/externals/mlir-hlo
+++ b/externals/mlir-hlo
@@ -1 +1 @@
-Subproject commit 037315c6515b5323ff78bc3c54d70dffad2ddbd0
+Subproject commit 8df20065b22be628f2d365c387200df7d02b80c1
diff --git a/include/torch-mlir/Conversion/Utils/Utils.h b/include/torch-mlir/Conversion/Utils/Utils.h
index 1be7065b92664..7a9c10836705f 100644
--- a/include/torch-mlir/Conversion/Utils/Utils.h
+++ b/include/torch-mlir/Conversion/Utils/Utils.h
@@ -84,7 +84,7 @@ SmallVector getTypeConvertedValues(OpBuilder &b, Location loc,
 // should be converted builtin types.
 Value convertScalarToDtype(
     OpBuilder &b, Location loc, Value scalar, Type dtype,
-    llvm::Optional srcOriginalDtype = llvm::None);
+    llvm::Optional srcOriginalDtype = std::nullopt);
 
 } // namespace Torch
 } // namespace torch
diff --git a/include/torch-mlir/Dialect/Torch/IR/TorchOps.h b/include/torch-mlir/Dialect/Torch/IR/TorchOps.h
index e4db5374a61db..30ba246d93d0a 100644
--- a/include/torch-mlir/Dialect/Torch/IR/TorchOps.h
+++ b/include/torch-mlir/Dialect/Torch/IR/TorchOps.h
@@ -190,7 +190,7 @@ struct torch_list_of_optional_constant_ints_op_binder {
       if (matchPattern(value, m_TorchConstantInt(&num)))
         bind_values.push_back(num);
       else if (value.getType().isa())
-        bind_values.push_back(llvm::None);
+        bind_values.push_back(std::nullopt);
       else
         return false;
     }
diff --git a/lib/CAPI/TorchTypes.cpp b/lib/CAPI/TorchTypes.cpp
index 27dbda38f0972..98c2e60859505 100644
--- a/lib/CAPI/TorchTypes.cpp
+++ b/lib/CAPI/TorchTypes.cpp
@@ -198,7 +198,7 @@ MlirType torchMlirTorchNonValueTensorTypeGet(MlirContext context,
                                              intptr_t numSizes,
                                              const int64_t *optionalSizes,
                                              MlirType optionalDtype) {
-  Optional> optionalSizesArrayRef = None;
+  Optional> optionalSizesArrayRef = std::nullopt;
   // if numSizes == -1, then it is unranked.
   if (numSizes > -1)
     optionalSizesArrayRef = llvm::makeArrayRef(optionalSizes, numSizes);
@@ -232,7 +232,7 @@ MlirType torchMlirTorchValueTensorTypeGet(MlirContext context,
                                           intptr_t numSizes,
                                           const int64_t *optionalSizes,
                                           MlirType optionalDtype) {
-  Optional> optionalSizesArrayRef = None;
+  Optional> optionalSizesArrayRef = std::nullopt;
   // if numSizes == -1, then it is unranked.
   if (numSizes > -1)
     optionalSizesArrayRef = llvm::makeArrayRef(optionalSizes, numSizes);
diff --git a/lib/Conversion/TorchToTosa/TosaLegalizeCommon.cpp b/lib/Conversion/TorchToTosa/TosaLegalizeCommon.cpp
index 7ad70a0ac2903..71ebddce4693b 100644
--- a/lib/Conversion/TorchToTosa/TosaLegalizeCommon.cpp
+++ b/lib/Conversion/TorchToTosa/TosaLegalizeCommon.cpp
@@ -37,7 +37,7 @@ llvm::Optional convertReduceOpCommon(
   RankedTensorType input_type =
       input_value.getType().dyn_cast();
   if (!input_type)
-    return llvm::None;
+    return std::nullopt;
 
   ArrayRef input_shape = input_type.getShape();
   ArrayRef output_shape = output_type.getShape();
@@ -101,7 +101,7 @@ convertReduceAllOp(PatternRewriter &rewriter, Operation *op,
   RankedTensorType input_type =
       input_value.getType().dyn_cast();
   if (!input_type)
-    return llvm::None;
+    return std::nullopt;
 
   return convertReduceOpCommon(
       rewriter, op, output_type, input_value, axes_elems, keep_dims,
@@ -116,7 +116,7 @@ convertReduceAnyOp(PatternRewriter &rewriter, Operation *op,
   RankedTensorType input_type =
       input_value.getType().dyn_cast();
   if (!input_type)
-    return llvm::None;
+    return std::nullopt;
 
   return convertReduceOpCommon(
       rewriter, op, output_type, input_value, axes_elems, keep_dims,
@@ -131,7 +131,7 @@ convertReduceMinOp(PatternRewriter &rewriter, Operation *op,
   RankedTensorType input_type =
       input_value.getType().dyn_cast();
   if (!input_type)
-    return llvm::None;
+    return std::nullopt;
 
   return convertReduceOpCommon(
       rewriter, op, output_type, input_value, axes_elems, keep_dims,
@@ -146,7 +146,7 @@ convertReduceMaxOp(PatternRewriter &rewriter, Operation *op,
   RankedTensorType input_type =
       input_value.getType().dyn_cast();
   if (!input_type)
-    return llvm::None;
+    return std::nullopt;
 
   return convertReduceOpCommon(
       rewriter, op, output_type, input_value, axes_elems, keep_dims,
@@ -161,7 +161,7 @@ convertReduceProdOp(PatternRewriter &rewriter, Operation *op,
   RankedTensorType input_type =
       input_value.getType().dyn_cast();
   if (!input_type)
-    return llvm::None;
+    return std::nullopt;
 
   bool input_is_qtype =
       input_type.getElementType().isa();
@@ -171,7 +171,7 @@ convertReduceProdOp(PatternRewriter &rewriter, Operation *op,
   if (input_is_qtype || output_is_qtype) {
     op->emitOpError("ConvertReduceProdOp: input/output tensor should "
                     "be all floating-point.");
-    return llvm::None;
+    return std::nullopt;
   }
 
   return convertReduceOpCommon(
@@ -187,7 +187,7 @@ convertReduceSumOp(PatternRewriter &rewriter, Operation *op,
   RankedTensorType input_type =
       input_value.getType().dyn_cast();
   if (!input_type)
-    return llvm::None;
+    return std::nullopt;
 
   bool input_is_qtype =
       input_type.getElementType().isa();
@@ -197,7 +197,7 @@ convertReduceSumOp(PatternRewriter &rewriter, Operation *op,
   if (input_is_qtype != output_is_qtype) {
     op->emitOpError("ConvertReduceSumOp: input/output tensor should "
                     "be all quantized or all floating-point.");
-    return llvm::None;
+    return std::nullopt;
   }
 
   double input_scale = 1.0f;
@@ -242,7 +242,7 @@ convertReduceMeanOp(PatternRewriter &rewriter, Operation *op,
   RankedTensorType input_type =
       input_value.getType().dyn_cast();
   if (!input_type)
-    return llvm::None;
+    return std::nullopt;
 
   bool input_is_qtype =
       input_type.getElementType().isa();
@@ -252,7 +252,7 @@ convertReduceMeanOp(PatternRewriter &rewriter, Operation *op,
   if (input_is_qtype != output_is_qtype) {
     op->emitOpError("ConvertReduceSumOp: input/output tensor should "
                     "be all quantized or all floating-point.");
-    return llvm::None;
+    return std::nullopt;
   }
 
   // Only supports float type mean() if it's non-quantized
@@ -260,7 +260,7 @@ convertReduceMeanOp(PatternRewriter &rewriter, Operation *op,
     op->emitWarning(
         "Failed convertReduceMean: input unquantized type but output element "
         "not FloatType!");
-    return llvm::None;
+    return std::nullopt;
   }
 
   int64_t input_rank = input_type.getRank();
@@ -303,7 +303,7 @@ convertReduceMeanOp(PatternRewriter &rewriter, Operation *op,
                             output_zp);
 
   if (!val.has_value())
-    return llvm::None;
+    return std::nullopt;
 
   if (!input_is_qtype) {
     Value div_const = getTosaConstTensorSingleF32(rewriter, op, div_scale);
diff --git a/lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp b/lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp
index eeac271371d98..6a1479daca6ce 100644
--- a/lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp
+++ b/lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp
@@ -162,7 +162,7 @@ llvm::Optional getConstTensor(PatternRewriter &rewriter, Operation *op,
 
   if (vec.size() != num_total_elements) {
     op->emitOpError("getConstTensor(): number of elements mismatch.");
-    return llvm::None;
+    return std::nullopt;
   }
 
   auto const_type =
@@ -186,7 +186,7 @@ llvm::Optional getConstTensor(PatternRewriter &rewriter,
 
   if (vec.size() != num_total_elements) {
     op->emitOpError("getConstTensor(): number of elements mismatch.");
-    return llvm::None;
+    return std::nullopt;
   }
 
   auto const_type = RankedTensorType::get(
@@ -210,7 +210,7 @@ llvm::Optional getConstTensor(PatternRewriter &rewriter,
 
   if (vec.size() != num_total_elements) {
     op->emitOpError("getConstTensor(): number of elements mismatch.");
-    return llvm::None;
+    return std::nullopt;
   }
 
   auto const_type = RankedTensorType::get(shape, rewriter.getF32Type());
diff --git a/lib/Dialect/Torch/IR/TorchOps.cpp b/lib/Dialect/Torch/IR/TorchOps.cpp
index c6a7727d24bd8..82fe8059001bb 100644
--- a/lib/Dialect/Torch/IR/TorchOps.cpp
+++ b/lib/Dialect/Torch/IR/TorchOps.cpp
@@ -1028,7 +1028,7 @@ traceKnownSizeTensorType(Value value, llvm::Optional dim) {
     if (!tensorType.hasSizes())
      return false;
 
-    if (dim == llvm::None)
+    if (dim == std::nullopt)
      return tensorType.areAllSizesKnown();
 
    // If the dimension value is negative, then convert it to a positive value.
@@ -1062,7 +1062,7 @@ traceKnownSizeTensorType(Value value, llvm::Optional dim) {
 void AtenSizeOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
                                              MLIRContext *context) {
   patterns.add(+[](AtenSizeOp op, PatternRewriter &rewriter) {
-    auto type = traceKnownSizeTensorType(op.getOperand(), llvm::None);
+    auto type = traceKnownSizeTensorType(op.getOperand(), std::nullopt);
     if (failed(type))
       return rewriter.notifyMatchFailure(op, "all sizes not known");
     SmallVector listElements;
diff --git a/lib/Dialect/Torch/IR/TorchTypes.cpp b/lib/Dialect/Torch/IR/TorchTypes.cpp
index de8c1d5206951..5d6336f15f912 100644
--- a/lib/Dialect/Torch/IR/TorchTypes.cpp
+++ b/lib/Dialect/Torch/IR/TorchTypes.cpp
@@ -89,7 +89,7 @@ bool Torch::isValidSubtype(Type subtype, Type type) {
 static Optional>
 parseMultipleContainedTypes(AsmParser &parser) {
   if (parser.parseLess())
-    return None;
+    return std::nullopt;
 
   SmallVector containedTypes;
   if (!parser.parseOptionalGreater())
@@ -97,11 +97,11 @@ parseMultipleContainedTypes(AsmParser &parser) {
   do {
     Type containedType = parseTorchDialectType(parser);
     if (!containedType)
-      return None;
+      return std::nullopt;
    containedTypes.push_back(containedType);
   } while (!parser.parseOptionalComma());
   if (parser.parseGreater())
-    return None;
+    return std::nullopt;
   return containedTypes;
 }
 
@@ -222,7 +222,8 @@ Type parseTensorType(MLIRContext *context, AsmParser &parser,
   llvm::SMLoc startLoc = parser.getCurrentLocation();
   if (parser.parseOptionalLess())
     return getTensorType(context,
-                         /*optionalSizes=*/None, /*optionalDtype=*/Type());
+                         /*optionalSizes=*/std::nullopt,
+                         /*optionalDtype=*/Type());
   bool hasSizes;
   SmallVector sizes;
   if (succeeded(parser.parseOptionalStar())) {
@@ -320,7 +321,7 @@ ValueTensorType NonValueTensorType::getWithValueSemantics() const {
 NonValueTensorType
 NonValueTensorType::getWithLeastStaticInformation(MLIRContext *context) {
   return NonValueTensorType::get(context,
-                                 /*optionalSizes=*/None,
+                                 /*optionalSizes=*/std::nullopt,
                                  /*optionalDtype=*/Type());
 }
 
@@ -357,7 +358,7 @@ NonValueTensorType ValueTensorType::getWithoutValueSemantics() const {
 ValueTensorType
 ValueTensorType::getWithLeastStaticInformation(MLIRContext *context) {
   return ValueTensorType::get(context,
-                              /*optionalSizes=*/None,
+                              /*optionalSizes=*/std::nullopt,
                               /*optionalDtype=*/Type());
 }
 
@@ -428,8 +429,8 @@ Type Torch::meetTensorTypes(BaseTensorType lhs, BaseTensorType rhs) {
 
   // If neither has sizes, we have nothing left to do.
   if (!lhs.hasSizes() && !rhs.hasSizes()) {
-    return ValueTensorType::get(lhs.getContext(), /*optionalSizes=*/None,
-                                dtype);
+    return ValueTensorType::get(lhs.getContext(),
+                                /*optionalSizes=*/std::nullopt, dtype);
   }
 
   // If the number of sizes is different, the two types are contradictory.
diff --git a/lib/Dialect/Torch/Transforms/GlobalizeObjectGraph.cpp b/lib/Dialect/Torch/Transforms/GlobalizeObjectGraph.cpp
index a20940984fcfc..3f2600f0aee8f 100644
--- a/lib/Dialect/Torch/Transforms/GlobalizeObjectGraph.cpp
+++ b/lib/Dialect/Torch/Transforms/GlobalizeObjectGraph.cpp
@@ -85,7 +85,7 @@ class ObjectGraphInfo {
                                        func::FuncOp methodFunc) {
     auto it = funcLinkageInfo.find({instance, methodFunc});
     if (it == funcLinkageInfo.end())
-      return None;
+      return std::nullopt;
     return it->second;
   }
 
@@ -638,7 +638,7 @@ static LogicalResult globalizeObjectGraph(ModuleOp module) {
   for (auto &monomorphization : tracker.getMonomorphizations()) {
    auto newFunc = cast(monomorphization.func->clone());
    newFuncs[monomorphization] = newFunc;
-    Optional linkageInfo = None;
+    Optional linkageInfo = std::nullopt;
    // If it is potentially a method, check its linkage info.
    if (monomorphization.argInstances.size() != 0 &&
        monomorphization.argInstances[0].argIndex == 0) {
diff --git a/lib/Dialect/Torch/Transforms/RefineTypes.cpp b/lib/Dialect/Torch/Transforms/RefineTypes.cpp
index c749bfce9f34d..b8286e9ff0afc 100644
--- a/lib/Dialect/Torch/Transforms/RefineTypes.cpp
+++ b/lib/Dialect/Torch/Transforms/RefineTypes.cpp
@@ -112,9 +112,9 @@ static torch_upstream::TypeKind getTypeKind(Type type) {
 }
 
 /// Returns the dtype that assumes information from both `lhs` and `rhs`.
-/// Returns `None` if the types are contradictory. Note this can only be used
-/// on the `dtype` from tensors and can't be used on other types like scalar
-/// types.
+/// Returns `std::nullopt` if the types are contradictory. Note this can only
+/// be used on the `dtype` from tensors and can't be used on other types like
+/// scalar types.
 static Optional meetElementTypes(Type lhs, Type rhs) {
   auto isNullOrBuiltIn = [](Type type) { return !type || isBuiltInType(type); };
   (void)isNullOrBuiltIn;
@@ -127,7 +127,7 @@ static Optional meetElementTypes(Type lhs, Type rhs) {
     return lhs;
   if (lhs == rhs)
     return lhs;
-  return None;
+  return std::nullopt;
 }
 
 enum class OptionalKnowledge {
@@ -137,7 +137,7 @@ enum class OptionalKnowledge {
 };
 
 /// Returns the OptionalKnowledge that assumes information from both `lhs` and
-/// `rhs`. Returns `None` if the knowledges are contradictory.
+/// `rhs`. Returns `std::nullopt` if the knowledges are contradictory.
 static Optional
 meetOptionalKnowledge(OptionalKnowledge lhs, OptionalKnowledge rhs) {
   if (lhs == OptionalKnowledge::unKnown)
@@ -146,7 +146,7 @@ meetOptionalKnowledge(OptionalKnowledge lhs, OptionalKnowledge rhs) {
     return lhs;
   if (lhs == rhs)
     return lhs;
-  return None;
+  return std::nullopt;
 }
 
 static OptionalKnowledge joinOptionalKnowledge(OptionalKnowledge lhs,
@@ -327,7 +327,7 @@ struct ValueKnowledge {
 
   // Given two pieces of static knowledge, calculate new knowledge that assumes
   // the facts from both.
-  // If the two pieces of knowledge are contradictory, None is returned.
+  // If the two pieces of knowledge are contradictory, std::nullopt is returned.
   static Optional meet(const ValueKnowledge &lhs,
                        const ValueKnowledge &rhs) {
     if (!lhs.isInitialized)
       return rhs;
@@ -338,13 +338,13 @@ struct ValueKnowledge {
     Optional knowledge = meetTypes(lhs, rhs);
 
     if (!knowledge.has_value())
-      return None;
+      return std::nullopt;
     ValueKnowledge result = knowledge.value();
 
     Optional optional =
         meetOptionalKnowledge(lhs.optional, rhs.optional);
     if (!optional.has_value())
-      return None;
+      return std::nullopt;
     result.optional = optional.value();
     return result;
   }
@@ -362,7 +362,7 @@ struct ValueKnowledge {
      return rhs;
    if (lhs == rhs)
      return lhs;
-    return None;
+    return std::nullopt;
   }
 
   // We start in the uninitialized state by default.
@@ -559,7 +559,7 @@ static Type getPromotedResultDType(ValueKnowledge *tensor, Type scalarType) {
   torch_upstream::ResultTypeState state = {};
   // No need to check if rank is zero for tensor because scalar uses
   // wrappedResult which is a lower priority than both dimResult and zeroResult.
-  state = updateResultTypeState(tensor, /*rankIsNonZero=*/None, state,
+  state = updateResultTypeState(tensor, /*rankIsNonZero=*/std::nullopt, state,
                                 /*skipRankCheck=*/true);
   state = updateResultTypeState(getDefaultDtypeForTorchScalar(scalarType),
                                 state);
@@ -573,7 +573,7 @@ static SmallVector> getRankIsNonZeroArray(ValueRange values) {
      if (tensorType.hasSizes()) {
        rankIsNonZero.push_back(tensorType.getSizes().size() != 0);
      } else {
-        rankIsNonZero.push_back(None);
+        rankIsNonZero.push_back(std::nullopt);
      }
    }
  }
diff --git a/lib/Dialect/Torch/Utils/Utils.cpp b/lib/Dialect/Torch/Utils/Utils.cpp
index 1577ad6051f54..139dd4de16c7d 100644
--- a/lib/Dialect/Torch/Utils/Utils.cpp
+++ b/lib/Dialect/Torch/Utils/Utils.cpp
@@ -27,10 +27,10 @@ llvm::Optional
 Torch::matchLegalConstantIndexIntoListOfSize(Value v, int64_t length) {
   int64_t dim;
   if (!matchPattern(v, m_TorchConstantInt(&dim)))
-    return llvm::None;
+    return std::nullopt;
   dim = toPositiveDim(dim, length);
   if (!isValidDim(dim, length))
-    return llvm::None;
+    return std::nullopt;
   return dim;
 }
 
@@ -169,7 +169,7 @@ bool Torch::isBuiltInType(Type type) {
 Optional Torch::getTensorRank(Value tensor) {
   BaseTensorType tensorType = tensor.getType().cast();
   if (!tensorType.hasSizes())
-    return llvm::None;
+    return std::nullopt;
   return tensorType.getSizes().size();
 }
 
diff --git a/lib/Dialect/TorchConversion/Transforms/BackendTypeConversion.cpp b/lib/Dialect/TorchConversion/Transforms/BackendTypeConversion.cpp
index 299415bfdb3f7..da53dcf7af739 100644
--- a/lib/Dialect/TorchConversion/Transforms/BackendTypeConversion.cpp
+++ b/lib/Dialect/TorchConversion/Transforms/BackendTypeConversion.cpp
@@ -61,7 +61,7 @@ static void setupTorchBoolToI1Conversion(ConversionTarget &target,
                                  Location loc) -> Optional {
         // Other builtin integer types could be handled by other materializers.
         if (!(type.getWidth() == 1 && type.isSignless()))
-          return None;
+          return std::nullopt;
         assert(inputs.size() == 1);
         assert(inputs[0].getType().isa());
         return builder.create(loc, inputs[0]).getResult();
       });
@@ -87,11 +87,11 @@ static void setupTorchIntToI64Conversion(ConversionTarget &target,
                                  Location loc) -> Optional {
         // Other builtin integer types could be handled by other materializers.
         if (!(type.getWidth() == 64 && type.isSignless()))
-          return None;
+          return std::nullopt;
         // Other input type to be converted to i64 are handled by other
         // materializers.
         if (!inputs[0].getType().isa())
-          return None;
+          return std::nullopt;
         assert(inputs.size() == 1);
         return builder.create(loc, inputs[0]).getResult();
       });
@@ -140,11 +140,11 @@ static void setupTorchGeneratorToI64Conversion(ConversionTarget &target,
                                  Location loc) -> Optional {
         // Other builtin integer types could be handled by other materializers.
         if (!(type.getWidth() == 64 && type.isSignless()))
-          return None;
+          return std::nullopt;
         // Other input type to be converted to i64 are handled by other
         // materializers.
         if (!inputs[0].getType().isa())
-          return None;
+          return std::nullopt;
         assert(inputs.size() == 1);
         return builder.create(loc, inputs[0]).getResult();
       });
diff --git a/lib/RefBackend/RefBackend.cpp b/lib/RefBackend/RefBackend.cpp
index 9ea0fdecf76a2..4299fe522735a 100644
--- a/lib/RefBackend/RefBackend.cpp
+++ b/lib/RefBackend/RefBackend.cpp
@@ -206,8 +206,8 @@ class MungeCallingConventions
     for (auto &p : invokedConsumeFuncReturnFuncs) {
       auto consumeFuncReturnFunc = b.create(
           module.getLoc(), p.first,
-          FunctionType::get(module.getContext(), p.second, {}),
-          b.getStringAttr("private"));
+          FunctionType::get(module.getContext(), p.second, {}));
+      consumeFuncReturnFunc.setPrivate();
       addEmitCInterfaceAttr(consumeFuncReturnFunc);
     }
   }
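
For reference (this addendum is not part of the patch): apart from the two submodule bumps and the func::FuncOp visibility change in lib/RefBackend/RefBackend.cpp, every hunk above is the same mechanical substitution -- the empty value of llvm::Optional is now spelled std::nullopt instead of llvm::None, following LLVM's migration of llvm::Optional toward std::optional. The sketch below illustrates that before/after pattern with plain std::optional; the helper name and logic are hypothetical and not taken from the repository.

// Minimal sketch of the llvm::None -> std::nullopt pattern, assuming plain
// std::optional semantics; findFirstNegative is a hypothetical helper.
#include <cstdint>
#include <optional>
#include <vector>

// Returns the index of the first negative element, or an empty optional.
std::optional<size_t> findFirstNegative(const std::vector<int64_t> &xs) {
  for (size_t i = 0; i < xs.size(); ++i)
    if (xs[i] < 0)
      return i;          // engaged optional
  // Before this patch the empty state would have been written `return llvm::None;`.
  return std::nullopt;   // empty optional
}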