build: update llvm tag to 3a020527 #1717

Merged: 1 commit, Dec 14, 2022
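The change itself is mechanical: the new LLVM revision deprecates `llvm::None` in favor of `std::nullopt` as `llvm::Optional` converges on `std::optional`, so the submodule bumps below are paired with a sweep that rewrites every `llvm::None` / `None` in torch-mlir to `std::nullopt`. A minimal standalone sketch of the resulting idiom follows; the helper is illustrative only (loosely modeled on `matchLegalConstantIndexIntoListOfSize`) and uses `std::optional` directly so it builds without LLVM headers.

```cpp
// Sketch of the std::nullopt idiom this PR migrates to. Illustrative only:
// not part of the torch-mlir API; std::optional stands in for llvm::Optional.
#include <cstdint>
#include <iostream>
#include <optional>

// Normalize `dim` against a list of `length` elements; returning "no value"
// is now spelled `return std::nullopt;` where it used to be `return llvm::None;`.
std::optional<int64_t> matchLegalIndexIntoListOfSize(int64_t dim, int64_t length) {
  if (dim < 0)
    dim += length; // allow negative (from-the-end) indices
  if (dim < 0 || dim >= length)
    return std::nullopt; // was: return llvm::None;
  return dim;
}

int main() {
  if (auto d = matchLegalIndexIntoListOfSize(-1, 4))
    std::cout << "resolved dim = " << *d << "\n"; // prints 3
  if (!matchLegalIndexIntoListOfSize(7, 4))
    std::cout << "out-of-range index rejected\n";
  return 0;
}
```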
2 changes: 1 addition & 1 deletion externals/llvm-project
Submodule llvm-project updated 8174 files
2 changes: 1 addition & 1 deletion externals/mlir-hlo
Submodule mlir-hlo updated 148 files
2 changes: 1 addition & 1 deletion include/torch-mlir/Conversion/Utils/Utils.h
@@ -84,7 +84,7 @@ SmallVector<Value> getTypeConvertedValues(OpBuilder &b, Location loc,
// should be converted builtin types.
Value convertScalarToDtype(
OpBuilder &b, Location loc, Value scalar, Type dtype,
-llvm::Optional<Type> srcOriginalDtype = llvm::None);
+llvm::Optional<Type> srcOriginalDtype = std::nullopt);

} // namespace Torch
} // namespace torch
2 changes: 1 addition & 1 deletion include/torch-mlir/Dialect/Torch/IR/TorchOps.h
@@ -190,7 +190,7 @@ struct torch_list_of_optional_constant_ints_op_binder {
if (matchPattern(value, m_TorchConstantInt(&num)))
bind_values.push_back(num);
else if (value.getType().isa<Torch::NoneType>())
-bind_values.push_back(llvm::None);
+bind_values.push_back(std::nullopt);
else
return false;
}
4 changes: 2 additions & 2 deletions lib/CAPI/TorchTypes.cpp
@@ -198,7 +198,7 @@ MlirType torchMlirTorchNonValueTensorTypeGet(MlirContext context,
intptr_t numSizes,
const int64_t *optionalSizes,
MlirType optionalDtype) {
-Optional<ArrayRef<int64_t>> optionalSizesArrayRef = None;
+Optional<ArrayRef<int64_t>> optionalSizesArrayRef = std::nullopt;
// if numSizes == -1, then it is unranked.
if (numSizes > -1)
optionalSizesArrayRef = llvm::makeArrayRef(optionalSizes, numSizes);
@@ -232,7 +232,7 @@ MlirType torchMlirTorchValueTensorTypeGet(MlirContext context,
intptr_t numSizes,
const int64_t *optionalSizes,
MlirType optionalDtype) {
-Optional<ArrayRef<int64_t>> optionalSizesArrayRef = None;
+Optional<ArrayRef<int64_t>> optionalSizesArrayRef = std::nullopt;
// if numSizes == -1, then it is unranked.
if (numSizes > -1)
optionalSizesArrayRef = llvm::makeArrayRef(optionalSizes, numSizes);
26 changes: 13 additions & 13 deletions lib/Conversion/TorchToTosa/TosaLegalizeCommon.cpp
@@ -37,7 +37,7 @@ llvm::Optional<Value> convertReduceOpCommon(
RankedTensorType input_type =
input_value.getType().dyn_cast<RankedTensorType>();
if (!input_type)
-return llvm::None;
+return std::nullopt;

ArrayRef<int64_t> input_shape = input_type.getShape();
ArrayRef<int64_t> output_shape = output_type.getShape();
@@ -101,7 +101,7 @@ convertReduceAllOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType input_type =
input_value.getType().dyn_cast<RankedTensorType>();
if (!input_type)
-return llvm::None;
+return std::nullopt;

return convertReduceOpCommon<tosa::ReduceAllOp>(
rewriter, op, output_type, input_value, axes_elems, keep_dims,
@@ -116,7 +116,7 @@ convertReduceAnyOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType input_type =
input_value.getType().dyn_cast<RankedTensorType>();
if (!input_type)
-return llvm::None;
+return std::nullopt;

return convertReduceOpCommon<tosa::ReduceAnyOp>(
rewriter, op, output_type, input_value, axes_elems, keep_dims,
@@ -131,7 +131,7 @@ convertReduceMinOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType input_type =
input_value.getType().dyn_cast<RankedTensorType>();
if (!input_type)
-return llvm::None;
+return std::nullopt;

return convertReduceOpCommon<tosa::ReduceMinOp>(
rewriter, op, output_type, input_value, axes_elems, keep_dims,
@@ -146,7 +146,7 @@ convertReduceMaxOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType input_type =
input_value.getType().dyn_cast<RankedTensorType>();
if (!input_type)
-return llvm::None;
+return std::nullopt;

return convertReduceOpCommon<tosa::ReduceMaxOp>(
rewriter, op, output_type, input_value, axes_elems, keep_dims,
@@ -161,7 +161,7 @@ convertReduceProdOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType input_type =
input_value.getType().dyn_cast<RankedTensorType>();
if (!input_type)
-return llvm::None;
+return std::nullopt;

bool input_is_qtype =
input_type.getElementType().isa<mlir::quant::UniformQuantizedType>();
@@ -171,7 +171,7 @@ convertReduceProdOp(PatternRewriter &rewriter, Operation *op,
if (input_is_qtype || output_is_qtype) {
op->emitOpError("ConvertReduceProdOp: input/output tensor should "
"be all floating-point.");
-return llvm::None;
+return std::nullopt;
}

return convertReduceOpCommon<tosa::ReduceProdOp>(
@@ -187,7 +187,7 @@ convertReduceSumOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType input_type =
input_value.getType().dyn_cast<RankedTensorType>();
if (!input_type)
-return llvm::None;
+return std::nullopt;

bool input_is_qtype =
input_type.getElementType().isa<mlir::quant::UniformQuantizedType>();
@@ -197,7 +197,7 @@ convertReduceSumOp(PatternRewriter &rewriter, Operation *op,
if (input_is_qtype != output_is_qtype) {
op->emitOpError("ConvertReduceSumOp: input/output tensor should "
"be all quantized or all floating-point.");
-return llvm::None;
+return std::nullopt;
}

double input_scale = 1.0f;
@@ -242,7 +242,7 @@ convertReduceMeanOp(PatternRewriter &rewriter, Operation *op,
RankedTensorType input_type =
input_value.getType().dyn_cast<RankedTensorType>();
if (!input_type)
-return llvm::None;
+return std::nullopt;

bool input_is_qtype =
input_type.getElementType().isa<mlir::quant::UniformQuantizedType>();
@@ -252,15 +252,15 @@ convertReduceMeanOp(PatternRewriter &rewriter, Operation *op,
if (input_is_qtype != output_is_qtype) {
op->emitOpError("ConvertReduceSumOp: input/output tensor should "
"be all quantized or all floating-point.");
-return llvm::None;
+return std::nullopt;
}

// Only supports float type mean() if it's non-quantized
if (!input_is_qtype && !output_type.getElementType().isa<mlir::FloatType>()) {
op->emitWarning(
"Failed convertReduceMean: input unquantized type but output element "
"not FloatType!");
-return llvm::None;
+return std::nullopt;
}

int64_t input_rank = input_type.getRank();
@@ -303,7 +303,7 @@ convertReduceMeanOp(PatternRewriter &rewriter, Operation *op,
output_zp);

if (!val.has_value())
-return llvm::None;
+return std::nullopt;

if (!input_is_qtype) {
Value div_const = getTosaConstTensorSingleF32(rewriter, op, div_scale);
6 changes: 3 additions & 3 deletions lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp
@@ -162,7 +162,7 @@ llvm::Optional<Value> getConstTensor(PatternRewriter &rewriter, Operation *op,

if (vec.size() != num_total_elements) {
op->emitOpError("getConstTensor(): number of elements mismatch.");
-return llvm::None;
+return std::nullopt;
}

auto const_type =
@@ -186,7 +186,7 @@ llvm::Optional<Value> getConstTensor<APInt>(PatternRewriter &rewriter,

if (vec.size() != num_total_elements) {
op->emitOpError("getConstTensor(): number of elements mismatch.");
-return llvm::None;
+return std::nullopt;
}

auto const_type = RankedTensorType::get(
@@ -210,7 +210,7 @@ llvm::Optional<Value> getConstTensor<float>(PatternRewriter &rewriter,

if (vec.size() != num_total_elements) {
op->emitOpError("getConstTensor(): number of elements mismatch.");
-return llvm::None;
+return std::nullopt;
}

auto const_type = RankedTensorType::get(shape, rewriter.getF32Type());
4 changes: 2 additions & 2 deletions lib/Dialect/Torch/IR/TorchOps.cpp
@@ -1028,7 +1028,7 @@ traceKnownSizeTensorType(Value value, llvm::Optional<int64_t> dim) {
if (!tensorType.hasSizes())
return false;

-if (dim == llvm::None)
+if (dim == std::nullopt)
return tensorType.areAllSizesKnown();

// If the dimension value is negative, then convert it to a positive value.
@@ -1062,7 +1062,7 @@ traceKnownSizeTensorType(Value value, llvm::Optional<int64_t> dim) {
void AtenSizeOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
MLIRContext *context) {
patterns.add(+[](AtenSizeOp op, PatternRewriter &rewriter) {
-auto type = traceKnownSizeTensorType(op.getOperand(), llvm::None);
+auto type = traceKnownSizeTensorType(op.getOperand(), std::nullopt);
if (failed(type))
return rewriter.notifyMatchFailure(op, "all sizes not known");
SmallVector<Value> listElements;
17 changes: 9 additions & 8 deletions lib/Dialect/Torch/IR/TorchTypes.cpp
@@ -89,19 +89,19 @@ bool Torch::isValidSubtype(Type subtype, Type type) {
static Optional<SmallVector<Type>>
parseMultipleContainedTypes(AsmParser &parser) {
if (parser.parseLess())
-return None;
+return std::nullopt;

SmallVector<Type> containedTypes;
if (!parser.parseOptionalGreater())
return containedTypes;
do {
Type containedType = parseTorchDialectType(parser);
if (!containedType)
-return None;
+return std::nullopt;
containedTypes.push_back(containedType);
} while (!parser.parseOptionalComma());
if (parser.parseGreater())
-return None;
+return std::nullopt;
return containedTypes;
}

@@ -222,7 +222,8 @@ Type parseTensorType(MLIRContext *context, AsmParser &parser,
llvm::SMLoc startLoc = parser.getCurrentLocation();
if (parser.parseOptionalLess())
return getTensorType(context,
-/*optionalSizes=*/None, /*optionalDtype=*/Type());
+/*optionalSizes=*/std::nullopt,
+/*optionalDtype=*/Type());
bool hasSizes;
SmallVector<int64_t> sizes;
if (succeeded(parser.parseOptionalStar())) {
@@ -320,7 +321,7 @@ ValueTensorType NonValueTensorType::getWithValueSemantics() const {
NonValueTensorType
NonValueTensorType::getWithLeastStaticInformation(MLIRContext *context) {
return NonValueTensorType::get(context,
-/*optionalSizes=*/None,
+/*optionalSizes=*/std::nullopt,
/*optionalDtype=*/Type());
}

@@ -357,7 +358,7 @@ NonValueTensorType ValueTensorType::getWithoutValueSemantics() const {
ValueTensorType
ValueTensorType::getWithLeastStaticInformation(MLIRContext *context) {
return ValueTensorType::get(context,
-/*optionalSizes=*/None,
+/*optionalSizes=*/std::nullopt,
/*optionalDtype=*/Type());
}

@@ -428,8 +429,8 @@ Type Torch::meetTensorTypes(BaseTensorType lhs, BaseTensorType rhs) {

// If neither has sizes, we have nothing left to do.
if (!lhs.hasSizes() && !rhs.hasSizes()) {
-return ValueTensorType::get(lhs.getContext(), /*optionalSizes=*/None,
-dtype);
+return ValueTensorType::get(lhs.getContext(),
+/*optionalSizes=*/std::nullopt, dtype);
}

// If the number of sizes is different, the two types are contradictory.
4 changes: 2 additions & 2 deletions lib/Dialect/Torch/Transforms/GlobalizeObjectGraph.cpp
@@ -85,7 +85,7 @@ class ObjectGraphInfo {
func::FuncOp methodFunc) {
auto it = funcLinkageInfo.find({instance, methodFunc});
if (it == funcLinkageInfo.end())
-return None;
+return std::nullopt;
return it->second;
}

@@ -638,7 +638,7 @@ static LogicalResult globalizeObjectGraph(ModuleOp module) {
for (auto &monomorphization : tracker.getMonomorphizations()) {
auto newFunc = cast<func::FuncOp>(monomorphization.func->clone());
newFuncs[monomorphization] = newFunc;
-Optional<LinkageInfo> linkageInfo = None;
+Optional<LinkageInfo> linkageInfo = std::nullopt;
// If it is potentially a method, check its linkage info.
if (monomorphization.argInstances.size() != 0 &&
monomorphization.argInstances[0].argIndex == 0) {
24 changes: 12 additions & 12 deletions lib/Dialect/Torch/Transforms/RefineTypes.cpp
@@ -112,9 +112,9 @@ static torch_upstream::TypeKind getTypeKind(Type type) {
}

/// Returns the dtype that assumes information from both `lhs` and `rhs`.
-/// Returns `None` if the types are contradictory. Note this can only be used
-/// on the `dtype` from tensors and can't be used on other types like scalar
-/// types.
+/// Returns `std::nullopt` if the types are contradictory. Note this can only
+/// be used on the `dtype` from tensors and can't be used on other types like
+/// scalar types.
static Optional<Type> meetElementTypes(Type lhs, Type rhs) {
auto isNullOrBuiltIn = [](Type type) { return !type || isBuiltInType(type); };
(void)isNullOrBuiltIn;
@@ -127,7 +127,7 @@ static Optional<Type> meetElementTypes(Type lhs, Type rhs) {
return lhs;
if (lhs == rhs)
return lhs;
-return None;
+return std::nullopt;
}

enum class OptionalKnowledge {
@@ -137,7 +137,7 @@ enum class OptionalKnowledge {
};

/// Returns the OptionalKnowledge that assumes information from both `lhs` and
-/// `rhs`. Returns `None` if the knowledges are contradictory.
+/// `rhs`. Returns `std::nullopt` if the knowledges are contradictory.
static Optional<OptionalKnowledge>
meetOptionalKnowledge(OptionalKnowledge lhs, OptionalKnowledge rhs) {
if (lhs == OptionalKnowledge::unKnown)
@@ -146,7 +146,7 @@ meetOptionalKnowledge(OptionalKnowledge lhs, OptionalKnowledge rhs) {
return lhs;
if (lhs == rhs)
return lhs;
-return None;
+return std::nullopt;
}

static OptionalKnowledge joinOptionalKnowledge(OptionalKnowledge lhs,
@@ -327,7 +327,7 @@ struct ValueKnowledge {

// Given two pieces of static knowledge, calculate new knowledge that assumes
// the facts from both.
-// If the two pieces of knowledge are contradictory, None is returned.
+// If the two pieces of knowledge are contradictory, std::nullopt is returned.
static Optional<ValueKnowledge> meet(const ValueKnowledge &lhs,
const ValueKnowledge &rhs) {
if (!lhs.isInitialized)
@@ -338,13 +338,13 @@ struct ValueKnowledge {
Optional<ValueKnowledge> knowledge = meetTypes(lhs, rhs);

if (!knowledge.has_value())
-return None;
+return std::nullopt;
ValueKnowledge result = knowledge.value();

Optional<OptionalKnowledge> optional =
meetOptionalKnowledge(lhs.optional, rhs.optional);
if (!optional.has_value())
-return None;
+return std::nullopt;
result.optional = optional.value();
return result;
}
@@ -362,7 +362,7 @@ struct ValueKnowledge {
return rhs;
if (lhs == rhs)
return lhs;
-return None;
+return std::nullopt;
}

// We start in the uninitialized state by default.
@@ -559,7 +559,7 @@ static Type getPromotedResultDType(ValueKnowledge *tensor, Type scalarType) {
torch_upstream::ResultTypeState state = {};
// No need to check if rank is zero for tensor because scalar uses
// wrappedResult which is a lower priority than both dimResult and zeroResult.
-state = updateResultTypeState(tensor, /*rankIsNonZero=*/None, state,
+state = updateResultTypeState(tensor, /*rankIsNonZero=*/std::nullopt, state,
/*skipRankCheck=*/true);
state =
updateResultTypeState(getDefaultDtypeForTorchScalar(scalarType), state);
@@ -573,7 +573,7 @@ static SmallVector<Optional<bool>> getRankIsNonZeroArray(ValueRange values) {
if (tensorType.hasSizes()) {
rankIsNonZero.push_back(tensorType.getSizes().size() != 0);
} else {
-rankIsNonZero.push_back(None);
+rankIsNonZero.push_back(std::nullopt);
}
}
}
6 changes: 3 additions & 3 deletions lib/Dialect/Torch/Utils/Utils.cpp
@@ -27,10 +27,10 @@ llvm::Optional<int64_t>
Torch::matchLegalConstantIndexIntoListOfSize(Value v, int64_t length) {
int64_t dim;
if (!matchPattern(v, m_TorchConstantInt(&dim)))
-return llvm::None;
+return std::nullopt;
dim = toPositiveDim(dim, length);
if (!isValidDim(dim, length))
-return llvm::None;
+return std::nullopt;
return dim;
}

@@ -169,7 +169,7 @@ bool Torch::isBuiltInType(Type type) {
Optional<unsigned> Torch::getTensorRank(Value tensor) {
BaseTensorType tensorType = tensor.getType().cast<BaseTensorType>();
if (!tensorType.hasSizes())
-return llvm::None;
+return std::nullopt;
return tensorType.getSizes().size();
}
