-
Notifications
You must be signed in to change notification settings - Fork 13.3k
[CIR] Upstream support for name mangling #137094
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
We have been using the default names for global symbols to this point. This change introduces proper name mangling for functions. This requires introducing a CXXABI class in the CIRGenModule. Because only target independent name mangling is handled in this patch, the CXXABI class does not require a target-specific implementation. The general mechanism for selecting an implementation is introduced here, but the actual target-specific subclasses are deferred until needed.
@llvm/pr-subscribers-clangir @llvm/pr-subscribers-clang Author: Andy Kaylor (andykaylor) ChangesWe have been using the default names for global symbols to this point. This change introduces proper name mangling for functions. This requires introducing a CXXABI class in the CIRGenModule. Because only target independent name mangling is handled in this patch, the CXXABI class does not require a target-specific implementation. The general mechanism for selecting an implementation is introduced here, but the actual target-specific subclasses are deferred until needed. Patch is 66.07 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/137094.diff 22 Files Affected:
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 6bfc1199aea55..045b9ce40f53a 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -151,7 +151,6 @@ struct MissingFeatures {
static bool weakRefReference() { return false; }
static bool hip() { return false; }
static bool setObjCGCLValueClass() { return false; }
- static bool mangledNames() { return false; }
static bool setDLLStorageClass() { return false; }
static bool openMP() { return false; }
static bool emitCheckedInBoundsGEP() { return false; }
@@ -159,6 +158,8 @@ struct MissingFeatures {
static bool bitfields() { return false; }
static bool typeChecks() { return false; }
static bool lambdaFieldToName() { return false; }
+ static bool targetSpecificCXXABI() { return false; }
+ static bool moduleNameHash() { return false; }
// Missing types
static bool dataMemberType() { return false; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
new file mode 100644
index 0000000000000..5279307e19613
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -0,0 +1,46 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ code generation. Concrete subclasses
+// of this implement code generation for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_CIRGENCXXABI_H
+#define LLVM_CLANG_LIB_CIR_CIRGENCXXABI_H
+
+#include "CIRGenModule.h"
+
+#include "clang/AST/Mangle.h"
+
+namespace clang::CIRGen {
+
+/// Implements C++ ABI-specific code generation functions.
+class CIRGenCXXABI {
+protected:
+ CIRGenModule &cgm;
+ std::unique_ptr<clang::MangleContext> mangleContext;
+
+public:
+ // TODO(cir): make this protected when target-specific CIRGenCXXABIs are
+ // implemented.
+ CIRGenCXXABI(CIRGenModule &cgm)
+ : cgm(cgm), mangleContext(cgm.getASTContext().createMangleContext()) {}
+ ~CIRGenCXXABI();
+
+public:
+ /// Gets the mangle context.
+ clang::MangleContext &getMangleContext() { return *mangleContext; }
+};
+
+/// Creates an Itanium-family ABI
+CIRGenCXXABI *CreateCIRGenItaniumCXXABI(CIRGenModule &cgm);
+
+} // namespace clang::CIRGen
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 3b13d495be5e3..3df1fc101b583 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "CIRGenModule.h"
+#include "CIRGenCXXABI.h"
#include "CIRGenConstantEmitter.h"
#include "CIRGenFunction.h"
@@ -30,6 +31,37 @@
using namespace clang;
using namespace clang::CIRGen;
+static CIRGenCXXABI *createCXXABI(CIRGenModule &cgm) {
+ switch (cgm.getASTContext().getCXXABIKind()) {
+ case TargetCXXABI::GenericItanium:
+ case TargetCXXABI::GenericAArch64:
+ case TargetCXXABI::AppleARM64:
+ return CreateCIRGenItaniumCXXABI(cgm);
+
+ case TargetCXXABI::Fuchsia:
+ case TargetCXXABI::GenericARM:
+ case TargetCXXABI::iOS:
+ case TargetCXXABI::WatchOS:
+ case TargetCXXABI::GenericMIPS:
+ case TargetCXXABI::WebAssembly:
+ case TargetCXXABI::XL:
+ case TargetCXXABI::Microsoft:
+ cgm.errorNYI("C++ ABI kind not yet implemented");
+ return nullptr;
+ }
+
+ llvm_unreachable("invalid C++ ABI kind");
+}
+
+namespace clang::CIRGen {
+// TODO(cir): Implement target-specific CIRGenCXXABIs
+CIRGenCXXABI *CreateCIRGenItaniumCXXABI(CIRGenModule &cgm) {
+ assert(!cir::MissingFeatures::targetSpecificCXXABI());
+ return new CIRGenCXXABI(cgm);
+}
+} // namespace clang::CIRGen
+CIRGenCXXABI::~CIRGenCXXABI() {}
+
CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
clang::ASTContext &astContext,
const clang::CodeGenOptions &cgo,
@@ -37,7 +69,8 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
: builder(mlirContext, *this), astContext(astContext),
langOpts(astContext.getLangOpts()), codeGenOpts(cgo),
theModule{mlir::ModuleOp::create(mlir::UnknownLoc::get(&mlirContext))},
- diags(diags), target(astContext.getTargetInfo()), genTypes(*this) {
+ diags(diags), target(astContext.getTargetInfo()),
+ abi(createCXXABI(*this)), genTypes(*this) {
// Initialize cached types
VoidTy = cir::VoidType::get(&getMLIRContext());
@@ -74,6 +107,8 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
builder.getStringAttr(getTriple().str()));
}
+CIRGenModule::~CIRGenModule() = default;
+
CharUnits CIRGenModule::getNaturalTypeAlignment(QualType t,
LValueBaseInfo *baseInfo) {
assert(!cir::MissingFeatures::opTBAA());
@@ -301,9 +336,9 @@ CIRGenModule::getOrCreateCIRGlobal(const VarDecl *d, mlir::Type ty,
if (!ty)
ty = getTypes().convertTypeForMem(astTy);
- assert(!cir::MissingFeatures::mangledNames());
- return getOrCreateCIRGlobal(d->getIdentifier()->getName(), ty,
- astTy.getAddressSpace(), d, isForDefinition);
+ StringRef mangledName = getMangledName(d);
+ return getOrCreateCIRGlobal(mangledName, ty, astTy.getAddressSpace(), d,
+ isForDefinition);
}
/// Return the mlir::Value for the address of the given global variable. If
@@ -639,13 +674,82 @@ cir::FuncOp CIRGenModule::getAddrOfFunction(clang::GlobalDecl gd,
funcType = convertType(fd->getType());
}
- assert(!cir::MissingFeatures::mangledNames());
- cir::FuncOp func = getOrCreateCIRFunction(
- cast<NamedDecl>(gd.getDecl())->getIdentifier()->getName(), funcType, gd,
- forVTable, dontDefer, /*isThunk=*/false, isForDefinition);
+ StringRef mangledName = getMangledName(gd);
+ cir::FuncOp func =
+ getOrCreateCIRFunction(mangledName, funcType, gd, forVTable, dontDefer,
+ /*isThunk=*/false, isForDefinition);
return func;
}
+static std::string getMangledNameImpl(CIRGenModule &cgm, GlobalDecl gd,
+ const NamedDecl *nd) {
+ SmallString<256> buffer;
+
+ llvm::raw_svector_ostream out(buffer);
+ MangleContext &mc = cgm.getCXXABI().getMangleContext();
+
+ assert(!cir::MissingFeatures::moduleNameHash());
+
+ if (mc.shouldMangleDeclName(nd)) {
+ mc.mangleName(gd.getWithDecl(nd), out);
+ } else {
+ IdentifierInfo *ii = nd->getIdentifier();
+ assert(ii && "Attempt to mangle unnamed decl.");
+
+ const auto *fd = dyn_cast<FunctionDecl>(nd);
+ if (fd &&
+ fd->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
+ cgm.errorNYI(nd->getSourceRange(), "getMangledName: X86RegCall");
+ out << ii->getName();
+ } else if (fd && fd->hasAttr<CUDAGlobalAttr>() &&
+ gd.getKernelReferenceKind() == KernelReferenceKind::Stub) {
+ out << "__device_stub__" << ii->getName();
+ } else {
+ out << ii->getName();
+ }
+ }
+
+ // Check if the module name hash should be appended for internal linkage
+ // symbols. This should come before multi-version target suffixes are
+ // appended. This is to keep the name and module hash suffix of the internal
+ // linkage function together. The unique suffix should only be added when name
+ // mangling is done to make sure that the final name can be properly
+ // demangled. For example, for C functions without prototypes, name mangling
+ // is not done and the unique suffix should not be appended then.
+ assert(!cir::MissingFeatures::moduleNameHash());
+
+ if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
+ if (fd->isMultiVersion()) {
+ cgm.errorNYI(nd->getSourceRange(),
+ "getMangledName: multi-version functions");
+ }
+ }
+ if (cgm.getLangOpts().GPURelocatableDeviceCode) {
+ cgm.errorNYI(nd->getSourceRange(),
+ "getMangledName: GPU relocatable device code");
+ }
+
+ return std::string(out.str());
+}
+
+StringRef CIRGenModule::getMangledName(GlobalDecl gd) {
+ GlobalDecl canonicalGd = gd.getCanonicalDecl();
+
+ // Some ABIs don't have constructor variants. Make sure that base and complete
+ // constructors get mangled the same.
+ if (const auto *cd = dyn_cast<CXXConstructorDecl>(canonicalGd.getDecl())) {
+ errorNYI(cd->getSourceRange(), "getMangledName: C++ constructor");
+ return cast<NamedDecl>(gd.getDecl())->getIdentifier()->getName();
+ }
+
+ // Keep the first result in the case of a mangling collision.
+ const auto *nd = cast<NamedDecl>(gd.getDecl());
+ std::string mangledName = getMangledNameImpl(*this, gd, nd);
+
+ auto result = manglings.insert(std::make_pair(mangledName, gd));
+ return mangledDeclNames[canonicalGd] = result.first->first();
+}
+
cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
StringRef mangledName, mlir::Type funcType, GlobalDecl gd, bool forVTable,
bool dontDefer, bool isThunk, ForDefinition_t isForDefinition,
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 1fb97334d7bd2..1c14959700cf9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -45,6 +45,7 @@ class VarDecl;
namespace CIRGen {
class CIRGenFunction;
+class CIRGenCXXABI;
enum ForDefinition_t : bool { NotForDefinition = false, ForDefinition = true };
@@ -59,7 +60,7 @@ class CIRGenModule : public CIRGenTypeCache {
const clang::CodeGenOptions &cgo,
clang::DiagnosticsEngine &diags);
- ~CIRGenModule() = default;
+ ~CIRGenModule();
private:
mutable std::unique_ptr<TargetCIRGenInfo> theTargetCIRGenInfo;
@@ -80,6 +81,8 @@ class CIRGenModule : public CIRGenTypeCache {
const clang::TargetInfo &target;
+ std::unique_ptr<CIRGenCXXABI> abi;
+
CIRGenTypes genTypes;
/// Per-function codegen information. Updated everytime emitCIR is called
@@ -94,6 +97,8 @@ class CIRGenModule : public CIRGenTypeCache {
const clang::CodeGenOptions &getCodeGenOpts() const { return codeGenOpts; }
CIRGenTypes &getTypes() { return genTypes; }
const clang::LangOptions &getLangOpts() const { return langOpts; }
+
+ CIRGenCXXABI &getCXXABI() const { return *abi; }
mlir::MLIRContext &getMLIRContext() { return *builder.getContext(); }
const cir::CIRDataLayout getDataLayout() const {
@@ -169,6 +174,8 @@ class CIRGenModule : public CIRGenTypeCache {
/// expression of the given type.
mlir::Value emitNullConstant(QualType t, mlir::Location loc);
+ llvm::StringRef getMangledName(clang::GlobalDecl gd);
+
cir::FuncOp
getOrCreateCIRFunction(llvm::StringRef mangledName, mlir::Type funcType,
clang::GlobalDecl gd, bool forVTable,
@@ -226,6 +233,11 @@ class CIRGenModule : public CIRGenTypeCache {
const T &name) {
return errorNYI(loc.getBegin(), feature, name) << loc;
}
+
+private:
+ // An ordered map of canonical GlobalDecls to their mangled names.
+ llvm::MapVector<clang::GlobalDecl, llvm::StringRef> mangledDeclNames;
+ llvm::StringMap<clang::GlobalDecl, llvm::BumpPtrAllocator> manglings;
};
} // namespace CIRGen
diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp
index 08f6d730f161a..cae970aedf95d 100644
--- a/clang/test/CIR/CodeGen/array.cpp
+++ b/clang/test/CIR/CodeGen/array.cpp
@@ -93,7 +93,7 @@ void func() {
// CIR: %[[TMP:.*]] = cir.load %[[ELE_PTR]] : !cir.ptr<!s32i>, !s32i
// CIR" cir.store %[[TMP]], %[[INIT_2]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define void @func()
+// LLVM: define void @_Z4funcv()
// LLVM-NEXT: %[[ARR:.*]] = alloca [10 x i32], i64 1, align 16
// LLVM-NEXT: %[[INIT:.*]] = alloca i32, i64 1, align 4
// LLVM-NEXT: %[[INIT_2:.*]] = alloca i32, i64 1, align 4
@@ -135,7 +135,7 @@ void func2() {
// CIR: %[[ELE_1_PTR:.*]] = cir.ptr_stride(%[[LOAD_1]] : !cir.ptr<!s32i>, %[[OFFSET_1]] : !s64i), !cir.ptr<!s32i>
// CIR: cir.store %[[ELE_1_PTR]], %[[ELE_ALLOCA]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
-// LLVM: define void @func2()
+// LLVM: define void @_Z5func2v()
// LLVM: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4
// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8
// LLVM: %[[ARR_PTR:.*]] = getelementptr i32, ptr %[[ARR]], i32 0
@@ -175,7 +175,7 @@ void func3() {
// CIR: %[[ELE_TMP:.*]] = cir.load %[[ELE_PTR]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store %[[ELE_TMP]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define void @func3()
+// LLVM: define void @_Z5func3v()
// LLVM: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4
// LLVM: %[[IDX:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
@@ -227,7 +227,7 @@ void func4() {
// CIR: %[[TMP:.*]] = cir.load %[[ELE_0]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store %[[TMP]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define void @func4()
+// LLVM: define void @_Z5func4v()
// LLVM: %[[ARR:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR]], i32 0
@@ -271,7 +271,7 @@ void func5() {
// CIR: %10 = cir.ptr_stride(%7 : !cir.ptr<!cir.array<!s32i x 1>>, %[[OFFSET_1]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
// CIR: cir.store %10, %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>
-// LLVM: define void @func5()
+// LLVM: define void @_Z5func5v()
// LLVM: %[[ARR:.*]] = alloca [2 x [1 x i32]], i64 1, align 4
// LLVM: %[[TMP:.*]] = alloca ptr, i64 1, align 8
// LLVM: %[[ARR_PTR:.*]] = getelementptr [1 x i32], ptr %[[ARR]], i32 0
@@ -304,7 +304,7 @@ void func6() {
// CIR: %[[V1:.*]] = cir.const #cir.int<5> : !s32i
// CIR: cir.store %[[V1]], %[[ELE_PTR]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define void @func6()
+// LLVM: define void @_Z5func6v()
// LLVM: %[[VAR:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[ARR:.*]] = alloca [2 x i32], i64 1, align 4
// LLVM: store i32 4, ptr %[[VAR]], align 4
@@ -337,7 +337,7 @@ void func7() {
// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[TMP]] : !cir.ptr<!cir.ptr<!s32i>>, %[[OFFSET]] : !s64i), !cir.ptr<!cir.ptr<!s32i>>
// CIR: cir.store %[[ELE_PTR]], %[[ARR_TMP]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
-// LLVM: define void @func7()
+// LLVM: define void @_Z5func7v()
// LLVM: %[[ARR:.*]] = alloca [1 x ptr], i64 1, align 8
// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8
// LLVM: %[[ELE_PTR:.*]] = getelementptr ptr, ptr %[[ARR]], i32 0
@@ -355,7 +355,7 @@ void func8(int arr[10]) {
int e2 = arr[1];
}
-// CIR: cir.func @func8(%[[ARG:.*]]: !cir.ptr<!s32i>
+// CIR: cir.func @_Z5func8Pi(%[[ARG:.*]]: !cir.ptr<!s32i>
// CIR: %[[ARR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arr", init]
// CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
// CIR: %[[INIT_2:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e2", init]
@@ -371,7 +371,7 @@ void func8(int arr[10]) {
// CIR: %[[TMP_4:.*]] = cir.load %[[ELE_1]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store %[[TMP_4]], %[[INIT_2]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define void @func8(ptr %[[ARG:.*]])
+// LLVM: define void @_Z5func8Pi(ptr %[[ARG:.*]])
// LLVM: %[[ARR:.*]] = alloca ptr, i64 1, align 8
// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[INIT_2:.*]] = alloca i32, i64 1, align 4
@@ -402,7 +402,7 @@ void func9(int arr[10][5]) {
int e = arr[1][2];
}
-// CIR: cir.func @func9(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>
+// CIR: cir.func @_Z5func9PA5_i(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>
// CIR: %[[ARR:.*]] = cir.alloca !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, ["arr", init]
// CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
// CIR: cir.store %[[ARG]], %[[ARR]] : !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
@@ -415,7 +415,7 @@ void func9(int arr[10][5]) {
// CIR: %[[TMP_2:.*]] = cir.load %[[ARR_1_2]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store %[[TMP_2]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define void @func9(ptr %[[ARG:.*]])
+// LLVM: define void @_Z5func9PA5_i(ptr %[[ARG:.*]])
// LLVM: %[[ARR:.*]] = alloca ptr, i64 1, align 8
// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
// LLVM: store ptr %[[ARG]], ptr %[[ARR]], align 8
@@ -439,7 +439,7 @@ void func10(int *a) {
int e = a[5];
}
-// CIR: cir.func @func10(%[[ARG:.*]]: !cir.ptr<!s32i>
+// CIR: cir.func @_Z6func10Pi(%[[ARG:.*]]: !cir.ptr<!s32i>
// CIR: %[[ARR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["a", init]
// CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
// CIR: cir.store %[[ARG]], %[[ARR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
@@ -449,7 +449,7 @@ void func10(int *a) {
// CIR: %[[TMP_2:.*]] = cir.load %[[ELE]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store %[[TMP_2]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
-// LLVM: define void @func10(ptr %[[ARG:.*]]) {
+// LLVM: define void @_Z6func10Pi(ptr %[[ARG:.*]]) {
// LLVM: %[[ARR:.*]] = alloca ptr, i64 1, align 8
// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
// LLVM: store ptr %[[ARG]], ptr %[[ARR]], align 8
diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp
index 0f8431325a86f..1f289e905dd09 100644
--- a/clang/test/CIR/CodeGen/basic.cpp
+++ b/clang/test/CIR/CodeGen/basic.cpp
@@ -6,7 +6,7 @@ int f1() {
}
// CHECK: module
-// CHECK: cir.func @f1() -> !s32i
+// CHECK: cir.func @_Z2f1v() -> !s32i
// CHECK: %[[RV:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
// CHECK: %[[I_PTR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i"] {alignment = 4 : i64}
// CHECK: %[[I:.*]] = cir.load %[[I_PTR]] : !cir.ptr<!s32i>, !s32i
@@ -19,7 +19,7 @@ int f2() {
return i;
}
-// CHECK: cir.func @f2() -> !s32i
+// CHECK: cir.func @_Z2f2v() -> !s32i
// CHECK: %[[RV:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
// CHECK: %[[I_PTR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i", init, const] {alignment = 4 : i64}
// CHECK: %[[TWO:.*]] = cir.const #cir.int<2> : !s32i
@@ -33,7 +33,7 @@ int f3(int i) {
return i;
}
-// CHECK: cir.func @f3(%[[ARG:.*]]: !s32i loc({{.*}})) -> !s32i
+// CHECK: cir.func @_Z2f3i(%[[ARG:.*]]: !s32i loc({{.*}})) -> !s32i
// CHECK: %[[ARG_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i", init] {alignment = 4 : i64}
// CHECK: %[[RV:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
// CHECK: cir.store %[[ARG]], %[[ARG_ALLOCA]] : !s32i, !cir.ptr<!s32i>
@@ -46,7 +46,7 @@ int f4(const int i) {
return i;
}
-// CHECK: cir.func @f4(%[[ARG:.*]]: !s32i loc({{.*}})) -> !s32i
+// CHECK: cir.func @_Z2f4i(%[[ARG:.*]]: !s32i loc({{.*}})) -> !s32i
// CHECK: %[[ARG_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i", init, const] {alignment = 4 : i64}
// CHECK: %[[RV:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
// CHECK: cir.store %[[ARG]], %[[ARG_ALLOCA]] : !s32i, !cir.ptr<!s32i>
@@ -66,7 +66,7 @@ int *f5() {
return p;
}
-// CHECK: cir.func @f5() -> !cir.ptr<!s32i>
+// CHECK: cir.func @_Z2f5v() -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[RET_ADDR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["__retval"] {alignment = 8 : i64}
// CHECK-NEXT: %[[P_ADDR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["p", init] {alignment = 8 : i64}
// CHECK-NEXT: %[[NULLPTR:.*]] = cir.const #cir.ptr<null> : !cir.ptr<!s32i>
@@ -95,7 +95,7 @@ size_type max_size() {
return size_type(~0) / sizeof(_Tp);
}
-// CHECK: cir.func @max_size()
+// CHECK: cir.func @_Z8max_sizev() -> !u64i
// CHECK: %0 = cir.alloca !u64i, !cir.ptr<!u64i>, ["__retval"] {alignment = 8 : i64}
// CHECK: %1 = cir.const #cir.int<0> : !s32i
// CHECK: %2 = cir.unary(not, %1) : !s32i, !s32i
diff --...
[truncated]
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Seems alright so far. Though if we're going to add the CUDA global, we should have a test for it.
out << ii->getName();
} else if (fd && fd->hasAttr<CUDAGlobalAttr>() &&
           gd.getKernelReferenceKind() == KernelReferenceKind::Stub) {
  out << "__device_stub__" << ii->getName();
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We should either have tests for this, or just leave this errorNYI.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Agreed. I don't think we can hit this yet. I'll make it NYI.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
✅ With the latest revision this PR passed the C/C++ code formatter. |
We have been using the default names for global symbols to this point. This change introduces proper name mangling for functions.
This requires introducing a CXXABI class in the CIRGenModule. Because only target independent name mangling is handled in this patch, the CXXABI class does not require a target-specific implementation. The general mechanism for selecting an implementation is introduced here, but the actual target-specific subclasses are deferred until needed.